Compare commits


1 Commit

Author: Alex Corey
SHA1: 26a947ed31
Message: Adds functionality to add multiple rrules to a schedule and save the form
Date: 2023-01-09 09:27:47 -05:00
725 changed files with 14237 additions and 26846 deletions


@@ -106,13 +106,6 @@ The Ansible Community is looking at building an EE that corresponds to all of th
### Oracle AWX
We'd be happy to help if you can reproduce this with AWX, since we do not have Oracle's Linux Automation Manager. If you need help with this specific version of Oracle's Linux Automation Manager, you will need to contact Oracle for support.
### Community Resolved
Hi,
We are happy to see that it appears a fix has been provided for your issue, so we will go ahead and close this ticket. Please feel free to reopen if any other problems arise.
<name of community member who helped> thanks so much for taking the time to write a thoughtful and helpful response to this issue!
### AWX Release
Subject: Announcing AWX Xa.Ya.za and AWX-Operator Xb.Yb.zb


@@ -1,10 +1,8 @@
---
name: CI
env:
BRANCH: ${{ github.base_ref || 'devel' }}
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on:
pull_request:
jobs:
@@ -20,33 +18,85 @@ jobs:
tests:
- name: api-test
command: /start_tests.sh
label: Run API Tests
- name: api-lint
command: /var/lib/awx/venv/awx/bin/tox -e linters
label: Run API Linters
- name: api-swagger
command: /start_tests.sh swagger
label: Generate API Reference
- name: awx-collection
command: /start_tests.sh test_collection_all
label: Run Collection Tests
- name: api-schema
label: Check API Schema
command: /start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }}
- name: ui-lint
label: Run UI Linters
command: make ui-lint
- name: ui-test-screens
label: Run UI Screens Tests
command: make ui-test-screens
- name: ui-test-general
label: Run UI General Tests
command: make ui-test-general
steps:
- uses: actions/checkout@v2
- name: Run check ${{ matrix.tests.name }}
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :
- name: Build image
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build
- name: ${{ matrix.tests.label }}
run: |
docker run -u $(id -u) --rm -v ${{ github.workspace}}:/awx_devel/:Z \
--workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} ${{ matrix.tests.command }}
dev-env:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ env.BRANCH }} || :
- name: Build image
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ env.BRANCH }} make docker-compose-build
- name: Run smoke test
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
run: |
export DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }}
export COMPOSE_TAG=${{ env.BRANCH }}
ansible-playbook tools/docker-compose/ansible/smoke-test.yml -e repo_dir=$(pwd) -v
awx-operator:
runs-on: ubuntu-latest


@@ -7,7 +7,6 @@ on:
branches:
- devel
- release_*
- feature_*
jobs:
push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
@@ -21,12 +20,6 @@ jobs:
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
run: |
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
env:
OWNER: '${{ github.repository_owner }}'
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
@@ -38,18 +31,15 @@ jobs:
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/} || :
- name: Build images
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
- name: Push image
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
docker push ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/}


@@ -17,9 +17,9 @@ jobs:
env:
PR_BODY: ${{ github.event.pull_request.body }}
run: |
echo "$PR_BODY" | grep "Bug, Docs Fix or other nominal change" > Z
echo "$PR_BODY" | grep "New or Enhanced Feature" > Y
echo "$PR_BODY" | grep "Breaking Change" > X
echo $PR_BODY | grep "Bug, Docs Fix or other nominal change" > Z
echo $PR_BODY | grep "New or Enhanced Feature" > Y
echo $PR_BODY | grep "Breaking Change" > X
exit 0
# We exit 0 and set the shell to prevent the returns from the greps from failing this step
# See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference


@@ -10,7 +10,6 @@ on:
jobs:
promote:
if: endsWith(github.repository, '/awx')
runs-on: ubuntu-latest
steps:
- name: Checkout awx
@@ -39,13 +38,9 @@ jobs:
- name: Build collection and publish to galaxy
run: |
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
echo "Galaxy release already done"; \
else \
ansible-galaxy collection publish \
--token=${{ secrets.GALAXY_TOKEN }} \
awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz; \
fi
ansible-galaxy collection publish \
--token=${{ secrets.GALAXY_TOKEN }} \
awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz
- name: Set official pypi info
run: echo pypi_repo=pypi >> $GITHUB_ENV
@@ -57,7 +52,6 @@ jobs:
- name: Build awxkit and upload to pypi
run: |
git reset --hard
cd awxkit && python3 setup.py bdist_wheel
twine upload \
-r ${{ env.pypi_repo }} \
@@ -80,6 +74,4 @@ jobs:
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository }}:latest
docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}


@@ -21,7 +21,6 @@ on:
jobs:
stage:
if: endsWith(github.repository, '/awx')
runs-on: ubuntu-latest
permissions:
packages: write
@@ -85,20 +84,6 @@ jobs:
-e push=yes \
-e awx_official=yes
- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: tag awx-ee:latest with version input
run: |
docker pull quay.io/ansible/awx-ee:latest
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Build and stage awx-operator
working-directory: awx-operator
run: |
@@ -118,7 +103,6 @@ jobs:
env:
AWX_TEST_IMAGE: ${{ github.repository }}
AWX_TEST_VERSION: ${{ github.event.inputs.version }}
AWX_EE_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Create draft release for AWX
working-directory: awx

.gitignore (3 lines changed)

@@ -161,6 +161,3 @@ use_dev_supervisor.txt
/_build/
/_build_kube_dev/
/Dockerfile.kube-dev
awx/ui_next/src
awx/ui_next/build


@@ -6,7 +6,6 @@ recursive-include awx/templates *.html
recursive-include awx/api/templates *.md *.html *.yml
recursive-include awx/ui/build *.html
recursive-include awx/ui/build *
recursive-include awx/ui_next/build *
recursive-include awx/playbooks *.yml
recursive-include awx/lib/site-packages *
recursive-include awx/plugins *.ps1

Makefile (117 lines changed)

@@ -1,7 +1,4 @@
-include awx/ui_next/Makefile
PYTHON ?= python3.9
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
NODE ?= node
NPM_BIN ?= npm
@@ -68,7 +65,7 @@ I18N_FLAG_FILE = .i18n_built
sdist \
ui-release ui-devel \
VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit github_ci_setup github_ci_runner
.git/hooks/pre-commit
clean-tmp:
rm -rf tmp/
@@ -86,7 +83,7 @@ clean-schema:
clean-languages:
rm -f $(I18N_FLAG_FILE)
find ./awx/locale/ -type f -regex '.*\.mo$$' -delete
find ./awx/locale/ -type f -regex ".*\.mo$" -delete
## Remove temporary build files, compiled Python files.
clean: clean-ui clean-api clean-awxkit clean-dist
@@ -206,7 +203,19 @@ uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
uwsgi /etc/tower/uwsgi.ini
uwsgi -b 32768 \
--socket 127.0.0.1:8050 \
--module=awx.wsgi:application \
--home=/var/lib/awx/venv/awx \
--chdir=/awx_devel/ \
--vacuum \
--processes=5 \
--harakiri=120 --master \
--no-orphans \
--max-requests=1000 \
--stats /tmp/stats.socket \
--lazy-apps \
--logformat "%(addr) %(method) %(uri) - %(proto) %(status)"
awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
@@ -217,6 +226,12 @@ daphne:
fi; \
daphne -b 127.0.0.1 -p 8051 awx.asgi:channel_layer
wsbroadcast:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_wsbroadcast
## Run to start the background task dispatcher for development.
dispatcher:
@if [ "$(VENV_BASE)" ]; then \
@@ -224,6 +239,7 @@ dispatcher:
fi; \
$(PYTHON) manage.py run_dispatcher
## Run to start the zeromq callback receiver
receiver:
@if [ "$(VENV_BASE)" ]; then \
@@ -240,34 +256,6 @@ jupyter:
fi; \
$(MANAGEMENT_COMMAND) shell_plus --notebook
## Start the rsyslog configurer process in background in development environment.
run-rsyslog-configurer:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_rsyslog_configurer
## Start cache_clear process in background in development environment.
run-cache-clear:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_cache_clear
## Start the wsrelay process in background in development environment.
run-wsrelay:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_wsrelay
## Start the heartbeat process in background in development environment.
run-heartbeet:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_heartbeet
reports:
mkdir -p $@
@@ -313,21 +301,6 @@ test:
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
## Login to Github container image registry, pull image, then build image.
github_ci_setup:
# GITHUB_ACTOR is automatic github actions env var
# CI_GITHUB_TOKEN is defined in .github files
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
make docker-compose-build
## Runs AWX_DOCKER_CMD inside a new docker container.
docker-runner:
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
github_ci_runner: github_ci_setup docker-runner
test_collection:
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
if [ "$(VENV_BASE)" ]; then \
@@ -434,14 +407,12 @@ ui-release: $(UI_BUILD_FLAG_FILE)
ui-devel: awx/ui/node_modules
@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
@if [ -d "/var/lib/awx" ] ; then \
mkdir -p /var/lib/awx/public/static/css; \
mkdir -p /var/lib/awx/public/static/js; \
mkdir -p /var/lib/awx/public/static/media; \
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
fi
mkdir -p /var/lib/awx/public/static/css
mkdir -p /var/lib/awx/public/static/js
mkdir -p /var/lib/awx/public/static/media
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
ui-devel-instrumented: awx/ui/node_modules
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -468,12 +439,11 @@ ui-test-general:
$(NPM_BIN) run --prefix awx/ui pretest
$(NPM_BIN) run --prefix awx/ui/ test-general --runInBand
# NOTE: The make target ui-next is imported from awx/ui_next/Makefile
HEADLESS ?= no
ifeq ($(HEADLESS), yes)
dist/$(SDIST_TAR_FILE):
else
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE) ui-next
dist/$(SDIST_TAR_FILE): $(UI_BUILD_FLAG_FILE)
endif
$(PYTHON) -m build -s
ln -sf $(SDIST_TAR_FILE) dist/awx.tar.gz
@@ -521,21 +491,23 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA) $(EXTRA_SOURCES_ANSIBLE_OPTS)
docker-compose: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose-credential-plugins: awx/projects docker-compose-sources
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
docker-compose-test: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /bin/bash
docker-compose-runtest: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
docker-compose-build-swagger: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
docker-compose -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
SCHEMA_DIFF_BASE_BRANCH ?= devel
detect-schema-change: genschema
@@ -544,7 +516,7 @@ detect-schema-change: genschema
diff -u -b reference-schema.json schema.json
docker-compose-clean: awx/projects
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose -f tools/docker-compose/_sources/docker-compose.yml rm -sf
docker-compose-container-group-clean:
@if [ -f "tools/docker-compose-minikube/_sources/minikube" ]; then \
@@ -560,8 +532,10 @@ docker-compose-build:
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
if [ "$(shell docker images | grep awx_devel)" ]; then \
docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
fi
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -570,10 +544,10 @@ docker-refresh: docker-clean docker-compose
## Docker Development Environment with Elastic Stack Connected
docker-compose-elk: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-cluster-elk: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose -f tools/docker-compose/_sources/docker-compose.yml -f tools/elastic/docker-compose.logstash-link-cluster.yml -f tools/elastic/docker-compose.elastic-override.yml up --no-recreate
docker-compose-container-group:
MINIKUBE_CONTAINER_GROUP=true make docker-compose
@@ -595,7 +569,6 @@ VERSION:
PYTHON_VERSION:
@echo "$(PYTHON)" | sed 's:python::'
.PHONY: Dockerfile
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
@@ -676,7 +649,3 @@ help/generate:
} \
{ lastLine = $$0 }' $(MAKEFILE_LIST) | sort -u
@printf "\n"
## Display help for ui-next targets
help/ui-next:
@make -s help MAKEFILE_LIST="awx/ui_next/Makefile"


@@ -67,6 +67,7 @@ else:
from django.db import connection
if HAS_DJANGO is True:
# See upgrade blocker note in requirements/README.md
try:
names_digest('foo', 'bar', 'baz', length=8)


@@ -1,4 +1,5 @@
# Django
from django.conf import settings
from django.utils.translation import gettext_lazy as _
# Django REST Framework
@@ -8,7 +9,6 @@ from rest_framework import serializers
from awx.conf import fields, register, register_validate
from awx.api.fields import OAuth2ProviderField
from oauth2_provider.settings import oauth2_settings
from awx.sso.common import is_remote_auth_enabled
register(
@@ -96,20 +96,22 @@ register(
category=_('Authentication'),
category_slug='authentication',
)
register(
'ALLOW_METRICS_FOR_ANONYMOUS_USERS',
field_class=fields.BooleanField,
default=False,
label=_('Allow anonymous users to poll metrics'),
help_text=_('If true, anonymous users are allowed to poll metrics.'),
category=_('Authentication'),
category_slug='authentication',
)
def authentication_validate(serializer, attrs):
if attrs.get('DISABLE_LOCAL_AUTH', False) and not is_remote_auth_enabled():
raise serializers.ValidationError(_("There are no remote authentication systems configured."))
remote_auth_settings = [
'AUTH_LDAP_SERVER_URI',
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
'SOCIAL_AUTH_GITHUB_KEY',
'SOCIAL_AUTH_GITHUB_ORG_KEY',
'SOCIAL_AUTH_GITHUB_TEAM_KEY',
'SOCIAL_AUTH_SAML_ENABLED_IDPS',
'RADIUS_SERVER',
'TACACSPLUS_HOST',
]
if attrs.get('DISABLE_LOCAL_AUTH', False):
if not any(getattr(settings, s, None) for s in remote_auth_settings):
raise serializers.ValidationError(_("There are no remote authentication systems configured."))
return attrs


@@ -80,6 +80,7 @@ class VerbatimField(serializers.Field):
class OAuth2ProviderField(fields.DictField):
default_error_messages = {'invalid_key_names': _('Invalid key names: {invalid_key_names}')}
valid_key_names = {'ACCESS_TOKEN_EXPIRE_SECONDS', 'AUTHORIZATION_CODE_EXPIRE_SECONDS', 'REFRESH_TOKEN_EXPIRE_SECONDS'}
child = fields.IntegerField(min_value=1)


@@ -155,11 +155,12 @@ class FieldLookupBackend(BaseFilterBackend):
'search',
)
# A list of fields that we know can be filtered on without the possibility
# A list of fields that we know can be filtered on without the possiblity
# of introducing duplicates
NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)
def get_fields_from_lookup(self, model, lookup):
if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
path, suffix = lookup.rsplit('__', 1)
else:
@@ -268,7 +269,7 @@ class FieldLookupBackend(BaseFilterBackend):
continue
# HACK: make `created` available via API for the Django User ORM model
# so it keep compatibility with other objects which exposes the `created` attr.
# so it keep compatiblity with other objects which exposes the `created` attr.
if queryset.model._meta.object_name == 'User' and key.startswith('created'):
key = key.replace('created', 'date_joined')


@@ -28,7 +28,7 @@ from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from rest_framework import views
from rest_framework.permissions import IsAuthenticated
from rest_framework.permissions import AllowAny
from rest_framework.renderers import StaticHTMLRenderer
from rest_framework.negotiation import DefaultContentNegotiation
@@ -135,6 +135,7 @@ def get_default_schema():
class APIView(views.APIView):
schema = get_default_schema()
versioning_class = URLPathVersioning
@@ -674,7 +675,7 @@ class SubListCreateAttachDetachAPIView(SubListCreateAPIView):
location = None
created = True
# Retrieve the sub object (whether created or by ID).
# Retrive the sub object (whether created or by ID).
sub = get_object_or_400(self.model, pk=sub_id)
# Verify we have permission to attach.
@@ -799,6 +800,7 @@ class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
class ResourceAccessList(ParentMixin, ListAPIView):
serializer_class = ResourceAccessListElementSerializer
ordering = ('username',)
@@ -821,8 +823,9 @@ def trigger_delayed_deep_copy(*args, **kwargs):
class CopyAPIView(GenericAPIView):
serializer_class = CopySerializer
permission_classes = (IsAuthenticated,)
permission_classes = (AllowAny,)
copy_return_serializer_class = None
new_in_330 = True
new_in_api_v2 = True


@@ -128,7 +128,7 @@ class Metadata(metadata.SimpleMetadata):
# Special handling of notification configuration where the required properties
# are conditional on the type selected.
if field.field_name == 'notification_configuration':
for notification_type_name, notification_tr_name, notification_type_class in NotificationTemplate.NOTIFICATION_TYPES:
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.init_parameters
# Special handling of notification messages where the required properties
@@ -138,7 +138,7 @@ class Metadata(metadata.SimpleMetadata):
except (AttributeError, KeyError):
view_model = None
if view_model == NotificationTemplate and field.field_name == 'messages':
for notification_type_name, notification_tr_name, notification_type_class in NotificationTemplate.NOTIFICATION_TYPES:
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.default_messages
# Update type of fields returned...


@@ -24,6 +24,7 @@ class DisabledPaginator(DjangoPaginator):
class Pagination(pagination.PageNumberPagination):
page_size_query_param = 'page_size'
max_page_size = settings.MAX_PAGE_SIZE
count_disabled = False


@@ -22,6 +22,7 @@ class SurrogateEncoder(encoders.JSONEncoder):
class DefaultJSONRenderer(renderers.JSONRenderer):
encoder_class = SurrogateEncoder
@@ -60,7 +61,7 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
delattr(renderer_context['view'], '_request')
def get_raw_data_form(self, data, view, method, request):
# Set a flag on the view to indicate to the view/serializer that we're
# Set a flag on the view to indiciate to the view/serializer that we're
# creating a raw data form for the browsable API. Store the original
# request method to determine how to populate the raw data form.
if request.method in {'OPTIONS', 'DELETE'}:
@@ -94,6 +95,7 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer):
class PlainTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
@@ -104,15 +106,18 @@ class PlainTextRenderer(renderers.BaseRenderer):
class DownloadTextRenderer(PlainTextRenderer):
format = "txt_download"
class AnsiTextRenderer(PlainTextRenderer):
media_type = 'text/plain'
format = 'ansi'
class AnsiDownloadRenderer(PlainTextRenderer):
format = "ansi_download"

File diff suppressed because it is too large.


@@ -7,12 +7,10 @@ the following fields (some fields may not be visible to all users):
* `project_base_dir`: Path on the server where projects and playbooks are \
stored.
* `project_local_paths`: List of directories beneath `project_base_dir` to
use when creating/editing a manual project.
use when creating/editing a project.
* `time_zone`: The configured time zone for the server.
* `license_info`: Information about the current license.
* `version`: Version of Ansible Tower package installed.
* `custom_virtualenvs`: Deprecated venv locations from before migration to
execution environments. Export tooling is in `awx-manage` commands.
* `eula`: The current End-User License Agreement
{% endifmeth %}
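
A minimal sketch of reading these fields, assuming a reachable AWX/Tower API at a hypothetical base URL and a token in the AWX_TOKEN environment variable; the /api/v2/config/ path is the conventional location of this resource and is an assumption here, not something stated in this diff.

```python
# Hypothetical sketch: fetch the config resource and print a few documented fields.
import os
import requests

BASE_URL = "https://awx.example.com"        # assumed base URL
TOKEN = os.environ["AWX_TOKEN"]             # assumed API token

resp = requests.get(
    f"{BASE_URL}/api/v2/config/",           # assumed path of the config resource
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
config = resp.json()

# Fields named in the documentation above; some may be absent for non-admin users.
for key in ("time_zone", "version", "license_info", "project_base_dir"):
    print(key, "=", config.get(key))
```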


@@ -0,0 +1,4 @@
Version 1 of the Ansible Tower REST API.
Make a GET request to this resource to obtain a list of all child resources
available via the API.


@@ -1,41 +0,0 @@
# Bulk Host Create
This endpoint allows the client to create multiple hosts and associate them with an inventory by providing the inventory ID and a list of the JSON objects that would normally be provided to create each host (a usage sketch follows the return data below).
Example:
{
"inventory": 1,
"hosts": [
{"name": "example1.com", "variables": "ansible_connection: local"},
{"name": "example2.com"}
]
}
Return data:
{
"url": "/api/v2/inventories/3/hosts/",
"hosts": [
{
"name": "example1.com",
"enabled": true,
"instance_id": "",
"description": "",
"variables": "ansible_connection: local",
"id": 1255,
"url": "/api/v2/hosts/1255/",
"inventory": "/api/v2/inventories/3/"
},
{
"name": "example2.com",
"enabled": true,
"instance_id": "",
"description": "",
"variables": "",
"id": 1256,
"url": "/api/v2/hosts/1256/",
"inventory": "/api/v2/inventories/3/"
}
]
}
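
A minimal usage sketch, assuming the /api/v2/bulk/host_create/ route registered in the URL configuration later in this diff, a hypothetical base URL, and a token in AWX_TOKEN; the payload and response shape follow the example above.

```python
# Hypothetical sketch: create two hosts in inventory 1 with a single request.
import os
import requests

BASE_URL = "https://awx.example.com"        # assumed base URL
TOKEN = os.environ["AWX_TOKEN"]             # assumed API token

payload = {
    "inventory": 1,
    "hosts": [
        {"name": "example1.com", "variables": "ansible_connection: local"},
        {"name": "example2.com"},
    ],
}

resp = requests.post(
    f"{BASE_URL}/api/v2/bulk/host_create/",
    json=payload,
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
resp.raise_for_status()

# Print the id and URL of each created host, as in the return data above.
for host in resp.json()["hosts"]:
    print(host["id"], host["name"], host["url"])
```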


@@ -1,13 +0,0 @@
# Bulk Job Launch
This endpoint allows the client to launch multiple UnifiedJobTemplates at a time, alongside any parameters they would normally set at launch time (a usage sketch follows the example below).
Example:
{
"name": "my bulk job",
"jobs": [
{"unified_job_template": 7, "inventory": 2},
{"unified_job_template": 7, "credentials": [3]}
]
}
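
A minimal usage sketch along the same lines, assuming the /api/v2/bulk/job_launch/ route registered later in this diff and the same hypothetical base URL and token; the payload follows the example above.

```python
# Hypothetical sketch: launch two jobs from unified job template 7 in one request.
import os
import requests

BASE_URL = "https://awx.example.com"        # assumed base URL
TOKEN = os.environ["AWX_TOKEN"]             # assumed API token

payload = {
    "name": "my bulk job",
    "jobs": [
        {"unified_job_template": 7, "inventory": 2},
        {"unified_job_template": 7, "credentials": [3]},
    ],
}

resp = requests.post(
    f"{BASE_URL}/api/v2/bulk/job_launch/",
    json=payload,
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
print(resp.status_code, resp.json())        # 201 on success, per BulkJobLaunchView below
```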


@@ -1,3 +0,0 @@
# Bulk Actions
This endpoint lists available bulk action APIs.


@@ -3,7 +3,7 @@ Make a GET request to this resource to retrieve aggregate statistics about inven
This includes the total number of hosts tracked by Tower over a period of time and the current success or
failure status of hosts that have run jobs within an Inventory.
## Parameters and Filtering
## Parmeters and Filtering
The `period` of the data can be adjusted with:
@@ -24,7 +24,7 @@ Data about the number of hosts will be returned in the following format:
Each element contains an epoch timestamp represented in seconds and a numerical value indicating
the number of hosts that exist at a given moment
Data about failed and successful hosts by inventory will be given as:
Data about failed and successfull hosts by inventory will be given as:
{
"sources": [


@@ -2,7 +2,7 @@
Make a GET request to this resource to retrieve aggregate statistics about job runs suitable for graphing.
## Parameters and Filtering
## Parmeters and Filtering
The `period` of the data can be adjusted with:


@@ -0,0 +1,11 @@
# List Fact Scans for a Host Specific Host Scan
Make a GET request to this resource to retrieve system tracking data for a particular scan
You may filter by datetime:
`?datetime=2015-06-01`
and module
`?datetime=2015-06-01&module=ansible`


@@ -0,0 +1,11 @@
# List Fact Scans for a Host by Module and Date
Make a GET request to this resource to retrieve system tracking scans by module and date/time
You may filter scan runs using the `from` and `to` properties:
`?from=2015-06-01%2012:00:00&to=2015-06-03`
You may also filter by module
`?module=packages`
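
A minimal sketch of applying these filters, assuming a hypothetical fact-scan sub-resource URL for host 42 (the concrete path is not shown in this diff and would normally be taken from the host's related links), plus the same hypothetical base URL and token.

```python
# Hypothetical sketch: list fact scans for a host, filtered by date range and module.
import os
import requests

BASE_URL = "https://awx.example.com"        # assumed base URL
TOKEN = os.environ["AWX_TOKEN"]             # assumed API token
FACT_SCANS_URL = f"{BASE_URL}/api/v2/hosts/42/fact_versions/"  # assumed sub-resource path

resp = requests.get(
    FACT_SCANS_URL,
    params={"from": "2015-06-01 12:00:00", "to": "2015-06-03", "module": "packages"},
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```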


@@ -0,0 +1 @@
# List Red Hat Insights for a Host


@@ -1,18 +0,0 @@
{% ifmeth GET %}
# Retrieve {{ model_verbose_name|title|anora }}:
Make a GET request to this resource to retrieve a single {{ model_verbose_name }}
record containing the following fields:
{% include "api/_result_fields_common.md" %}
{% endifmeth %}
{% ifmeth DELETE %}
# Delete {{ model_verbose_name|title|anora }}:
Make a DELETE request to this resource to soft-delete this {{ model_verbose_name }}.
A soft deletion will mark the `deleted` field as true and exclude the host
metric from license calculations.
This may be undone later if the same hostname is automated again afterwards.
{% endifmeth %}


@@ -18,7 +18,7 @@ inventory sources:
* `inventory_update`: ID of the inventory update job that was started.
(integer, read-only)
* `project_update`: ID of the project update job that was started if this inventory source is an SCM source.
(integer, read-only, optional)
(interger, read-only, optional)
Note: All manual inventory sources (source="") will be ignored by the update_inventory_sources endpoint. This endpoint will not update inventory sources for Smart Inventories.


@@ -0,0 +1,21 @@
{% ifmeth GET %}
# Determine if a Job can be started
Make a GET request to this resource to determine if the job can be started and
whether any passwords are required to start the job. The response will include
the following fields:
* `can_start`: Flag indicating if this job can be started (boolean, read-only)
* `passwords_needed_to_start`: Password names required to start the job (array,
read-only)
{% endifmeth %}
{% ifmeth POST %}
# Start a Job
Make a POST request to this resource to start the job. If any passwords are
required, they must be passed via POST data.
If successful, the response status code will be 202. If any required passwords
are not provided, a 400 status code will be returned. If the job cannot be
started, a 405 status code will be returned.
{% endifmeth %}
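
A minimal sketch of the GET-then-POST flow described above, assuming the job's start sub-resource lives at /api/v2/jobs/<id>/start/ (an assumption; the exact path is not shown in this diff), with the same hypothetical base URL and token.

```python
# Hypothetical sketch: check whether job 99 can be started, then start it.
import os
import requests

BASE_URL = "https://awx.example.com"        # assumed base URL
TOKEN = os.environ["AWX_TOKEN"]             # assumed API token
HEADERS = {"Authorization": f"Bearer {TOKEN}"}
START_URL = f"{BASE_URL}/api/v2/jobs/99/start/"   # assumed sub-resource path

info = requests.get(START_URL, headers=HEADERS, timeout=30)
info.raise_for_status()
data = info.json()

if data["can_start"]:
    # Illustrative only: supply any required passwords via POST data, as documented above.
    passwords = {name: os.environ.get(name.upper(), "") for name in data["passwords_needed_to_start"]}
    started = requests.post(START_URL, json=passwords, headers=HEADERS, timeout=30)
    print(started.status_code)              # 202 on success, 400 if passwords are missing
else:
    print("Job cannot be started (a POST would return 405).")
```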


@@ -2,7 +2,6 @@ receptor_user: awx
receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
receptor_work_commands:
ansible-runner:
command: ansible-runner


@@ -1,10 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.urls import re_path
from awx.api.views import HostMetricList, HostMetricDetail
urls = [re_path(r'^$', HostMetricList.as_view(), name='host_metric_list'), re_path(r'^(?P<pk>[0-9]+)/$', HostMetricDetail.as_view(), name='host_metric_detail')]
__all__ = ['urls']


@@ -3,14 +3,7 @@
from django.urls import re_path
from awx.api.views import (
InstanceGroupList,
InstanceGroupDetail,
InstanceGroupUnifiedJobsList,
InstanceGroupInstanceList,
InstanceGroupAccessList,
InstanceGroupObjectRolesList,
)
from awx.api.views import InstanceGroupList, InstanceGroupDetail, InstanceGroupUnifiedJobsList, InstanceGroupInstanceList
urls = [
@@ -18,8 +11,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', InstanceGroupDetail.as_view(), name='instance_group_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceGroupUnifiedJobsList.as_view(), name='instance_group_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/instances/$', InstanceGroupInstanceList.as_view(), name='instance_group_instance_list'),
re_path(r'^(?P<pk>[0-9]+)/access_list/$', InstanceGroupAccessList.as_view(), name='instance_group_access_list'),
re_path(r'^(?P<pk>[0-9]+)/object_roles/$', InstanceGroupObjectRolesList.as_view(), name='instance_group_object_role_list'),
]
__all__ = ['urls']


@@ -6,10 +6,7 @@ from django.urls import re_path
from awx.api.views.inventory import (
InventoryList,
InventoryDetail,
ConstructedInventoryDetail,
ConstructedInventoryList,
InventoryActivityStreamList,
InventoryInputInventoriesList,
InventoryJobTemplateList,
InventoryAccessList,
InventoryObjectRolesList,
@@ -40,7 +37,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/script/$', InventoryScriptView.as_view(), name='inventory_script_view'),
re_path(r'^(?P<pk>[0-9]+)/tree/$', InventoryTreeView.as_view(), name='inventory_tree_view'),
re_path(r'^(?P<pk>[0-9]+)/inventory_sources/$', InventoryInventorySourcesList.as_view(), name='inventory_inventory_sources_list'),
re_path(r'^(?P<pk>[0-9]+)/input_inventories/$', InventoryInputInventoriesList.as_view(), name='inventory_input_inventories'),
re_path(r'^(?P<pk>[0-9]+)/update_inventory_sources/$', InventoryInventorySourcesUpdate.as_view(), name='inventory_inventory_sources_update'),
re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', InventoryActivityStreamList.as_view(), name='inventory_activity_stream_list'),
re_path(r'^(?P<pk>[0-9]+)/job_templates/$', InventoryJobTemplateList.as_view(), name='inventory_job_template_list'),
@@ -52,10 +48,4 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/copy/$', InventoryCopy.as_view(), name='inventory_copy'),
]
# Constructed inventory special views
constructed_inventory_urls = [
re_path(r'^$', ConstructedInventoryList.as_view(), name='constructed_inventory_list'),
re_path(r'^(?P<pk>[0-9]+)/$', ConstructedInventoryDetail.as_view(), name='constructed_inventory_detail'),
]
__all__ = ['urls', 'constructed_inventory_urls']
__all__ = ['urls']


@@ -30,15 +30,7 @@ from awx.api.views import (
OAuth2TokenList,
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
# HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
)
from awx.api.views.bulk import (
BulkView,
BulkHostCreateView,
BulkJobLaunchView,
)
from awx.api.views.mesh_visualizer import MeshVisualizer
from awx.api.views.metrics import MetricsView
@@ -47,11 +39,10 @@ from .organization import urls as organization_urls
from .user import urls as user_urls
from .project import urls as project_urls
from .project_update import urls as project_update_urls
from .inventory import urls as inventory_urls, constructed_inventory_urls
from .inventory import urls as inventory_urls
from .execution_environments import urls as execution_environment_urls
from .team import urls as team_urls
from .host import urls as host_urls
from .host_metric import urls as host_metric_urls
from .group import urls as group_urls
from .inventory_source import urls as inventory_source_urls
from .inventory_update import urls as inventory_update_urls
@@ -119,11 +110,7 @@ v2_urls = [
re_path(r'^project_updates/', include(project_update_urls)),
re_path(r'^teams/', include(team_urls)),
re_path(r'^inventories/', include(inventory_urls)),
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
re_path(r'^hosts/', include(host_urls)),
re_path(r'^host_metrics/', include(host_metric_urls)),
# It will be enabled in future version of the AWX
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^groups/', include(group_urls)),
re_path(r'^inventory_sources/', include(inventory_source_urls)),
re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -149,9 +136,6 @@ v2_urls = [
re_path(r'^activity_stream/', include(activity_stream_urls)),
re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
]


@@ -33,6 +33,7 @@ class HostnameRegexValidator(RegexValidator):
return f"regex={self.regex}, message={self.message}, code={self.code}, inverse_match={self.inverse_match}, flags={self.flags}"
def __validate(self, value):
if ' ' in value:
return False, ValidationError("whitespaces in hostnames are illegal")

File diff suppressed because it is too large.


@@ -1,69 +0,0 @@
from collections import OrderedDict
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.response import Response
from awx.main.models import UnifiedJob, Host
from awx.api.generics import (
GenericAPIView,
APIView,
)
from awx.api import (
serializers,
renderers,
)
class BulkView(APIView):
permission_classes = [IsAuthenticated]
renderer_classes = [
renderers.BrowsableAPIRenderer,
JSONRenderer,
]
allowed_methods = ['GET', 'OPTIONS']
def get(self, request, format=None):
'''List top level resources'''
data = OrderedDict()
data['host_create'] = reverse('api:bulk_host_create', request=request)
data['job_launch'] = reverse('api:bulk_job_launch', request=request)
return Response(data)
class BulkJobLaunchView(GenericAPIView):
permission_classes = [IsAuthenticated]
model = UnifiedJob
serializer_class = serializers.BulkJobLaunchSerializer
allowed_methods = ['GET', 'POST', 'OPTIONS']
def get(self, request):
data = OrderedDict()
data['detail'] = "Specify a list of unified job templates to launch alongside their launchtime parameters"
return Response(data, status=status.HTTP_200_OK)
def post(self, request):
bulkjob_serializer = serializers.BulkJobLaunchSerializer(data=request.data, context={'request': request})
if bulkjob_serializer.is_valid():
result = bulkjob_serializer.create(bulkjob_serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED)
return Response(bulkjob_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BulkHostCreateView(GenericAPIView):
permission_classes = [IsAuthenticated]
model = Host
serializer_class = serializers.BulkHostCreateSerializer
allowed_methods = ['GET', 'POST', 'OPTIONS']
def get(self, request):
return Response({"detail": "Bulk create hosts with this endpoint"}, status=status.HTTP_200_OK)
def post(self, request):
serializer = serializers.BulkHostCreateSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
result = serializer.create(serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


@@ -25,7 +25,6 @@ from rest_framework import status
# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# generate install bundle for the instance
# install bundle directory structure
# ├── install_receptor.yml (playbook)
@@ -41,6 +40,7 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# │ └── work-public-key.pem
# └── requirements.yml
class InstanceInstallBundle(GenericAPIView):
name = _('Install Bundle')
model = models.Instance
serializer_class = serializers.InstanceSerializer


@@ -14,7 +14,6 @@ from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers
# AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
@@ -32,7 +31,6 @@ from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.serializers import (
InventorySerializer,
ConstructedInventorySerializer,
ActivityStreamSerializer,
RoleSerializer,
InstanceGroupSerializer,
@@ -48,6 +46,7 @@ logger = logging.getLogger('awx.api.views.organization')
class InventoryUpdateEventsList(SubListAPIView):
model = InventoryUpdateEvent
serializer_class = InventoryUpdateEventSerializer
parent_model = InventoryUpdate
@@ -67,11 +66,13 @@ class InventoryUpdateEventsList(SubListAPIView):
class InventoryList(ListCreateAPIView):
model = Inventory
serializer_class = InventorySerializer
class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Inventory
serializer_class = InventorySerializer
@@ -81,9 +82,7 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
# Do not allow changes to an Inventory kind.
if kind is not None and obj.kind != kind:
return Response(
dict(error=_('You cannot turn a regular inventory into a "smart" or "constructed" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED
)
return Response(dict(error=_('You cannot turn a regular inventory into a "smart" inventory.')), status=status.HTTP_405_METHOD_NOT_ALLOWED)
return super(InventoryDetail, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
@@ -98,30 +97,8 @@ class InventoryDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIVie
return Response(dict(error=_("{0}".format(e))), status=status.HTTP_400_BAD_REQUEST)
class ConstructedInventoryDetail(InventoryDetail):
serializer_class = ConstructedInventorySerializer
class ConstructedInventoryList(InventoryList):
serializer_class = ConstructedInventorySerializer
def get_queryset(self):
r = super().get_queryset()
return r.filter(kind='constructed')
class InventoryInputInventoriesList(SubListAttachDetachAPIView):
model = Inventory
serializer_class = InventorySerializer
parent_model = Inventory
relationship = 'input_inventories'
def is_valid_relation(self, parent, sub, created=False):
if sub.kind == 'constructed':
raise serializers.ValidationError({'error': 'You cannot add a constructed inventory to another constructed inventory.'})
class InventoryActivityStreamList(SubListAPIView):
model = ActivityStream
serializer_class = ActivityStreamSerializer
parent_model = Inventory
@@ -136,6 +113,7 @@ class InventoryActivityStreamList(SubListAPIView):
class InventoryInstanceGroupsList(SubListAttachDetachAPIView):
model = InstanceGroup
serializer_class = InstanceGroupSerializer
parent_model = Inventory
@@ -143,11 +121,13 @@ class InventoryInstanceGroupsList(SubListAttachDetachAPIView):
class InventoryAccessList(ResourceAccessList):
model = User # needs to be User for AccessLists's
parent_model = Inventory
class InventoryObjectRolesList(SubListAPIView):
model = Role
serializer_class = RoleSerializer
parent_model = Inventory
@@ -160,6 +140,7 @@ class InventoryObjectRolesList(SubListAPIView):
class InventoryJobTemplateList(SubListAPIView):
model = JobTemplate
serializer_class = JobTemplateSerializer
parent_model = Inventory
@@ -173,9 +154,11 @@ class InventoryJobTemplateList(SubListAPIView):
class InventoryLabelList(LabelSubListCreateAttachDetachView):
parent_model = Inventory
class InventoryCopy(CopyAPIView):
model = Inventory
copy_return_serializer_class = InventorySerializer


@@ -59,11 +59,13 @@ class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
class LabelDetail(RetrieveUpdateAPIView):
model = Label
serializer_class = LabelSerializer
class LabelList(ListCreateAPIView):
name = _("Labels")
model = Label
serializer_class = LabelSerializer


@@ -10,11 +10,13 @@ from awx.main.models import InstanceLink, Instance
class MeshVisualizer(APIView):
name = _("Mesh Visualizer")
permission_classes = (IsSystemAdminOrAuditor,)
swagger_topic = "System Configuration"
def get(self, request, format=None):
data = {
'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data,


@@ -5,11 +5,9 @@
import logging
# Django
from django.conf import settings
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
@@ -27,19 +25,15 @@ logger = logging.getLogger('awx.analytics')
class MetricsView(APIView):
name = _('Metrics')
swagger_topic = 'Metrics'
renderer_classes = [renderers.PlainTextRenderer, renderers.PrometheusJSONRenderer, renderers.BrowsableAPIRenderer]
def initialize_request(self, request, *args, **kwargs):
if settings.ALLOW_METRICS_FOR_ANONYMOUS_USERS:
self.permission_classes = (AllowAny,)
return super(APIView, self).initialize_request(request, *args, **kwargs)
def get(self, request):
'''Show Metrics Details'''
if settings.ALLOW_METRICS_FOR_ANONYMOUS_USERS or request.user.is_superuser or request.user.is_system_auditor:
if request.user.is_superuser or request.user.is_system_auditor:
metrics_to_show = ''
if not request.query_params.get('subsystemonly', "0") == "1":
metrics_to_show += metrics().decode('UTF-8')


@@ -58,6 +58,7 @@ logger = logging.getLogger('awx.api.views.organization')
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization
serializer_class = OrganizationSerializer
@@ -69,6 +70,7 @@ class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Organization
serializer_class = OrganizationSerializer
@@ -104,6 +106,7 @@ class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPI
class OrganizationInventoriesList(SubListAPIView):
model = Inventory
serializer_class = InventorySerializer
parent_model = Organization
@@ -111,6 +114,7 @@ class OrganizationInventoriesList(SubListAPIView):
class OrganizationUsersList(BaseUsersList):
model = User
serializer_class = UserSerializer
parent_model = Organization
@@ -119,6 +123,7 @@ class OrganizationUsersList(BaseUsersList):
class OrganizationAdminsList(BaseUsersList):
model = User
serializer_class = UserSerializer
parent_model = Organization
@@ -127,6 +132,7 @@ class OrganizationAdminsList(BaseUsersList):
class OrganizationProjectsList(SubListCreateAPIView):
model = Project
serializer_class = ProjectSerializer
parent_model = Organization
@@ -134,6 +140,7 @@ class OrganizationProjectsList(SubListCreateAPIView):
class OrganizationExecutionEnvironmentsList(SubListCreateAttachDetachAPIView):
model = ExecutionEnvironment
serializer_class = ExecutionEnvironmentSerializer
parent_model = Organization
@@ -143,6 +150,7 @@ class OrganizationExecutionEnvironmentsList(SubListCreateAttachDetachAPIView):
class OrganizationJobTemplatesList(SubListCreateAPIView):
model = JobTemplate
serializer_class = JobTemplateSerializer
parent_model = Organization
@@ -150,6 +158,7 @@ class OrganizationJobTemplatesList(SubListCreateAPIView):
class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
model = WorkflowJobTemplate
serializer_class = WorkflowJobTemplateSerializer
parent_model = Organization
@@ -157,6 +166,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
model = Team
serializer_class = TeamSerializer
parent_model = Organization
@@ -165,6 +175,7 @@ class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
class OrganizationActivityStreamList(SubListAPIView):
model = ActivityStream
serializer_class = ActivityStreamSerializer
parent_model = Organization
@@ -173,6 +184,7 @@ class OrganizationActivityStreamList(SubListAPIView):
class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView):
model = NotificationTemplate
serializer_class = NotificationTemplateSerializer
parent_model = Organization
@@ -181,28 +193,34 @@ class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView):
class OrganizationNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
model = NotificationTemplate
serializer_class = NotificationTemplateSerializer
parent_model = Organization
class OrganizationNotificationTemplatesStartedList(OrganizationNotificationTemplatesAnyList):
relationship = 'notification_templates_started'
class OrganizationNotificationTemplatesErrorList(OrganizationNotificationTemplatesAnyList):
relationship = 'notification_templates_error'
class OrganizationNotificationTemplatesSuccessList(OrganizationNotificationTemplatesAnyList):
relationship = 'notification_templates_success'
class OrganizationNotificationTemplatesApprovalList(OrganizationNotificationTemplatesAnyList):
relationship = 'notification_templates_approvals'
class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
model = InstanceGroup
serializer_class = InstanceGroupSerializer
parent_model = Organization
@@ -210,6 +228,7 @@ class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
model = Credential
serializer_class = CredentialSerializer
parent_model = Organization
@@ -221,11 +240,13 @@ class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
class OrganizationAccessList(ResourceAccessList):
model = User # needs to be User for AccessLists's
parent_model = Organization
class OrganizationObjectRolesList(SubListAPIView):
model = Role
serializer_class = RoleSerializer
parent_model = Organization


@@ -36,6 +36,7 @@ logger = logging.getLogger('awx.api.views.root')
class ApiRootView(APIView):
permission_classes = (AllowAny,)
name = _('REST API')
versioning_class = None
@@ -58,6 +59,7 @@ class ApiRootView(APIView):
class ApiOAuthAuthorizationRootView(APIView):
permission_classes = (AllowAny,)
name = _("API OAuth 2 Authorization Root")
versioning_class = None
@@ -72,6 +74,7 @@ class ApiOAuthAuthorizationRootView(APIView):
class ApiVersionRootView(APIView):
permission_classes = (AllowAny,)
swagger_topic = 'Versioning'
@@ -98,14 +101,10 @@ class ApiVersionRootView(APIView):
data['tokens'] = reverse('api:o_auth2_token_list', request=request)
data['metrics'] = reverse('api:metrics_view', request=request)
data['inventory'] = reverse('api:inventory_list', request=request)
data['constructed_inventory'] = reverse('api:constructed_inventory_list', request=request)
data['inventory_sources'] = reverse('api:inventory_source_list', request=request)
data['inventory_updates'] = reverse('api:inventory_update_list', request=request)
data['groups'] = reverse('api:group_list', request=request)
data['hosts'] = reverse('api:host_list', request=request)
data['host_metrics'] = reverse('api:host_metric_list', request=request)
# It will be enabled in future version of the AWX
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
data['job_templates'] = reverse('api:job_template_list', request=request)
data['jobs'] = reverse('api:job_list', request=request)
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -125,7 +124,6 @@ class ApiVersionRootView(APIView):
data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
data['bulk'] = reverse('api:bulk', request=request)
return Response(data)
@@ -174,6 +172,7 @@ class ApiV2PingView(APIView):
class ApiV2SubscriptionView(APIView):
permission_classes = (IsAuthenticated,)
name = _('Subscriptions')
swagger_topic = 'System Configuration'
@@ -213,6 +212,7 @@ class ApiV2SubscriptionView(APIView):
class ApiV2AttachView(APIView):
permission_classes = (IsAuthenticated,)
name = _('Attach Subscription')
swagger_topic = 'System Configuration'
@@ -230,6 +230,7 @@ class ApiV2AttachView(APIView):
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
if pool_id and user and pw:
data = request.data.copy()
try:
with set_environ(**settings.AWX_TASK_ENV):
@@ -257,6 +258,7 @@ class ApiV2AttachView(APIView):
class ApiV2ConfigView(APIView):
permission_classes = (IsAuthenticated,)
name = _('Configuration')
swagger_topic = 'System Configuration'
@@ -276,9 +278,6 @@ class ApiV2ConfigView(APIView):
pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off'
# Guarding against settings.UI_NEXT being set to a non-boolean value
ui_next_state = settings.UI_NEXT if settings.UI_NEXT in (True, False) else False
data = dict(
time_zone=settings.TIME_ZONE,
license_info=license_data,
@@ -287,7 +286,6 @@ class ApiV2ConfigView(APIView):
analytics_status=pendo_state,
analytics_collectors=all_collectors(),
become_methods=PRIVILEGE_ESCALATION_METHODS,
ui_next=ui_next_state,
)
# If LDAP is enabled, user_ldap_fields will return a list of field


@@ -8,6 +8,7 @@ from django.utils.translation import gettext_lazy as _
class ConfConfig(AppConfig):
name = 'awx.conf'
verbose_name = _('Configuration')
@@ -15,6 +16,7 @@ class ConfConfig(AppConfig):
self.module.autodiscover()
if not set(sys.argv) & {'migrate', 'check_migrations'}:
from .settings import SettingsWrapper
SettingsWrapper.initialize()


@@ -21,7 +21,7 @@ logger = logging.getLogger('awx.conf.fields')
# Use DRF fields to convert/validate settings:
# - to_representation(obj) should convert a native Python object to a primitive
# serializable type. This primitive type will be what is presented in the API
# and stored in the JSON field in the database.
# and stored in the JSON field in the datbase.
# - to_internal_value(data) should convert the primitive type back into the
# appropriate Python type to be used in settings.
@@ -47,6 +47,7 @@ class IntegerField(IntegerField):
class StringListField(ListField):
child = CharField()
def to_representation(self, value):
@@ -56,6 +57,7 @@ class StringListField(ListField):
class StringListBooleanField(ListField):
default_error_messages = {'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.')}
child = CharField()
@@ -94,6 +96,7 @@ class StringListBooleanField(ListField):
class StringListPathField(StringListField):
default_error_messages = {'type_error': _('Expected list of strings but got {input_type} instead.'), 'path_error': _('{path} is not a valid path choice.')}
def to_internal_value(self, paths):
@@ -123,6 +126,7 @@ class StringListIsolatedPathField(StringListField):
}
def to_internal_value(self, paths):
if isinstance(paths, (list, tuple)):
for p in paths:
if not isinstance(p, str):


@@ -8,6 +8,7 @@ import awx.main.fields
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [


@@ -48,6 +48,7 @@ def revert_tower_settings(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [('conf', '0001_initial'), ('main', '0004_squashed_v310_release')]
run_before = [('main', '0005_squashed_v310_v313_updates')]

View File

@@ -7,6 +7,7 @@ import awx.main.fields
class Migration(migrations.Migration):
dependencies = [('conf', '0002_v310_copy_tower_settings')]
operations = [migrations.AlterField(model_name='setting', name='value', field=awx.main.fields.JSONBlob(null=True))]

View File

@@ -5,6 +5,7 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('conf', '0003_v310_JSONField_changes')]
operations = [

View File

@@ -15,6 +15,7 @@ def reverse_copy_session_settings(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [('conf', '0004_v320_reencrypt')]
operations = [migrations.RunPython(copy_session_settings, reverse_copy_session_settings)]

View File

@@ -8,6 +8,7 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('conf', '0005_v330_rename_two_session_settings')]
operations = [migrations.RunPython(fill_ldap_group_type_params)]

View File

@@ -9,6 +9,7 @@ def copy_allowed_ips(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [('conf', '0006_v331_ldap_group_type')]
operations = [migrations.RunPython(copy_allowed_ips)]

View File

@@ -14,6 +14,7 @@ def _noop(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [('conf', '0007_v380_rename_more_settings')]
operations = [migrations.RunPython(clear_old_license, _noop), migrations.RunPython(prefill_rh_credentials, _noop)]

View File

@@ -10,6 +10,7 @@ def rename_proot_settings(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [('conf', '0008_subscriptions')]
operations = [migrations.RunPython(rename_proot_settings)]

View File

@@ -1,11 +1,7 @@
import inspect
from django.conf import settings
import logging
logger = logging.getLogger('awx.conf.migrations')
from django.utils.timezone import now
def fill_ldap_group_type_params(apps, schema_editor):
@@ -19,7 +15,7 @@ def fill_ldap_group_type_params(apps, schema_editor):
entry = qs[0]
group_type_params = entry.value
else:
return # for new installs we prefer to use the default value
entry = Setting(key='AUTH_LDAP_GROUP_TYPE_PARAMS', value=group_type_params, created=now(), modified=now())
init_attrs = set(inspect.getfullargspec(group_type.__init__).args[1:])
for k in list(group_type_params.keys()):
@@ -27,5 +23,4 @@ def fill_ldap_group_type_params(apps, schema_editor):
del group_type_params[k]
entry.value = group_type_params
logger.warning(f'Migration updating AUTH_LDAP_GROUP_TYPE_PARAMS with value {entry.value}')
entry.save()
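The migration above uses inspect.getfullargspec to drop any stored parameters that the configured LDAP group type's __init__ no longer accepts. A small, generic sketch of that signature-filtering idea (the function name is hypothetical):

import inspect

def filter_kwargs_for(cls, params):
    # keep only keyword arguments that cls.__init__ actually accepts (skip 'self')
    accepted = set(inspect.getfullargspec(cls.__init__).args[1:])
    return {k: v for k, v in params.items() if k in accepted}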

View File

@@ -10,6 +10,7 @@ __all__ = ['rename_setting']
def rename_setting(apps, schema_editor, old_key, new_key):
old_setting = None
Setting = apps.get_model('conf', 'Setting')
if Setting.objects.filter(key=new_key).exists() or hasattr(settings, new_key):

View File

@@ -17,6 +17,7 @@ __all__ = ['Setting']
class Setting(CreatedModifiedModel):
key = models.CharField(max_length=255)
value = JSONBlob(null=True)
user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))

View File

@@ -5,13 +5,11 @@ import threading
import time
import os
from concurrent.futures import ThreadPoolExecutor
# Django
from django.conf import LazySettings
from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction, connection
from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property
@@ -106,6 +104,7 @@ def filter_sensitive(registry, key, value):
class TransientSetting(object):
__slots__ = ('pk', 'value')
def __init__(self, pk, value):
@@ -159,7 +158,7 @@ class EncryptedCacheProxy(object):
obj_id = self.cache.get(Setting.get_cache_id_key(key), default=empty)
if obj_id is empty:
logger.info('Efficiency notice: Corresponding id not stored in cache %s', Setting.get_cache_id_key(key))
obj_id = getattr(_get_setting_from_db(self.registry, key), 'pk', None)
obj_id = getattr(self._get_setting_from_db(key), 'pk', None)
elif obj_id == SETTING_CACHE_NONE:
obj_id = None
return method(TransientSetting(pk=obj_id, value=value), 'value')
@@ -168,6 +167,11 @@ class EncryptedCacheProxy(object):
# a no-op; it just returns the provided value
return value
def _get_setting_from_db(self, key):
field = self.registry.get_setting_field(key)
if not field.read_only:
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
def __getattr__(self, name):
return getattr(self.cache, name)
@@ -183,22 +187,6 @@ def get_settings_to_cache(registry):
return dict([(key, SETTING_CACHE_NOTSET) for key in get_writeable_settings(registry)])
# Will first attempt to get the setting from the database in synchronous mode.
# If called from an async context, it will attempt to get the setting from the database in a thread.
def _get_setting_from_db(registry, key):
def get_settings_from_db_sync(registry, key):
field = registry.get_setting_field(key)
if not field.read_only or key == 'INSTALL_UUID':
return Setting.objects.filter(key=key, user__isnull=True).order_by('pk').first()
try:
return get_settings_from_db_sync(registry, key)
except SynchronousOnlyOperation:
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(get_settings_from_db_sync, registry, key)
return future.result()
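The helper above runs the setting lookup directly and, when Django raises SynchronousOnlyOperation because the caller is inside an async event loop, re-runs it in a one-off worker thread. A generic sketch of that pattern, assuming only a hypothetical function name:

from concurrent.futures import ThreadPoolExecutor
from django.core.exceptions import SynchronousOnlyOperation

def run_sync_or_in_thread(func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except SynchronousOnlyOperation:
        # async context: Django refuses direct ORM access here, so hop to a thread
        with ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(func, *args, **kwargs).result()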
def get_cache_value(value):
"""Returns the proper special cache setting for a value
based on instance type.
@@ -358,7 +346,7 @@ class SettingsWrapper(UserSettingsHolder):
setting_id = None
# this value is read-only, however we *do* want to fetch its value from the database
if not field.read_only or name == 'INSTALL_UUID':
setting = _get_setting_from_db(self.registry, name)
setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
if setting:
if getattr(field, 'encrypted', False):
value = decrypt_field(setting, 'value')

View File

@@ -94,7 +94,9 @@ def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
@pytest.mark.django_db
def test_setting_singleton_update(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.handle_setting_changes'
):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 3})
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
assert response.data['FOO_BAR'] == 3
@@ -110,7 +112,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
# sure that the _Forbidden validator doesn't get used for the
# fields. See also https://github.com/ansible/awx/issues/4099.
with dummy_setting('FOO_BAR', field_class=sso_fields.SAMLOrgAttrField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache'
'awx.conf.views.handle_setting_changes'
):
api_request(
'patch',
@@ -124,7 +126,7 @@ def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, du
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=4, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache'
'awx.conf.views.handle_setting_changes'
):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 5})
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
@@ -134,7 +136,7 @@ def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.CharField, encrypted=True, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache'
'awx.conf.views.handle_setting_changes'
):
api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 'password'})
assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
@@ -153,14 +155,16 @@ def test_setting_singleton_update_runs_custom_validate(api_request, dummy_settin
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), dummy_validate(
'foobar', func_raising_exception
), mock.patch('awx.conf.views.clear_setting_cache'):
), mock.patch('awx.conf.views.handle_setting_changes'):
response = api_request('patch', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}), data={'FOO_BAR': 23})
assert response.status_code == 400
@pytest.mark.django_db
def test_setting_singleton_delete(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch('awx.conf.views.clear_setting_cache'):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.handle_setting_changes'
):
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
assert not response.data['FOO_BAR']
@@ -169,7 +173,7 @@ def test_setting_singleton_delete(api_request, dummy_setting):
@pytest.mark.django_db
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
with dummy_setting('FOO_BAR', field_class=fields.IntegerField, read_only=True, default=23, category='FooBar', category_slug='foobar'), mock.patch(
'awx.conf.views.clear_setting_cache'
'awx.conf.views.handle_setting_changes'
):
api_request('delete', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))
response = api_request('get', reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}))

View File

@@ -1,25 +0,0 @@
import pytest
from awx.conf.migrations._ldap_group_type import fill_ldap_group_type_params
from awx.conf.models import Setting
from django.apps import apps
@pytest.mark.django_db
def test_fill_group_type_params_no_op():
fill_ldap_group_type_params(apps, 'dont-use-me')
assert Setting.objects.count() == 0
@pytest.mark.django_db
def test_keep_old_setting_with_default_value():
Setting.objects.create(key='AUTH_LDAP_GROUP_TYPE', value={'name_attr': 'cn', 'member_attr': 'member'})
fill_ldap_group_type_params(apps, 'dont-use-me')
assert Setting.objects.count() == 1
s = Setting.objects.first()
assert s.value == {'name_attr': 'cn', 'member_attr': 'member'}
# NOTE: would be good to test the removal of attributes by migration
# but this requires fighting with the validator and is not done here

View File

@@ -5,6 +5,7 @@ from awx.conf.fields import StringListBooleanField, StringListPathField, ListTup
class TestStringListBooleanField:
FIELD_VALUES = [
("hello", "hello"),
(("a", "b"), ["a", "b"]),
@@ -52,6 +53,7 @@ class TestStringListBooleanField:
class TestListTuplesField:
FIELD_VALUES = [([('a', 'b'), ('abc', '123')], [("a", "b"), ("abc", "123")])]
FIELD_VALUES_INVALID = [("abc", type("abc")), ([('a', 'b', 'c'), ('abc', '123', '456')], type(('a',))), (['a', 'b'], type('a')), (123, type(123))]
@@ -71,6 +73,7 @@ class TestListTuplesField:
class TestStringListPathField:
FIELD_VALUES = [
((".", "..", "/"), [".", "..", "/"]),
(("/home",), ["/home"]),

View File

@@ -26,17 +26,17 @@ from awx.api.generics import APIView, GenericAPIView, ListAPIView, RetrieveUpdat
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.api.versioning import reverse
from awx.main.utils import camelcase_to_underscore
from awx.main.tasks.system import clear_setting_cache
from awx.main.tasks.system import handle_setting_changes
from awx.conf.models import Setting
from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer
from awx.conf import settings_registry
from awx.main.utils.external_logging import reconfigure_rsyslog
SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name'))
class SettingCategoryList(ListAPIView):
model = Setting # Not exactly, but needed for the view.
serializer_class = SettingCategorySerializer
filter_backends = []
@@ -58,6 +58,7 @@ class SettingCategoryList(ListAPIView):
class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
model = Setting # Not exactly, but needed for the view.
serializer_class = SettingSingletonSerializer
filter_backends = []
@@ -119,10 +120,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.save(update_fields=['value'])
settings_change_list.append(key)
if settings_change_list:
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
# call notify to rsyslog. no data is need so payload is empty
reconfigure_rsyslog.delay()
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
@@ -137,10 +135,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
setting.delete()
settings_change_list.append(setting.key)
if settings_change_list:
connection.on_commit(lambda: clear_setting_cache.delay(settings_change_list))
if any([setting.startswith('LOG_AGGREGATOR') for setting in settings_change_list]):
# call notify to rsyslog. no data is need so payload is empty
reconfigure_rsyslog.delay()
connection.on_commit(lambda: handle_setting_changes.delay(settings_change_list))
# When TOWER_URL_BASE is deleted from the API, reset it to the hostname
# used to make the request as a default.
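Both branches of the hunks above defer the task dispatch with connection.on_commit so the settings change is only broadcast once the transaction has actually committed. A minimal, generic sketch of that pattern; the task argument is a stand-in for any object exposing .delay():

from django.db import connection

def queue_when_committed(task, changed_keys):
    # the lambda fires only after the surrounding transaction commits,
    # so workers never react to keys that end up rolled back
    if changed_keys:
        connection.on_commit(lambda: task.delay(list(changed_keys)))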
@@ -151,6 +146,7 @@ class SettingSingletonDetail(RetrieveUpdateDestroyAPIView):
class SettingLoggingTest(GenericAPIView):
name = _('Logging Connectivity Test')
model = Setting
serializer_class = SettingSingletonSerializer
@@ -187,7 +183,7 @@ class SettingLoggingTest(GenericAPIView):
if not port:
return Response({'error': 'Port required for ' + protocol}, status=status.HTTP_400_BAD_REQUEST)
else:
# if http/https by this point, domain is reachable
# if http/https by this point, domain is reacheable
return Response(status=status.HTTP_202_ACCEPTED)
if protocol == 'udp':

View File

@@ -1972,7 +1972,7 @@ msgid ""
"HTTP headers and meta keys to search to determine remote host name or IP. "
"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if "
"behind a reverse proxy. See the \"Proxy Support\" section of the "
"Administrator guide for more details."
"Adminstrator guide for more details."
msgstr ""
#: awx/main/conf.py:85
@@ -2457,7 +2457,7 @@ msgid ""
msgstr ""
#: awx/main/conf.py:631
msgid "Maximum disk persistence for external log aggregation (in GB)"
msgid "Maximum disk persistance for external log aggregation (in GB)"
msgstr ""
#: awx/main/conf.py:633
@@ -2548,7 +2548,7 @@ msgid "Enable"
msgstr ""
#: awx/main/constants.py:27
msgid "Does"
msgid "Doas"
msgstr ""
#: awx/main/constants.py:28
@@ -4801,7 +4801,7 @@ msgstr ""
#: awx/main/models/workflow.py:251
msgid ""
"An identifier corresponding to the workflow job template node that this node "
"An identifier coresponding to the workflow job template node that this node "
"was created from."
msgstr ""
@@ -5521,7 +5521,7 @@ msgstr ""
#: awx/sso/conf.py:606
msgid ""
"Extra arguments for Google OAuth2 login. You can restrict it to only allow a "
"single domain to authenticate, even if the user is logged in with multiple "
"single domain to authenticate, even if the user is logged in with multple "
"Google accounts. Refer to the documentation for more detail."
msgstr ""
@@ -5905,7 +5905,7 @@ msgstr ""
#: awx/sso/conf.py:1290
msgid ""
"Create a key pair to use as a service provider (SP) and include the "
"Create a keypair to use as a service provider (SP) and include the "
"certificate content here."
msgstr ""
@@ -5915,7 +5915,7 @@ msgstr ""
#: awx/sso/conf.py:1302
msgid ""
"Create a key pair to use as a service provider (SP) and include the private "
"Create a keypair to use as a service provider (SP) and include the private "
"key content here."
msgstr ""

View File

@@ -1971,7 +1971,7 @@ msgid ""
"HTTP headers and meta keys to search to determine remote host name or IP. "
"Add additional items to this list, such as \"HTTP_X_FORWARDED_FOR\", if "
"behind a reverse proxy. See the \"Proxy Support\" section of the "
"Administrator guide for more details."
"Adminstrator guide for more details."
msgstr "Los encabezados HTTP y las llaves de activación para buscar y determinar el nombre de host remoto o IP. Añada elementos adicionales a esta lista, como \"HTTP_X_FORWARDED_FOR\", si está detrás de un proxy inverso. Consulte la sección \"Soporte de proxy\" de la guía del adminstrador para obtener más información."
#: awx/main/conf.py:85
@@ -4804,7 +4804,7 @@ msgstr "Indica que un trabajo no se creará cuando es sea True. La semántica de
#: awx/main/models/workflow.py:251
msgid ""
"An identifier corresponding to the workflow job template node that this node "
"An identifier coresponding to the workflow job template node that this node "
"was created from."
msgstr "Un identificador que corresponde al nodo de plantilla de tarea del flujo de trabajo a partir del cual se creó este nodo."
@@ -5526,7 +5526,7 @@ msgstr "Argumentos adicionales para Google OAuth2"
#: awx/sso/conf.py:606
msgid ""
"Extra arguments for Google OAuth2 login. You can restrict it to only allow a "
"single domain to authenticate, even if the user is logged in with multiple "
"single domain to authenticate, even if the user is logged in with multple "
"Google accounts. Refer to the documentation for more detail."
msgstr "Argumentos adicionales para el inicio de sesión en Google OAuth2. Puede limitarlo para permitir la autenticación de un solo dominio, incluso si el usuario ha iniciado sesión con varias cuentas de Google. Consulte la documentación para obtener información detallada."
@@ -5910,7 +5910,7 @@ msgstr "Certificado público del proveedor de servicio SAML"
#: awx/sso/conf.py:1290
msgid ""
"Create a key pair to use as a service provider (SP) and include the "
"Create a keypair to use as a service provider (SP) and include the "
"certificate content here."
msgstr "Crear un par de claves para usar como proveedor de servicio (SP) e incluir el contenido del certificado aquí."
@@ -5920,7 +5920,7 @@ msgstr "Clave privada del proveedor de servicio SAML"
#: awx/sso/conf.py:1302
msgid ""
"Create a key pair to use as a service provider (SP) and include the private "
"Create a keypair to use as a service provider (SP) and include the private "
"key content here."
msgstr "Crear un par de claves para usar como proveedor de servicio (SP) e incluir el contenido de la clave privada aquí."

View File

@@ -561,6 +561,7 @@ class NotificationAttachMixin(BaseAccess):
class InstanceAccess(BaseAccess):
model = Instance
prefetch_related = ('rampart_groups',)
@@ -578,6 +579,7 @@ class InstanceAccess(BaseAccess):
return super(InstanceAccess, self).can_unattach(obj, sub_obj, relationship, relationship, data=data)
def can_add(self, data):
return self.user.is_superuser
def can_change(self, obj, data):
@@ -588,39 +590,18 @@ class InstanceAccess(BaseAccess):
class InstanceGroupAccess(BaseAccess):
"""
I can see Instance Groups when I am:
- a superuser(system administrator)
- at least read_role on the instance group
I can edit Instance Groups when I am:
- a superuser
- admin role on the Instance group
I can add/delete Instance Groups:
- a superuser(system administrator)
I can use Instance Groups when I have:
- use_role on the instance group
"""
model = InstanceGroup
prefetch_related = ('instances',)
def filtered_queryset(self):
return self.model.accessible_objects(self.user, 'read_role')
@check_superuser
def can_use(self, obj):
return self.user in obj.use_role
return InstanceGroup.objects.filter(organization__in=Organization.accessible_pk_qs(self.user, 'admin_role')).distinct()
def can_add(self, data):
return self.user.is_superuser
@check_superuser
def can_change(self, obj, data):
return self.can_admin(obj)
@check_superuser
def can_admin(self, obj):
return self.user in obj.admin_role
return self.user.is_superuser
def can_delete(self, obj):
if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
@@ -867,7 +848,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)
if relationship == "instance_groups":
if self.user in obj.admin_role and self.user in sub_obj.use_role:
if self.user.is_superuser:
return True
return False
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -956,7 +937,7 @@ class InventoryAccess(BaseAccess):
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups":
if self.user in sub_obj.use_role and self.user in obj.admin_role:
if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role:
return True
return False
return super(InventoryAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
@@ -1049,9 +1030,7 @@ class GroupAccess(BaseAccess):
return Group.objects.filter(inventory__in=Inventory.accessible_pk_qs(self.user, 'read_role'))
def can_add(self, data):
if not data: # So the browseable API will work
return Inventory.accessible_objects(self.user, 'admin_role').exists()
if 'inventory' not in data:
if not data or 'inventory' not in data:
return False
# Checks for admin or change permission on inventory.
return self.check_related('inventory', Inventory, data)
@@ -1693,12 +1672,11 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
return self.user.is_superuser or self.user in obj.admin_role
@check_superuser
# object here is the job template. sub_object here is what is being attached
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == "instance_groups":
if not obj.organization:
return False
return self.user in sub_obj.use_role and self.user in obj.admin_role
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser
@@ -1875,6 +1853,8 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
def _related_filtered_queryset(self, cls):
if cls is Label:
return LabelAccess(self.user).filtered_queryset()
elif cls is InstanceGroup:
return InstanceGroupAccess(self.user).filtered_queryset()
else:
return cls._accessible_pk_qs(cls, self.user, 'use_role')
@@ -1886,7 +1866,6 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
@check_superuser
def can_add(self, data, template=None):
# WARNING: duplicated with BulkJobLaunchSerializer, check when changing permission levels
# This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this
if 'reference_obj' in data:
@@ -2019,16 +1998,7 @@ class WorkflowJobNodeAccess(BaseAccess):
)
def filtered_queryset(self):
return self.model.objects.filter(
Q(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(workflow_job__organization__in=Organization.objects.filter(Q(admin_role__members=self.user)))
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.workflow_job.is_bulk_job and obj.workflow_job.created_by_id == self.user.id:
return True
return super().can_read(obj)
return self.model.objects.filter(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
@check_superuser
def can_add(self, data):
@@ -2154,16 +2124,7 @@ class WorkflowJobAccess(BaseAccess):
)
def filtered_queryset(self):
return WorkflowJob.objects.filter(
Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.is_bulk_job and obj.created_by_id == self.user.id:
return True
return super().can_read(obj)
return WorkflowJob.objects.filter(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
def can_add(self, data):
# Old add-start system for launching jobs is being deprecated, and
@@ -2391,6 +2352,7 @@ class JobEventAccess(BaseAccess):
class UnpartitionedJobEventAccess(JobEventAccess):
model = UnpartitionedJobEvent

View File

@@ -4,11 +4,11 @@ import logging
# AWX
from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename
logger = logging.getLogger('awx.main.scheduler')
@task(queue=get_task_queuename)
@task(queue=get_local_queuename)
def send_subsystem_metrics():
Metrics().send_metrics()

View File

@@ -65,7 +65,7 @@ class FixedSlidingWindow:
return sum(self.buckets.values()) or 0
class RelayWebsocketStatsManager:
class BroadcastWebsocketStatsManager:
def __init__(self, event_loop, local_hostname):
self._local_hostname = local_hostname
@@ -74,7 +74,7 @@ class RelayWebsocketStatsManager:
self._redis_key = BROADCAST_WEBSOCKET_REDIS_KEY_NAME
def new_remote_host_stats(self, remote_hostname):
self._stats[remote_hostname] = RelayWebsocketStats(self._local_hostname, remote_hostname)
self._stats[remote_hostname] = BroadcastWebsocketStats(self._local_hostname, remote_hostname)
return self._stats[remote_hostname]
def delete_remote_host_stats(self, remote_hostname):
@@ -107,7 +107,7 @@ class RelayWebsocketStatsManager:
return parser.text_string_to_metric_families(stats_str.decode('UTF-8'))
class RelayWebsocketStats:
class BroadcastWebsocketStats:
def __init__(self, local_hostname, remote_hostname):
self._local_hostname = local_hostname
self._remote_hostname = remote_hostname

View File

@@ -83,7 +83,7 @@ def _identify_lower(key, since, until, last_gather):
return lower, last_entries
@register('config', '1.5', description=_('General platform configuration.'))
@register('config', '1.4', description=_('General platform configuration.'))
def config(since, **kwargs):
license_info = get_license()
install_type = 'traditional'
@@ -119,7 +119,6 @@ def config(since, **kwargs):
'compliant': license_info.get('compliant'),
'date_warning': license_info.get('date_warning'),
'date_expired': license_info.get('date_expired'),
'subscription_usage_model': getattr(settings, 'SUBSCRIPTION_USAGE_MODEL', ''), # 1.5+
'free_instances': license_info.get('free_instances', 0),
'total_licensed_instances': license_info.get('instance_count', 0),
'license_expiry': license_info.get('time_remaining', 0),
@@ -234,13 +233,11 @@ def projects_by_scm_type(since, **kwargs):
return counts
@register('instance_info', '1.3', description=_('Cluster topology and capacity'))
@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
def instance_info(since, include_hostnames=False, **kwargs):
info = {}
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
tm_models = TaskManagerModels.init_with_consumed_capacity(
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
)
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
for tm_instance in tm_models.instances.instances_by_hostname.values():
instance = tm_instance.obj
instance_info = {

View File

@@ -9,7 +9,7 @@ from django.apps import apps
from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing
root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
root_key = 'awx_metrics'
logger = logging.getLogger('awx.main.analytics')
@@ -264,6 +264,13 @@ class Metrics:
data[field] = self.METRICS[field].decode(self.conn)
return data
def store_metrics(self, data_json):
# called when receiving metrics from other instances
data = json.loads(data_json)
if self.instance_name != data['instance']:
logger.debug(f"{self.instance_name} received subsystem metrics from {data['instance']}")
self.conn.set(root_key + "_instance_" + data['instance'], data['metrics'])
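store_metrics above accepts a JSON payload from another instance and writes the serialized counters into Redis under a per-instance key. A self-contained sketch of that exchange; the key prefix and connection URL are illustrative assumptions:

import json
import redis

def store_instance_metrics(conn, payload_json, root_key='awx_metrics'):
    # payload carries the sending hostname plus its serialized metrics blob
    payload = json.loads(payload_json)
    conn.set(f"{root_key}_instance_{payload['instance']}", payload['metrics'])

# Example wiring (hypothetical URL):
# conn = redis.Redis.from_url('redis://localhost:6379/0')
# store_instance_metrics(conn, json.dumps({'instance': 'node1', 'metrics': '{}'}))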
def should_pipe_execute(self):
if self.metrics_have_changed is False:
return False
@@ -302,9 +309,9 @@ class Metrics:
'instance': self.instance_name,
'metrics': self.serialize_local_metrics(),
}
# store a local copy as well
self.store_metrics(json.dumps(payload))
emit_channel_notification("metrics", payload)
self.previous_send_metrics.set(current_time)
self.previous_send_metrics.store_value(self.conn)
finally:

View File

@@ -3,5 +3,6 @@ from django.utils.translation import gettext_lazy as _
class MainConfig(AppConfig):
name = 'awx.main'
verbose_name = _('Main')

View File

@@ -10,7 +10,7 @@ from rest_framework import serializers
# AWX
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
logger = logging.getLogger('awx.main.conf')
@@ -282,16 +282,6 @@ register(
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
)
register(
'AWX_RUNNER_KEEPALIVE_SECONDS',
field_class=fields.IntegerField,
label=_('K8S Ansible Runner Keep-Alive Message Interval'),
help_text=_('Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open.'),
category=_('Jobs'),
category_slug='jobs',
placeholder=240, # intended to be under common 5 minute idle timeout
)
register(
'GALAXY_TASK_ENV',
field_class=fields.KeyValueField,
@@ -775,53 +765,6 @@ register(
help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
)
register(
'BULK_JOB_MAX_LAUNCH',
field_class=fields.IntegerField,
default=100,
label=_('Max jobs to allow bulk jobs to launch'),
help_text=_('Max jobs to allow bulk jobs to launch'),
category=_('Bulk Actions'),
category_slug='bulk',
)
register(
'BULK_HOST_MAX_CREATE',
field_class=fields.IntegerField,
default=100,
label=_('Max number of hosts to allow to be created in a single bulk action'),
help_text=_('Max number of hosts to allow to be created in a single bulk action'),
category=_('Bulk Actions'),
category_slug='bulk',
)
register(
'UI_NEXT',
field_class=fields.BooleanField,
default=False,
label=_('Enable Preview of New User Interface'),
help_text=_('Enable preview of new user interface.'),
category=_('System'),
category_slug='system',
)
register(
'SUBSCRIPTION_USAGE_MODEL',
field_class=fields.ChoiceField,
choices=[
('', _('Default model for AWX - no subscription. Deletion of host_metrics will not be considered for purposes of managed host counting')),
(
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS,
_('Usage based on unique managed nodes in a large historical time frame and delete functionality for no longer used managed nodes'),
),
],
default='',
allow_blank=True,
label=_('Defines subscription usage model and shows Host Metrics'),
category=_('System'),
category_slug='system',
)
def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

View File

@@ -38,8 +38,6 @@ STANDARD_INVENTORY_UPDATE_ENV = {
'ANSIBLE_INVENTORY_EXPORT': 'True',
# Redirecting output to stderr allows JSON parsing to still work with -vvv
'ANSIBLE_VERBOSE_TO_STDERR': 'True',
# if ansible-inventory --limit is used for an inventory import, unmatched should be a failure
'ANSIBLE_HOST_PATTERN_MISMATCH': 'error',
}
CAN_CANCEL = ('new', 'pending', 'waiting', 'running')
ACTIVE_STATES = CAN_CANCEL
@@ -65,7 +63,7 @@ ENV_BLOCKLIST = frozenset(
'INVENTORY_HOSTVARS',
'AWX_HOST',
'PROJECT_REVISION',
'SUPERVISOR_CONFIG_PATH',
'SUPERVISOR_WEB_CONFIG_PATH',
)
)
@@ -108,9 +106,3 @@ JOB_VARIABLE_PREFIXES = [
ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE = (
'\u001b[31m \u001b[1m This can be caused if the version of ansible-runner in your execution environment is out of date.\u001b[0m'
)
# Values for setting SUBSCRIPTION_USAGE_MODEL
SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'
# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')

View File

@@ -3,7 +3,6 @@ import logging
import time
import hmac
import asyncio
import redis
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
@@ -81,7 +80,7 @@ class WebsocketSecretAuthHelper:
WebsocketSecretAuthHelper.verify_secret(secret)
class RelayConsumer(AsyncJsonWebsocketConsumer):
class BroadcastConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
try:
WebsocketSecretAuthHelper.is_authorized(self.scope)
@@ -101,21 +100,6 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
async def internal_message(self, event):
await self.send(event['text'])
async def receive_json(self, data):
(group, message) = unwrap_broadcast_msg(data)
if group == "metrics":
message = json.loads(message['text'])
conn = redis.Redis.from_url(settings.BROKER_URL)
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
else:
await self.channel_layer.group_send(group, message)
async def consumer_subscribe(self, event):
await self.send_json(event)
async def consumer_unsubscribe(self, event):
await self.send_json(event)
class EventConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
@@ -144,11 +128,6 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(current_groups), "origin_channel": self.channel_name},
)
@database_sync_to_async
def user_can_see_object_id(self, user_access, oid):
# At this point user is a channels.auth.UserLazyObject object
@@ -197,20 +176,9 @@ class EventConsumer(AsyncJsonWebsocketConsumer):
self.channel_name,
)
if len(old_groups):
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.unsubscribe", "groups": list(old_groups), "origin_channel": self.channel_name},
)
new_groups_exclusive = new_groups - current_groups
for group_name in new_groups_exclusive:
await self.channel_layer.group_add(group_name, self.channel_name)
await self.channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{"type": "consumer.subscribe", "groups": list(new_groups), "origin_channel": self.channel_name},
)
self.scope['session']['groups'] = new_groups
await self.send_json({"groups_current": list(new_groups), "groups_left": list(old_groups), "groups_joined": list(new_groups_exclusive)})
@@ -232,11 +200,9 @@ def _dump_payload(payload):
return None
def unwrap_broadcast_msg(payload: dict):
return (payload['group'], payload['message'])
def emit_channel_notification(group, payload):
from awx.main.wsbroadcast import wrap_broadcast_msg # noqa
payload_dumped = _dump_payload(payload)
if payload_dumped is None:
return
@@ -246,6 +212,16 @@ def emit_channel_notification(group, payload):
run_sync(
channel_layer.group_send(
group,
{"type": "internal.message", "text": payload_dumped, "needs_relay": True},
{"type": "internal.message", "text": payload_dumped},
)
)
run_sync(
channel_layer.group_send(
settings.BROADCAST_WEBSOCKET_GROUP_NAME,
{
"type": "internal.message",
"text": wrap_broadcast_msg(group, payload_dumped),
},
)
)

View File

@@ -70,7 +70,7 @@ def aim_backend(**kwargs):
client_cert = kwargs.get('client_cert', None)
client_key = kwargs.get('client_key', None)
verify = kwargs['verify']
webservice_id = kwargs.get('webservice_id', '')
webservice_id = kwargs['webservice_id']
app_id = kwargs['app_id']
object_query = kwargs['object_query']
object_query_format = kwargs['object_query_format']

View File

@@ -68,11 +68,7 @@ def conjur_backend(**kwargs):
with CertFiles(cacert) as cert:
# https://www.conjur.org/api.html#authentication-authenticate-post
auth_kwargs['verify'] = cert
try:
resp = requests.post(urljoin(url, '/'.join(['authn', account, username, 'authenticate'])), **auth_kwargs)
resp.raise_for_status()
except requests.exceptions.HTTPError:
resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
resp = requests.post(urljoin(url, '/'.join(['api', 'authn', account, username, 'authenticate'])), **auth_kwargs)
raise_for_status(resp)
token = resp.content.decode('utf-8')
@@ -82,20 +78,14 @@ def conjur_backend(**kwargs):
}
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
path = urljoin(url, '/'.join(['secrets', account, 'variable', secret_path]))
path_conjurcloud = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
path = urljoin(url, '/'.join(['api', 'secrets', account, 'variable', secret_path]))
if version:
ver = "version={}".format(version)
path = '?'.join([path, ver])
path_conjurcloud = '?'.join([path_conjurcloud, ver])
with CertFiles(cacert) as cert:
lookup_kwargs['verify'] = cert
try:
resp = requests.get(path, timeout=30, **lookup_kwargs)
resp.raise_for_status()
except requests.exceptions.HTTPError:
resp = requests.get(path_conjurcloud, timeout=30, **lookup_kwargs)
resp = requests.get(path, timeout=30, **lookup_kwargs)
raise_for_status(resp)
return resp.text
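One side of the hunk above calls the self-hosted Conjur endpoints first and, on an HTTP error, retries the Conjur Cloud '/api'-prefixed paths. A hedged sketch of that fallback shape; the paths and keyword arguments are illustrative:

from urllib.parse import urljoin
import requests

def get_with_fallback(url, primary_path, fallback_path, **kwargs):
    # try the primary path; on an HTTP error status, retry the fallback path
    try:
        resp = requests.get(urljoin(url, primary_path), timeout=30, **kwargs)
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        resp = requests.get(urljoin(url, fallback_path), timeout=30, **kwargs)
        resp.raise_for_status()
    return resp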

View File

@@ -1,7 +1,6 @@
import copy
import os
import pathlib
import time
from urllib.parse import urljoin
from .plugin import CredentialPlugin, CertFiles, raise_for_status
@@ -248,15 +247,7 @@ def kv_backend(**kwargs):
request_url = urljoin(url, '/'.join(['v1'] + path_segments)).rstrip('/')
with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert
request_retries = 0
while request_retries < 5:
response = sess.get(request_url, **request_kwargs)
# https://developer.hashicorp.com/vault/docs/enterprise/consistency
if response.status_code == 412:
request_retries += 1
time.sleep(1)
else:
break
response = sess.get(request_url, **request_kwargs)
raise_for_status(response)
json = response.json()
@@ -298,15 +289,8 @@ def ssh_backend(**kwargs):
with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert
request_retries = 0
while request_retries < 5:
resp = sess.post(request_url, **request_kwargs)
# https://developer.hashicorp.com/vault/docs/enterprise/consistency
if resp.status_code == 412:
request_retries += 1
time.sleep(1)
else:
break
resp = sess.post(request_url, **request_kwargs)
raise_for_status(resp)
return resp.json()['data']['signed_key']
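The loops above retry requests that come back with HTTP 412, which Vault Enterprise replicas return until their index is consistent, sleeping one second between attempts. A generic sketch of that retry loop with hypothetical parameter names:

import time

def request_with_412_retry(session, method, url, retries=5, delay=1.0, **kwargs):
    # session is assumed to be a requests.Session
    # https://developer.hashicorp.com/vault/docs/enterprise/consistency
    response = session.request(method, url, **kwargs)
    for _ in range(retries - 1):
        if response.status_code != 412:
            break
        time.sleep(delay)
        response = session.request(method, url, **kwargs)
    return response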

View File

@@ -49,10 +49,7 @@ def tss_backend(**kwargs):
secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict)
if isinstance(secret.fields[kwargs['secret_field']].value, str) == False:
return secret.fields[kwargs['secret_field']].value.text
else:
return secret.fields[kwargs['secret_field']].value
return secret.fields[kwargs['secret_field']].value
tss_plugin = CredentialPlugin(

View File

@@ -63,7 +63,7 @@ class RecordedQueryLog(object):
if not os.path.isdir(self.dest):
os.makedirs(self.dest)
progname = ' '.join(sys.argv)
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsrelay'):
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
if match in progname:
progname = match
break

View File

@@ -1,4 +1,3 @@
import os
import psycopg2
import select
@@ -7,6 +6,7 @@ from contextlib import contextmanager
from django.conf import settings
from django.db import connection as pg_connection
NOT_READY = ([], [], [])
@@ -14,29 +14,6 @@ def get_local_queuename():
return settings.CLUSTER_HOST_ID
def get_task_queuename():
if os.getenv('AWX_COMPONENT') != 'web':
return settings.CLUSTER_HOST_ID
from awx.main.models.ha import Instance
random_task_instance = (
Instance.objects.filter(
node_type__in=(Instance.Types.CONTROL, Instance.Types.HYBRID),
node_state=Instance.States.READY,
enabled=True,
)
.only('hostname')
.order_by('?')
.first()
)
if random_task_instance is None:
raise ValueError('No task instances are READY and Enabled.')
return random_task_instance.hostname
class PubSub(object):
def __init__(self, conn):
self.conn = conn

View File

@@ -6,7 +6,7 @@ from django.conf import settings
from django.db import connection
import redis
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch import get_local_queuename
from . import pg_bus_conn
@@ -14,6 +14,7 @@ logger = logging.getLogger('awx.main.dispatch')
class Control(object):
services = ('dispatcher', 'callback_receiver')
result = None
@@ -21,7 +22,7 @@ class Control(object):
if service not in self.services:
raise RuntimeError('{} must be in {}'.format(service, self.services))
self.service = service
self.queuename = host or get_task_queuename()
self.queuename = host or get_local_queuename()
def status(self, *args, **kwargs):
r = redis.Redis.from_url(settings.BROKER_URL)

View File

@@ -192,6 +192,7 @@ class PoolWorker(object):
class StatefulPoolWorker(PoolWorker):
track_managed_tasks = True

View File

@@ -66,6 +66,7 @@ class task:
bind_kwargs = self.bind_kwargs
class PublisherMixin(object):
queue = None
@classmethod

View File

@@ -70,7 +70,7 @@ def reap_waiting(instance=None, status='failed', job_explanation=None, grace_per
reap_job(j, status, job_explanation=job_explanation)
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None, ref_time=None):
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
"""
Reap all jobs in running for this instance.
"""
@@ -79,11 +79,9 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
else:
hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
base_Q = Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
if ref_time:
jobs = UnifiedJob.objects.filter(base_Q & Q(started__lte=ref_time))
else:
jobs = UnifiedJob.objects.filter(base_Q)
jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
)
if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
for j in jobs:

View File

@@ -40,6 +40,7 @@ class WorkerSignalHandler:
class AWXConsumerBase(object):
last_stats = time.time()
def __init__(self, name, worker, queues=[], pool=None):

View File

@@ -3,12 +3,14 @@ import logging
import os
import signal
import time
import traceback
import datetime
from django.conf import settings
from django.utils.functional import cached_property
from django.utils.timezone import now as tz_now
from django.db import transaction, connection as django_connection
from django.db import DatabaseError, OperationalError, transaction, connection as django_connection
from django.db.utils import InterfaceError, InternalError
from django_guid import set_guid
import psutil
@@ -62,7 +64,6 @@ class CallbackBrokerWorker(BaseWorker):
"""
MAX_RETRIES = 2
INDIVIDUAL_EVENT_RETRIES = 3
last_stats = time.time()
last_flush = time.time()
total = 0
@@ -154,8 +155,6 @@ class CallbackBrokerWorker(BaseWorker):
metrics_events_missing_created = 0
metrics_total_job_event_processing_seconds = datetime.timedelta(seconds=0)
for cls, events in self.buff.items():
if not events:
continue
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
e.modified = now # this can be set before created because now is set above on line 149
@@ -165,48 +164,38 @@ class CallbackBrokerWorker(BaseWorker):
else: # only calculate the seconds if the created time already has been set
metrics_total_job_event_processing_seconds += e.modified - e.created
metrics_duration_to_save = time.perf_counter()
saved_events = []
try:
cls.objects.bulk_create(events)
metrics_bulk_events_saved += len(events)
saved_events = events
self.buff[cls] = []
except Exception as exc:
# If the database is flaking, let ensure_connection throw a general exception
# will be caught by the outer loop, which goes into a proper sleep and retry loop
django_connection.ensure_connection()
logger.warning(f'Error in events bulk_create, will try individually, error: {str(exc)}')
logger.warning(f'Error in events bulk_create, will try individually up to 5 errors, error {str(exc)}')
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
consecutive_errors = 0
events_saved = 0
metrics_events_batch_save_errors += 1
for e in events.copy():
for e in events:
try:
e.save()
metrics_singular_events_saved += 1
events.remove(e)
saved_events.append(e) # Importantly, remove successfully saved events from the buffer
events_saved += 1
consecutive_errors = 0
except Exception as exc_indv:
retry_count = getattr(e, '_retry_count', 0) + 1
e._retry_count = retry_count
# special sanitization logic for postgres treatment of NUL 0x00 char
if (retry_count == 1) and isinstance(exc_indv, ValueError) and ("\x00" in e.stdout):
e.stdout = e.stdout.replace("\x00", "")
if retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
logger.error(f'Hit max retries ({retry_count}) saving individual Event error: {str(exc_indv)}\ndata:\n{e.__dict__}')
events.remove(e)
else:
logger.info(f'Database Error Saving individual Event uuid={e.uuid} try={retry_count}, error: {str(exc_indv)}')
consecutive_errors += 1
logger.info(f'Database Error Saving individual Job Event, error {str(exc_indv)}')
if consecutive_errors >= 5:
raise
metrics_singular_events_saved += events_saved
if events_saved == 0:
raise
metrics_duration_to_save = time.perf_counter() - metrics_duration_to_save
for e in saved_events:
for e in events:
if not getattr(e, '_skip_websocket_message', False):
metrics_events_broadcast += 1
emit_event_detail(e)
if getattr(e, '_notification_trigger_event', False):
job_stats_wrapup(getattr(e, e.JOB_REFERENCE), event=e)
self.buff = {}
self.last_flush = time.time()
# only update metrics if we saved events
if (metrics_bulk_events_saved + metrics_singular_events_saved) > 0:
@@ -278,16 +267,20 @@ class CallbackBrokerWorker(BaseWorker):
try:
self.flush(force=flush)
break
except Exception as exc:
# Aside from bugs, exceptions here are assumed to be due to database flake
except (OperationalError, InterfaceError, InternalError) as exc:
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
self.buff = {}
return
delay = 60 * retries
logger.warning(f'Database Error Flushing Job Events, retry #{retries + 1} in {delay} seconds: {str(exc)}')
django_connection.close()
time.sleep(delay)
retries += 1
except Exception:
logger.exception(f'Callback Task Processor Raised Unexpected Exception processing event data:\n{body}')
except DatabaseError:
logger.exception('Database Error Flushing Job Events')
django_connection.close()
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))

View File

@@ -26,8 +26,8 @@ class TaskWorker(BaseWorker):
`awx.main.dispatch.publish`.
"""
@staticmethod
def resolve_callable(task):
@classmethod
def resolve_callable(cls, task):
"""
Transform a dotted notation task into an imported, callable function, e.g.,
@@ -46,8 +46,7 @@ class TaskWorker(BaseWorker):
return _call
@staticmethod
def run_callable(body):
def run_callable(self, body):
"""
Given some AMQP message, import the correct Python code and run it.
"""

View File

@@ -232,6 +232,7 @@ class ImplicitRoleField(models.ForeignKey):
field_names = [field_names]
for field_name in field_names:
if field_name.startswith('singleton:'):
continue
@@ -243,6 +244,7 @@ class ImplicitRoleField(models.ForeignKey):
field = getattr(cls, field_name, None)
if field and type(field) is ReverseManyToOneDescriptor or type(field) is ManyToManyDescriptor:
if '.' in field_attr:
raise Exception('Referencing deep roles through ManyToMany fields is unsupported.')
@@ -627,6 +629,7 @@ class CredentialInputField(JSONSchemaField):
# `ssh_key_unlock` requirements are very specific and can't be
# represented without complicated JSON schema
if model_instance.credential_type.managed is True and 'ssh_key_unlock' in defined_fields:
# in order to properly test the necessity of `ssh_key_unlock`, we
# need to know the real value of `ssh_key_data`; for a payload like:
# {
@@ -788,8 +791,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
'type': 'object',
'patternProperties': {
# http://docs.ansible.com/ansible/playbooks_variables.html#what-makes-a-valid-variable-name
# plus, add ability to template
r'^[a-zA-Z_\{\}]+[a-zA-Z0-9_\{\}]*$': {"anyOf": [{'type': 'string'}, {'type': 'array'}, {'$ref': '#/properties/extra_vars'}]}
'^[a-zA-Z_]+[a-zA-Z0-9_]*$': {'type': 'string'},
},
'additionalProperties': False,
},
@@ -856,44 +858,27 @@ class CredentialTypeInjectorField(JSONSchemaField):
template_name = template_name.split('.')[1]
setattr(valid_namespace['tower'].filename, template_name, 'EXAMPLE_FILENAME')
def validate_template_string(type_, key, tmpl):
try:
sandbox.ImmutableSandboxedEnvironment(undefined=StrictUndefined).from_string(tmpl).render(valid_namespace)
except UndefinedError as e:
raise django_exceptions.ValidationError(
_('{sub_key} uses an undefined field ({error_msg})').format(sub_key=key, error_msg=e),
code='invalid',
params={'value': value},
)
except SecurityError as e:
raise django_exceptions.ValidationError(_('Encountered unsafe code execution: {}').format(e))
except TemplateSyntaxError as e:
raise django_exceptions.ValidationError(
_('Syntax error rendering template for {sub_key} inside of {type} ({error_msg})').format(sub_key=key, type=type_, error_msg=e),
code='invalid',
params={'value': value},
)
def validate_extra_vars(key, node):
if isinstance(node, dict):
for k, v in node.items():
validate_template_string("extra_vars", 'a key' if key is None else key, k)
validate_extra_vars(k if key is None else "{key}.{k}".format(key=key, k=k), v)
elif isinstance(node, list):
for i, x in enumerate(node):
validate_extra_vars("{key}[{i}]".format(key=key, i=i), x)
else:
validate_template_string("extra_vars", key, node)
for type_, injector in value.items():
if type_ == 'env':
for key in injector.keys():
self.validate_env_var_allowed(key)
if type_ == 'extra_vars':
validate_extra_vars(None, injector)
else:
for key, tmpl in injector.items():
validate_template_string(type_, key, tmpl)
for key, tmpl in injector.items():
try:
sandbox.ImmutableSandboxedEnvironment(undefined=StrictUndefined).from_string(tmpl).render(valid_namespace)
except UndefinedError as e:
raise django_exceptions.ValidationError(
_('{sub_key} uses an undefined field ({error_msg})').format(sub_key=key, error_msg=e),
code='invalid',
params={'value': value},
)
except SecurityError as e:
raise django_exceptions.ValidationError(_('Encountered unsafe code execution: {}').format(e))
except TemplateSyntaxError as e:
raise django_exceptions.ValidationError(
_('Syntax error rendering template for {sub_key} inside of {type} ({error_msg})').format(sub_key=key, type=type_, error_msg=e),
code='invalid',
params={'value': value},
)
class AskForField(models.BooleanField):
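The validation code above renders every injector template in an immutable Jinja2 sandbox with StrictUndefined, and one side of the hunk additionally walks nested extra_vars structures recursively. A minimal sketch of that recursive walk, assuming a caller-supplied namespace dict; validation failures simply propagate here as Jinja2 exceptions:

from jinja2 import StrictUndefined
from jinja2.sandbox import ImmutableSandboxedEnvironment

def validate_templates(node, namespace):
    # dicts and lists are walked recursively; every leaf (and every dict key)
    # must render cleanly against the supplied namespace
    if isinstance(node, dict):
        for key, value in node.items():
            validate_templates(key, namespace)
            validate_templates(value, namespace)
    elif isinstance(node, list):
        for item in node:
            validate_templates(item, namespace)
    else:
        ImmutableSandboxedEnvironment(undefined=StrictUndefined).from_string(str(node)).render(namespace)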
@@ -954,16 +939,6 @@ class OrderedManyToManyDescriptor(ManyToManyDescriptor):
def get_queryset(self):
return super(OrderedManyRelatedManager, self).get_queryset().order_by('%s__position' % self.through._meta.model_name)
def add(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().add(*objects)
def remove(self, *objects):
if len(objects) > 1:
raise RuntimeError('Ordered many-to-many fields do not support multiple objects')
return super().remove(*objects)
return OrderedManyRelatedManager
return add_custom_queryset_to_many_related_manager(
@@ -981,12 +956,13 @@ class OrderedManyToManyField(models.ManyToManyField):
by a special `position` column on the M2M table
"""
def _update_m2m_position(self, sender, instance, action, **kwargs):
if action in ('post_add', 'post_remove'):
descriptor = getattr(instance, self.name)
order_with_respect_to = descriptor.source_field_name
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: instance.pk})):
def _update_m2m_position(self, sender, **kwargs):
if kwargs.get('action') in ('post_add', 'post_remove'):
order_with_respect_to = None
for field in sender._meta.local_fields:
if isinstance(field, models.ForeignKey) and isinstance(kwargs['instance'], field.related_model):
order_with_respect_to = field.name
for i, ig in enumerate(sender.objects.filter(**{order_with_respect_to: kwargs['instance'].pk})):
if ig.position != i:
ig.position = i
ig.save()
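Both variants of _update_m2m_position above renumber a position column on the through table after a post_add or post_remove signal so the ordering survives membership changes. A generic sketch of that renumbering, with the through model and owner field passed in explicitly (names are hypothetical):

def renumber_positions(through_model, owner_field, owner_pk):
    # walk the rows in their current position order and close any gaps
    rows = through_model.objects.filter(**{owner_field: owner_pk}).order_by('position')
    for index, row in enumerate(rows):
        if row.position != index:
            row.position = index
            row.save(update_fields=['position'])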

View File

@@ -9,6 +9,7 @@ class Command(BaseCommand):
"""Checks connection to the database, and prints out connection info if not connected"""
def handle(self, *args, **options):
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
version = str(cursor.fetchone()[0])

View File

@@ -82,6 +82,7 @@ class DeleteMeta:
part_drop = {}
for pk, status, created in self.jobs_qs:
part_key = partition_table_name(self.job_class, created)
if status in ['pending', 'waiting', 'running']:
part_drop[part_key] = False

View File

@@ -17,6 +17,7 @@ class Command(BaseCommand):
def handle(self, *args, **options):
if not options['user']:
raise CommandError('Username not supplied. Usage: awx-manage create_oauth2_token --user=username.')
try:
user = User.objects.get(username=options['user'])

View File

@@ -1,143 +0,0 @@
import time
from urllib.parse import urljoin
from argparse import ArgumentTypeError
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils.timezone import now
from awx.main.models import Instance, UnifiedJob
class AWXInstance:
def __init__(self, **filter):
self.filter = filter
self.get_instance()
def get_instance(self):
filter = self.filter if self.filter is not None else dict(hostname=settings.CLUSTER_HOST_ID)
qs = Instance.objects.filter(**filter)
if not qs.exists():
raise ValueError(f"No AWX instance found with {filter} parameters")
self.instance = qs.first()
def disable(self):
if self.instance.enabled:
self.instance.enabled = False
self.instance.save()
return True
def enable(self):
if not self.instance.enabled:
self.instance.enabled = True
self.instance.save()
return True
def jobs(self):
return UnifiedJob.objects.filter(
Q(controller_node=self.instance.hostname) | Q(execution_node=self.instance.hostname), status__in=("running", "waiting")
)
def jobs_pretty(self):
jobs = []
for j in self.jobs():
job_started = j.started if j.started else now()
# similar calculation of `elapsed` as the corresponding serializer
# does
td = now() - job_started
elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / (10**6 * 1.0)
elapsed = float(elapsed)
details = dict(
name=j.name,
url=j.get_ui_url(),
elapsed=elapsed,
)
jobs.append(details)
jobs = sorted(jobs, reverse=True, key=lambda j: j["elapsed"])
return ", ".join([f"[\"{j['name']}\"]({j['url']})" for j in jobs])
def instance_pretty(self):
instance = (
self.instance.hostname,
urljoin(settings.TOWER_URL_BASE, f"/#/instances/{self.instance.pk}/details"),
)
return f"[\"{instance[0]}\"]({instance[1]})"
class Command(BaseCommand):
help = "Disable instance, optionally waiting for all its managed jobs to finish."
@staticmethod
def ge_1(arg):
if arg == "inf":
return float("inf")
int_arg = int(arg)
if int_arg < 1:
raise ArgumentTypeError(f"The value must be a positive number >= 1. Provided: \"{arg}\"")
return int_arg
def add_arguments(self, parser):
filter_group = parser.add_mutually_exclusive_group()
filter_group.add_argument(
"--hostname",
type=str,
default=settings.CLUSTER_HOST_ID,
help=f"{Instance.hostname.field.help_text} Defaults to the hostname of the machine where the Python interpreter is currently executing".strip(),
)
filter_group.add_argument("--id", type=self.ge_1, help=Instance.id.field.help_text)
parser.add_argument(
"--wait",
action="store_true",
help="Wait for jobs managed by the instance to finish. With default retry arguments waits ~1h",
)
parser.add_argument(
"--retry",
type=self.ge_1,
default=120,
help="Number of retries when waiting for jobs to finish. Default: 120. Also accepts \"inf\" to wait indefinitely",
)
parser.add_argument(
"--retry_sleep",
type=self.ge_1,
default=30,
help="Number of seconds to sleep before consequtive retries when waiting. Default: 30",
)
def handle(self, *args, **options):
try:
filter = dict(id=options["id"]) if options["id"] is not None else dict(hostname=options["hostname"])
instance = AWXInstance(**filter)
except ValueError as e:
raise CommandError(e)
if instance.disable():
self.stdout.write(self.style.SUCCESS(f"Instance {instance.instance_pretty()} has been disabled"))
else:
self.stdout.write(f"Instance {instance.instance_pretty()} has already been disabled")
if not options["wait"]:
return
rc = 1
while instance.jobs().count() > 0:
if rc < options["retry"]:
self.stdout.write(
f"{rc}/{options['retry']}: Waiting {options['retry_sleep']}s before the next attempt to see if the following instance' managed jobs have finished: {instance.jobs_pretty()}"
)
rc += 1
time.sleep(options["retry_sleep"])
else:
raise CommandError(
f"{rc}/{options['retry']}: No more retry attempts left, but the instance still has associated managed jobs: {instance.jobs_pretty()}"
)
else:
self.stdout.write(self.style.SUCCESS("Done waiting for instance' managed jobs to finish!"))

View File

@@ -1,38 +0,0 @@
from awx.main.tasks.system import clear_setting_cache
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
"""enable or disable authentication system"""
def add_arguments(self, parser):
"""
This adds the --enable/--disable options to the command using a mutually exclusive group to avoid situations in which users pass both flags
"""
group = parser.add_mutually_exclusive_group()
group.add_argument('--enable', dest='enable', action='store_true', help='Pass --enable to enable local authentication')
group.add_argument('--disable', dest='disable', action='store_true', help='Pass --disable to disable local authentication')
def _enable_disable_auth(self, enable, disable):
"""
This method enables or disables local authentication based on the argument passed into the parser.
If no argument is given, a CommandError is raised; if --enable is passed, DISABLE_LOCAL_AUTH is set to False,
and if --disable is passed it is set to True. Note that the setting name is counterintuitive to what is expected.
"""
if enable:
settings.DISABLE_LOCAL_AUTH = False
print("Setting has changed to {} allowing local authentication".format(settings.DISABLE_LOCAL_AUTH))
elif disable:
settings.DISABLE_LOCAL_AUTH = True
print("Setting has changed to {} disallowing local authentication".format(settings.DISABLE_LOCAL_AUTH))
else:
raise CommandError('Please pass --enable flag to allow local auth or --disable flag to disable local auth')
clear_setting_cache.delay(['DISABLE_LOCAL_AUTH'])
def handle(self, **options):
self._enable_disable_auth(options.get('enable'), options.get('disable'))
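A self-contained sketch of the mutually exclusive flag handling the docstring describes (plain argparse, outside Django):

import argparse

parser = argparse.ArgumentParser(description='enable or disable local authentication')
group = parser.add_mutually_exclusive_group()
group.add_argument('--enable', action='store_true', help='allow local authentication')
group.add_argument('--disable', action='store_true', help='disallow local authentication')

print(parser.parse_args(['--enable']))          # Namespace(enable=True, disable=False)
# parser.parse_args(['--enable', '--disable'])  # argparse exits with a "not allowed with argument" error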

View File

@@ -10,6 +10,7 @@ from django.utils.text import slugify
class Command(BaseCommand):
help = 'Export custom inventory scripts into a tarfile.'
def add_arguments(self, parser):
@@ -20,6 +21,7 @@ class Command(BaseCommand):
with tempfile.TemporaryDirectory() as tmpdirname:
with tarfile.open(tar_filename, "w") as tar:
for cis in CustomInventoryScript.objects.all():
# naming convention similar to project paths
slug_name = slugify(str(cis.name)).replace(u'-', u'_')

View File

@@ -1,230 +1,54 @@
from django.core.management.base import BaseCommand
import datetime
from django.core.serializers.json import DjangoJSONEncoder
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.analytics.collectors import config
from awx.main.models.inventory import HostMetric
import json
import sys
import tempfile
import tarfile
import csv
CSV_PREFERRED_ROW_COUNT = 500000
BATCHED_FETCH_COUNT = 10000
class Command(BaseCommand):
help = 'This is for offline licensing usage'
def host_metric_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'hostname',
'first_automation',
'last_automation',
'last_deleted',
'automated_counter',
'deleted_counter',
'deleted',
'used_in_inventories',
).order_by('first_automation')[offset : offset + limit]
)
return list_of_queryset
def host_metric_summary_monthly_queryset(self, result, offset=0, limit=BATCHED_FETCH_COUNT):
list_of_queryset = list(
result.values(
'id',
'date',
'license_consumed',
'license_capacity',
'hosts_added',
'hosts_deleted',
'indirectly_managed_hosts',
).order_by(
'date'
)[offset : offset + limit]
)
return list_of_queryset
def paginated_db_retrieval(self, type, filter_kwargs, rows_per_file):
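# yield successive slices of the filtered queryset, rows_per_file rows at a time, until no rows remain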
offset = 0
list_of_queryset = []
while True:
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_queryset(result, offset, rows_per_file)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_summary_monthly_queryset(result, offset, rows_per_file)
if not list_of_queryset:
break
else:
yield list_of_queryset
offset += len(list_of_queryset)
def controlled_db_retrieval(self, type, filter_kwargs, offset=0, fetch_count=BATCHED_FETCH_COUNT):
if type == 'host_metric':
result = HostMetric.objects.filter(**filter_kwargs)
return self.host_metric_queryset(result, offset, fetch_count)
elif type == 'host_metric_summary_monthly':
result = HostMetricSummaryMonthly.objects.filter(**filter_kwargs)
return self.host_metric_summary_monthly_queryset(result, offset, fetch_count)
def write_to_csv(self, csv_file, list_of_queryset, always_header, first_write=False, mode='a'):
with open(csv_file, mode, newline='') as output_file:
try:
keys = list_of_queryset[0].keys() if list_of_queryset else []
dict_writer = csv.DictWriter(output_file, keys)
if always_header or first_write:
dict_writer.writeheader()
dict_writer.writerows(list_of_queryset)
except Exception as e:
print(e)
def csv_for_tar(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
for index, list_of_queryset in enumerate(self.paginated_db_retrieval(type, filter_kwargs, rows_per_file)):
csv_file = f'{temp_dir}/{type}{index+1}.csv'
arcname_file = f'{type}{index+1}.csv'
first_write = True if index == 0 else False
self.write_to_csv(csv_file, list_of_queryset, always_header, first_write, 'w')
yield csv_file, arcname_file
def csv_for_tar_batched_fetch(self, temp_dir, type, filter_kwargs, rows_per_file, always_header=True):
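# like csv_for_tar, but fetch from the DB in BATCHED_FETCH_COUNT chunks and keep appending to the
# current CSV until rows_per_file rows have been written, then yield that file and start a new one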
csv_iteration = 1
offset = 0
rows_written_per_csv = 0
to_fetch = BATCHED_FETCH_COUNT
while True:
list_of_queryset = self.controlled_db_retrieval(type, filter_kwargs, offset, to_fetch)
if not list_of_queryset:
break
csv_file = f'{temp_dir}/{type}{csv_iteration}.csv'
arcname_file = f'{type}{csv_iteration}.csv'
self.write_to_csv(csv_file, list_of_queryset, always_header)
offset += to_fetch
rows_written_per_csv += to_fetch
always_header = False
remaining_rows_per_csv = rows_per_file - rows_written_per_csv
if not remaining_rows_per_csv:
yield csv_file, arcname_file
rows_written_per_csv = 0
always_header = True
to_fetch = BATCHED_FETCH_COUNT
csv_iteration += 1
elif remaining_rows_per_csv < BATCHED_FETCH_COUNT:
to_fetch = remaining_rows_per_csv
if rows_written_per_csv:
yield csv_file, arcname_file
def config_for_tar(self, options, temp_dir):
config_json = json.dumps(config(options.get('since')))
config_file = f'{temp_dir}/config.json'
arcname_file = 'config.json'
with open(config_file, 'w') as f:
f.write(config_json)
return config_file, arcname_file
def output_json(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('json', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, True):
csv_file = csv_detail[0]
with open(csv_file) as f:
reader = csv.DictReader(f)
rows = list(reader)
json_result = json.dumps(rows, cls=DjangoJSONEncoder)
print(json_result)
def output_csv(self, options, filter_kwargs):
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in self.csv_for_tar(temp_dir, options.get('csv', 'host_metric'), filter_kwargs, BATCHED_FETCH_COUNT, False):
csv_file = csv_detail[0]
with open(csv_file) as f:
sys.stdout.write(f.read())
def output_tarball(self, options, filter_kwargs):
always_header = True
rows_per_file = options['rows_per_file'] or CSV_PREFERRED_ROW_COUNT
tar = tarfile.open("./host_metrics.tar.gz", "w:gz")
if rows_per_file <= BATCHED_FETCH_COUNT:
csv_function = self.csv_for_tar
else:
csv_function = self.csv_for_tar_batched_fetch
with tempfile.TemporaryDirectory() as temp_dir:
for csv_detail in csv_function(temp_dir, 'host_metric', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])
for csv_detail in csv_function(temp_dir, 'host_metric_summary_monthly', filter_kwargs, rows_per_file, always_header):
tar.add(csv_detail[0], arcname=csv_detail[1])
config_file, arcname_file = self.config_for_tar(options, temp_dir)
tar.add(config_file, arcname=arcname_file)
tar.close()
def add_arguments(self, parser):
parser.add_argument('--since', type=datetime.datetime.fromisoformat, help='Start Date in ISO format YYYY-MM-DD')
parser.add_argument('--json', type=str, const='host_metric', nargs='?', help='Select output as JSON for host_metric or host_metric_summary_monthly')
parser.add_argument('--csv', type=str, const='host_metric', nargs='?', help='Select output as CSV for host_metric or host_metric_summary_monthly')
parser.add_argument('--tarball', action='store_true', help=f'Package CSV files into a tar with up to {CSV_PREFERRED_ROW_COUNT} rows per file')
parser.add_argument('--rows_per_file', type=int, help=f'Split rows in chunks of {CSV_PREFERRED_ROW_COUNT}')
parser.add_argument('--until', type=datetime.datetime.fromisoformat, help='End Date in ISO format YYYY-MM-DD')
parser.add_argument('--json', action='store_true', help='Select output as JSON')
def handle(self, *args, **options):
since = options.get('since')
until = options.get('until')
if since is None and until is None:
print("No Arguments received")
return None
if since is not None and since.tzinfo is None:
since = since.replace(tzinfo=datetime.timezone.utc)
if until is not None and until.tzinfo is None:
until = until.replace(tzinfo=datetime.timezone.utc)
filter_kwargs = {}
if since is not None:
filter_kwargs['last_automation__gte'] = since
if until is not None:
filter_kwargs['last_automation__lte'] = until
filter_kwargs_host_metrics_summary = {}
if since is not None:
filter_kwargs_host_metrics_summary['date__gte'] = since
if options['rows_per_file'] and options.get('rows_per_file') > CSV_PREFERRED_ROW_COUNT:
print(f"rows_per_file exceeds the allowable limit of {CSV_PREFERRED_ROW_COUNT}.")
return
result = HostMetric.objects.filter(**filter_kwargs)
# if --json flag is set, output the result in json format
if options['json']:
self.output_json(options, filter_kwargs)
elif options['csv']:
self.output_csv(options, filter_kwargs)
elif options['tarball']:
self.output_tarball(options, filter_kwargs)
list_of_queryset = list(result.values('hostname', 'first_automation', 'last_automation'))
json_result = json.dumps(list_of_queryset, cls=DjangoJSONEncoder)
print(json_result)
# --json flag is not set, output in plain text
else:
print(f"Printing up to {BATCHED_FETCH_COUNT} automated hosts:")
result = HostMetric.objects.filter(**filter_kwargs)
list_of_queryset = self.host_metric_queryset(result, 0, BATCHED_FETCH_COUNT)
for item in list_of_queryset:
print(f"Total Number of hosts automated: {len(result)}")
for item in result:
print(
"Hostname : {hostname} | first_automation : {first_automation} | last_automation : {last_automation}".format(
hostname=item['hostname'], first_automation=item['first_automation'], last_automation=item['last_automation']
hostname=item.hostname, first_automation=item.first_automation, last_automation=item.last_automation
)
)
return
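The export above reduces to slicing a row source into fixed-size chunks and writing one CSV per chunk; a minimal standalone sketch of that pattern, with plain dicts standing in for the queryset `.values()` rows:

import csv

def paginate(rows, chunk_size):
    # yield successive fixed-size slices of the row list
    for offset in range(0, len(rows), chunk_size):
        yield rows[offset:offset + chunk_size]

def export_chunks(rows, chunk_size, prefix='host_metric'):
    # write one CSV per chunk (into the current directory) and yield the file names
    for index, chunk in enumerate(paginate(rows, chunk_size), start=1):
        filename = f'{prefix}{index}.csv'
        with open(filename, 'w', newline='') as output_file:
            writer = csv.DictWriter(output_file, fieldnames=chunk[0].keys())
            writer.writeheader()
            writer.writerows(chunk)
        yield filename

rows = [{'hostname': f'host{i}', 'automated_counter': i} for i in range(5)]
print(list(export_chunks(rows, chunk_size=2)))  # ['host_metric1.csv', 'host_metric2.csv', 'host_metric3.csv']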

View File

@@ -458,19 +458,12 @@ class Command(BaseCommand):
# TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other.
# issue for this: https://github.com/ansible/awx/issues/11623
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
# NOTE: we had to add an exception case to not merge variables
# to make constructed inventory coherent
db_variables = self.all_group.variables
else:
db_variables = self.inventory.variables_dict
db_variables.update(self.all_group.variables)
if db_variables != self.inventory.variables_dict:
self.inventory.variables = json.dumps(db_variables)
self.inventory.save(update_fields=['variables'])
all_obj = self.inventory
db_variables = all_obj.variables_dict
db_variables.update(self.all_group.variables)
if db_variables != all_obj.variables_dict:
all_obj.variables = json.dumps(db_variables)
all_obj.save(update_fields=['variables'])
logger.debug('Inventory variables updated from "all" group')
else:
logger.debug('Inventory variables unmodified')
@@ -529,32 +522,16 @@ class Command(BaseCommand):
def _update_db_host_from_mem_host(self, db_host, mem_host):
# Update host variables.
db_variables = db_host.variables_dict
mem_variables = mem_host.variables
update_fields = []
# Update host instance_id.
instance_id = self._get_instance_id(mem_variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
if self.inventory.kind == 'constructed':
# remove remote tower/host vars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
mem_variables.pop(var.format(prefix), None)
if self.overwrite_vars:
db_variables = mem_variables
db_variables = mem_host.variables
else:
db_variables.update(mem_variables)
db_variables.update(mem_host.variables)
update_fields = []
if db_variables != db_host.variables_dict:
db_host.variables = json.dumps(db_variables)
update_fields.append('variables')
# Update host enabled flag.
enabled = self._get_enabled(mem_variables)
enabled = self._get_enabled(mem_host.variables)
if enabled is not None and db_host.enabled != enabled:
db_host.enabled = enabled
update_fields.append('enabled')
@@ -563,6 +540,12 @@ class Command(BaseCommand):
old_name = db_host.name
db_host.name = mem_host.name
update_fields.append('name')
# Update host instance_id.
instance_id = self._get_instance_id(mem_host.variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
# Update host and display message(s) on what changed.
if update_fields:
db_host.save(update_fields=update_fields)
@@ -671,19 +654,13 @@ class Command(BaseCommand):
mem_host = self.all_group.all_hosts[mem_host_name]
import_vars = mem_host.variables
host_desc = import_vars.pop('_awx_description', 'imported')
host_attrs = dict(description=host_desc)
host_attrs = dict(variables=json.dumps(import_vars), description=host_desc)
enabled = self._get_enabled(mem_host.variables)
if enabled is not None:
host_attrs['enabled'] = enabled
if self.instance_id_var:
instance_id = self._get_instance_id(mem_host.variables)
host_attrs['instance_id'] = instance_id
if self.inventory.kind == 'constructed':
# remove remote tower/host vars so the constructed hosts do not have extra variables
for prefix in ('host', 'tower'):
for var in ('remote_{}_enabled', 'remote_{}_id'):
import_vars.pop(var.format(prefix), None)
host_attrs['variables'] = json.dumps(import_vars)
try:
sanitize_jinja(mem_host_name)
except ValueError as e:
@@ -874,7 +851,6 @@ class Command(BaseCommand):
logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))
# Create ad-hoc inventory source and inventory update objects
ee = get_default_execution_environment()
with ignore_inventory_computed_fields():
source = Command.get_source_absolute_path(raw_source)
@@ -884,22 +860,14 @@ class Command(BaseCommand):
source_path=os.path.abspath(source),
overwrite=bool(options.get('overwrite', False)),
overwrite_vars=bool(options.get('overwrite_vars', False)),
execution_environment=ee,
)
inventory_update = inventory_source.create_inventory_update(
_eager_fields=dict(
status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd(), execution_environment=ee
)
_eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
)
try:
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
logger.debug('Finished loading from source: %s', source)
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
except SystemExit:
logger.debug("Error occurred while running ansible-inventory")
inventory_update.cancel()
sys.exit(1)
logger.debug('Finished loading from source: %s', source)
status, tb, exc = 'error', '', None
try:
@@ -966,6 +934,7 @@ class Command(BaseCommand):
# (even though inventory_import.Command.handle -- which calls
# perform_update -- has its own lock, inventory_ID_import)
with advisory_lock('inventory_{}_perform_update'.format(self.inventory.id)):
try:
self.check_license()
except PermissionDenied as e:

View File

@@ -6,6 +6,7 @@ from django.core.management.base import BaseCommand
class Ungrouped(object):
name = 'ungrouped'
policy_instance_percentage = None
policy_instance_minimum = None

View File

@@ -7,6 +7,7 @@ from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = (
"Remove an instance (specified by --hostname) from the specified queue (instance group).\n"
"In order remove the queue, use the `unregister_queue` command."

Some files were not shown because too many files have changed in this diff.