Compare commits


1 Commit

Author: David O Neill
SHA1: e0acd9b111
Message: Change failing PR to draft
Date: 2024-02-12 16:15:47 +00:00
131 changed files with 757 additions and 1717 deletions

View File

@@ -11,12 +11,6 @@ runs:
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
shell: bash
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'
- name: Log in to registry
shell: bash
run: |
@@ -24,11 +18,11 @@ runs:
- name: Pre-pull latest devel image to warm cache
shell: bash
run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}
run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
- name: Build image for current source checkout
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
make docker-compose-build
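
One side of this hunk lowercases the owner with bash's `${OWNER,,}` parameter expansion before composing the ghcr.io image reference; OCI image references require a lowercase repository path, so a mixed-case GitHub owner must be normalized. A minimal Python sketch of the same computation (owner and tag values here are illustrative):

    owner = "My-Org"          # e.g. github.repository_owner
    compose_tag = "devel"     # e.g. github.base_ref
    # registries such as ghcr.io only accept lowercase repository paths,
    # which is what ${OWNER,,} achieves in the bash step above
    image = f"ghcr.io/{owner.lower()}/awx_devel:{compose_tag}"
    print(image)  # ghcr.io/my-org/awx_devel:devel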

View File

@@ -35,7 +35,7 @@ runs:
- name: Start AWX
shell: bash
run: |
DEV_DOCKER_OWNER=${{ github.repository_owner }} \
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
COMPOSE_UP_OPTS="-d" \
make docker-compose

View File

@@ -15,4 +15,5 @@
"dependencies":
- any: ["awx/ui/package.json"]
- any: ["requirements/*"]
- any: ["requirements/*.txt"]
- any: ["requirements/requirements.in"]

View File

@@ -127,6 +127,10 @@ jobs:
- name: Run sanity tests
run: make test_collection_sanity
env:
# needed due to cgroupsv2. This is fixed, but a stable release
# with the fix has not been made yet.
ANSIBLE_TEST_PREFER_PODMAN: 1
collection-integration:
name: awx_collection integration

View File

@@ -3,50 +3,28 @@ name: Build/Push Development Images
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
workflow_dispatch:
push:
branches:
- devel
- release_*
- feature_*
jobs:
push-development-images:
push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
runs-on: ubuntu-latest
timeout-minutes: 120
timeout-minutes: 60
permissions:
packages: write
contents: read
strategy:
fail-fast: false
matrix:
build-targets:
- image-name: awx_devel
make-target: docker-compose-buildx
- image-name: awx_kube_devel
make-target: awx-kube-dev-buildx
- image-name: awx
make-target: awx-kube-buildx
steps:
- name: Skipping build of awx image for non-awx repository
run: |
echo "Skipping build of awx image for non-awx repository"
exit 0
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
- uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set GITHUB_ENV variables
- name: Set lower case owner name
run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
env:
OWNER: '${{ github.repository_owner }}'
@@ -59,19 +37,23 @@ jobs:
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Setup node and npm
uses: actions/setup-node@v2
with:
node-version: '16.13.1'
if: matrix.build-targets.image-name == 'awx'
- name: Prebuild UI for awx image (to speed up build process)
- name: Pre-pull image to warm build cache
run: |
sudo apt-get install gettext
make ui-release
make ui-next
if: matrix.build-targets.image-name == 'awx'
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
- name: Build and push AWX devel images
- name: Build images
run: |
make ${{ matrix.build-targets.make-target }}
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
- name: Push development images
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
- name: Push AWX k8s image, only for upstream and feature branches
run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
if: endsWith(github.repository, '/awx')

View File

@@ -2,10 +2,12 @@
name: Feature branch deletion cleanup
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: delete
on:
delete:
branches:
- feature_**
jobs:
branch_delete:
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
push:
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
@@ -20,4 +22,6 @@ jobs:
run: |
ansible localhost -c local -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -24,6 +24,38 @@ jobs:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler.yml
convert-to-draft:
runs-on: ubuntu-latest
name: Change failing PRs to draft
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: 14
- name: Install dependencies
run: npm install -g github
- name: Check CI status
id: check-ci
run: |
status=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/check-suites | \
jq -r '.check_suites[0].conclusion')
echo "CI Status: $status"
echo "::set-output name=ci_status::$status"
- name: Convert to Draft on CI Failure
if: steps.check-ci.outputs.ci_status == 'failure'
run: gh pr edit ${{ github.event.number }} --draft
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
community:
runs-on: ubuntu-latest
timeout-minutes: 20
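
The `check-ci` step in the job added above shells out to curl and jq to read the first check suite's conclusion for the head commit. A sketch of the same REST call in Python (uses the third-party requests package; repo, sha, and token are placeholders):

    import requests  # assumed available for this sketch

    def ci_conclusion(repo: str, sha: str, token: str) -> str:
        # GET /repos/{repo}/commits/{sha}/check-suites, as in the curl above
        resp = requests.get(
            f"https://api.github.com/repos/{repo}/commits/{sha}/check-suites",
            headers={
                "Authorization": f"token {token}",
                "Accept": "application/vnd.github.v3+json",
            },
            timeout=30,
        )
        resp.raise_for_status()
        suites = resp.json().get("check_suites", [])
        # mirrors jq -r '.check_suites[0].conclusion'
        return suites[0]["conclusion"] if suites else "unknown"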

View File

@@ -83,15 +83,11 @@ jobs:
- name: Re-tag and promote awx image
run: |
docker buildx imagetools create \
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker buildx imagetools create \
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository }}:latest
- name: Re-tag and promote awx-ee image
run: |
docker buildx imagetools create \
ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository }}:latest
docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}

View File

@@ -86,33 +86,27 @@ jobs:
-e push=yes \
-e awx_official=yes
- name: Log into registry ghcr.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log into registry quay.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USER }}
password: ${{ secrets.QUAY_TOKEN }}
- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: tag awx-ee:latest with version input
run: |
docker buildx imagetools create \
quay.io/ansible/awx-ee:latest \
--tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker pull quay.io/ansible/awx-ee:latest
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Stage awx-operator image
- name: Build and stage awx-operator
working-directory: awx-operator
run: |
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMG=ghcr.io/${{ github.repository_owner }}/awx-operator:${{ github.event.inputs.operator_version }} \
make docker-buildx
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
- name: Run test deployment with awx-operator
working-directory: awx-operator

.gitignore (vendored): 3 changes
View File

@@ -169,6 +169,3 @@ awx/ui_next/build
# Docs build stuff
docs/docsite/build/
_readthedocs/
# Pyenv
.python-version

.vscode/launch.json (vendored): 113 changes
View File

@@ -1,113 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "run_ws_heartbeat",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_ws_heartbeat"],
"django": true,
"preLaunchTask": "stop awx-ws-heartbeat",
"postDebugTask": "start awx-ws-heartbeat"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_callback_receiver",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_callback_receiver"],
"django": true,
"preLaunchTask": "stop awx-receiver",
"postDebugTask": "start awx-receiver"
},
{
"name": "run_dispatcher",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_dispatcher"],
"django": true,
"preLaunchTask": "stop awx-dispatcher",
"postDebugTask": "start awx-dispatcher"
},
{
"name": "run_rsyslog_configurer",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_rsyslog_configurer"],
"django": true,
"preLaunchTask": "stop awx-rsyslog-configurer",
"postDebugTask": "start awx-rsyslog-configurer"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_wsrelay",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_wsrelay"],
"django": true,
"preLaunchTask": "stop awx-wsrelay",
"postDebugTask": "start awx-wsrelay"
},
{
"name": "daphne",
"type": "debugpy",
"request": "launch",
"program": "/var/lib/awx/venv/awx/bin/daphne",
"args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
"django": true,
"preLaunchTask": "stop awx-daphne",
"postDebugTask": "start awx-daphne"
},
{
"name": "runserver(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "runserver_plus(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver_plus", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi and install Werkzeug",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "shell_plus",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["shell_plus"],
"django": true,
},
]
}

.vscode/tasks.json (vendored): 100 changes
View File

@@ -1,100 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "start awx-cache-clear",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-cache-clear"
},
{
"label": "stop awx-cache-clear",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-cache-clear"
},
{
"label": "start awx-daphne",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-daphne"
},
{
"label": "stop awx-daphne",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-daphne"
},
{
"label": "start awx-dispatcher",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-dispatcher"
},
{
"label": "stop awx-dispatcher",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-dispatcher"
},
{
"label": "start awx-receiver",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-receiver"
},
{
"label": "stop awx-receiver",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-receiver"
},
{
"label": "start awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
},
{
"label": "stop awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
},
{
"label": "start awx-rsyslogd",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslogd"
},
{
"label": "stop awx-rsyslogd",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslogd"
},
{
"label": "start awx-uwsgi",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi and install Werkzeug",
"type": "shell",
"command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "start awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-ws-heartbeat"
},
{
"label": "stop awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
},
{
"label": "start awx-wsrelay",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-wsrelay"
},
{
"label": "stop awx-wsrelay",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-wsrelay"
}
]
}

View File

@@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null)
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
# ansible-test requires a semver-compatible version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -75,9 +75,6 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz
I18N_FLAG_FILE = .i18n_built
## PLATFORMS defines the target platforms the manager image is built for, to support multiple architectures
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \
@@ -216,6 +213,8 @@ collectstatic:
fi; \
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
@@ -223,7 +222,7 @@ uwsgi: collectstatic
uwsgi /etc/tower/uwsgi.ini
awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -303,7 +302,7 @@ swagger: reports
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
check: black
@@ -533,7 +532,7 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \
-e enable_tacacs=$(TACACS) \
$(EXTRA_SOURCES_ANSIBLE_OPTS)
$(EXTRA_SOURCES_ANSIBLE_OPTS)
docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
@@ -587,27 +586,12 @@ docker-compose-build: Dockerfile.dev
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
.PHONY: docker-compose-buildx
## Build awx_devel image for docker compose development environment for multiple architectures
docker-compose-buildx: Dockerfile.dev
- docker buildx create --name docker-compose-buildx
docker buildx use docker-compose-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEVEL_IMAGE_NAME) \
-f Dockerfile.dev .
- docker buildx rm docker-compose-buildx
docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose
@@ -629,6 +613,9 @@ clean-elk:
docker rm tools_elasticsearch_1
docker rm tools_kibana_1
psql-container:
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
VERSION:
@echo "awx: $(VERSION)"
@@ -661,21 +648,6 @@ awx-kube-build: Dockerfile
--build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
## Build multi-arch awx image for deployment on Kubernetes environment.
awx-kube-buildx: Dockerfile
- docker buildx create --name awx-kube-buildx
docker buildx use awx-kube-buildx
- docker buildx build \
--push \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
-f Dockerfile .
- docker buildx rm awx-kube-buildx
.PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
@@ -692,18 +664,6 @@ awx-kube-dev-build: Dockerfile.kube-dev
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-buildx: Dockerfile.kube-dev
- docker buildx create --name awx-kube-dev-buildx
docker buildx use awx-kube-dev-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-f Dockerfile.kube-dev .
- docker buildx rm awx-kube-dev-buildx
kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)

View File

@@ -154,12 +154,10 @@ def manage():
from django.conf import settings
from django.core.management import execute_from_command_line
# enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1
# In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
# The return of connection.pg_version is something like 12013
# enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
if (connection.pg_version // 10000) < 12:
sys.stderr.write("At a minimum, postgres version 12 is required\n")
sys.stderr.write("Postgres version 12 is required\n")
sys.exit(1)
if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover
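
The guard above recovers the Postgres major version by floor-dividing Django's integer `connection.pg_version`. A worked illustration (values assume libpq's major*10000 + minor encoding for PostgreSQL 10 and later):

    def pg_major(pg_version: int) -> int:
        # e.g. 120013 encodes server version 12.13
        return pg_version // 10000

    assert pg_major(120013) == 12
    assert pg_major(150002) == 15
    # manage() exits with status 1 when pg_major(connection.pg_version) < 12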

View File

@@ -5594,7 +5594,7 @@ class InstanceSerializer(BaseSerializer):
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed:
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution':

View File

@@ -272,24 +272,16 @@ class DashboardJobsGraphView(APIView):
success_query = user_unified_jobs.filter(status='successful')
failed_query = user_unified_jobs.filter(status='failed')
canceled_query = user_unified_jobs.filter(status='canceled')
error_query = user_unified_jobs.filter(status='error')
if job_type == 'inv_sync':
success_query = success_query.filter(instance_of=models.InventoryUpdate)
failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
error_query = error_query.filter(instance_of=models.InventoryUpdate)
elif job_type == 'playbook_run':
success_query = success_query.filter(instance_of=models.Job)
failed_query = failed_query.filter(instance_of=models.Job)
canceled_query = canceled_query.filter(instance_of=models.Job)
error_query = error_query.filter(instance_of=models.Job)
elif job_type == 'scm_update':
success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
error_query = error_query.filter(instance_of=models.ProjectUpdate)
end = now()
interval = 'day'
@@ -305,12 +297,10 @@ class DashboardJobsGraphView(APIView):
else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}}
dashboard_data = {"jobs": {"successful": [], "failed": []}}
succ_list = dashboard_data['jobs']['successful']
fail_list = dashboard_data['jobs']['failed']
canceled_list = dashboard_data['jobs']['canceled']
error_list = dashboard_data['jobs']['error']
qs_s = (
success_query.filter(finished__range=(start, end))
@@ -328,22 +318,6 @@ class DashboardJobsGraphView(APIView):
.annotate(agg=Count('id', distinct=True))
)
data_f = {item['d']: item['agg'] for item in qs_f}
qs_c = (
canceled_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_c = {item['d']: item['agg'] for item in qs_c}
qs_e = (
error_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_e = {item['d']: item['agg'] for item in qs_e}
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count():
@@ -352,8 +326,6 @@ class DashboardJobsGraphView(APIView):
break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])
return Response(dashboard_data)

View File

@@ -1,7 +1,6 @@
# Python
import contextlib
import logging
import psycopg
import threading
import time
import os
@@ -14,7 +13,7 @@ from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.db import transaction, connection
from django.db.utils import DatabaseError, ProgrammingError
from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property
# Django REST Framework
@@ -81,26 +80,18 @@ def _ctit_db_wrapper(trans_safe=False):
logger.debug('Obtaining database settings in spite of broken transaction.')
transaction.set_rollback(False)
yield
except ProgrammingError as e:
# Exception raised for programming errors
# Examples may be table not found or already exists,
# this generally means we can't fetch Tower configuration
# because the database hasn't actually finished migrating yet;
# this is usually a sign that a service in a container (such as ws_broadcast)
# has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet
# syntax error in the SQL statement, wrong number of parameters specified, etc.
except DBError as exc:
if trans_safe:
logger.debug(f'Database settings are not available, using defaults. error: {str(e)}')
else:
logger.exception('Error modifying something related to database settings.')
except DatabaseError as e:
if trans_safe:
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
level = logger.warning
if isinstance(exc, ProgrammingError):
if 'relation' in str(exc) and 'does not exist' in str(exc):
# this generally means we can't fetch Tower configuration
# because the database hasn't actually finished migrating yet;
# this is usually a sign that a service in a container (such as ws_broadcast)
# has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet
level = logger.debug
level(f'Database settings are not available, using defaults. error: {str(exc)}')
else:
logger.exception('Error modifying something related to database settings.')
finally:
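
The branch being dropped in this hunk reads the SQLSTATE off the driver-level cause of Django's exception. A minimal sketch of that pattern, assuming psycopg 3 as the database driver:

    import psycopg
    from django.db import DatabaseError

    def log_sqlstate(e: DatabaseError) -> None:
        cause = e.__cause__  # the underlying psycopg.Error, when present
        if cause is not None and getattr(cause, "sqlstate", None):
            # psycopg.errors.lookup() maps a SQLSTATE code such as '42P07'
            # to its exception class, e.g. psycopg.errors.DuplicateTable
            print(cause.sqlstate, psycopg.errors.lookup(cause.sqlstate))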

View File

@@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
resolved_action,
resolved_role,
-- '-' operator listed here:
-- https://www.postgresql.org/docs/15/functions-json.html
-- https://www.postgresql.org/docs/12/functions-json.html
-- note that operator is only supported by jsonb objects
-- https://www.postgresql.org/docs/current/datatype-json.html
(CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,

View File

@@ -14,7 +14,7 @@ __all__ = [
'STANDARD_INVENTORY_UPDATE_ENV',
]
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform')
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
PRIVILEGE_ESCALATION_METHODS = [
('sudo', _('Sudo')),
('su', _('Su')),

View File

@@ -105,11 +105,7 @@ def create_listener_connection():
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v
# Allow password-less authentication
if 'PASSWORD' in conf:
conf['OPTIONS']['password'] = conf.pop('PASSWORD')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])

View File

@@ -162,7 +162,7 @@ class AWXConsumerRedis(AWXConsumerBase):
class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, schedule=None, **kwargs):
super().__init__(*args, **kwargs)
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE)
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
# if no successful loops have run since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup
init_time = time.time()
@@ -259,12 +259,6 @@ class AWXConsumerPG(AWXConsumerBase):
current_downtime = time.time() - self.pg_down_time
if current_downtime > self.pg_max_wait:
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise
# Wait for a second before next attempt, but still listen for any shutdown signals
for i in range(10):
@@ -276,12 +270,6 @@ class AWXConsumerPG(AWXConsumerBase):
except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in dispatcher main loop')
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise

View File

@@ -5,12 +5,11 @@ import logging
import threading
import time
import urllib.parse
from pathlib import Path
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.executor import MigrationExecutor
from django.db import connection
from django.shortcuts import redirect
from django.apps import apps
@@ -18,11 +17,9 @@ from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import gettext_lazy as _
from django.urls import reverse, resolve
from awx.main import migrations
from awx.main.utils.named_url_graph import generate_graph, GraphNode
from awx.conf import fields, register
from awx.main.utils.profiling import AWXProfiler
from awx.main.utils.common import memoize
logger = logging.getLogger('awx.main.middleware')
@@ -201,22 +198,9 @@ class URLModificationMiddleware(MiddlewareMixin):
request.path_info = new_path
@memoize(ttl=20)
def is_migrating():
latest_number = 0
latest_name = ''
for migration_path in Path(migrations.__path__[0]).glob('[0-9]*.py'):
try:
migration_number = int(migration_path.name.split('_', 1)[0])
except ValueError:
continue
if migration_number > latest_number:
latest_number = migration_number
latest_name = migration_path.name[: -len('.py')]
return not MigrationRecorder(connection).migration_qs.filter(app='main', name=latest_name).exists()
class MigrationRanCheckMiddleware(MiddlewareMixin):
def process_request(self, request):
if is_migrating() and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
executor = MigrationExecutor(connection)
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if bool(plan) and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
return redirect(reverse("ui:migrations_notran"))
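
The replacement check above asks Django's migration executor for an actual plan instead of inferring state from migration filenames. Reduced to a standalone sketch:

    from django.db import connection
    from django.db.migrations.executor import MigrationExecutor

    def has_pending_migrations() -> bool:
        executor = MigrationExecutor(connection)
        # the plan is the list of migrations still needed to reach
        # the latest (leaf) nodes of the migration graph
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        return bool(plan)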

View File

@@ -1,59 +0,0 @@
# Generated by Django 4.2.6 on 2024-02-15 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0189_inbound_hop_nodes'),
]
operations = [
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
],
default=None,
max_length=32,
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
],
default=None,
max_length=32,
),
),
]

View File

@@ -925,7 +925,6 @@ class InventorySourceOptions(BaseModel):
('rhv', _('Red Hat Virtualization')),
('controller', _('Red Hat Ansible Automation Platform')),
('insights', _('Red Hat Insights')),
('terraform', _('Terraform State')),
]
# From the options of the Django management base command
@@ -1631,20 +1630,6 @@ class satellite6(PluginFileInjector):
return ret
class terraform(PluginFileInjector):
plugin_name = 'terraform_state'
base_injector = 'managed'
namespace = 'cloud'
collection = 'terraform'
use_fqcn = True
def inventory_as_dict(self, inventory_update, private_data_dir):
env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
ret = super().inventory_as_dict(inventory_update, private_data_dir)
ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
return ret
class controller(PluginFileInjector):
plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection
base_injector = 'template'

View File

@@ -5,7 +5,6 @@ from copy import deepcopy
import datetime
import logging
import json
import traceback
from django.db import models
from django.conf import settings
@@ -485,29 +484,14 @@ class JobNotificationMixin(object):
if msg_template:
try:
msg = env.from_string(msg_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
except (TemplateSyntaxError, UndefinedError, SecurityError):
msg = ''
if body_template:
try:
body = env.from_string(body_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
# https://datatracker.ietf.org/doc/html/rfc2822#section-2.2
# Body should have at least 2 CRLF, some clients will interpret
# the email incorrectly with blank body. So we will check that
if len(body.strip().splitlines()) <= 2:
# blank body
body = '\r\n'.join(
[
"The template rendering return a blank body.",
"Please check the template.",
"Refer to https://github.com/ansible/awx/issues/13983",
"for further information.",
]
)
except (TemplateSyntaxError, UndefinedError, SecurityError):
body = ''
return (msg, body)

View File

@@ -1,6 +1,5 @@
# Copyright (c) 2019 Ansible, Inc.
# All Rights Reserved.
# -*-coding:utf-8-*-
class CustomNotificationBase(object):

View File

@@ -4,15 +4,13 @@ import logging
from django.conf import settings
from django.urls import re_path
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack
from . import consumers
logger = logging.getLogger('awx.main.routing')
_application = None
class AWXProtocolTypeRouter(ProtocolTypeRouter):
@@ -28,91 +26,13 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
super().__init__(*args, **kwargs)
class MultipleURLRouterAdapter:
"""
Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n)
This class allows associating a websocket url with an auth
Ordering matters. The first matching url will be used.
"""
def __init__(self, *auths):
self._auths = [a for a in auths]
async def __call__(self, scope, receive, send):
"""
Loop through the list of passed in URLRouter's (they may or may not be wrapped by auth).
We know we have exhausted the list of URLRouter patterns when we get a
ValueError('No route found for path %s'). When that happens, move onto the next
URLRouter.
If the final URLRouter raises an error, re-raise it in the end.
We know that we found a match when no error is raised, end the loop.
"""
last_index = len(self._auths) - 1
for i, auth in enumerate(self._auths):
try:
return await auth.__call__(scope, receive, send)
except ValueError as e:
if str(e).startswith('No route found for path'):
# Only surface the error if on the last URLRouter
if i == last_index:
raise
websocket_urlpatterns = [
re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
]
websocket_relay_urlpatterns = [
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
]
def application_func(cls=AWXProtocolTypeRouter) -> ProtocolTypeRouter:
return cls(
{
'websocket': MultipleURLRouterAdapter(
URLRouter(websocket_relay_urlpatterns),
DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
)
}
)
def __getattr__(name: str) -> ProtocolTypeRouter:
"""
Defer instantiating application.
For testing, we just need it to NOT run on import.
https://peps.python.org/pep-0562/#specification
Normally, someone would get application from this module via:
from awx.main.routing import application
and do something with the application:
application.do_something()
What does the callstack look like when the import runs?
...
awx.main.routing.__getattribute__(...) # <-- we don't define this so NOOP as far as we are concerned
if '__getattr__' in awx.main.routing.__dict__: # <-- this triggers the function we are in
return awx.main.routing.__dict__.__getattr__("application")
Why isn't this function simply implemented as:
def __getattr__(name):
if not _application:
_application = application_func()
return _application
It could. I manually tested it and it passes test_routing.py.
But my understanding after reading the PEP-0562 specification link above is that
performance would be a bit worse due to the extra __getattribute__ calls when
we reference non-global variables.
"""
if name == "application":
globs = globals()
if not globs['_application']:
globs['_application'] = application_func()
return globs['_application']
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
application = AWXProtocolTypeRouter(
{
'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
}
)
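
The module-level `__getattr__` shown in this hunk is the PEP 562 idiom for building a module attribute lazily, on first access. The general pattern, as a self-contained sketch:

    # lazy_mod.py: `app` is only constructed when first accessed,
    # not when the module is imported (PEP 562)
    _app = None

    def _build_app():
        return object()  # stand-in for an expensive constructor

    def __getattr__(name):
        global _app
        if name == "app":
            if _app is None:
                _app = _build_app()
            return _app
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")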

View File

@@ -29,7 +29,7 @@ class RunnerCallback:
self.safe_env = {}
self.event_ct = 0
self.model = model
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.wrapup_event_dispatched = False
self.artifacts_processed = False
self.extra_update_fields = {}

View File

@@ -114,7 +114,7 @@ class BaseTask(object):
def __init__(self):
self.cleanup_paths = []
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.runner_callback = self.callback_class(model=self.model)
def update_model(self, pk, _attempt=0, **updates):

View File

@@ -6,7 +6,6 @@ import itertools
import json
import logging
import os
import psycopg
from io import StringIO
from contextlib import redirect_stdout
import shutil
@@ -417,7 +416,7 @@ def handle_removed_image(remove_images=None):
@task(queue=get_task_queuename)
def cleanup_images_and_files():
_cleanup_images_and_files(image_prune=True)
_cleanup_images_and_files()
@task(queue=get_task_queuename)
@@ -631,18 +630,10 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
except DatabaseError as e:
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.debug('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
if sqlstate == psycopg.errors.NoData:
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception("Error marking {} as lost.".format(other_inst.hostname))
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
# Run local reaper
if worker_tasks is not None:
@@ -797,19 +788,10 @@ def update_inventory_computed_fields(inventory_id):
try:
i.update_computed_fields()
except DatabaseError as e:
# https://github.com/django/django/blob/eff21d8e7a1cb297aedf1c702668b590a1b618f3/django/db/models/base.py#L1105
# django raises DatabaseError("Forced update did not affect any rows.")
# if sqlstate is set then there was a database error and otherwise will re-raise that error
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
raise
# otherwise
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):

View File

@@ -1,3 +0,0 @@
{
"TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
}

View File

@@ -1,8 +1,13 @@
from awx.main.tests.functional.conftest import * # noqa
import os
import pytest
@pytest.fixture()
def release():
return os.environ.get('VERSION_TARGET', '')
def pytest_addoption(parser):
parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0")
def pytest_generate_tests(metafunc):
# This is called for every test. Only get/set command line arguments
# if the argument is specified in the list of test "fixturenames".
option_value = metafunc.config.option.release
if 'release' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("release", [option_value])
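
With the hook-based conftest above, any test that takes a `release` argument is parametrized from the command line, e.g. `pytest --release=3.3.0`. A hypothetical test using it:

    # test_release.py (illustrative, not part of the diff)
    def test_release_looks_like_semver(release):
        major, minor, patch = release.split(".")
        assert all(part.isdigit() for part in (major, minor, patch))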

View File

@@ -3,19 +3,15 @@ import pytest
from unittest import mock
import urllib.parse
from unittest.mock import PropertyMock
import importlib
# Django
from django.urls import resolve
from django.http import Http404
from django.apps import apps
from django.core.handlers.exception import response_for_exception
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
from django.db.backends.sqlite3.base import SQLiteCursorWrapper
from django.db.models.signals import post_migrate
# AWX
from awx.main.models.projects import Project
from awx.main.models.ha import Instance
@@ -45,19 +41,10 @@ from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.oauth import OAuth2Application as Application
from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.utils import is_testing
__SWAGGER_REQUESTS__ = {}
# HACK: the dab_resource_registry app required ServiceID in migrations which checks do not run
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
if is_testing():
post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
@pytest.fixture(scope="session")
def swagger_autogen(requests=__SWAGGER_REQUESTS__):
return requests

View File

@@ -193,7 +193,6 @@ class TestInventorySourceInjectors:
('satellite6', 'theforeman.foreman.foreman'),
('insights', 'redhatinsights.insights.insights'),
('controller', 'awx.awx.tower'),
('terraform', 'cloud.terraform.terraform_state'),
],
)
def test_plugin_proper_names(self, source, proper_name):

View File

@@ -107,7 +107,6 @@ def read_content(private_data_dir, raw_env, inventory_update):
for filename in os.listdir(os.path.join(private_data_dir, subdir)):
filename_list.append(os.path.join(subdir, filename))
filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0])
inventory_content = ""
for filename in filename_list:
if filename in ('args', 'project'):
continue # Ansible runner
@@ -131,7 +130,6 @@ def read_content(private_data_dir, raw_env, inventory_update):
dir_contents[abs_file_path] = f.read()
# Declare a reference to inventory plugin file if it exists
if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]:
inventory_content = dir_contents[abs_file_path]
referenced_paths.add(abs_file_path) # used as inventory file
elif cache_file_regex.match(abs_file_path):
file_aliases[abs_file_path] = 'cache_file'
@@ -159,11 +157,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
content = {}
for abs_file_path, file_content in dir_contents.items():
# assert that all files laid down are used
if (
abs_file_path not in referenced_paths
and to_container_path(abs_file_path, private_data_dir) not in inventory_content
and abs_file_path not in ignore_files
):
if abs_file_path not in referenced_paths and abs_file_path not in ignore_files:
raise AssertionError(
"File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4))
)

View File

@@ -411,14 +411,14 @@ def test_project_delete(delete, organization, admin_user):
@pytest.mark.parametrize(
'order_by, expected_names',
'order_by, expected_names, expected_ids',
[
('name', ['alice project', 'bob project', 'shared project']),
('-name', ['shared project', 'bob project', 'alice project']),
('name', ['alice project', 'bob project', 'shared project'], [1, 2, 3]),
('-name', ['shared project', 'bob project', 'alice project'], [3, 2, 1]),
],
)
@pytest.mark.django_db
def test_project_list_ordering_by_name(get, order_by, expected_names, organization_factory):
def test_project_list_ordering_by_name(get, order_by, expected_names, expected_ids, organization_factory):
'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable'
objects = organization_factory(
'org1',
@@ -426,11 +426,13 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, organizati
superusers=['admin'],
)
project_names = []
project_ids = []
# TODO: ask for an order by here that doesn't apply
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
for x in range(len(results)):
project_names.append(results[x]['name'])
assert project_names == expected_names
project_ids.append(results[x]['id'])
assert project_names == expected_names and project_ids == expected_ids
@pytest.mark.parametrize('order_by', ('name', '-name'))
@@ -448,8 +450,7 @@ def test_project_list_ordering_with_duplicate_names(get, order_by, organization_
for x in range(3):
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
project_ids[x] = [proj['id'] for proj in results]
assert project_ids[0] == project_ids[1] == project_ids[2]
assert project_ids[0] == sorted(project_ids[0])
assert project_ids[0] == project_ids[1] == project_ids[2] == [1, 2, 3, 4, 5]
@pytest.mark.django_db

View File

@@ -1,90 +0,0 @@
import pytest
from django.contrib.auth.models import AnonymousUser
from channels.routing import ProtocolTypeRouter
from channels.testing.websocket import WebsocketCommunicator
from awx.main.consumers import WebsocketSecretAuthHelper
@pytest.fixture
def application():
# code in routing hits the db on import because .. settings cache
from awx.main.routing import application_func
yield application_func(ProtocolTypeRouter)
@pytest.fixture
def websocket_server_generator(application):
def fn(endpoint):
return WebsocketCommunicator(application, endpoint)
return fn
@pytest.mark.asyncio
@pytest.mark.django_db
class TestWebsocketRelay:
@pytest.fixture
def websocket_relay_secret_generator(self, settings):
def fn(secret, set_broadcast_websocket_secret=False):
secret_backup = settings.BROADCAST_WEBSOCKET_SECRET
settings.BROADCAST_WEBSOCKET_SECRET = 'foobar'
res = ('secret'.encode('utf-8'), WebsocketSecretAuthHelper.construct_secret().encode('utf-8'))
if set_broadcast_websocket_secret is False:
settings.BROADCAST_WEBSOCKET_SECRET = secret_backup
return res
return fn
@pytest.fixture
def websocket_relay_secret(self, settings, websocket_relay_secret_generator):
return websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=True)
async def test_authorized(self, websocket_server_generator, websocket_relay_secret):
server = websocket_server_generator('/websocket/relay/')
server.scope['headers'] = (websocket_relay_secret,)
connected, _ = await server.connect()
assert connected is True
async def test_not_authorized(self, websocket_server_generator):
server = websocket_server_generator('/websocket/relay/')
connected, _ = await server.connect()
assert connected is False, "Connection to the relay websocket without auth. We expected the client to be denied."
async def test_wrong_secret(self, websocket_server_generator, websocket_relay_secret_generator):
server = websocket_server_generator('/websocket/relay/')
server.scope['headers'] = (websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=False),)
connected, _ = await server.connect()
assert connected is False
@pytest.mark.asyncio
@pytest.mark.django_db
class TestWebsocketEventConsumer:
async def test_unauthorized_anonymous(self, websocket_server_generator):
server = websocket_server_generator('/websocket/')
server.scope['user'] = AnonymousUser()
connected, _ = await server.connect()
assert connected is False, "Anonymous user should NOT be allowed to login."
@pytest.mark.skip(reason="Ran out of coding time.")
async def test_authorized(self, websocket_server_generator, application, admin):
server = websocket_server_generator('/websocket/')
"""
I ran out of time. Here is what I was thinking ...
Inject a valid session into the cookies in the header
server.scope['headers'] = (
(b'cookie', ...),
)
"""
connected, _ = await server.connect()
assert connected is True, "User should be allowed in via cookies auth via a session key in the cookies"

View File

@@ -1,6 +1,11 @@
# Python
from unittest import mock
import uuid
# patch python-ldap
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap # NOQA
# Load development settings for base variables.
from awx.settings.development import * # NOQA

View File

@@ -1,64 +0,0 @@
import pytest
from unittest.mock import MagicMock, patch
from awx.main.tasks.system import update_inventory_computed_fields
from awx.main.models import Inventory
from django.db import DatabaseError
@pytest.fixture
def mock_logger():
with patch("awx.main.tasks.system.logger") as logger:
yield logger
@pytest.fixture
def mock_inventory():
return MagicMock(spec=Inventory)
def test_update_inventory_computed_fields_existing_inventory(mock_logger, mock_inventory):
# Mocking the Inventory.objects.filter method to return a non-empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = True
mock_filter.return_value.__getitem__.return_value = mock_inventory
# Mocking the update_computed_fields method
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_update_computed_fields.assert_called_once()
# You can add more assertions based on your specific requirements
def test_update_inventory_computed_fields_missing_inventory(mock_logger):
# Mocking the Inventory.objects.filter method to return an empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = False
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_logger.error.assert_called_once_with("Update Inventory Computed Fields failed due to missing inventory: 1")
def test_update_inventory_computed_fields_database_error_nosqlstate(mock_logger, mock_inventory):
# Mocking the Inventory.objects.filter method to return a non-empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = True
mock_filter.return_value.__getitem__.return_value = mock_inventory
# Mocking the update_computed_fields method
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
# Simulating the update_computed_fields method to explicitly raise a DatabaseError
mock_update_computed_fields.side_effect = DatabaseError("Some error")
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_update_computed_fields.assert_called_once()
mock_inventory.update_computed_fields.assert_called_once()

View File

@@ -121,10 +121,6 @@ def test_get_model_for_valid_type(model_type, model_class):
assert common.get_model_for_type(model_type) == model_class
def test_is_testing():
assert common.is_testing() is True
@pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS])
def test_get_capacity_type(model_type, model_class):
if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'):

View File

@@ -7,7 +7,6 @@ import json
import yaml
import logging
import time
import psycopg
import os
import subprocess
import re
@@ -24,7 +23,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
from django.db import connection, DatabaseError, transaction, ProgrammingError, IntegrityError
from django.db import connection, transaction, ProgrammingError, IntegrityError
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
@@ -137,7 +136,7 @@ def underscore_to_camelcase(s):
@functools.cache
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'awx.main.tests.settings_for_test':
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
@@ -1156,26 +1155,11 @@ def create_partition(tblname, start=None):
f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
)
except (ProgrammingError, IntegrityError) as e:
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
# 42P07 = DuplicateTable
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
if psycopg.errors.DuplicateTable == sqlstate:
logger.info(f'Caught known error due to partition creation race: {e}')
else:
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
raise
except DatabaseError as e:
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
raise
if 'already exists' in str(e):
logger.info(f'Caught known error due to partition creation race: {e}')
else:
raise
def cleanup_new_process(func):
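The replacement logic above drops the psycopg SQLSTATE inspection in favor of a substring match on the exception text. A condensed sketch of the resulting handling, with the cursor setup and the ``ALTER TABLE ... ATTACH PARTITION`` statement assumed::

    from django.db import connection, IntegrityError, ProgrammingError

    def attach_partition(attach_sql, logger):
        try:
            with connection.cursor() as cursor:
                cursor.execute(attach_sql)
        except (ProgrammingError, IntegrityError) as e:
            if 'already exists' in str(e):
                # Two workers raced to create the same partition; safe to ignore.
                logger.info(f'Caught known error due to partition creation race: {e}')
            else:
                raise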

View File

@@ -339,7 +339,7 @@ class WebSocketRelayManager(object):
if deleted_remote_hosts:
logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])
await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts)
if new_remote_hosts:
logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

View File

@@ -216,54 +216,42 @@
- block:
- name: Fetch galaxy roles from roles/requirements.(yml/yaml)
ansible.builtin.command:
cmd: "ansible-galaxy role install -r {{ req_file }} {{ verbosity }}"
cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}"
register: galaxy_result
vars:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
req_candidates:
- "{{ project_path | quote }}/roles/requirements.yml"
- "{{ project_path | quote }}/roles/requirements.yaml"
with_fileglob:
- "{{ project_path | quote }}/roles/requirements.yaml"
- "{{ project_path | quote }}/roles/requirements.yml"
changed_when: "'was installed successfully' in galaxy_result.stdout"
when:
- roles_enabled | bool
- req_file
when: roles_enabled | bool
tags:
- install_roles
- name: Fetch galaxy collections from collections/requirements.(yml/yaml)
ansible.builtin.command:
cmd: "ansible-galaxy collection install -r {{ req_file }} {{ verbosity }}"
cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}"
register: galaxy_collection_result
vars:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
req_candidates:
- "{{ project_path | quote }}/collections/requirements.yml"
- "{{ project_path | quote }}/collections/requirements.yaml"
- "{{ project_path | quote }}/requirements.yml"
- "{{ project_path | quote }}/requirements.yaml"
with_fileglob:
- "{{ project_path | quote }}/collections/requirements.yaml"
- "{{ project_path | quote }}/collections/requirements.yml"
changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout"
when:
- "ansible_version.full is version_compare('2.9', '>=')"
- collections_enabled | bool
- req_file
tags:
- install_collections
- name: Fetch galaxy roles and collections from requirements.(yml/yaml)
ansible.builtin.command:
cmd: "ansible-galaxy install -r {{ req_file }} {{ verbosity }}"
cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}"
register: galaxy_combined_result
vars:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
req_candidates:
- "{{ project_path | quote }}/requirements.yaml"
- "{{ project_path | quote }}/requirements.yml"
with_fileglob:
- "{{ project_path | quote }}/requirements.yaml"
- "{{ project_path | quote }}/requirements.yml"
changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout"
when:
- "ansible_version.full is version_compare('2.10', '>=')"
- collections_enabled | bool
- roles_enabled | bool
- req_file
tags:
- install_collections
- install_roles

View File

@@ -1,22 +0,0 @@
from ansible_base.resource_registry.registry import ParentResource, ResourceConfig, ServiceAPIConfig, SharedResource
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
from awx.main import models
class APIConfig(ServiceAPIConfig):
service_type = "awx"
RESOURCE_LIST = (
ResourceConfig(
models.Organization,
shared_resource=SharedResource(serializer=OrganizationType, is_provider=False),
),
ResourceConfig(models.User, shared_resource=SharedResource(serializer=UserType, is_provider=False), name_field="username"),
ResourceConfig(
models.Team,
shared_resource=SharedResource(serializer=TeamType, is_provider=False),
parent_resources=[ParentResource(model=models.Organization, field_name="organization")],
),
)

View File

@@ -353,11 +353,8 @@ INSTALLED_APPS = [
'awx.sso',
'solo',
'ansible_base.rest_filters',
'ansible_base.jwt_consumer',
'ansible_base.resource_registry',
]
INTERNAL_IPS = ('127.0.0.1',)
MAX_PAGE_SIZE = 200
@@ -365,7 +362,6 @@ REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
'PAGE_SIZE': 25,
'DEFAULT_AUTHENTICATION_CLASSES': (
'ansible_base.jwt_consumer.awx.auth.AwxJWTAuthentication',
'awx.api.authentication.LoggedOAuth2Authentication',
'awx.api.authentication.SessionAuthentication',
'awx.api.authentication.LoggedBasicAuthentication',
@@ -759,14 +755,6 @@ SATELLITE6_INSTANCE_ID_VAR = 'foreman_id,foreman.id'
INSIGHTS_INSTANCE_ID_VAR = 'insights_id'
INSIGHTS_EXCLUDE_EMPTY_GROUPS = False
# ----------------
# -- Terraform State --
# ----------------
# TERRAFORM_ENABLED_VAR =
# TERRAFORM_ENABLED_VALUE =
TERRAFORM_INSTANCE_ID_VAR = 'id'
TERRAFORM_EXCLUDE_EMPTY_GROUPS = True
# ---------------------
# ----- Custom -----
# ---------------------
@@ -1120,7 +1108,6 @@ METRICS_SUBSYSTEM_CONFIG = {
# django-ansible-base
ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'
ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api'
from ansible_base.lib import dynamic_config # noqa: E402

View File

@@ -72,8 +72,6 @@ AWX_CALLBACK_PROFILE = True
# Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False
# Needed for launching runserver in debug mode
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Store a snapshot of default settings at this point before loading any

View File

@@ -13,7 +13,7 @@
"@patternfly/react-table": "4.113.0",
"ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2",
"axios": "^1.6.7",
"axios": "0.27.2",
"d3": "7.6.1",
"dagre": "^0.8.4",
"dompurify": "2.4.0",
@@ -5940,13 +5940,12 @@
}
},
"node_modules/axios": {
"version": "1.6.7",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz",
"integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==",
"version": "0.27.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
"integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
"dependencies": {
"follow-redirects": "^1.15.4",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
"follow-redirects": "^1.14.9",
"form-data": "^4.0.0"
}
},
"node_modules/axios/node_modules/form-data": {
@@ -10388,9 +10387,9 @@
}
},
"node_modules/follow-redirects": {
"version": "1.15.5",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
"integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==",
"version": "1.15.1",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
"integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==",
"funding": [
{
"type": "individual",
@@ -18350,11 +18349,6 @@
"node": ">= 0.10"
}
},
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"node_modules/pseudolocale": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",
@@ -26921,13 +26915,12 @@
"dev": true
},
"axios": {
"version": "1.6.7",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz",
"integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==",
"version": "0.27.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
"integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
"requires": {
"follow-redirects": "^1.15.4",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
"follow-redirects": "^1.14.9",
"form-data": "^4.0.0"
},
"dependencies": {
"form-data": {
@@ -30378,9 +30371,9 @@
}
},
"follow-redirects": {
"version": "1.15.5",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz",
"integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw=="
"version": "1.15.1",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
"integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA=="
},
"fork-ts-checker-webpack-plugin": {
"version": "6.5.2",
@@ -36332,11 +36325,6 @@
}
}
},
"proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"pseudolocale": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",

View File

@@ -13,7 +13,7 @@
"@patternfly/react-table": "4.113.0",
"ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2",
"axios": "^1.6.7",
"axios": "0.27.2",
"d3": "7.6.1",
"dagre": "^0.8.4",
"dompurify": "2.4.0",

View File

@@ -257,17 +257,12 @@ function PromptDetail({
numChips={5}
ouiaId="prompt-job-tag-chips"
totalChips={
overrides.job_tags === undefined ||
overrides.job_tags === null ||
overrides.job_tags === ''
!overrides.job_tags || overrides.job_tags === ''
? 0
: overrides.job_tags.split(',').length
}
>
{overrides.job_tags !== undefined &&
overrides.job_tags !== null &&
overrides.job_tags !== '' &&
overrides.job_tags.length > 0 &&
{overrides.job_tags.length > 0 &&
overrides.job_tags.split(',').map((jobTag) => (
<Chip
key={jobTag}
@@ -289,18 +284,13 @@ function PromptDetail({
<ChipGroup
numChips={5}
totalChips={
overrides.skip_tags === undefined ||
overrides.skip_tags === null ||
overrides.skip_tags === ''
!overrides.skip_tags || overrides.skip_tags === ''
? 0
: overrides.skip_tags.split(',').length
}
ouiaId="prompt-skip-tag-chips"
>
{overrides.skip_tags !== undefined &&
overrides.skip_tags !== null &&
overrides.skip_tags !== '' &&
overrides.skip_tags.length > 0 &&
{overrides.skip_tags.length > 0 &&
overrides.skip_tags.split(',').map((skipTag) => (
<Chip
key={skipTag}

View File

@@ -115,11 +115,8 @@ function SessionProvider({ children }) {
}, [setSessionTimeout, setSessionCountdown]);
useEffect(() => {
const isRedirectCondition = (location, histLength) =>
location.pathname === '/login' && histLength === 2;
const unlisten = history.listen((location, action) => {
if (action === 'POP' || isRedirectCondition(location, history.length)) {
if (action === 'POP') {
setIsRedirectLinkReceived(true);
}
});

View File

@@ -784,7 +784,7 @@ msgstr "Branche à utiliser dans l'exécution de la tâche. Projet par défaut
#: screens/Inventory/shared/Inventory.helptext.js:155
msgid "Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true."
msgstr "Branche à utiliser pour la synchronisation de l'inventaire. La valeur par défaut du projet est utilisée si elle est vide. Cette option n'est autorisée que si le champ allow_override du projet est défini sur vrai."
msgstr ""
#: components/About/About.js:45
msgid "Brand Image"
@@ -2832,7 +2832,7 @@ msgstr "Entrez les variables avec la syntaxe JSON ou YAML. Consultez la documen
#: screens/Inventory/shared/SmartInventoryForm.js:94
msgid "Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Controller documentation for example syntax."
msgstr "Entrez les variables d'inventaire en utilisant la syntaxe JSON ou YAML. Utilisez le bouton d'option pour basculer entre les deux. Référez-vous à la documentation du contrôleur Ansible pour les exemples de syntaxe."
msgstr ""
#: screens/CredentialType/CredentialTypeDetails/CredentialTypeDetails.js:87
msgid "Environment variables or extra variables that specify the values a credential type can inject."
@@ -3015,7 +3015,7 @@ msgstr "Recherche exacte sur le champ d'identification."
#: components/Search/RelatedLookupTypeInput.js:38
msgid "Exact search on name field."
msgstr "Recherche exacte sur le champ nom."
msgstr ""
#: screens/Project/shared/Project.helptext.js:23
msgid "Example URLs for GIT Source Control include:"
@@ -3242,7 +3242,7 @@ msgstr "Jobs ayant échoué"
#: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:262
msgid "Failed to approve one or more workflow approval."
msgstr "Échec de l'approbation d'une ou plusieurs validations de flux de travail."
msgstr ""
#: screens/WorkflowApproval/shared/WorkflowApprovalButton.js:56
msgid "Failed to approve {0}."
@@ -3474,7 +3474,7 @@ msgstr "N'a pas réussi à supprimer {name}."
#: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:263
msgid "Failed to deny one or more workflow approval."
msgstr "Échec du refus d'une ou plusieurs validations de flux de travail."
msgstr ""
#: screens/WorkflowApproval/shared/WorkflowDenyButton.js:51
msgid "Failed to deny {0}."
@@ -3520,7 +3520,7 @@ msgstr "Echec du lancement du Job."
#: screens/Inventory/InventoryHosts/InventoryHostItem.js:121
msgid "Failed to load related groups."
msgstr "Impossible de charger les groupes associés."
msgstr ""
#: screens/Instances/InstanceDetail/InstanceDetail.js:388
#: screens/Instances/InstanceList/InstanceList.js:266
@@ -3972,12 +3972,12 @@ msgstr "Demande(s) de bilan de santé soumise(s). Veuillez patienter et recharge
#: screens/Instances/InstanceDetail/InstanceDetail.js:234
#: screens/Instances/InstanceList/InstanceListItem.js:242
msgid "Health checks are asynchronous tasks. See the"
msgstr "Les bilans de santé sont des tâches asynchrones. Veuillez consulter la documentation pour plus d'informations."
msgstr ""
#: screens/InstanceGroup/Instances/InstanceList.js:286
#: screens/Instances/InstanceList/InstanceList.js:219
msgid "Health checks can only be run on execution nodes."
msgstr "Les bilans de santé ne peuvent être exécutées que sur les nœuds d'exécution."
msgstr ""
#: components/StatusLabel/StatusLabel.js:42
msgid "Healthy"
@@ -5048,7 +5048,7 @@ msgstr "Lancer"
#: components/TemplateList/TemplateListItem.js:214
msgid "Launch Template"
msgstr "Lancer le modèle."
msgstr "Lacer le modèle."
#: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:32
#: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:34
@@ -9637,7 +9637,7 @@ msgstr "Utilisateur"
#: components/AppContainer/PageHeaderToolbar.js:160
msgid "User Details"
msgstr "Détails de l'utilisateur"
msgstr "Détails de l'erreur"
#: screens/Setting/SettingList.js:121
#: screens/Setting/Settings.js:118

View File

@@ -80,7 +80,7 @@ function Dashboard() {
<Trans>
<p>
<InfoCircleIcon /> A tech preview of the new {brandName} user
interface can be found <a href="/ui_next">here</a>.
interface can be found <a href="/ui_next/dashboard">here</a>.
</p>
</Trans>
</Banner>

View File

@@ -21,8 +21,6 @@ const ansibleDocUrls = {
'https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_vm_inventory_inventory.html',
constructed:
'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/constructed_inventory.html',
terraform:
'https://github.com/ansible-collections/cloud.terraform/blob/stable-statefile-inventory/plugins/inventory/terraform_state.py',
};
const getInventoryHelpTextStrings = () => ({
@@ -121,10 +119,10 @@ const getInventoryHelpTextStrings = () => ({
<br />
{value && (
<div>
{t`If you want the Inventory Source to update on launch , click on Update on Launch,
and also go to `}
{t`If you want the Inventory Source to update on
launch and on project update, click on Update on launch, and also go to`}
<Link to={`/projects/${value.id}/details`}> {value.name} </Link>
{t`and click on Update Revision on Launch.`}
{t`and click on Update Revision on Launch`}
</div>
)}
</>
@@ -140,8 +138,8 @@ const getInventoryHelpTextStrings = () => ({
<br />
{value && (
<div>
{t`If you want the Inventory Source to update on launch , click on Update on Launch,
and also go to `}
{t`If you want the Inventory Source to update on
launch and on project update, click on Update on launch, and also go to`}
<Link to={`/projects/${value.id}/details`}> {value.name} </Link>
{t`and click on Update Revision on Launch`}
</div>

View File

@@ -23,7 +23,6 @@ import {
SCMSubForm,
SatelliteSubForm,
ControllerSubForm,
TerraformSubForm,
VMwareSubForm,
VirtualizationSubForm,
} from './InventorySourceSubForms';
@@ -215,14 +214,6 @@ const InventorySourceFormFields = ({
}
/>
),
terraform: (
<TerraformSubForm
autoPopulateCredential={
!source?.id || source?.source !== 'terraform'
}
sourceOptions={sourceOptions}
/>
),
vmware: (
<VMwareSubForm
autoPopulateCredential={

View File

@@ -38,7 +38,6 @@ describe('<InventorySourceForm />', () => {
['openstack', 'OpenStack'],
['rhv', 'Red Hat Virtualization'],
['controller', 'Red Hat Ansible Automation Platform'],
['terraform', 'Terraform State'],
],
},
},

View File

@@ -1,59 +0,0 @@
import React, { useCallback } from 'react';
import { useField, useFormikContext } from 'formik';
import { t } from '@lingui/macro';
import getDocsBaseUrl from 'util/getDocsBaseUrl';
import { useConfig } from 'contexts/Config';
import CredentialLookup from 'components/Lookup/CredentialLookup';
import { required } from 'util/validators';
import {
OptionsField,
VerbosityField,
EnabledVarField,
EnabledValueField,
HostFilterField,
SourceVarsField,
} from './SharedFields';
import getHelpText from '../Inventory.helptext';
const TerraformSubForm = ({ autoPopulateCredential }) => {
const helpText = getHelpText();
const { setFieldValue, setFieldTouched } = useFormikContext();
const [credentialField, credentialMeta, credentialHelpers] =
useField('credential');
const config = useConfig();
const handleCredentialUpdate = useCallback(
(value) => {
setFieldValue('credential', value);
setFieldTouched('credential', true, false);
},
[setFieldValue, setFieldTouched]
);
const docsBaseUrl = getDocsBaseUrl(config);
return (
<>
<CredentialLookup
credentialTypeNamespace="terraform"
label={t`Credential`}
helperTextInvalid={credentialMeta.error}
isValid={!credentialMeta.touched || !credentialMeta.error}
onBlur={() => credentialHelpers.setTouched()}
onChange={handleCredentialUpdate}
value={credentialField.value}
required
autoPopulate={autoPopulateCredential}
validate={required(t`Select a value for this field`)}
/>
<VerbosityField />
<HostFilterField />
<EnabledVarField />
<EnabledValueField />
<OptionsField />
<SourceVarsField
popoverContent={helpText.sourceVars(docsBaseUrl, 'terraform')}
/>
</>
);
};
export default TerraformSubForm;

View File

@@ -1,70 +0,0 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { CredentialsAPI } from 'api';
import { mountWithContexts } from '../../../../../testUtils/enzymeHelpers';
import TerraformSubForm from './TerraformSubForm';
jest.mock('../../../../api');
const initialValues = {
credential: null,
overwrite: false,
overwrite_vars: false,
source_path: '',
source_project: null,
source_script: null,
source_vars: '---\n',
update_cache_timeout: 0,
update_on_launch: true,
verbosity: 1,
};
const mockSourceOptions = {
actions: {
POST: {},
},
};
describe('<TerraformSubForm />', () => {
let wrapper;
beforeEach(async () => {
CredentialsAPI.read.mockResolvedValue({
data: { count: 0, results: [] },
});
await act(async () => {
wrapper = mountWithContexts(
<Formik initialValues={initialValues}>
<TerraformSubForm sourceOptions={mockSourceOptions} />
</Formik>
);
});
});
afterAll(() => {
jest.clearAllMocks();
});
test('should render subform fields', () => {
expect(wrapper.find('FormGroup[label="Credential"]')).toHaveLength(1);
expect(wrapper.find('FormGroup[label="Verbosity"]')).toHaveLength(1);
expect(wrapper.find('FormGroup[label="Update options"]')).toHaveLength(1);
expect(
wrapper.find('FormGroup[label="Cache timeout (seconds)"]')
).toHaveLength(1);
expect(
wrapper.find('VariablesField[label="Source variables"]')
).toHaveLength(1);
});
test('should make expected api calls', () => {
expect(CredentialsAPI.read).toHaveBeenCalledTimes(1);
expect(CredentialsAPI.read).toHaveBeenCalledWith({
credential_type__namespace: 'terraform',
order_by: 'name',
page: 1,
page_size: 5,
});
});
});

View File

@@ -6,6 +6,5 @@ export { default as OpenStackSubForm } from './OpenStackSubForm';
export { default as SCMSubForm } from './SCMSubForm';
export { default as SatelliteSubForm } from './SatelliteSubForm';
export { default as ControllerSubForm } from './ControllerSubForm';
export { default as TerraformSubForm } from './TerraformSubForm';
export { default as VMwareSubForm } from './VMwareSubForm';
export { default as VirtualizationSubForm } from './VirtualizationSubForm';

View File

@@ -3,7 +3,6 @@ import { Modal, Tab, Tabs, TabTitleText } from '@patternfly/react-core';
import PropTypes from 'prop-types';
import { t } from '@lingui/macro';
import { encode } from 'html-entities';
import { jsonToYaml } from 'util/yaml';
import StatusLabel from '../../../components/StatusLabel';
import { DetailList, Detail } from '../../../components/DetailList';
import ContentEmpty from '../../../components/ContentEmpty';
@@ -145,28 +144,9 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
<ContentEmpty title={t`No JSON Available`} />
)}
</Tab>
<Tab
eventKey={2}
title={<TabTitleText>{t`YAML`}</TabTitleText>}
aria-label={t`YAML tab`}
ouiaId="yaml-tab"
>
{activeTabKey === 2 && jsonObj ? (
<CodeEditor
mode="javascript"
readOnly
value={jsonToYaml(JSON.stringify(jsonObj))}
onChange={() => {}}
rows={20}
hasErrors={false}
/>
) : (
<ContentEmpty title={t`No YAML Available`} />
)}
</Tab>
{stdOut?.length ? (
<Tab
eventKey={3}
eventKey={2}
title={<TabTitleText>{t`Output`}</TabTitleText>}
aria-label={t`Output tab`}
ouiaId="standard-out-tab"
@@ -183,7 +163,7 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
) : null}
{stdErr?.length ? (
<Tab
eventKey={4}
eventKey={3}
title={<TabTitleText>{t`Standard Error`}</TabTitleText>}
aria-label={t`Standard error tab`}
ouiaId="standard-error-tab"

View File

@@ -2,7 +2,6 @@ import React from 'react';
import { shallow } from 'enzyme';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import HostEventModal from './HostEventModal';
import { jsonToYaml } from 'util/yaml';
const hostEvent = {
changed: true,
@@ -168,8 +167,6 @@ const jsonValue = `{
]
}`;
const yamlValue = jsonToYaml(jsonValue);
describe('HostEventModal', () => {
test('initially renders successfully', () => {
const wrapper = shallow(
@@ -190,7 +187,7 @@ describe('HostEventModal', () => {
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
);
expect(wrapper.find('Tabs Tab').length).toEqual(5);
expect(wrapper.find('Tabs Tab').length).toEqual(4);
});
test('should initially show details tab', () => {
@@ -290,7 +287,7 @@ describe('HostEventModal', () => {
expect(codeEditor.prop('value')).toEqual(jsonValue);
});
test('should display YAML tab content on tab click', () => {
test('should display Standard Out tab content on tab click', () => {
const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
);
@@ -302,21 +299,6 @@ describe('HostEventModal', () => {
const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual(yamlValue);
});
test('should display Standard Out tab content on tab click', () => {
const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual(hostEvent.event_data.res.stdout);
});
@@ -334,10 +316,10 @@ describe('HostEventModal', () => {
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 4);
handleTabClick(null, 3);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=4] CodeEditor');
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('error content');
@@ -369,10 +351,10 @@ describe('HostEventModal', () => {
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3);
handleTabClick(null, 2);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('foo bar');
@@ -393,10 +375,10 @@ describe('HostEventModal', () => {
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3);
handleTabClick(null, 2);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('baz\nbar');
@@ -412,10 +394,10 @@ describe('HostEventModal', () => {
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3);
handleTabClick(null, 2);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual(

View File

@@ -30,7 +30,7 @@ function SubscriptionUsage() {
<Trans>
<p>
<InfoCircleIcon /> A tech preview of the new {brandName} user
interface can be found <a href="/ui_next">here</a>.
interface can be found <a href="/ui_next/dashboard">here</a>.
</p>
</Trans>
</Banner>

View File

@@ -201,11 +201,7 @@ function NodeViewModal({ readOnly }) {
overrides.limit = originalNodeObject.limit;
}
if (launchConfig.ask_verbosity_on_launch) {
overrides.verbosity =
originalNodeObject.verbosity !== undefined &&
originalNodeObject.verbosity !== null
? originalNodeObject.verbosity.toString()
: '0';
overrides.verbosity = originalNodeObject.verbosity.toString();
}
if (launchConfig.ask_credential_on_launch) {
overrides.credentials = originalNodeCredentials || [];

View File

@@ -35,7 +35,7 @@ ui-next/src/build: $(UI_NEXT_DIR)/src/build/awx
## True target for ui-next/src/build. Build ui_next from source.
$(UI_NEXT_DIR)/src/build/awx: $(UI_NEXT_DIR)/src $(UI_NEXT_DIR)/src/node_modules/webpack
@echo "=== Building ui_next ==="
@cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ ROUTE_PREFIX=/ui_next npm run build:awx
@cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ npm run build:awx
@mv $(UI_NEXT_DIR)/src/build/awx/index.html $(UI_NEXT_DIR)/src/build/awx/index_awx.html
.PHONY: ui-next/src

View File

@@ -4,8 +4,6 @@
from django.conf import settings
from django.urls import re_path, include
from ansible_base.resource_registry.urls import urlpatterns as resource_api_urls
from awx.main.views import handle_400, handle_403, handle_404, handle_500, handle_csp_violation, handle_login_redirect
@@ -13,7 +11,6 @@ urlpatterns = [
re_path(r'', include('awx.ui.urls', namespace='ui')),
re_path(r'^ui_next/.*', include('awx.ui_next.urls', namespace='ui_next')),
re_path(r'^api/', include('awx.api.urls', namespace='api')),
re_path(r'^api/v2/', include(resource_api_urls)),
re_path(r'^sso/', include('awx.sso.urls', namespace='sso')),
re_path(r'^sso/', include('social_django.urls', namespace='social')),
re_path(r'^(?:api/)?400.html$', handle_400),

View File

@@ -18,7 +18,7 @@ documentation: https://github.com/ansible/awx/blob/devel/awx_collection/README.m
homepage: https://www.ansible.com/
issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection
license:
- GPL-3.0-or-later
- GPL-3.0-only
name: awx
namespace: awx
readme: README.md

View File

@@ -0,0 +1,119 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Wayne Witzel III <wayne@riotousliving.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import traceback
TOWER_CLI_IMP_ERR = None
try:
import tower_cli.utils.exceptions as exc
from tower_cli.utils import parser
from tower_cli.api import client
HAS_TOWER_CLI = True
except ImportError:
TOWER_CLI_IMP_ERR = traceback.format_exc()
HAS_TOWER_CLI = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
def tower_auth_config(module):
"""
`tower_auth_config` attempts to load the tower-cli.cfg file
specified by the `tower_config_file` parameter. If found,
it returns the contents of the file as a dictionary; otherwise
it will attempt to fetch values from the module params and
only pass those values that have been set.
"""
config_file = module.params.pop('tower_config_file', None)
if config_file:
if not os.path.exists(config_file):
module.fail_json(msg='file not found: %s' % config_file)
if os.path.isdir(config_file):
module.fail_json(msg='directory can not be used as config file: %s' % config_file)
with open(config_file, 'r') as f:
return parser.string_to_dict(f.read())
else:
auth_config = {}
host = module.params.pop('tower_host', None)
if host:
auth_config['host'] = host
username = module.params.pop('tower_username', None)
if username:
auth_config['username'] = username
password = module.params.pop('tower_password', None)
if password:
auth_config['password'] = password
module.params.pop('tower_verify_ssl', None) # pop alias if used
verify_ssl = module.params.pop('validate_certs', None)
if verify_ssl is not None:
auth_config['verify_ssl'] = verify_ssl
return auth_config
def tower_check_mode(module):
'''Execute check mode logic for Ansible Tower modules'''
if module.check_mode:
try:
result = client.get('/ping').json()
module.exit_json(changed=True, tower_version='{0}'.format(result['version']))
except (exc.ServerError, exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(changed=False, msg='Failed check mode: {0}'.format(excinfo))
class TowerLegacyModule(AnsibleModule):
def __init__(self, argument_spec, **kwargs):
args = dict(
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
validate_certs=dict(type='bool', aliases=['tower_verify_ssl']),
tower_config_file=dict(type='path'),
)
args.update(argument_spec)
kwargs.setdefault('mutually_exclusive', [])
kwargs['mutually_exclusive'].extend(
(
('tower_config_file', 'tower_host'),
('tower_config_file', 'tower_username'),
('tower_config_file', 'tower_password'),
('tower_config_file', 'validate_certs'),
)
)
super().__init__(argument_spec=args, **kwargs)
if not HAS_TOWER_CLI:
self.fail_json(msg=missing_required_lib('ansible-tower-cli'), exception=TOWER_CLI_IMP_ERR)
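The precedence implemented by ``tower_auth_config`` above is: an explicit ``tower_config_file`` wins outright; otherwise only the parameters a caller actually set are forwarded. A standalone sketch of the fallback path, with illustrative values::

    params = {
        'tower_host': 'https://tower.example.com',  # illustrative
        'tower_username': 'admin',
        'tower_password': None,  # unset, so omitted from the result
        'validate_certs': True,
    }

    auth_config = {}
    for param, key in (('tower_host', 'host'),
                       ('tower_username', 'username'),
                       ('tower_password', 'password'),
                       ('validate_certs', 'verify_ssl')):
        value = params.get(param)
        if value is not None:
            auth_config[key] = value

    # auth_config == {'host': 'https://tower.example.com',
    #                 'username': 'admin', 'verify_ssl': True}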

View File

@@ -181,8 +181,10 @@ def run_module(request, collection_import):
resource_class = resource_module.ControllerAWXKitModule
elif getattr(resource_module, 'ControllerAPIModule', None):
resource_class = resource_module.ControllerAPIModule
elif getattr(resource_module, 'TowerLegacyModule', None):
resource_class = resource_module.TowerLegacyModule
else:
raise RuntimeError("The module has neither a ControllerAWXKitModule or a ControllerAPIModule")
raise RuntimeError("The module has neither a TowerLegacyModule, ControllerAWXKitModule or a ControllerAPIModule")
with mock.patch.object(resource_class, '_load_params', new=mock_load_params):
# Call the test utility (like a mock server) instead of issuing HTTP requests
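The fixture resolves which module base class to patch by probing the imported module object in priority order; a compact equivalent of the chain above::

    def resolve_resource_class(resource_module):
        # resource_module is the imported collection module under test.
        for name in ('ControllerAWXKitModule', 'ControllerAPIModule', 'TowerLegacyModule'):
            cls = getattr(resource_module, name, None)
            if cls is not None:
                return cls
        raise RuntimeError("The module has neither a TowerLegacyModule, ControllerAWXKitModule or a ControllerAPIModule")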

View File

@@ -155,4 +155,4 @@ def test_build_notification_message_undefined(run_module, admin_user, organizati
nt = NotificationTemplate.objects.get(id=result['id'])
body = job.build_notification_message(nt, 'running')
assert 'The template rendering return a blank body' in body[1]
assert '{"started_by": "My Placeholder"}' in body[1]

View File

@@ -26,7 +26,7 @@
name: "{{ project_name }}"
organization: "{{ org_name }}"
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
- name: Create a git project with same name, different org

View File

@@ -31,7 +31,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
register: result
@@ -44,7 +44,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
state: exists
register: result
@@ -58,7 +58,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
state: exists
request_timeout: .001
@@ -75,7 +75,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
state: absent
register: result
@@ -89,7 +89,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: true
state: exists
register: result
@@ -103,7 +103,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: false
register: result
ignore_errors: true
@@ -137,7 +137,7 @@
name: "{{ project_name2 }}"
organization: "{{ org_name }}"
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
scm_credential: "{{ cred_name }}"
check_mode: true
@@ -162,7 +162,7 @@
name: "{{ project_name2 }}"
organization: Non_Existing_Org
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
scm_credential: "{{ cred_name }}"
register: result
ignore_errors: true
@@ -179,7 +179,7 @@
name: "{{ project_name2 }}"
organization: "{{ org_name }}"
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
scm_credential: Non_Existing_Credential
register: result
ignore_errors: true
@@ -191,7 +191,7 @@
- "'Non_Existing_Credential' in result.msg"
- "result.total_results == 0"
- name: Create a git project using a branch and allowing branch override
- name: Create a git project without credentials without waiting
project:
name: "{{ project_name3 }}"
organization: Default

View File

@@ -13,7 +13,7 @@
name: "{{ project_name1 }}"
organization: Default
scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples
scm_url: https://github.com/ansible/test-playbooks
wait: false
register: project_create_result

View File

@@ -19,6 +19,8 @@ homepage: https://www.ansible.com/
issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection
license:
- GPL-3.0-or-later
# plugins/module_utils/tower_legacy.py
- BSD-2-Clause
name: {{ collection_package }}
namespace: {{ collection_namespace }}
readme: README.md

View File

@@ -96,7 +96,6 @@ credential_type_name_to_config_kind_map = {
'vault': 'vault',
'vmware vcenter': 'vmware',
'gpg public key': 'gpg_public_key',
'terraform backend configuration': 'terraform',
}
config_kind_to_credential_type_name_map = {kind: name for name, kind in credential_type_name_to_config_kind_map.items()}

View File

@@ -51,16 +51,7 @@ class WSClient(object):
# Subscription group types
def __init__(
self,
token=None,
hostname='',
port=443,
secure=True,
ws_suffix='websocket/',
session_id=None,
csrftoken=None,
add_received_time=False,
session_cookie_name='awx_sessionid',
self, token=None, hostname='', port=443, secure=True, session_id=None, csrftoken=None, add_received_time=False, session_cookie_name='awx_sessionid'
):
# delay this import, because this is an optional dependency
import websocket
@@ -77,7 +68,6 @@ class WSClient(object):
hostname = result.hostname
self.port = port
self.suffix = ws_suffix
self._use_ssl = secure
self.hostname = hostname
self.token = token
@@ -95,7 +85,7 @@ class WSClient(object):
else:
auth_cookie = ''
pref = 'wss://' if self._use_ssl else 'ws://'
url = '{0}{1.hostname}:{1.port}/{1.suffix}'.format(pref, self)
url = '{0}{1.hostname}:{1.port}/websocket/'.format(pref, self)
self.ws = websocket.WebSocketApp(
url, on_open=self._on_open, on_message=self._on_message, on_error=self._on_error, on_close=self._on_close, cookie=auth_cookie
)

View File

@@ -90,7 +90,6 @@ setup(
install_requires=[
'PyYAML',
'requests',
'setuptools',
],
python_requires=">=3.8",
extras_require={'formatting': ['jq'], 'websockets': ['websocket-client==0.57.0'], 'crypto': ['cryptography']},

View File

@@ -17,11 +17,6 @@ def test_explicit_hostname():
assert client.token == "token"
def test_websocket_suffix():
client = WSClient("token", "hostname", 566, ws_suffix='my-websocket/')
assert client.suffix == 'my-websocket/'
@pytest.mark.parametrize(
'url, result',
[

View File

@@ -13,7 +13,7 @@ Scaling your mesh is only available on Openshift and Kubernetes (K8S) deployment
Instances serve as nodes in your mesh topology. Automation mesh allows you to extend the footprint of your automation. Where you launch a job and where the ``ansible-playbook`` runs can be in different locations.
.. image:: ../common/images/instances_mesh_concept.drawio.png
.. image:: ../common/images/instances_mesh_concept.png
:alt: Site A pointing to Site B and dotted arrows to two hosts from Site B
Automation mesh is useful for:
@@ -23,7 +23,7 @@ Automation mesh is useful for:
The nodes (control, hop, and execution instances) are interconnected via receptor, forming a virtual mesh.
.. image:: ../common/images/instances_mesh_concept_with_nodes.drawio.png
.. image:: ../common/images/instances_mesh_concept_with_nodes.png
:alt: Control node pointing to hop node, which is pointing to two execution nodes.
@@ -51,227 +51,13 @@ Prerequisites
- To manage instances from the AWX user interface, you must have System Administrator or System Auditor permissions.
Common topologies
------------------
Instances make up the network of devices that communicate with one another. They are the building blocks of an automation mesh. These building blocks serve as nodes in a mesh topology. There are several kinds of instances:
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Node Type | Description |
+===========+=================================================================================================================+
| Control | Nodes that run persistent Ansible Automation Platform services, and delegate jobs to hybrid and execution nodes |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hybrid | Nodes that run persistent Ansible Automation Platform services and execute jobs |
| | (not applicable to operator-based installations) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hop | Used for relaying across the mesh only |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Execution | Nodes that run jobs delivered from control nodes (jobs submitted from the users Ansible automation) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
Simple topology
~~~~~~~~~~~~~~~~
One of the ways to expand job capacity is to create a standalone execution node that can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is set up between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.drawio.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
Below are sample values used to configure each node in a simple topology:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-65d6d96987-mgn9j
- n/a
- n/a
- [hop node]
* - Hop node
- awx-hop-node
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- [hop node]
Mesh topology
~~~~~~~~~~~~~~
Mesh ingress is a feature that allows remote nodes to connect inbound to the control plane. This is especially useful when creating remote nodes in restricted networking environments that disallow inbound traffic.
.. image:: ../common/images/instances_mesh_ingress_topology.drawio.png
:alt: Mesh ingress architecture showing the peering relationship between nodes.
Below are sample values used to configure each node in a mesh ingress topology:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-65d6d96987-mgn9j
- n/a
- n/a
- [hop node]
* - Hop node
- awx-mesh-ingress-1
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- [hop node]
To create a mesh ingress for AWX, see the `Mesh Ingress <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/mesh-ingress.html>`_ chapter of the AWX Operator Documentation for information on setting up this type of topology. The last step is to create a remote execution node and add it to an instance group so it can be used in your job execution. Whatever execution environment image is used to run a playbook must be accessible to your remote execution node, as must everything else your playbook uses.
.. image:: ../common/images/instances-job-template-using-remote-execution-ig.png
:alt: Job template using the instance group with the execution node to run jobs.
:width: 1400px
.. _ag_instances_add:
Add an instance
----------------
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
:width: 1400px
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in the **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created, as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. The listener port needs to be set if this option is enabled or if the instance is a peer.
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details page of the newly created instance opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
:width: 1400px
.. note::
The following steps 4-8 are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contains the files that allow AWX to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
:width: 1400px
5. Extract the downloaded ``tar.gz`` file in the location where you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To ensure these files end up in the right location on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection, which can be obtained via:
::
ansible-galaxy collection install -r requirements.yml
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: <hostname>
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
8. Run the following command on the machine you want to update your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to do a health check against the new instance. You may run a health check by selecting the node and clicking the **Run health check** button from its Details page at any time. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are now ready to run on this machine!
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
:width: 1400px
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
:width: 1400px
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box from the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** on the Instances page, or by setting the instance ``node_state = deprovisioning`` via the API. Upon deletion, a pop-up message notifies you that you may need to re-run the install bundle to make sure anything that was removed is no longer connected.
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
Manage instances
-----------------
Click **Instances** from the left side navigation menu to access the Instances list.
.. image:: ../common/images/instances_list_view.png
:alt: List view of instances in AWX
:width: 1400px
:alt: List view of instances in AWX
The Instances list displays all the current nodes in your topology, along with relevant details:
@@ -297,9 +83,7 @@ The Instances list displays all the current nodes in your topology, along with r
From this page, you can add, remove, or run health checks on your nodes. Use the check boxes next to an instance to select it for removal or for a health check. When a button is grayed out, you do not have permission for that particular action; contact your Administrator to be granted the required level of access. If you are able to remove an instance, you will receive a prompt for confirmation, like the one below:
.. image:: ../common/images/instances_delete_prompt.png
:alt: Prompt for deleting instances in AWX
:width: 1400px
:alt: Prompt for deleting instances in AWX.
.. note::
@@ -312,8 +96,7 @@ Click **Remove** to confirm.
If you run a health check on an instance, a message at the top of the Details page indicates that the health check is in progress.
.. image:: ../common/images/instances_health_check.png
:alt: Health check for instances in AWX
:width: 1400px
:alt: Health check for instances in AWX
Click **Reload** to refresh the instance status.
@@ -321,20 +104,162 @@ Click **Reload** to refresh the instance status.
Health checks are run asynchronously, and it may take up to a minute for the instance status to update, even with a refresh. The status may or may not change after the health check. While the health check task is running, a timer/clock icon displays at the bottom of the Details page next to the last known health check date and time stamp.
.. image:: ../common/images/instances_health_check_pending.png
:alt: Health check for instance still in pending state.
.. image:: ../common/images/instances_health_check_pending.png
:alt: Health check for instance still in pending state.
The example health check shows the status updates with an error on node 'one':
.. image:: ../common/images/topology-viewer-instance-with-errors.png
:alt: Health check showing an error in one of the instances.
:width: 1400px
:alt: Health check showing an error in one of the instances.
Add an instance
----------------
One of the ways to expand capacity is to create an instance. Standalone execution nodes can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is set up between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in the **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created, as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. The listener port needs to be set if this option is enabled or if the instance is a peer.
In the example diagram above, the configurations are as follows:
+------------------+---------------+--------------------------+--------------+
| instance name | listener_port | peers_from_control_nodes | peers |
+==================+===============+==========================+==============+
| execution node 1 | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| hop node | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| execution node 2 | null | false | ["hop node"] |
+------------------+---------------+--------------------------+--------------+
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details page of the newly created instance opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
.. note::
    The following steps (4-8) are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contains the files AWX needs to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
5. Extract the downloaded ``tar.gz`` file in the directory where you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To ensure these files are placed in the right locations on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection, which can be installed with:
::
ansible-galaxy collection install -r requirements.yml
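For reference, the ``requirements.yml`` included in the install bundle typically pins the Receptor collection and looks similar to the following; the exact contents of your bundle may differ:

::

    ---
    collections:
      - name: ansible.receptor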
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: 18.206.206.34
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
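Before running the playbook in the next step, you can optionally verify that the inventory and SSH credentials are correct with an ad-hoc Ansible ping. This check is not part of the install bundle; it is just a convenient sanity test:

::

    ansible -i inventory.yml all -m ping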
8. Run the following command from the directory containing the extracted install bundle to add the machine to your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to run a health check against the new instance. You may run a health check at any time by selecting the node and clicking the **Run health check** button on its Details page. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are ready to run on this machine.
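The health check can also be triggered through the API. Below is a minimal sketch, assuming an instance ID of 42 and the same placeholder URL and token as in the earlier example:

::

    curl -X POST https://awx.example.org/api/v2/instances/42/health_check/ \
      -H "Authorization: Bearer $AWX_TOKEN"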
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box in the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** on the Instances page, or by setting the instance's ``node_state`` to ``deprovisioning`` via the API. Upon deletion, a pop-up message notifies you that you may need to re-run the install bundle to ensure the removed nodes are no longer connected.
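Below is a minimal sketch of the API-based removal, again assuming an instance ID of 42 and the placeholder URL and token from the earlier examples:

::

    curl -X PATCH https://awx.example.org/api/v2/instances/42/ \
      -H "Authorization: Bearer $AWX_TOKEN" \
      -H "Content-Type: application/json" \
      -d '{"node_state": "deprovisioning"}'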
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
Using a custom Receptor CA
---------------------------
Refer to the AWX Operator documentation, `Custom Receptor CA <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/custom-receptor-certs.html>`_, for details.
The control nodes on the K8S cluster will communicate with execution nodes via mutual TLS TCP connections, running via Receptor. Execution nodes will verify incoming connections by ensuring the x509 certificate was issued by a trusted Certificate Authority (CA).
You may choose to provide your own CA for this validation. If no CA is provided, the AWX operator will automatically generate one using OpenSSL.
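If you do not have an existing CA, the following is a minimal sketch for generating a self-signed one with OpenSSL (version 1.1.1 or later, for ``-addext``); the key size, lifetime, and subject are illustrative placeholders:

::

    openssl req -x509 -newkey rsa:4096 -nodes -days 3650 \
      -addext "basicConstraints=critical,CA:TRUE" \
      -subj "/CN=Receptor CA" \
      -keyout ca.key -out ca.crt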
Given custom ``ca.crt`` and ``ca.key`` stored locally, run the following:
::
kubectl create secret tls awx-demo-receptor-ca \
--cert=/path/to/ca.crt --key=/path/to/ca.key
The secret should be named ``{AWX Custom Resource name}-receptor-ca``. In the above, the AWX Custom Resource name is "awx-demo". Replace "awx-demo" with your AWX Custom Resource name.
If this secret is created after AWX is deployed, run the following to restart the deployment:
::
kubectl rollout restart deployment awx-demo
.. note::
Changing the receptor CA will sever connections to any existing execution nodes. These nodes will enter an *Unavailable* state, and jobs will not be able to run on them. You will need to download and re-run the install bundle for each execution node. This will replace the TLS certificate files with those signed by the new CA. The execution nodes will then appear in a *Ready* state after a few minutes.
Using a private image for the default EE
-----------------------------------------
