Compare commits

..

1 Commits

Author SHA1 Message Date
David O Neill
e0acd9b111 Change failing PR to draft 2024-02-12 16:15:47 +00:00
186 changed files with 1062 additions and 2959 deletions

View File

@@ -11,12 +11,6 @@ runs:
shell: bash shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
shell: bash
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'
- name: Log in to registry - name: Log in to registry
shell: bash shell: bash
run: | run: |
@@ -24,11 +18,11 @@ runs:
- name: Pre-pull latest devel image to warm cache - name: Pre-pull latest devel image to warm cache
shell: bash shell: bash
run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }} run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
- name: Build image for current source checkout - name: Build image for current source checkout
shell: bash shell: bash
run: | run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \ DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \ COMPOSE_TAG=${{ github.base_ref }} \
make docker-compose-build make docker-compose-build

View File

@@ -35,7 +35,7 @@ runs:
- name: Start AWX - name: Start AWX
shell: bash shell: bash
run: | run: |
DEV_DOCKER_OWNER=${{ github.repository_owner }} \ DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \ COMPOSE_TAG=${{ github.base_ref }} \
COMPOSE_UP_OPTS="-d" \ COMPOSE_UP_OPTS="-d" \
make docker-compose make docker-compose

View File

@@ -15,4 +15,5 @@
"dependencies": "dependencies":
- any: ["awx/ui/package.json"] - any: ["awx/ui/package.json"]
- any: ["requirements/*"] - any: ["requirements/*.txt"]
- any: ["requirements/requirements.in"]

View File

@@ -107,7 +107,7 @@ jobs:
ansible-galaxy collection install -r molecule/requirements.yml ansible-galaxy collection install -r molecule/requirements.yml
sudo rm -f $(which kustomize) sudo rm -f $(which kustomize)
make kustomize make kustomize
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind -- --skip-tags=replicas KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind
env: env:
AWX_TEST_IMAGE: awx AWX_TEST_IMAGE: awx
AWX_TEST_VERSION: ci AWX_TEST_VERSION: ci
@@ -127,6 +127,10 @@ jobs:
- name: Run sanity tests - name: Run sanity tests
run: make test_collection_sanity run: make test_collection_sanity
env:
# needed due to cgroupsv2. This is fixed, but a stable release
# with the fix has not been made yet.
ANSIBLE_TEST_PREFER_PODMAN: 1
collection-integration: collection-integration:
name: awx_collection integration name: awx_collection integration

View File

@@ -3,50 +3,28 @@ name: Build/Push Development Images
env: env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
workflow_dispatch:
push: push:
branches: branches:
- devel - devel
- release_* - release_*
- feature_* - feature_*
jobs: jobs:
push-development-images: push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 120 timeout-minutes: 60
permissions: permissions:
packages: write packages: write
contents: read contents: read
strategy:
fail-fast: false
matrix:
build-targets:
- image-name: awx_devel
make-target: docker-compose-buildx
- image-name: awx_kube_devel
make-target: awx-kube-dev-buildx
- image-name: awx
make-target: awx-kube-buildx
steps: steps:
- name: Skipping build of awx image for non-awx repository
run: |
echo "Skipping build of awx image for non-awx repository"
exit 0
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- name: Set up QEMU - name: Get python version from Makefile
uses: docker/setup-qemu-action@v3 run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set up Docker Buildx - name: Set lower case owner name
uses: docker/setup-buildx-action@v3
- name: Set GITHUB_ENV variables
run: | run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
env: env:
OWNER: '${{ github.repository_owner }}' OWNER: '${{ github.repository_owner }}'
@@ -59,19 +37,23 @@ jobs:
run: | run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Setup node and npm - name: Pre-pull image to warm build cache
uses: actions/setup-node@v2
with:
node-version: '16.13.1'
if: matrix.build-targets.image-name == 'awx'
- name: Prebuild UI for awx image (to speed up build process)
run: | run: |
sudo apt-get install gettext docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
make ui-release docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
make ui-next docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
if: matrix.build-targets.image-name == 'awx'
- name: Build and push AWX devel images - name: Build images
run: | run: |
make ${{ matrix.build-targets.make-target }} DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
- name: Push development images
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
- name: Push AWX k8s image, only for upstream and feature branches
run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
if: endsWith(github.repository, '/awx')

View File

@@ -2,10 +2,12 @@
name: Feature branch deletion cleanup name: Feature branch deletion cleanup
env: env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: delete on:
delete:
branches:
- feature_**
jobs: jobs:
branch_delete: push:
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20 timeout-minutes: 20
permissions: permissions:
@@ -20,4 +22,6 @@ jobs:
run: | run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}" ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \ ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read" -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -24,6 +24,38 @@ jobs:
repo-token: "${{ secrets.GITHUB_TOKEN }}" repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler.yml configuration-path: .github/pr_labeler.yml
convert-to-draft:
runs-on: ubuntu-latest
name: Change failing PRS to draft
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: 14
- name: Install dependencies
run: npm install -g github
- name: Check CI status
id: check-ci
run: |
status=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/check-suites | \
jq -r '.check_suites[0].conclusion')
echo "CI Status: $status"
echo "::set-output name=ci_status::$status"
- name: Convert to Draft on CI Failure
if: steps.check-ci.outputs.ci_status == 'failure'
run: gh pr edit ${{ github.event.number }} --draft
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
community: community:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20 timeout-minutes: 20

View File

@@ -83,15 +83,11 @@ jobs:
- name: Re-tag and promote awx image - name: Re-tag and promote awx image
run: | run: |
docker buildx imagetools create \ docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \ docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
--tag quay.io/${{ github.repository }}:${{ github.event.release.tag_name }} docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker buildx imagetools create \ docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \ docker push quay.io/${{ github.repository }}:latest
--tag quay.io/${{ github.repository }}:latest docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
- name: Re-tag and promote awx-ee image docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
run: |
docker buildx imagetools create \
ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}

View File

@@ -86,33 +86,27 @@ jobs:
-e push=yes \ -e push=yes \
-e awx_official=yes -e awx_official=yes
- name: Log into registry ghcr.io - name: Log in to GHCR
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 run: |
with: echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log into registry quay.io - name: Log in to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 run: |
with: echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
registry: quay.io
username: ${{ secrets.QUAY_USER }}
password: ${{ secrets.QUAY_TOKEN }}
- name: tag awx-ee:latest with version input - name: tag awx-ee:latest with version input
run: | run: |
docker buildx imagetools create \ docker pull quay.io/ansible/awx-ee:latest
quay.io/ansible/awx-ee:latest \ docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
--tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }} docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Stage awx-operator image - name: Build and stage awx-operator
working-directory: awx-operator working-directory: awx-operator
run: | run: |
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \ BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \ --build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMG=ghcr.io/${{ github.repository_owner }}/awx-operator:${{ github.event.inputs.operator_version }} \ IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
make docker-buildx VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
- name: Run test deployment with awx-operator - name: Run test deployment with awx-operator
working-directory: awx-operator working-directory: awx-operator

8
.gitignore vendored
View File

@@ -46,11 +46,6 @@ tools/docker-compose/overrides/
tools/docker-compose-minikube/_sources tools/docker-compose-minikube/_sources
tools/docker-compose/keycloak.awx.realm.json tools/docker-compose/keycloak.awx.realm.json
!tools/docker-compose/editable_dependencies
tools/docker-compose/editable_dependencies/*
!tools/docker-compose/editable_dependencies/README.md
!tools/docker-compose/editable_dependencies/install.sh
# Tower setup playbook testing # Tower setup playbook testing
setup/test/roles/postgresql setup/test/roles/postgresql
**/provision_docker **/provision_docker
@@ -174,6 +169,3 @@ awx/ui_next/build
# Docs build stuff # Docs build stuff
docs/docsite/build/ docs/docsite/build/
_readthedocs/ _readthedocs/
# Pyenv
.python-version

113
.vscode/launch.json vendored
View File

@@ -1,113 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "run_ws_heartbeat",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_ws_heartbeat"],
"django": true,
"preLaunchTask": "stop awx-ws-heartbeat",
"postDebugTask": "start awx-ws-heartbeat"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_callback_receiver",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_callback_receiver"],
"django": true,
"preLaunchTask": "stop awx-receiver",
"postDebugTask": "start awx-receiver"
},
{
"name": "run_dispatcher",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_dispatcher"],
"django": true,
"preLaunchTask": "stop awx-dispatcher",
"postDebugTask": "start awx-dispatcher"
},
{
"name": "run_rsyslog_configurer",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_rsyslog_configurer"],
"django": true,
"preLaunchTask": "stop awx-rsyslog-configurer",
"postDebugTask": "start awx-rsyslog-configurer"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_wsrelay",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_wsrelay"],
"django": true,
"preLaunchTask": "stop awx-wsrelay",
"postDebugTask": "start awx-wsrelay"
},
{
"name": "daphne",
"type": "debugpy",
"request": "launch",
"program": "/var/lib/awx/venv/awx/bin/daphne",
"args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
"django": true,
"preLaunchTask": "stop awx-daphne",
"postDebugTask": "start awx-daphne"
},
{
"name": "runserver(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "runserver_plus(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver_plus", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi and install Werkzeug",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "shell_plus",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["shell_plus"],
"django": true,
},
]
}

100
.vscode/tasks.json vendored
View File

@@ -1,100 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "start awx-cache-clear",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-cache-clear"
},
{
"label": "stop awx-cache-clear",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-cache-clear"
},
{
"label": "start awx-daphne",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-daphne"
},
{
"label": "stop awx-daphne",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-daphne"
},
{
"label": "start awx-dispatcher",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-dispatcher"
},
{
"label": "stop awx-dispatcher",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-dispatcher"
},
{
"label": "start awx-receiver",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-receiver"
},
{
"label": "stop awx-receiver",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-receiver"
},
{
"label": "start awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
},
{
"label": "stop awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
},
{
"label": "start awx-rsyslogd",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslogd"
},
{
"label": "stop awx-rsyslogd",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslogd"
},
{
"label": "start awx-uwsgi",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi and install Werkzeug",
"type": "shell",
"command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "start awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-ws-heartbeat"
},
{
"label": "stop awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
},
{
"label": "start awx-wsrelay",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-wsrelay"
},
{
"label": "stop awx-wsrelay",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-wsrelay"
}
]
}

View File

@@ -1,6 +1,6 @@
-include awx/ui_next/Makefile -include awx/ui_next/Makefile
PYTHON := $(notdir $(shell for i in python3.11 python3; do command -v $$i; done|sed 1q)) PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
SHELL := bash SHELL := bash
DOCKER_COMPOSE ?= docker-compose DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no OFFICIAL ?= no
@@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null) VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
# ansible-test requires semver compatable version, so we allow overrides to hack it # ansible-test requires semver compatable version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3) COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -47,8 +47,6 @@ VAULT ?= false
VAULT_TLS ?= false VAULT_TLS ?= false
# If set to true docker-compose will also start a tacacs+ instance # If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false TACACS ?= false
# If set to true docker-compose will install editable dependencies
EDITABLE_DEPENDENCIES ?= false
VENV_BASE ?= /var/lib/awx/venv VENV_BASE ?= /var/lib/awx/venv
@@ -65,7 +63,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
# These should be upgraded in the AWX and Ansible venv before attempting # These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements # to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0 VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==8.0.4 wheel==0.38.4
NAME ?= awx NAME ?= awx
@@ -77,9 +75,6 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz
I18N_FLAG_FILE = .i18n_built I18N_FLAG_FILE = .i18n_built
## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \ .PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \ develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \ receiver test test_unit test_coverage coverage_html \
@@ -218,6 +213,8 @@ collectstatic:
fi; \ fi; \
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1 $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
uwsgi: collectstatic uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -225,7 +222,7 @@ uwsgi: collectstatic
uwsgi /etc/tower/uwsgi.ini uwsgi /etc/tower/uwsgi.ini
awx-autoreload: awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx @/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
daphne: daphne:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
@@ -305,7 +302,7 @@ swagger: reports
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report) (set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
check: black check: black
@@ -535,23 +532,16 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_vault=$(VAULT) \ -e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \ -e vault_tls=$(VAULT_TLS) \
-e enable_tacacs=$(TACACS) \ -e enable_tacacs=$(TACACS) \
-e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \ $(EXTRA_SOURCES_ANSIBLE_OPTS)
$(EXTRA_SOURCES_ANSIBLE_OPTS)
docker-compose: awx/projects docker-compose-sources docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml; ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \ ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-e enable_vault=$(VAULT) \ -e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \ -e vault_tls=$(VAULT_TLS) \
-e enable_ldap=$(LDAP); \ -e enable_ldap=$(LDAP);
$(MAKE) docker-compose-up
docker-compose-up:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose-down:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) down --remove-orphans
docker-compose-credential-plugins: awx/projects docker-compose-sources docker-compose-credential-plugins: awx/projects docker-compose-sources
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m" echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
@@ -596,27 +586,12 @@ docker-compose-build: Dockerfile.dev
--build-arg BUILDKIT_INLINE_CACHE=1 \ --build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) . --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
.PHONY: docker-compose-buildx
## Build awx_devel image for docker compose development environment for multiple architectures
docker-compose-buildx: Dockerfile.dev
- docker buildx create --name docker-compose-buildx
docker buildx use docker-compose-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEVEL_IMAGE_NAME) \
-f Dockerfile.dev .
- docker buildx rm docker-compose-buildx
docker-clean: docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);) -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);) -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_var_lib_awx tools_awx_db tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q) docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose docker-refresh: docker-clean docker-compose
@@ -638,6 +613,9 @@ clean-elk:
docker rm tools_elasticsearch_1 docker rm tools_elasticsearch_1
docker rm tools_kibana_1 docker rm tools_kibana_1
psql-container:
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
VERSION: VERSION:
@echo "awx: $(VERSION)" @echo "awx: $(VERSION)"
@@ -670,21 +648,6 @@ awx-kube-build: Dockerfile
--build-arg HEADLESS=$(HEADLESS) \ --build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) . -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
## Build multi-arch awx image for deployment on Kubernetes environment.
awx-kube-buildx: Dockerfile
- docker buildx create --name awx-kube-buildx
docker buildx use awx-kube-buildx
- docker buildx build \
--push \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
-f Dockerfile .
- docker buildx rm awx-kube-buildx
.PHONY: Dockerfile.kube-dev .PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image ## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
@@ -701,18 +664,6 @@ awx-kube-dev-build: Dockerfile.kube-dev
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) . -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-buildx: Dockerfile.kube-dev
- docker buildx create --name awx-kube-dev-buildx
docker buildx use awx-kube-dev-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-f Dockerfile.kube-dev .
- docker buildx rm awx-kube-dev-buildx
kind-dev-load: awx-kube-dev-build kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) $(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)

View File

@@ -154,12 +154,10 @@ def manage():
from django.conf import settings from django.conf import settings
from django.core.management import execute_from_command_line from django.core.management import execute_from_command_line
# enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1 # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
# In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
# The return of connection.pg_version is something like 12013
if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development': if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
if (connection.pg_version // 10000) < 12: if (connection.pg_version // 10000) < 12:
sys.stderr.write("At a minimum, postgres version 12 is required\n") sys.stderr.write("Postgres version 12 is required\n")
sys.exit(1) sys.exit(1)
if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover

View File

@@ -93,7 +93,6 @@ register(
default='', default='',
label=_('Login redirect override URL'), label=_('Login redirect override URL'),
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'), help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
warning_text=_('Changing the redirect URL could impact the ability to login if local authentication is also disabled.'),
category=_('Authentication'), category=_('Authentication'),
category_slug='authentication', category_slug='authentication',
) )

View File

@@ -36,13 +36,11 @@ class Metadata(metadata.SimpleMetadata):
field_info = OrderedDict() field_info = OrderedDict()
field_info['type'] = self.label_lookup[field] field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False) field_info['required'] = getattr(field, 'required', False)
field_info['hidden'] = getattr(field, 'hidden', False)
text_attrs = [ text_attrs = [
'read_only', 'read_only',
'label', 'label',
'help_text', 'help_text',
'warning_text',
'min_length', 'min_length',
'max_length', 'max_length',
'min_value', 'min_value',

View File

@@ -5594,7 +5594,7 @@ class InstanceSerializer(BaseSerializer):
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk}) res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk}) res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed: if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk}) res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor: if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution': if obj.node_type == 'execution':

View File

@@ -24,10 +24,6 @@ def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **e
else: else:
url = _reverse(viewname, args, kwargs, request, format, **extra) url = _reverse(viewname, args, kwargs, request, format, **extra)
if settings.OPTIONAL_API_URLPATTERN_PREFIX and request:
if request.path.startswith(f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}"):
url = url.replace('/api', f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}")
return url return url

View File

@@ -272,24 +272,16 @@ class DashboardJobsGraphView(APIView):
success_query = user_unified_jobs.filter(status='successful') success_query = user_unified_jobs.filter(status='successful')
failed_query = user_unified_jobs.filter(status='failed') failed_query = user_unified_jobs.filter(status='failed')
canceled_query = user_unified_jobs.filter(status='canceled')
error_query = user_unified_jobs.filter(status='error')
if job_type == 'inv_sync': if job_type == 'inv_sync':
success_query = success_query.filter(instance_of=models.InventoryUpdate) success_query = success_query.filter(instance_of=models.InventoryUpdate)
failed_query = failed_query.filter(instance_of=models.InventoryUpdate) failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
error_query = error_query.filter(instance_of=models.InventoryUpdate)
elif job_type == 'playbook_run': elif job_type == 'playbook_run':
success_query = success_query.filter(instance_of=models.Job) success_query = success_query.filter(instance_of=models.Job)
failed_query = failed_query.filter(instance_of=models.Job) failed_query = failed_query.filter(instance_of=models.Job)
canceled_query = canceled_query.filter(instance_of=models.Job)
error_query = error_query.filter(instance_of=models.Job)
elif job_type == 'scm_update': elif job_type == 'scm_update':
success_query = success_query.filter(instance_of=models.ProjectUpdate) success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate) failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
error_query = error_query.filter(instance_of=models.ProjectUpdate)
end = now() end = now()
interval = 'day' interval = 'day'
@@ -305,12 +297,10 @@ class DashboardJobsGraphView(APIView):
else: else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST) return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}} dashboard_data = {"jobs": {"successful": [], "failed": []}}
succ_list = dashboard_data['jobs']['successful'] succ_list = dashboard_data['jobs']['successful']
fail_list = dashboard_data['jobs']['failed'] fail_list = dashboard_data['jobs']['failed']
canceled_list = dashboard_data['jobs']['canceled']
error_list = dashboard_data['jobs']['error']
qs_s = ( qs_s = (
success_query.filter(finished__range=(start, end)) success_query.filter(finished__range=(start, end))
@@ -328,22 +318,6 @@ class DashboardJobsGraphView(APIView):
.annotate(agg=Count('id', distinct=True)) .annotate(agg=Count('id', distinct=True))
) )
data_f = {item['d']: item['agg'] for item in qs_f} data_f = {item['d']: item['agg'] for item in qs_f}
qs_c = (
canceled_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_c = {item['d']: item['agg'] for item in qs_c}
qs_e = (
error_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_e = {item['d']: item['agg'] for item in qs_e}
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0) start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count(): for d in itertools.count():
@@ -352,8 +326,6 @@ class DashboardJobsGraphView(APIView):
break break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)]) succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)]) fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])
return Response(dashboard_data) return Response(dashboard_data)

View File

@@ -55,7 +55,6 @@ register(
# Optional; category_slug will be slugified version of category if not # Optional; category_slug will be slugified version of category if not
# explicitly provided. # explicitly provided.
category_slug='cows', category_slug='cows',
hidden=True,
) )

View File

@@ -127,8 +127,6 @@ class SettingsRegistry(object):
encrypted = bool(field_kwargs.pop('encrypted', False)) encrypted = bool(field_kwargs.pop('encrypted', False))
defined_in_file = bool(field_kwargs.pop('defined_in_file', False)) defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
unit = field_kwargs.pop('unit', None) unit = field_kwargs.pop('unit', None)
hidden = field_kwargs.pop('hidden', False)
warning_text = field_kwargs.pop('warning_text', None)
if getattr(field_kwargs.get('child', None), 'source', None) is not None: if getattr(field_kwargs.get('child', None), 'source', None) is not None:
field_kwargs['child'].source = None field_kwargs['child'].source = None
field_instance = field_class(**field_kwargs) field_instance = field_class(**field_kwargs)
@@ -136,14 +134,12 @@ class SettingsRegistry(object):
field_instance.category = category field_instance.category = category
field_instance.depends_on = depends_on field_instance.depends_on = depends_on
field_instance.unit = unit field_instance.unit = unit
field_instance.hidden = hidden
if placeholder is not empty: if placeholder is not empty:
field_instance.placeholder = placeholder field_instance.placeholder = placeholder
field_instance.defined_in_file = defined_in_file field_instance.defined_in_file = defined_in_file
if field_instance.defined_in_file: if field_instance.defined_in_file:
field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text) field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text)
field_instance.encrypted = encrypted field_instance.encrypted = encrypted
field_instance.warning_text = warning_text
original_field_instance = field_instance original_field_instance = field_instance
if field_class != original_field_class: if field_class != original_field_class:
original_field_instance = original_field_class(**field_kwargs) original_field_instance = original_field_class(**field_kwargs)

View File

@@ -1,7 +1,6 @@
# Python # Python
import contextlib import contextlib
import logging import logging
import psycopg
import threading import threading
import time import time
import os import os
@@ -14,7 +13,7 @@ from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.db import transaction, connection from django.db import transaction, connection
from django.db.utils import DatabaseError, ProgrammingError from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property from django.utils.functional import cached_property
# Django REST Framework # Django REST Framework
@@ -81,26 +80,18 @@ def _ctit_db_wrapper(trans_safe=False):
logger.debug('Obtaining database settings in spite of broken transaction.') logger.debug('Obtaining database settings in spite of broken transaction.')
transaction.set_rollback(False) transaction.set_rollback(False)
yield yield
except ProgrammingError as e: except DBError as exc:
# Exception raised for programming errors
# Examples may be table not found or already exists,
# this generally means we can't fetch Tower configuration
# because the database hasn't actually finished migrating yet;
# this is usually a sign that a service in a container (such as ws_broadcast)
# has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet
# syntax error in the SQL statement, wrong number of parameters specified, etc.
if trans_safe: if trans_safe:
logger.debug(f'Database settings are not available, using defaults. error: {str(e)}') level = logger.warning
else: if isinstance(exc, ProgrammingError):
logger.exception('Error modifying something related to database settings.') if 'relation' in str(exc) and 'does not exist' in str(exc):
except DatabaseError as e: # this generally means we can't fetch Tower configuration
if trans_safe: # because the database hasn't actually finished migrating yet;
cause = e.__cause__ # this is usually a sign that a service in a container (such as ws_broadcast)
if cause and hasattr(cause, 'sqlstate'): # has come up *before* the database has finished migrating, and
sqlstate = cause.sqlstate # especially that the conf.settings table doesn't exist yet
sqlstate_str = psycopg.errors.lookup(sqlstate) level = logger.debug
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) level(f'Database settings are not available, using defaults. error: {str(exc)}')
else: else:
logger.exception('Error modifying something related to database settings.') logger.exception('Error modifying something related to database settings.')
finally: finally:

View File

@@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
resolved_action, resolved_action,
resolved_role, resolved_role,
-- '-' operator listed here: -- '-' operator listed here:
-- https://www.postgresql.org/docs/15/functions-json.html -- https://www.postgresql.org/docs/12/functions-json.html
-- note that operator is only supported by jsonb objects -- note that operator is only supported by jsonb objects
-- https://www.postgresql.org/docs/current/datatype-json.html -- https://www.postgresql.org/docs/current/datatype-json.html
(CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats, (CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,

View File

@@ -92,7 +92,6 @@ register(
), ),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
) )
register( register(
@@ -775,7 +774,6 @@ register(
allow_null=True, allow_null=True,
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
) )
register( register(
'AUTOMATION_ANALYTICS_LAST_ENTRIES', 'AUTOMATION_ANALYTICS_LAST_ENTRIES',
@@ -817,7 +815,6 @@ register(
help_text=_('Max jobs to allow bulk jobs to launch'), help_text=_('Max jobs to allow bulk jobs to launch'),
category=_('Bulk Actions'), category=_('Bulk Actions'),
category_slug='bulk', category_slug='bulk',
hidden=True,
) )
register( register(
@@ -828,7 +825,6 @@ register(
help_text=_('Max number of hosts to allow to be created in a single bulk action'), help_text=_('Max number of hosts to allow to be created in a single bulk action'),
category=_('Bulk Actions'), category=_('Bulk Actions'),
category_slug='bulk', category_slug='bulk',
hidden=True,
) )
register( register(
@@ -839,7 +835,6 @@ register(
help_text=_('Max number of hosts to allow to be deleted in a single bulk action'), help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
category=_('Bulk Actions'), category=_('Bulk Actions'),
category_slug='bulk', category_slug='bulk',
hidden=True,
) )
register( register(
@@ -850,7 +845,6 @@ register(
help_text=_('Enable preview of new user interface.'), help_text=_('Enable preview of new user interface.'),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
hidden=True,
) )
register( register(

View File

@@ -14,7 +14,7 @@ __all__ = [
'STANDARD_INVENTORY_UPDATE_ENV', 'STANDARD_INVENTORY_UPDATE_ENV',
] ]
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform') CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
PRIVILEGE_ESCALATION_METHODS = [ PRIVILEGE_ESCALATION_METHODS = [
('sudo', _('Sudo')), ('sudo', _('Sudo')),
('su', _('Su')), ('su', _('Su')),

View File

@@ -1,10 +1,9 @@
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential
from msrestazure import azure_cloud
from .plugin import CredentialPlugin from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.common.credentials import ServicePrincipalCredentials
from msrestazure import azure_cloud
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py # https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
@@ -55,9 +54,22 @@ azure_keyvault_inputs = {
def azure_keyvault_backend(**kwargs): def azure_keyvault_backend(**kwargs):
csc = ClientSecretCredential(tenant_id=kwargs['tenant'], client_id=kwargs['client'], client_secret=kwargs['secret']) url = kwargs['url']
kv = SecretClient(credential=csc, vault_url=kwargs['url']) [cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
return kv.get_secret(name=kwargs['secret_field'], version=kwargs.get('secret_version', '')).value
def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(
url=url,
client_id=kwargs['client'],
secret=kwargs['secret'],
tenant=kwargs['tenant'],
resource=f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
)
token = credentials.token
return token['token_type'], token['access_token']
kv = KeyVaultClient(KeyVaultAuthentication(auth_callback))
return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value
azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend) azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend)

View File

@@ -105,11 +105,7 @@ def create_listener_connection():
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items(): for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v conf['OPTIONS'][k] = v
# Allow password-less authentication connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
if 'PASSWORD' in conf:
conf['OPTIONS']['password'] = conf.pop('PASSWORD')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS']) return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])

View File

@@ -162,7 +162,7 @@ class AWXConsumerRedis(AWXConsumerBase):
class AWXConsumerPG(AWXConsumerBase): class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, schedule=None, **kwargs): def __init__(self, *args, schedule=None, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
# if no successful loops have ran since startup, then we should fail right away # if no successful loops have ran since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup self.pg_is_down = True # set so that we fail if we get database errors on startup
init_time = time.time() init_time = time.time()
@@ -259,12 +259,6 @@ class AWXConsumerPG(AWXConsumerBase):
current_downtime = time.time() - self.pg_down_time current_downtime = time.time() - self.pg_down_time
if current_downtime > self.pg_max_wait: if current_downtime > self.pg_max_wait:
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting") logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise raise
# Wait for a second before next attempt, but still listen for any shutdown signals # Wait for a second before next attempt, but still listen for any shutdown signals
for i in range(10): for i in range(10):
@@ -276,12 +270,6 @@ class AWXConsumerPG(AWXConsumerBase):
except Exception: except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in dispatcher main loop') logger.exception('Encountered unhandled error in dispatcher main loop')
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise raise

View File

@@ -1,179 +0,0 @@
import json
import os
import sys
import re
from typing import Any
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.conf import settings_registry
class Command(BaseCommand):
help = 'Dump the current auth configuration in django_ansible_base.authenticator format, currently supports LDAP and SAML'
DAB_SAML_AUTHENTICATOR_KEYS = {
"SP_ENTITY_ID": True,
"SP_PUBLIC_CERT": True,
"SP_PRIVATE_KEY": True,
"ORG_INFO": True,
"TECHNICAL_CONTACT": True,
"SUPPORT_CONTACT": True,
"SP_EXTRA": False,
"SECURITY_CONFIG": False,
"EXTRA_DATA": False,
"ENABLED_IDPS": True,
"CALLBACK_URL": False,
}
DAB_LDAP_AUTHENTICATOR_KEYS = {
"SERVER_URI": True,
"BIND_DN": False,
"BIND_PASSWORD": False,
"CONNECTION_OPTIONS": False,
"GROUP_TYPE": True,
"GROUP_TYPE_PARAMS": True,
"GROUP_SEARCH": False,
"START_TLS": False,
"USER_DN_TEMPLATE": True,
"USER_ATTR_MAP": True,
"USER_SEARCH": False,
}
def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]:
awx_ldap_settings = {}
for awx_ldap_setting in settings_registry.get_registered_settings(category_slug='ldap'):
key = awx_ldap_setting.removeprefix("AUTH_LDAP_")
value = getattr(settings, awx_ldap_setting, None)
awx_ldap_settings[key] = value
grouped_settings = {}
for key, value in awx_ldap_settings.items():
match = re.search(r'(\d+)', key)
index = int(match.group()) if match else 0
new_key = re.sub(r'\d+_', '', key)
if index not in grouped_settings:
grouped_settings[index] = {}
grouped_settings[index][new_key] = value
if new_key == "GROUP_TYPE" and value:
grouped_settings[index][new_key] = type(value).__name__
if new_key == "SERVER_URI" and value:
value = value.split(", ")
return grouped_settings
def is_enabled(self, settings, keys):
for key, required in keys.items():
if required and not settings.get(key):
return False
return True
def get_awx_saml_settings(self) -> dict[str, Any]:
awx_saml_settings = {}
for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'):
awx_saml_settings[awx_saml_setting.removeprefix("SOCIAL_AUTH_SAML_")] = getattr(settings, awx_saml_setting, None)
return awx_saml_settings
def format_config_data(self, enabled, awx_settings, type, keys, name):
config = {
"type": f"awx.authentication.authenticator_plugins.{type}",
"name": name,
"enabled": enabled,
"create_objects": True,
"users_unique": False,
"remove_users": True,
"configuration": {},
}
for k in keys:
v = awx_settings.get(k)
config["configuration"].update({k: v})
if type == "saml":
idp_to_key_mapping = {
"url": "IDP_URL",
"x509cert": "IDP_X509_CERT",
"entity_id": "IDP_ENTITY_ID",
"attr_email": "IDP_ATTR_EMAIL",
"attr_groups": "IDP_GROUPS",
"attr_username": "IDP_ATTR_USERNAME",
"attr_last_name": "IDP_ATTR_LAST_NAME",
"attr_first_name": "IDP_ATTR_FIRST_NAME",
"attr_user_permanent_id": "IDP_ATTR_USER_PERMANENT_ID",
}
for idp_name in awx_settings.get("ENABLED_IDPS", {}):
for key in idp_to_key_mapping:
value = awx_settings["ENABLED_IDPS"][idp_name].get(key)
if value is not None:
config["name"] = idp_name
config["configuration"].update({idp_to_key_mapping[key]: value})
return config
def add_arguments(self, parser):
parser.add_argument(
"output_file",
nargs="?",
type=str,
default=None,
help="Output JSON file path",
)
def handle(self, *args, **options):
try:
data = []
# dump SAML settings
awx_saml_settings = self.get_awx_saml_settings()
awx_saml_enabled = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
if awx_saml_enabled:
awx_saml_name = awx_saml_settings["ENABLED_IDPS"]
data.append(
self.format_config_data(
awx_saml_enabled,
awx_saml_settings,
"saml",
self.DAB_SAML_AUTHENTICATOR_KEYS,
awx_saml_name,
)
)
# dump LDAP settings
awx_ldap_group_settings = self.get_awx_ldap_settings()
for awx_ldap_name, awx_ldap_settings in enumerate(awx_ldap_group_settings.values()):
enabled = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
if enabled:
data.append(
self.format_config_data(
enabled,
awx_ldap_settings,
"ldap",
self.DAB_LDAP_AUTHENTICATOR_KEYS,
str(awx_ldap_name),
)
)
# write to file if requested
if options["output_file"]:
# Define the path for the output JSON file
output_file = options["output_file"]
# Ensure the directory exists
os.makedirs(os.path.dirname(output_file), exist_ok=True)
# Write data to the JSON file
with open(output_file, "w") as f:
json.dump(data, f, indent=4)
self.stdout.write(self.style.SUCCESS(f"Auth config data dumped to {output_file}"))
else:
self.stdout.write(json.dumps(data, indent=4))
except Exception as e:
self.stdout.write(self.style.ERROR(f"An error occurred: {str(e)}"))
sys.exit(1)

View File

@@ -92,6 +92,8 @@ class Command(BaseCommand):
return host_stats return host_stats
def handle(self, *arg, **options): def handle(self, *arg, **options):
WebsocketsMetricsServer().start()
# it's necessary to delay this import in case # it's necessary to delay this import in case
# database migrations are still running # database migrations are still running
from awx.main.models.ha import Instance from awx.main.models.ha import Instance
@@ -164,15 +166,8 @@ class Command(BaseCommand):
return return
WebsocketsMetricsServer().start() try:
websocket_relay_manager = WebSocketRelayManager() websocket_relay_manager = WebSocketRelayManager()
asyncio.run(websocket_relay_manager.run())
while True: except KeyboardInterrupt:
try: logger.info('Terminating Websocket Relayer')
asyncio.run(websocket_relay_manager.run())
except KeyboardInterrupt:
logger.info('Shutting down Websocket Relayer')
break
except Exception as e:
logger.exception('Error in Websocket Relayer, exception: {}. Restarting in 10 seconds'.format(e))
time.sleep(10)

View File

@@ -5,12 +5,11 @@ import logging
import threading import threading
import time import time
import urllib.parse import urllib.parse
from pathlib import Path
from django.conf import settings from django.conf import settings
from django.contrib.auth import logout from django.contrib.auth import logout
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.db.migrations.recorder import MigrationRecorder from django.db.migrations.executor import MigrationExecutor
from django.db import connection from django.db import connection
from django.shortcuts import redirect from django.shortcuts import redirect
from django.apps import apps from django.apps import apps
@@ -18,11 +17,9 @@ from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.urls import reverse, resolve from django.urls import reverse, resolve
from awx.main import migrations
from awx.main.utils.named_url_graph import generate_graph, GraphNode from awx.main.utils.named_url_graph import generate_graph, GraphNode
from awx.conf import fields, register from awx.conf import fields, register
from awx.main.utils.profiling import AWXProfiler from awx.main.utils.profiling import AWXProfiler
from awx.main.utils.common import memoize
logger = logging.getLogger('awx.main.middleware') logger = logging.getLogger('awx.main.middleware')
@@ -201,22 +198,9 @@ class URLModificationMiddleware(MiddlewareMixin):
request.path_info = new_path request.path_info = new_path
@memoize(ttl=20)
def is_migrating():
latest_number = 0
latest_name = ''
for migration_path in Path(migrations.__path__[0]).glob('[0-9]*.py'):
try:
migration_number = int(migration_path.name.split('_', 1)[0])
except ValueError:
continue
if migration_number > latest_number:
latest_number = migration_number
latest_name = migration_path.name[: -len('.py')]
return not MigrationRecorder(connection).migration_qs.filter(app='main', name=latest_name).exists()
class MigrationRanCheckMiddleware(MiddlewareMixin): class MigrationRanCheckMiddleware(MiddlewareMixin):
def process_request(self, request): def process_request(self, request):
if is_migrating() and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran': executor = MigrationExecutor(connection)
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if bool(plan) and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
return redirect(reverse("ui:migrations_notran")) return redirect(reverse("ui:migrations_notran"))

View File

@@ -1,59 +0,0 @@
# Generated by Django 4.2.6 on 2024-02-15 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0189_inbound_hop_nodes'),
]
operations = [
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
],
default=None,
max_length=32,
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
],
default=None,
max_length=32,
),
),
]

View File

@@ -925,7 +925,6 @@ class InventorySourceOptions(BaseModel):
('rhv', _('Red Hat Virtualization')), ('rhv', _('Red Hat Virtualization')),
('controller', _('Red Hat Ansible Automation Platform')), ('controller', _('Red Hat Ansible Automation Platform')),
('insights', _('Red Hat Insights')), ('insights', _('Red Hat Insights')),
('terraform', _('Terraform State')),
] ]
# From the options of the Django management base command # From the options of the Django management base command
@@ -1631,20 +1630,6 @@ class satellite6(PluginFileInjector):
return ret return ret
class terraform(PluginFileInjector):
plugin_name = 'terraform_state'
base_injector = 'managed'
namespace = 'cloud'
collection = 'terraform'
use_fqcn = True
def inventory_as_dict(self, inventory_update, private_data_dir):
env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
ret = super().inventory_as_dict(inventory_update, private_data_dir)
ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
return ret
class controller(PluginFileInjector): class controller(PluginFileInjector):
plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection
base_injector = 'template' base_injector = 'template'

View File

@@ -5,7 +5,6 @@ from copy import deepcopy
import datetime import datetime
import logging import logging
import json import json
import traceback
from django.db import models from django.db import models
from django.conf import settings from django.conf import settings
@@ -485,29 +484,14 @@ class JobNotificationMixin(object):
if msg_template: if msg_template:
try: try:
msg = env.from_string(msg_template).render(**context) msg = env.from_string(msg_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e: except (TemplateSyntaxError, UndefinedError, SecurityError):
msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))]) msg = ''
if body_template: if body_template:
try: try:
body = env.from_string(body_template).render(**context) body = env.from_string(body_template).render(**context)
except (TemplateSyntaxError, UndefinedError, SecurityError) as e: except (TemplateSyntaxError, UndefinedError, SecurityError):
body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))]) body = ''
# https://datatracker.ietf.org/doc/html/rfc2822#section-2.2
# Body should have at least 2 CRLF, some clients will interpret
# the email incorrectly with blank body. So we will check that
if len(body.strip().splitlines()) <= 2:
# blank body
body = '\r\n'.join(
[
"The template rendering return a blank body.",
"Please check the template.",
"Refer to https://github.com/ansible/awx/issues/13983",
"for further information.",
]
)
return (msg, body) return (msg, body)

View File

@@ -1,6 +1,5 @@
# Copyright (c) 2019 Ansible, Inc. # Copyright (c) 2019 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
# -*-coding:utf-8-*-
class CustomNotificationBase(object): class CustomNotificationBase(object):

View File

@@ -4,15 +4,13 @@ import logging
from django.conf import settings from django.conf import settings
from django.urls import re_path from django.urls import re_path
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter from channels.routing import ProtocolTypeRouter, URLRouter
from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack
from . import consumers from . import consumers
logger = logging.getLogger('awx.main.routing') logger = logging.getLogger('awx.main.routing')
_application = None
class AWXProtocolTypeRouter(ProtocolTypeRouter): class AWXProtocolTypeRouter(ProtocolTypeRouter):
@@ -28,91 +26,13 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
class MultipleURLRouterAdapter:
"""
Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n)
This class allows assocating a websocket url with an auth
Ordering matters. The first matching url will be used.
"""
def __init__(self, *auths):
self._auths = [a for a in auths]
async def __call__(self, scope, receive, send):
"""
Loop through the list of passed in URLRouter's (they may or may not be wrapped by auth).
We know we have exhausted the list of URLRouter patterns when we get a
ValueError('No route found for path %s'). When that happens, move onto the next
URLRouter.
If the final URLRouter raises an error, re-raise it in the end.
We know that we found a match when no error is raised, end the loop.
"""
last_index = len(self._auths) - 1
for i, auth in enumerate(self._auths):
try:
return await auth.__call__(scope, receive, send)
except ValueError as e:
if str(e).startswith('No route found for path'):
# Only surface the error if on the last URLRouter
if i == last_index:
raise
websocket_urlpatterns = [ websocket_urlpatterns = [
re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()),
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
]
websocket_relay_urlpatterns = [
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()), re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
] ]
application = AWXProtocolTypeRouter(
def application_func(cls=AWXProtocolTypeRouter) -> ProtocolTypeRouter: {
return cls( 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
{ }
'websocket': MultipleURLRouterAdapter( )
URLRouter(websocket_relay_urlpatterns),
DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
)
}
)
def __getattr__(name: str) -> ProtocolTypeRouter:
"""
Defer instantiating application.
For testing, we just need it to NOT run on import.
https://peps.python.org/pep-0562/#specification
Normally, someone would get application from this module via:
from awx.main.routing import application
and do something with the application:
application.do_something()
What does the callstack look like when the import runs?
...
awx.main.routing.__getattribute__(...) # <-- we don't define this so NOOP as far as we are concerned
if '__getattr__' in awx.main.routing.__dict__: # <-- this triggers the function we are in
return awx.main.routing.__dict__.__getattr__("application")
Why isn't this function simply implemented as:
def __getattr__(name):
if not _application:
_application = application_func()
return _application
It could. I manually tested it and it passes test_routing.py.
But my understanding after reading the PEP-0562 specification link above is that
performance would be a bit worse due to the extra __getattribute__ calls when
we reference non-global variables.
"""
if name == "application":
globs = globals()
if not globs['_application']:
globs['_application'] = application_func()
return globs['_application']
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

View File

@@ -29,7 +29,7 @@ class RunnerCallback:
self.safe_env = {} self.safe_env = {}
self.event_ct = 0 self.event_ct = 0
self.model = model self.model = model
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5) self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.wrapup_event_dispatched = False self.wrapup_event_dispatched = False
self.artifacts_processed = False self.artifacts_processed = False
self.extra_update_fields = {} self.extra_update_fields = {}

View File

@@ -114,7 +114,7 @@ class BaseTask(object):
def __init__(self): def __init__(self):
self.cleanup_paths = [] self.cleanup_paths = []
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5) self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
self.runner_callback = self.callback_class(model=self.model) self.runner_callback = self.callback_class(model=self.model)
def update_model(self, pk, _attempt=0, **updates): def update_model(self, pk, _attempt=0, **updates):

View File

@@ -6,7 +6,6 @@ import itertools
import json import json
import logging import logging
import os import os
import psycopg
from io import StringIO from io import StringIO
from contextlib import redirect_stdout from contextlib import redirect_stdout
import shutil import shutil
@@ -417,7 +416,7 @@ def handle_removed_image(remove_images=None):
@task(queue=get_task_queuename) @task(queue=get_task_queuename)
def cleanup_images_and_files(): def cleanup_images_and_files():
_cleanup_images_and_files(image_prune=True) _cleanup_images_and_files()
@task(queue=get_task_queuename) @task(queue=get_task_queuename)
@@ -631,18 +630,10 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen)) logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
except DatabaseError as e: except DatabaseError as e:
cause = e.__cause__ if 'did not affect any rows' in str(e):
if cause and hasattr(cause, 'sqlstate'): logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.debug('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
if sqlstate == psycopg.errors.NoData:
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception("Error marking {} as lost.".format(other_inst.hostname))
else: else:
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname)) logger.exception('Error marking {} as lost'.format(other_inst.hostname))
# Run local reaper # Run local reaper
if worker_tasks is not None: if worker_tasks is not None:
@@ -797,19 +788,10 @@ def update_inventory_computed_fields(inventory_id):
try: try:
i.update_computed_fields() i.update_computed_fields()
except DatabaseError as e: except DatabaseError as e:
# https://github.com/django/django/blob/eff21d8e7a1cb297aedf1c702668b590a1b618f3/django/db/models/base.py#L1105 if 'did not affect any rows' in str(e):
# django raises DatabaseError("Forced update did not affect any rows.") logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
# if sqlstate is set then there was a database error and otherwise will re-raise that error raise
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
raise
# otherwise
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
def update_smart_memberships_for_inventory(smart_inventory): def update_smart_memberships_for_inventory(smart_inventory):

View File

@@ -1,3 +0,0 @@
{
"TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
}

View File

@@ -1,8 +1,13 @@
from awx.main.tests.functional.conftest import * # noqa from awx.main.tests.functional.conftest import * # noqa
import os
import pytest
@pytest.fixture() def pytest_addoption(parser):
def release(): parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0")
return os.environ.get('VERSION_TARGET', '')
def pytest_generate_tests(metafunc):
# This is called for every test. Only get/set command line arguments
# if the argument is specified in the list of test "fixturenames".
option_value = metafunc.config.option.release
if 'release' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("release", [option_value])

View File

@@ -3,19 +3,15 @@ import pytest
from unittest import mock from unittest import mock
import urllib.parse import urllib.parse
from unittest.mock import PropertyMock from unittest.mock import PropertyMock
import importlib
# Django # Django
from django.urls import resolve from django.urls import resolve
from django.http import Http404 from django.http import Http404
from django.apps import apps
from django.core.handlers.exception import response_for_exception from django.core.handlers.exception import response_for_exception
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder from django.core.serializers.json import DjangoJSONEncoder
from django.db.backends.sqlite3.base import SQLiteCursorWrapper from django.db.backends.sqlite3.base import SQLiteCursorWrapper
from django.db.models.signals import post_migrate
# AWX # AWX
from awx.main.models.projects import Project from awx.main.models.projects import Project
from awx.main.models.ha import Instance from awx.main.models.ha import Instance
@@ -45,19 +41,10 @@ from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.oauth import OAuth2Application as Application from awx.main.models.oauth import OAuth2Application as Application
from awx.main.models.execution_environments import ExecutionEnvironment from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.utils import is_testing
__SWAGGER_REQUESTS__ = {} __SWAGGER_REQUESTS__ = {}
# HACK: the dab_resource_registry app required ServiceID in migrations which checks do not run
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
if is_testing():
post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def swagger_autogen(requests=__SWAGGER_REQUESTS__): def swagger_autogen(requests=__SWAGGER_REQUESTS__):
return requests return requests

View File

@@ -193,7 +193,6 @@ class TestInventorySourceInjectors:
('satellite6', 'theforeman.foreman.foreman'), ('satellite6', 'theforeman.foreman.foreman'),
('insights', 'redhatinsights.insights.insights'), ('insights', 'redhatinsights.insights.insights'),
('controller', 'awx.awx.tower'), ('controller', 'awx.awx.tower'),
('terraform', 'cloud.terraform.terraform_state'),
], ],
) )
def test_plugin_proper_names(self, source, proper_name): def test_plugin_proper_names(self, source, proper_name):

View File

@@ -107,7 +107,6 @@ def read_content(private_data_dir, raw_env, inventory_update):
for filename in os.listdir(os.path.join(private_data_dir, subdir)): for filename in os.listdir(os.path.join(private_data_dir, subdir)):
filename_list.append(os.path.join(subdir, filename)) filename_list.append(os.path.join(subdir, filename))
filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0]) filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0])
inventory_content = ""
for filename in filename_list: for filename in filename_list:
if filename in ('args', 'project'): if filename in ('args', 'project'):
continue # Ansible runner continue # Ansible runner
@@ -131,7 +130,6 @@ def read_content(private_data_dir, raw_env, inventory_update):
dir_contents[abs_file_path] = f.read() dir_contents[abs_file_path] = f.read()
# Declare a reference to inventory plugin file if it exists # Declare a reference to inventory plugin file if it exists
if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]: if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]:
inventory_content = dir_contents[abs_file_path]
referenced_paths.add(abs_file_path) # used as inventory file referenced_paths.add(abs_file_path) # used as inventory file
elif cache_file_regex.match(abs_file_path): elif cache_file_regex.match(abs_file_path):
file_aliases[abs_file_path] = 'cache_file' file_aliases[abs_file_path] = 'cache_file'
@@ -159,11 +157,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
content = {} content = {}
for abs_file_path, file_content in dir_contents.items(): for abs_file_path, file_content in dir_contents.items():
# assert that all files laid down are used # assert that all files laid down are used
if ( if abs_file_path not in referenced_paths and abs_file_path not in ignore_files:
abs_file_path not in referenced_paths
and to_container_path(abs_file_path, private_data_dir) not in inventory_content
and abs_file_path not in ignore_files
):
raise AssertionError( raise AssertionError(
"File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4)) "File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4))
) )

View File

@@ -411,14 +411,14 @@ def test_project_delete(delete, organization, admin_user):
@pytest.mark.parametrize( @pytest.mark.parametrize(
'order_by, expected_names', 'order_by, expected_names, expected_ids',
[ [
('name', ['alice project', 'bob project', 'shared project']), ('name', ['alice project', 'bob project', 'shared project'], [1, 2, 3]),
('-name', ['shared project', 'bob project', 'alice project']), ('-name', ['shared project', 'bob project', 'alice project'], [3, 2, 1]),
], ],
) )
@pytest.mark.django_db @pytest.mark.django_db
def test_project_list_ordering_by_name(get, order_by, expected_names, organization_factory): def test_project_list_ordering_by_name(get, order_by, expected_names, expected_ids, organization_factory):
'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable' 'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable'
objects = organization_factory( objects = organization_factory(
'org1', 'org1',
@@ -426,11 +426,13 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, organizati
superusers=['admin'], superusers=['admin'],
) )
project_names = [] project_names = []
project_ids = []
# TODO: ask for an order by here that doesn't apply # TODO: ask for an order by here that doesn't apply
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results'] results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
for x in range(len(results)): for x in range(len(results)):
project_names.append(results[x]['name']) project_names.append(results[x]['name'])
assert project_names == expected_names project_ids.append(results[x]['id'])
assert project_names == expected_names and project_ids == expected_ids
@pytest.mark.parametrize('order_by', ('name', '-name')) @pytest.mark.parametrize('order_by', ('name', '-name'))
@@ -448,8 +450,7 @@ def test_project_list_ordering_with_duplicate_names(get, order_by, organization_
for x in range(3): for x in range(3):
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results'] results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
project_ids[x] = [proj['id'] for proj in results] project_ids[x] = [proj['id'] for proj in results]
assert project_ids[0] == project_ids[1] == project_ids[2] assert project_ids[0] == project_ids[1] == project_ids[2] == [1, 2, 3, 4, 5]
assert project_ids[0] == sorted(project_ids[0])
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -1,90 +0,0 @@
import pytest
from django.contrib.auth.models import AnonymousUser
from channels.routing import ProtocolTypeRouter
from channels.testing.websocket import WebsocketCommunicator
from awx.main.consumers import WebsocketSecretAuthHelper
@pytest.fixture
def application():
# code in routing hits the db on import because .. settings cache
from awx.main.routing import application_func
yield application_func(ProtocolTypeRouter)
@pytest.fixture
def websocket_server_generator(application):
def fn(endpoint):
return WebsocketCommunicator(application, endpoint)
return fn
@pytest.mark.asyncio
@pytest.mark.django_db
class TestWebsocketRelay:
@pytest.fixture
def websocket_relay_secret_generator(self, settings):
def fn(secret, set_broadcast_websocket_secret=False):
secret_backup = settings.BROADCAST_WEBSOCKET_SECRET
settings.BROADCAST_WEBSOCKET_SECRET = 'foobar'
res = ('secret'.encode('utf-8'), WebsocketSecretAuthHelper.construct_secret().encode('utf-8'))
if set_broadcast_websocket_secret is False:
settings.BROADCAST_WEBSOCKET_SECRET = secret_backup
return res
return fn
@pytest.fixture
def websocket_relay_secret(self, settings, websocket_relay_secret_generator):
return websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=True)
async def test_authorized(self, websocket_server_generator, websocket_relay_secret):
server = websocket_server_generator('/websocket/relay/')
server.scope['headers'] = (websocket_relay_secret,)
connected, _ = await server.connect()
assert connected is True
async def test_not_authorized(self, websocket_server_generator):
server = websocket_server_generator('/websocket/relay/')
connected, _ = await server.connect()
assert connected is False, "Connection to the relay websocket without auth. We expected the client to be denied."
async def test_wrong_secret(self, websocket_server_generator, websocket_relay_secret_generator):
server = websocket_server_generator('/websocket/relay/')
server.scope['headers'] = (websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=False),)
connected, _ = await server.connect()
assert connected is False
@pytest.mark.asyncio
@pytest.mark.django_db
class TestWebsocketEventConsumer:
async def test_unauthorized_anonymous(self, websocket_server_generator):
server = websocket_server_generator('/websocket/')
server.scope['user'] = AnonymousUser()
connected, _ = await server.connect()
assert connected is False, "Anonymous user should NOT be allowed to login."
@pytest.mark.skip(reason="Ran out of coding time.")
async def test_authorized(self, websocket_server_generator, application, admin):
server = websocket_server_generator('/websocket/')
"""
I ran out of time. Here is what I was thinking ...
Inject a valid session into the cookies in the header
server.scope['headers'] = (
(b'cookie', ...),
)
"""
connected, _ = await server.connect()
assert connected is True, "User should be allowed in via cookies auth via a session key in the cookies"

View File

@@ -1,6 +1,11 @@
# Python # Python
from unittest import mock
import uuid import uuid
# patch python-ldap
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap # NOQA
# Load development settings for base variables. # Load development settings for base variables.
from awx.settings.development import * # NOQA from awx.settings.development import * # NOQA

View File

@@ -1,122 +0,0 @@
from io import StringIO
import json
from django.core.management import call_command
from django.test import TestCase, override_settings
settings_dict = {
"SOCIAL_AUTH_SAML_SP_ENTITY_ID": "SP_ENTITY_ID",
"SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "SP_PUBLIC_CERT",
"SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "SP_PRIVATE_KEY",
"SOCIAL_AUTH_SAML_ORG_INFO": "ORG_INFO",
"SOCIAL_AUTH_SAML_TECHNICAL_CONTACT": "TECHNICAL_CONTACT",
"SOCIAL_AUTH_SAML_SUPPORT_CONTACT": "SUPPORT_CONTACT",
"SOCIAL_AUTH_SAML_SP_EXTRA": "SP_EXTRA",
"SOCIAL_AUTH_SAML_SECURITY_CONFIG": "SECURITY_CONFIG",
"SOCIAL_AUTH_SAML_EXTRA_DATA": "EXTRA_DATA",
"SOCIAL_AUTH_SAML_ENABLED_IDPS": {
"Keycloak": {
"attr_last_name": "last_name",
"attr_groups": "groups",
"attr_email": "email",
"attr_user_permanent_id": "name_id",
"attr_username": "username",
"entity_id": "https://example.com/auth/realms/awx",
"url": "https://example.com/auth/realms/awx/protocol/saml",
"x509cert": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
"attr_first_name": "first_name",
}
},
"SOCIAL_AUTH_SAML_CALLBACK_URL": "CALLBACK_URL",
"AUTH_LDAP_1_SERVER_URI": "SERVER_URI",
"AUTH_LDAP_1_BIND_DN": "BIND_DN",
"AUTH_LDAP_1_BIND_PASSWORD": "BIND_PASSWORD",
"AUTH_LDAP_1_GROUP_SEARCH": ["GROUP_SEARCH"],
"AUTH_LDAP_1_GROUP_TYPE": "string object",
"AUTH_LDAP_1_GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"},
"AUTH_LDAP_1_USER_DN_TEMPLATE": "USER_DN_TEMPLATE",
"AUTH_LDAP_1_USER_SEARCH": ["USER_SEARCH"],
"AUTH_LDAP_1_USER_ATTR_MAP": {
"email": "email",
"last_name": "last_name",
"first_name": "first_name",
},
"AUTH_LDAP_1_CONNECTION_OPTIONS": {},
"AUTH_LDAP_1_START_TLS": None,
}
@override_settings(**settings_dict)
class TestDumpAuthConfigCommand(TestCase):
def setUp(self):
super().setUp()
self.expected_config = [
{
"type": "awx.authentication.authenticator_plugins.saml",
"name": "Keycloak",
"enabled": True,
"create_objects": True,
"users_unique": False,
"remove_users": True,
"configuration": {
"SP_ENTITY_ID": "SP_ENTITY_ID",
"SP_PUBLIC_CERT": "SP_PUBLIC_CERT",
"SP_PRIVATE_KEY": "SP_PRIVATE_KEY",
"ORG_INFO": "ORG_INFO",
"TECHNICAL_CONTACT": "TECHNICAL_CONTACT",
"SUPPORT_CONTACT": "SUPPORT_CONTACT",
"SP_EXTRA": "SP_EXTRA",
"SECURITY_CONFIG": "SECURITY_CONFIG",
"EXTRA_DATA": "EXTRA_DATA",
"ENABLED_IDPS": {
"Keycloak": {
"attr_last_name": "last_name",
"attr_groups": "groups",
"attr_email": "email",
"attr_user_permanent_id": "name_id",
"attr_username": "username",
"entity_id": "https://example.com/auth/realms/awx",
"url": "https://example.com/auth/realms/awx/protocol/saml",
"x509cert": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
"attr_first_name": "first_name",
}
},
"CALLBACK_URL": "CALLBACK_URL",
"IDP_URL": "https://example.com/auth/realms/awx/protocol/saml",
"IDP_X509_CERT": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
"IDP_ENTITY_ID": "https://example.com/auth/realms/awx",
"IDP_ATTR_EMAIL": "email",
"IDP_GROUPS": "groups",
"IDP_ATTR_USERNAME": "username",
"IDP_ATTR_LAST_NAME": "last_name",
"IDP_ATTR_FIRST_NAME": "first_name",
"IDP_ATTR_USER_PERMANENT_ID": "name_id",
},
},
{
"type": "awx.authentication.authenticator_plugins.ldap",
"name": "1",
"enabled": True,
"create_objects": True,
"users_unique": False,
"remove_users": True,
"configuration": {
"SERVER_URI": "SERVER_URI",
"BIND_DN": "BIND_DN",
"BIND_PASSWORD": "BIND_PASSWORD",
"CONNECTION_OPTIONS": {},
"GROUP_TYPE": "str",
"GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"},
"GROUP_SEARCH": ["GROUP_SEARCH"],
"START_TLS": None,
"USER_DN_TEMPLATE": "USER_DN_TEMPLATE",
"USER_ATTR_MAP": {"email": "email", "last_name": "last_name", "first_name": "first_name"},
"USER_SEARCH": ["USER_SEARCH"],
},
},
]
def test_json_returned_from_cmd(self):
output = StringIO()
call_command("dump_auth_config", stdout=output)
assert json.loads(output.getvalue()) == self.expected_config

View File

@@ -1,64 +0,0 @@
import pytest
from unittest.mock import MagicMock, patch
from awx.main.tasks.system import update_inventory_computed_fields
from awx.main.models import Inventory
from django.db import DatabaseError
@pytest.fixture
def mock_logger():
with patch("awx.main.tasks.system.logger") as logger:
yield logger
@pytest.fixture
def mock_inventory():
return MagicMock(spec=Inventory)
def test_update_inventory_computed_fields_existing_inventory(mock_logger, mock_inventory):
# Mocking the Inventory.objects.filter method to return a non-empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = True
mock_filter.return_value.__getitem__.return_value = mock_inventory
# Mocking the update_computed_fields method
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_update_computed_fields.assert_called_once()
# You can add more assertions based on your specific requirements
def test_update_inventory_computed_fields_missing_inventory(mock_logger):
# Mocking the Inventory.objects.filter method to return an empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = False
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_logger.error.assert_called_once_with("Update Inventory Computed Fields failed due to missing inventory: 1")
def test_update_inventory_computed_fields_database_error_nosqlstate(mock_logger, mock_inventory):
# Mocking the Inventory.objects.filter method to return a non-empty queryset
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
mock_filter.return_value.exists.return_value = True
mock_filter.return_value.__getitem__.return_value = mock_inventory
# Mocking the update_computed_fields method
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
# Simulating the update_computed_fields method to explicitly raise a DatabaseError
mock_update_computed_fields.side_effect = DatabaseError("Some error")
update_inventory_computed_fields(1)
# Assertions
mock_filter.assert_called_once_with(id=1)
mock_update_computed_fields.assert_called_once()
mock_inventory.update_computed_fields.assert_called_once()

View File

@@ -121,10 +121,6 @@ def test_get_model_for_valid_type(model_type, model_class):
assert common.get_model_for_type(model_type) == model_class assert common.get_model_for_type(model_type) == model_class
def test_is_testing():
assert common.is_testing() is True
@pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS]) @pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS])
def test_get_capacity_type(model_type, model_class): def test_get_capacity_type(model_type, model_class):
if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'): if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'):

View File

@@ -7,7 +7,6 @@ import json
import yaml import yaml
import logging import logging
import time import time
import psycopg
import os import os
import subprocess import subprocess
import re import re
@@ -24,7 +23,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime from django.utils.dateparse import parse_datetime
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property from django.utils.functional import cached_property
from django.db import connection, DatabaseError, transaction, ProgrammingError, IntegrityError from django.db import connection, transaction, ProgrammingError, IntegrityError
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet from django.db.models.query import QuerySet
@@ -137,7 +136,7 @@ def underscore_to_camelcase(s):
@functools.cache @functools.cache
def is_testing(argv=None): def is_testing(argv=None):
'''Return True if running django or py.test unit tests.''' '''Return True if running django or py.test unit tests.'''
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'awx.main.tests.settings_for_test': if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True return True
argv = sys.argv if argv is None else argv argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]): if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
@@ -1156,25 +1155,11 @@ def create_partition(tblname, start=None):
f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} ' f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');' f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
) )
except (ProgrammingError, IntegrityError) as e: except (ProgrammingError, IntegrityError) as e:
cause = e.__cause__ if 'already exists' in str(e):
if cause and hasattr(cause, 'sqlstate'): logger.info(f'Caught known error due to partition creation race: {e}')
sqlstate = cause.sqlstate else:
sqlstate_cls = psycopg.errors.lookup(sqlstate) raise
if psycopg.errors.DuplicateTable == sqlstate_cls or psycopg.errors.UniqueViolation == sqlstate_cls:
logger.info(f'Caught known error due to partition creation race: {e}')
else:
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_cls))
raise
except DatabaseError as e:
cause = e.__cause__
if cause and hasattr(cause, 'sqlstate'):
sqlstate = cause.sqlstate
sqlstate_str = psycopg.errors.lookup(sqlstate)
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
raise
def cleanup_new_process(func): def cleanup_new_process(func):

View File

@@ -302,35 +302,20 @@ class WebSocketRelayManager(object):
self.stats_mgr.start() self.stats_mgr.start()
# Set up a pg_notify consumer for allowing web nodes to "provision" and "deprovision" themselves gracefully. # Set up a pg_notify consumer for allowing web nodes to "provision" and "deprovision" themselves gracefully.
database_conf = settings.DATABASES['default'].copy() database_conf = settings.DATABASES['default']
database_conf['OPTIONS'] = database_conf.get('OPTIONS', {}).copy() async_conn = await psycopg.AsyncConnection.connect(
dbname=database_conf['NAME'],
for k, v in settings.LISTENER_DATABASES.get('default', {}).items(): host=database_conf['HOST'],
database_conf[k] = v user=database_conf['USER'],
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items(): password=database_conf['PASSWORD'],
database_conf['OPTIONS'][k] = v port=database_conf['PORT'],
**database_conf.get("OPTIONS", {}),
task = None )
await async_conn.set_autocommit(True)
event_loop.create_task(self.on_ws_heartbeat(async_conn))
# Establishes a websocket connection to /websocket/relay on all API servers # Establishes a websocket connection to /websocket/relay on all API servers
while True: while True:
if not task or task.done():
try:
async_conn = await psycopg.AsyncConnection.connect(
dbname=database_conf['NAME'],
host=database_conf['HOST'],
user=database_conf['USER'],
password=database_conf['PASSWORD'],
port=database_conf['PORT'],
**database_conf.get("OPTIONS", {}),
)
task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
logger.info("Creating `on_ws_heartbeat` task in event loop.")
except Exception as e:
logger.warning(f"Failed to connect to database for pg_notify: {e}")
future_remote_hosts = self.known_hosts.keys() future_remote_hosts = self.known_hosts.keys()
current_remote_hosts = self.relay_connections.keys() current_remote_hosts = self.relay_connections.keys()
deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts) deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
@@ -354,7 +339,7 @@ class WebSocketRelayManager(object):
if deleted_remote_hosts: if deleted_remote_hosts:
logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list") logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts]) await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts)
if new_remote_hosts: if new_remote_hosts:
logger.info(f"Adding {new_remote_hosts} to websocket broadcast list") logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

View File

@@ -216,59 +216,42 @@
- block: - block:
- name: Fetch galaxy roles from roles/requirements.(yml/yaml) - name: Fetch galaxy roles from roles/requirements.(yml/yaml)
ansible.builtin.command: ansible.builtin.command:
cmd: "ansible-galaxy role install -r {{ req_file }} {{ verbosity }}" cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}"
register: galaxy_result register: galaxy_result
vars: with_fileglob:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" - "{{ project_path | quote }}/roles/requirements.yaml"
req_candidates: - "{{ project_path | quote }}/roles/requirements.yml"
files:
- "{{ project_path | quote }}/roles/requirements.yml"
- "{{ project_path | quote }}/roles/requirements.yaml"
skip: True
changed_when: "'was installed successfully' in galaxy_result.stdout" changed_when: "'was installed successfully' in galaxy_result.stdout"
when: when: roles_enabled | bool
- roles_enabled | bool
- req_file
tags: tags:
- install_roles - install_roles
- name: Fetch galaxy collections from collections/requirements.(yml/yaml) - name: Fetch galaxy collections from collections/requirements.(yml/yaml)
ansible.builtin.command: ansible.builtin.command:
cmd: "ansible-galaxy collection install -r {{ req_file }} {{ verbosity }}" cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}"
register: galaxy_collection_result register: galaxy_collection_result
vars: with_fileglob:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" - "{{ project_path | quote }}/collections/requirements.yaml"
req_candidates: - "{{ project_path | quote }}/collections/requirements.yml"
files:
- "{{ project_path | quote }}/collections/requirements.yml"
- "{{ project_path | quote }}/collections/requirements.yaml"
skip: True
changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout" changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout"
when: when:
- "ansible_version.full is version_compare('2.9', '>=')" - "ansible_version.full is version_compare('2.9', '>=')"
- collections_enabled | bool - collections_enabled | bool
- req_file
tags: tags:
- install_collections - install_collections
# requirements.yml in project root can be either "old" (roles only) or "new" (collections+roles) format
- name: Fetch galaxy roles and collections from requirements.(yml/yaml) - name: Fetch galaxy roles and collections from requirements.(yml/yaml)
ansible.builtin.command: ansible.builtin.command:
cmd: "ansible-galaxy install -r {{ req_file }} {{ verbosity }}" cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}"
register: galaxy_combined_result register: galaxy_combined_result
vars: with_fileglob:
req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}" - "{{ project_path | quote }}/requirements.yaml"
req_candidates: - "{{ project_path | quote }}/requirements.yml"
files:
- "{{ project_path | quote }}/requirements.yaml"
- "{{ project_path | quote }}/requirements.yml"
skip: True
changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout" changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout"
when: when:
- "ansible_version.full is version_compare('2.10', '>=')" - "ansible_version.full is version_compare('2.10', '>=')"
- collections_enabled | bool - collections_enabled | bool
- roles_enabled | bool - roles_enabled | bool
- req_file
tags: tags:
- install_collections - install_collections
- install_roles - install_roles

View File

@@ -1,22 +0,0 @@
from ansible_base.resource_registry.registry import ParentResource, ResourceConfig, ServiceAPIConfig, SharedResource
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
from awx.main import models
class APIConfig(ServiceAPIConfig):
service_type = "awx"
RESOURCE_LIST = (
ResourceConfig(
models.Organization,
shared_resource=SharedResource(serializer=OrganizationType, is_provider=False),
),
ResourceConfig(models.User, shared_resource=SharedResource(serializer=UserType, is_provider=False), name_field="username"),
ResourceConfig(
models.Team,
shared_resource=SharedResource(serializer=TeamType, is_provider=False),
parent_resources=[ParentResource(model=models.Organization, field_name="organization")],
),
)

View File

@@ -353,11 +353,8 @@ INSTALLED_APPS = [
'awx.sso', 'awx.sso',
'solo', 'solo',
'ansible_base.rest_filters', 'ansible_base.rest_filters',
'ansible_base.jwt_consumer',
'ansible_base.resource_registry',
] ]
INTERNAL_IPS = ('127.0.0.1',) INTERNAL_IPS = ('127.0.0.1',)
MAX_PAGE_SIZE = 200 MAX_PAGE_SIZE = 200
@@ -365,7 +362,6 @@ REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination', 'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
'PAGE_SIZE': 25, 'PAGE_SIZE': 25,
'DEFAULT_AUTHENTICATION_CLASSES': ( 'DEFAULT_AUTHENTICATION_CLASSES': (
'ansible_base.jwt_consumer.awx.auth.AwxJWTAuthentication',
'awx.api.authentication.LoggedOAuth2Authentication', 'awx.api.authentication.LoggedOAuth2Authentication',
'awx.api.authentication.SessionAuthentication', 'awx.api.authentication.SessionAuthentication',
'awx.api.authentication.LoggedBasicAuthentication', 'awx.api.authentication.LoggedBasicAuthentication',
@@ -759,14 +755,6 @@ SATELLITE6_INSTANCE_ID_VAR = 'foreman_id,foreman.id'
INSIGHTS_INSTANCE_ID_VAR = 'insights_id' INSIGHTS_INSTANCE_ID_VAR = 'insights_id'
INSIGHTS_EXCLUDE_EMPTY_GROUPS = False INSIGHTS_EXCLUDE_EMPTY_GROUPS = False
# ----------------
# -- Terraform State --
# ----------------
# TERRAFORM_ENABLED_VAR =
# TERRAFORM_ENABLED_VALUE =
TERRAFORM_INSTANCE_ID_VAR = 'id'
TERRAFORM_EXCLUDE_EMPTY_GROUPS = True
# --------------------- # ---------------------
# ----- Custom ----- # ----- Custom -----
# --------------------- # ---------------------
@@ -1120,14 +1108,8 @@ METRICS_SUBSYSTEM_CONFIG = {
# django-ansible-base # django-ansible-base
ANSIBLE_BASE_TEAM_MODEL = 'main.Team' ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization' ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'
ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api'
from ansible_base.lib import dynamic_config # noqa: E402 from ansible_base.lib import dynamic_config # noqa: E402
settings_file = os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py') settings_file = os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py')
include(settings_file) include(settings_file)
# Add a postfix to the API URL patterns
# example if set to '' API pattern will be /api
# example if set to 'controller' API pattern will be /api AND /api/controller
OPTIONAL_API_URLPATTERN_PREFIX = ''

View File

@@ -72,8 +72,6 @@ AWX_CALLBACK_PROFILE = True
# Allows user to trigger task managers directly for debugging and profiling purposes. # Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development' # Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False AWX_DISABLE_TASK_MANAGERS = False
# Needed for launching runserver in debug mode
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!================================= # ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Store a snapshot of default settings at this point before loading any # Store a snapshot of default settings at this point before loading any

View File

@@ -59,7 +59,6 @@ register(
help_text=_('Maximum number of job events for the UI to retrieve within a single request.'), help_text=_('Maximum number of job events for the UI to retrieve within a single request.'),
category=_('UI'), category=_('UI'),
category_slug='ui', category_slug='ui',
hidden=True,
) )
register( register(
@@ -69,5 +68,4 @@ register(
help_text=_('If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details.'), help_text=_('If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details.'),
category=_('UI'), category=_('UI'),
category_slug='ui', category_slug='ui',
hidden=True,
) )

View File

@@ -13,7 +13,7 @@
"@patternfly/react-table": "4.113.0", "@patternfly/react-table": "4.113.0",
"ace-builds": "^1.10.1", "ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2", "ansi-to-html": "0.7.2",
"axios": "^1.6.7", "axios": "0.27.2",
"d3": "7.6.1", "d3": "7.6.1",
"dagre": "^0.8.4", "dagre": "^0.8.4",
"dompurify": "2.4.0", "dompurify": "2.4.0",
@@ -5940,13 +5940,12 @@
} }
}, },
"node_modules/axios": { "node_modules/axios": {
"version": "1.6.7", "version": "0.27.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
"integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==", "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
"dependencies": { "dependencies": {
"follow-redirects": "^1.15.4", "follow-redirects": "^1.14.9",
"form-data": "^4.0.0", "form-data": "^4.0.0"
"proxy-from-env": "^1.1.0"
} }
}, },
"node_modules/axios/node_modules/form-data": { "node_modules/axios/node_modules/form-data": {
@@ -10388,9 +10387,9 @@
} }
}, },
"node_modules/follow-redirects": { "node_modules/follow-redirects": {
"version": "1.15.5", "version": "1.15.1",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
"integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==", "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==",
"funding": [ "funding": [
{ {
"type": "individual", "type": "individual",
@@ -18350,11 +18349,6 @@
"node": ">= 0.10" "node": ">= 0.10"
} }
}, },
"node_modules/proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"node_modules/pseudolocale": { "node_modules/pseudolocale": {
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz", "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",
@@ -26921,13 +26915,12 @@
"dev": true "dev": true
}, },
"axios": { "axios": {
"version": "1.6.7", "version": "0.27.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.7.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-0.27.2.tgz",
"integrity": "sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==", "integrity": "sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==",
"requires": { "requires": {
"follow-redirects": "^1.15.4", "follow-redirects": "^1.14.9",
"form-data": "^4.0.0", "form-data": "^4.0.0"
"proxy-from-env": "^1.1.0"
}, },
"dependencies": { "dependencies": {
"form-data": { "form-data": {
@@ -30378,9 +30371,9 @@
} }
}, },
"follow-redirects": { "follow-redirects": {
"version": "1.15.5", "version": "1.15.1",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.5.tgz", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz",
"integrity": "sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==" "integrity": "sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA=="
}, },
"fork-ts-checker-webpack-plugin": { "fork-ts-checker-webpack-plugin": {
"version": "6.5.2", "version": "6.5.2",
@@ -36332,11 +36325,6 @@
} }
} }
}, },
"proxy-from-env": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
},
"pseudolocale": { "pseudolocale": {
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz", "resolved": "https://registry.npmjs.org/pseudolocale/-/pseudolocale-1.2.0.tgz",

View File

@@ -13,7 +13,7 @@
"@patternfly/react-table": "4.113.0", "@patternfly/react-table": "4.113.0",
"ace-builds": "^1.10.1", "ace-builds": "^1.10.1",
"ansi-to-html": "0.7.2", "ansi-to-html": "0.7.2",
"axios": "^1.6.7", "axios": "0.27.2",
"d3": "7.6.1", "d3": "7.6.1",
"dagre": "^0.8.4", "dagre": "^0.8.4",
"dompurify": "2.4.0", "dompurify": "2.4.0",

View File

@@ -67,18 +67,27 @@ function getInitialValues(launchConfig, surveyConfig, resource) {
const values = {}; const values = {};
if (surveyConfig?.spec) { if (surveyConfig?.spec) {
surveyConfig.spec.forEach((question) => { surveyConfig.spec.forEach((question) => {
if (resource?.extra_data && resource?.extra_data[question.variable]) { if (question.type === 'multiselect') {
values[`survey_${question.variable}`] =
resource.extra_data[question.variable];
} else if (question.type === 'multiselect') {
values[`survey_${question.variable}`] = question.default values[`survey_${question.variable}`] = question.default
? question.default.split('\n') ? question.default.split('\n')
: []; : [];
} else { } else {
values[`survey_${question.variable}`] = question.default ?? ''; values[`survey_${question.variable}`] = question.default ?? '';
} }
if (resource?.extra_data) {
Object.entries(resource.extra_data).forEach(([key, value]) => {
if (key === question.variable) {
if (question.type === 'multiselect') {
values[`survey_${question.variable}`] = value;
} else {
values[`survey_${question.variable}`] = value;
}
}
});
}
}); });
} }
return values; return values;
} }

View File

@@ -257,17 +257,12 @@ function PromptDetail({
numChips={5} numChips={5}
ouiaId="prompt-job-tag-chips" ouiaId="prompt-job-tag-chips"
totalChips={ totalChips={
overrides.job_tags === undefined || !overrides.job_tags || overrides.job_tags === ''
overrides.job_tags === null ||
overrides.job_tags === ''
? 0 ? 0
: overrides.job_tags.split(',').length : overrides.job_tags.split(',').length
} }
> >
{overrides.job_tags !== undefined && {overrides.job_tags.length > 0 &&
overrides.job_tags !== null &&
overrides.job_tags !== '' &&
overrides.job_tags.length > 0 &&
overrides.job_tags.split(',').map((jobTag) => ( overrides.job_tags.split(',').map((jobTag) => (
<Chip <Chip
key={jobTag} key={jobTag}
@@ -289,18 +284,13 @@ function PromptDetail({
<ChipGroup <ChipGroup
numChips={5} numChips={5}
totalChips={ totalChips={
overrides.skip_tags === undefined || !overrides.skip_tags || overrides.skip_tags === ''
overrides.skip_tags === null ||
overrides.skip_tags === ''
? 0 ? 0
: overrides.skip_tags.split(',').length : overrides.skip_tags.split(',').length
} }
ouiaId="prompt-skip-tag-chips" ouiaId="prompt-skip-tag-chips"
> >
{overrides.skip_tags !== undefined && {overrides.skip_tags.length > 0 &&
overrides.skip_tags !== null &&
overrides.skip_tags !== '' &&
overrides.skip_tags.length > 0 &&
overrides.skip_tags.split(',').map((skipTag) => ( overrides.skip_tags.split(',').map((skipTag) => (
<Chip <Chip
key={skipTag} key={skipTag}

View File

@@ -13,18 +13,6 @@ import ScheduleForm from '../shared/ScheduleForm';
import buildRuleSet from '../shared/buildRuleSet'; import buildRuleSet from '../shared/buildRuleSet';
import { CardBody } from '../../Card'; import { CardBody } from '../../Card';
function generateExtraData(extra_vars, surveyValues, surveyConfiguration) {
const extraVars = parseVariableField(
yaml.dump(mergeExtraVars(extra_vars, surveyValues))
);
surveyConfiguration.spec.forEach((q) => {
if (!surveyValues[q.variable]) {
delete extraVars[q.variable];
}
});
return extraVars;
}
function ScheduleEdit({ function ScheduleEdit({
hasDaysToKeepField, hasDaysToKeepField,
schedule, schedule,
@@ -45,12 +33,10 @@ function ScheduleEdit({
surveyConfiguration, surveyConfiguration,
originalInstanceGroups, originalInstanceGroups,
originalLabels, originalLabels,
scheduleCredentials = [], scheduleCredentials = []
isPromptTouched = false
) => { ) => {
const { const {
execution_environment, execution_environment,
extra_vars = null,
instance_groups, instance_groups,
inventory, inventory,
credentials = [], credentials = [],
@@ -62,54 +48,45 @@ function ScheduleEdit({
labels, labels,
...submitValues ...submitValues
} = values; } = values;
let extraVars;
const surveyValues = getSurveyValues(values); const surveyValues = getSurveyValues(values);
if ( if (
isPromptTouched && !Object.values(surveyValues).length &&
surveyConfiguration?.spec && surveyConfiguration?.spec?.length
launchConfiguration?.ask_variables_on_launch
) { ) {
submitValues.extra_data = generateExtraData( surveyConfiguration.spec.forEach((q) => {
extra_vars, surveyValues[q.variable] = q.default;
surveyValues, });
surveyConfiguration
);
} else if (
isPromptTouched &&
surveyConfiguration?.spec &&
!launchConfiguration?.ask_variables_on_launch
) {
submitValues.extra_data = generateExtraData(
schedule.extra_data,
surveyValues,
surveyConfiguration
);
} else if (
isPromptTouched &&
launchConfiguration?.ask_variables_on_launch
) {
submitValues.extra_data = parseVariableField(extra_vars);
} }
const initialExtraVars =
launchConfiguration?.ask_variables_on_launch &&
(values.extra_vars || '---');
if (surveyConfiguration?.spec) {
extraVars = yaml.dump(mergeExtraVars(initialExtraVars, surveyValues));
} else {
extraVars = yaml.dump(mergeExtraVars(initialExtraVars, {}));
}
submitValues.extra_data = extraVars && parseVariableField(extraVars);
if ( if (
isPromptTouched && Object.keys(submitValues.extra_data).length === 0 &&
launchConfiguration?.ask_inventory_on_launch && Object.keys(schedule.extra_data).length > 0
inventory
) { ) {
submitValues.extra_data = schedule.extra_data;
}
delete values.extra_vars;
if (inventory) {
submitValues.inventory = inventory.id; submitValues.inventory = inventory.id;
} }
if ( if (execution_environment) {
isPromptTouched &&
launchConfiguration?.ask_execution_environment_on_launch &&
execution_environment
) {
submitValues.execution_environment = execution_environment.id; submitValues.execution_environment = execution_environment.id;
} }
try { try {
if (isPromptTouched && launchConfiguration?.ask_labels_on_launch) { if (launchConfiguration?.ask_labels_on_launch) {
const { labelIds, error } = createNewLabels( const { labelIds, error } = createNewLabels(
values.labels, values.labels,
resource.organization resource.organization
@@ -143,16 +120,9 @@ function ScheduleEdit({
} }
} }
const cleanedRequestData = Object.keys(requestData)
.filter((key) => !key.startsWith('survey_'))
.reduce((acc, key) => {
acc[key] = requestData[key];
return acc;
}, {});
const { const {
data: { id: scheduleId }, data: { id: scheduleId },
} = await SchedulesAPI.update(schedule.id, cleanedRequestData); } = await SchedulesAPI.update(schedule.id, requestData);
const { added: addedCredentials, removed: removedCredentials } = const { added: addedCredentials, removed: removedCredentials } =
getAddedAndRemoved( getAddedAndRemoved(

View File

@@ -6,7 +6,6 @@ import {
InventoriesAPI, InventoriesAPI,
CredentialsAPI, CredentialsAPI,
CredentialTypesAPI, CredentialTypesAPI,
JobTemplatesAPI,
} from 'api'; } from 'api';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import ScheduleEdit from './ScheduleEdit'; import ScheduleEdit from './ScheduleEdit';
@@ -126,7 +125,6 @@ describe('<ScheduleEdit />', () => {
id: 27, id: 27,
}, },
}); });
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(
<ScheduleEdit <ScheduleEdit
@@ -208,6 +206,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run once schedule', name: 'Run once schedule',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', 'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
}); });
@@ -234,6 +233,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run every 10 minutes 10 times', name: 'Run every 10 minutes 10 times',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200325T103000 RRULE:INTERVAL=10;FREQ=MINUTELY;COUNT=10', 'DTSTART;TZID=America/New_York:20200325T103000 RRULE:INTERVAL=10;FREQ=MINUTELY;COUNT=10',
}); });
@@ -262,6 +262,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run every hour until date', name: 'Run every hour until date',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T144500Z', 'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=HOURLY;UNTIL=20200326T144500Z',
}); });
@@ -287,6 +288,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run daily', name: 'Run daily',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=DAILY', 'DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=DAILY',
}); });
@@ -314,6 +316,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run weekly on mon/wed/fri', name: 'Run weekly on mon/wed/fri',
extra_data: {},
rrule: `DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=${RRule.MO},${RRule.WE},${RRule.FR}`, rrule: `DTSTART;TZID=America/New_York:20200325T104500 RRULE:INTERVAL=1;FREQ=WEEKLY;BYDAY=${RRule.MO},${RRule.WE},${RRule.FR}`,
}); });
}); });
@@ -341,6 +344,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run on the first day of the month', name: 'Run on the first day of the month',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200401T104500 RRULE:INTERVAL=1;FREQ=MONTHLY;BYMONTHDAY=1', 'DTSTART;TZID=America/New_York:20200401T104500 RRULE:INTERVAL=1;FREQ=MONTHLY;BYMONTHDAY=1',
}); });
@@ -372,6 +376,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run monthly on the last Tuesday', name: 'Run monthly on the last Tuesday',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200331T110000 RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=-1;BYDAY=TU', 'DTSTART;TZID=America/New_York:20200331T110000 RRULE:INTERVAL=1;FREQ=MONTHLY;BYSETPOS=-1;BYDAY=TU',
}); });
@@ -401,6 +406,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Yearly on the first day of March', name: 'Yearly on the first day of March',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200301T000000 RRULE:INTERVAL=1;FREQ=YEARLY;BYMONTH=3;BYMONTHDAY=1', 'DTSTART;TZID=America/New_York:20200301T000000 RRULE:INTERVAL=1;FREQ=YEARLY;BYMONTH=3;BYMONTHDAY=1',
}); });
@@ -431,6 +437,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Yearly on the second Friday in April', name: 'Yearly on the second Friday in April',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=2;BYDAY=FR;BYMONTH=4', 'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=2;BYDAY=FR;BYMONTH=4',
}); });
@@ -461,6 +468,7 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Yearly on the first weekday in October', name: 'Yearly on the first weekday in October',
extra_data: {},
rrule: rrule:
'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=1;BYDAY=MO,TU,WE,TH,FR;BYMONTH=10', 'DTSTART;TZID=America/New_York:20200410T111500 RRULE:INTERVAL=1;FREQ=YEARLY;BYSETPOS=1;BYDAY=MO,TU,WE,TH,FR;BYMONTH=10',
}); });
@@ -554,6 +562,7 @@ describe('<ScheduleEdit />', () => {
wrapper.update(); wrapper.update();
expect(SchedulesAPI.update).toBeCalledWith(27, { expect(SchedulesAPI.update).toBeCalledWith(27, {
extra_data: {},
name: 'mock schedule', name: 'mock schedule',
rrule: rrule:
'DTSTART;TZID=America/New_York:20210128T141500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', 'DTSTART;TZID=America/New_York:20210128T141500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
@@ -624,13 +633,15 @@ describe('<ScheduleEdit />', () => {
endDateTime: undefined, endDateTime: undefined,
startDateTime: undefined, startDateTime: undefined,
description: '', description: '',
extra_data: {},
name: 'foo', name: 'foo',
inventory: 702,
rrule: rrule:
'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', 'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
}); });
}); });
test('should submit update values properly when prompt is not opened', async () => { test('should submit survey with default values properly, without opening prompt wizard', async () => {
let scheduleSurveyWrapper; let scheduleSurveyWrapper;
await act(async () => { await act(async () => {
scheduleSurveyWrapper = mountWithContexts( scheduleSurveyWrapper = mountWithContexts(
@@ -735,195 +746,9 @@ describe('<ScheduleEdit />', () => {
expect(SchedulesAPI.update).toHaveBeenCalledWith(27, { expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
description: 'test description', description: 'test description',
name: 'Run once schedule', name: 'Run once schedule',
extra_data: { mc: 'first', text: 'text variable' },
rrule: rrule:
'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY', 'DTSTART;TZID=America/New_York:20200325T100000 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
}); });
}); });
test('should submit update values properly when survey values change', async () => {
  // Survey spec returned for the job template: a single required text
  // question defaulting to 'text variable'.
  JobTemplatesAPI.readSurvey.mockResolvedValue({
    data: {
      spec: [
        {
          question_name: 'text',
          question_description: '',
          required: true,
          type: 'text',
          variable: 'text',
          min: 0,
          max: 1024,
          default: 'text variable',
          choices: '',
          new_question: true,
        },
      ],
    },
  });
  // Launch configuration: only inventory and credential are promptable,
  // and the survey is enabled — this drives which wizard steps appear.
  JobTemplatesAPI.readLaunch.mockResolvedValue({
    data: {
      can_start_without_user_input: false,
      passwords_needed_to_start: [],
      ask_scm_branch_on_launch: false,
      ask_variables_on_launch: false,
      ask_tags_on_launch: false,
      ask_diff_mode_on_launch: false,
      ask_skip_tags_on_launch: false,
      ask_job_type_on_launch: false,
      ask_limit_on_launch: false,
      ask_verbosity_on_launch: false,
      ask_inventory_on_launch: true,
      ask_credential_on_launch: true,
      survey_enabled: true,
      variables_needed_to_start: [],
      credential_needed_to_start: true,
      inventory_needed_to_start: true,
      job_template_data: {
        name: 'Demo Job Template',
        id: 7,
        description: '',
      },
      defaults: {
        extra_vars: '---',
        diff_mode: false,
        limit: '',
        job_tags: '',
        skip_tags: '',
        job_type: 'run',
        verbosity: 0,
        inventory: {
          name: null,
          id: null,
        },
        scm_branch: '',
        credentials: [],
      },
    },
  });
  let scheduleSurveyWrapper;
  // Mount the edit form with launchConfig/surveyConfig passed as props,
  // mirroring the mocked API payloads above.
  await act(async () => {
    scheduleSurveyWrapper = mountWithContexts(
      <ScheduleEdit
        schedule={mockSchedule}
        resource={{
          id: 700,
          type: 'job_template',
          // NOTE(review): 'iventory' looks like a typo for 'inventory' —
          // confirm whether the component ever reads this key.
          iventory: 1,
          summary_fields: {
            credentials: [
              { name: 'job template credential', id: 75, kind: 'ssh' },
            ],
          },
          name: 'Foo Job Template',
          description: '',
        }}
        resourceDefaultCredentials={[]}
        launchConfig={{
          can_start_without_user_input: false,
          passwords_needed_to_start: [],
          ask_scm_branch_on_launch: false,
          ask_variables_on_launch: false,
          ask_tags_on_launch: false,
          ask_diff_mode_on_launch: false,
          ask_skip_tags_on_launch: false,
          ask_job_type_on_launch: false,
          ask_limit_on_launch: false,
          ask_verbosity_on_launch: false,
          ask_inventory_on_launch: true,
          ask_credential_on_launch: true,
          survey_enabled: true,
          variables_needed_to_start: [],
          credential_needed_to_start: true,
          inventory_needed_to_start: true,
          job_template_data: {
            name: 'Demo Job Template',
            id: 7,
            description: '',
          },
          defaults: {
            extra_vars: '---',
            diff_mode: false,
            limit: '',
            job_tags: '',
            skip_tags: '',
            job_type: 'run',
            verbosity: 0,
            inventory: {
              name: null,
              id: null,
            },
            scm_branch: '',
            credentials: [],
          },
        }}
        surveyConfig={{
          spec: [
            {
              question_name: 'text',
              question_description: '',
              required: true,
              type: 'text',
              variable: 'text',
              min: 0,
              max: 1024,
              default: 'text variable',
              choices: '',
              new_question: true,
            },
          ],
        }}
      />
    );
  });
  scheduleSurveyWrapper.update();
  // Open the prompt wizard.
  await act(async () =>
    scheduleSurveyWrapper
      .find('Button[aria-label="Prompt"]')
      .prop('onClick')()
  );
  scheduleSurveyWrapper.update();
  // Inventory, credential, survey and preview steps are expected.
  expect(scheduleSurveyWrapper.find('WizardNavItem').length).toBe(4);
  // Advance past the inventory and credential steps.
  await act(async () =>
    scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')()
  );
  scheduleSurveyWrapper.update();
  await act(async () =>
    scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')()
  );
  scheduleSurveyWrapper.update();
  // Change the survey answer from its default to 'foo'.
  await act(async () =>
    scheduleSurveyWrapper
      .find('input#survey-question-text')
      .simulate('change', {
        target: { value: 'foo', name: 'survey_text' },
      })
  );
  scheduleSurveyWrapper.update();
  // Step through the survey and preview screens to close the wizard.
  await act(async () =>
    scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')()
  );
  scheduleSurveyWrapper.update();
  await act(async () =>
    scheduleSurveyWrapper.find('WizardFooterInternal').prop('onNext')()
  );
  scheduleSurveyWrapper.update();
  expect(scheduleSurveyWrapper.find('Wizard').length).toBe(0);
  // Save the schedule form itself.
  await act(async () =>
    scheduleSurveyWrapper.find('Button[aria-label="Save"]').prop('onClick')()
  );
  // The changed survey answer must land in extra_data on the PATCH payload.
  expect(SchedulesAPI.update).toHaveBeenCalledWith(27, {
    description: '',
    name: 'mock schedule',
    inventory: 702,
    extra_data: {
      text: 'foo',
    },
    rrule:
      'DTSTART;TZID=America/New_York:20200402T144500 RRULE:INTERVAL=1;COUNT=1;FREQ=MINUTELY',
  });
});
}); });

View File

@@ -40,7 +40,6 @@ function ScheduleForm({
resourceDefaultCredentials, resourceDefaultCredentials,
}) { }) {
const [isWizardOpen, setIsWizardOpen] = useState(false); const [isWizardOpen, setIsWizardOpen] = useState(false);
const [isPromptTouched, setIsPromptTouched] = useState(false);
const [isSaveDisabled, setIsSaveDisabled] = useState(false); const [isSaveDisabled, setIsSaveDisabled] = useState(false);
const originalLabels = useRef([]); const originalLabels = useRef([]);
const originalInstanceGroups = useRef([]); const originalInstanceGroups = useRef([]);
@@ -493,8 +492,7 @@ function ScheduleForm({
surveyConfig, surveyConfig,
originalInstanceGroups.current, originalInstanceGroups.current,
originalLabels.current, originalLabels.current,
credentials, credentials
isPromptTouched
); );
}} }}
validate={validate} validate={validate}
@@ -520,7 +518,6 @@ function ScheduleForm({
onSave={() => { onSave={() => {
setIsWizardOpen(false); setIsWizardOpen(false);
setIsSaveDisabled(false); setIsSaveDisabled(false);
setIsPromptTouched(true);
}} }}
resourceDefaultCredentials={resourceDefaultCredentials} resourceDefaultCredentials={resourceDefaultCredentials}
labels={originalLabels.current} labels={originalLabels.current}

View File

@@ -115,11 +115,8 @@ function SessionProvider({ children }) {
}, [setSessionTimeout, setSessionCountdown]); }, [setSessionTimeout, setSessionCountdown]);
useEffect(() => { useEffect(() => {
const isRedirectCondition = (location, histLength) =>
location.pathname === '/login' && histLength === 2;
const unlisten = history.listen((location, action) => { const unlisten = history.listen((location, action) => {
if (action === 'POP' || isRedirectCondition(location, history.length)) { if (action === 'POP') {
setIsRedirectLinkReceived(true); setIsRedirectLinkReceived(true);
} }
}); });

View File

@@ -784,7 +784,7 @@ msgstr "Branche à utiliser dans lexécution de la tâche. Projet par défaut
#: screens/Inventory/shared/Inventory.helptext.js:155 #: screens/Inventory/shared/Inventory.helptext.js:155
msgid "Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true." msgid "Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true."
msgstr "Branche à utiliser pour la synchronisation de l'inventaire. La valeur par défaut du projet est utilisée si elle est vide. Cette option n'est autorisée que si le champ allow_override du projet est défini sur vrai." msgstr ""
#: components/About/About.js:45 #: components/About/About.js:45
msgid "Brand Image" msgid "Brand Image"
@@ -2832,7 +2832,7 @@ msgstr "Entrez les variables avec la syntaxe JSON ou YAML. Consultez la documen
#: screens/Inventory/shared/SmartInventoryForm.js:94 #: screens/Inventory/shared/SmartInventoryForm.js:94
msgid "Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Controller documentation for example syntax." msgid "Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Controller documentation for example syntax."
msgstr "Entrez les variables d'inventaire en utilisant la syntaxe JSON ou YAML. Utilisez le bouton d'option pour basculer entre les deux. Référez-vous à la documentation du contrôleur Ansible pour les exemples de syntaxe." msgstr ""
#: screens/CredentialType/CredentialTypeDetails/CredentialTypeDetails.js:87 #: screens/CredentialType/CredentialTypeDetails/CredentialTypeDetails.js:87
msgid "Environment variables or extra variables that specify the values a credential type can inject." msgid "Environment variables or extra variables that specify the values a credential type can inject."
@@ -3015,7 +3015,7 @@ msgstr "Recherche exacte sur le champ d'identification."
#: components/Search/RelatedLookupTypeInput.js:38 #: components/Search/RelatedLookupTypeInput.js:38
msgid "Exact search on name field." msgid "Exact search on name field."
msgstr "Recherche exacte sur le champ nom." msgstr ""
#: screens/Project/shared/Project.helptext.js:23 #: screens/Project/shared/Project.helptext.js:23
msgid "Example URLs for GIT Source Control include:" msgid "Example URLs for GIT Source Control include:"
@@ -3242,7 +3242,7 @@ msgstr "Jobs ayant échoué"
#: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:262 #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:262
msgid "Failed to approve one or more workflow approval." msgid "Failed to approve one or more workflow approval."
msgstr "Échec de l'approbation d'une ou plusieurs validations de flux de travail." msgstr ""
#: screens/WorkflowApproval/shared/WorkflowApprovalButton.js:56 #: screens/WorkflowApproval/shared/WorkflowApprovalButton.js:56
msgid "Failed to approve {0}." msgid "Failed to approve {0}."
@@ -3474,7 +3474,7 @@ msgstr "N'a pas réussi à supprimer {name}."
#: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:263 #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:263
msgid "Failed to deny one or more workflow approval." msgid "Failed to deny one or more workflow approval."
msgstr "Échec du refus d'une ou plusieurs validations de flux de travail." msgstr ""
#: screens/WorkflowApproval/shared/WorkflowDenyButton.js:51 #: screens/WorkflowApproval/shared/WorkflowDenyButton.js:51
msgid "Failed to deny {0}." msgid "Failed to deny {0}."
@@ -3520,7 +3520,7 @@ msgstr "Echec du lancement du Job."
#: screens/Inventory/InventoryHosts/InventoryHostItem.js:121 #: screens/Inventory/InventoryHosts/InventoryHostItem.js:121
msgid "Failed to load related groups." msgid "Failed to load related groups."
msgstr "Impossible de charger les groupes associés." msgstr ""
#: screens/Instances/InstanceDetail/InstanceDetail.js:388 #: screens/Instances/InstanceDetail/InstanceDetail.js:388
#: screens/Instances/InstanceList/InstanceList.js:266 #: screens/Instances/InstanceList/InstanceList.js:266
@@ -3972,12 +3972,12 @@ msgstr "Demande(s) de bilan de santé soumise(s). Veuillez patienter et recharge
#: screens/Instances/InstanceDetail/InstanceDetail.js:234 #: screens/Instances/InstanceDetail/InstanceDetail.js:234
#: screens/Instances/InstanceList/InstanceListItem.js:242 #: screens/Instances/InstanceList/InstanceListItem.js:242
msgid "Health checks are asynchronous tasks. See the" msgid "Health checks are asynchronous tasks. See the"
msgstr "Les bilans de santé sont des tâches asynchrones. Veuillez consulter la documentation pour plus d'informations." msgstr ""
#: screens/InstanceGroup/Instances/InstanceList.js:286 #: screens/InstanceGroup/Instances/InstanceList.js:286
#: screens/Instances/InstanceList/InstanceList.js:219 #: screens/Instances/InstanceList/InstanceList.js:219
msgid "Health checks can only be run on execution nodes." msgid "Health checks can only be run on execution nodes."
msgstr "Les bilans de santé ne peuvent être exécutées que sur les nœuds d'exécution." msgstr ""
#: components/StatusLabel/StatusLabel.js:42 #: components/StatusLabel/StatusLabel.js:42
msgid "Healthy" msgid "Healthy"
@@ -5048,7 +5048,7 @@ msgstr "Lancer"
#: components/TemplateList/TemplateListItem.js:214 #: components/TemplateList/TemplateListItem.js:214
msgid "Launch Template" msgid "Launch Template"
msgstr "Lancer le modèle." msgstr "Lacer le modèle."
#: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:32 #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:32
#: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:34 #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:34
@@ -9637,7 +9637,7 @@ msgstr "Utilisateur"
#: components/AppContainer/PageHeaderToolbar.js:160 #: components/AppContainer/PageHeaderToolbar.js:160
msgid "User Details" msgid "User Details"
msgstr "Détails de l'utilisateur" msgstr "Détails de l'erreur"
#: screens/Setting/SettingList.js:121 #: screens/Setting/SettingList.js:121
#: screens/Setting/Settings.js:118 #: screens/Setting/Settings.js:118

View File

@@ -80,7 +80,7 @@ function Dashboard() {
<Trans> <Trans>
<p> <p>
<InfoCircleIcon /> A tech preview of the new {brandName} user <InfoCircleIcon /> A tech preview of the new {brandName} user
interface can be found <a href="/ui_next">here</a>. interface can be found <a href="/ui_next/dashboard">here</a>.
</p> </p>
</Trans> </Trans>
</Banner> </Banner>

View File

@@ -191,7 +191,7 @@ function InstancePeerList({ setBreadcrumb }) {
fetchPeers(); fetchPeers();
addToast({ addToast({
id: instancesPeerToAssociate, id: instancesPeerToAssociate,
title: t`Please be sure to run the install bundle for the selected instance(s) again in order to see changes take effect.`, title: t`Peers update on ${instance.hostname}. Please be sure to run the install bundle for ${instance.hostname} again in order to see changes take effect.`,
variant: AlertVariant.success, variant: AlertVariant.success,
hasTimeout: true, hasTimeout: true,
}); });

View File

@@ -21,8 +21,6 @@ const ansibleDocUrls = {
'https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_vm_inventory_inventory.html', 'https://docs.ansible.com/ansible/latest/collections/community/vmware/vmware_vm_inventory_inventory.html',
constructed: constructed:
'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/constructed_inventory.html', 'https://docs.ansible.com/ansible/latest/collections/ansible/builtin/constructed_inventory.html',
terraform:
'https://github.com/ansible-collections/cloud.terraform/blob/stable-statefile-inventory/plugins/inventory/terraform_state.py',
}; };
const getInventoryHelpTextStrings = () => ({ const getInventoryHelpTextStrings = () => ({
@@ -121,10 +119,10 @@ const getInventoryHelpTextStrings = () => ({
<br /> <br />
{value && ( {value && (
<div> <div>
{t`If you want the Inventory Source to update on launch , click on Update on Launch, {t`If you want the Inventory Source to update on
and also go to `} launch and on project update, click on Update on launch, and also go to`}
<Link to={`/projects/${value.id}/details`}> {value.name} </Link> <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
{t`and click on Update Revision on Launch.`} {t`and click on Update Revision on Launch`}
</div> </div>
)} )}
</> </>
@@ -140,8 +138,8 @@ const getInventoryHelpTextStrings = () => ({
<br /> <br />
{value && ( {value && (
<div> <div>
{t`If you want the Inventory Source to update on launch , click on Update on Launch, {t`If you want the Inventory Source to update on
and also go to `} launch and on project update, click on Update on launch, and also go to`}
<Link to={`/projects/${value.id}/details`}> {value.name} </Link> <Link to={`/projects/${value.id}/details`}> {value.name} </Link>
{t`and click on Update Revision on Launch`} {t`and click on Update Revision on Launch`}
</div> </div>

View File

@@ -23,7 +23,6 @@ import {
SCMSubForm, SCMSubForm,
SatelliteSubForm, SatelliteSubForm,
ControllerSubForm, ControllerSubForm,
TerraformSubForm,
VMwareSubForm, VMwareSubForm,
VirtualizationSubForm, VirtualizationSubForm,
} from './InventorySourceSubForms'; } from './InventorySourceSubForms';
@@ -215,14 +214,6 @@ const InventorySourceFormFields = ({
} }
/> />
), ),
terraform: (
<TerraformSubForm
autoPopulateCredential={
!source?.id || source?.source !== 'terraform'
}
sourceOptions={sourceOptions}
/>
),
vmware: ( vmware: (
<VMwareSubForm <VMwareSubForm
autoPopulateCredential={ autoPopulateCredential={

View File

@@ -38,7 +38,6 @@ describe('<InventorySourceForm />', () => {
['openstack', 'OpenStack'], ['openstack', 'OpenStack'],
['rhv', 'Red Hat Virtualization'], ['rhv', 'Red Hat Virtualization'],
['controller', 'Red Hat Ansible Automation Platform'], ['controller', 'Red Hat Ansible Automation Platform'],
['terraform', 'Terraform State'],
], ],
}, },
}, },

View File

@@ -1,59 +0,0 @@
import React, { useCallback } from 'react';
import { useField, useFormikContext } from 'formik';
import { t } from '@lingui/macro';
import getDocsBaseUrl from 'util/getDocsBaseUrl';
import { useConfig } from 'contexts/Config';
import CredentialLookup from 'components/Lookup/CredentialLookup';
import { required } from 'util/validators';
import {
OptionsField,
VerbosityField,
EnabledVarField,
EnabledValueField,
HostFilterField,
SourceVarsField,
} from './SharedFields';
import getHelpText from '../Inventory.helptext';
const TerraformSubForm = ({ autoPopulateCredential }) => {
const helpText = getHelpText();
const { setFieldValue, setFieldTouched } = useFormikContext();
const [credentialField, credentialMeta, credentialHelpers] =
useField('credential');
const config = useConfig();
const handleCredentialUpdate = useCallback(
(value) => {
setFieldValue('credential', value);
setFieldTouched('credential', true, false);
},
[setFieldValue, setFieldTouched]
);
const docsBaseUrl = getDocsBaseUrl(config);
return (
<>
<CredentialLookup
credentialTypeNamespace="terraform"
label={t`Credential`}
helperTextInvalid={credentialMeta.error}
isValid={!credentialMeta.touched || !credentialMeta.error}
onBlur={() => credentialHelpers.setTouched()}
onChange={handleCredentialUpdate}
value={credentialField.value}
required
autoPopulate={autoPopulateCredential}
validate={required(t`Select a value for this field`)}
/>
<VerbosityField />
<HostFilterField />
<EnabledVarField />
<EnabledValueField />
<OptionsField />
<SourceVarsField
popoverContent={helpText.sourceVars(docsBaseUrl, 'terraform')}
/>
</>
);
};
export default TerraformSubForm;

View File

@@ -1,70 +0,0 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { CredentialsAPI } from 'api';
import { mountWithContexts } from '../../../../../testUtils/enzymeHelpers';
import TerraformSubForm from './TerraformSubForm';
jest.mock('../../../../api');
// Formik initial values matching the defaults a fresh inventory source
// form starts from.
const initialValues = {
  credential: null,
  overwrite: false,
  overwrite_vars: false,
  source_path: '',
  source_project: null,
  source_script: null,
  source_vars: '---\n',
  update_cache_timeout: 0,
  update_on_launch: true,
  verbosity: 1,
};

// Minimal OPTIONS-style payload passed through as sourceOptions.
const mockSourceOptions = {
  actions: {
    POST: {},
  },
};

describe('<TerraformSubForm />', () => {
  let wrapper;

  beforeEach(async () => {
    // CredentialLookup reads credentials on mount; return an empty page.
    CredentialsAPI.read.mockResolvedValue({
      data: { count: 0, results: [] },
    });
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik initialValues={initialValues}>
          <TerraformSubForm sourceOptions={mockSourceOptions} />
        </Formik>
      );
    });
  });

  afterAll(() => {
    jest.clearAllMocks();
  });

  test('should render subform fields', () => {
    expect(wrapper.find('FormGroup[label="Credential"]')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Verbosity"]')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Update options"]')).toHaveLength(1);
    expect(
      wrapper.find('FormGroup[label="Cache timeout (seconds)"]')
    ).toHaveLength(1);
    expect(
      wrapper.find('VariablesField[label="Source variables"]')
    ).toHaveLength(1);
  });

  test('should make expected api calls', () => {
    // The lookup must query only terraform-namespace credentials.
    expect(CredentialsAPI.read).toHaveBeenCalledTimes(1);
    expect(CredentialsAPI.read).toHaveBeenCalledWith({
      credential_type__namespace: 'terraform',
      order_by: 'name',
      page: 1,
      page_size: 5,
    });
  });
});

View File

@@ -6,6 +6,5 @@ export { default as OpenStackSubForm } from './OpenStackSubForm';
export { default as SCMSubForm } from './SCMSubForm'; export { default as SCMSubForm } from './SCMSubForm';
export { default as SatelliteSubForm } from './SatelliteSubForm'; export { default as SatelliteSubForm } from './SatelliteSubForm';
export { default as ControllerSubForm } from './ControllerSubForm'; export { default as ControllerSubForm } from './ControllerSubForm';
export { default as TerraformSubForm } from './TerraformSubForm';
export { default as VMwareSubForm } from './VMwareSubForm'; export { default as VMwareSubForm } from './VMwareSubForm';
export { default as VirtualizationSubForm } from './VirtualizationSubForm'; export { default as VirtualizationSubForm } from './VirtualizationSubForm';

View File

@@ -3,7 +3,6 @@ import { Modal, Tab, Tabs, TabTitleText } from '@patternfly/react-core';
import PropTypes from 'prop-types'; import PropTypes from 'prop-types';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { encode } from 'html-entities'; import { encode } from 'html-entities';
import { jsonToYaml } from 'util/yaml';
import StatusLabel from '../../../components/StatusLabel'; import StatusLabel from '../../../components/StatusLabel';
import { DetailList, Detail } from '../../../components/DetailList'; import { DetailList, Detail } from '../../../components/DetailList';
import ContentEmpty from '../../../components/ContentEmpty'; import ContentEmpty from '../../../components/ContentEmpty';
@@ -145,28 +144,9 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
<ContentEmpty title={t`No JSON Available`} /> <ContentEmpty title={t`No JSON Available`} />
)} )}
</Tab> </Tab>
<Tab
eventKey={2}
title={<TabTitleText>{t`YAML`}</TabTitleText>}
aria-label={t`YAML tab`}
ouiaId="yaml-tab"
>
{activeTabKey === 2 && jsonObj ? (
<CodeEditor
mode="javascript"
readOnly
value={jsonToYaml(JSON.stringify(jsonObj))}
onChange={() => {}}
rows={20}
hasErrors={false}
/>
) : (
<ContentEmpty title={t`No YAML Available`} />
)}
</Tab>
{stdOut?.length ? ( {stdOut?.length ? (
<Tab <Tab
eventKey={3} eventKey={2}
title={<TabTitleText>{t`Output`}</TabTitleText>} title={<TabTitleText>{t`Output`}</TabTitleText>}
aria-label={t`Output tab`} aria-label={t`Output tab`}
ouiaId="standard-out-tab" ouiaId="standard-out-tab"
@@ -183,7 +163,7 @@ function HostEventModal({ onClose, hostEvent = {}, isOpen = false }) {
) : null} ) : null}
{stdErr?.length ? ( {stdErr?.length ? (
<Tab <Tab
eventKey={4} eventKey={3}
title={<TabTitleText>{t`Standard Error`}</TabTitleText>} title={<TabTitleText>{t`Standard Error`}</TabTitleText>}
aria-label={t`Standard error tab`} aria-label={t`Standard error tab`}
ouiaId="standard-error-tab" ouiaId="standard-error-tab"

View File

@@ -2,7 +2,6 @@ import React from 'react';
import { shallow } from 'enzyme'; import { shallow } from 'enzyme';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import HostEventModal from './HostEventModal'; import HostEventModal from './HostEventModal';
import { jsonToYaml } from 'util/yaml';
const hostEvent = { const hostEvent = {
changed: true, changed: true,
@@ -168,8 +167,6 @@ const jsonValue = `{
] ]
}`; }`;
const yamlValue = jsonToYaml(jsonValue);
describe('HostEventModal', () => { describe('HostEventModal', () => {
test('initially renders successfully', () => { test('initially renders successfully', () => {
const wrapper = shallow( const wrapper = shallow(
@@ -190,7 +187,7 @@ describe('HostEventModal', () => {
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen /> <HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
); );
expect(wrapper.find('Tabs Tab').length).toEqual(5); expect(wrapper.find('Tabs Tab').length).toEqual(4);
}); });
test('should initially show details tab', () => { test('should initially show details tab', () => {
@@ -290,7 +287,7 @@ describe('HostEventModal', () => {
expect(codeEditor.prop('value')).toEqual(jsonValue); expect(codeEditor.prop('value')).toEqual(jsonValue);
}); });
test('should display YAML tab content on tab click', () => { test('should display Standard Out tab content on tab click', () => {
const wrapper = shallow( const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen /> <HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
); );
@@ -302,21 +299,6 @@ describe('HostEventModal', () => {
const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor'); const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript'); expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true); expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual(yamlValue);
});
test('should display Standard Out tab content on tab click', () => {
const wrapper = shallow(
<HostEventModal hostEvent={hostEvent} onClose={() => {}} isOpen />
);
const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3);
wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual(hostEvent.event_data.res.stdout); expect(codeEditor.prop('value')).toEqual(hostEvent.event_data.res.stdout);
}); });
@@ -334,10 +316,10 @@ describe('HostEventModal', () => {
); );
const handleTabClick = wrapper.find('Tabs').prop('onSelect'); const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 4); handleTabClick(null, 3);
wrapper.update(); wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=4] CodeEditor'); const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript'); expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true); expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('error content'); expect(codeEditor.prop('value')).toEqual('error content');
@@ -369,10 +351,10 @@ describe('HostEventModal', () => {
); );
const handleTabClick = wrapper.find('Tabs').prop('onSelect'); const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3); handleTabClick(null, 2);
wrapper.update(); wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor'); const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript'); expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true); expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('foo bar'); expect(codeEditor.prop('value')).toEqual('foo bar');
@@ -393,10 +375,10 @@ describe('HostEventModal', () => {
); );
const handleTabClick = wrapper.find('Tabs').prop('onSelect'); const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3); handleTabClick(null, 2);
wrapper.update(); wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor'); const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript'); expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true); expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual('baz\nbar'); expect(codeEditor.prop('value')).toEqual('baz\nbar');
@@ -412,10 +394,10 @@ describe('HostEventModal', () => {
); );
const handleTabClick = wrapper.find('Tabs').prop('onSelect'); const handleTabClick = wrapper.find('Tabs').prop('onSelect');
handleTabClick(null, 3); handleTabClick(null, 2);
wrapper.update(); wrapper.update();
const codeEditor = wrapper.find('Tab[eventKey=3] CodeEditor'); const codeEditor = wrapper.find('Tab[eventKey=2] CodeEditor');
expect(codeEditor.prop('mode')).toBe('javascript'); expect(codeEditor.prop('mode')).toBe('javascript');
expect(codeEditor.prop('readOnly')).toBe(true); expect(codeEditor.prop('readOnly')).toBe(true);
expect(codeEditor.prop('value')).toEqual( expect(codeEditor.prop('value')).toEqual(

View File

@@ -30,7 +30,7 @@ function SubscriptionUsage() {
<Trans> <Trans>
<p> <p>
<InfoCircleIcon /> A tech preview of the new {brandName} user <InfoCircleIcon /> A tech preview of the new {brandName} user
interface can be found <a href="/ui_next">here</a>. interface can be found <a href="/ui_next/dashboard">here</a>.
</p> </p>
</Trans> </Trans>
</Banner> </Banner>

View File

@@ -201,11 +201,7 @@ function NodeViewModal({ readOnly }) {
overrides.limit = originalNodeObject.limit; overrides.limit = originalNodeObject.limit;
} }
if (launchConfig.ask_verbosity_on_launch) { if (launchConfig.ask_verbosity_on_launch) {
overrides.verbosity = overrides.verbosity = originalNodeObject.verbosity.toString();
originalNodeObject.verbosity !== undefined &&
originalNodeObject.verbosity !== null
? originalNodeObject.verbosity.toString()
: '0';
} }
if (launchConfig.ask_credential_on_launch) { if (launchConfig.ask_credential_on_launch) {
overrides.credentials = originalNodeCredentials || []; overrides.credentials = originalNodeCredentials || [];

View File

@@ -1,7 +1,7 @@
export default function getSurveyValues(values) { export default function getSurveyValues(values) {
const surveyValues = {}; const surveyValues = {};
Object.keys(values).forEach((key) => { Object.keys(values).forEach((key) => {
if (key.startsWith('survey_')) { if (key.startsWith('survey_') && values[key] !== []) {
if (Array.isArray(values[key]) && values[key].length === 0) { if (Array.isArray(values[key]) && values[key].length === 0) {
return; return;
} }

View File

@@ -1,12 +1,7 @@
import yaml from 'js-yaml'; import yaml from 'js-yaml';
export default function mergeExtraVars(extraVars = '', survey = {}) { export default function mergeExtraVars(extraVars = '', survey = {}) {
let vars = {}; const vars = yaml.load(extraVars) || {};
if (typeof extraVars === 'string') {
vars = yaml.load(extraVars);
} else if (typeof extraVars === 'object') {
vars = extraVars;
}
return { return {
...vars, ...vars,
...survey, ...survey,

View File

@@ -35,7 +35,7 @@ ui-next/src/build: $(UI_NEXT_DIR)/src/build/awx
## True target for ui-next/src/build. Build ui_next from source. ## True target for ui-next/src/build. Build ui_next from source.
$(UI_NEXT_DIR)/src/build/awx: $(UI_NEXT_DIR)/src $(UI_NEXT_DIR)/src/node_modules/webpack $(UI_NEXT_DIR)/src/build/awx: $(UI_NEXT_DIR)/src $(UI_NEXT_DIR)/src/node_modules/webpack
@echo "=== Building ui_next ===" @echo "=== Building ui_next ==="
@cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ ROUTE_PREFIX=/ui_next npm run build:awx @cd $(UI_NEXT_DIR)/src && PRODUCT="$(PRODUCT)" PUBLIC_PATH=/static/awx/ npm run build:awx
@mv $(UI_NEXT_DIR)/src/build/awx/index.html $(UI_NEXT_DIR)/src/build/awx/index_awx.html @mv $(UI_NEXT_DIR)/src/build/awx/index.html $(UI_NEXT_DIR)/src/build/awx/index_awx.html
.PHONY: ui-next/src .PHONY: ui-next/src

View File

@@ -2,9 +2,7 @@
# All Rights Reserved. # All Rights Reserved.
from django.conf import settings from django.conf import settings
from django.urls import path, re_path, include from django.urls import re_path, include
from ansible_base.resource_registry.urls import urlpatterns as resource_api_urls
from awx.main.views import handle_400, handle_403, handle_404, handle_500, handle_csp_violation, handle_login_redirect from awx.main.views import handle_400, handle_403, handle_404, handle_500, handle_csp_violation, handle_login_redirect
@@ -12,16 +10,7 @@ from awx.main.views import handle_400, handle_403, handle_404, handle_500, handl
urlpatterns = [ urlpatterns = [
re_path(r'', include('awx.ui.urls', namespace='ui')), re_path(r'', include('awx.ui.urls', namespace='ui')),
re_path(r'^ui_next/.*', include('awx.ui_next.urls', namespace='ui_next')), re_path(r'^ui_next/.*', include('awx.ui_next.urls', namespace='ui_next')),
path('api/', include('awx.api.urls', namespace='api')), re_path(r'^api/', include('awx.api.urls', namespace='api')),
]
if settings.OPTIONAL_API_URLPATTERN_PREFIX:
urlpatterns += [
path(f'api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}/', include('awx.api.urls')),
]
urlpatterns += [
re_path(r'^api/v2/', include(resource_api_urls)),
re_path(r'^sso/', include('awx.sso.urls', namespace='sso')), re_path(r'^sso/', include('awx.sso.urls', namespace='sso')),
re_path(r'^sso/', include('social_django.urls', namespace='social')), re_path(r'^sso/', include('social_django.urls', namespace='social')),
re_path(r'^(?:api/)?400.html$', handle_400), re_path(r'^(?:api/)?400.html$', handle_400),

View File

@@ -18,7 +18,7 @@ documentation: https://github.com/ansible/awx/blob/devel/awx_collection/README.m
homepage: https://www.ansible.com/ homepage: https://www.ansible.com/
issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection
license: license:
- GPL-3.0-or-later - GPL-3.0-only
name: awx name: awx
namespace: awx namespace: awx
readme: README.md readme: README.md

View File

@@ -0,0 +1,119 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Wayne Witzel III <wayne@riotousliving.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import traceback
TOWER_CLI_IMP_ERR = None
try:
import tower_cli.utils.exceptions as exc
from tower_cli.utils import parser
from tower_cli.api import client
HAS_TOWER_CLI = True
except ImportError:
TOWER_CLI_IMP_ERR = traceback.format_exc()
HAS_TOWER_CLI = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
def tower_auth_config(module):
    """Assemble tower-cli authentication settings for an Ansible module.

    If the ``tower_config_file`` parameter is set, the referenced
    tower-cli.cfg file is parsed and its contents returned as a dict.
    Otherwise a dict is built from whichever of the connection-related
    module parameters (host / username / password / validate_certs)
    were supplied.  Every consumed parameter is popped off
    ``module.params`` so it is not forwarded elsewhere.
    """
    config_file = module.params.pop('tower_config_file', None)
    if config_file:
        if not os.path.exists(config_file):
            module.fail_json(msg='file not found: %s' % config_file)
        if os.path.isdir(config_file):
            module.fail_json(msg='directory can not be used as config file: %s' % config_file)
        with open(config_file, 'r') as f:
            return parser.string_to_dict(f.read())

    auth_config = {}
    # Only forward values the caller actually supplied (truthy values,
    # matching the historical behavior of this helper).
    for param_name, config_key in (
        ('tower_host', 'host'),
        ('tower_username', 'username'),
        ('tower_password', 'password'),
    ):
        value = module.params.pop(param_name, None)
        if value:
            auth_config[config_key] = value
    # Discard the deprecated alias so it never leaks into the config.
    module.params.pop('tower_verify_ssl', None)
    verify_ssl = module.params.pop('validate_certs', None)
    if verify_ssl is not None:
        auth_config['verify_ssl'] = verify_ssl
    return auth_config
def tower_check_mode(module):
    '''Execute check mode logic for Ansible Tower modules.

    Outside of check mode this is a no-op.  In check mode it pings the
    Tower API and exits immediately via ``exit_json`` with the reported
    server version; connection/request failures are surfaced through
    ``fail_json`` instead.
    '''
    if not module.check_mode:
        return
    try:
        ping_data = client.get('/ping').json()
    except (exc.ServerError, exc.ConnectionError, exc.BadRequest) as excinfo:
        module.fail_json(changed=False, msg='Failed check mode: {0}'.format(excinfo))
    else:
        module.exit_json(changed=True, tower_version='{0}'.format(ping_data['version']))
class TowerLegacyModule(AnsibleModule):
    """AnsibleModule subclass preloaded with the legacy Tower connection options.

    Merges the shared ``tower_*`` connection arguments into the caller's
    argument spec, marks ``tower_config_file`` as mutually exclusive
    with each individual connection parameter, and fails fast when the
    required ``ansible-tower-cli`` package is unavailable.
    """

    def __init__(self, argument_spec, **kwargs):
        # Shared connection arguments are placed first so the caller's
        # spec can still override any of them.
        full_spec = dict(
            tower_host=dict(),
            tower_username=dict(),
            tower_password=dict(no_log=True),
            validate_certs=dict(type='bool', aliases=['tower_verify_ssl']),
            tower_config_file=dict(type='path'),
        )
        full_spec.update(argument_spec)
        # A config file replaces the individual connection parameters,
        # so the two styles may not be mixed on one invocation.
        exclusive = kwargs.setdefault('mutually_exclusive', [])
        for conflicting in ('tower_host', 'tower_username', 'tower_password', 'validate_certs'):
            exclusive.append(('tower_config_file', conflicting))
        super().__init__(argument_spec=full_spec, **kwargs)
        if not HAS_TOWER_CLI:
            self.fail_json(msg=missing_required_lib('ansible-tower-cli'), exception=TOWER_CLI_IMP_ERR)

View File

@@ -181,8 +181,10 @@ def run_module(request, collection_import):
resource_class = resource_module.ControllerAWXKitModule resource_class = resource_module.ControllerAWXKitModule
elif getattr(resource_module, 'ControllerAPIModule', None): elif getattr(resource_module, 'ControllerAPIModule', None):
resource_class = resource_module.ControllerAPIModule resource_class = resource_module.ControllerAPIModule
elif getattr(resource_module, 'TowerLegacyModule', None):
resource_class = resource_module.TowerLegacyModule
else: else:
raise RuntimeError("The module has neither a ControllerAWXKitModule or a ControllerAPIModule") raise RuntimeError("The module has neither a TowerLegacyModule, ControllerAWXKitModule or a ControllerAPIModule")
with mock.patch.object(resource_class, '_load_params', new=mock_load_params): with mock.patch.object(resource_class, '_load_params', new=mock_load_params):
# Call the test utility (like a mock server) instead of issuing HTTP requests # Call the test utility (like a mock server) instead of issuing HTTP requests

View File

@@ -155,4 +155,4 @@ def test_build_notification_message_undefined(run_module, admin_user, organizati
nt = NotificationTemplate.objects.get(id=result['id']) nt = NotificationTemplate.objects.get(id=result['id'])
body = job.build_notification_message(nt, 'running') body = job.build_notification_message(nt, 'running')
assert 'The template rendering return a blank body' in body[1] assert '{"started_by": "My Placeholder"}' in body[1]

View File

@@ -26,7 +26,7 @@
name: "{{ project_name }}" name: "{{ project_name }}"
organization: "{{ org_name }}" organization: "{{ org_name }}"
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
- name: Create a git project with same name, different org - name: Create a git project with same name, different org

View File

@@ -31,7 +31,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
register: result register: result
@@ -44,7 +44,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
state: exists state: exists
register: result register: result
@@ -58,7 +58,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
state: exists state: exists
request_timeout: .001 request_timeout: .001
@@ -75,7 +75,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
state: absent state: absent
register: result register: result
@@ -89,7 +89,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: true wait: true
state: exists state: exists
register: result register: result
@@ -103,7 +103,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: false wait: false
register: result register: result
ignore_errors: true ignore_errors: true
@@ -137,7 +137,7 @@
name: "{{ project_name2 }}" name: "{{ project_name2 }}"
organization: "{{ org_name }}" organization: "{{ org_name }}"
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
scm_credential: "{{ cred_name }}" scm_credential: "{{ cred_name }}"
check_mode: true check_mode: true
@@ -162,7 +162,7 @@
name: "{{ project_name2 }}" name: "{{ project_name2 }}"
organization: Non_Existing_Org organization: Non_Existing_Org
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
scm_credential: "{{ cred_name }}" scm_credential: "{{ cred_name }}"
register: result register: result
ignore_errors: true ignore_errors: true
@@ -179,7 +179,7 @@
name: "{{ project_name2 }}" name: "{{ project_name2 }}"
organization: "{{ org_name }}" organization: "{{ org_name }}"
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
scm_credential: Non_Existing_Credential scm_credential: Non_Existing_Credential
register: result register: result
ignore_errors: true ignore_errors: true
@@ -191,7 +191,7 @@
- "'Non_Existing_Credential' in result.msg" - "'Non_Existing_Credential' in result.msg"
- "result.total_results == 0" - "result.total_results == 0"
- name: Create a git project using a branch and allowing branch override - name: Create a git project without credentials without waiting
project: project:
name: "{{ project_name3 }}" name: "{{ project_name3 }}"
organization: Default organization: Default

View File

@@ -13,7 +13,7 @@
name: "{{ project_name1 }}" name: "{{ project_name1 }}"
organization: Default organization: Default
scm_type: git scm_type: git
scm_url: https://github.com/ansible/ansible-tower-samples scm_url: https://github.com/ansible/test-playbooks
wait: false wait: false
register: project_create_result register: project_create_result

View File

@@ -19,6 +19,8 @@ homepage: https://www.ansible.com/
issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection
license: license:
- GPL-3.0-or-later - GPL-3.0-or-later
# plugins/module_utils/tower_legacy.py
- BSD-2-Clause
name: {{ collection_package }} name: {{ collection_package }}
namespace: {{ collection_namespace }} namespace: {{ collection_namespace }}
readme: README.md readme: README.md

View File

@@ -96,7 +96,6 @@ credential_type_name_to_config_kind_map = {
'vault': 'vault', 'vault': 'vault',
'vmware vcenter': 'vmware', 'vmware vcenter': 'vmware',
'gpg public key': 'gpg_public_key', 'gpg public key': 'gpg_public_key',
'terraform backend configuration': 'terraform',
} }
config_kind_to_credential_type_name_map = {kind: name for name, kind in credential_type_name_to_config_kind_map.items()} config_kind_to_credential_type_name_map = {kind: name for name, kind in credential_type_name_to_config_kind_map.items()}

View File

@@ -51,16 +51,7 @@ class WSClient(object):
# Subscription group types # Subscription group types
def __init__( def __init__(
self, self, token=None, hostname='', port=443, secure=True, session_id=None, csrftoken=None, add_received_time=False, session_cookie_name='awx_sessionid'
token=None,
hostname='',
port=443,
secure=True,
ws_suffix='websocket/',
session_id=None,
csrftoken=None,
add_received_time=False,
session_cookie_name='awx_sessionid',
): ):
# delay this import, because this is an optional dependency # delay this import, because this is an optional dependency
import websocket import websocket
@@ -77,7 +68,6 @@ class WSClient(object):
hostname = result.hostname hostname = result.hostname
self.port = port self.port = port
self.suffix = ws_suffix
self._use_ssl = secure self._use_ssl = secure
self.hostname = hostname self.hostname = hostname
self.token = token self.token = token
@@ -95,7 +85,7 @@ class WSClient(object):
else: else:
auth_cookie = '' auth_cookie = ''
pref = 'wss://' if self._use_ssl else 'ws://' pref = 'wss://' if self._use_ssl else 'ws://'
url = '{0}{1.hostname}:{1.port}/{1.suffix}'.format(pref, self) url = '{0}{1.hostname}:{1.port}/websocket/'.format(pref, self)
self.ws = websocket.WebSocketApp( self.ws = websocket.WebSocketApp(
url, on_open=self._on_open, on_message=self._on_message, on_error=self._on_error, on_close=self._on_close, cookie=auth_cookie url, on_open=self._on_open, on_message=self._on_message, on_error=self._on_error, on_close=self._on_close, cookie=auth_cookie
) )

View File

@@ -90,7 +90,6 @@ setup(
install_requires=[ install_requires=[
'PyYAML', 'PyYAML',
'requests', 'requests',
'setuptools',
], ],
python_requires=">=3.8", python_requires=">=3.8",
extras_require={'formatting': ['jq'], 'websockets': ['websocket-client==0.57.0'], 'crypto': ['cryptography']}, extras_require={'formatting': ['jq'], 'websockets': ['websocket-client==0.57.0'], 'crypto': ['cryptography']},

View File

@@ -175,10 +175,9 @@ class TestOptions(unittest.TestCase):
assert '--verbosity {0,1,2,3,4,5}' in out.getvalue() assert '--verbosity {0,1,2,3,4,5}' in out.getvalue()
def test_actions_with_primary_key(self): def test_actions_with_primary_key(self):
page = OptionsPage.from_json({'actions': {'GET': {}, 'POST': {}}})
ResourceOptionsParser(None, page, 'jobs', self.parser)
for method in ('get', 'modify', 'delete'): for method in ('get', 'modify', 'delete'):
page = OptionsPage.from_json({'actions': {'GET': {}, 'POST': {}}})
ResourceOptionsParser(None, page, 'jobs', self.parser)
assert method in self.parser.choices assert method in self.parser.choices
out = StringIO() out = StringIO()

View File

@@ -17,11 +17,6 @@ def test_explicit_hostname():
assert client.token == "token" assert client.token == "token"
def test_websocket_suffix():
client = WSClient("token", "hostname", 566, ws_suffix='my-websocket/')
assert client.suffix == 'my-websocket/'
@pytest.mark.parametrize( @pytest.mark.parametrize(
'url, result', 'url, result',
[ [

View File

@@ -8,7 +8,7 @@ skip_missing_interpreters = true
# skipsdist = true # skipsdist = true
[testenv] [testenv]
basepython = python3.11 basepython = python3.9
setenv = setenv =
PYTHONPATH = {toxinidir}:{env:PYTHONPATH:}:. PYTHONPATH = {toxinidir}:{env:PYTHONPATH:}:.
deps = deps =

View File

@@ -6,7 +6,7 @@ The *awx-manage* Utility
.. index:: .. index::
single: awx-manage single: awx-manage
The ``awx-manage`` utility is used to access detailed internal information of AWX. Commands for ``awx-manage`` should run as the ``awx`` user only. The ``awx-manage`` utility is used to access detailed internal information of AWX. Commands for ``awx-manage`` should run as the ``awx`` or ``root`` user.
.. warning:: .. warning::
Running awx-manage commands via playbook is not recommended or supported. Running awx-manage commands via playbook is not recommended or supported.

View File

@@ -557,7 +557,7 @@ Terminal Access Controller Access-Control System Plus (TACACS+) is a protocol th
Generic OIDC settings Generic OIDC settings
---------------------- ----------------------
Similar to SAML, OpenID Connect (OIDC) is uses the OAuth 2.0 framework. It allows third-party applications to verify the identity and obtain basic end-user information. The main difference between OIDC and SAML is that SAML has a service provider (SP)-to-IdP trust relationship, whereas OIDC establishes the trust with the channel (HTTPS) that is used to obtain the security token. To obtain the credentials needed to setup OIDC with AWX, refer to the documentation from the identity provider (IdP) of your choice that has OIDC support. Similar to SAML, OpenID Connect (OIDC) is uses the OAuth 2.0 framework. It allows third-party applications to verify the identity and obtain basic end-user information. The main difference between OIDC and SMAL is that SAML has a service provider (SP)-to-IdP trust relationship, whereas OIDC establishes the trust with the channel (HTTPS) that is used to obtain the security token. To obtain the credentials needed to setup OIDC with AWX, refer to the documentation from the identity provider (IdP) of your choice that has OIDC support.
To configure OIDC in AWX: To configure OIDC in AWX:

View File

@@ -13,7 +13,7 @@ Scaling your mesh is only available on Openshift and Kubernetes (K8S) deployment
Instances serve as nodes in your mesh topology. Automation mesh allows you to extend the footprint of your automation. Where you launch a job and where the ``ansible-playbook`` runs can be in different locations. Instances serve as nodes in your mesh topology. Automation mesh allows you to extend the footprint of your automation. Where you launch a job and where the ``ansible-playbook`` runs can be in different locations.
.. image:: ../common/images/instances_mesh_concept.drawio.png .. image:: ../common/images/instances_mesh_concept.png
:alt: Site A pointing to Site B and dotted arrows to two hosts from Site B :alt: Site A pointing to Site B and dotted arrows to two hosts from Site B
Automation mesh is useful for: Automation mesh is useful for:
@@ -23,7 +23,7 @@ Automation mesh is useful for:
The nodes (control, hop, and execution instances) are interconnected via receptor, forming a virtual mesh. The nodes (control, hop, and execution instances) are interconnected via receptor, forming a virtual mesh.
.. image:: ../common/images/instances_mesh_concept_with_nodes.drawio.png .. image:: ../common/images/instances_mesh_concept_with_nodes.png
:alt: Control node pointing to hop node, which is pointing to two execution nodes. :alt: Control node pointing to hop node, which is pointing to two execution nodes.
@@ -51,227 +51,13 @@ Prerequisites
- To manage instances from the AWX user interface, you must have System Administrator or System Auditor permissions. - To manage instances from the AWX user interface, you must have System Administrator or System Auditor permissions.
Common topologies
------------------
Instances make up the network of devices that communicate with one another. They are the building blocks of an automation mesh. These building blocks serve as nodes in a mesh topology. There are several kinds of instances:
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Node Type | Description |
+===========+=================================================================================================================+
| Control | Nodes that run persistent Ansible Automation Platform services, and delegate jobs to hybrid and execution nodes |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hybrid | Nodes that run persistent Ansible Automation Platform services and execute jobs |
| | (not applicable to operator-based installations) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Hop | Used for relaying across the mesh only |
+-----------+-----------------------------------------------------------------------------------------------------------------+
| Execution | Nodes that run jobs delivered from control nodes (jobs submitted from the users Ansible automation) |
+-----------+-----------------------------------------------------------------------------------------------------------------+
Simple topology
~~~~~~~~~~~~~~~~
One of the ways to expand job capacity is to create a standalone execution node that can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is setup between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.drawio.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
Below are sample values used to configure each node in a simple topology:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-65d6d96987-mgn9j
- n/a
- n/a
- [hop node]
* - Hop node
- awx-hop-node
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- [hop node]
Mesh topology
~~~~~~~~~~~~~~
Mesh ingress is a feature that allows remote nodes to connect inbound to the control plane. This is especially useful when creating remote nodes in restricted networking environments that disallow inbound traffic.
.. image:: ../common/images/instances_mesh_ingress_topology.drawio.png
:alt: Mesh ingress architecture showing the peering relationship between nodes.
Below are sample values used to configure each node in a mesh ingress topology:
.. list-table::
:widths: 20 30 10 20 15
:header-rows: 1
* - Instance type
- Hostname
- Listener port
- Peers from control nodes
- Peers
* - Control plane
- awx-task-65d6d96987-mgn9j
- n/a
- n/a
- [hop node]
* - Hop node
- awx-mesh-ingress-1
- 27199
- True
- []
* - Execution node
- awx-example.com
- n/a
- False
- [hop node]
In order to create a mesh ingress for AWX, see the `Mesh Ingress <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/mesh-ingress.html>`_ chapter of the AWX Operator Documentation for information on setting up this type of topology. The last step is to create a remote execution node and add the execution node to an instance group in order for it to be used in your job execution. Whatever execution environment image used to run a playbook needs to be accessible for your remote execution node. Everything you are using in your playbook also needs to be accessible from this remote execution node.
.. image:: ../common/images/instances-job-template-using-remote-execution-ig.png
:alt: Job template using the instance group with the execution node to run jobs.
:width: 1400px
.. _ag_instances_add:
Add an instance
----------------
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
:width: 1400px
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. Listener port needs to be set if this is enabled or the instance is a peer.
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details of the one of the created instances opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
:width: 1400px
.. note::
The following steps 4-8 are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contain files to allow AWX to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
:width: 1400px
5. Extract the downloaded ``tar.gz`` file from the location you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To facilitate that these files will be in the right location on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection which can be obtained via:
::
ansible-galaxy collection install -r requirements.yml
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: <hostname>
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
8. Run the following command on the machine you want to update your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to do a health check against the new instance. You may run a health check by selecting the node and clicking the **Run health check** button from its Details page at any time. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are now ready to run on this machine!
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
:width: 1400px
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
:width: 1400px
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box from the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** in the Instances page, or by setting the instance ``node_state = deprovisioning`` via the API. Upon deleting, a pop-up message will appear to notify that you may need to re-run the install bundle to make sure things that were removed are no longer connected.
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
Manage instances Manage instances
----------------- -----------------
Click **Instances** from the left side navigation menu to access the Instances list. Click **Instances** from the left side navigation menu to access the Instances list.
.. image:: ../common/images/instances_list_view.png .. image:: ../common/images/instances_list_view.png
:alt: List view of instances in AWX :alt: List view of instances in AWX
:width: 1400px
The Instances list displays all the current nodes in your topology, along with relevant details: The Instances list displays all the current nodes in your topology, along with relevant details:
@@ -297,9 +83,7 @@ The Instances list displays all the current nodes in your topology, along with r
From this page, you can add, remove or run health checks on your nodes. Use the check boxes next to an instance to select it to remove or run a health check against. When a button is grayed-out, you do not have permission for that particular action. Contact your Administrator to grant you the required level of access. If you are able to remove an instance, you will receive a prompt for confirmation, like the one below: From this page, you can add, remove or run health checks on your nodes. Use the check boxes next to an instance to select it to remove or run a health check against. When a button is grayed-out, you do not have permission for that particular action. Contact your Administrator to grant you the required level of access. If you are able to remove an instance, you will receive a prompt for confirmation, like the one below:
.. image:: ../common/images/instances_delete_prompt.png .. image:: ../common/images/instances_delete_prompt.png
:alt: Prompt for deleting instances in AWX :alt: Prompt for deleting instances in AWX.
:width: 1400px
.. note:: .. note::
@@ -312,8 +96,7 @@ Click **Remove** to confirm.
If running a health check on an instance, at the top of the Details page, a message displays that the health check is in progress. If running a health check on an instance, at the top of the Details page, a message displays that the health check is in progress.
.. image:: ../common/images/instances_health_check.png .. image:: ../common/images/instances_health_check.png
:alt: Health check for instances in AWX :alt: Health check for instances in AWX
:width: 1400px
Click **Reload** to refresh the instance status. Click **Reload** to refresh the instance status.
@@ -321,20 +104,162 @@ Click **Reload** to refresh the instance status.
Health checks are run asynchronously, and may take up to a minute for the instance status to update, even with a refresh. The status may or may not change after the health check. At the bottom of the Details page, a timer/clock icon displays next to the last known health check date and time stamp if the health check task is currently running. Health checks are run asynchronously, and may take up to a minute for the instance status to update, even with a refresh. The status may or may not change after the health check. At the bottom of the Details page, a timer/clock icon displays next to the last known health check date and time stamp if the health check task is currently running.
.. image:: ../common/images/instances_health_check_pending.png .. image:: ../common/images/instances_health_check_pending.png
:alt: Health check for instance still in pending state. :alt: Health check for instance still in pending state.
The example health check shows the status updates with an error on node 'one': The example health check shows the status updates with an error on node 'one':
.. image:: ../common/images/topology-viewer-instance-with-errors.png .. image:: ../common/images/topology-viewer-instance-with-errors.png
:alt: Health check showing an error in one of the instances. :alt: Health check showing an error in one of the instances.
:width: 1400px
Add an instance
----------------
One of the ways to expand capacity is to create an instance. Standalone execution nodes can be added to run alongside the Kubernetes deployment of AWX. These machines will not be a part of the AWX Kubernetes cluster. The control nodes running in the cluster will connect and submit work to these machines via Receptor. The machines are registered in AWX as type "execution" instances, meaning they will only be used to run AWX jobs, not dispatch work or handle web requests as control nodes do.
Hop nodes can be added to sit between the control plane of AWX and standalone execution nodes. These machines will not be a part of the AWX Kubernetes cluster and they will be registered in AWX as node type "hop", meaning they will only handle inbound and outbound traffic for otherwise unreachable nodes in a different or more strict network.
Below is an example of an AWX task pod with two execution nodes. Traffic to execution node 2 flows through a hop node that is setup between it and the control plane.
.. image:: ../common/images/instances_awx_task_pods_hopnode.png
:alt: AWX task pod with a hop node between the control plane of AWX and standalone execution nodes.
To create an instance in AWX:
1. Click **Instances** from the left side navigation menu of the AWX UI.
2. In the Instances list view, click the **Add** button and the Create new Instance window opens.
.. image:: ../common/images/instances_create_new.png
:alt: Create a new instance form.
An instance has several attributes that may be configured:
- Enter a fully qualified domain name (ping-able DNS) or IP address for your instance in the **Host Name** field (required). This field is equivalent to ``hostname`` in the API.
- Optionally enter a **Description** for the instance
- The **Instance State** field is auto-populated, indicating that it is being installed, and cannot be modified
- Optionally specify the **Listener Port** for the receptor to listen on for incoming connections. This is an open port on the remote machine used to establish inbound TCP connections. This field is equivalent to ``listener_port`` in the API.
- Select from the options in **Instance Type** field to specify the type you want to create. Only execution and hop nodes can be created as operator-based installations do not support hybrid nodes. This field is equivalent to ``node_type`` in the API.
- In the **Peers** field, select the instance hostnames you want your new instance to connect outbound to.
- In the **Options** fields:
- Check the **Enable Instance** box to make it available for jobs to run on an execution node.
- Check the **Managed by Policy** box to allow policy to dictate how the instance is assigned.
- Check the **Peers from control nodes** box to allow control nodes to peer to this instance automatically. Listener port needs to be set if this is enabled or the instance is a peer.
In the example diagram above, the configurations are as follows:
+------------------+---------------+--------------------------+--------------+
| instance name | listener_port | peers_from_control_nodes | peers |
+==================+===============+==========================+==============+
| execution node 1 | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| hop node | 27199 | true | [] |
+------------------+---------------+--------------------------+--------------+
| execution node 2 | null | false | ["hop node"] |
+------------------+---------------+--------------------------+--------------+
3. Once the attributes are configured, click **Save** to proceed.
Upon successful creation, the Details page of one of the created instances opens.
.. image:: ../common/images/instances_create_details.png
:alt: Details of the newly created instance.
.. note::
The following steps 4-8 are intended to be run from any computer that has SSH access to the newly created instance.
4. Click the download button next to the **Install Bundle** field to download the tarball that contains the files that allow AWX to make proper TCP connections to the remote machine.
.. image:: ../common/images/instances_install_bundle.png
:alt: Instance details showing the Download button in the Install Bundle field of the Details tab.
5. Extract the downloaded ``tar.gz`` file in the location where you downloaded it. The install bundle contains TLS certificates and keys, a certificate authority, and a proper Receptor configuration file. To ensure these files are placed in the right location on the remote machine, the install bundle includes an ``install_receptor.yml`` playbook. The playbook requires the Receptor collection which can be obtained via:
::
ansible-galaxy collection install -r requirements.yml
6. Before running the ``ansible-playbook`` command, edit the following fields in the ``inventory.yml`` file:
- ``ansible_user`` with the username running the installation
- ``ansible_ssh_private_key_file`` to contain the filename of the private key used to connect to the instance
::
---
all:
hosts:
remote-execution:
ansible_host: 18.206.206.34
ansible_user: <username> # user provided
ansible_ssh_private_key_file: ~/.ssh/id_rsa
The content of the ``inventory.yml`` file serves as a template and contains variables for roles that are applied during the installation and configuration of a receptor node in a mesh topology. You may modify some of the other fields, or replace the file in its entirety for advanced scenarios. Refer to `Role Variables <https://github.com/ansible/receptor-collection/blob/main/README.md>`_ for more information on each variable.
7. Save the file to continue.
8. Run the following command on the machine you want to update your mesh:
::
ansible-playbook -i inventory.yml install_receptor.yml
Wait a few minutes for the periodic AWX task to do a health check against the new instance. You may run a health check by selecting the node and clicking the **Run health check** button from its Details page at any time. Once the instances endpoint or page reports a "Ready" status for the instance, jobs are now ready to run on this machine!
9. To view other instances within the same topology or associate peers, click the **Peers** tab.
.. image:: ../common/images/instances_peers_tab.png
:alt: "Peers" tab showing two peers.
To associate peers with your node, click the **Associate** button to open a dialog box of instances eligible for peering.
.. image:: ../common/images/instances_associate_peer.png
:alt: Instances available to peer with the example hop node.
Execution nodes can peer with either hop nodes or other execution nodes. Hop nodes can only peer with execution nodes unless you check the **Peers from control nodes** check box from the **Options** field.
.. note::
If you associate or disassociate a peer, a notification will inform you to re-run the install bundle from the Peer Detail view (the :ref:`ag_topology_viewer` has the download link).
.. image:: ../common/images/instances_associate_peer_reinstallmsg.png
:alt: Notification to re-run the installation bundle due to change in the peering.
You can remove an instance by clicking **Remove** in the Instances page, or by setting the instance ``node_state = deprovisioning`` via the API. Upon deleting, a pop-up message will appear to notify that you may need to re-run the install bundle to make sure things that were removed are no longer connected.
10. To view a graphical representation of your updated topology, refer to the :ref:`ag_topology_viewer` section of this guide.
Using a custom Receptor CA Using a custom Receptor CA
--------------------------- ---------------------------
Refer to the AWX Operator Documentation, `Custom Receptor CA <https://ansible.readthedocs.io/projects/awx-operator/en/latest/user-guide/advanced-configuration/custom-receptor-certs.html>`_ for detail. The control nodes on the K8S cluster will communicate with execution nodes via mutual TLS TCP connections, running via Receptor. Execution nodes will verify incoming connections by ensuring the x509 certificate was issued by a trusted Certificate Authority (CA).
You may choose to provide your own CA for this validation. If no CA is provided, AWX operator will automatically generate one using OpenSSL.
Given custom ``ca.crt`` and ``ca.key`` stored locally, run the following:
::
kubectl create secret tls awx-demo-receptor-ca \
--cert=/path/to/ca.crt --key=/path/to/ca.key
The secret should be named ``{AWX Custom Resource name}-receptor-ca``. In the above, the AWX Custom Resource name is "awx-demo". Replace "awx-demo" with your AWX Custom Resource name.
If this secret is created after AWX is deployed, run the following to restart the deployment:
::
kubectl rollout restart deployment awx-demo
.. note::
Changing the receptor CA will sever connections to any existing execution nodes. These nodes will enter an *Unavailable* state, and jobs will not be able to run on them. You will need to download and re-run the install bundle for each execution node. This will replace the TLS certificate files with those signed by the new CA. The execution nodes will then appear in a *Ready* state after a few minutes.
Using a private image for the default EE Using a private image for the default EE

View File

@@ -7,7 +7,6 @@ Setting up LDAP Authentication
single: LDAP single: LDAP
pair: authentication; LDAP pair: authentication; LDAP
This chapter describes how to integrate LDAP authentication with AWX.
.. note:: .. note::

Some files were not shown because too many files have changed in this diff Show More