Compare commits

..

15 Commits

Author SHA1 Message Date
John Westcott IV
af59abbbc4 Fixing NUL characters in event data 2023-05-02 14:37:35 -04:00
John Westcott IV
8ab3514428 Fixing ValueError becoming DataError 2023-05-02 11:47:12 -04:00
John Westcott IV
98781a82c7 Merge branch 'feature-django-upgrade' of github.com:ansible/awx into feature-django-upgrade 2023-05-02 11:45:51 -04:00
John Westcott IV
d3fabe81d1 Fixing using QuerySet.iterator() after prefetch_related() without specifying chunk_size is deprecated 2023-04-28 15:32:20 -04:00
John Westcott IV
b274d0e5ef Removing deprecated django.utils.timezone.utc alias in favor of datetime.timezone.utc 2023-04-28 15:32:20 -04:00
John Westcott IV
4494412f0c Replacing depricated index_togeather with new indexes 2023-04-28 15:31:28 -04:00
John Westcott IV
b82bec7d04 Replacing psycopg2.copy_expert with psycopg3.copy 2023-04-28 12:35:49 -04:00
John Westcott IV
2cee1caad2 Fixing final CI error 2023-04-28 12:35:49 -04:00
John Westcott IV
c3045b1169 Updating old migrations for psycopg3
We have both psycopg2 and 3 installed in the AWX venv.

Old versions of Django only used psycopg2 but 4.2 now supports 3

Django 4.2 detects psycopg3 first and will use that over psycopg2

So old migrations needed to be updated to support psycopg3
2023-04-28 12:35:49 -04:00
John Westcott IV
27024378bc Upgrading djgno to 4.2 LTS 2023-04-28 12:35:49 -04:00
John Westcott IV
8eff90d4c0 Adding upgrade to django-oauth-toolkit pre-migraiton 2023-04-28 12:35:49 -04:00
John Westcott IV
9b633b6492 Fixing final CI error 2023-04-27 08:00:56 -04:00
John Westcott IV
11dbc56ecb Updating old migrations for psycopg3
We have both psycopg2 and 3 installed in the AWX venv.

Old versions of Django only used psycopg2 but 4.2 now supports 3

Django 4.2 detects psycopg3 first and will use that over psycopg2

So old migrations needed to be updated to support psycopg3
2023-04-26 09:10:25 -04:00
John Westcott IV
4c1bd1e88e Upgrading djgno to 4.2 LTS 2023-04-26 09:10:25 -04:00
John Westcott IV
865cb7518e Adding upgrade to django-oauth-toolkit pre-migraiton 2023-04-26 09:10:25 -04:00
1495 changed files with 14164 additions and 48467 deletions

View File

@@ -19,8 +19,6 @@ body:
required: true required: true
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response. - label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true required: true
- label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
required: true
- type: textarea - type: textarea
id: summary id: summary
@@ -44,7 +42,6 @@ body:
label: Select the relevant components label: Select the relevant components
options: options:
- label: UI - label: UI
- label: UI (tech preview)
- label: API - label: API
- label: Docs - label: Docs
- label: Collection - label: Collection

View File

@@ -1,34 +0,0 @@
name: Setup images for AWX
description: Builds new awx_devel image
inputs:
github-token:
description: GitHub Token for registry access
required: true
runs:
using: composite
steps:
- name: Get python version from Makefile
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
shell: bash
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'
- name: Log in to registry
shell: bash
run: |
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull latest devel image to warm cache
shell: bash
run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}
- name: Build image for current source checkout
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
COMPOSE_TAG=${{ github.base_ref }} \
make docker-compose-build

View File

@@ -1,77 +0,0 @@
name: Run AWX docker-compose
description: Runs AWX with `make docker-compose`
inputs:
github-token:
description: GitHub Token to pass to awx_devel_image
required: true
build-ui:
description: Should the UI be built?
required: false
default: false
type: boolean
outputs:
ip:
description: The IP of the tools_awx_1 container
value: ${{ steps.data.outputs.ip }}
admin-token:
description: OAuth token for admin user
value: ${{ steps.data.outputs.admin_token }}
runs:
using: composite
steps:
- name: Build awx_devel image for running checks
uses: ./.github/actions/awx_devel_image
with:
github-token: ${{ inputs.github-token }}
- name: Upgrade ansible-core
shell: bash
run: python3 -m pip install --upgrade ansible-core
- name: Install system deps
shell: bash
run: sudo apt-get install -y gettext
- name: Start AWX
shell: bash
run: |
DEV_DOCKER_OWNER=${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
COMPOSE_UP_OPTS="-d" \
make docker-compose
- name: Update default AWX password
shell: bash
run: |
SECONDS=0
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]; do
if [[ $SECONDS -gt 600 ]]; then
echo "Timing out, AWX never came up"
exit 1
fi
echo "Waiting for AWX..."
sleep 5
done
echo "AWX is up, updating the password..."
docker exec -i tools_awx_1 sh <<-EOSH
awx-manage update_password --username=admin --password=password
EOSH
- name: Build UI
# This must be a string comparison in composite actions:
# https://github.com/actions/runner/issues/2238
if: ${{ inputs.build-ui == 'true' }}
shell: bash
run: |
docker exec -i tools_awx_1 sh <<-EOSH
make ui-devel
EOSH
- name: Get instance data
id: data
shell: bash
run: |
AWX_IP=$(docker inspect -f '{{.NetworkSettings.Networks.awx.IPAddress}}' tools_awx_1)
ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT

View File

@@ -1,19 +0,0 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
inputs:
log-filename:
description: "*Unique* name of the log file"
required: true
runs:
using: composite
steps:
- name: Get AWX logs
shell: bash
run: |
docker logs tools_awx_1 > ${{ inputs.log-filename }}
- name: Upload AWX logs as artifact
uses: actions/upload-artifact@v3
with:
name: docker-compose-logs
path: ${{ inputs.log-filename }}

View File

@@ -1,10 +1,19 @@
version: 2 version: 2
updates: updates:
- package-ecosystem: "pip" - package-ecosystem: "npm"
directory: "docs/docsite/" directory: "/awx/ui"
schedule: schedule:
interval: "weekly" interval: "monthly"
open-pull-requests-limit: 2 open-pull-requests-limit: 5
allow:
- dependency-type: "production"
reviewers:
- "AlexSCorey"
- "keithjgrant"
- "kialam"
- "mabashian"
- "marshmalien"
labels: labels:
- "docs" - "component:ui"
- "dependencies" - "dependencies"
target-branch: "devel"

View File

@@ -6,8 +6,6 @@ needs_triage:
- "Feature Summary" - "Feature Summary"
"component:ui": "component:ui":
- "\\[X\\] UI" - "\\[X\\] UI"
"component:ui_next":
- "\\[X\\] UI \\(tech preview\\)"
"component:api": "component:api":
- "\\[X\\] API" - "\\[X\\] API"
"component:docs": "component:docs":

View File

@@ -15,4 +15,5 @@
"dependencies": "dependencies":
- any: ["awx/ui/package.json"] - any: ["awx/ui/package.json"]
- any: ["requirements/*"] - any: ["awx/requirements/*.txt"]
- any: ["awx/requirements/requirements.in"]

View File

@@ -7,8 +7,8 @@
## PRs/Issues ## PRs/Issues
### Visit the Forum or Matrix ### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)? - Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
### Denied Submission ### Denied Submission

View File

@@ -3,7 +3,7 @@ name: CI
env: env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_OWNER: ${{ github.repository_owner }} DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }} COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on: on:
pull_request: pull_request:
@@ -11,7 +11,6 @@ jobs:
common-tests: common-tests:
name: ${{ matrix.tests.name }} name: ${{ matrix.tests.name }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
permissions: permissions:
packages: write packages: write
contents: read contents: read
@@ -21,8 +20,6 @@ jobs:
tests: tests:
- name: api-test - name: api-test
command: /start_tests.sh command: /start_tests.sh
- name: api-migrations
command: /start_tests.sh test_migrations
- name: api-lint - name: api-lint
command: /var/lib/awx/venv/awx/bin/tox -e linters command: /var/lib/awx/venv/awx/bin/tox -e linters
- name: api-swagger - name: api-swagger
@@ -38,44 +35,29 @@ jobs:
- name: ui-test-general - name: ui-test-general
command: make ui-test-general command: make ui-test-general
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- name: Build awx_devel image for running checks
uses: ./.github/actions/awx_devel_image
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run check ${{ matrix.tests.name }} - name: Run check ${{ matrix.tests.name }}
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
dev-env: dev-env:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: ./.github/actions/run_awx_devel
id: awx
with:
build-ui: false
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run smoke test - name: Run smoke test
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
awx-operator: awx-operator:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
env:
DEBUG_OUTPUT_DIR: /tmp/awx_operator_molecule_test
steps: steps:
- name: Checkout awx - name: Checkout awx
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
path: awx path: awx
- name: Checkout awx-operator - name: Checkout awx-operator
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
repository: ansible/awx-operator repository: ansible/awx-operator
path: awx-operator path: awx-operator
@@ -85,7 +67,7 @@ jobs:
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }} - name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4 uses: actions/setup-python@v2
with: with:
python-version: ${{ env.py_version }} python-version: ${{ env.py_version }}
@@ -96,11 +78,11 @@ jobs:
- name: Build AWX image - name: Build AWX image
working-directory: awx working-directory: awx
run: | run: |
VERSION=`make version-for-buildyml` make awx-kube-build ansible-playbook -v tools/ansible/build.yml \
env: -e headless=yes \
COMPOSE_TAG: ci -e awx_image=awx \
DEV_DOCKER_TAG_BASE: local -e awx_image_tag=ci \
HEADLESS: yes -e ansible_python_interpreter=$(which python3)
- name: Run test deployment with awx-operator - name: Run test deployment with awx-operator
working-directory: awx-operator working-directory: awx-operator
@@ -109,28 +91,18 @@ jobs:
ansible-galaxy collection install -r molecule/requirements.yml ansible-galaxy collection install -r molecule/requirements.yml
sudo rm -f $(which kustomize) sudo rm -f $(which kustomize)
make kustomize make kustomize
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind -- --skip-tags=replicas KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind
env: env:
AWX_TEST_IMAGE: local/awx AWX_TEST_IMAGE: awx
AWX_TEST_VERSION: ci AWX_TEST_VERSION: ci
AWX_EE_TEST_IMAGE: quay.io/ansible/awx-ee:latest
STORE_DEBUG_OUTPUT: true
- name: Upload debug output
if: failure()
uses: actions/upload-artifact@v3
with:
name: awx-operator-debug-output
path: ${{ env.DEBUG_OUTPUT_DIR }}
collection-sanity: collection-sanity:
name: awx_collection sanity name: awx_collection sanity
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 30
strategy: strategy:
fail-fast: false fail-fast: false
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version. # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
- name: Upgrade ansible-core - name: Upgrade ansible-core
@@ -138,139 +110,7 @@ jobs:
- name: Run sanity tests - name: Run sanity tests
run: make test_collection_sanity run: make test_collection_sanity
collection-integration:
name: awx_collection integration
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
target-regex:
- name: a-h
regex: ^[a-h]
- name: i-p
regex: ^[i-p]
- name: r-z0-9
regex: ^[r-z0-9]
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/run_awx_devel
id: awx
with:
build-ui: false
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install dependencies for running tests
run: |
python3 -m pip install -e ./awxkit/
python3 -m pip install -r awx_collection/requirements.txt
- name: Run integration tests
run: |
echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
echo '[general]' > ~/.tower_cli.cfg
echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
env: env:
# needed due to cgroupsv2. This is fixed, but a stable release
# with the fix has not been made yet.
ANSIBLE_TEST_PREFER_PODMAN: 1 ANSIBLE_TEST_PREFER_PODMAN: 1
# Upload coverage report as artifact
- uses: actions/upload-artifact@v3
if: always()
with:
name: coverage-${{ matrix.target-regex.name }}
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
- uses: ./.github/actions/upload_awx_devel_logs
if: always()
with:
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
collection-integration-coverage-combine:
name: combine awx_collection integration coverage
runs-on: ubuntu-latest
timeout-minutes: 10
needs:
- collection-integration
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core
- name: Download coverage artifacts
uses: actions/download-artifact@v3
with:
path: coverage
- name: Combine coverage
run: |
make COLLECTION_VERSION=100.100.100-git install_collection
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
cd coverage
for i in coverage-*; do
cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
done
cd ~/.ansible/collections/ansible_collections/awx/awx
ansible-test coverage combine --requirements
ansible-test coverage html
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
# This is a huge hack, there's no official action for removing artifacts currently.
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
# steps, so we have to use github-script to get them.
#
# The advantage of doing this, though, is that we save on artifact storage space.
- name: Get secret artifact runtime URL
uses: actions/github-script@v6
id: get-runtime-url
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_URL } = process.env;
return ACTIONS_RUNTIME_URL;
- name: Get secret artifact runtime token
uses: actions/github-script@v6
id: get-runtime-token
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_TOKEN } = process.env;
return ACTIONS_RUNTIME_TOKEN;
- name: Remove intermediary artifacts
env:
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
run: |
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
artifacts=$(
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
)
for artifact in $artifacts; do
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
done
- name: Upload coverage report as artifact
uses: actions/upload-artifact@v3
with:
name: awx-collection-integration-coverage-html
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage

View File

@@ -3,55 +3,32 @@ name: Build/Push Development Images
env: env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: on:
workflow_dispatch:
push: push:
branches: branches:
- devel - devel
- release_* - release_*
- feature_* - feature_*
jobs: jobs:
push-development-images: push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 120
permissions: permissions:
packages: write packages: write
contents: read contents: read
strategy:
fail-fast: false
matrix:
build-targets:
- image-name: awx_devel
make-target: docker-compose-buildx
- image-name: awx_kube_devel
make-target: awx-kube-dev-buildx
- image-name: awx
make-target: awx-kube-buildx
steps: steps:
- uses: actions/checkout@v2
- name: Skipping build of awx image for non-awx repository - name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name
run: | run: |
echo "Skipping build of awx image for non-awx repository" echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
exit 0
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
- uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set GITHUB_ENV variables
run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
env: env:
OWNER: '${{ github.repository_owner }}' OWNER: '${{ github.repository_owner }}'
- name: Install python ${{ env.py_version }} - name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4 uses: actions/setup-python@v2
with: with:
python-version: ${{ env.py_version }} python-version: ${{ env.py_version }}
@@ -59,19 +36,20 @@ jobs:
run: | run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Setup node and npm - name: Pre-pull image to warm build cache
uses: actions/setup-node@v2
with:
node-version: '16.13.1'
if: matrix.build-targets.image-name == 'awx'
- name: Prebuild UI for awx image (to speed up build process)
run: | run: |
sudo apt-get install gettext docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
make ui-release docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
make ui-next docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
if: matrix.build-targets.image-name == 'awx'
- name: Build and push AWX devel images - name: Build images
run: | run: |
make ${{ matrix.build-targets.make-target }} DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
- name: Push image
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}

View File

@@ -1,17 +0,0 @@
---
name: Docsite CI
on:
pull_request:
jobs:
docsite-build:
name: docsite test build
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v3
- name: install tox
run: pip install tox
- name: Assure docs can be built
run: tox -e docs

View File

@@ -19,20 +19,41 @@ jobs:
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: ./.github/actions/run_awx_devel - name: Get python version from Makefile
id: awx run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with: with:
build-ui: true python-version: ${{ env.py_version }}
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install system deps
run: sudo apt-get install -y gettext
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
- name: Build UI
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
- name: Start AWX
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
- name: Pull awx_cypress_base image - name: Pull awx_cypress_base image
run: | run: |
docker pull quay.io/awx/awx_cypress_base:latest docker pull quay.io/awx/awx_cypress_base:latest
- name: Checkout test project - name: Checkout test project
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
repository: ${{ github.repository_owner }}/tower-qa repository: ${{ github.repository_owner }}/tower-qa
ssh-key: ${{ secrets.QA_REPO_KEY }} ssh-key: ${{ secrets.QA_REPO_KEY }}
@@ -44,6 +65,18 @@ jobs:
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
docker build -t awx-pf-tests . docker build -t awx-pf-tests .
- name: Update default AWX password
run: |
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
do
echo "Waiting for AWX..."
sleep 5;
done
echo "AWX is up, updating the password..."
docker exec -i tools_awx_1 sh <<-EOSH
awx-manage update_password --username=admin --password=password
EOSH
- name: Run E2E tests - name: Run E2E tests
env: env:
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }} CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
@@ -53,7 +86,7 @@ jobs:
export COMMIT_INFO_SHA=$GITHUB_SHA export COMMIT_INFO_SHA=$GITHUB_SHA
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
AWX_IP=${{ steps.awx.outputs.ip }} AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
printenv > .env printenv > .env
echo "Executing tests:" echo "Executing tests:"
docker run \ docker run \
@@ -69,7 +102,8 @@ jobs:
-w /e2e \ -w /e2e \
awx-pf-tests run --project . awx-pf-tests run --project .
- uses: ./.github/actions/upload_awx_devel_logs - name: Save AWX logs
if: always() uses: actions/upload-artifact@v2
with: with:
log-filename: e2e-${{ matrix.job }}.log name: AWX-logs-${{ matrix.job }}
path: make-docker-compose-output.log

View File

@@ -2,12 +2,13 @@
name: Feature branch deletion cleanup name: Feature branch deletion cleanup
env: env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on: delete on:
delete:
branches:
- feature_**
jobs: jobs:
branch_delete: push:
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
permissions: permissions:
packages: write packages: write
contents: read contents: read
@@ -20,4 +21,6 @@ jobs:
run: | run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}" ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \ ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read" -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

View File

@@ -6,19 +6,18 @@ on:
- opened - opened
- reopened - reopened
permissions: permissions:
contents: write # to fetch code contents: read # to fetch code
issues: write # to label issues issues: write # to label issues
jobs: jobs:
triage: triage:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
name: Label Issue name: Label Issue
steps: steps:
- name: Label Issue - name: Label Issue
uses: github/issue-labeler@v3.1 uses: github/issue-labeler@v2.4.1
with: with:
repo-token: "${{ secrets.GITHUB_TOKEN }}" repo-token: "${{ secrets.GITHUB_TOKEN }}"
not-before: 2021-12-07T07:00:00Z not-before: 2021-12-07T07:00:00Z
@@ -27,10 +26,9 @@ jobs:
community: community:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
name: Label Issue - Community name: Label Issue - Community
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
- name: Install python requests - name: Install python requests
run: pip install requests run: pip install requests

View File

@@ -8,13 +8,12 @@ on:
- synchronize - synchronize
permissions: permissions:
contents: write # to determine modified files (actions/labeler) contents: read # to determine modified files (actions/labeler)
pull-requests: write # to add labels to PRs (actions/labeler) pull-requests: write # to add labels to PRs (actions/labeler)
jobs: jobs:
triage: triage:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
name: Label PR name: Label PR
steps: steps:
@@ -26,10 +25,9 @@ jobs:
community: community:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
name: Label PR - Community name: Label PR - Community
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
- name: Install python requests - name: Install python requests
run: pip install requests run: pip install requests

View File

@@ -7,10 +7,8 @@ on:
types: [opened, edited, reopened, synchronize] types: [opened, edited, reopened, synchronize]
jobs: jobs:
pr-check: pr-check:
if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
name: Scan PR description for semantic versioning keywords name: Scan PR description for semantic versioning keywords
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
permissions: permissions:
packages: write packages: write
contents: read contents: read

View File

@@ -7,38 +7,23 @@ env:
on: on:
release: release:
types: [published] types: [published]
workflow_dispatch:
inputs:
tag_name:
description: 'Name for the tag of the release.'
required: true
permissions: permissions:
contents: read # to fetch code (actions/checkout) contents: read # to fetch code (actions/checkout)
jobs: jobs:
promote: promote:
if: endsWith(github.repository, '/awx') if: endsWith(github.repository, '/awx')
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 90
steps: steps:
- name: Set GitHub Env vars for workflow_dispatch event
if: ${{ github.event_name == 'workflow_dispatch' }}
run: |
echo "TAG_NAME=${{ github.event.inputs.tag_name }}" >> $GITHUB_ENV
- name: Set GitHub Env vars if release event
if: ${{ github.event_name == 'release' }}
run: |
echo "TAG_NAME=${{ env.TAG_NAME }}" >> $GITHUB_ENV
- name: Checkout awx - name: Checkout awx
uses: actions/checkout@v3 uses: actions/checkout@v2
- name: Get python version from Makefile - name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }} - name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4 uses: actions/setup-python@v2
with: with:
python-version: ${{ env.py_version }} python-version: ${{ env.py_version }}
@@ -55,20 +40,14 @@ jobs:
if: ${{ github.repository_owner != 'ansible' }} if: ${{ github.repository_owner != 'ansible' }}
- name: Build collection and publish to galaxy - name: Build collection and publish to galaxy
env:
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
COLLECTION_VERSION: ${{ env.TAG_NAME }}
COLLECTION_TEMPLATE_VERSION: true
run: | run: |
make build_collection COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
curl_with_redirects=$(curl --head -sLw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz | tail -1) if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
curl_without_redirects=$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz | tail -1) echo "Galaxy release already done"; \
if [[ "$curl_with_redirects" == "302" ]] || [[ "$curl_without_redirects" == "302" ]]; then else \
echo "Galaxy release already done";
else
ansible-galaxy collection publish \ ansible-galaxy collection publish \
--token=${{ secrets.GALAXY_TOKEN }} \ --token=${{ secrets.GALAXY_TOKEN }} \
awx_collection_build/${{ env.collection_namespace }}-awx-${{ env.TAG_NAME }}.tar.gz; awx_collection_build/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz; \
fi fi
- name: Set official pypi info - name: Set official pypi info
@@ -80,11 +59,9 @@ jobs:
if: ${{ github.repository_owner != 'ansible' }} if: ${{ github.repository_owner != 'ansible' }}
- name: Build awxkit and upload to pypi - name: Build awxkit and upload to pypi
env:
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ env.TAG_NAME }}
run: | run: |
git reset --hard git reset --hard
cd awxkit && python3 setup.py sdist bdist_wheel cd awxkit && python3 setup.py bdist_wheel
twine upload \ twine upload \
-r ${{ env.pypi_repo }} \ -r ${{ env.pypi_repo }} \
-u ${{ secrets.PYPI_USERNAME }} \ -u ${{ secrets.PYPI_USERNAME }} \
@@ -101,15 +78,11 @@ jobs:
- name: Re-tag and promote awx image - name: Re-tag and promote awx image
run: | run: |
docker buildx imagetools create \ docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \ docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
--tag quay.io/${{ github.repository }}:${{ env.TAG_NAME }} docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker buildx imagetools create \ docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
ghcr.io/${{ github.repository }}:${{ env.TAG_NAME }} \ docker push quay.io/${{ github.repository }}:latest
--tag quay.io/${{ github.repository }}:latest docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
- name: Re-tag and promote awx-ee image docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
run: |
docker buildx imagetools create \
ghcr.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }} \
--tag quay.io/${{ github.repository_owner }}/awx-ee:${{ env.TAG_NAME }}

View File

@@ -23,7 +23,6 @@ jobs:
stage: stage:
if: endsWith(github.repository, '/awx') if: endsWith(github.repository, '/awx')
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 90
permissions: permissions:
packages: write packages: write
contents: write contents: write
@@ -45,100 +44,68 @@ jobs:
exit 0 exit 0
- name: Checkout awx - name: Checkout awx
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
path: awx path: awx
- name: Checkout awx-operator - name: Get python version from Makefile
uses: actions/checkout@v3 run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with: with:
repository: ${{ github.repository_owner }}/awx-operator python-version: ${{ env.py_version }}
path: awx-operator
- name: Checkout awx-logos - name: Checkout awx-logos
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
repository: ansible/awx-logos repository: ansible/awx-logos
path: awx-logos path: awx-logos
- name: Get python version from Makefile - name: Checkout awx-operator
working-directory: awx uses: actions/checkout@v2
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with: with:
python-version: ${{ env.py_version }} repository: ${{ github.repository_owner }}/awx-operator
path: awx-operator
- name: Install playbook dependencies - name: Install playbook dependencies
run: | run: |
python3 -m pip install docker python3 -m pip install docker
- name: Log into registry ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Copy logos for inclusion in sdist for official build
working-directory: awx
run: |
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
- name: Setup node and npm
uses: actions/setup-node@v2
with:
node-version: '16.13.1'
- name: Prebuild UI for awx image (to speed up build process)
working-directory: awx
run: |
sudo apt-get install gettext
make ui-release
make ui-next
- name: Set build env variables
run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "AWX_TEST_VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "AWX_TEST_IMAGE=ghcr.io/${OWNER,,}/awx" >> $GITHUB_ENV
echo "AWX_EE_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-ee:${{ github.event.inputs.version }}" >> $GITHUB_ENV
echo "AWX_OPERATOR_TEST_IMAGE=ghcr.io/${OWNER,,}/awx-operator:${{ github.event.inputs.operator_version }}" >> $GITHUB_ENV
env:
OWNER: ${{ github.repository_owner }}
- name: Build and stage AWX - name: Build and stage AWX
working-directory: awx working-directory: awx
env:
DOCKER_BUILDX_PUSH: true
HEADLESS: false
PLATFORMS: linux/amd64,linux/arm64
run: | run: |
make awx-kube-buildx ansible-playbook -v tools/ansible/build.yml \
-e registry=ghcr.io \
-e registry_username=${{ github.actor }} \
-e registry_password=${{ secrets.GITHUB_TOKEN }} \
-e awx_image=${{ github.repository }} \
-e awx_version=${{ github.event.inputs.version }} \
-e ansible_python_interpreter=$(which python3) \
-e push=yes \
-e awx_official=yes
- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: tag awx-ee:latest with version input - name: tag awx-ee:latest with version input
run: | run: |
docker buildx imagetools create \ docker pull quay.io/ansible/awx-ee:latest
quay.io/ansible/awx-ee:latest \ docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
--tag ${AWX_EE_TEST_IMAGE} docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Stage awx-operator image - name: Build and stage awx-operator
working-directory: awx-operator working-directory: awx-operator
run: | run: |
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \ BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \ --build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMG=${AWX_OPERATOR_TEST_IMAGE} \ IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
make docker-buildx VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
- name: Pulling images for test deployment with awx-operator
# awx operator molecue test expect to kind load image and buildx exports image to registry and not local
run: |
docker pull ${AWX_OPERATOR_TEST_IMAGE}
docker pull ${AWX_EE_TEST_IMAGE}
docker pull ${AWX_TEST_IMAGE}:${AWX_TEST_VERSION}
- name: Run test deployment with awx-operator - name: Run test deployment with awx-operator
working-directory: awx-operator working-directory: awx-operator
@@ -148,6 +115,10 @@ jobs:
sudo rm -f $(which kustomize) sudo rm -f $(which kustomize)
make kustomize make kustomize
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule test -s kind KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule test -s kind
env:
AWX_TEST_IMAGE: ${{ github.repository }}
AWX_TEST_VERSION: ${{ github.event.inputs.version }}
AWX_EE_TEST_IMAGE: ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
- name: Create draft release for AWX - name: Create draft release for AWX
working-directory: awx working-directory: awx

View File

@@ -9,7 +9,6 @@ jobs:
name: Update Dependabot Prs name: Update Dependabot Prs
if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui') if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
steps: steps:
- name: Checkout branch - name: Checkout branch

View File

@@ -13,18 +13,17 @@ on:
jobs: jobs:
push: push:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 60
permissions: permissions:
packages: write packages: write
contents: read contents: read
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v2
- name: Get python version from Makefile - name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }} - name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4 uses: actions/setup-python@v2
with: with:
python-version: ${{ env.py_version }} python-version: ${{ env.py_version }}

12
.gitignore vendored
View File

@@ -46,11 +46,6 @@ tools/docker-compose/overrides/
tools/docker-compose-minikube/_sources tools/docker-compose-minikube/_sources
tools/docker-compose/keycloak.awx.realm.json tools/docker-compose/keycloak.awx.realm.json
!tools/docker-compose/editable_dependencies
tools/docker-compose/editable_dependencies/*
!tools/docker-compose/editable_dependencies/README.md
!tools/docker-compose/editable_dependencies/install.sh
# Tower setup playbook testing # Tower setup playbook testing
setup/test/roles/postgresql setup/test/roles/postgresql
**/provision_docker **/provision_docker
@@ -170,10 +165,3 @@ use_dev_supervisor.txt
awx/ui_next/src awx/ui_next/src
awx/ui_next/build awx/ui_next/build
# Docs build stuff
docs/docsite/build/
_readthedocs/
# Pyenv
.python-version

View File

@@ -1,5 +0,0 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
"docs/docsite/rst/administration/oauth2_token_auth.rst",
]

View File

@@ -1,5 +0,0 @@
[tool.pip-tools]
resolver = "backtracking"
allow-unsafe = true
strip-extras = true
quiet = true

View File

@@ -1,16 +0,0 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
build:
os: ubuntu-22.04
tools:
python: >-
3.11
commands:
- pip install --user tox
- python3 -m tox -e docs --notest -v
- python3 -m tox -e docs --skip-pkg-install -q
- mkdir -p _readthedocs/html/
- mv docs/docsite/build/html/* _readthedocs/html/

113
.vscode/launch.json vendored
View File

@@ -1,113 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "run_ws_heartbeat",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_ws_heartbeat"],
"django": true,
"preLaunchTask": "stop awx-ws-heartbeat",
"postDebugTask": "start awx-ws-heartbeat"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_callback_receiver",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_callback_receiver"],
"django": true,
"preLaunchTask": "stop awx-receiver",
"postDebugTask": "start awx-receiver"
},
{
"name": "run_dispatcher",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_dispatcher"],
"django": true,
"preLaunchTask": "stop awx-dispatcher",
"postDebugTask": "start awx-dispatcher"
},
{
"name": "run_rsyslog_configurer",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_rsyslog_configurer"],
"django": true,
"preLaunchTask": "stop awx-rsyslog-configurer",
"postDebugTask": "start awx-rsyslog-configurer"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_wsrelay",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_wsrelay"],
"django": true,
"preLaunchTask": "stop awx-wsrelay",
"postDebugTask": "start awx-wsrelay"
},
{
"name": "daphne",
"type": "debugpy",
"request": "launch",
"program": "/var/lib/awx/venv/awx/bin/daphne",
"args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
"django": true,
"preLaunchTask": "stop awx-daphne",
"postDebugTask": "start awx-daphne"
},
{
"name": "runserver(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "runserver_plus(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver_plus", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi and install Werkzeug",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "shell_plus",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["shell_plus"],
"django": true,
},
]
}

100
.vscode/tasks.json vendored
View File

@@ -1,100 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "start awx-cache-clear",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-cache-clear"
},
{
"label": "stop awx-cache-clear",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-cache-clear"
},
{
"label": "start awx-daphne",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-daphne"
},
{
"label": "stop awx-daphne",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-daphne"
},
{
"label": "start awx-dispatcher",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-dispatcher"
},
{
"label": "stop awx-dispatcher",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-dispatcher"
},
{
"label": "start awx-receiver",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-receiver"
},
{
"label": "stop awx-receiver",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-receiver"
},
{
"label": "start awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
},
{
"label": "stop awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
},
{
"label": "start awx-rsyslogd",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslogd"
},
{
"label": "stop awx-rsyslogd",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslogd"
},
{
"label": "start awx-uwsgi",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi and install Werkzeug",
"type": "shell",
"command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "start awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-ws-heartbeat"
},
{
"label": "stop awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
},
{
"label": "start awx-wsrelay",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-wsrelay"
},
{
"label": "stop awx-wsrelay",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-wsrelay"
}
]
}

View File

@@ -10,7 +10,6 @@ ignore: |
tools/docker-compose/_sources tools/docker-compose/_sources
# django template files # django template files
awx/api/templates/instance_install_bundle/** awx/api/templates/instance_install_bundle/**
.readthedocs.yaml
extends: default extends: default

View File

@@ -4,6 +4,6 @@
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades. Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
As of version 18.0, `awx-operator` is the preferred install/upgrade method. Users who wish to upgrade modern AWX installations should follow the instructions at: Users who wish to upgrade modern AWX installations should follow the instructions at:
https://github.com/ansible/awx-operator/blob/devel/docs/upgrade/upgrading.md https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions

View File

@@ -31,7 +31,7 @@ If your issue isn't considered high priority, then please be patient as it may t
`state:needs_info` The issue needs more information. This could be more debug output, more specifics out the system such as version information. Any detail that is currently preventing this issue from moving forward. This should be considered a blocked state. `state:needs_info` The issue needs more information. This could be more debug output, more specifics out the system such as version information. Any detail that is currently preventing this issue from moving forward. This should be considered a blocked state.
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familiar with an area of the code base the issue is for. `state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familar with an area of the code base the issue is for.
`state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on. `state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on.
@@ -80,7 +80,7 @@ If any of those items are missing your pull request will still get the `needs_tr
Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc... Currently you can expect awxbot to add common labels such as `state:needs_triage`, `type:bug`, `component:docs`, etc...
These labels are determined by the template data. Please use the template and fill it out as accurately as possible. These labels are determined by the template data. Please use the template and fill it out as accurately as possible.
The `state:needs_triage` label will remain on your pull request until a person has looked at it. The `state:needs_triage` label will will remain on your pull request until a person has looked at it.
You can also expect the bot to CC maintainers of specific areas of the code, this will notify them that there is a pull request by placing a comment on the pull request. You can also expect the bot to CC maintainers of specific areas of the code, this will notify them that there is a pull request by placing a comment on the pull request.
The comment will look something like `CC @matburt @wwitzel3 ...`. The comment will look something like `CC @matburt @wwitzel3 ...`.

View File

@@ -22,7 +22,7 @@ recursive-exclude awx/settings local_settings.py*
include tools/scripts/request_tower_configuration.sh include tools/scripts/request_tower_configuration.sh
include tools/scripts/request_tower_configuration.ps1 include tools/scripts/request_tower_configuration.ps1
include tools/scripts/automation-controller-service include tools/scripts/automation-controller-service
include tools/scripts/rsyslog-4xx-recovery include tools/scripts/failure-event-handler
include tools/scripts/awx-python include tools/scripts/awx-python
include awx/playbooks/library/mkfifo.py include awx/playbooks/library/mkfifo.py
include tools/sosreport/* include tools/sosreport/*

126
Makefile
View File

@@ -1,16 +1,14 @@
-include awx/ui_next/Makefile -include awx/ui_next/Makefile
PYTHON := $(notdir $(shell for i in python3.11 python3; do command -v $$i; done|sed 1q)) PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
SHELL := bash DOCKER_COMPOSE ?= docker-compose
DOCKER_COMPOSE ?= docker compose
OFFICIAL ?= no OFFICIAL ?= no
NODE ?= node NODE ?= node
NPM_BIN ?= npm NPM_BIN ?= npm
KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null) VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
# ansible-test requires semver compatable version, so we allow overrides to hack it # ansible-test requires semver compatable version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3) COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -29,8 +27,6 @@ COLLECTION_TEMPLATE_VERSION ?= false
# NOTE: This defaults the container image version to the branch that's active # NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH) COMPOSE_TAG ?= $(GIT_BRANCH)
MAIN_NODE_TYPE ?= hybrid MAIN_NODE_TYPE ?= hybrid
# If set to true docker-compose will also start a pgbouncer instance and use it
PGBOUNCER ?= false
# If set to true docker-compose will also start a keycloak instance # If set to true docker-compose will also start a keycloak instance
KEYCLOAK ?= false KEYCLOAK ?= false
# If set to true docker-compose will also start an ldap instance # If set to true docker-compose will also start an ldap instance
@@ -41,31 +37,22 @@ SPLUNK ?= false
PROMETHEUS ?= false PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance # If set to true docker-compose will also start a grafana instance
GRAFANA ?= false GRAFANA ?= false
# If set to true docker-compose will also start a hashicorp vault instance
VAULT ?= false
# If set to true docker-compose will also start a hashicorp vault instance with TLS enabled
VAULT_TLS ?= false
# If set to true docker-compose will also start a tacacs+ instance # If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false TACACS ?= false
# If set to true docker-compose will install editable dependencies
EDITABLE_DEPENDENCIES ?= false
VENV_BASE ?= /var/lib/awx/venv VENV_BASE ?= /var/lib/awx/venv
DEV_DOCKER_OWNER ?= ansible DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
# Docker will only accept lowercase, so github names like Paul need to be paul
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
# Python packages to install only from source (not from binary wheels) # Python packages to install only from source (not from binary wheels)
# Comma separated list # Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting # These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements # to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0 VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
NAME ?= awx NAME ?= awx
@@ -77,16 +64,13 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz
I18N_FLAG_FILE = .i18n_built I18N_FLAG_FILE = .i18n_built
## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x
.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \ .PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \ develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \ receiver test test_unit test_coverage coverage_html \
sdist \ sdist \
ui-release ui-devel \ ui-release ui-devel \
VERSION PYTHON_VERSION docker-compose-sources \ VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit .git/hooks/pre-commit github_ci_setup github_ci_runner
clean-tmp: clean-tmp:
rm -rf tmp/ rm -rf tmp/
@@ -218,6 +202,8 @@ collectstatic:
fi; \ fi; \
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1 $(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1
DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*
uwsgi: collectstatic uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
@@ -225,7 +211,7 @@ uwsgi: collectstatic
uwsgi /etc/tower/uwsgi.ini uwsgi /etc/tower/uwsgi.ini
awx-autoreload: awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx @/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
daphne: daphne:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
@@ -278,11 +264,11 @@ run-wsrelay:
$(PYTHON) manage.py run_wsrelay $(PYTHON) manage.py run_wsrelay
## Start the heartbeat process in background in development environment. ## Start the heartbeat process in background in development environment.
run-ws-heartbeat: run-heartbeet:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
$(PYTHON) manage.py run_ws_heartbeat $(PYTHON) manage.py run_heartbeet
reports: reports:
mkdir -p $@ mkdir -p $@
@@ -305,7 +291,7 @@ swagger: reports
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report) (set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
check: black check: black
@@ -329,16 +315,21 @@ test:
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3 cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file' awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
test_migrations: ## Login to Github container image registry, pull image, then build image.
if [ "$(VENV_BASE)" ]; then \ github_ci_setup:
. $(VENV_BASE)/awx/bin/activate; \ # GITHUB_ACTOR is automatic github actions env var
fi; \ # CI_GITHUB_TOKEN is defined in .github files
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS) echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
$(MAKE) docker-compose-build
## Runs AWX_DOCKER_CMD inside a new docker container. ## Runs AWX_DOCKER_CMD inside a new docker container.
docker-runner: docker-runner:
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD) docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
github_ci_runner: github_ci_setup docker-runner
test_collection: test_collection:
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
if [ "$(VENV_BASE)" ]; then \ if [ "$(VENV_BASE)" ]; then \
@@ -384,7 +375,7 @@ test_collection_sanity:
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS) cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
test_collection_integration: install_collection test_collection_integration: install_collection
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET) cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
test_unit: test_unit:
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
@@ -526,32 +517,17 @@ docker-compose-sources: .git/hooks/pre-commit
-e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \ -e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
-e execution_node_count=$(EXECUTION_NODE_COUNT) \ -e execution_node_count=$(EXECUTION_NODE_COUNT) \
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \ -e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
-e enable_pgbouncer=$(PGBOUNCER) \
-e enable_keycloak=$(KEYCLOAK) \ -e enable_keycloak=$(KEYCLOAK) \
-e enable_ldap=$(LDAP) \ -e enable_ldap=$(LDAP) \
-e enable_splunk=$(SPLUNK) \ -e enable_splunk=$(SPLUNK) \
-e enable_prometheus=$(PROMETHEUS) \ -e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA) \ -e enable_grafana=$(GRAFANA) \
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \
-e enable_tacacs=$(TACACS) \ -e enable_tacacs=$(TACACS) \
-e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \ $(EXTRA_SOURCES_ANSIBLE_OPTS)
$(EXTRA_SOURCES_ANSIBLE_OPTS)
docker-compose: awx/projects docker-compose-sources docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \
-e enable_ldap=$(LDAP); \
$(MAKE) docker-compose-up
docker-compose-up:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose-down:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) down --remove-orphans
docker-compose-credential-plugins: awx/projects docker-compose-sources docker-compose-credential-plugins: awx/projects docker-compose-sources
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m" echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
@@ -596,27 +572,12 @@ docker-compose-build: Dockerfile.dev
--build-arg BUILDKIT_INLINE_CACHE=1 \ --build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) . --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .
.PHONY: docker-compose-buildx
## Build awx_devel image for docker compose development environment for multiple architectures
docker-compose-buildx: Dockerfile.dev
- docker buildx create --name docker-compose-buildx
docker buildx use docker-compose-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEVEL_IMAGE_NAME) \
-f Dockerfile.dev .
- docker buildx rm docker-compose-buildx
docker-clean: docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);) -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);) -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_var_lib_awx tools_awx_db tools_awx_db_15 tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(shell docker volume ls --filter name=tools_redis_socket_ -q) docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose docker-refresh: docker-clean docker-compose
@@ -638,6 +599,9 @@ clean-elk:
docker rm tools_elasticsearch_1 docker rm tools_elasticsearch_1
docker rm tools_kibana_1 docker rm tools_kibana_1
psql-container:
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'
VERSION: VERSION:
@echo "awx: $(VERSION)" @echo "awx: $(VERSION)"
@@ -670,21 +634,6 @@ awx-kube-build: Dockerfile
--build-arg HEADLESS=$(HEADLESS) \ --build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) . -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .
## Build multi-arch awx image for deployment on Kubernetes environment.
awx-kube-buildx: Dockerfile
- docker buildx create --name awx-kube-buildx
docker buildx use awx-kube-buildx
- docker buildx build \
--push \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
-f Dockerfile .
- docker buildx rm awx-kube-buildx
.PHONY: Dockerfile.kube-dev .PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image ## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
@@ -701,21 +650,6 @@ awx-kube-dev-build: Dockerfile.kube-dev
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \ --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) . -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-buildx: Dockerfile.kube-dev
- docker buildx create --name awx-kube-dev-buildx
docker buildx use awx-kube-dev-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-f Dockerfile.kube-dev .
- docker buildx rm awx-kube-dev-buildx
kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
# Translation TASKS # Translation TASKS
# -------------------------------------- # --------------------------------------
@@ -723,12 +657,10 @@ kind-dev-load: awx-kube-dev-build
## generate UI .pot file, an empty template of strings yet to be translated ## generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE) pot: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean
## generate UI .po files for each locale (will update translated strings for `en`) ## generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE) po: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean $(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean
## generate API django .pot .po ## generate API django .pot .po
messages: messages:

View File

@@ -1,5 +1,5 @@
[![CI](https://github.com/ansible/awx/actions/workflows/ci.yml/badge.svg?branch=devel)](https://github.com/ansible/awx/actions/workflows/ci.yml) [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-yellow.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [![Apache v2 License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](https://github.com/ansible/awx/blob/devel/LICENSE.md) [![AWX Mailing List](https://img.shields.io/badge/mailing%20list-AWX-orange.svg)](https://groups.google.com/g/awx-project) [![CI](https://github.com/ansible/awx/actions/workflows/ci.yml/badge.svg?branch=devel)](https://github.com/ansible/awx/actions/workflows/ci.yml) [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-yellow.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [![Apache v2 License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](https://github.com/ansible/awx/blob/devel/LICENSE.md) [![AWX Mailing List](https://img.shields.io/badge/mailing%20list-AWX-orange.svg)](https://groups.google.com/g/awx-project)
[![Ansible Matrix](https://img.shields.io/badge/matrix-Ansible%20Community-blueviolet.svg?logo=matrix)](https://chat.ansible.im/#/welcome) [![Ansible Discourse](https://img.shields.io/badge/discourse-Ansible%20Community-yellowgreen.svg?logo=discourse)](https://forum.ansible.com) [![IRC Chat - #ansible-awx](https://img.shields.io/badge/IRC-%23ansible--awx-blueviolet.svg)](https://libera.chat)
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" /> <img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
@@ -7,7 +7,7 @@ AWX provides a web-based user interface, REST API, and task engine built on top
To install AWX, please view the [Install guide](./INSTALL.md). To install AWX, please view the [Install guide](./INSTALL.md).
To learn more about using AWX, view the [AWX docs site](https://ansible.readthedocs.io/projects/awx/en/latest/). To learn more about using AWX, and Tower, view the [Tower docs site](http://docs.ansible.com/ansible-tower/index.html).
The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq). The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
Code of Conduct Code of Conduct
--------------- ---------------
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com) We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
Get Involved Get Involved
------------ ------------
We welcome your feedback and ideas. Here's how to reach us with feedback and questions: We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) - Join the `#ansible-awx` channel on irc.libera.chat
- Join the [Ansible Community Forum](https://forum.ansible.com) - Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)

View File

@@ -52,14 +52,39 @@ try:
except ImportError: # pragma: no cover except ImportError: # pragma: no cover
MODE = 'production' MODE = 'production'
import hashlib
try: try:
import django # noqa: F401 import django # noqa: F401
HAS_DJANGO = True
except ImportError: except ImportError:
pass HAS_DJANGO = False
else: else:
from django.db.backends.base import schema
from django.db.models import indexes
from django.db.backends.utils import names_digest
from django.db import connection from django.db import connection
if HAS_DJANGO is True:
# See upgrade blocker note in requirements/README.md
try:
names_digest('foo', 'bar', 'baz', length=8)
except ValueError:
def names_digest(*args, length):
"""
Generate a 32-bit digest of a set of arguments that can be used to shorten
identifying names. Supports use in FIPS environments.
"""
h = hashlib.md5(usedforsecurity=False)
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
schema.names_digest = names_digest
indexes.names_digest = names_digest
def find_commands(management_dir): def find_commands(management_dir):
# Modified version of function from django/core/management/__init__.py. # Modified version of function from django/core/management/__init__.py.
@@ -154,12 +179,10 @@ def manage():
from django.conf import settings from django.conf import settings
from django.core.management import execute_from_command_line from django.core.management import execute_from_command_line
# enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1 # enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
# In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
# The return of connection.pg_version is something like 12013
if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development': if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
if (connection.pg_version // 10000) < 12: if (connection.pg_version // 10000) < 12:
sys.stderr.write("At a minimum, postgres version 12 is required\n") sys.stderr.write("Postgres version 12 is required\n")
sys.exit(1) sys.exit(1)
if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover

View File

@@ -93,7 +93,6 @@ register(
default='', default='',
label=_('Login redirect override URL'), label=_('Login redirect override URL'),
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'), help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
warning_text=_('Changing the redirect URL could impact the ability to login if local authentication is also disabled.'),
category=_('Authentication'), category=_('Authentication'),
category_slug='authentication', category_slug='authentication',
) )

450
awx/api/filters.py Normal file
View File

@@ -0,0 +1,450 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import re
import json
from functools import reduce
# Django
from django.core.exceptions import FieldError, ValidationError, FieldDoesNotExist
from django.db import models
from django.db.models import Q, CharField, IntegerField, BooleanField, TextField, JSONField
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
from django.db.models.functions import Cast
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.filters import BaseFilterBackend
# AWX
from awx.main.utils import get_type_for_model, to_python_boolean
from awx.main.utils.db import get_all_field_names
class TypeFilterBackend(BaseFilterBackend):
    """
    Filter on type field now returned with all objects.
    """

    def filter_queryset(self, request, queryset, view):
        try:
            # Scan the query params for a ?type= filter; the last occurrence
            # wins, and a comma-separated value selects multiple types.
            requested_types = None
            for param, raw in request.query_params.items():
                if param != 'type':
                    continue
                requested_types = raw.split(',') if ',' in raw else (raw,)
            if requested_types:
                # Map API type names -> ContentType pks for the models exposed
                # by the API (main app models plus the auth User model).
                type_to_pk = {}
                ct_qs = ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user'))
                for ct in ct_qs:
                    model_cls = ct.model_class()
                    if model_cls:
                        type_to_pk[get_type_for_model(model_cls)] = ct.pk
                model = queryset.model
                if 'polymorphic_ctype' in get_all_field_names(model):
                    # Polymorphic list view: keep only rows whose concrete
                    # content type matches one of the requested type names.
                    wanted_pks = {pk for type_name, pk in type_to_pk.items() if type_name in requested_types}
                    queryset = queryset.filter(polymorphic_ctype_id__in=wanted_pks)
                elif get_type_for_model(model) not in requested_types:
                    # Homogeneous list whose type was not requested: no results.
                    queryset = queryset.none()
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)
def get_fields_from_path(model, path):
    """
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the fields in the line, and also the revised lookup path
    ex., given
        model=Organization
        path='project__timeout'
    returns tuple of fields traversed as well and a corrected path,
    for special cases we do substitutions
        ([<IntegerField for timeout>], 'project__timeout')
    """
    traversed = []  # every field seen so far, used to detect traversal loops
    corrected = []  # path segments after any substitutions
    for segment in path.split('__'):
        if model is None:
            raise ParseError(_('No related model for field {}.').format(segment))
        # HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
        if model._meta.object_name in ('Project', 'InventorySource'):
            legacy_renames = {
                'current_update': 'current_job',
                'last_update': 'last_job',
                'last_update_failed': 'last_job_failed',
                'last_updated': 'last_job_run',
            }
            segment = legacy_renames.get(segment, segment)
        if segment == 'type' and 'polymorphic_ctype' in get_all_field_names(model):
            # 'type' on a polymorphic model really means the ctype's model name.
            segment = 'polymorphic_ctype'
            corrected.append('polymorphic_ctype__model')
        else:
            corrected.append(segment)
        if segment in getattr(model, 'PASSWORD_FIELDS', ()):
            raise PermissionDenied(_('Filtering on password fields is not allowed.'))
        if segment == 'pk':
            field = model._meta.pk
        else:
            # Type names are stored internally without underscores; try the
            # underscore-stripped name against reverse relations first.
            stripped = segment.replace("_", "")
            if stripped in model._meta.fields_map.keys():
                field = model._meta.fields_map[stripped]
                corrected[-1] = stripped
            else:
                field = model._meta.get_field(segment)
            if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % segment))
            elif getattr(field, '__prevent_search__', False):
                raise PermissionDenied(_('Filtering on %s is not allowed.' % segment))
        if field in traversed:
            # Field traversed twice, could create infinite JOINs, DoSing Tower
            raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
        traversed.append(field)
        model = getattr(field, 'related_model', None)
    return traversed, '__'.join(corrected)
def get_field_from_path(model, path):
    """
    Given a Django ORM lookup path (possibly over multiple models)
    Returns the last field in the line, and the revised lookup path
    ex.
        (<IntegerField for timeout>, 'project__timeout')
    """
    fields, revised_path = get_fields_from_path(model, path)
    return (fields[-1], revised_path)
class FieldLookupBackend(BaseFilterBackend):
    """
    Filter using field lookups provided via query string parameters.
    """

    # Query params that are handled by other backends / pagination, never
    # treated as field lookups.
    RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')

    SUPPORTED_LOOKUPS = (
        'exact',
        'iexact',
        'contains',
        'icontains',
        'startswith',
        'istartswith',
        'endswith',
        'iendswith',
        'regex',
        'iregex',
        'gt',
        'gte',
        'lt',
        'lte',
        'in',
        'isnull',
        'search',
    )

    # A list of fields that we know can be filtered on without the possibility
    # of introducing duplicates
    NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)

    def get_fields_from_lookup(self, model, lookup):
        """
        Split a raw lookup (e.g. 'project__name__icontains') into the list of
        traversed fields and the corrected lookup string (path + suffix).
        A missing suffix defaults to 'exact'.
        """
        if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
            path, suffix = lookup.rsplit('__', 1)
        else:
            path = lookup
            suffix = 'exact'

        if not path:
            raise ParseError(_('Query string field name not provided.'))

        # FIXME: Could build up a list of models used across relationships, use
        # those lookups combined with request.user.get_queryset(Model) to make
        # sure user cannot query using objects he could not view.
        field_list, new_path = get_fields_from_path(model, path)

        new_lookup = '__'.join([new_path, suffix])
        return field_list, new_lookup

    def get_field_from_lookup(self, model, lookup):
        '''Method to match return type of single field, if needed.'''
        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        return (field_list[-1], new_lookup)

    def to_python_related(self, value):
        """Coerce a related-object filter value to an integer pk or None."""
        value = force_str(value)
        if value.lower() in ('none', 'null'):
            return None
        else:
            return int(value)

    def value_to_python_for_field(self, field, value):
        """Convert a raw string value to the Python type the field expects."""
        if isinstance(field, models.BooleanField):
            return to_python_boolean(value)
        elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
            try:
                return self.to_python_related(value)
            except ValueError:
                raise ParseError(_('Invalid {field_name} id: {field_id}').format(field_name=getattr(field, 'name', 'related field'), field_id=value))
        else:
            return field.to_python(value)

    def value_to_python(self, model, lookup, value):
        """
        Validate a lookup and coerce its raw query-string value.

        Returns (value, lookup, needs_distinct); for `__search` lookups the
        second element is instead a list of `__icontains` lookups spanning the
        related model's well-known text fields.
        """
        try:
            lookup.encode("ascii")
        except UnicodeEncodeError:
            raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)

        field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
        field = field_list[-1]

        # Joining across to-many relations can duplicate rows; flag the need
        # for .distinct() unless every traversed field is known-safe.
        needs_distinct = not all(isinstance(f, self.NO_DUPLICATES_ALLOW_LIST) for f in field_list)

        # Type names are stored without underscores internally, but are
        # presented and serialized over the API containing underscores so we
        # remove `_` for polymorphic_ctype__model lookups.
        if new_lookup.startswith('polymorphic_ctype__model'):
            value = value.replace('_', '')
        elif new_lookup.endswith('__isnull'):
            value = to_python_boolean(value)
        elif new_lookup.endswith('__in'):
            items = []
            if not value:
                raise ValueError('cannot provide empty value for __in')
            for item in value.split(','):
                items.append(self.value_to_python_for_field(field, item))
            value = items
        elif new_lookup.endswith('__regex') or new_lookup.endswith('__iregex'):
            # Validate the pattern up front so a bad regex is a 400, not a DB error.
            try:
                re.compile(value)
            except re.error as e:
                raise ValueError(e.args[0])
        elif new_lookup.endswith('__iexact'):
            if not isinstance(field, (CharField, TextField)):
                raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
        elif new_lookup.endswith('__search'):
            related_model = getattr(field, 'related_model', None)
            if not related_model:
                raise ValueError('%s is not searchable' % new_lookup[:-8])
            new_lookups = []
            for rm_field in related_model._meta.fields:
                if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
                    new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
            return value, new_lookups, needs_distinct
        else:
            if isinstance(field, JSONField):
                # JSON fields are filtered through a text-cast annotation added
                # by filter_queryset() below.
                new_lookup = new_lookup.replace(field.name, f'{field.name}_as_txt')
            value = self.value_to_python_for_field(field, value)
        return value, new_lookup, needs_distinct

    def filter_queryset(self, request, queryset, view):
        try:
            # Apply filters specified via query_params. Each entry in the lists
            # below is (negate, field, value).
            and_filters = []
            or_filters = []
            chain_filters = []
            role_filters = []
            search_filters = {}
            needs_distinct = False
            # Can only have two values: 'AND', 'OR'
            # If 'AND' is used, an item must satisfy all conditions to show up in the results.
            # If 'OR' is used, an item just needs to satisfy one condition to appear in results.
            search_filter_relation = 'OR'
            for key, values in request.query_params.lists():
                if key in self.RESERVED_NAMES:
                    continue

                # HACK: make `created` available via API for the Django User ORM model
                # so it keeps compatibility with other objects which expose the `created` attr.
                if queryset.model._meta.object_name == 'User' and key.startswith('created'):
                    key = key.replace('created', 'date_joined')

                # HACK: Make job event filtering by host name mostly work even
                # when not capturing job event hosts M2M.
                if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
                    key = key.replace('hosts__name', 'or__host__name')
                    or_filters.append((False, 'host__name__isnull', True))

                # Custom __int filter suffix (internal use only).
                q_int = False
                if key.endswith('__int'):
                    key = key[:-5]
                    q_int = True

                # RBAC filtering
                if key == 'role_level':
                    role_filters.append(values[0])
                    continue

                # Search across related objects.
                if key.endswith('__search'):
                    if values and ',' in values[0]:
                        search_filter_relation = 'AND'
                        values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
                    for value in values:
                        # NOTE: the discarded distinct flag must NOT be unpacked
                        # into `_` -- that would shadow the module-level gettext
                        # alias and break the translated role_level error below.
                        search_value, new_keys, _unused_distinct = self.value_to_python(queryset.model, key, force_str(value))
                        assert isinstance(new_keys, list)
                        search_filters[search_value] = new_keys
                    # by definition, search *only* joins across relations,
                    # so it _always_ needs a .distinct()
                    needs_distinct = True
                    continue

                # Custom chain__ and or__ filters, mutually exclusive (both can
                # precede not__).
                q_chain = False
                q_or = False
                if key.startswith('chain__'):
                    key = key[7:]
                    q_chain = True
                elif key.startswith('or__'):
                    key = key[4:]
                    q_or = True

                # Custom not__ filter prefix.
                q_not = False
                if key.startswith('not__'):
                    key = key[5:]
                    q_not = True

                # Convert value(s) to python and add to the appropriate list.
                for value in values:
                    if q_int:
                        value = int(value)

                    value, new_key, distinct = self.value_to_python(queryset.model, key, value)
                    if distinct:
                        needs_distinct = True

                    if '_as_txt' in new_key:
                        # Materialize the text-cast annotation required by a
                        # JSONField lookup rewritten in value_to_python().
                        fname = next(item for item in new_key.split('__') if item.endswith('_as_txt'))
                        queryset = queryset.annotate(**{fname: Cast(fname[:-7], output_field=TextField())})

                    if q_chain:
                        chain_filters.append((q_not, new_key, value))
                    elif q_or:
                        or_filters.append((q_not, new_key, value))
                    else:
                        and_filters.append((q_not, new_key, value))

            # Now build Q objects for database query filter.
            if and_filters or or_filters or chain_filters or role_filters or search_filters:
                args = []
                for n, k, v in and_filters:
                    if n:
                        args.append(~Q(**{k: v}))
                    else:
                        args.append(Q(**{k: v}))
                for role_name in role_filters:
                    if not hasattr(queryset.model, 'accessible_pk_qs'):
                        raise ParseError(_('Cannot apply role_level filter to this list because its model ' 'does not use roles for access control.'))
                    args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
                if or_filters:
                    q = Q()
                    for n, k, v in or_filters:
                        if n:
                            q |= ~Q(**{k: v})
                        else:
                            q |= Q(**{k: v})
                    args.append(q)
                if search_filters and search_filter_relation == 'OR':
                    q = Q()
                    for term, constrains in search_filters.items():
                        for constrain in constrains:
                            q |= Q(**{constrain: term})
                    args.append(q)
                elif search_filters and search_filter_relation == 'AND':
                    for term, constrains in search_filters.items():
                        q_chain = Q()
                        for constrain in constrains:
                            q_chain |= Q(**{constrain: term})
                        queryset = queryset.filter(q_chain)
                for n, k, v in chain_filters:
                    if n:
                        q = ~Q(**{k: v})
                    else:
                        q = Q(**{k: v})
                    queryset = queryset.filter(q)
                queryset = queryset.filter(*args)
                if needs_distinct:
                    queryset = queryset.distinct()
            return queryset
        except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
            raise ParseError(e.args[0])
        except ValidationError as e:
            raise ParseError(json.dumps(e.messages, ensure_ascii=False))
class OrderByBackend(BaseFilterBackend):
    """
    Filter to apply ordering based on query string parameters.
    """

    def filter_queryset(self, request, queryset, view):
        try:
            # Pull ordering terms from ?order= / ?order_by=; the last
            # occurrence wins and commas separate multiple terms.
            requested = None
            for key, value in request.query_params.items():
                if key not in ('order', 'order_by'):
                    continue
                requested = value.split(',') if ',' in value else (value,)
            # glue the order by and default order by together so that the default is the backup option
            combined = list(requested or []) + list(self.get_default_ordering(view) or [])
            if combined:
                validated = self._validate_ordering_fields(queryset.model, combined)
                # Special handling of the type field for ordering. In this
                # case, we're not sorting exactly on the type field, but
                # given the limited number of views with multiple types,
                # sorting on polymorphic_ctype.model is effectively the same.
                final_order = []
                if 'polymorphic_ctype' in get_all_field_names(queryset.model):
                    for term in validated:
                        if term == 'type':
                            final_order.append('polymorphic_ctype__model')
                        elif term == '-type':
                            final_order.append('-polymorphic_ctype__model')
                        else:
                            final_order.append(term)
                else:
                    # Non-polymorphic models simply ignore 'type' ordering.
                    final_order = [term for term in validated if term not in ('type', '-type')]
                queryset = queryset.order_by(*final_order)
            return queryset
        except FieldError as e:
            # Return a 400 for invalid field names.
            raise ParseError(*e.args)

    def get_default_ordering(self, view):
        # A view may declare its default ordering as a single string or a tuple.
        declared = getattr(view, 'ordering', None)
        return (declared,) if isinstance(declared, str) else declared

    def _validate_ordering_fields(self, model, order_by):
        # Generator: yields each ordering term with its lookup path corrected,
        # raising ParseError for unknown fields.
        for field_name in order_by:
            # strip off the negation prefix `-` if it exists
            prefix, path = ('', field_name)
            if field_name[0] == '-':
                prefix, path = (field_name[0], field_name[1:])
            try:
                _field, corrected = get_field_from_path(model, path)
                corrected = '{}{}'.format(prefix, corrected)
            except (FieldError, FieldDoesNotExist) as e:
                raise ParseError(e.args[0])
            yield corrected

View File

@@ -30,17 +30,12 @@ from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import StaticHTMLRenderer from rest_framework.renderers import StaticHTMLRenderer
from rest_framework.negotiation import DefaultContentNegotiation from rest_framework.negotiation import DefaultContentNegotiation
# django-ansible-base
from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend
from ansible_base.lib.utils.models import get_all_field_names
from ansible_base.rbac.models import RoleEvaluation, RoleDefinition
from ansible_base.rbac.permission_registry import permission_registry
# AWX # AWX
from awx.api.filters import FieldLookupBackend
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
from awx.main.models.rbac import give_creator_permissions
from awx.main.access import optimize_queryset from awx.main.access import optimize_queryset
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
from awx.main.utils.db import get_all_field_names
from awx.main.utils.licensing import server_product_name from awx.main.utils.licensing import server_product_name
from awx.main.views import ApiErrorView from awx.main.views import ApiErrorView
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
@@ -95,7 +90,7 @@ class LoggedLoginView(auth_views.LoginView):
ret = super(LoggedLoginView, self).post(request, *args, **kwargs) ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
if request.user.is_authenticated: if request.user.is_authenticated:
logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None)))) logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
ret.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False)) ret.set_cookie('userLoggedIn', 'true')
ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid')) ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
return ret return ret
@@ -111,7 +106,7 @@ class LoggedLogoutView(auth_views.LogoutView):
original_user = getattr(request, 'user', None) original_user = getattr(request, 'user', None)
ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs) ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs)
current_user = getattr(request, 'user', None) current_user = getattr(request, 'user', None)
ret.set_cookie('userLoggedIn', 'false', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False)) ret.set_cookie('userLoggedIn', 'false')
if (not current_user or not getattr(current_user, 'pk', True)) and current_user != original_user: if (not current_user or not getattr(current_user, 'pk', True)) and current_user != original_user:
logger.info("User {} logged out.".format(original_user.username)) logger.info("User {} logged out.".format(original_user.username))
return ret return ret
@@ -174,7 +169,7 @@ class APIView(views.APIView):
self.__init_request_error__ = exc self.__init_request_error__ = exc
except UnsupportedMediaType as exc: except UnsupportedMediaType as exc:
exc.detail = _( exc.detail = _(
'You did not use correct Content-Type in your HTTP request. If you are using our REST API, the Content-Type must be application/json' 'You did not use correct Content-Type in your HTTP request. ' 'If you are using our REST API, the Content-Type must be application/json'
) )
self.__init_request_error__ = exc self.__init_request_error__ = exc
return drf_request return drf_request
@@ -237,8 +232,7 @@ class APIView(views.APIView):
response = super(APIView, self).finalize_response(request, response, *args, **kwargs) response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
time_started = getattr(self, 'time_started', None) time_started = getattr(self, 'time_started', None)
if request.user.is_authenticated: response['X-API-Product-Version'] = get_awx_version()
response['X-API-Product-Version'] = get_awx_version()
response['X-API-Product-Name'] = server_product_name() response['X-API-Product-Name'] = server_product_name()
response['X-API-Node'] = settings.CLUSTER_HOST_ID response['X-API-Node'] = settings.CLUSTER_HOST_ID
@@ -476,11 +470,7 @@ class ListAPIView(generics.ListAPIView, GenericAPIView):
class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView): class ListCreateAPIView(ListAPIView, generics.ListCreateAPIView):
# Base class for a list view that allows creating new objects. # Base class for a list view that allows creating new objects.
def perform_create(self, serializer): pass
super().perform_create(serializer)
if serializer.Meta.model in permission_registry.all_registered_models:
if self.request and self.request.user:
give_creator_permissions(self.request.user, serializer.instance)
class ParentMixin(object): class ParentMixin(object):
@@ -532,16 +522,14 @@ class SubListAPIView(ParentMixin, ListAPIView):
def get_queryset(self): def get_queryset(self):
parent = self.get_parent_object() parent = self.get_parent_object()
self.check_parent_access(parent) self.check_parent_access(parent)
sublist_qs = self.get_sublist_queryset(parent)
if not self.filter_read_permission: if not self.filter_read_permission:
return optimize_queryset(self.get_sublist_queryset(parent)) return optimize_queryset(sublist_qs)
qs = self.request.user.get_queryset(self.model) qs = self.request.user.get_queryset(self.model).distinct()
if hasattr(self, 'parent_key'): return qs & sublist_qs
# This is vastly preferable for ReverseForeignKey relationships
return qs.filter(**{self.parent_key: parent})
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
def get_sublist_queryset(self, parent): def get_sublist_queryset(self, parent):
return getattrd(parent, self.relationship) return getattrd(parent, self.relationship).distinct()
class DestroyAPIView(generics.DestroyAPIView): class DestroyAPIView(generics.DestroyAPIView):
@@ -590,6 +578,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
d.update({'parent_key': getattr(self, 'parent_key', None)}) d.update({'parent_key': getattr(self, 'parent_key', None)})
return d return d
def get_queryset(self):
if hasattr(self, 'parent_key'):
# Prefer this filtering because ForeignKey allows us more assumptions
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(**{self.parent_key: parent})
return super(SubListCreateAPIView, self).get_queryset()
def create(self, request, *args, **kwargs): def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the # If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to # DB yet. We want to see if we can create it. The URL may choose to
@@ -800,7 +797,6 @@ class RetrieveUpdateDestroyAPIView(RetrieveUpdateAPIView, DestroyAPIView):
class ResourceAccessList(ParentMixin, ListAPIView): class ResourceAccessList(ParentMixin, ListAPIView):
deprecated = True
serializer_class = ResourceAccessListElementSerializer serializer_class = ResourceAccessListElementSerializer
ordering = ('username',) ordering = ('username',)
@@ -808,15 +804,6 @@ class ResourceAccessList(ParentMixin, ListAPIView):
obj = self.get_parent_object() obj = self.get_parent_object()
content_type = ContentType.objects.get_for_model(obj) content_type = ContentType.objects.get_for_model(obj)
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
auditor_role = RoleDefinition.objects.filter(name="System Auditor").first()
if auditor_role:
qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
return qs.distinct()
roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id)) roles = set(Role.objects.filter(content_type=content_type, object_id=obj.id))
ancestors = set() ancestors = set()
@@ -976,7 +963,7 @@ class CopyAPIView(GenericAPIView):
None, None, self.model, obj, request.user, create_kwargs=create_kwargs, copy_name=serializer.validated_data.get('name', '') None, None, self.model, obj, request.user, create_kwargs=create_kwargs, copy_name=serializer.validated_data.get('name', '')
) )
if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all(): if hasattr(new_obj, 'admin_role') and request.user not in new_obj.admin_role.members.all():
give_creator_permissions(request.user, new_obj) new_obj.admin_role.members.add(request.user)
if sub_objs: if sub_objs:
permission_check_func = None permission_check_func = None
if hasattr(type(self), 'deep_copy_permission_check_func'): if hasattr(type(self), 'deep_copy_permission_check_func'):

View File

@@ -36,13 +36,11 @@ class Metadata(metadata.SimpleMetadata):
field_info = OrderedDict() field_info = OrderedDict()
field_info['type'] = self.label_lookup[field] field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False) field_info['required'] = getattr(field, 'required', False)
field_info['hidden'] = getattr(field, 'hidden', False)
text_attrs = [ text_attrs = [
'read_only', 'read_only',
'label', 'label',
'help_text', 'help_text',
'warning_text',
'min_length', 'min_length',
'max_length', 'max_length',
'min_value', 'min_value',
@@ -73,7 +71,7 @@ class Metadata(metadata.SimpleMetadata):
'url': _('URL for this {}.'), 'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'), 'related': _('Data structure with URLs of related resources.'),
'summary_fields': _( 'summary_fields': _(
'Data structure with name/description for related resources. The output for some objects may be limited for performance reasons.' 'Data structure with name/description for related resources. ' 'The output for some objects may be limited for performance reasons.'
), ),
'created': _('Timestamp when this {} was created.'), 'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'), 'modified': _('Timestamp when this {} was last modified.'),

View File

@@ -6,7 +6,7 @@ import copy
import json import json
import logging import logging
import re import re
from collections import Counter, OrderedDict from collections import OrderedDict
from datetime import timedelta from datetime import timedelta
from uuid import uuid4 from uuid import uuid4
@@ -43,14 +43,9 @@ from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic # Django-Polymorphic
from polymorphic.models import PolymorphicModel from polymorphic.models import PolymorphicModel
# django-ansible-base
from ansible_base.lib.utils.models import get_type_for_model
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.rbac import permission_registry
# AWX # AWX
from awx.main.access import get_user_capabilities from awx.main.access import get_user_capabilities
from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE, org_role_to_permission from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE
from awx.main.models import ( from awx.main.models import (
ActivityStream, ActivityStream,
AdHocCommand, AdHocCommand,
@@ -85,7 +80,6 @@ from awx.main.models import (
Project, Project,
ProjectUpdate, ProjectUpdate,
ProjectUpdateEvent, ProjectUpdateEvent,
ReceptorAddress,
RefreshToken, RefreshToken,
Role, Role,
Schedule, Schedule,
@@ -105,9 +99,10 @@ from awx.main.models import (
CLOUD_INVENTORY_SOURCES, CLOUD_INVENTORY_SOURCES,
) )
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import role_summary_fields_generator, give_creator_permissions, get_role_codenames, to_permissions, get_role_from_object_role from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator
from awx.main.fields import ImplicitRoleField from awx.main.fields import ImplicitRoleField
from awx.main.utils import ( from awx.main.utils import (
get_type_for_model,
get_model_for_type, get_model_for_type,
camelcase_to_underscore, camelcase_to_underscore,
getattrd, getattrd,
@@ -194,7 +189,6 @@ SUMMARIZABLE_FK_FIELDS = {
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'), 'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'), 'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
'credential_type': DEFAULT_SUMMARY_FIELDS, 'credential_type': DEFAULT_SUMMARY_FIELDS,
'resource': ('ansible_id', 'resource_type'),
} }
@@ -226,7 +220,7 @@ class CopySerializer(serializers.Serializer):
view = self.context.get('view', None) view = self.context.get('view', None)
obj = view.get_object() obj = view.get_object()
if name == obj.name: if name == obj.name:
raise serializers.ValidationError(_('The original object is already named {}, a copy from it cannot have the same name.'.format(name))) raise serializers.ValidationError(_('The original object is already named {}, a copy from' ' it cannot have the same name.'.format(name)))
return attrs return attrs
@@ -641,7 +635,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
exclusions = self.get_validation_exclusions(self.instance) exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model() obj = self.instance or self.Meta.model()
for k, v in attrs.items(): for k, v in attrs.items():
if k not in exclusions and k != 'canonical_address_port': if k not in exclusions:
setattr(obj, k, v) setattr(obj, k, v)
obj.full_clean(exclude=exclusions) obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes # full_clean may modify values on the instance; copy those changes
@@ -766,7 +760,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
class UnifiedJobSerializer(BaseSerializer): class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete'] show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField( event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this unified job have been saved to the database.'), read_only=True help_text=_('Indicates whether all of the events generated by this ' 'unified job have been saved to the database.'), read_only=True
) )
class Meta: class Meta:
@@ -1585,7 +1579,7 @@ class ProjectPlaybooksSerializer(ProjectSerializer):
class ProjectInventoriesSerializer(ProjectSerializer): class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, not comprehensive.')) inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, ' 'not comprehensive.'))
class Meta: class Meta:
model = Project model = Project
@@ -1635,8 +1629,8 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
fields = ('*', 'host_status_counts', 'playbook_counts') fields = ('*', 'host_status_counts', 'playbook_counts')
def get_playbook_counts(self, obj): def get_playbook_counts(self, obj):
task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count() task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count() play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count} data = {'play_count': play_count, 'task_count': task_count}
@@ -2207,99 +2201,6 @@ class BulkHostCreateSerializer(serializers.Serializer):
return return_data return return_data
class BulkHostDeleteSerializer(serializers.Serializer):
hosts = serializers.ListField(
allow_empty=False,
max_length=100000,
write_only=True,
help_text=_('List of hosts ids to be deleted, e.g. [105, 130, 131, 200]'),
)
class Meta:
model = Host
fields = ('hosts',)
def validate(self, attrs):
request = self.context.get('request', None)
max_hosts = settings.BULK_HOST_MAX_DELETE
# Validating the number of hosts to be deleted
if len(attrs['hosts']) > max_hosts:
raise serializers.ValidationError(
{
"ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE',
"BULK_HOST_MAX_DELETE": max_hosts,
"Hosts_count": len(attrs['hosts']),
}
)
# Getting list of all host objects, filtered by the list of the hosts to delete
attrs['host_qs'] = Host.objects.get_queryset().filter(pk__in=attrs['hosts']).only('id', 'inventory_id', 'name')
# Converting the queryset data in a dict. to reduce the number of queries when
# manipulating the data
attrs['hosts_data'] = attrs['host_qs'].values()
if len(attrs['host_qs']) == 0:
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in attrs['hosts']}
raise serializers.ValidationError({'hosts': error_hosts})
if len(attrs['host_qs']) < len(attrs['hosts']):
hosts_exists = [host['id'] for host in attrs['hosts_data']]
failed_hosts = list(set(attrs['hosts']).difference(hosts_exists))
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in failed_hosts}
raise serializers.ValidationError({'hosts': error_hosts})
# Getting all inventories that the hosts can be in
inv_list = list(set([host['inventory_id'] for host in attrs['hosts_data']]))
# Checking that the user have permission to all inventories
errors = dict()
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
if request and not request.user.is_superuser:
if request.user not in inv.admin_role:
errors[inv.name] = "Lack permissions to delete hosts from this inventory."
if errors != {}:
raise PermissionDenied({"inventories": errors})
# check the inventory type only if the user have permission to it.
errors = dict()
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
if inv.kind != '':
errors[inv.name] = "Hosts can only be deleted from manual inventories."
if errors != {}:
raise serializers.ValidationError({"inventories": errors})
attrs['inventories'] = inv_list
return attrs
def delete(self, validated_data):
result = {"hosts": dict()}
changes = {'deleted_hosts': dict()}
for inventory in validated_data['inventories']:
changes['deleted_hosts'][inventory] = list()
for host in validated_data['hosts_data']:
result["hosts"][host["id"]] = f"The host {host['name']} was deleted"
changes['deleted_hosts'][host["inventory_id"]].append({"host_id": host["id"], "host_name": host["name"]})
try:
validated_data['host_qs'].delete()
except Exception as e:
raise serializers.ValidationError({"detail": _(f"cannot delete hosts, host deletion error {e}")})
request = self.context.get('request', None)
for inventory in validated_data['inventories']:
activity_entry = ActivityStream.objects.create(
operation='update',
object1='inventory',
changes=json.dumps(changes['deleted_hosts'][inventory]),
actor=request.user,
)
activity_entry.inventory.add(inventory)
return result
class GroupTreeSerializer(GroupSerializer): class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField() children = serializers.SerializerMethodField()
@@ -2763,30 +2664,6 @@ class ResourceAccessListElementSerializer(UserSerializer):
if 'summary_fields' not in ret: if 'summary_fields' not in ret:
ret['summary_fields'] = {} ret['summary_fields'] = {}
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
reversed_org_map = {}
for k, v in org_role_to_permission.items():
reversed_org_map[v] = k
reversed_role_map = {}
for k, v in to_permissions.items():
reversed_role_map[v] = k
def get_roles_from_perms(perm_list):
"""given a list of permission codenames return a list of role names"""
role_names = set()
for codename in perm_list:
action = codename.split('_', 1)[0]
if action in reversed_role_map:
role_names.add(reversed_role_map[action])
elif codename in reversed_org_map:
if isinstance(obj, Organization):
role_names.add(reversed_org_map[codename])
if 'view_organization' not in role_names:
role_names.add('read_role')
return list(role_names)
def format_role_perm(role): def format_role_perm(role):
role_dict = {'id': role.id, 'name': role.name, 'description': role.description} role_dict = {'id': role.id, 'name': role.name, 'description': role.description}
try: try:
@@ -2802,21 +2679,13 @@ class ResourceAccessListElementSerializer(UserSerializer):
else: else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec # Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False} role_dict['user_capabilities'] = {'unattach': False}
return {'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
model_name = content_type.model
if isinstance(obj, Organization):
descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name) or codename.startswith('add_')]
else:
descendant_perms = [codename for codename in get_role_codenames(role) if codename.endswith(model_name)]
return {'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)}
def format_team_role_perm(naive_team_role, permissive_role_ids): def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = [] ret = []
team = naive_team_role.content_object
team_role = naive_team_role team_role = naive_team_role
if naive_team_role.role_field == 'admin_role': if naive_team_role.role_field == 'admin_role':
team_role = team.member_role team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all(): for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = { role_dict = {
'id': role.id, 'id': role.id,
@@ -2836,87 +2705,13 @@ class ResourceAccessListElementSerializer(UserSerializer):
else: else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec # Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False} role_dict['user_capabilities'] = {'unattach': False}
ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
descendant_perms = list(
RoleEvaluation.objects.filter(role__in=team.has_roles.all(), object_id=obj.id, content_type_id=content_type.id)
.values_list('codename', flat=True)
.distinct()
)
ret.append({'role': role_dict, 'descendant_roles': get_roles_from_perms(descendant_perms)})
return ret return ret
gfk_kwargs = dict(content_type_id=content_type.id, object_id=obj.id) team_content_type = ContentType.objects.get_for_model(Team)
direct_permissive_role_ids = Role.objects.filter(**gfk_kwargs).values_list('id', flat=True) content_type = ContentType.objects.get_for_model(obj)
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
ret['summary_fields']['direct_access'] = []
ret['summary_fields']['indirect_access'] = []
new_roles_seen = set()
all_team_roles = set()
all_permissive_role_ids = set()
for evaluation in RoleEvaluation.objects.filter(role__in=user.has_roles.all(), **gfk_kwargs).prefetch_related('role'):
new_role = evaluation.role
if new_role.id in new_roles_seen:
continue
new_roles_seen.add(new_role.id)
old_role = get_role_from_object_role(new_role)
all_permissive_role_ids.add(old_role.id)
if int(new_role.object_id) == obj.id and new_role.content_type_id == content_type.id:
ret['summary_fields']['direct_access'].append(format_role_perm(old_role))
elif new_role.content_type_id == team_content_type.id:
all_team_roles.add(old_role)
else:
ret['summary_fields']['indirect_access'].append(format_role_perm(old_role))
# Lazy role creation gives us a big problem, where some intermediate roles are not easy to find
# like when a team has indirect permission, so here we get all roles the users teams have
# these contribute to all potential permission-granting roles of the object
user_teams_qs = permission_registry.team_model.objects.filter(member_roles__in=ObjectRole.objects.filter(users=user))
team_obj_roles = ObjectRole.objects.filter(teams__in=user_teams_qs)
for evaluation in RoleEvaluation.objects.filter(role__in=team_obj_roles, **gfk_kwargs).prefetch_related('role'):
new_role = evaluation.role
if new_role.id in new_roles_seen:
continue
new_roles_seen.add(new_role.id)
old_role = get_role_from_object_role(new_role)
all_permissive_role_ids.add(old_role.id)
# In DAB RBAC, superuser is strictly a user flag, and global roles are not in the RoleEvaluation table
if user.is_superuser:
ret['summary_fields'].setdefault('indirect_access', [])
all_role_names = [field.name for field in obj._meta.get_fields() if isinstance(field, ImplicitRoleField)]
ret['summary_fields']['indirect_access'].append(
{
"role": {
"id": None,
"name": _("System Administrator"),
"description": _("Can manage all aspects of the system"),
"user_capabilities": {"unattach": False},
},
"descendant_roles": all_role_names,
}
)
elif user.is_system_auditor:
ret['summary_fields'].setdefault('indirect_access', [])
ret['summary_fields']['indirect_access'].append(
{
"role": {
"id": None,
"name": _("System Auditor"),
"description": _("Can view all aspects of the system"),
"user_capabilities": {"unattach": False},
},
"descendant_roles": ["read_role"],
}
)
ret['summary_fields']['direct_access'].extend([y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in all_team_roles) for y in x])
return ret
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True) all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles.filter(id__in=direct_permissive_role_ids).all() direct_access_roles = user.roles.filter(id__in=direct_permissive_role_ids).all()
@@ -3110,7 +2905,7 @@ class CredentialSerializer(BaseSerializer):
): ):
if getattr(self.instance, related_objects).count() > 0: if getattr(self.instance, related_objects).count() > 0:
raise ValidationError( raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality of the resources using it.') _('You cannot change the credential type of the credential, as it may break the functionality' ' of the resources using it.')
) )
return credential_type return credential_type
@@ -3130,7 +2925,7 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None, default=None,
write_only=True, write_only=True,
allow_null=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.'), help_text=_('Write-only field used to add user to owner role. If provided, ' 'do not give either team or organization. Only valid for creation.'),
) )
team = serializers.PrimaryKeyRelatedField( team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(), queryset=Team.objects.all(),
@@ -3138,14 +2933,14 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None, default=None,
write_only=True, write_only=True,
allow_null=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.'), help_text=_('Write-only field used to add team to owner role. If provided, ' 'do not give either user or organization. Only valid for creation.'),
) )
organization = serializers.PrimaryKeyRelatedField( organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(), queryset=Organization.objects.all(),
required=False, required=False,
default=None, default=None,
allow_null=True, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, do not give either user or team.'), help_text=_('Inherit permissions from organization roles. If provided on creation, ' 'do not give either user or team.'),
) )
class Meta: class Meta:
@@ -3167,7 +2962,7 @@ class CredentialSerializerCreate(CredentialSerializer):
if len(owner_fields) > 1: if len(owner_fields) > 1:
received = ", ".join(sorted(owner_fields)) received = ", ".join(sorted(owner_fields))
raise serializers.ValidationError( raise serializers.ValidationError(
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, received {} fields.".format(received))} {"detail": _("Only one of 'user', 'team', or 'organization' should be provided, " "received {} fields.".format(received))}
) )
if attrs.get('team'): if attrs.get('team'):
@@ -3185,7 +2980,7 @@ class CredentialSerializerCreate(CredentialSerializer):
credential = super(CredentialSerializerCreate, self).create(validated_data) credential = super(CredentialSerializerCreate, self).create(validated_data)
if user: if user:
give_creator_permissions(user, credential) credential.admin_role.members.add(user)
if team: if team:
if not credential.organization or team.organization.id != credential.organization.id: if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")}) raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
@@ -3438,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
if get_field_from_model_or_attrs('host_config_key') and not inventory: if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")}) raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
prompting_error_message = _("You must either set a default value or ask to prompt on launch.") prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None: if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")}) raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'): elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
@@ -3827,7 +3622,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
try: try:
return obj.result_stdout return obj.result_stdout
except StdoutMaxBytesExceeded as e: except StdoutMaxBytesExceeded as e:
return _("Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes.").format( return _("Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported text_size=e.total, supported_size=e.supported
) )
@@ -4741,7 +4536,7 @@ class JobLaunchSerializer(BaseSerializer):
if cred.unique_hash() in provided_mapping.keys(): if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append( errors.setdefault('credentials', []).append(
_('Removing {} credential at launch time without replacement is not supported. Provided list lacked credential(s): {}.').format( _('Removing {} credential at launch time without replacement is not supported. ' 'Provided list lacked credential(s): {}.').format(
cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds]) cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])
) )
) )
@@ -4891,11 +4686,12 @@ class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
# many-to-many fields # many-to-many fields
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False) credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False) labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False) # TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
class Meta: class Meta:
model = WorkflowJobNode model = WorkflowJobNode
fields = ('*', 'credentials', 'labels', 'instance_groups') # m2m fields are not canonical for WJ nodes fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
def validate(self, attrs): def validate(self, attrs):
return super(LaunchConfigurationBaseSerializer, self).validate(attrs) return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
@@ -4955,21 +4751,21 @@ class BulkJobLaunchSerializer(serializers.Serializer):
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job} requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
requested_use_credentials = set() requested_use_credentials = set()
requested_use_labels = set() requested_use_labels = set()
requested_use_instance_groups = set() # requested_use_instance_groups = set()
for job in attrs['jobs']: for job in attrs['jobs']:
for cred in job.get('credentials', []): for cred in job.get('credentials', []):
requested_use_credentials.add(cred) requested_use_credentials.add(cred)
for label in job.get('labels', []): for label in job.get('labels', []):
requested_use_labels.add(label) requested_use_labels.add(label)
for instance_group in job.get('instance_groups', []): # for instance_group in job.get('instance_groups', []):
requested_use_instance_groups.add(instance_group) # requested_use_instance_groups.add(instance_group)
key_to_obj_map = { key_to_obj_map = {
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)}, "unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)}, "inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)}, "credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)}, "labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
"instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)}, # "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)}, "execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
} }
@@ -4996,7 +4792,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
self.check_list_permission(Credential, requested_use_credentials, 'use_role') self.check_list_permission(Credential, requested_use_credentials, 'use_role')
self.check_list_permission(Label, requested_use_labels) self.check_list_permission(Label, requested_use_labels)
self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict # self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map) jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
@@ -5043,7 +4839,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
node_m2m_object_types_to_through_model = { node_m2m_object_types_to_through_model = {
'credentials': WorkflowJobNode.credentials.through, 'credentials': WorkflowJobNode.credentials.through,
'labels': WorkflowJobNode.labels.through, 'labels': WorkflowJobNode.labels.through,
'instance_groups': WorkflowJobNode.instance_groups.through, # 'instance_groups': WorkflowJobNode.instance_groups.through,
} }
node_deferred_attr_names = ( node_deferred_attr_names = (
'limit', 'limit',
@@ -5096,9 +4892,9 @@ class BulkJobLaunchSerializer(serializers.Serializer):
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels': if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
for label in node_m2m_objects[node_identifier][field_name]: for label in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node'])) through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if field_name in node_m2m_objects[node_identifier] and field_name == 'instance_groups': # if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
for instance_group in node_m2m_objects[node_identifier][field_name]: # for instance_group in node_m2m_objects[node_identifier][obj_type]:
through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node'])) # through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if through_model_objects: if through_model_objects:
through_model.objects.bulk_create(through_model_objects) through_model.objects.bulk_create(through_model_objects)
@@ -5223,7 +5019,7 @@ class NotificationTemplateSerializer(BaseSerializer):
for subevent in event_messages: for subevent in event_messages:
if subevent not in ('running', 'approved', 'timed_out', 'denied'): if subevent not in ('running', 'approved', 'timed_out', 'denied'):
error_list.append( error_list.append(
_("Workflow Approval event '{}' invalid, must be one of 'running', 'approved', 'timed_out', or 'denied'").format(subevent) _("Workflow Approval event '{}' invalid, must be one of " "'running', 'approved', 'timed_out', or 'denied'").format(subevent)
) )
continue continue
subevent_messages = event_messages[subevent] subevent_messages = event_messages[subevent]
@@ -5279,21 +5075,16 @@ class NotificationTemplateSerializer(BaseSerializer):
body = messages[event].get('body', {}) body = messages[event].get('body', {})
if body: if body:
try: try:
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub()) rendered_body = (
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
# https://github.com/ansible/awx/issues/14410 )
potential_body = json.loads(rendered_body)
# When rendering something such as "{{ job.id }}" if not isinstance(potential_body, dict):
# the return type is not a dict, unlike "{{ job_metadata }}" which is a dict error_list.append(
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
# potential_body = json.loads(rendered_body) )
except json.JSONDecodeError as exc:
# if not isinstance(potential_body, dict): error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc)))
# error_list.append(
# _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
# )
except Exception as exc:
error_list.append(_("Webhook body for '{}' is not valid. The following gave an error ({}).".format(event, exc)))
if error_list: if error_list:
raise serializers.ValidationError(error_list) raise serializers.ValidationError(error_list)
@@ -5566,24 +5357,10 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer): class InstanceLinkSerializer(BaseSerializer):
class Meta: class Meta:
model = InstanceLink model = InstanceLink
fields = ('id', 'related', 'source', 'target', 'target_full_address', 'link_state') fields = ('source', 'target', 'link_state')
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all()) source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SerializerMethodField()
target_full_address = serializers.SerializerMethodField()
def get_related(self, obj):
res = super(InstanceLinkSerializer, self).get_related(obj)
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
res['target_address'] = self.reverse('api:receptor_address_detail', kwargs={'pk': obj.target.id})
return res
def get_target(self, obj):
return obj.target.instance.hostname
def get_target_full_address(self, obj):
return obj.target.get_full_address()
class InstanceNodeSerializer(BaseSerializer): class InstanceNodeSerializer(BaseSerializer):
@@ -5592,29 +5369,6 @@ class InstanceNodeSerializer(BaseSerializer):
fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled') fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')
class ReceptorAddressSerializer(BaseSerializer):
full_address = serializers.SerializerMethodField()
class Meta:
model = ReceptorAddress
fields = (
'id',
'url',
'address',
'port',
'protocol',
'websocket_path',
'is_internal',
'canonical',
'instance',
'peers_from_control_nodes',
'full_address',
)
def get_full_address(self, obj):
return obj.get_full_address()
class InstanceSerializer(BaseSerializer): class InstanceSerializer(BaseSerializer):
show_capabilities = ['edit'] show_capabilities = ['edit']
@@ -5623,17 +5377,10 @@ class InstanceSerializer(BaseSerializer):
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True) jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True) jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField() health_check_pending = serializers.SerializerMethodField()
peers = serializers.PrimaryKeyRelatedField(
help_text=_('Primary keys of receptor addresses to peer to.'), many=True, required=False, queryset=ReceptorAddress.objects.all()
)
reverse_peers = serializers.SerializerMethodField()
listener_port = serializers.IntegerField(source='canonical_address_port', required=False, allow_null=True)
peers_from_control_nodes = serializers.BooleanField(source='canonical_address_peers_from_control_nodes', required=False)
protocol = serializers.SerializerMethodField()
class Meta: class Meta:
model = Instance model = Instance
read_only_fields = ('ip_address', 'uuid', 'version', 'managed', 'reverse_peers') read_only_fields = ('ip_address', 'uuid', 'version')
fields = ( fields = (
'id', 'id',
'hostname', 'hostname',
@@ -5664,13 +5411,8 @@ class InstanceSerializer(BaseSerializer):
'managed_by_policy', 'managed_by_policy',
'node_type', 'node_type',
'node_state', 'node_state',
'managed',
'ip_address', 'ip_address',
'peers',
'reverse_peers',
'listener_port', 'listener_port',
'peers_from_control_nodes',
'protocol',
) )
extra_kwargs = { extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION}, 'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5692,54 +5434,16 @@ class InstanceSerializer(BaseSerializer):
def get_related(self, obj): def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj) res = super(InstanceSerializer, self).get_related(obj)
res['receptor_addresses'] = self.reverse('api:instance_receptor_addresses_list', kwargs={'pk': obj.pk})
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk}) res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk}) res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed: if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk}) res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor: if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution': if obj.node_type == 'execution':
res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk}) res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
return res return res
def create_or_update(self, validated_data, obj=None, create=True):
# create a managed receptor address if listener port is defined
port = validated_data.pop('listener_port', -1)
peers_from_control_nodes = validated_data.pop('peers_from_control_nodes', -1)
# delete the receptor address if the port is explicitly set to None
if obj and port == None:
obj.receptor_addresses.filter(address=obj.hostname).delete()
if create:
instance = super(InstanceSerializer, self).create(validated_data)
else:
instance = super(InstanceSerializer, self).update(obj, validated_data)
instance.refresh_from_db() # instance canonical address lookup is deferred, so needs to be reloaded
# only create or update if port is defined in validated_data or already exists in the
# canonical address
# this prevents creating a receptor address if peers_from_control_nodes is in
# validated_data but a port is not set
if (port != None and port != -1) or instance.canonical_address_port:
kwargs = {}
if port != -1:
kwargs['port'] = port
if peers_from_control_nodes != -1:
kwargs['peers_from_control_nodes'] = peers_from_control_nodes
if kwargs:
kwargs['canonical'] = True
instance.receptor_addresses.update_or_create(address=instance.hostname, defaults=kwargs)
return instance
def create(self, validated_data):
return self.create_or_update(validated_data, create=True)
def update(self, obj, validated_data):
return self.create_or_update(validated_data, obj, create=False)
def get_summary_fields(self, obj): def get_summary_fields(self, obj):
summary = super().get_summary_fields(obj) summary = super().get_summary_fields(obj)
@@ -5749,16 +5453,6 @@ class InstanceSerializer(BaseSerializer):
return summary return summary
def get_reverse_peers(self, obj):
return Instance.objects.prefetch_related('peers').filter(peers__in=obj.receptor_addresses.all()).values_list('id', flat=True)
def get_protocol(self, obj):
# note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
for addr in obj.receptor_addresses.all():
if addr.canonical:
return addr.protocol
return ""
def get_consumed_capacity(self, obj): def get_consumed_capacity(self, obj):
return obj.consumed_capacity return obj.consumed_capacity
@@ -5771,30 +5465,22 @@ class InstanceSerializer(BaseSerializer):
def get_health_check_pending(self, obj): def get_health_check_pending(self, obj):
return obj.health_check_pending return obj.health_check_pending
def validate(self, attrs): def validate(self, data):
# Oddly, using 'source' on a DRF field populates attrs with the source name, so we should rename it back if self.instance:
if 'canonical_address_port' in attrs: if self.instance.node_type == Instance.Types.HOP:
attrs['listener_port'] = attrs.pop('canonical_address_port') raise serializers.ValidationError("Hop node instances may not be changed.")
if 'canonical_address_peers_from_control_nodes' in attrs: else:
attrs['peers_from_control_nodes'] = attrs.pop('canonical_address_peers_from_control_nodes') if not settings.IS_K8S:
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
if not self.instance and not settings.IS_K8S: return data
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
# cannot enable peers_from_control_nodes if listener_port is not set
if attrs.get('peers_from_control_nodes'):
port = attrs.get('listener_port', -1) # -1 denotes missing, None denotes explicit null
if (port is None) or (port == -1 and self.instance and self.instance.canonical_address is None):
raise serializers.ValidationError(_("Cannot enable peers_from_control_nodes if listener_port is not set."))
return super().validate(attrs)
def validate_node_type(self, value): def validate_node_type(self, value):
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]: if not self.instance:
raise serializers.ValidationError(_("Can only create execution or hop nodes.")) if value not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only create execution nodes.")
if self.instance and self.instance.node_type != value: else:
raise serializers.ValidationError(_("Cannot change node type.")) if self.instance.node_type != value:
raise serializers.ValidationError("Cannot change node type.")
return value return value
@@ -5802,71 +5488,30 @@ class InstanceSerializer(BaseSerializer):
if self.instance: if self.instance:
if value != self.instance.node_state: if value != self.instance.node_state:
if not settings.IS_K8S: if not settings.IS_K8S:
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift.")) raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
if value != Instance.States.DEPROVISIONING: if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state.")) raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
if self.instance.managed: if self.instance.node_type not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError(_("Cannot deprovision managed nodes.")) raise serializers.ValidationError("Can only deprovision execution nodes.")
else: else:
if value and value != Instance.States.INSTALLED: if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError(_("Can only create instances in the 'installed' state.")) raise serializers.ValidationError("Can only create instances in the 'installed' state.")
return value return value
def validate_hostname(self, value): def validate_hostname(self, value):
""" """
Cannot change the hostname - Hostname cannot be "localhost" - but can be something like localhost.domain
- Cannot change the hostname of an-already instantiated & initialized Instance object
""" """
if self.instance and self.instance.hostname != value: if self.instance and self.instance.hostname != value:
raise serializers.ValidationError(_("Cannot change hostname.")) raise serializers.ValidationError("Cannot change hostname.")
return value return value
def validate_listener_port(self, value): def validate_listener_port(self, value):
""" if self.instance and self.instance.listener_port != value:
Cannot change listener port, unless going from none to integer, and vice versa raise serializers.ValidationError("Cannot change listener port.")
If instance is managed, cannot change listener port at all
"""
if self.instance:
canonical_address_port = self.instance.canonical_address_port
if value and canonical_address_port and canonical_address_port != value:
raise serializers.ValidationError(_("Cannot change listener port."))
if self.instance.managed and value != canonical_address_port:
raise serializers.ValidationError(_("Cannot change listener port for managed nodes."))
return value
def validate_peers(self, value):
# cannot peer to an instance more than once
peers_instances = Counter(p.instance_id for p in value)
if any(count > 1 for count in peers_instances.values()):
raise serializers.ValidationError(_("Cannot peer to the same instance more than once."))
if self.instance:
instance_addresses = set(self.instance.receptor_addresses.all())
setting_peers = set(value)
peers_changed = set(self.instance.peers.all()) != setting_peers
if not settings.IS_K8S and peers_changed:
raise serializers.ValidationError(_("Cannot change peers."))
if self.instance.managed and peers_changed:
raise serializers.ValidationError(_("Setting peers manually for managed nodes is not allowed."))
# cannot peer to self
if instance_addresses & setting_peers:
raise serializers.ValidationError(_("Instance cannot peer to its own address."))
# cannot peer to an instance that is already peered to this instance
if instance_addresses:
for p in setting_peers:
if set(p.instance.peers.all()) & instance_addresses:
raise serializers.ValidationError(_(f"Instance {p.instance.hostname} is already peered to this instance."))
return value
def validate_peers_from_control_nodes(self, value):
if self.instance and self.instance.managed and self.instance.canonical_address_peers_from_control_nodes != value:
raise serializers.ValidationError(_("Cannot change peers_from_control_nodes for managed nodes."))
return value return value
@@ -5874,19 +5519,7 @@ class InstanceSerializer(BaseSerializer):
class InstanceHealthCheckSerializer(BaseSerializer): class InstanceHealthCheckSerializer(BaseSerializer):
class Meta: class Meta:
model = Instance model = Instance
read_only_fields = ( read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
'uuid',
'hostname',
'ip_address',
'version',
'last_health_check',
'errors',
'cpu',
'memory',
'cpu_capacity',
'mem_capacity',
'capacity',
)
fields = read_only_fields fields = read_only_fields
@@ -5926,7 +5559,7 @@ class InstanceGroupSerializer(BaseSerializer):
instances = serializers.SerializerMethodField() instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField( is_container_group = serializers.BooleanField(
required=False, required=False,
help_text=_('Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.'), help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),
) )
# NOTE: help_text is duplicated from field definitions, no obvious way of # NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text # both defining field details here and also getting the field's help_text
@@ -5937,7 +5570,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False, required=False,
initial=0, initial=0,
label=_('Policy Instance Percentage'), label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to this group when new instances come online."), help_text=_("Minimum percentage of all instances that will be automatically assigned to " "this group when new instances come online."),
) )
policy_instance_minimum = serializers.IntegerField( policy_instance_minimum = serializers.IntegerField(
default=0, default=0,
@@ -5945,7 +5578,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False, required=False,
initial=0, initial=0,
label=_('Policy Instance Minimum'), label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to this group when new instances come online."), help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
) )
max_concurrent_jobs = serializers.IntegerField( max_concurrent_jobs = serializers.IntegerField(
default=0, default=0,

View File

@@ -1,10 +1,16 @@
import json
import warnings import warnings
from rest_framework.permissions import AllowAny from coreapi.document import Object, Link
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
from drf_yasg.views import get_schema_view from rest_framework import exceptions
from drf_yasg import openapi from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
from rest_framework.views import APIView
from rest_framework_swagger import renderers
class SuperUserSchemaGenerator(SchemaGenerator): class SuperUserSchemaGenerator(SchemaGenerator):
@@ -49,15 +55,43 @@ class AutoSchema(DRFAuthSchema):
return description return description
schema_view = get_schema_view( class SwaggerSchemaView(APIView):
openapi.Info( _ignore_model_permissions = True
title="Snippets API", exclude_from_schema = True
default_version='v1', permission_classes = [AllowAny]
description="Test description", renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="contact@snippets.local"), def get(self, request):
license=openapi.License(name="BSD License"), generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
), schema = generator.get_schema(request=request)
public=True, # python core-api doesn't support the deprecation yet, so track it
permission_classes=[AllowAny], # ourselves and return it in a response header
) _deprecated = []
# By default, DRF OpenAPI serialization places all endpoints in
# a single node based on their root path (/api). Instead, we want to
# group them by topic/tag so that they're categorized in the rendered
# output
document = schema._data.pop('api')
for path, node in document.items():
if isinstance(node, Object):
for action in node.values():
topic = getattr(action, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node
if isinstance(action, Object):
for link in action.links.values():
if link.deprecated:
_deprecated.append(link.url)
elif isinstance(node, Link):
topic = getattr(node, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node
if not schema:
raise exceptions.ValidationError('The schema generator did not return a schema Document')
return Response(schema, headers={'X-Deprecated-Paths': json.dumps(_deprecated)})

View File

@@ -1,22 +0,0 @@
# Bulk Host Delete
This endpoint allows the client to delete multiple hosts from inventories.
They may do this by providing a list of hosts ID's to be deleted.
Example:
{
"hosts": [1, 2, 3, 4, 5]
}
Return data:
{
"hosts": {
"1": "The host a1 was deleted",
"2": "The host a2 was deleted",
"3": "The host a3 was deleted",
"4": "The host a4 was deleted",
"5": "The host a5 was deleted",
}
}

View File

@@ -3,34 +3,21 @@ receptor_group: awx
receptor_verify: true receptor_verify: true
receptor_tls: true receptor_tls: true
receptor_mintls13: false receptor_mintls13: false
{% if instance.node_type == "execution" %}
receptor_work_commands: receptor_work_commands:
ansible-runner: ansible-runner:
command: ansible-runner command: ansible-runner
params: worker params: worker
allowruntimeparams: true allowruntimeparams: true
verifysignature: true verifysignature: true
additional_python_packages: custom_worksign_public_keyfile: receptor/work-public-key.pem
- ansible-runner
{% endif %}
custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_tls_certfile: receptor/tls/receptor.crt custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
{% if listener_port %} receptor_protocol: 'tcp'
receptor_protocol: {{ listener_protocol }}
receptor_listener: true receptor_listener: true
receptor_port: {{ listener_port }} receptor_port: {{ instance.listener_port }}
{% else %} receptor_dependencies:
receptor_listener: false - python39-pip
{% endif %}
{% if peers %}
receptor_peers:
{% for peer in peers %}
- address: {{ peer.address }}
protocol: {{ peer.protocol }}
{% endfor %}
{% endif %}
{% verbatim %} {% verbatim %}
podman_user: "{{ receptor_user }}" podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}" podman_group: "{{ receptor_group }}"

View File

@@ -1,16 +1,20 @@
{% verbatim %}
--- ---
- hosts: all - hosts: all
become: yes become: yes
tasks: tasks:
- name: Create the receptor user - name: Create the receptor user
user: user:
{% verbatim %}
name: "{{ receptor_user }}" name: "{{ receptor_user }}"
{% endverbatim %}
shell: /bin/bash shell: /bin/bash
{% if instance.node_type == "execution" %} - name: Enable Copr repo for Receptor
command: dnf copr enable ansible-awx/receptor -y
- import_role: - import_role:
name: ansible.receptor.podman name: ansible.receptor.podman
{% endif %}
- import_role: - import_role:
name: ansible.receptor.setup name: ansible.receptor.setup
- name: Install ansible-runner
pip:
name: ansible-runner
executable: pip3.9
{% endverbatim %}

View File

@@ -1,4 +1,4 @@
--- ---
collections: collections:
- name: ansible.receptor - name: ansible.receptor
version: 2.0.3 version: 1.1.0

View File

@@ -10,7 +10,6 @@ from awx.api.views import (
InstanceInstanceGroupsList, InstanceInstanceGroupsList,
InstanceHealthCheck, InstanceHealthCheck,
InstancePeersList, InstancePeersList,
InstanceReceptorAddressesList,
) )
from awx.api.views.instance_install_bundle import InstanceInstallBundle from awx.api.views.instance_install_bundle import InstanceInstallBundle
@@ -22,7 +21,6 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'), re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'), re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'), re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
re_path(r'^(?P<pk>[0-9]+)/receptor_addresses/$', InstanceReceptorAddressesList.as_view(), name='instance_receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'), re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
] ]

View File

@@ -1,17 +0,0 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.urls import re_path
from awx.api.views import (
ReceptorAddressesList,
ReceptorAddressDetail,
)
urls = [
re_path(r'^$', ReceptorAddressesList.as_view(), name='receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/$', ReceptorAddressDetail.as_view(), name='receptor_address_detail'),
]
__all__ = ['urls']

View File

@@ -30,13 +30,12 @@ from awx.api.views import (
OAuth2TokenList, OAuth2TokenList,
ApplicationOAuth2TokenList, ApplicationOAuth2TokenList,
OAuth2ApplicationDetail, OAuth2ApplicationDetail,
HostMetricSummaryMonthlyList, # HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
) )
from awx.api.views.bulk import ( from awx.api.views.bulk import (
BulkView, BulkView,
BulkHostCreateView, BulkHostCreateView,
BulkHostDeleteView,
BulkJobLaunchView, BulkJobLaunchView,
) )
@@ -85,7 +84,6 @@ from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls from .workflow_approval import urls as workflow_approval_urls
from .analytics import urls as analytics_urls from .analytics import urls as analytics_urls
from .receptor_address import urls as receptor_address_urls
v2_urls = [ v2_urls = [
re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'), re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -125,7 +123,8 @@ v2_urls = [
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)), re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
re_path(r'^hosts/', include(host_urls)), re_path(r'^hosts/', include(host_urls)),
re_path(r'^host_metrics/', include(host_metric_urls)), re_path(r'^host_metrics/', include(host_metric_urls)),
re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'), # It will be enabled in future version of the AWX
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^groups/', include(group_urls)), re_path(r'^groups/', include(group_urls)),
re_path(r'^inventory_sources/', include(inventory_source_urls)), re_path(r'^inventory_sources/', include(inventory_source_urls)),
re_path(r'^inventory_updates/', include(inventory_update_urls)), re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -154,9 +153,7 @@ v2_urls = [
re_path(r'^workflow_approvals/', include(workflow_approval_urls)), re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'), re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'), re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'), re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
re_path(r'^receptor_addresses/', include(receptor_address_urls)),
] ]
@@ -170,13 +167,10 @@ urlpatterns = [
] ]
if MODE == 'development': if MODE == 'development':
# Only include these if we are in the development environment # Only include these if we are in the development environment
from awx.api.swagger import schema_view from awx.api.swagger import SwaggerSchemaView
urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
from awx.api.urls.debug import urls as debug_urls from awx.api.urls.debug import urls as debug_urls
urlpatterns += [re_path(r'^debug/', include(debug_urls))] urlpatterns += [re_path(r'^debug/', include(debug_urls))]
urlpatterns += [
re_path(r'^swagger(?P<format>\.json|\.yaml)/$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]

View File

@@ -1,11 +1,10 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver, BitbucketDcWebhookReceiver from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
urlpatterns = [ urlpatterns = [
re_path(r'^webhook_key/$', WebhookKeyView.as_view(), name='webhook_key'), re_path(r'^webhook_key/$', WebhookKeyView.as_view(), name='webhook_key'),
re_path(r'^github/$', GithubWebhookReceiver.as_view(), name='webhook_receiver_github'), re_path(r'^github/$', GithubWebhookReceiver.as_view(), name='webhook_receiver_github'),
re_path(r'^gitlab/$', GitlabWebhookReceiver.as_view(), name='webhook_receiver_gitlab'), re_path(r'^gitlab/$', GitlabWebhookReceiver.as_view(), name='webhook_receiver_gitlab'),
re_path(r'^bitbucket_dc/$', BitbucketDcWebhookReceiver.as_view(), name='webhook_receiver_bitbucket_dc'),
] ]

View File

@@ -2,21 +2,28 @@
# All Rights Reserved. # All Rights Reserved.
from django.conf import settings from django.conf import settings
from django.urls import NoReverseMatch
from rest_framework.reverse import reverse as drf_reverse from rest_framework.reverse import _reverse
from rest_framework.versioning import URLPathVersioning as BaseVersioning from rest_framework.versioning import URLPathVersioning as BaseVersioning
def is_optional_api_urlpattern_prefix_request(request): def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
if settings.OPTIONAL_API_URLPATTERN_PREFIX and request: """
if request.path.startswith(f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}"): Copy and monkey-patch `rest_framework.reverse.reverse` to prevent adding unwarranted
return True query string parameters.
return False """
scheme = getattr(request, 'versioning_scheme', None)
if scheme is not None:
try:
url = scheme.reverse(viewname, args, kwargs, request, format, **extra)
except NoReverseMatch:
# In case the versioning scheme reversal fails, fallback to the
# default implementation
url = _reverse(viewname, args, kwargs, request, format, **extra)
else:
url = _reverse(viewname, args, kwargs, request, format, **extra)
def transform_optional_api_urlpattern_prefix_url(request, url):
if is_optional_api_urlpattern_prefix_request(request):
url = url.replace('/api', f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}")
return url return url

View File

@@ -60,9 +60,6 @@ from oauth2_provider.models import get_access_token_model
import pytz import pytz
from wsgiref.util import FileWrapper from wsgiref.util import FileWrapper
# django-ansible-base
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
# AWX # AWX
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
from awx.main.access import get_user_queryset from awx.main.access import get_user_queryset
@@ -90,7 +87,6 @@ from awx.api.generics import (
from awx.api.views.labels import LabelSubListCreateAttachDetachView from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main import models from awx.main import models
from awx.main.models.rbac import get_role_definition
from awx.main.utils import ( from awx.main.utils import (
camelcase_to_underscore, camelcase_to_underscore,
extract_ansible_vars, extract_ansible_vars,
@@ -132,10 +128,6 @@ logger = logging.getLogger('awx.api.views')
def unpartitioned_event_horizon(cls): def unpartitioned_event_horizon(cls):
with connection.cursor() as cursor:
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
if not cursor.fetchone():
return 0
with connection.cursor() as cursor: with connection.cursor() as cursor:
try: try:
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}') cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
@@ -276,24 +268,16 @@ class DashboardJobsGraphView(APIView):
success_query = user_unified_jobs.filter(status='successful') success_query = user_unified_jobs.filter(status='successful')
failed_query = user_unified_jobs.filter(status='failed') failed_query = user_unified_jobs.filter(status='failed')
canceled_query = user_unified_jobs.filter(status='canceled')
error_query = user_unified_jobs.filter(status='error')
if job_type == 'inv_sync': if job_type == 'inv_sync':
success_query = success_query.filter(instance_of=models.InventoryUpdate) success_query = success_query.filter(instance_of=models.InventoryUpdate)
failed_query = failed_query.filter(instance_of=models.InventoryUpdate) failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
error_query = error_query.filter(instance_of=models.InventoryUpdate)
elif job_type == 'playbook_run': elif job_type == 'playbook_run':
success_query = success_query.filter(instance_of=models.Job) success_query = success_query.filter(instance_of=models.Job)
failed_query = failed_query.filter(instance_of=models.Job) failed_query = failed_query.filter(instance_of=models.Job)
canceled_query = canceled_query.filter(instance_of=models.Job)
error_query = error_query.filter(instance_of=models.Job)
elif job_type == 'scm_update': elif job_type == 'scm_update':
success_query = success_query.filter(instance_of=models.ProjectUpdate) success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate) failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
error_query = error_query.filter(instance_of=models.ProjectUpdate)
end = now() end = now()
interval = 'day' interval = 'day'
@@ -309,12 +293,10 @@ class DashboardJobsGraphView(APIView):
else: else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST) return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)
dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}} dashboard_data = {"jobs": {"successful": [], "failed": []}}
succ_list = dashboard_data['jobs']['successful'] succ_list = dashboard_data['jobs']['successful']
fail_list = dashboard_data['jobs']['failed'] fail_list = dashboard_data['jobs']['failed']
canceled_list = dashboard_data['jobs']['canceled']
error_list = dashboard_data['jobs']['error']
qs_s = ( qs_s = (
success_query.filter(finished__range=(start, end)) success_query.filter(finished__range=(start, end))
@@ -332,22 +314,6 @@ class DashboardJobsGraphView(APIView):
.annotate(agg=Count('id', distinct=True)) .annotate(agg=Count('id', distinct=True))
) )
data_f = {item['d']: item['agg'] for item in qs_f} data_f = {item['d']: item['agg'] for item in qs_f}
qs_c = (
canceled_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_c = {item['d']: item['agg'] for item in qs_c}
qs_e = (
error_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_e = {item['d']: item['agg'] for item in qs_e}
start_date = start.replace(hour=0, minute=0, second=0, microsecond=0) start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count(): for d in itertools.count():
@@ -356,8 +322,6 @@ class DashboardJobsGraphView(APIView):
break break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)]) succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)]) fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])
return Response(dashboard_data) return Response(dashboard_data)
@@ -369,34 +333,25 @@ class InstanceList(ListCreateAPIView):
search_fields = ('hostname',) search_fields = ('hostname',)
ordering = ('id',) ordering = ('id',)
def get_queryset(self):
qs = super().get_queryset().prefetch_related('receptor_addresses')
return qs
class InstanceDetail(RetrieveUpdateAPIView): class InstanceDetail(RetrieveUpdateAPIView):
name = _("Instance Detail") name = _("Instance Detail")
model = models.Instance model = models.Instance
serializer_class = serializers.InstanceSerializer serializer_class = serializers.InstanceSerializer
def get_queryset(self):
qs = super().get_queryset().prefetch_related('receptor_addresses')
return qs
def update_raw_data(self, data): def update_raw_data(self, data):
# these fields are only valid on creation of an instance, so they unwanted on detail view # these fields are only valid on creation of an instance, so they unwanted on detail view
data.pop('listener_port', None)
data.pop('node_type', None) data.pop('node_type', None)
data.pop('hostname', None) data.pop('hostname', None)
data.pop('ip_address', None)
return super(InstanceDetail, self).update_raw_data(data) return super(InstanceDetail, self).update_raw_data(data)
def update(self, request, *args, **kwargs): def update(self, request, *args, **kwargs):
r = super(InstanceDetail, self).update(request, *args, **kwargs) r = super(InstanceDetail, self).update(request, *args, **kwargs)
if status.is_success(r.status_code): if status.is_success(r.status_code):
obj = self.get_object() obj = self.get_object()
capacity_changed = obj.set_capacity_value() obj.set_capacity_value()
if capacity_changed: obj.save(update_fields=['capacity'])
obj.save(update_fields=['capacity'])
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj) r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
return r return r
@@ -415,37 +370,13 @@ class InstanceUnifiedJobsList(SubListAPIView):
class InstancePeersList(SubListAPIView): class InstancePeersList(SubListAPIView):
name = _("Peers") name = _("Instance Peers")
model = models.ReceptorAddress
serializer_class = serializers.ReceptorAddressSerializer
parent_model = models.Instance parent_model = models.Instance
model = models.Instance
serializer_class = serializers.InstanceSerializer
parent_access = 'read' parent_access = 'read'
search_fields = {'hostname'}
relationship = 'peers' relationship = 'peers'
search_fields = ('address',)
class InstanceReceptorAddressesList(SubListAPIView):
name = _("Receptor Addresses")
model = models.ReceptorAddress
parent_key = 'instance'
parent_model = models.Instance
serializer_class = serializers.ReceptorAddressSerializer
search_fields = ('address',)
class ReceptorAddressesList(ListAPIView):
name = _("Receptor Addresses")
model = models.ReceptorAddress
serializer_class = serializers.ReceptorAddressSerializer
search_fields = ('address',)
class ReceptorAddressDetail(RetrieveAPIView):
name = _("Receptor Address Detail")
model = models.ReceptorAddress
serializer_class = serializers.ReceptorAddressSerializer
parent_model = models.Instance
relationship = 'receptor_addresses'
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView): class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
@@ -540,7 +471,6 @@ class InstanceGroupAccessList(ResourceAccessList):
class InstanceGroupObjectRolesList(SubListAPIView): class InstanceGroupObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.InstanceGroup parent_model = models.InstanceGroup
@@ -635,7 +565,7 @@ class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
if self.relationship not in ask_mapping: if self.relationship not in ask_mapping:
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)} return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
elif sub.passwords_needed: elif sub.passwords_needed:
return {"msg": _("Credential that requires user input on launch cannot be used in saved launch configuration.")} return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
ask_field_name = ask_mapping[self.relationship] ask_field_name = ask_mapping[self.relationship]
@@ -729,7 +659,6 @@ class TeamUsersList(BaseUsersList):
class TeamRolesList(SubListAttachDetachAPIView): class TeamRolesList(SubListAttachDetachAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializerWithParentAccess serializer_class = serializers.RoleSerializerWithParentAccess
metadata_class = RoleMetadata metadata_class = RoleMetadata
@@ -769,12 +698,10 @@ class TeamRolesList(SubListAttachDetachAPIView):
class TeamObjectRolesList(SubListAPIView): class TeamObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.Team parent_model = models.Team
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()
@@ -792,15 +719,8 @@ class TeamProjectsList(SubListAPIView):
self.check_parent_access(team) self.check_parent_access(team)
model_ct = ContentType.objects.get_for_model(self.model) model_ct = ContentType.objects.get_for_model(self.model)
parent_ct = ContentType.objects.get_for_model(self.parent_model) parent_ct = ContentType.objects.get_for_model(self.parent_model)
proj_roles = models.Role.objects.filter(Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk), content_type=model_ct)
rd = get_role_definition(team.member_role) return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles])
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()
if role is None:
# Team has no permissions, therefore team has no projects
return self.model.objects.none()
else:
project_qs = self.model.accessible_objects(self.request.user, 'read_role')
return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id'))
class TeamActivityStreamList(SubListAPIView): class TeamActivityStreamList(SubListAPIView):
@@ -815,23 +735,10 @@ class TeamActivityStreamList(SubListAPIView):
self.check_parent_access(parent) self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model) qs = self.request.user.get_queryset(self.model)
return qs.filter( return qs.filter(
Q(team=parent) Q(team=parent)
| Q( | Q(project__in=models.Project.accessible_objects(parent, 'read_role'))
project__in=RoleEvaluation.objects.filter( | Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Project).id, codename='view_project'
)
.values_list('object_id')
.distinct()
)
| Q(
credential__in=RoleEvaluation.objects.filter(
role__in=parent.has_roles.all(), content_type_id=ContentType.objects.get_for_model(models.Credential).id, codename='view_credential'
)
.values_list('object_id')
.distinct()
)
) )
@@ -1083,12 +990,10 @@ class ProjectAccessList(ResourceAccessList):
class ProjectObjectRolesList(SubListAPIView): class ProjectObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.Project parent_model = models.Project
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()
@@ -1246,7 +1151,6 @@ class UserTeamsList(SubListAPIView):
class UserRolesList(SubListAttachDetachAPIView): class UserRolesList(SubListAttachDetachAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializerWithParentAccess serializer_class = serializers.RoleSerializerWithParentAccess
metadata_class = RoleMetadata metadata_class = RoleMetadata
@@ -1488,7 +1392,7 @@ class OrganizationCredentialList(SubListCreateAPIView):
self.check_parent_access(organization) self.check_parent_access(organization)
user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all() user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all()
org_set = models.Credential.objects.filter(organization=organization) org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all()
if self.request.user.is_superuser or self.request.user.is_system_auditor: if self.request.user.is_superuser or self.request.user.is_system_auditor:
return org_set return org_set
@@ -1521,12 +1425,10 @@ class CredentialAccessList(ResourceAccessList):
class CredentialObjectRolesList(SubListAPIView): class CredentialObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.Credential parent_model = models.Credential
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()
@@ -1662,15 +1564,16 @@ class HostMetricDetail(RetrieveDestroyAPIView):
return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_204_NO_CONTENT)
class HostMetricSummaryMonthlyList(ListAPIView): # It will be enabled in future version of the AWX
name = _("Host Metrics Summary Monthly") # class HostMetricSummaryMonthlyList(ListAPIView):
model = models.HostMetricSummaryMonthly # name = _("Host Metrics Summary Monthly")
serializer_class = serializers.HostMetricSummaryMonthlySerializer # model = models.HostMetricSummaryMonthly
permission_classes = (IsSystemAdminOrAuditor,) # serializer_class = serializers.HostMetricSummaryMonthlySerializer
search_fields = ('date',) # permission_classes = (IsSystemAdminOrAuditor,)
# search_fields = ('date',)
def get_queryset(self): #
return self.model.objects.all() # def get_queryset(self):
# return self.model.objects.all()
class HostList(HostRelatedSearchMixin, ListCreateAPIView): class HostList(HostRelatedSearchMixin, ListCreateAPIView):
@@ -2313,6 +2216,13 @@ class JobTemplateList(ListCreateAPIView):
serializer_class = serializers.JobTemplateSerializer serializer_class = serializers.JobTemplateSerializer
always_allow_superuser = False always_allow_superuser = False
def post(self, request, *args, **kwargs):
ret = super(JobTemplateList, self).post(request, *args, **kwargs)
if ret.status_code == 201:
job_template = models.JobTemplate.objects.get(id=ret.data['id'])
job_template.admin_role.members.add(request.user)
return ret
class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView): class JobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = models.JobTemplate model = models.JobTemplate
@@ -2591,7 +2501,7 @@ class JobTemplateSurveySpec(GenericAPIView):
return Response( return Response(
dict( dict(
error=_( error=_(
"$encrypted$ is a reserved keyword for password question defaults, survey question {idx} is type {survey_item[type]}." "$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
).format(**context) ).format(**context)
), ),
status=status.HTTP_400_BAD_REQUEST, status=status.HTTP_400_BAD_REQUEST,
@@ -2858,12 +2768,10 @@ class JobTemplateAccessList(ResourceAccessList):
class JobTemplateObjectRolesList(SubListAPIView): class JobTemplateObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()
@@ -3246,12 +3154,10 @@ class WorkflowJobTemplateAccessList(ResourceAccessList):
class WorkflowJobTemplateObjectRolesList(SubListAPIView): class WorkflowJobTemplateObjectRolesList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.WorkflowJobTemplate parent_model = models.WorkflowJobTemplate
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()
@@ -3427,6 +3333,7 @@ class JobLabelList(SubListAPIView):
serializer_class = serializers.LabelSerializer serializer_class = serializers.LabelSerializer
parent_model = models.Job parent_model = models.Job
relationship = 'labels' relationship = 'labels'
parent_key = 'job'
class WorkflowJobLabelList(JobLabelList): class WorkflowJobLabelList(JobLabelList):
@@ -4149,7 +4056,7 @@ class UnifiedJobStdout(RetrieveAPIView):
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs) return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
except models.StdoutMaxBytesExceeded as e: except models.StdoutMaxBytesExceeded as e:
response_message = _( response_message = _(
"Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes." "Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
).format(text_size=e.total, supported_size=e.supported) ).format(text_size=e.total, supported_size=e.supported)
if request.accepted_renderer.format == 'json': if request.accepted_renderer.format == 'json':
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message}) return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})
@@ -4260,7 +4167,6 @@ class ActivityStreamDetail(RetrieveAPIView):
class RoleList(ListAPIView): class RoleList(ListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
permission_classes = (IsAuthenticated,) permission_classes = (IsAuthenticated,)
@@ -4268,13 +4174,11 @@ class RoleList(ListAPIView):
class RoleDetail(RetrieveAPIView): class RoleDetail(RetrieveAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
class RoleUsersList(SubListAttachDetachAPIView): class RoleUsersList(SubListAttachDetachAPIView):
deprecated = True
model = models.User model = models.User
serializer_class = serializers.UserSerializer serializer_class = serializers.UserSerializer
parent_model = models.Role parent_model = models.Role
@@ -4309,7 +4213,6 @@ class RoleUsersList(SubListAttachDetachAPIView):
class RoleTeamsList(SubListAttachDetachAPIView): class RoleTeamsList(SubListAttachDetachAPIView):
deprecated = True
model = models.Team model = models.Team
serializer_class = serializers.TeamSerializer serializer_class = serializers.TeamSerializer
parent_model = models.Role parent_model = models.Role
@@ -4354,12 +4257,10 @@ class RoleTeamsList(SubListAttachDetachAPIView):
team.member_role.children.remove(role) team.member_role.children.remove(role)
else: else:
team.member_role.children.add(role) team.member_role.children.add(role)
return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_204_NO_CONTENT)
class RoleParentsList(SubListAPIView): class RoleParentsList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.Role parent_model = models.Role
@@ -4373,7 +4274,6 @@ class RoleParentsList(SubListAPIView):
class RoleChildrenList(SubListAPIView): class RoleChildrenList(SubListAPIView):
deprecated = True
model = models.Role model = models.Role
serializer_class = serializers.RoleSerializer serializer_class = serializers.RoleSerializer
parent_model = models.Role parent_model = models.Role

View File

@@ -48,23 +48,23 @@ class AnalyticsRootView(APIView):
def get(self, request, format=None): def get(self, request, format=None):
data = OrderedDict() data = OrderedDict()
data['authorized'] = reverse('api:analytics_authorized', request=request) data['authorized'] = reverse('api:analytics_authorized')
data['reports'] = reverse('api:analytics_reports_list', request=request) data['reports'] = reverse('api:analytics_reports_list')
data['report_options'] = reverse('api:analytics_report_options_list', request=request) data['report_options'] = reverse('api:analytics_report_options_list')
data['adoption_rate'] = reverse('api:analytics_adoption_rate', request=request) data['adoption_rate'] = reverse('api:analytics_adoption_rate')
data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options', request=request) data['adoption_rate_options'] = reverse('api:analytics_adoption_rate_options')
data['event_explorer'] = reverse('api:analytics_event_explorer', request=request) data['event_explorer'] = reverse('api:analytics_event_explorer')
data['event_explorer_options'] = reverse('api:analytics_event_explorer_options', request=request) data['event_explorer_options'] = reverse('api:analytics_event_explorer_options')
data['host_explorer'] = reverse('api:analytics_host_explorer', request=request) data['host_explorer'] = reverse('api:analytics_host_explorer')
data['host_explorer_options'] = reverse('api:analytics_host_explorer_options', request=request) data['host_explorer_options'] = reverse('api:analytics_host_explorer_options')
data['job_explorer'] = reverse('api:analytics_job_explorer', request=request) data['job_explorer'] = reverse('api:analytics_job_explorer')
data['job_explorer_options'] = reverse('api:analytics_job_explorer_options', request=request) data['job_explorer_options'] = reverse('api:analytics_job_explorer_options')
data['probe_templates'] = reverse('api:analytics_probe_templates_explorer', request=request) data['probe_templates'] = reverse('api:analytics_probe_templates_explorer')
data['probe_templates_options'] = reverse('api:analytics_probe_templates_options', request=request) data['probe_templates_options'] = reverse('api:analytics_probe_templates_options')
data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer', request=request) data['probe_template_for_hosts'] = reverse('api:analytics_probe_template_for_hosts_explorer')
data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options', request=request) data['probe_template_for_hosts_options'] = reverse('api:analytics_probe_template_for_hosts_options')
data['roi_templates'] = reverse('api:analytics_roi_templates_explorer', request=request) data['roi_templates'] = reverse('api:analytics_roi_templates_explorer')
data['roi_templates_options'] = reverse('api:analytics_roi_templates_options', request=request) data['roi_templates_options'] = reverse('api:analytics_roi_templates_options')
return Response(data) return Response(data)

View File

@@ -1,7 +1,5 @@
from collections import OrderedDict from collections import OrderedDict
from django.utils.translation import gettext_lazy as _
from rest_framework.permissions import IsAuthenticated from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse from rest_framework.reverse import reverse
@@ -20,9 +18,6 @@ from awx.api import (
class BulkView(APIView): class BulkView(APIView):
name = _('Bulk')
swagger_topic = 'Bulk'
permission_classes = [IsAuthenticated] permission_classes = [IsAuthenticated]
renderer_classes = [ renderer_classes = [
renderers.BrowsableAPIRenderer, renderers.BrowsableAPIRenderer,
@@ -34,7 +29,6 @@ class BulkView(APIView):
'''List top level resources''' '''List top level resources'''
data = OrderedDict() data = OrderedDict()
data['host_create'] = reverse('api:bulk_host_create', request=request) data['host_create'] = reverse('api:bulk_host_create', request=request)
data['host_delete'] = reverse('api:bulk_host_delete', request=request)
data['job_launch'] = reverse('api:bulk_job_launch', request=request) data['job_launch'] = reverse('api:bulk_job_launch', request=request)
return Response(data) return Response(data)
@@ -73,20 +67,3 @@ class BulkHostCreateView(GenericAPIView):
result = serializer.create(serializer.validated_data) result = serializer.create(serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED) return Response(result, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BulkHostDeleteView(GenericAPIView):
permission_classes = [IsAuthenticated]
model = Host
serializer_class = serializers.BulkHostDeleteSerializer
allowed_methods = ['GET', 'POST', 'OPTIONS']
def get(self, request):
return Response({"detail": "Bulk delete hosts with this endpoint"}, status=status.HTTP_200_OK)
def post(self, request):
serializer = serializers.BulkHostDeleteSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
result = serializer.delete(serializer.validated_data)
return Response(result, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

View File

@@ -6,8 +6,6 @@ import io
import ipaddress import ipaddress
import os import os
import tarfile import tarfile
import time
import re
import asn1 import asn1
from awx.api import serializers from awx.api import serializers
@@ -42,8 +40,6 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# │ │ └── receptor.key # │ │ └── receptor.key
# │ └── work-public-key.pem # │ └── work-public-key.pem
# └── requirements.yml # └── requirements.yml
class InstanceInstallBundle(GenericAPIView): class InstanceInstallBundle(GenericAPIView):
name = _('Install Bundle') name = _('Install Bundle')
model = models.Instance model = models.Instance
@@ -53,54 +49,56 @@ class InstanceInstallBundle(GenericAPIView):
def get(self, request, *args, **kwargs): def get(self, request, *args, **kwargs):
instance_obj = self.get_object() instance_obj = self.get_object()
if instance_obj.node_type not in ('execution', 'hop'): if instance_obj.node_type not in ('execution',):
return Response( return Response(
data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')), data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
status=status.HTTP_400_BAD_REQUEST, status=status.HTTP_400_BAD_REQUEST,
) )
with io.BytesIO() as f: with io.BytesIO() as f:
with tarfile.open(fileobj=f, mode='w:gz') as tar: with tarfile.open(fileobj=f, mode='w:gz') as tar:
# copy /etc/receptor/tls/ca/mesh-CA.crt to receptor/tls/ca in the tar file # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
tar.add(os.path.realpath('/etc/receptor/tls/ca/mesh-CA.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/mesh-CA.crt") tar.add(
os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
)
# copy /etc/receptor/work_public_key.pem to receptor/work_public_key.pem # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
tar.add('/etc/receptor/work_public_key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work_public_key.pem") tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
# generate and write the receptor key to receptor/tls/receptor.key in the tar file # generate and write the receptor key to receptor/tls/receptor.key in the tar file
key, cert = generate_receptor_tls(instance_obj) key, cert = generate_receptor_tls(instance_obj)
def tar_addfile(tarinfo, filecontent):
tarinfo.mtime = time.time()
tarinfo.size = len(filecontent)
tar.addfile(tarinfo, io.BytesIO(filecontent))
key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key") key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
tar_addfile(key_tarinfo, key) key_tarinfo.size = len(key)
tar.addfile(key_tarinfo, io.BytesIO(key))
cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt") cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
cert_tarinfo.size = len(cert) cert_tarinfo.size = len(cert)
tar_addfile(cert_tarinfo, cert) tar.addfile(cert_tarinfo, io.BytesIO(cert))
# generate and write install_receptor.yml to the tar file # generate and write install_receptor.yml to the tar file
playbook = generate_playbook(instance_obj).encode('utf-8') playbook = generate_playbook().encode('utf-8')
playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml") playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
tar_addfile(playbook_tarinfo, playbook) playbook_tarinfo.size = len(playbook)
tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
# generate and write inventory.yml to the tar file # generate and write inventory.yml to the tar file
inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8') inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml") inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
tar_addfile(inventory_yml_tarinfo, inventory_yml) inventory_yml_tarinfo.size = len(inventory_yml)
tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
# generate and write group_vars/all.yml to the tar file # generate and write group_vars/all.yml to the tar file
group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8') group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml") group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
tar_addfile(group_vars_tarinfo, group_vars) group_vars_tarinfo.size = len(group_vars)
tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
# generate and write requirements.yml to the tar file # generate and write requirements.yml to the tar file
requirements_yml = generate_requirements_yml().encode('utf-8') requirements_yml = generate_requirements_yml().encode('utf-8')
requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml") requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
tar_addfile(requirements_yml_tarinfo, requirements_yml) requirements_yml_tarinfo.size = len(requirements_yml)
tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
# respond with the tarfile # respond with the tarfile
f.seek(0) f.seek(0)
@@ -109,10 +107,8 @@ class InstanceInstallBundle(GenericAPIView):
return response return response
def generate_playbook(instance_obj): def generate_playbook():
playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj)) return render_to_string("instance_install_bundle/install_receptor.yml")
# convert consecutive newlines with a single newline
return re.sub(r'\n+', '\n', playbook_yaml)
def generate_requirements_yml(): def generate_requirements_yml():
@@ -124,21 +120,7 @@ def generate_inventory_yml(instance_obj):
def generate_group_vars_all_yml(instance_obj): def generate_group_vars_all_yml(instance_obj):
# get peers return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
peers = []
for addr in instance_obj.peers.select_related('instance'):
peers.append(dict(address=addr.get_full_address(), protocol=addr.protocol))
context = dict(instance=instance_obj, peers=peers)
canonical_addr = instance_obj.canonical_address
if canonical_addr:
context['listener_port'] = canonical_addr.port
protocol = canonical_addr.protocol if canonical_addr.protocol != 'wss' else 'ws'
context['listener_protocol'] = protocol
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
# convert consecutive newlines with a single newline
return re.sub(r'\n+', '\n', all_yaml)
def generate_receptor_tls(instance_obj): def generate_receptor_tls(instance_obj):
@@ -179,14 +161,14 @@ def generate_receptor_tls(instance_obj):
.sign(key, hashes.SHA256()) .sign(key, hashes.SHA256())
) )
# sign csr with the receptor ca key from /etc/receptor/ca/mesh-CA.key # sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key
with open('/etc/receptor/tls/ca/mesh-CA.key', 'rb') as f: with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
ca_key = serialization.load_pem_private_key( ca_key = serialization.load_pem_private_key(
f.read(), f.read(),
password=None, password=None,
) )
with open('/etc/receptor/tls/ca/mesh-CA.crt', 'rb') as f: with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
ca_cert = x509.load_pem_x509_certificate(f.read()) ca_cert = x509.load_pem_x509_certificate(f.read())
cert = ( cert = (

View File

@@ -152,7 +152,6 @@ class InventoryObjectRolesList(SubListAPIView):
serializer_class = RoleSerializer serializer_class = RoleSerializer
parent_model = Inventory parent_model = Inventory
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()

View File

@@ -17,7 +17,7 @@ class MeshVisualizer(APIView):
def get(self, request, format=None): def get(self, request, format=None):
data = { data = {
'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data, 'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target__instance', 'source'), many=True).data, 'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data,
} }
return Response(data) return Response(data)

View File

@@ -50,7 +50,7 @@ class UnifiedJobDeletionMixin(object):
return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST)
else: else:
# if it has been > 1 minute, events are probably lost # if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events processed.'.format(obj.log_format)) logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
# Manually cascade delete events if unpartitioned job # Manually cascade delete events if unpartitioned job
if obj.has_unpartitioned_events: if obj.has_unpartitioned_events:

View File

@@ -226,7 +226,6 @@ class OrganizationObjectRolesList(SubListAPIView):
serializer_class = RoleSerializer serializer_class = RoleSerializer
parent_model = Organization parent_model = Organization
search_fields = ('role_field', 'content_type__model') search_fields = ('role_field', 'content_type__model')
deprecated = True
def get_queryset(self): def get_queryset(self):
po = self.get_parent_object() po = self.get_parent_object()

View File

@@ -13,7 +13,6 @@ from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie from django.views.decorators.csrf import ensure_csrf_cookie
from django.template.loader import render_to_string from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.urls import reverse as django_reverse
from rest_framework.permissions import AllowAny, IsAuthenticated from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response from rest_framework.response import Response
@@ -21,14 +20,13 @@ from rest_framework import status
import requests import requests
from awx import MODE
from awx.api.generics import APIView from awx.api.generics import APIView
from awx.conf.registry import settings_registry from awx.conf.registry import settings_registry
from awx.main.analytics import all_collectors from awx.main.analytics import all_collectors
from awx.main.ha import is_ha_environment from awx.main.ha import is_ha_environment
from awx.main.utils import get_awx_version, get_custom_venv_choices from awx.main.utils import get_awx_version, get_custom_venv_choices
from awx.main.utils.licensing import validate_entitlement_manifest from awx.main.utils.licensing import validate_entitlement_manifest
from awx.api.versioning import URLPathVersioning, is_optional_api_urlpattern_prefix_request, reverse, drf_reverse from awx.api.versioning import reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ from awx.main.utils import set_environ
@@ -40,24 +38,22 @@ logger = logging.getLogger('awx.api.views.root')
class ApiRootView(APIView): class ApiRootView(APIView):
permission_classes = (AllowAny,) permission_classes = (AllowAny,)
name = _('REST API') name = _('REST API')
versioning_class = URLPathVersioning versioning_class = None
swagger_topic = 'Versioning' swagger_topic = 'Versioning'
@method_decorator(ensure_csrf_cookie) @method_decorator(ensure_csrf_cookie)
def get(self, request, format=None): def get(self, request, format=None):
'''List supported API versions''' '''List supported API versions'''
v2 = reverse('api:api_v2_root_view', request=request, kwargs={'version': 'v2'})
v2 = reverse('api:api_v2_root_view', kwargs={'version': 'v2'})
data = OrderedDict() data = OrderedDict()
data['description'] = _('AWX REST API') data['description'] = _('AWX REST API')
data['current_version'] = v2 data['current_version'] = v2
data['available_versions'] = dict(v2=v2) data['available_versions'] = dict(v2=v2)
if not is_optional_api_urlpattern_prefix_request(request): data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
data['oauth2'] = drf_reverse('api:oauth_authorization_root_view')
data['custom_logo'] = settings.CUSTOM_LOGO data['custom_logo'] = settings.CUSTOM_LOGO
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
if MODE == 'development':
data['swagger'] = drf_reverse('api:schema-swagger-ui')
return Response(data) return Response(data)
@@ -85,7 +81,6 @@ class ApiVersionRootView(APIView):
data['ping'] = reverse('api:api_v2_ping_view', request=request) data['ping'] = reverse('api:api_v2_ping_view', request=request)
data['instances'] = reverse('api:instance_list', request=request) data['instances'] = reverse('api:instance_list', request=request)
data['instance_groups'] = reverse('api:instance_group_list', request=request) data['instance_groups'] = reverse('api:instance_group_list', request=request)
data['receptor_addresses'] = reverse('api:receptor_addresses_list', request=request)
data['config'] = reverse('api:api_v2_config_view', request=request) data['config'] = reverse('api:api_v2_config_view', request=request)
data['settings'] = reverse('api:setting_category_list', request=request) data['settings'] = reverse('api:setting_category_list', request=request)
data['me'] = reverse('api:user_me_list', request=request) data['me'] = reverse('api:user_me_list', request=request)
@@ -109,7 +104,8 @@ class ApiVersionRootView(APIView):
data['groups'] = reverse('api:group_list', request=request) data['groups'] = reverse('api:group_list', request=request)
data['hosts'] = reverse('api:host_list', request=request) data['hosts'] = reverse('api:host_list', request=request)
data['host_metrics'] = reverse('api:host_metric_list', request=request) data['host_metrics'] = reverse('api:host_metric_list', request=request)
data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request) # It will be enabled in future version of the AWX
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
data['job_templates'] = reverse('api:job_template_list', request=request) data['job_templates'] = reverse('api:job_template_list', request=request)
data['jobs'] = reverse('api:job_list', request=request) data['jobs'] = reverse('api:job_list', request=request)
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request) data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -131,10 +127,6 @@ class ApiVersionRootView(APIView):
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request) data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
data['bulk'] = reverse('api:bulk', request=request) data['bulk'] = reverse('api:bulk', request=request)
data['analytics'] = reverse('api:analytics_root_view', request=request) data['analytics'] = reverse('api:analytics_root_view', request=request)
data['service_index'] = django_reverse('service-index-root')
data['role_definitions'] = django_reverse('roledefinition-list')
data['role_user_assignments'] = django_reverse('roleuserassignment-list')
data['role_team_assignments'] = django_reverse('roleteamassignment-list')
return Response(data) return Response(data)

View File

@@ -1,4 +1,4 @@
from hashlib import sha1, sha256 from hashlib import sha1
import hmac import hmac
import logging import logging
import urllib.parse import urllib.parse
@@ -99,31 +99,14 @@ class WebhookReceiverBase(APIView):
def get_signature(self): def get_signature(self):
raise NotImplementedError raise NotImplementedError
def must_check_signature(self):
return True
def is_ignored_request(self):
return False
def check_signature(self, obj): def check_signature(self, obj):
if not obj.webhook_key: if not obj.webhook_key:
raise PermissionDenied raise PermissionDenied
if not self.must_check_signature():
logger.debug("skipping signature validation")
return
hash_alg, expected_digest = self.get_signature() mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
if hash_alg == 'sha1': logger.debug("header signature: %s", self.get_signature())
mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
elif hash_alg == 'sha256':
mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha256)
else:
logger.debug("Unsupported signature type, supported: sha1, sha256, received: {}".format(hash_alg))
raise PermissionDenied
logger.debug("header signature: %s", expected_digest)
logger.debug("calculated signature: %s", force_bytes(mac.hexdigest())) logger.debug("calculated signature: %s", force_bytes(mac.hexdigest()))
if not hmac.compare_digest(force_bytes(mac.hexdigest()), expected_digest): if not hmac.compare_digest(force_bytes(mac.hexdigest()), self.get_signature()):
raise PermissionDenied raise PermissionDenied
@csrf_exempt @csrf_exempt
@@ -131,14 +114,10 @@ class WebhookReceiverBase(APIView):
# Ensure that the full contents of the request are captured for multiple uses. # Ensure that the full contents of the request are captured for multiple uses.
request.body request.body
logger.debug("headers: {}\ndata: {}\n".format(request.headers, request.data)) logger.debug("headers: {}\n" "data: {}\n".format(request.headers, request.data))
obj = self.get_object() obj = self.get_object()
self.check_signature(obj) self.check_signature(obj)
if self.is_ignored_request():
# This was an ignored request type (e.g. ping), don't act on it
return Response({'message': _("Webhook ignored")}, status=status.HTTP_200_OK)
event_type = self.get_event_type() event_type = self.get_event_type()
event_guid = self.get_event_guid() event_guid = self.get_event_guid()
event_ref = self.get_event_ref() event_ref = self.get_event_ref()
@@ -207,7 +186,7 @@ class GithubWebhookReceiver(WebhookReceiverBase):
if hash_alg != 'sha1': if hash_alg != 'sha1':
logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg)) logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg))
raise PermissionDenied raise PermissionDenied
return hash_alg, force_bytes(signature) return force_bytes(signature)
class GitlabWebhookReceiver(WebhookReceiverBase): class GitlabWebhookReceiver(WebhookReceiverBase):
@@ -235,73 +214,15 @@ class GitlabWebhookReceiver(WebhookReceiverBase):
return "{}://{}/api/v4/projects/{}/statuses/{}".format(parsed.scheme, parsed.netloc, project['id'], self.get_event_ref()) return "{}://{}/api/v4/projects/{}/statuses/{}".format(parsed.scheme, parsed.netloc, project['id'], self.get_event_ref())
def get_signature(self):
return force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')
def check_signature(self, obj): def check_signature(self, obj):
if not obj.webhook_key: if not obj.webhook_key:
raise PermissionDenied raise PermissionDenied
token_from_request = force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')
# GitLab only returns the secret token, not an hmac hash. Use # GitLab only returns the secret token, not an hmac hash. Use
# the hmac `compare_digest` helper function to prevent timing # the hmac `compare_digest` helper function to prevent timing
# analysis by attackers. # analysis by attackers.
if not hmac.compare_digest(force_bytes(obj.webhook_key), token_from_request): if not hmac.compare_digest(force_bytes(obj.webhook_key), self.get_signature()):
raise PermissionDenied raise PermissionDenied
class BitbucketDcWebhookReceiver(WebhookReceiverBase):
service = 'bitbucket_dc'
ref_keys = {
'repo:refs_changed': 'changes.0.toHash',
'mirror:repo_synchronized': 'changes.0.toHash',
'pr:opened': 'pullRequest.toRef.latestCommit',
'pr:from_ref_updated': 'pullRequest.toRef.latestCommit',
'pr:modified': 'pullRequest.toRef.latestCommit',
}
def get_event_type(self):
return self.request.META.get('HTTP_X_EVENT_KEY')
def get_event_guid(self):
return self.request.META.get('HTTP_X_REQUEST_ID')
def get_event_status_api(self):
# https://<bitbucket-base-url>/rest/build-status/1.0/commits/<commit-hash>
if self.get_event_type() not in self.ref_keys.keys():
return
if self.get_event_ref() is None:
return
any_url = None
if 'actor' in self.request.data:
any_url = self.request.data['actor'].get('links', {}).get('self')
if any_url is None and 'repository' in self.request.data:
any_url = self.request.data['repository'].get('links', {}).get('self')
if any_url is None:
return
any_url = any_url[0].get('href')
if any_url is None:
return
parsed = urllib.parse.urlparse(any_url)
return "{}://{}/rest/build-status/1.0/commits/{}".format(parsed.scheme, parsed.netloc, self.get_event_ref())
def is_ignored_request(self):
return self.get_event_type() not in [
'repo:refs_changed',
'mirror:repo_synchronized',
'pr:opened',
'pr:from_ref_updated',
'pr:modified',
]
def must_check_signature(self):
# Bitbucket does not sign ping requests...
return self.get_event_type() != 'diagnostics:ping'
def get_signature(self):
header_sig = self.request.META.get('HTTP_X_HUB_SIGNATURE')
if not header_sig:
logger.debug("Expected signature missing from header key HTTP_X_HUB_SIGNATURE")
raise PermissionDenied
hash_alg, signature = header_sig.split('=')
return hash_alg, force_bytes(signature)

View File

@@ -14,7 +14,7 @@ class ConfConfig(AppConfig):
def ready(self): def ready(self):
self.module.autodiscover() self.module.autodiscover()
if not set(sys.argv) & {'migrate', 'check_migrations', 'showmigrations'}: if not set(sys.argv) & {'migrate', 'check_migrations'}:
from .settings import SettingsWrapper from .settings import SettingsWrapper
SettingsWrapper.initialize() SettingsWrapper.initialize()

View File

@@ -55,7 +55,6 @@ register(
# Optional; category_slug will be slugified version of category if not # Optional; category_slug will be slugified version of category if not
# explicitly provided. # explicitly provided.
category_slug='cows', category_slug='cows',
hidden=True,
) )

View File

@@ -1,17 +0,0 @@
# Generated by Django 4.2 on 2023-06-09 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('conf', '0009_rename_proot_settings'),
]
operations = [
migrations.AlterField(
model_name='setting',
name='value',
field=models.JSONField(null=True),
),
]

View File

@@ -7,10 +7,9 @@ import json
# Django # Django
from django.db import models from django.db import models
from ansible_base.lib.utils.models import prevent_search
# AWX # AWX
from awx.main.models.base import CreatedModifiedModel from awx.main.fields import JSONBlob
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.utils import encrypt_field from awx.main.utils import encrypt_field
from awx.conf import settings_registry from awx.conf import settings_registry
@@ -19,7 +18,7 @@ __all__ = ['Setting']
class Setting(CreatedModifiedModel): class Setting(CreatedModifiedModel):
key = models.CharField(max_length=255) key = models.CharField(max_length=255)
value = models.JSONField(null=True) value = JSONBlob(null=True)
user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE)) user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))
def __str__(self): def __str__(self):

View File

@@ -127,8 +127,6 @@ class SettingsRegistry(object):
encrypted = bool(field_kwargs.pop('encrypted', False)) encrypted = bool(field_kwargs.pop('encrypted', False))
defined_in_file = bool(field_kwargs.pop('defined_in_file', False)) defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
unit = field_kwargs.pop('unit', None) unit = field_kwargs.pop('unit', None)
hidden = field_kwargs.pop('hidden', False)
warning_text = field_kwargs.pop('warning_text', None)
if getattr(field_kwargs.get('child', None), 'source', None) is not None: if getattr(field_kwargs.get('child', None), 'source', None) is not None:
field_kwargs['child'].source = None field_kwargs['child'].source = None
field_instance = field_class(**field_kwargs) field_instance = field_class(**field_kwargs)
@@ -136,14 +134,12 @@ class SettingsRegistry(object):
field_instance.category = category field_instance.category = category
field_instance.depends_on = depends_on field_instance.depends_on = depends_on
field_instance.unit = unit field_instance.unit = unit
field_instance.hidden = hidden
if placeholder is not empty: if placeholder is not empty:
field_instance.placeholder = placeholder field_instance.placeholder = placeholder
field_instance.defined_in_file = defined_in_file field_instance.defined_in_file = defined_in_file
if field_instance.defined_in_file: if field_instance.defined_in_file:
field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text) field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text)
field_instance.encrypted = encrypted field_instance.encrypted = encrypted
field_instance.warning_text = warning_text
original_field_instance = field_instance original_field_instance = field_instance
if field_class != original_field_class: if field_class != original_field_class:
original_field_instance = original_field_class(**field_kwargs) original_field_instance = original_field_class(**field_kwargs)

View File

@@ -1,7 +1,6 @@
# Python # Python
import contextlib import contextlib
import logging import logging
import psycopg
import threading import threading
import time import time
import os import os
@@ -14,7 +13,7 @@ from django.conf import settings, UserSettingsHolder
from django.core.cache import cache as django_cache from django.core.cache import cache as django_cache
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.db import transaction, connection from django.db import transaction, connection
from django.db.utils import DatabaseError, ProgrammingError from django.db.utils import Error as DBError, ProgrammingError
from django.utils.functional import cached_property from django.utils.functional import cached_property
# Django REST Framework # Django REST Framework
@@ -81,26 +80,18 @@ def _ctit_db_wrapper(trans_safe=False):
logger.debug('Obtaining database settings in spite of broken transaction.') logger.debug('Obtaining database settings in spite of broken transaction.')
transaction.set_rollback(False) transaction.set_rollback(False)
yield yield
except ProgrammingError as e: except DBError as exc:
# Exception raised for programming errors
# Examples may be table not found or already exists,
# this generally means we can't fetch Tower configuration
# because the database hasn't actually finished migrating yet;
# this is usually a sign that a service in a container (such as ws_broadcast)
# has come up *before* the database has finished migrating, and
# especially that the conf.settings table doesn't exist yet
# syntax error in the SQL statement, wrong number of parameters specified, etc.
if trans_safe: if trans_safe:
logger.debug(f'Database settings are not available, using defaults. error: {str(e)}') level = logger.warning
else: if isinstance(exc, ProgrammingError):
logger.exception('Error modifying something related to database settings.') if 'relation' in str(exc) and 'does not exist' in str(exc):
except DatabaseError as e: # this generally means we can't fetch Tower configuration
if trans_safe: # because the database hasn't actually finished migrating yet;
cause = e.__cause__ # this is usually a sign that a service in a container (such as ws_broadcast)
if cause and hasattr(cause, 'sqlstate'): # has come up *before* the database has finished migrating, and
sqlstate = cause.sqlstate # especially that the conf.settings table doesn't exist yet
sqlstate_str = psycopg.errors.lookup(sqlstate) level = logger.debug
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str)) level(f'Database settings are not available, using defaults. error: {str(exc)}')
else: else:
logger.exception('Error modifying something related to database settings.') logger.exception('Error modifying something related to database settings.')
finally: finally:
@@ -427,10 +418,6 @@ class SettingsWrapper(UserSettingsHolder):
"""Get value while accepting the in-memory cache if key is available""" """Get value while accepting the in-memory cache if key is available"""
with _ctit_db_wrapper(trans_safe=True): with _ctit_db_wrapper(trans_safe=True):
return self._get_local(name) return self._get_local(name)
# If the last line did not return, that means we hit a database error
# in that case, we should not have a local cache value
# thus, return empty as a signal to use the default
return empty
def __getattr__(self, name): def __getattr__(self, name):
value = empty value = empty

View File

@@ -35,7 +35,7 @@ class TestStringListBooleanField:
field = StringListBooleanField() field = StringListBooleanField()
with pytest.raises(ValidationError) as e: with pytest.raises(ValidationError) as e:
field.to_internal_value(value) field.to_internal_value(value)
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value)) assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES) @pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
def test_to_representation_valid(self, value_in, value_known): def test_to_representation_valid(self, value_in, value_known):
@@ -48,7 +48,7 @@ class TestStringListBooleanField:
field = StringListBooleanField() field = StringListBooleanField()
with pytest.raises(ValidationError) as e: with pytest.raises(ValidationError) as e:
field.to_representation(value) field.to_representation(value)
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value)) assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
class TestListTuplesField: class TestListTuplesField:
@@ -67,7 +67,7 @@ class TestListTuplesField:
field = ListTuplesField() field = ListTuplesField()
with pytest.raises(ValidationError) as e: with pytest.raises(ValidationError) as e:
field.to_internal_value(value) field.to_internal_value(value)
assert e.value.detail[0] == "Expected a list of tuples of max length 2 but got {} instead.".format(t) assert e.value.detail[0] == "Expected a list of tuples of max length 2 " "but got {} instead.".format(t)
class TestStringListPathField: class TestStringListPathField:

View File

@@ -13,7 +13,6 @@ from unittest import mock
from django.conf import LazySettings from django.conf import LazySettings
from django.core.cache.backends.locmem import LocMemCache from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured from django.core.exceptions import ImproperlyConfigured
from django.db.utils import Error as DBError
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
import pytest import pytest
@@ -332,18 +331,3 @@ def test_in_memory_cache_works(settings):
with mock.patch.object(settings, '_get_local') as mock_get: with mock.patch.object(settings, '_get_local') as mock_get:
assert settings.AWX_VAR == 'DEFAULT' assert settings.AWX_VAR == 'DEFAULT'
mock_get.assert_not_called() mock_get.assert_not_called()
@pytest.mark.defined_in_file(AWX_VAR=[])
def test_getattr_with_database_error(settings):
"""
If a setting is defined via the registry and has a null-ish default which is not None
then referencing that setting during a database outage should give that default
this is regression testing for a bug where it would return None
"""
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
settings._awx_conf_memoizedcache.clear()
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
mock_ensure.side_effect = DBError('for test')
assert settings.AWX_VAR == []

View File

@@ -20,15 +20,11 @@ from rest_framework.exceptions import ParseError, PermissionDenied
# Django OAuth Toolkit # Django OAuth Toolkit
from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken
# django-ansible-base
from ansible_base.lib.utils.validation import to_python_boolean
from ansible_base.rbac.models import RoleEvaluation
from ansible_base.rbac import permission_registry
# AWX # AWX
from awx.main.utils import ( from awx.main.utils import (
get_object_or_400, get_object_or_400,
get_pk_from_dict, get_pk_from_dict,
to_python_boolean,
get_licenser, get_licenser,
) )
from awx.main.models import ( from awx.main.models import (
@@ -60,7 +56,6 @@ from awx.main.models import (
Project, Project,
ProjectUpdate, ProjectUpdate,
ProjectUpdateEvent, ProjectUpdateEvent,
ReceptorAddress,
Role, Role,
Schedule, Schedule,
SystemJob, SystemJob,
@@ -75,6 +70,8 @@ from awx.main.models import (
WorkflowJobTemplateNode, WorkflowJobTemplateNode,
WorkflowApproval, WorkflowApproval,
WorkflowApprovalTemplate, WorkflowApprovalTemplate,
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
) )
from awx.main.models.mixins import ResourceMixin from awx.main.models.mixins import ResourceMixin
@@ -82,6 +79,7 @@ __all__ = [
'get_user_queryset', 'get_user_queryset',
'check_user_access', 'check_user_access',
'check_user_access_with_errors', 'check_user_access_with_errors',
'user_accessible_objects',
'consumer_access', 'consumer_access',
] ]
@@ -138,6 +136,10 @@ def register_access(model_class, access_class):
access_registry[model_class] = access_class access_registry[model_class] = access_class
def user_accessible_objects(user, role_name):
return ResourceMixin._accessible_objects(User, user, role_name)
def get_user_queryset(user, model_class): def get_user_queryset(user, model_class):
""" """
Return a queryset for the given model_class containing only the instances Return a queryset for the given model_class containing only the instances
@@ -265,11 +267,7 @@ class BaseAccess(object):
return self.can_change(obj, data) return self.can_change(obj, data)
def can_delete(self, obj): def can_delete(self, obj):
if self.user.is_superuser: return self.user.is_superuser
return True
if obj._meta.model_name in [cls._meta.model_name for cls in permission_registry.all_registered_models]:
return self.user.has_obj_perm(obj, 'delete')
return False
def can_copy(self, obj): def can_copy(self, obj):
return self.can_add({'reference_obj': obj}) return self.can_add({'reference_obj': obj})
@@ -368,9 +366,9 @@ class BaseAccess(object):
report_violation = lambda message: None report_violation = lambda message: None
else: else:
report_violation = lambda message: logger.warning(message) report_violation = lambda message: logger.warning(message)
if validation_info.get('trial', False) is True: if validation_info.get('trial', False) is True or validation_info['instance_count'] == 10: # basic 10 license
def report_violation(message): # noqa def report_violation(message):
raise PermissionDenied(message) raise PermissionDenied(message)
if check_expiration and validation_info.get('time_remaining', None) is None: if check_expiration and validation_info.get('time_remaining', None) is None:
@@ -644,10 +642,7 @@ class UserAccess(BaseAccess):
""" """
model = User model = User
prefetch_related = ( prefetch_related = ('profile',)
'profile',
'resource',
)
def filtered_queryset(self): def filtered_queryset(self):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()): if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
@@ -656,7 +651,9 @@ class UserAccess(BaseAccess):
qs = ( qs = (
User.objects.filter(pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members')) User.objects.filter(pk__in=Organization.accessible_objects(self.user, 'read_role').values('member_role__members'))
| User.objects.filter(pk=self.user.id) | User.objects.filter(pk=self.user.id)
| User.objects.filter(is_superuser=True) | User.objects.filter(
pk__in=Role.objects.filter(singleton_name__in=[ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR]).values('members')
)
).distinct() ).distinct()
return qs return qs
@@ -714,15 +711,6 @@ class UserAccess(BaseAccess):
if not allow_orphans: if not allow_orphans:
# in these cases only superusers can modify orphan users # in these cases only superusers can modify orphan users
return False return False
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
# Permission granted if the user has all permissions that the target user has
target_perms = set(
RoleEvaluation.objects.filter(role__in=obj.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct()
)
user_perms = set(
RoleEvaluation.objects.filter(role__in=self.user.has_roles.all()).values_list('object_id', 'content_type_id', 'codename').distinct()
)
return not (target_perms - user_perms)
return not obj.roles.all().exclude(ancestors__in=self.user.roles.all()).exists() return not obj.roles.all().exclude(ancestors__in=self.user.roles.all()).exists()
else: else:
return self.is_all_org_admin(obj) return self.is_all_org_admin(obj)
@@ -850,7 +838,6 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
prefetch_related = ( prefetch_related = (
'created_by', 'created_by',
'modified_by', 'modified_by',
'resource', # dab_resource_registry
) )
# organization admin_role is not a parent of organization auditor_role # organization admin_role is not a parent of organization auditor_role
notification_attach_roles = ['admin_role', 'auditor_role'] notification_attach_roles = ['admin_role', 'auditor_role']
@@ -961,6 +948,9 @@ class InventoryAccess(BaseAccess):
def can_update(self, obj): def can_update(self, obj):
return self.user in obj.update_role return self.user in obj.update_role
def can_delete(self, obj):
return self.can_admin(obj, None)
def can_run_ad_hoc_commands(self, obj): def can_run_ad_hoc_commands(self, obj):
return self.user in obj.adhoc_role return self.user in obj.adhoc_role
@@ -1316,7 +1306,6 @@ class TeamAccess(BaseAccess):
'created_by', 'created_by',
'modified_by', 'modified_by',
'organization', 'organization',
'resource', # dab_resource_registry
) )
def filtered_queryset(self): def filtered_queryset(self):
@@ -1414,12 +1403,8 @@ class ExecutionEnvironmentAccess(BaseAccess):
def can_change(self, obj, data): def can_change(self, obj, data):
if obj and obj.organization_id is None: if obj and obj.organization_id is None:
raise PermissionDenied raise PermissionDenied
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: if self.user not in obj.organization.execution_environment_admin_role:
if not self.user.has_obj_perm(obj, 'change'): raise PermissionDenied
raise PermissionDenied
else:
if self.user not in obj.organization.execution_environment_admin_role:
raise PermissionDenied
if data and 'organization' in data: if data and 'organization' in data:
new_org = get_object_from_data('organization', Organization, data, obj=obj) new_org = get_object_from_data('organization', Organization, data, obj=obj)
if not new_org or self.user not in new_org.execution_environment_admin_role: if not new_org or self.user not in new_org.execution_environment_admin_role:
@@ -2249,7 +2234,7 @@ class WorkflowJobAccess(BaseAccess):
if not node_access.can_add({'reference_obj': node}): if not node_access.can_add({'reference_obj': node}):
wj_add_perm = False wj_add_perm = False
if not wj_add_perm and self.save_messages: if not wj_add_perm and self.save_messages:
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job resources required for relaunch.') self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' 'resources required for relaunch.')
return wj_add_perm return wj_add_perm
def can_cancel(self, obj): def can_cancel(self, obj):
@@ -2449,29 +2434,6 @@ class InventoryUpdateEventAccess(BaseAccess):
return False return False
class ReceptorAddressAccess(BaseAccess):
"""
I can see receptor address records whenever I can access the instance
"""
model = ReceptorAddress
def filtered_queryset(self):
return self.model.objects.filter(Q(instance__in=Instance.accessible_pk_qs(self.user, 'read_role')))
@check_superuser
def can_add(self, data):
return False
@check_superuser
def can_change(self, obj, data):
return False
@check_superuser
def can_delete(self, obj):
return False
class SystemJobEventAccess(BaseAccess): class SystemJobEventAccess(BaseAccess):
""" """
I can only see manage System Jobs events if I'm a super user I can only see manage System Jobs events if I'm a super user
@@ -2605,8 +2567,6 @@ class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
if not JobLaunchConfigAccess(self.user).can_add(data): if not JobLaunchConfigAccess(self.user).can_add(data):
return False return False
if not data: if not data:
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
return self.user.has_roles.filter(permission_partials__codename__in=['execute_jobtemplate', 'update_project', 'update_inventory']).exists()
return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists() return Role.objects.filter(role_field__in=['update_role', 'execute_role'], ancestors__in=self.user.roles.all()).exists()
return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True) return self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role', mandatory=True)
@@ -2635,8 +2595,6 @@ class NotificationTemplateAccess(BaseAccess):
prefetch_related = ('created_by', 'modified_by', 'organization') prefetch_related = ('created_by', 'modified_by', 'organization')
def filtered_queryset(self): def filtered_queryset(self):
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
return self.model.access_qs(self.user, 'view')
return self.model.objects.filter( return self.model.objects.filter(
Q(organization__in=Organization.accessible_objects(self.user, 'notification_admin_role')) | Q(organization__in=self.user.auditor_of_organizations) Q(organization__in=Organization.accessible_objects(self.user, 'notification_admin_role')) | Q(organization__in=self.user.auditor_of_organizations)
).distinct() ).distinct()
@@ -2805,7 +2763,7 @@ class ActivityStreamAccess(BaseAccess):
| Q(notification_template__organization__in=auditing_orgs) | Q(notification_template__organization__in=auditing_orgs)
| Q(notification__notification_template__organization__in=auditing_orgs) | Q(notification__notification_template__organization__in=auditing_orgs)
| Q(label__organization__in=auditing_orgs) | Q(label__organization__in=auditing_orgs)
| Q(role__in=Role.visible_roles(self.user) if auditing_orgs else []) | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])
) )
project_set = Project.accessible_pk_qs(self.user, 'read_role') project_set = Project.accessible_pk_qs(self.user, 'read_role')
@@ -2862,10 +2820,13 @@ class RoleAccess(BaseAccess):
def filtered_queryset(self): def filtered_queryset(self):
result = Role.visible_roles(self.user) result = Role.visible_roles(self.user)
# Make system admin/auditor mandatorily visible. # Sanity check: is the requesting user an orphaned non-admin/auditor?
mandatories = ('system_administrator', 'system_auditor') # if yes, make system admin/auditor mandatorily visible.
super_qs = Role.objects.filter(singleton_name__in=mandatories) if not self.user.is_superuser and not self.user.is_system_auditor and not self.user.organizations.exists():
return result | super_qs mandatories = ('system_administrator', 'system_auditor')
super_qs = Role.objects.filter(singleton_name__in=mandatories)
result = result | super_qs
return result
def can_add(self, obj, data): def can_add(self, obj, data):
# Unsupported for now # Unsupported for now

View File

@@ -2,7 +2,7 @@
import logging import logging
# AWX # AWX
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics from awx.main.analytics.subsystem_metrics import Metrics
from awx.main.dispatch.publish import task from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
@@ -11,5 +11,4 @@ logger = logging.getLogger('awx.main.scheduler')
@task(queue=get_task_queuename) @task(queue=get_task_queuename)
def send_subsystem_metrics(): def send_subsystem_metrics():
DispatcherMetrics().send_metrics() Metrics().send_metrics()
CallbackReceiverMetrics().send_metrics()

View File

@@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
resolved_action, resolved_action,
resolved_role, resolved_role,
-- '-' operator listed here: -- '-' operator listed here:
-- https://www.postgresql.org/docs/15/functions-json.html -- https://www.postgresql.org/docs/12/functions-json.html
-- note that operator is only supported by jsonb objects -- note that operator is only supported by jsonb objects
-- https://www.postgresql.org/docs/current/datatype-json.html -- https://www.postgresql.org/docs/current/datatype-json.html
(CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats, (CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,
@@ -613,20 +613,3 @@ def host_metric_table(since, full_path, until, **kwargs):
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat() since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
) )
return _copy_table(table='host_metric', query=host_metric_query, path=full_path) return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
@register('host_metric_summary_monthly_table', '1.0', format='csv', description=_('HostMetricSummaryMonthly export, full sync'), expensive=trivial_slicing)
def host_metric_summary_monthly_table(since, full_path, **kwargs):
query = '''
COPY (SELECT main_hostmetricsummarymonthly.id,
main_hostmetricsummarymonthly.date,
main_hostmetricsummarymonthly.license_capacity,
main_hostmetricsummarymonthly.license_consumed,
main_hostmetricsummarymonthly.hosts_added,
main_hostmetricsummarymonthly.hosts_deleted,
main_hostmetricsummarymonthly.indirectly_managed_hosts
FROM main_hostmetricsummarymonthly
ORDER BY main_hostmetricsummarymonthly.id ASC) TO STDOUT WITH CSV HEADER
'''
return _copy_table(table='host_metric_summary_monthly', query=query, path=full_path)

View File

@@ -1,15 +1,10 @@
import itertools
import redis import redis
import json import json
import time import time
import logging import logging
import prometheus_client
from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
from prometheus_client.registry import CollectorRegistry
from django.conf import settings from django.conf import settings
from django.http import HttpRequest from django.apps import apps
from rest_framework.request import Request
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
from awx.main.utils import is_testing from awx.main.utils import is_testing
@@ -18,30 +13,6 @@ root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
logger = logging.getLogger('awx.main.analytics') logger = logging.getLogger('awx.main.analytics')
class MetricsNamespace:
def __init__(self, namespace):
self._namespace = namespace
class MetricsServerSettings(MetricsNamespace):
def port(self):
return settings.METRICS_SUBSYSTEM_CONFIG['server'][self._namespace]['port']
class MetricsServer(MetricsServerSettings):
def __init__(self, namespace, registry):
MetricsNamespace.__init__(self, namespace)
self._registry = registry
def start(self):
try:
# TODO: addr for ipv6 ?
prometheus_client.start_http_server(self.port(), addr='localhost', registry=self._registry)
except Exception:
logger.error(f"MetricsServer failed to start for service '{self._namespace}.")
raise
class BaseM: class BaseM:
def __init__(self, field, help_text): def __init__(self, field, help_text):
self.field = field self.field = field
@@ -177,40 +148,71 @@ class HistogramM(BaseM):
return output_text return output_text
class Metrics(MetricsNamespace): class Metrics:
# metric name, help_text def __init__(self, auto_pipe_execute=False, instance_name=None):
METRICSLIST = []
_METRICSLIST = [
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
]
def __init__(self, namespace, auto_pipe_execute=False, instance_name=None, metrics_have_changed=True, **kwargs):
MetricsNamespace.__init__(self, namespace)
self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline() self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
self.conn = redis.Redis.from_url(settings.BROKER_URL) self.conn = redis.Redis.from_url(settings.BROKER_URL)
self.last_pipe_execute = time.time() self.last_pipe_execute = time.time()
# track if metrics have been modified since last saved to redis # track if metrics have been modified since last saved to redis
# start with True so that we get an initial save to redis # start with True so that we get an initial save to redis
self.metrics_have_changed = metrics_have_changed self.metrics_have_changed = True
self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
# auto pipe execute will commit transaction of metric data to redis # auto pipe execute will commit transaction of metric data to redis
# at a regular interval (pipe_execute_interval). If set to False, # at a regular interval (pipe_execute_interval). If set to False,
# the calling function should call .pipe_execute() explicitly # the calling function should call .pipe_execute() explicitly
self.auto_pipe_execute = auto_pipe_execute self.auto_pipe_execute = auto_pipe_execute
Instance = apps.get_model('main', 'Instance')
if instance_name: if instance_name:
self.instance_name = instance_name self.instance_name = instance_name
elif is_testing(): elif is_testing():
self.instance_name = "awx_testing" self.instance_name = "awx_testing"
else: else:
self.instance_name = settings.CLUSTER_HOST_ID # Same as Instance.objects.my_hostname() BUT we do not need to import Instance self.instance_name = Instance.objects.my_hostname()
# metric name, help_text
METRICSLIST = [
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
HistogramM(
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
),
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
]
# turn metric list into dictionary with the metric name as a key # turn metric list into dictionary with the metric name as a key
self.METRICS = {} self.METRICS = {}
for m in itertools.chain(self.METRICSLIST, self._METRICSLIST): for m in METRICSLIST:
self.METRICS[m.field] = m self.METRICS[m.field] = m
# track last time metrics were sent to other nodes # track last time metrics were sent to other nodes
@@ -223,7 +225,7 @@ class Metrics(MetricsNamespace):
m.reset_value(self.conn) m.reset_value(self.conn)
self.metrics_have_changed = True self.metrics_have_changed = True
self.conn.delete(root_key + "_lock") self.conn.delete(root_key + "_lock")
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'): for m in self.conn.scan_iter(root_key + '_instance_*'):
self.conn.delete(m) self.conn.delete(m)
def inc(self, field, value): def inc(self, field, value):
@@ -290,7 +292,7 @@ class Metrics(MetricsNamespace):
def send_metrics(self): def send_metrics(self):
# more than one thread could be calling this at the same time, so should # more than one thread could be calling this at the same time, so should
# acquire redis lock before sending metrics # acquire redis lock before sending metrics
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock') lock = self.conn.lock(root_key + '_lock')
if not lock.acquire(blocking=False): if not lock.acquire(blocking=False):
return return
try: try:
@@ -300,10 +302,9 @@ class Metrics(MetricsNamespace):
payload = { payload = {
'instance': self.instance_name, 'instance': self.instance_name,
'metrics': serialized_metrics, 'metrics': serialized_metrics,
'metrics_namespace': self._namespace,
} }
# store the serialized data locally as well, so that load_other_metrics will read it # store the serialized data locally as well, so that load_other_metrics will read it
self.conn.set(root_key + '-' + self._namespace + '_instance_' + self.instance_name, serialized_metrics) self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
emit_channel_notification("metrics", payload) emit_channel_notification("metrics", payload)
self.previous_send_metrics.set(current_time) self.previous_send_metrics.set(current_time)
@@ -325,14 +326,14 @@ class Metrics(MetricsNamespace):
instances_filter = request.query_params.getlist("node") instances_filter = request.query_params.getlist("node")
# get a sorted list of instance names # get a sorted list of instance names
instance_names = [self.instance_name] instance_names = [self.instance_name]
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'): for m in self.conn.scan_iter(root_key + '_instance_*'):
instance_names.append(m.decode('UTF-8').split('_instance_')[1]) instance_names.append(m.decode('UTF-8').split('_instance_')[1])
instance_names.sort() instance_names.sort()
# load data, including data from the this local instance # load data, including data from the this local instance
instance_data = {} instance_data = {}
for instance in instance_names: for instance in instance_names:
if len(instances_filter) == 0 or instance in instances_filter: if len(instances_filter) == 0 or instance in instances_filter:
instance_data_from_redis = self.conn.get(root_key + '-' + self._namespace + '_instance_' + instance) instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance)
# data from other instances may not be available. That is OK. # data from other instances may not be available. That is OK.
if instance_data_from_redis: if instance_data_from_redis:
instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8')) instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8'))
@@ -351,120 +352,6 @@ class Metrics(MetricsNamespace):
return output_text return output_text
class DispatcherMetrics(Metrics):
METRICSLIST = [
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
# dispatcher subsystem metrics
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
]
def __init__(self, *args, **kwargs):
super().__init__(settings.METRICS_SERVICE_DISPATCHER, *args, **kwargs)
class CallbackReceiverMetrics(Metrics):
METRICSLIST = [
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
HistogramM(
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
),
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
]
def __init__(self, *args, **kwargs):
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, *args, **kwargs)
def metrics(request): def metrics(request):
output_text = '' m = Metrics()
for m in [DispatcherMetrics(), CallbackReceiverMetrics()]: return m.generate_metrics(request)
output_text += m.generate_metrics(request)
return output_text
class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
"""
Takes the metric data from redis -> our custom metric fields -> prometheus
library metric fields.
The plan is to get rid of the use of redis, our custom metric fields, and
to switch fully to the prometheus library. At that point, this translation
code will be deleted.
"""
def __init__(self, metrics_obj, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metrics = metrics_obj
def collect(self):
my_hostname = settings.CLUSTER_HOST_ID
instance_data = self._metrics.load_other_metrics(Request(HttpRequest()))
if not instance_data:
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
return None
host_metrics = instance_data.get(my_hostname)
for _, metric in self._metrics.METRICS.items():
entry = host_metrics.get(metric.field)
if not entry:
logger.debug(f"{self._metrics._namespace} metric '{metric.field}' not found in redis data payload {json.dumps(instance_data, indent=2)}")
continue
if isinstance(metric, HistogramM):
buckets = list(zip(metric.buckets, entry['counts']))
buckets = [[str(i[0]), str(i[1])] for i in buckets]
yield HistogramMetricFamily(metric.field, metric.help_text, buckets=buckets, sum_value=entry['sum'])
else:
yield GaugeMetricFamily(metric.field, metric.help_text, value=entry)
class CallbackReceiverMetricsServer(MetricsServer):
def __init__(self):
registry = CollectorRegistry(auto_describe=True)
registry.register(CustomToPrometheusMetricsCollector(DispatcherMetrics(metrics_have_changed=False)))
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, registry)
class DispatcherMetricsServer(MetricsServer):
def __init__(self):
registry = CollectorRegistry(auto_describe=True)
registry.register(CustomToPrometheusMetricsCollector(CallbackReceiverMetrics(metrics_have_changed=False)))
super().__init__(settings.METRICS_SERVICE_DISPATCHER, registry)
class WebsocketsMetricsServer(MetricsServer):
def __init__(self):
registry = CollectorRegistry(auto_describe=True)
# registry.register()
super().__init__(settings.METRICS_SERVICE_WEBSOCKETS, registry)

View File

@@ -1,40 +1,7 @@
from django.apps import AppConfig from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from awx.main.utils.named_url_graph import _customize_graph, generate_graph
from awx.conf import register, fields
class MainConfig(AppConfig): class MainConfig(AppConfig):
name = 'awx.main' name = 'awx.main'
verbose_name = _('Main') verbose_name = _('Main')
def load_named_url_feature(self):
    """Build the named-URL graph and register its read-only settings.

    Collects every model in this app that defines ``get_absolute_url``,
    feeds them to ``generate_graph`` and ``_customize_graph`` to build the
    named-URL topology, then registers the two read-only introspection
    settings (``NAMED_URL_FORMATS`` and ``NAMED_URL_GRAPH_NODES``) that
    expose that topology through the settings API.
    """
    # Only models exposing get_absolute_url can participate in named URLs.
    models = [m for m in self.get_models() if hasattr(m, 'get_absolute_url')]
    generate_graph(models)
    _customize_graph()
    register(
        'NAMED_URL_FORMATS',
        field_class=fields.DictField,
        read_only=True,
        label=_('Formats of all available named urls'),
        help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
        category=_('Named URL'),
        category_slug='named-url',
    )
    register(
        'NAMED_URL_GRAPH_NODES',
        field_class=fields.DictField,
        read_only=True,
        label=_('List of all named url graph nodes.'),
        help_text=_(
            'Read-only list of key-value pairs that exposes named URL graph topology.'
            ' Use this list to programmatically generate named URLs for resources'
        ),
        category=_('Named URL'),
        category_slug='named-url',
    )
def ready(self):
    """AppConfig hook: run standard app setup, then enable named URLs."""
    super().ready()
    self.load_named_url_feature()

View File

@@ -1,87 +0,0 @@
import functools
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.core.cache.backends.redis import RedisCache
from redis.exceptions import ConnectionError, ResponseError, TimeoutError
import socket
# This list comes from what django-redis ignores and the behavior we are trying
# to retain while dropping the dependency on django-redis.
IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
CONNECTION_INTERRUPTED_SENTINEL = object()
def optionally_ignore_exceptions(func=None, return_value=None):
    """Decorator that optionally swallows cache-backend exceptions.

    Usable bare (``@optionally_ignore_exceptions``) or with arguments
    (``@optionally_ignore_exceptions(return_value=...)``).  When a wrapped
    call raises one of ``IGNORED_EXCEPTIONS`` and
    ``settings.DJANGO_REDIS_IGNORE_EXCEPTIONS`` is true, *return_value* is
    returned instead; otherwise the underlying cause (or the exception
    itself) is re-raised.
    """
    if func is None:
        # Called with arguments only: bind return_value and wait for func.
        return functools.partial(optionally_ignore_exceptions, return_value=return_value)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except IGNORED_EXCEPTIONS as exc:
            if not settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
                # Prefer the original cause so callers see the driver error.
                raise exc.__cause__ or exc
            return return_value

    return wrapper
class AWXRedisCache(RedisCache):
    """Django ``RedisCache`` wrapper that tolerates an unavailable backend.

    Every cache operation is routed through ``optionally_ignore_exceptions``
    so that Redis connection problems degrade to harmless no-ops (subject to
    ``settings.DJANGO_REDIS_IGNORE_EXCEPTIONS``) instead of raising, matching
    the behavior previously supplied by django-redis.
    """

    @optionally_ignore_exceptions
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return super().add(key, value, timeout=timeout, version=version)

    @optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
    def _get(self, key, default=None, version=None):
        # Sentinel return lets get() distinguish "connection swallowed"
        # from a legitimately cached None/default value.
        return super().get(key, default=default, version=version)

    def get(self, key, default=None, version=None):
        result = self._get(key, default, version)
        return default if result is CONNECTION_INTERRUPTED_SENTINEL else result

    @optionally_ignore_exceptions
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return super().set(key, value, timeout=timeout, version=version)

    @optionally_ignore_exceptions
    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        return super().touch(key, timeout=timeout, version=version)

    @optionally_ignore_exceptions
    def delete(self, key, version=None):
        return super().delete(key, version=version)

    @optionally_ignore_exceptions
    def get_many(self, keys, version=None):
        return super().get_many(keys, version=version)

    @optionally_ignore_exceptions
    def has_key(self, key, version=None):
        return super().has_key(key, version=version)

    @optionally_ignore_exceptions
    def incr(self, key, delta=1, version=None):
        return super().incr(key, delta=delta, version=version)

    @optionally_ignore_exceptions
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        return super().set_many(data, timeout=timeout, version=version)

    @optionally_ignore_exceptions
    def delete_many(self, keys, version=None):
        return super().delete_many(keys, version=version)

    @optionally_ignore_exceptions
    def clear(self):
        return super().clear()

View File

@@ -92,21 +92,6 @@ register(
), ),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
)
register(
'CSRF_TRUSTED_ORIGINS',
default=[],
field_class=fields.StringListField,
label=_('CSRF Trusted Origins List'),
help_text=_(
"If the service is behind a reverse proxy/load balancer, use this setting "
"to configure the schema://addresses from which the service should trust "
"Origin header values. "
),
category=_('System'),
category_slug='system',
) )
register( register(
@@ -695,33 +680,15 @@ register(
category_slug='logging', category_slug='logging',
) )
register( register(
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE', 'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
field_class=fields.IntegerField,
default=131072,
min_value=1,
label=_('Maximum number of messages that can be stored in the log action queue'),
help_text=_(
'Defines how large the rsyslog action queue can grow in number of messages '
'stored. This can have an impact on memory utilization. When the queue '
'reaches 75% of this number, the queue will start writing to disk '
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
'DEBUG messages will start to be discarded (queue.discardMark with '
'queue.discardSeverity=5).'
),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB',
field_class=fields.IntegerField, field_class=fields.IntegerField,
default=1, default=1,
min_value=1, min_value=1,
label=_('Maximum disk persistence for rsyslogd action queuing (in GB)'), label=_('Maximum disk persistance for external log aggregation (in GB)'),
help_text=_( help_text=_(
'Amount of data to store (in gigabytes) if an rsyslog action takes time ' 'Amount of data to store (in gigabytes) during an outage of '
'to process an incoming message (defaults to 1). ' 'the external log aggregator (defaults to 1). '
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). ' 'Equivalent to the rsyslogd queue.maxdiskspace setting.'
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
), ),
category=_('Logging'), category=_('Logging'),
category_slug='logging', category_slug='logging',
@@ -775,7 +742,6 @@ register(
allow_null=True, allow_null=True,
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
) )
register( register(
'AUTOMATION_ANALYTICS_LAST_ENTRIES', 'AUTOMATION_ANALYTICS_LAST_ENTRIES',
@@ -817,7 +783,6 @@ register(
help_text=_('Max jobs to allow bulk jobs to launch'), help_text=_('Max jobs to allow bulk jobs to launch'),
category=_('Bulk Actions'), category=_('Bulk Actions'),
category_slug='bulk', category_slug='bulk',
hidden=True,
) )
register( register(
@@ -828,18 +793,6 @@ register(
help_text=_('Max number of hosts to allow to be created in a single bulk action'), help_text=_('Max number of hosts to allow to be created in a single bulk action'),
category=_('Bulk Actions'), category=_('Bulk Actions'),
category_slug='bulk', category_slug='bulk',
hidden=True,
)
register(
'BULK_HOST_MAX_DELETE',
field_class=fields.IntegerField,
default=250,
label=_('Max number of hosts to allow to be deleted in a single bulk action'),
help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
category=_('Bulk Actions'),
category_slug='bulk',
hidden=True,
) )
register( register(
@@ -850,7 +803,6 @@ register(
help_text=_('Enable preview of new user interface.'), help_text=_('Enable preview of new user interface.'),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
hidden=True,
) )
register( register(
@@ -879,55 +831,6 @@ register(
category_slug='system', category_slug='system',
) )
register(
'HOST_METRIC_SUMMARY_TASK_LAST_TS',
field_class=fields.DateTimeField,
label=_('Last computing date of HostMetricSummaryMonthly'),
allow_null=True,
category=_('System'),
category_slug='system',
)
register(
'AWX_CLEANUP_PATHS',
field_class=fields.BooleanField,
label=_('Enable or Disable tmp dir cleanup'),
default=True,
help_text=_('Enable or Disable TMP Dir cleanup'),
category=('Debug'),
category_slug='debug',
)
register(
'AWX_REQUEST_PROFILE',
field_class=fields.BooleanField,
label=_('Debug Web Requests'),
default=False,
help_text=_('Debug web request python timing'),
category=('Debug'),
category_slug='debug',
)
register(
'DEFAULT_CONTAINER_RUN_OPTIONS',
field_class=fields.StringListField,
label=_('Container Run Options'),
default=['--network', 'slirp4netns:enable_ipv6=true'],
help_text=_("List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']"),
category=('Jobs'),
category_slug='jobs',
)
register(
'RECEPTOR_RELEASE_WORK',
field_class=fields.BooleanField,
label=_('Release Receptor Work'),
default=True,
help_text=_('Release receptor work'),
category=('Debug'),
category_slug='debug',
)
def logging_validate(serializer, attrs): def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'): if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

View File

@@ -14,7 +14,7 @@ __all__ = [
'STANDARD_INVENTORY_UPDATE_ENV', 'STANDARD_INVENTORY_UPDATE_ENV',
] ]
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform') CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
PRIVILEGE_ESCALATION_METHODS = [ PRIVILEGE_ESCALATION_METHODS = [
('sudo', _('Sudo')), ('sudo', _('Sudo')),
('su', _('Su')), ('su', _('Su')),
@@ -114,28 +114,3 @@ SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS = 'unique_managed_hosts'
# Shared prefetch to use for creating a queryset for the purpose of writing or saving facts # Shared prefetch to use for creating a queryset for the purpose of writing or saving facts
HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id') HOST_FACTS_FIELDS = ('name', 'ansible_facts', 'ansible_facts_modified', 'modified', 'inventory_id')
# Data for RBAC compatibility layer.
# Maps each legacy AWX Role attribute name to the permission-codename
# prefix(es) that correspond to it in the new permission model.
role_name_to_perm_mapping = {
    'adhoc_role': ['adhoc_'],
    'approval_role': ['approve_'],
    'auditor_role': ['audit_'],
    'admin_role': ['change_', 'add_', 'delete_'],
    'execute_role': ['execute_'],
    'read_role': ['view_'],
    'update_role': ['update_'],
    'member_role': ['member_'],
    'use_role': ['use_'],
}

# Maps organization-level role names to a single representative permission
# codename used by the compatibility layer.
org_role_to_permission = {
    'notification_admin_role': 'add_notificationtemplate',
    'project_admin_role': 'add_project',
    'execute_role': 'execute_jobtemplate',
    'inventory_admin_role': 'add_inventory',
    'credential_admin_role': 'add_credential',
    'workflow_admin_role': 'add_workflowjobtemplate',
    'job_template_admin_role': 'change_jobtemplate',  # TODO: this doesn't really work, solution not clear
    'execution_environment_admin_role': 'add_executionenvironment',
    'auditor_role': 'view_project',  # TODO: also doesn't really work
}

View File

@@ -106,7 +106,7 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
if group == "metrics": if group == "metrics":
message = json.loads(message['text']) message = json.loads(message['text'])
conn = redis.Redis.from_url(settings.BROKER_URL) conn = redis.Redis.from_url(settings.BROKER_URL)
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "-" + message['metrics_namespace'] + "_instance_" + message['instance'], message['metrics']) conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
else: else:
await self.channel_layer.group_send(group, message) await self.channel_layer.group_send(group, message)

View File

@@ -58,7 +58,7 @@ aim_inputs = {
'id': 'object_property', 'id': 'object_property',
'label': _('Object Property'), 'label': _('Object Property'),
'type': 'string', 'type': 'string',
'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'), 'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
}, },
{ {
'id': 'reason', 'id': 'reason',
@@ -111,12 +111,8 @@ def aim_backend(**kwargs):
object_property = 'Content' object_property = 'Content'
elif object_property.lower() == 'username': elif object_property.lower() == 'username':
object_property = 'UserName' object_property = 'UserName'
elif object_property.lower() == 'password':
object_property = 'Content'
elif object_property.lower() == 'address':
object_property = 'Address'
elif object_property not in res: elif object_property not in res:
raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property)) raise KeyError('Property {} not found in object'.format(object_property))
else: else:
object_property = object_property.capitalize() object_property = object_property.capitalize()

View File

@@ -1,65 +0,0 @@
import boto3
from botocore.exceptions import ClientError
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _
# Input schema for the AWS Secrets Manager credential plugin.
# The ids listed under 'required' must all be supplied; they are consumed
# as kwargs by aws_secretsmanager_backend() below.
secrets_manager_inputs = {
    'fields': [
        {
            'id': 'aws_access_key',
            'label': _('AWS Access Key'),
            'type': 'string',
        },
        {
            'id': 'aws_secret_key',
            'label': _('AWS Secret Key'),
            'type': 'string',
            'secret': True,
        },
    ],
    'metadata': [
        {
            'id': 'region_name',
            'label': _('AWS Secrets Manager Region'),
            'type': 'string',
            'help_text': _('Region which the secrets manager is located'),
        },
        {
            'id': 'secret_name',
            'label': _('AWS Secret Name'),
            'type': 'string',
        },
    ],
    'required': ['aws_access_key', 'aws_secret_key', 'region_name', 'secret_name'],
}
def aws_secretsmanager_backend(**kwargs):
    """Fetch a secret value from AWS Secrets Manager.

    Required kwargs (enforced by ``secrets_manager_inputs['required']``):
        secret_name: name (or ARN) of the secret to fetch.
        region_name: AWS region hosting the secret.
        aws_access_key / aws_secret_key: credentials used for the lookup.

    Returns:
        The secret payload: ``SecretString`` when the secret was stored as
        text, otherwise the raw ``SecretBinary`` value.

    Raises:
        botocore.exceptions.ClientError: if the Secrets Manager call fails.
    """
    secret_name = kwargs['secret_name']
    region_name = kwargs['region_name']
    aws_secret_access_key = kwargs['aws_secret_key']
    aws_access_key_id = kwargs['aws_access_key']

    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager', region_name=region_name, aws_secret_access_key=aws_secret_access_key, aws_access_key_id=aws_access_key_id
    )

    # The previous `try/except ClientError as e: raise e` was a no-op that
    # only moved the raise site; let ClientError propagate naturally so the
    # original traceback is preserved.
    get_secret_value_response = client.get_secret_value(SecretId=secret_name)

    # Secrets Manager decrypts the secret value using the associated KMS CMK.
    # Depending on whether the secret was stored as a string or binary, only
    # one of these fields will be populated.
    if 'SecretString' in get_secret_value_response:
        return get_secret_value_response['SecretString']
    return get_secret_value_response['SecretBinary']


aws_secretmanager_plugin = CredentialPlugin('AWS Secrets Manager lookup', inputs=secrets_manager_inputs, backend=aws_secretsmanager_backend)

View File

@@ -1,10 +1,9 @@
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential
from msrestazure import azure_cloud
from .plugin import CredentialPlugin from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.common.credentials import ServicePrincipalCredentials
from msrestazure import azure_cloud
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py # https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
@@ -55,9 +54,22 @@ azure_keyvault_inputs = {
def azure_keyvault_backend(**kwargs): def azure_keyvault_backend(**kwargs):
csc = ClientSecretCredential(tenant_id=kwargs['tenant'], client_id=kwargs['client'], client_secret=kwargs['secret']) url = kwargs['url']
kv = SecretClient(credential=csc, vault_url=kwargs['url']) [cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
return kv.get_secret(name=kwargs['secret_field'], version=kwargs.get('secret_version', '')).value
def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(
url=url,
client_id=kwargs['client'],
secret=kwargs['secret'],
tenant=kwargs['tenant'],
resource=f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
)
token = credentials.token
return token['token_type'], token['access_token']
kv = KeyVaultClient(KeyVaultAuthentication(auth_callback))
return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value
azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend) azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend)

View File

@@ -4,8 +4,6 @@ from urllib.parse import urljoin, quote
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
import requests import requests
import base64
import binascii
conjur_inputs = { conjur_inputs = {
@@ -52,13 +50,6 @@ conjur_inputs = {
} }
def _is_base64(s: str) -> bool:
try:
return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
except binascii.Error:
return False
def conjur_backend(**kwargs): def conjur_backend(**kwargs):
url = kwargs['url'] url = kwargs['url']
api_key = kwargs['api_key'] api_key = kwargs['api_key']
@@ -86,7 +77,7 @@ def conjur_backend(**kwargs):
token = resp.content.decode('utf-8') token = resp.content.decode('utf-8')
lookup_kwargs = { lookup_kwargs = {
'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))}, 'headers': {'Authorization': 'Token token="{}"'.format(token)},
'allow_redirects': False, 'allow_redirects': False,
} }

View File

@@ -2,29 +2,25 @@ from .plugin import CredentialPlugin
from django.conf import settings from django.conf import settings
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault from thycotic.secrets.vault import SecretsVault
from base64 import b64decode
dsv_inputs = { dsv_inputs = {
'fields': [ 'fields': [
{ {
'id': 'tenant', 'id': 'tenant',
'label': _('Tenant'), 'label': _('Tenant'),
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'), 'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
'type': 'string', 'type': 'string',
}, },
{ {
'id': 'tld', 'id': 'tld',
'label': _('Top-level Domain (TLD)'), 'label': _('Top-level Domain (TLD)'),
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'), 'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
'choices': ['ca', 'com', 'com.au', 'eu'], 'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
'default': 'com', 'default': 'com',
}, },
{ {'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
'id': 'client_id',
'label': _('Client ID'),
'type': 'string',
},
{ {
'id': 'client_secret', 'id': 'client_secret',
'label': _('Client Secret'), 'label': _('Client Secret'),
@@ -45,16 +41,8 @@ dsv_inputs = {
'help_text': _('The field to extract from the secret'), 'help_text': _('The field to extract from the secret'),
'type': 'string', 'type': 'string',
}, },
{
'id': 'secret_decoding',
'label': _('Should the secret be base64 decoded?'),
'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
'choices': ['No Decoding', 'Decode Base64'],
'type': 'string',
'default': 'No Decoding',
},
], ],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'], 'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
} }
if settings.DEBUG: if settings.DEBUG:
@@ -63,32 +51,12 @@ if settings.DEBUG:
'id': 'url_template', 'id': 'url_template',
'label': _('URL template'), 'label': _('URL template'),
'type': 'string', 'type': 'string',
'default': 'https://{}.secretsvaultcloud.{}', 'default': 'https://{}.secretsvaultcloud.{}/v1',
} }
) )
dsv_plugin = CredentialPlugin(
def dsv_backend(**kwargs): 'Thycotic DevOps Secrets Vault',
tenant_name = kwargs['tenant'] dsv_inputs,
tenant_tld = kwargs.get('tld', 'com') lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}') )
client_id = kwargs['client_id']
client_secret = kwargs['client_secret']
secret_path = kwargs['path']
secret_field = kwargs['secret_field']
# providing a default value to remain backward compatible for secrets that have not specified this option
secret_decoding = kwargs.get('secret_decoding', 'No Decoding')
tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))
authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)
# files can be uploaded base64 decoded to DSV and thus decoding it only, when asked for
if secret_decoding == 'Decode Base64':
return b64decode(dsv_secret['data'][secret_field]).decode()
return dsv_secret['data'][secret_field]
dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)

View File

@@ -41,34 +41,6 @@ base_inputs = {
'secret': True, 'secret': True,
'help_text': _('The Secret ID for AppRole Authentication'), 'help_text': _('The Secret ID for AppRole Authentication'),
}, },
{
'id': 'client_cert_public',
'label': _('Client Certificate'),
'type': 'string',
'multiline': True,
'help_text': _(
'The PEM-encoded client certificate used for TLS client authentication.'
' This should include the certificate and any intermediate certififcates.'
),
},
{
'id': 'client_cert_private',
'label': _('Client Certificate Key'),
'type': 'string',
'multiline': True,
'secret': True,
'help_text': _('The certificate private key used for TLS client authentication.'),
},
{
'id': 'client_cert_role',
'label': _('TLS Authentication Role'),
'type': 'string',
'multiline': False,
'help_text': _(
'The role configured in Hashicorp Vault for TLS client authentication.'
' If not provided, Hashicorp Vault may assign roles based on the certificate used.'
),
},
{ {
'id': 'namespace', 'id': 'namespace',
'label': _('Namespace name (Vault Enterprise only)'), 'label': _('Namespace name (Vault Enterprise only)'),
@@ -87,20 +59,6 @@ base_inputs = {
' see https://www.vaultproject.io/docs/auth/kubernetes#configuration' ' see https://www.vaultproject.io/docs/auth/kubernetes#configuration'
), ),
}, },
{
'id': 'username',
'label': _('Username'),
'type': 'string',
'secret': False,
'help_text': _('Username for user authentication.'),
},
{
'id': 'password',
'label': _('Password'),
'type': 'string',
'secret': True,
'help_text': _('Password for user authentication.'),
},
{ {
'id': 'default_auth_path', 'id': 'default_auth_path',
'label': _('Path to Auth'), 'label': _('Path to Auth'),
@@ -199,25 +157,19 @@ hashi_ssh_inputs['required'].extend(['public_key', 'role'])
def handle_auth(**kwargs): def handle_auth(**kwargs):
token = None token = None
if kwargs.get('token'): if kwargs.get('token'):
token = kwargs['token'] token = kwargs['token']
elif kwargs.get('username') and kwargs.get('password'):
token = method_auth(**kwargs, auth_param=userpass_auth(**kwargs))
elif kwargs.get('role_id') and kwargs.get('secret_id'): elif kwargs.get('role_id') and kwargs.get('secret_id'):
token = method_auth(**kwargs, auth_param=approle_auth(**kwargs)) token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
elif kwargs.get('kubernetes_role'): elif kwargs.get('kubernetes_role'):
token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs)) token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs))
elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs))
else: else:
raise Exception('Token, Username/Password, AppRole, Kubernetes, or TLS authentication parameters must be set') raise Exception('Either token or AppRole/Kubernetes authentication parameters must be set')
return token return token
def userpass_auth(**kwargs):
return {'username': kwargs['username'], 'password': kwargs['password']}
def approle_auth(**kwargs): def approle_auth(**kwargs):
return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']} return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']}
@@ -229,10 +181,6 @@ def kubernetes_auth(**kwargs):
return {'role': kwargs['kubernetes_role'], 'jwt': jwt} return {'role': kwargs['kubernetes_role'], 'jwt': jwt}
def client_cert_auth(**kwargs):
return {'name': kwargs.get('client_cert_role')}
def method_auth(**kwargs): def method_auth(**kwargs):
# get auth method specific params # get auth method specific params
request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30} request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30}
@@ -245,25 +193,13 @@ def method_auth(**kwargs):
cacert = kwargs.get('cacert', None) cacert = kwargs.get('cacert', None)
sess = requests.Session() sess = requests.Session()
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
# Namespace support # Namespace support
if kwargs.get('namespace'): if kwargs.get('namespace'):
sess.headers['X-Vault-Namespace'] = kwargs['namespace'] sess.headers['X-Vault-Namespace'] = kwargs['namespace']
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/') request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
if kwargs['auth_param'].get('username'):
request_url = request_url + '/' + (kwargs['username'])
with CertFiles(cacert) as cert: with CertFiles(cacert) as cert:
request_kwargs['verify'] = cert request_kwargs['verify'] = cert
# TLS client certificate support resp = sess.post(request_url, **request_kwargs)
if kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
# Add client cert to requests Session before making call
with CertFiles(kwargs['client_cert_public'], key=kwargs['client_cert_private']) as client_cert:
sess.cert = client_cert
resp = sess.post(request_url, **request_kwargs)
else:
# Make call without client certificate
resp = sess.post(request_url, **request_kwargs)
resp.raise_for_status() resp.raise_for_status()
token = resp.json()['auth']['client_token'] token = resp.json()['auth']['client_token']
return token return token
@@ -284,7 +220,6 @@ def kv_backend(**kwargs):
} }
sess = requests.Session() sess = requests.Session()
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
sess.headers['Authorization'] = 'Bearer {}'.format(token) sess.headers['Authorization'] = 'Bearer {}'.format(token)
# Compatibility header for older installs of Hashicorp Vault # Compatibility header for older installs of Hashicorp Vault
sess.headers['X-Vault-Token'] = token sess.headers['X-Vault-Token'] = token
@@ -330,8 +265,6 @@ def kv_backend(**kwargs):
if secret_key: if secret_key:
try: try:
if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
return json['data']['data'][secret_key]
return json['data'][secret_key] return json['data'][secret_key]
except KeyError: except KeyError:
raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path)) raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
@@ -355,7 +288,6 @@ def ssh_backend(**kwargs):
request_kwargs['json']['valid_principals'] = kwargs['valid_principals'] request_kwargs['json']['valid_principals'] = kwargs['valid_principals']
sess = requests.Session() sess = requests.Session()
sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
sess.headers['Authorization'] = 'Bearer {}'.format(token) sess.headers['Authorization'] = 'Bearer {}'.format(token)
if kwargs.get('namespace'): if kwargs.get('namespace'):
sess.headers['X-Vault-Namespace'] = kwargs['namespace'] sess.headers['X-Vault-Namespace'] = kwargs['namespace']

View File

@@ -1,10 +1,7 @@
from .plugin import CredentialPlugin from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
try: from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
except ImportError:
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
tss_inputs = { tss_inputs = {
'fields': [ 'fields': [
@@ -53,10 +50,8 @@ tss_inputs = {
def tss_backend(**kwargs): def tss_backend(**kwargs):
if kwargs.get("domain"): if 'domain' in kwargs:
authorizer = DomainPasswordGrantAuthorizer( authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
)
else: else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password']) authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer) secret_server = SecretServer(kwargs['server_url'], authorizer)

View File

@@ -87,7 +87,7 @@ class RecordedQueryLog(object):
) )
log.commit() log.commit()
log.execute( log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) VALUES (?, ?, ?, ?, ?, ?, ?);', 'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) ' 'VALUES (?, ?, ?, ?, ?, ?, ?);',
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt), (os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt),
) )
log.commit() log.commit()

View File

@@ -1,7 +1,6 @@
import os import os
import psycopg import psycopg2
import select import select
from copy import deepcopy
from contextlib import contextmanager from contextlib import contextmanager
@@ -41,12 +40,8 @@ def get_task_queuename():
class PubSub(object): class PubSub(object):
def __init__(self, conn, select_timeout=None): def __init__(self, conn):
self.conn = conn self.conn = conn
if select_timeout is None:
self.select_timeout = 5
else:
self.select_timeout = select_timeout
def listen(self, channel): def listen(self, channel):
with self.conn.cursor() as cur: with self.conn.cursor() as cur:
@@ -60,62 +55,25 @@ class PubSub(object):
with self.conn.cursor() as cur: with self.conn.cursor() as cur:
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload)) cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))
@staticmethod def events(self, select_timeout=5, yield_timeouts=False):
def current_notifies(conn):
"""
Altered version of .notifies method from psycopg library
This removes the outer while True loop so that we only process
queued notifications
"""
with conn.lock:
try:
ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
except psycopg.errors._NO_TRACEBACK as ex:
raise ex.with_traceback(None)
enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
for pgn in ns:
n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
yield n
def events(self, yield_timeouts=False):
if not self.conn.autocommit: if not self.conn.autocommit:
raise RuntimeError('Listening for events can only be done in autocommit mode') raise RuntimeError('Listening for events can only be done in autocommit mode')
while True: while True:
if select.select([self.conn], [], [], self.select_timeout) == NOT_READY: if select.select([self.conn], [], [], select_timeout) == NOT_READY:
if yield_timeouts: if yield_timeouts:
yield None yield None
else: else:
notification_generator = self.current_notifies(self.conn) self.conn.poll()
for notification in notification_generator: while self.conn.notifies:
yield notification yield self.conn.notifies.pop(0)
def close(self): def close(self):
self.conn.close() self.conn.close()
def create_listener_connection():
conf = deepcopy(settings.DATABASES['default'])
conf['OPTIONS'] = deepcopy(conf.get('OPTIONS', {}))
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
# Apply overrides specifically for the listener connection
for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
conf[k] = v
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
conf['OPTIONS'][k] = v
# Allow password-less authentication
if 'PASSWORD' in conf:
conf['OPTIONS']['password'] = conf.pop('PASSWORD')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
@contextmanager @contextmanager
def pg_bus_conn(new_connection=False, select_timeout=None): def pg_bus_conn(new_connection=False):
''' '''
Any listeners probably want to establish a new database connection, Any listeners probably want to establish a new database connection,
separate from the Django connection used for queries, because that will prevent separate from the Django connection used for queries, because that will prevent
@@ -127,7 +85,13 @@ def pg_bus_conn(new_connection=False, select_timeout=None):
''' '''
if new_connection: if new_connection:
conn = create_listener_connection() conf = settings.DATABASES['default'].copy()
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
else: else:
if pg_connection.connection is None: if pg_connection.connection is None:
pg_connection.connect() pg_connection.connect()
@@ -135,7 +99,7 @@ def pg_bus_conn(new_connection=False, select_timeout=None):
raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions') raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
conn = pg_connection.connection conn = pg_connection.connection
pubsub = PubSub(conn, select_timeout=select_timeout) pubsub = PubSub(conn)
yield pubsub yield pubsub
if new_connection: if new_connection:
conn.close() conn.close()

View File

@@ -37,14 +37,8 @@ class Control(object):
def running(self, *args, **kwargs): def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs) return self.control_with_reply('running', *args, **kwargs)
def cancel(self, task_ids, with_reply=True): def cancel(self, task_ids, *args, **kwargs):
if with_reply: return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
else:
self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})
def schedule(self, *args, **kwargs):
return self.control_with_reply('schedule', *args, **kwargs)
@classmethod @classmethod
def generate_reply_queue_name(cls): def generate_reply_queue_name(cls):
@@ -58,14 +52,14 @@ class Control(object):
if not connection.get_autocommit(): if not connection.get_autocommit():
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode') raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')
with pg_bus_conn(select_timeout=timeout) as conn: with pg_bus_conn() as conn:
conn.listen(reply_queue) conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue} send_data = {'control': command, 'reply_to': reply_queue}
if extra_data: if extra_data:
send_data.update(extra_data) send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data)) conn.notify(self.queuename, json.dumps(send_data))
for reply in conn.events(yield_timeouts=True): for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
if reply is None: if reply is None:
logger.error(f'{self.service} did not reply within {timeout}s') logger.error(f'{self.service} did not reply within {timeout}s')
raise RuntimeError(f"{self.service} did not reply within {timeout}s") raise RuntimeError(f"{self.service} did not reply within {timeout}s")

View File

@@ -1,142 +1,57 @@
import logging import logging
import os
import time import time
import yaml from multiprocessing import Process
from datetime import datetime
from django.conf import settings
from django.db import connections
from schedule import Scheduler
from django_guid import set_guid
from django_guid.utils import generate_guid
from awx.main.dispatch.worker import TaskWorker
from awx.main.utils.db import set_connection_name
logger = logging.getLogger('awx.main.dispatch.periodic') logger = logging.getLogger('awx.main.dispatch.periodic')
class ScheduledTask: class Scheduler(Scheduler):
""" def run_continuously(self):
Class representing schedules, very loosely modeled after python schedule library Job idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)
the idea of this class is to:
- only deal in relative times (time since the scheduler global start)
- only deal in integer math for target runtimes, but float for current relative time
Missed schedule policy: def run():
Invariant target times are maintained, meaning that if interval=10s offset=0 ppid = os.getppid()
and it runs at t=7s, then it calls for next run in 3s. logger.warning('periodic beat started')
However, if a complete interval has passed, that is counted as a missed run,
and missed runs are abandoned (no catch-up runs).
"""
def __init__(self, name: str, data: dict): set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
# parameters need for schedule computation
self.interval = int(data['schedule'].total_seconds())
self.offset = 0 # offset relative to start time this schedule begins
self.index = 0 # number of periods of the schedule that has passed
# parameters that do not affect scheduling logic while True:
self.last_run = None # time of last run, only used for debug if os.getppid() != ppid:
self.completed_runs = 0 # number of times schedule is known to run # if the parent PID changes, this process has been orphaned
self.name = name # via e.g., segfault or sigkill, we should exit too
self.data = data # used by caller to know what to run pid = os.getpid()
logger.warning(f'periodic beat exiting gracefully pid:{pid}')
raise SystemExit()
try:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
set_guid(generate_guid())
self.run_pending()
except Exception:
logger.exception('encountered an error while scheduling periodic tasks')
time.sleep(idle_seconds)
@property process = Process(target=run)
def next_run(self): process.daemon = True
"Time until the next run with t=0 being the global_start of the scheduler class" process.start()
return (self.index + 1) * self.interval + self.offset
def due_to_run(self, relative_time):
return bool(self.next_run <= relative_time)
def expected_runs(self, relative_time):
return int((relative_time - self.offset) / self.interval)
def mark_run(self, relative_time):
self.last_run = relative_time
self.completed_runs += 1
new_index = self.expected_runs(relative_time)
if new_index > self.index + 1:
logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
self.index = new_index
def missed_runs(self, relative_time):
"Number of times job was supposed to ran but failed to, only used for debug"
missed_ct = self.expected_runs(relative_time) - self.completed_runs
# if this is currently due to run do not count that as a missed run
if missed_ct and self.due_to_run(relative_time):
missed_ct -= 1
return missed_ct
class Scheduler: def run_continuously():
def __init__(self, schedule): scheduler = Scheduler()
""" for task in settings.CELERYBEAT_SCHEDULE.values():
Expects schedule in the form of a dictionary like apply_async = TaskWorker.resolve_callable(task['task']).apply_async
{ total_seconds = task['schedule'].total_seconds()
'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'} scheduler.every(total_seconds).seconds.do(apply_async)
} scheduler.run_continuously()
Only the schedule nearest-second value is used for scheduling,
the rest of the data is for use by the caller to know what to run.
"""
self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
min_interval = min(job.interval for job in self.jobs)
num_jobs = len(self.jobs)
# this is intentionally oppioniated against spammy schedules
# a core goal is to spread out the scheduled tasks (for worker management)
# and high-frequency schedules just do not work with that
if num_jobs > min_interval:
raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')
# even space out jobs over the base interval
for i, job in enumerate(self.jobs):
job.offset = (i * min_interval) // num_jobs
# internally times are all referenced relative to startup time, add grace period
self.global_start = time.time() + 2.0
def get_and_mark_pending(self):
relative_time = time.time() - self.global_start
to_run = []
for job in self.jobs:
if job.due_to_run(relative_time):
to_run.append(job)
logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
job.mark_run(relative_time)
return to_run
def time_until_next_run(self):
relative_time = time.time() - self.global_start
next_job = min(self.jobs, key=lambda j: j.next_run)
delta = next_job.next_run - relative_time
if delta <= 0.1:
# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
return 0.1
elif delta > 20.0:
logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
return 20.0
logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
return delta
def debug(self, *args, **kwargs):
data = dict()
data['title'] = 'Scheduler status'
now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
relative_time = time.time() - self.global_start
data['started_time'] = start_time
data['current_time'] = now
data['current_time_relative'] = round(relative_time, 3)
data['total_schedules'] = len(self.jobs)
data['schedule_list'] = dict(
[
(
job.name,
dict(
last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
next_run_in_seconds=round(job.next_run - relative_time, 3),
offset_in_seconds=job.offset,
completed_runs=job.completed_runs,
missed_runs=job.missed_runs(relative_time),
),
)
for job in sorted(self.jobs, key=lambda job: job.interval)
]
)
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)

View File

@@ -339,17 +339,6 @@ class AutoscalePool(WorkerPool):
# but if the task takes longer than the time defined here, we will force it to stop here # but if the task takes longer than the time defined here, we will force it to stop here
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD
# initialize some things for subsystem metrics periodic gathering
# the AutoscalePool class does not save these to redis directly, but reports via produce_subsystem_metrics
self.scale_up_ct = 0
self.worker_count_max = 0
def produce_subsystem_metrics(self, metrics_object):
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
metrics_object.set('dispatcher_pool_max_worker_count', self.worker_count_max)
self.worker_count_max = len(self.workers)
@property @property
def should_grow(self): def should_grow(self):
if len(self.workers) < self.min_workers: if len(self.workers) < self.min_workers:
@@ -417,16 +406,16 @@ class AutoscalePool(WorkerPool):
# the task manager to never do more work # the task manager to never do more work
current_task = w.current_task current_task = w.current_task
if current_task and isinstance(current_task, dict): if current_task and isinstance(current_task, dict):
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager') endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
current_task_name = current_task.get('task', '') current_task_name = current_task.get('task', '')
if current_task_name.endswith(endings): if any(current_task_name.endswith(e) for e in endings):
if 'started' not in current_task: if 'started' not in current_task:
w.managed_tasks[current_task['uuid']]['started'] = time.time() w.managed_tasks[current_task['uuid']]['started'] = time.time()
age = time.time() - current_task['started'] age = time.time() - current_task['started']
w.managed_tasks[current_task['uuid']]['age'] = age w.managed_tasks[current_task['uuid']]['age'] = age
if age > self.task_manager_timeout: if age > self.task_manager_timeout:
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}') logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
os.kill(w.pid, signal.SIGUSR1) os.kill(w.pid, signal.SIGTERM)
for m in orphaned: for m in orphaned:
# if all the workers are dead, spawn at least one # if all the workers are dead, spawn at least one
@@ -454,12 +443,7 @@ class AutoscalePool(WorkerPool):
idx = random.choice(range(len(self.workers))) idx = random.choice(range(len(self.workers)))
return idx, self.workers[idx] return idx, self.workers[idx]
else: else:
self.scale_up_ct += 1 return super(AutoscalePool, self).up()
ret = super(AutoscalePool, self).up()
new_worker_ct = len(self.workers)
if new_worker_ct > self.worker_count_max:
self.worker_count_max = new_worker_ct
return ret
def write(self, preferred_queue, body): def write(self, preferred_queue, body):
if 'guid' in body: if 'guid' in body:

View File

@@ -73,15 +73,15 @@ class task:
return cls.apply_async(args, kwargs) return cls.apply_async(args, kwargs)
@classmethod @classmethod
def get_async_body(cls, args=None, kwargs=None, uuid=None, **kw): def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
"""
Get the python dict to become JSON data in the pg_notify message
This same message gets passed over the dispatcher IPC queue to workers
If a task is submitted to a multiprocessing pool, skipping pg_notify, this might be used directly
"""
task_id = uuid or str(uuid4()) task_id = uuid or str(uuid4())
args = args or [] args = args or []
kwargs = kwargs or {} kwargs = kwargs or {}
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()} obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
guid = get_guid() guid = get_guid()
if guid: if guid:
@@ -89,16 +89,6 @@ class task:
if bind_kwargs: if bind_kwargs:
obj['bind_kwargs'] = bind_kwargs obj['bind_kwargs'] = bind_kwargs
obj.update(**kw) obj.update(**kw)
return obj
@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = cls.get_async_body(args=args, kwargs=kwargs, uuid=uuid, **kw)
if callable(queue): if callable(queue):
queue = queue() queue = queue()
if not is_testing(): if not is_testing():
@@ -126,5 +116,4 @@ class task:
setattr(fn, 'name', cls.name) setattr(fn, 'name', cls.name)
setattr(fn, 'apply_async', cls.apply_async) setattr(fn, 'apply_async', cls.apply_async)
setattr(fn, 'delay', cls.delay) setattr(fn, 'delay', cls.delay)
setattr(fn, 'get_async_body', cls.get_async_body)
return fn return fn

View File

@@ -7,21 +7,18 @@ import signal
import sys import sys
import redis import redis
import json import json
import psycopg import psycopg2
import time import time
from uuid import UUID from uuid import UUID
from queue import Empty as QueueEmpty from queue import Empty as QueueEmpty
from datetime import timedelta
from django import db from django import db
from django.conf import settings from django.conf import settings
from awx.main.dispatch.pool import WorkerPool from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch.periodic import Scheduler
from awx.main.dispatch import pg_bus_conn from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name from awx.main.utils.db import set_connection_name
import awx.main.analytics.subsystem_metrics as s_metrics
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -66,12 +63,10 @@ class AWXConsumerBase(object):
def control(self, body): def control(self, body):
logger.warning(f'Received control signal:\n{body}') logger.warning(f'Received control signal:\n{body}')
control = body.get('control') control = body.get('control')
if control in ('status', 'schedule', 'running', 'cancel'): if control in ('status', 'running', 'cancel'):
reply_queue = body['reply_to'] reply_queue = body['reply_to']
if control == 'status': if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()]) msg = '\n'.join([self.listening_on, self.pool.debug()])
if control == 'schedule':
msg = self.scheduler.debug()
elif control == 'running': elif control == 'running':
msg = [] msg = []
for worker in self.pool.workers: for worker in self.pool.workers:
@@ -89,20 +84,24 @@ class AWXConsumerBase(object):
if task_ids and not msg: if task_ids and not msg:
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}') logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
if reply_queue is not None: with pg_bus_conn() as conn:
with pg_bus_conn() as conn: conn.notify(reply_queue, json.dumps(msg))
conn.notify(reply_queue, json.dumps(msg))
elif control == 'reload': elif control == 'reload':
for worker in self.pool.workers: for worker in self.pool.workers:
worker.quit() worker.quit()
else: else:
logger.error('unrecognized control message: {}'.format(control)) logger.error('unrecognized control message: {}'.format(control))
def dispatch_task(self, body): def process_task(self, body):
"""This will place the given body into a worker queue to run method decorated as a task"""
if isinstance(body, dict): if isinstance(body, dict):
body['time_ack'] = time.time() body['time_ack'] = time.time()
if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
if len(self.pool): if len(self.pool):
if "uuid" in body and body['uuid']: if "uuid" in body and body['uuid']:
try: try:
@@ -116,24 +115,15 @@ class AWXConsumerBase(object):
self.pool.write(queue, body) self.pool.write(queue, body)
self.total_messages += 1 self.total_messages += 1
def process_task(self, body):
"""Routes the task details in body as either a control task or a task-task"""
if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
self.dispatch_task(body)
@log_excess_runtime(logger) @log_excess_runtime(logger)
def record_statistics(self): def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second if time.time() - self.last_stats > 1: # buffer stat recording to once per second
try: try:
self.redis.set(f'awx_{self.name}_statistics', self.pool.debug()) self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
self.last_stats = time.time()
except Exception: except Exception:
logger.exception(f"encountered an error communicating with redis to store {self.name} statistics") logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
self.last_stats = time.time() self.last_stats = time.time()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
signal.signal(signal.SIGINT, self.stop) signal.signal(signal.SIGINT, self.stop)
@@ -152,75 +142,29 @@ class AWXConsumerRedis(AWXConsumerBase):
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
super(AWXConsumerRedis, self).run(*args, **kwargs) super(AWXConsumerRedis, self).run(*args, **kwargs)
self.worker.on_start() self.worker.on_start()
logger.info(f'Callback receiver started with pid={os.getpid()}')
db.connection.close() # logs use database, so close connection
while True: while True:
logger.debug(f'{os.getpid()} is alive')
time.sleep(60) time.sleep(60)
class AWXConsumerPG(AWXConsumerBase): class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, schedule=None, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
# if no successful loops have ran since startup, then we should fail right away # if no successful loops have ran since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup self.pg_is_down = True # set so that we fail if we get database errors on startup
init_time = time.time() self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
self.pg_down_time = init_time - self.pg_max_wait # allow no grace period self.last_cleanup = time.time()
self.last_cleanup = init_time
self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
self.last_metrics_gather = init_time
self.listen_cumulative_time = 0.0
if schedule:
schedule = schedule.copy()
else:
schedule = {}
# add control tasks to be ran at regular schedules
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
schedule['pool_cleanup'] = {'control': self.pool.cleanup, 'schedule': timedelta(seconds=60)}
# record subsystem metrics for the dispatcher
schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
self.scheduler = Scheduler(schedule)
def record_metrics(self):
current_time = time.time()
self.pool.produce_subsystem_metrics(self.subsystem_metrics)
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
self.subsystem_metrics.pipe_execute()
self.listen_cumulative_time = 0.0
self.last_metrics_gather = current_time
def run_periodic_tasks(self): def run_periodic_tasks(self):
""" self.record_statistics() # maintains time buffer in method
Run general periodic logic, and return maximum time in seconds before
the next requested run
This may be called more often than that when events are consumed
so this should be very efficient in that
"""
try:
self.record_statistics() # maintains time buffer in method
except Exception as exc:
logger.warning(f'Failed to save dispatcher statistics {exc}')
for job in self.scheduler.get_and_mark_pending(): if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
if 'control' in job.data: # NOTE: if we run out of database connections, it is important to still run cleanup
try: # so that we scale down workers and free up connections
job.data['control']() self.pool.cleanup()
except Exception: self.last_cleanup = time.time()
logger.exception(f'Error running control task {job.data}')
elif 'task' in job.data:
body = self.worker.resolve_callable(job.data['task']).get_async_body()
# bypasses pg_notify for scheduled tasks
self.dispatch_task(body)
if self.pg_is_down:
logger.info('Dispatcher listener connection established')
self.pg_is_down = False
self.listen_start = time.time()
return self.scheduler.time_until_next_run()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs) super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -236,21 +180,17 @@ class AWXConsumerPG(AWXConsumerBase):
if init is False: if init is False:
self.worker.on_start() self.worker.on_start()
init = True init = True
# run_periodic_tasks run scheduled actions and gives time until next scheduled action
# this is saved to the conn (PubSub) object in order to modify read timeout in-loop
conn.select_timeout = self.run_periodic_tasks()
# this is the main operational loop for awx-manage run_dispatcher
for e in conn.events(yield_timeouts=True): for e in conn.events(yield_timeouts=True):
self.listen_cumulative_time += time.time() - self.listen_start # for metrics
if e is not None: if e is not None:
self.process_task(json.loads(e.payload)) self.process_task(json.loads(e.payload))
conn.select_timeout = self.run_periodic_tasks() self.run_periodic_tasks()
self.pg_is_down = False
if self.should_stop: if self.should_stop:
return return
except psycopg.InterfaceError: except psycopg2.InterfaceError:
logger.warning("Stale Postgres message bus connection, reconnecting") logger.warning("Stale Postgres message bus connection, reconnecting")
continue continue
except (db.DatabaseError, psycopg.OperationalError): except (db.DatabaseError, psycopg2.OperationalError):
# If we have attained stady state operation, tolerate short-term database hickups # If we have attained stady state operation, tolerate short-term database hickups
if not self.pg_is_down: if not self.pg_is_down:
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s") logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
@@ -259,12 +199,6 @@ class AWXConsumerPG(AWXConsumerBase):
current_downtime = time.time() - self.pg_down_time current_downtime = time.time() - self.pg_down_time
if current_downtime > self.pg_max_wait: if current_downtime > self.pg_max_wait:
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting") logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise raise
# Wait for a second before next attempt, but still listen for any shutdown signals # Wait for a second before next attempt, but still listen for any shutdown signals
for i in range(10): for i in range(10):
@@ -276,12 +210,6 @@ class AWXConsumerPG(AWXConsumerBase):
except Exception: except Exception:
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata # Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
logger.exception('Encountered unhandled error in dispatcher main loop') logger.exception('Encountered unhandled error in dispatcher main loop')
# Sending QUIT to multiprocess queue to signal workers to exit
for worker in self.pool.workers:
try:
worker.quit()
except Exception:
logger.exception(f"Error sending QUIT to worker {worker}")
raise raise
@@ -304,8 +232,8 @@ class BaseWorker(object):
break break
except QueueEmpty: except QueueEmpty:
continue continue
except Exception: except Exception as e:
logger.exception("Exception on worker {}, reconnecting: ".format(idx)) logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
continue continue
try: try:
for conn in db.connections.all(): for conn in db.connections.all():

View File

@@ -9,6 +9,7 @@ from django.conf import settings
from django.utils.functional import cached_property from django.utils.functional import cached_property
from django.utils.timezone import now as tz_now from django.utils.timezone import now as tz_now
from django.db import transaction, connection as django_connection from django.db import transaction, connection as django_connection
from django.db.utils import DataError
from django_guid import set_guid from django_guid import set_guid
import psutil import psutil
@@ -72,7 +73,7 @@ class CallbackBrokerWorker(BaseWorker):
def __init__(self): def __init__(self):
self.buff = {} self.buff = {}
self.redis = redis.Redis.from_url(settings.BROKER_URL) self.redis = redis.Redis.from_url(settings.BROKER_URL)
self.subsystem_metrics = s_metrics.CallbackReceiverMetrics(auto_pipe_execute=False) self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.queue_pop = 0 self.queue_pop = 0
self.queue_name = settings.CALLBACK_QUEUE self.queue_name = settings.CALLBACK_QUEUE
self.prof = AWXProfiler("CallbackBrokerWorker") self.prof = AWXProfiler("CallbackBrokerWorker")
@@ -191,12 +192,16 @@ class CallbackBrokerWorker(BaseWorker):
e._retry_count = retry_count e._retry_count = retry_count
# special sanitization logic for postgres treatment of NUL 0x00 char # special sanitization logic for postgres treatment of NUL 0x00 char
# This used to check the class of the exception but on the postgres3 upgrade it could appear if (retry_count == 1) and isinstance(exc_indv, DataError):
# as either DataError or ValueError, so now lets just try if its there. # The easiest place is in stdout. This raises as an error stating that it can't save a NUL character
if (retry_count == 1) and ("\x00" in e.stdout): if "\x00" in e.stdout:
e.stdout = e.stdout.replace("\x00", "") e.stdout = e.stdout.replace("\x00", "")
# There is also a chance that NUL char is embedded in event data which is part of a JSON blob. In that case we, thankfully, get a different exception
if retry_count >= self.INDIVIDUAL_EVENT_RETRIES: if 'unsupported Unicode escape sequence' in str(exc_indv):
e.event_data = json.loads(
json.dumps(e.event_data).replace("\x00", "").replace("\\x00", "").replace("\u0000", "").replace("\\u0000", "")
)
elif retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
logger.error(f'Hit max retries ({retry_count}) saving individual Event error: {str(exc_indv)}\ndata:\n{e.__dict__}') logger.error(f'Hit max retries ({retry_count}) saving individual Event error: {str(exc_indv)}\ndata:\n{e.__dict__}')
events.remove(e) events.remove(e)
else: else:

View File

@@ -5,7 +5,6 @@
import copy import copy
import json import json
import re import re
import sys
import urllib.parse import urllib.parse
from jinja2 import sandbox, StrictUndefined from jinja2 import sandbox, StrictUndefined
@@ -68,60 +67,10 @@ def __enum_validate__(validator, enums, instance, schema):
Draft4Validator.VALIDATORS['enum'] = __enum_validate__ Draft4Validator.VALIDATORS['enum'] = __enum_validate__
import logging
logger = logging.getLogger('awx.main.fields')
class JSONBlob(JSONField): class JSONBlob(JSONField):
# Cringe... a JSONField that is back ended with a TextField.
# This field was a legacy custom field type that tl;dr; was a TextField
# Over the years, with Django upgrades, we were able to go to a JSONField instead of the custom field
# However, we didn't want to have large customers with millions of events to update from text to json during an upgrade
# So we keep this field type as backended with TextField.
def get_internal_type(self): def get_internal_type(self):
return "TextField" return "TextField"
# postgres uses a Jsonb field as the default backend
# with psycopg2 it was using a psycopg2._json.Json class internally
# with psycopg3 it uses a psycopg.types.json.Jsonb class internally
# The binary class was not compatible with a text field, so we are going to override these next two methods and ensure we are using a string
def from_db_value(self, value, expression, connection):
if value is None:
return value
if isinstance(value, str):
try:
return json.loads(value)
except Exception as e:
logger.error(f"Failed to load JSONField {self.name}: {e}")
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
try:
# Null characters are not allowed in text fields and JSONBlobs are JSON data but saved as text
# So we want to make sure we strip out any null characters also note, these "should" be escaped by the dumps process:
# >>> my_obj = { 'test': '\x00' }
# >>> import json
# >>> json.dumps(my_obj)
# '{"test": "\\u0000"}'
# But just to be safe, lets remove them if they are there. \x00 and \u0000 are the same:
# >>> string = "\x00"
# >>> "\u0000" in string
# True
dumped_value = json.dumps(value)
if "\x00" in dumped_value:
dumped_value = dumped_value.replace("\x00", '')
return dumped_value
except Exception as e:
logger.error(f"Failed to dump JSONField {self.name}: {e} value: {value}")
return value
# Based on AutoOneToOneField from django-annoying: # Based on AutoOneToOneField from django-annoying:
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py # https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py
@@ -407,13 +356,11 @@ class SmartFilterField(models.TextField):
# https://docs.python.org/2/library/stdtypes.html#truth-value-testing # https://docs.python.org/2/library/stdtypes.html#truth-value-testing
if not value: if not value:
return None return None
# avoid doing too much during migrations value = urllib.parse.unquote(value)
if 'migrate' not in sys.argv: try:
value = urllib.parse.unquote(value) SmartFilter().query_from_string(value)
try: except RuntimeError as e:
SmartFilter().query_from_string(value) raise models.base.ValidationError(e)
except RuntimeError as e:
raise models.base.ValidationError(e)
return super(SmartFilterField, self).get_prep_value(value) return super(SmartFilterField, self).get_prep_value(value)
@@ -853,7 +800,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
def validate_env_var_allowed(self, env_var): def validate_env_var_allowed(self, env_var):
if env_var.startswith('ANSIBLE_'): if env_var.startswith('ANSIBLE_'):
raise django_exceptions.ValidationError( raise django_exceptions.ValidationError(
_('Environment variable {} may affect Ansible configuration so its use is not allowed in credentials.').format(env_var), _('Environment variable {} may affect Ansible configuration so its ' 'use is not allowed in credentials.').format(env_var),
code='invalid', code='invalid',
params={'value': env_var}, params={'value': env_var},
) )

View File

@@ -1,53 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
from django.core.management.base import BaseCommand
from awx.main.models import Instance, ReceptorAddress
def add_address(**kwargs):
try:
instance = Instance.objects.get(hostname=kwargs.pop('instance'))
kwargs['instance'] = instance
if kwargs.get('canonical') and instance.receptor_addresses.filter(canonical=True).exclude(address=kwargs['address']).exists():
print(f"Instance {instance.hostname} already has a canonical address, skipping")
return False
# if ReceptorAddress already exists with address, just update
# otherwise, create new ReceptorAddress
addr, _ = ReceptorAddress.objects.update_or_create(address=kwargs.pop('address'), defaults=kwargs)
print(f"Successfully added receptor address {addr.get_full_address()}")
return True
except Exception as e:
print(f"Error adding receptor address: {e}")
return False
class Command(BaseCommand):
"""
Internal controller command.
Register receptor address to an already-registered instance.
"""
help = "Add receptor address to an instance."
def add_arguments(self, parser):
parser.add_argument('--instance', dest='instance', required=True, type=str, help="Instance hostname this address is added to")
parser.add_argument('--address', dest='address', required=True, type=str, help="Receptor address")
parser.add_argument('--port', dest='port', type=int, help="Receptor listener port")
parser.add_argument('--websocket_path', dest='websocket_path', type=str, default="", help="Path for websockets")
parser.add_argument('--is_internal', action='store_true', help="If true, address only resolvable within the Kubernetes cluster")
parser.add_argument('--protocol', type=str, default='tcp', choices=['tcp', 'ws', 'wss'], help="Protocol to use for the Receptor listener")
parser.add_argument('--canonical', action='store_true', help="If true, address is the canonical address for the instance")
parser.add_argument('--peers_from_control_nodes', action='store_true', help="If true, control nodes will peer to this address")
def handle(self, **options):
address_options = {
k: options[k]
for k in ('instance', 'address', 'port', 'websocket_path', 'is_internal', 'protocol', 'peers_from_control_nodes', 'canonical')
if options[k]
}
changed = add_address(**address_options)
if changed:
print("(changed: True)")

View File

@@ -23,10 +23,7 @@ class Command(BaseCommand):
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old') parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)') parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
)
def init_logging(self): def init_logging(self):
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0])) log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
@@ -51,7 +48,7 @@ class Command(BaseCommand):
else: else:
pks_to_delete.add(asobj.pk) pks_to_delete.add(asobj.pk)
# Cleanup objects in batches instead of deleting each one individually. # Cleanup objects in batches instead of deleting each one individually.
if len(pks_to_delete) >= self.batch_size: if len(pks_to_delete) >= 500:
ActivityStream.objects.filter(pk__in=pks_to_delete).delete() ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
n_deleted_items += len(pks_to_delete) n_deleted_items += len(pks_to_delete)
pks_to_delete.clear() pks_to_delete.clear()
@@ -66,5 +63,4 @@ class Command(BaseCommand):
self.days = int(options.get('days', 30)) self.days = int(options.get('days', 30))
self.cutoff = now() - datetime.timedelta(days=self.days) self.cutoff = now() - datetime.timedelta(days=self.days)
self.dry_run = bool(options.get('dry_run', False)) self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 500))
self.cleanup_activitystream() self.cleanup_activitystream()

View File

@@ -1,22 +1,22 @@
from awx.main.models import HostMetric
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from django.conf import settings from django.conf import settings
from awx.main.tasks.host_metrics import HostMetricTask
class Command(BaseCommand): class Command(BaseCommand):
""" """
This command provides cleanup task for HostMetric model. Run soft-deleting of HostMetrics
There are two modes, which run in following order:
- soft cleanup
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
- - updates columns delete, deleted_counter and last_deleted
- hard cleanup
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
This operation happens after the soft deletion has finished.
""" """
help = 'Run soft and hard-deletion of HostMetrics' help = 'Run soft-deleting of HostMetrics'
def add_arguments(self, parser):
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
def handle(self, *args, **options): def handle(self, *args, **options):
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD) months_ago = options.get('months-ago') or None
if not months_ago:
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
HostMetric.cleanup_task(months_ago)

View File

@@ -9,7 +9,6 @@ import re
# Django # Django
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, connection from django.db import transaction, connection
from django.db.models import Min, Max from django.db.models import Min, Max
@@ -18,7 +17,10 @@ from django.utils.timezone import now
# AWX # AWX
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification
from awx.main.utils import unified_job_class_to_event_table_name
def unified_job_class_to_event_table_name(job_class):
return f'main_{job_class().event_class.__name__.lower()}'
def partition_table_name(job_class, dt): def partition_table_name(job_class, dt):
@@ -150,10 +152,7 @@ class Command(BaseCommand):
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.') parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)') parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
)
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs') parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands') parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates') parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
@@ -199,58 +198,18 @@ class Command(BaseCommand):
delete_meta.delete_jobs() delete_meta.delete_jobs()
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count) return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
def has_unpartitioned_table(self, model): def _cascade_delete_job_events(self, model, pk_list):
tblname = unified_job_class_to_event_table_name(model)
with connection.cursor() as cursor:
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
row = cursor.fetchone()
if row is None:
return False
return True
def _delete_unpartitioned_table(self, model):
"If the unpartitioned table is no longer necessary, it will drop the table"
tblname = unified_job_class_to_event_table_name(model)
if not self.has_unpartitioned_table(model):
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
return
with connection.cursor() as cursor:
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
row = cursor.fetchone()
last_created = row[0]
if last_created:
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
else:
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
if (last_created is None) or (last_created < self.cutoff):
self.logger.warning(
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
)
with connection.cursor() as cursor:
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
def _delete_unpartitioned_events(self, model, pk_list):
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
tblname = unified_job_class_to_event_table_name(model)
rel_name = model().event_parent_key
# Bail if the unpartitioned table does not exist anymore
if not self.has_unpartitioned_table(model):
return
# Table still exists, delete individual unpartitioned events
if pk_list: if pk_list:
with connection.cursor() as cursor: with connection.cursor() as cursor:
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.') tblname = unified_job_class_to_event_table_name(model)
pk_list_csv = ','.join(map(str, pk_list)) pk_list_csv = ','.join(map(str, pk_list))
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});") rel_name = model().event_parent_key
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
def cleanup_jobs(self): def cleanup_jobs(self):
batch_size = 100000
# Hack to avoid doing N+1 queries as each item in the Job query set does # Hack to avoid doing N+1 queries as each item in the Job query set does
# an individual query to get the underlying UnifiedJob. # an individual query to get the underlying UnifiedJob.
Job.polymorphic_super_sub_accessors_replaced = True Job.polymorphic_super_sub_accessors_replaced = True
@@ -265,14 +224,13 @@ class Command(BaseCommand):
deleted = 0 deleted = 0
info = qs.aggregate(min=Min('id'), max=Max('id')) info = qs.aggregate(min=Min('id'), max=Max('id'))
if info['min'] is not None: if info['min'] is not None:
for start in range(info['min'], info['max'] + 1, self.batch_size): for start in range(info['min'], info['max'] + 1, batch_size):
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size) qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
pk_list = qs_batch.values_list('id', flat=True) pk_list = qs_batch.values_list('id', flat=True)
_, results = qs_batch.delete() _, results = qs_batch.delete()
deleted += results['main.Job'] deleted += results['main.Job']
# Avoid dropping the job event table in case we have interacted with it already self._cascade_delete_job_events(Job, pk_list)
self._delete_unpartitioned_events(Job, pk_list)
return skipped, deleted return skipped, deleted
@@ -295,7 +253,7 @@ class Command(BaseCommand):
deleted += 1 deleted += 1
if not self.dry_run: if not self.dry_run:
self._delete_unpartitioned_events(AdHocCommand, pk_list) self._cascade_delete_job_events(AdHocCommand, pk_list)
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count() skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted return skipped, deleted
@@ -323,7 +281,7 @@ class Command(BaseCommand):
deleted += 1 deleted += 1
if not self.dry_run: if not self.dry_run:
self._delete_unpartitioned_events(ProjectUpdate, pk_list) self._cascade_delete_job_events(ProjectUpdate, pk_list)
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count() skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted return skipped, deleted
@@ -351,7 +309,7 @@ class Command(BaseCommand):
deleted += 1 deleted += 1
if not self.dry_run: if not self.dry_run:
self._delete_unpartitioned_events(InventoryUpdate, pk_list) self._cascade_delete_job_events(InventoryUpdate, pk_list)
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count() skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted return skipped, deleted
@@ -375,7 +333,7 @@ class Command(BaseCommand):
deleted += 1 deleted += 1
if not self.dry_run: if not self.dry_run:
self._delete_unpartitioned_events(SystemJob, pk_list) self._cascade_delete_job_events(SystemJob, pk_list)
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count() skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted return skipped, deleted
@@ -420,12 +378,12 @@ class Command(BaseCommand):
skipped += Notification.objects.filter(created__gte=self.cutoff).count() skipped += Notification.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted return skipped, deleted
@transaction.atomic
def handle(self, *args, **options): def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1)) self.verbosity = int(options.get('verbosity', 1))
self.init_logging() self.init_logging()
self.days = int(options.get('days', 90)) self.days = int(options.get('days', 90))
self.dry_run = bool(options.get('dry_run', False)) self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 100000))
try: try:
self.cutoff = now() - datetime.timedelta(days=self.days) self.cutoff = now() - datetime.timedelta(days=self.days)
except OverflowError: except OverflowError:
@@ -447,29 +405,19 @@ class Command(BaseCommand):
del s.receivers[:] del s.receivers[:]
s.sender_receivers_cache.clear() s.sender_receivers_cache.clear()
with transaction.atomic(): for m in model_names:
for m in models_to_cleanup: if m not in models_to_cleanup:
skipped, deleted = getattr(self, 'cleanup_%s' % m)() continue
func = getattr(self, 'cleanup_%s_partition' % m, None) skipped, deleted = getattr(self, 'cleanup_%s' % m)()
if func:
skipped_partition, deleted_partition = func()
skipped += skipped_partition
deleted += deleted_partition
if self.dry_run: func = getattr(self, 'cleanup_%s_partition' % m, None)
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped) if func:
else: skipped_partition, deleted_partition = func()
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped) skipped += skipped_partition
deleted += deleted_partition
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables if self.dry_run:
if not self.dry_run: self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
with transaction.atomic(): else:
for m in models_to_cleanup: self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
unified_job_class = apps.get_model('main', unified_job_class_name)
try:
unified_job_class().event_class
except (NotImplementedError, AttributeError):
continue # no need to run this for models without events
self._delete_unpartitioned_table(unified_job_class)

View File

@@ -44,7 +44,7 @@ class Command(BaseCommand):
'- To list all (now deprecated) custom virtual environments run:', '- To list all (now deprecated) custom virtual environments run:',
'awx-manage list_custom_venvs', 'awx-manage list_custom_venvs',
'', '',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:', '- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv', 'awx-manage export_custom_venv /path/to/venv',
'', '',
'- Run these commands with `-q` to remove tool tips.', '- Run these commands with `-q` to remove tool tips.',

View File

@@ -13,7 +13,7 @@ class Command(BaseCommand):
Deprovision a cluster node Deprovision a cluster node
""" """
help = 'Remove instance from the database. Specify `--hostname` to use this command.' help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning') parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')

View File

@@ -1,195 +0,0 @@
import json
import os
import sys
import re
from typing import Any
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.conf import settings_registry
class Command(BaseCommand):
help = 'Dump the current auth configuration in django_ansible_base.authenticator format, currently supports LDAP and SAML'
DAB_SAML_AUTHENTICATOR_KEYS = {
"SP_ENTITY_ID": True,
"SP_PUBLIC_CERT": True,
"SP_PRIVATE_KEY": True,
"ORG_INFO": True,
"TECHNICAL_CONTACT": True,
"SUPPORT_CONTACT": True,
"SP_EXTRA": False,
"SECURITY_CONFIG": False,
"EXTRA_DATA": False,
"ENABLED_IDPS": True,
"CALLBACK_URL": False,
}
DAB_LDAP_AUTHENTICATOR_KEYS = {
"SERVER_URI": True,
"BIND_DN": False,
"BIND_PASSWORD": False,
"CONNECTION_OPTIONS": False,
"GROUP_TYPE": True,
"GROUP_TYPE_PARAMS": True,
"GROUP_SEARCH": False,
"START_TLS": False,
"USER_DN_TEMPLATE": True,
"USER_ATTR_MAP": True,
"USER_SEARCH": False,
}
def is_enabled(self, settings, keys):
missing_fields = []
for key, required in keys.items():
if required and not settings.get(key):
missing_fields.append(key)
if missing_fields:
return False, missing_fields
return True, None
def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]:
awx_ldap_settings = {}
for awx_ldap_setting in settings_registry.get_registered_settings(category_slug='ldap'):
key = awx_ldap_setting.removeprefix("AUTH_LDAP_")
value = getattr(settings, awx_ldap_setting, None)
awx_ldap_settings[key] = value
grouped_settings = {}
for key, value in awx_ldap_settings.items():
match = re.search(r'(\d+)', key)
index = int(match.group()) if match else 0
new_key = re.sub(r'\d+_', '', key)
if index not in grouped_settings:
grouped_settings[index] = {}
grouped_settings[index][new_key] = value
if new_key == "GROUP_TYPE" and value:
grouped_settings[index][new_key] = type(value).__name__
if new_key == "SERVER_URI" and value:
value = value.split(", ")
grouped_settings[index][new_key] = value
if type(value).__name__ == "LDAPSearch":
data = []
data.append(value.base_dn)
data.append("SCOPE_SUBTREE")
data.append(value.filterstr)
grouped_settings[index][new_key] = data
return grouped_settings
def get_awx_saml_settings(self) -> dict[str, Any]:
awx_saml_settings = {}
for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'):
awx_saml_settings[awx_saml_setting.removeprefix("SOCIAL_AUTH_SAML_")] = getattr(settings, awx_saml_setting, None)
return awx_saml_settings
def format_config_data(self, enabled, awx_settings, type, keys, name):
config = {
"type": f"ansible_base.authentication.authenticator_plugins.{type}",
"name": name,
"enabled": enabled,
"create_objects": True,
"users_unique": False,
"remove_users": True,
"configuration": {},
}
for k in keys:
v = awx_settings.get(k)
config["configuration"].update({k: v})
if type == "saml":
idp_to_key_mapping = {
"url": "IDP_URL",
"x509cert": "IDP_X509_CERT",
"entity_id": "IDP_ENTITY_ID",
"attr_email": "IDP_ATTR_EMAIL",
"attr_groups": "IDP_GROUPS",
"attr_username": "IDP_ATTR_USERNAME",
"attr_last_name": "IDP_ATTR_LAST_NAME",
"attr_first_name": "IDP_ATTR_FIRST_NAME",
"attr_user_permanent_id": "IDP_ATTR_USER_PERMANENT_ID",
}
for idp_name in awx_settings.get("ENABLED_IDPS", {}):
for key in idp_to_key_mapping:
value = awx_settings["ENABLED_IDPS"][idp_name].get(key)
if value is not None:
config["name"] = idp_name
config["configuration"].update({idp_to_key_mapping[key]: value})
return config
def add_arguments(self, parser):
parser.add_argument(
"output_file",
nargs="?",
type=str,
default=None,
help="Output JSON file path",
)
def handle(self, *args, **options):
try:
data = []
# dump SAML settings
awx_saml_settings = self.get_awx_saml_settings()
awx_saml_enabled, saml_missing_fields = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
if awx_saml_enabled:
awx_saml_name = awx_saml_settings["ENABLED_IDPS"]
data.append(
self.format_config_data(
awx_saml_enabled,
awx_saml_settings,
"saml",
self.DAB_SAML_AUTHENTICATOR_KEYS,
awx_saml_name,
)
)
else:
data.append({"SAML_missing_fields": saml_missing_fields})
# dump LDAP settings
awx_ldap_group_settings = self.get_awx_ldap_settings()
for awx_ldap_name, awx_ldap_settings in awx_ldap_group_settings.items():
awx_ldap_enabled, ldap_missing_fields = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
if awx_ldap_enabled:
data.append(
self.format_config_data(
awx_ldap_enabled,
awx_ldap_settings,
"ldap",
self.DAB_LDAP_AUTHENTICATOR_KEYS,
f"LDAP_{awx_ldap_name}",
)
)
else:
data.append({f"LDAP_{awx_ldap_name}_missing_fields": ldap_missing_fields})
# write to file if requested
if options["output_file"]:
# Define the path for the output JSON file
output_file = options["output_file"]
# Ensure the directory exists
os.makedirs(os.path.dirname(output_file), exist_ok=True)
# Write data to the JSON file
with open(output_file, "w") as f:
json.dump(data, f, indent=4)
self.stdout.write(self.style.SUCCESS(f"Auth config data dumped to {output_file}"))
else:
self.stdout.write(json.dumps(data, indent=4))
except Exception as e:
self.stdout.write(self.style.ERROR(f"An error occurred: {str(e)}"))
sys.exit(1)

View File

@@ -1,9 +0,0 @@
from django.core.management.base import BaseCommand
from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask
class Command(BaseCommand):
help = 'Computing of HostMetricSummaryMonthly'
def handle(self, *args, **options):
HostMetricSummaryMonthlyTask().execute()

View File

@@ -22,7 +22,7 @@ class Command(BaseCommand):
'# Discovered Virtual Environments:', '# Discovered Virtual Environments:',
'\n'.join(venvs), '\n'.join(venvs),
'', '',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:', '- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv', 'awx-manage export_custom_venv /path/to/venv',
'', '',
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:', '- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',

Some files were not shown because too many files have changed in this diff Show More