Compare commits


3 Commits

Author SHA1 Message Date
Elijah DeLee
d65ab1c5ac add serializer stuff so it shows up in DRF 2025-03-18 09:14:55 -04:00
Elijah DeLee
a2decc7c60 fix lint 2025-03-18 09:14:55 -04:00
Elijah DeLee
97d03e434e Add concept of priority to job templates and jobs
This adds the concept of priority to jobs.
The task manager now orders jobs by priority, then by creation time.
All rules around instance group capacity still apply: even a job with very
high priority will not be scheduled if there is no available capacity in its
available instance groups.

A higher number means higher priority.
The default priority is 0.

Dependencies spawned from other jobs are assigned the priority of the job
that caused them to be created.

Prompt-on-launch support for priority still needs to be added so that
priority behaves consistently.
2025-03-18 09:14:55 -04:00
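
The ordering described in the commit message amounts to a simple query: highest priority first, oldest creation time first within a priority, with capacity checks still applied before dispatch. A minimal Django-style sketch of that ordering; model and field names are illustrative, not the actual AWX task manager code:

```python
# Minimal sketch of the scheduling order described above; model and field
# names are illustrative, not taken from the AWX codebase.
from django.db import models


class Job(models.Model):
    priority = models.PositiveIntegerField(default=0)  # higher number = higher priority
    created = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=32, default='pending')


def jobs_in_schedule_order():
    # Highest priority first; ties broken by oldest creation time.
    # Instance group capacity checks would still apply before dispatch.
    return Job.objects.filter(status='pending').order_by('-priority', 'created')


def spawn_dependency(parent, **kwargs):
    # Dependencies spawned by a job inherit that job's priority.
    return Job.objects.create(priority=parent.priority, **kwargs)
```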
253 changed files with 2969 additions and 9658 deletions


@@ -2,7 +2,7 @@
codecov: codecov:
notify: notify:
after_n_builds: 9 # Number of test matrix+lint jobs uploading coverage after_n_builds: 6 # Number of test matrix+lint jobs uploading coverage
wait_for_ci: false wait_for_ci: false
require_ci_to_pass: false require_ci_to_pass: false


@@ -17,23 +17,6 @@ exclude_also =
[run] [run]
branch = True branch = True
# NOTE: `disable_warnings` is needed when `pytest-cov` runs in tandem
# NOTE: with `pytest-xdist`. These warnings are false negative in this
# NOTE: context.
#
# NOTE: It's `coveragepy` that emits the warnings and previously they
# NOTE: wouldn't get on the radar of `pytest`'s `filterwarnings`
# NOTE: mechanism. This changed, however, with `pytest >= 8.4`. And
# NOTE: since we set `filterwarnings = error`, those warnings are being
# NOTE: raised as exceptions, cascading into `pytest`'s internals and
# NOTE: causing tracebacks and crashes of the test sessions.
#
# Ref:
# * https://github.com/pytest-dev/pytest-cov/issues/693
# * https://github.com/pytest-dev/pytest-cov/pull/695
# * https://github.com/pytest-dev/pytest-cov/pull/696
disable_warnings =
module-not-measured
omit = omit =
awx/main/migrations/* awx/main/migrations/*
awx/settings/defaults.py awx/settings/defaults.py


@@ -4,8 +4,7 @@
<!--- <!---
If you are fixing an existing issue, please include "related #nnn" in your If you are fixing an existing issue, please include "related #nnn" in your
commit message and your description; but you should still explain what commit message and your description; but you should still explain what
the change does. Also please make sure that if this PR has an attached JIRA, put AAP-<number> the change does.
in as the first entry for your PR title.
--> -->
##### ISSUE TYPE ##### ISSUE TYPE
@@ -17,11 +16,17 @@ in as the first entry for your PR title.
##### COMPONENT NAME ##### COMPONENT NAME
<!--- Name of the module/plugin/module/task --> <!--- Name of the module/plugin/module/task -->
- API - API
- UI
- Collection - Collection
- CLI - CLI
- Docs - Docs
- Other - Other
##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
```
```
##### ADDITIONAL INFORMATION ##### ADDITIONAL INFORMATION


@@ -11,7 +11,9 @@ inputs:
runs: runs:
using: composite using: composite
steps: steps:
- uses: ./.github/actions/setup-python - name: Get python version from Makefile
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set lower case owner name - name: Set lower case owner name
shell: bash shell: bash
@@ -24,9 +26,26 @@ runs:
run: | run: |
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- uses: ./.github/actions/setup-ssh-agent - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ inputs.private-github-key }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ inputs.private-github-key }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi
- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with: with:
ssh-private-key: ${{ inputs.private-github-key }} ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
- name: Pre-pull latest devel image to warm cache - name: Pre-pull latest devel image to warm cache
shell: bash shell: bash


@@ -1,27 +0,0 @@
name: 'Setup Python from Makefile'
description: 'Extract and set up Python version from Makefile'
inputs:
python-version:
description: 'Override Python version (optional)'
required: false
default: ''
working-directory:
description: 'Directory containing the Makefile'
required: false
default: '.'
runs:
using: composite
steps:
- name: Get python version from Makefile
shell: bash
run: |
if [ -n "${{ inputs.python-version }}" ]; then
echo "py_version=${{ inputs.python-version }}" >> $GITHUB_ENV
else
cd ${{ inputs.working-directory }}
echo "py_version=`make PYTHON_VERSION`" >> $GITHUB_ENV
fi
- name: Install python
uses: actions/setup-python@v5
with:
python-version: ${{ env.py_version }}


@@ -1,29 +0,0 @@
name: 'Setup SSH for GitHub'
description: 'Configure SSH for private repository access'
inputs:
ssh-private-key:
description: 'SSH private key for repository access'
required: false
default: ''
runs:
using: composite
steps:
- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ inputs.ssh-private-key }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ inputs.ssh-private-key }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi
- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}


@@ -8,10 +8,3 @@ updates:
labels: labels:
- "docs" - "docs"
- "dependencies" - "dependencies"
- package-ecosystem: "pip"
directory: "requirements/"
schedule:
interval: "daily" #run daily until we trust it, then back this off to weekly
open-pull-requests-limit: 2
labels:
- "dependencies"


@@ -130,7 +130,7 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v5
with: with:
python-version: '3.x' python-version: '3.x'
@@ -161,10 +161,6 @@ jobs:
show-progress: false show-progress: false
path: awx path: awx
- uses: ./awx/.github/actions/setup-ssh-agent
with:
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
- name: Checkout awx-operator - name: Checkout awx-operator
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
@@ -172,15 +168,39 @@ jobs:
repository: ansible/awx-operator repository: ansible/awx-operator
path: awx-operator path: awx-operator
- name: Setup python, referencing action at awx relative path - name: Get python version from Makefile
uses: ./awx/.github/actions/setup-python working-directory: awx
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with: with:
python-version: '3.x' python-version: ${{ env.py_version }}
- name: Install playbook dependencies - name: Install playbook dependencies
run: | run: |
python3 -m pip install docker python3 -m pip install docker
- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi
- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
- name: Build AWX image - name: Build AWX image
working-directory: awx working-directory: awx
run: | run: |
@@ -279,7 +299,7 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v5
with: with:
python-version: '3.x' python-version: '3.x'
@@ -336,7 +356,6 @@ jobs:
with: with:
name: coverage-${{ matrix.target-regex.name }} name: coverage-${{ matrix.target-regex.name }}
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/ path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
retention-days: 1
- uses: ./.github/actions/upload_awx_devel_logs - uses: ./.github/actions/upload_awx_devel_logs
if: always() if: always()
@@ -354,22 +373,32 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
persist-credentials: false
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v5
with: with:
python-version: '3.x' python-version: '3.x'
- name: Upgrade ansible-core - name: Upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core run: python3 -m pip install --upgrade ansible-core
- name: Download coverage artifacts - name: Download coverage artifacts A to H
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
with: with:
merge-multiple: true name: coverage-a-h
path: coverage
- name: Download coverage artifacts I to P
uses: actions/download-artifact@v4
with:
name: coverage-i-p
path: coverage
- name: Download coverage artifacts Z to Z
uses: actions/download-artifact@v4
with:
name: coverage-r-z0-9
path: coverage path: coverage
pattern: coverage-*
- name: Combine coverage - name: Combine coverage
run: | run: |
@@ -387,6 +416,46 @@ jobs:
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
# This is a huge hack, there's no official action for removing artifacts currently.
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
# steps, so we have to use github-script to get them.
#
# The advantage of doing this, though, is that we save on artifact storage space.
- name: Get secret artifact runtime URL
uses: actions/github-script@v6
id: get-runtime-url
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_URL } = process.env;
return ACTIONS_RUNTIME_URL;
- name: Get secret artifact runtime token
uses: actions/github-script@v6
id: get-runtime-token
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_TOKEN } = process.env;
return ACTIONS_RUNTIME_TOKEN;
- name: Remove intermediary artifacts
env:
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
run: |
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
artifacts=$(
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
)
for artifact in $artifacts; do
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
done
- name: Upload coverage report as artifact - name: Upload coverage report as artifact
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:


@@ -10,7 +10,6 @@ on:
- devel - devel
- release_* - release_*
- feature_* - feature_*
- stable-*
jobs: jobs:
push-development-images: push-development-images:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -50,10 +49,14 @@ jobs:
run: | run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
env: env:
OWNER: '${{ github.repository_owner }}' OWNER: '${{ github.repository_owner }}'
- uses: ./.github/actions/setup-python - name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.py_version }}
- name: Log in to registry - name: Log in to registry
run: | run: |
@@ -70,9 +73,25 @@ jobs:
make ui make ui
if: matrix.build-targets.image-name == 'awx' if: matrix.build-targets.image-name == 'awx'
- uses: ./.github/actions/setup-ssh-agent - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi
- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with: with:
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }} ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
- name: Build and push AWX devel images - name: Build and push AWX devel images
run: | run: |


@@ -12,7 +12,7 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v5
with: with:
python-version: '3.x' python-version: '3.x'


@@ -34,11 +34,9 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v4
- name: Install python requests - name: Install python requests
run: pip install requests run: pip install requests
- name: Check if user is a member of Ansible org - name: Check if user is a member of Ansible org
uses: jannekem/run-python-script-action@v1 uses: jannekem/run-python-script-action@v1
id: check_user id: check_user


@@ -33,7 +33,7 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - uses: actions/setup-python@v5
with: with:
python-version: '3.x' python-version: '3.x'


@@ -36,7 +36,13 @@ jobs:
with: with:
show-progress: false show-progress: false
- uses: ./.github/actions/setup-python - name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.py_version }}
- name: Install dependencies - name: Install dependencies
run: | run: |


@@ -1,85 +0,0 @@
---
name: SonarQube
on:
workflow_run:
workflows:
- CI
types:
- completed
permissions: read-all
jobs:
sonarqube:
runs-on: ubuntu-latest
if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
fetch-depth: 0
show-progress: false
- name: Download coverage report artifact
uses: actions/download-artifact@v4
with:
name: coverage-report
path: reports/
github-token: ${{ secrets.GITHUB_TOKEN }}
run-id: ${{ github.event.workflow_run.id }}
- name: Download PR number artifact
uses: actions/download-artifact@v4
with:
name: pr-number
path: .
github-token: ${{ secrets.GITHUB_TOKEN }}
run-id: ${{ github.event.workflow_run.id }}
- name: Extract PR number
run: |
cat pr-number.txt
echo "PR_NUMBER=$(cat pr-number.txt)" >> $GITHUB_ENV
- name: Get PR info
uses: octokit/request-action@v2.x
id: pr_info
with:
route: GET /repos/{repo}/pulls/{number}
repo: ${{ github.event.repository.full_name }}
number: ${{ env.PR_NUMBER }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set PR info into env
run: |
echo "PR_BASE=${{ fromJson(steps.pr_info.outputs.data).base.ref }}" >> $GITHUB_ENV
echo "PR_HEAD=${{ fromJson(steps.pr_info.outputs.data).head.ref }}" >> $GITHUB_ENV
- name: Add base branch
run: |
gh pr checkout ${{ env.PR_NUMBER }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Extract and export repo owner/name
run: |
REPO_SLUG="${GITHUB_REPOSITORY}"
IFS="/" read -r REPO_OWNER REPO_NAME <<< "$REPO_SLUG"
echo "REPO_OWNER=$REPO_OWNER" >> $GITHUB_ENV
echo "REPO_NAME=$REPO_NAME" >> $GITHUB_ENV
- name: SonarQube scan
uses: SonarSource/sonarqube-scan-action@v5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets[format('{0}', vars.SONAR_TOKEN_SECRET_NAME)] }}
with:
args: >
-Dsonar.organization=${{ env.REPO_OWNER }}
-Dsonar.projectKey=${{ env.REPO_OWNER }}_${{ env.REPO_NAME }}
-Dsonar.pullrequest.key=${{ env.PR_NUMBER }}
-Dsonar.pullrequest.branch=${{ env.PR_HEAD }}
-Dsonar.pullrequest.base=${{ env.PR_BASE }}
-Dsonar.scm.revision=${{ github.event.workflow_run.head_sha }}


@@ -64,9 +64,14 @@ jobs:
repository: ansible/awx-logos repository: ansible/awx-logos
path: awx-logos path: awx-logos
- uses: ./awx/.github/actions/setup-python - name: Get python version from Makefile
working-directory: awx
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with: with:
working-directory: awx python-version: ${{ env.py_version }}
- name: Install playbook dependencies - name: Install playbook dependencies
run: | run: |
@@ -85,11 +90,9 @@ jobs:
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/ cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
- name: Setup node and npm for new UI build - name: Setup node and npm for new UI build
uses: actions/setup-node@v4 uses: actions/setup-node@v2
with: with:
node-version: '18' node-version: '18'
cache: 'npm'
cache-dependency-path: awx/awx/ui/**/package-lock.json
- name: Prebuild new UI for awx image (to speed up build process) - name: Prebuild new UI for awx image (to speed up build process)
working-directory: awx working-directory: awx


@@ -11,7 +11,6 @@ on:
- devel - devel
- release_** - release_**
- feature_** - feature_**
- stable-**
jobs: jobs:
push: push:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -24,26 +23,57 @@ jobs:
with: with:
show-progress: false show-progress: false
- name: Build awx_devel image to use for schema gen - name: Get python version from Makefile
uses: ./.github/actions/awx_devel_image run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with: with:
github-token: ${{ secrets.GITHUB_TOKEN }} python-version: ${{ env.py_version }}
private-github-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi
- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
- name: Pre-pull image to warm build cache
run: |
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
- name: Build image
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
- name: Generate API Schema - name: Generate API Schema
run: | run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
COMPOSE_TAG=${{ github.base_ref || github.ref_name }} \
docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \ docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \
--workdir=/awx_devel `make print-DEVEL_IMAGE_NAME` /start_tests.sh genschema --workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} /start_tests.sh genschema
- name: Upload API Schema - name: Upload API Schema
uses: keithweaver/aws-s3-github-action@4dd5a7b81d54abaa23bbac92b27e85d7f405ae53 env:
with: AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
command: cp AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
source: ${{ github.workspace }}/schema.json AWS_REGION: 'us-east-1'
destination: s3://awx-public-ci-files/${{ github.ref_name }}/schema.json run: |
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY }} ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
aws_secret_access_key: ${{ secrets.AWS_SECRET_KEY }} ansible localhost -c local -m aws_s3 \
aws_region: us-east-1 -a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"
flags: --acl public-read --only-show-errors

.gitignore

@@ -150,8 +150,6 @@ use_dev_supervisor.txt
awx/ui/src awx/ui/src
awx/ui/build awx/ui/build
awx/ui/.ui-built
awx/ui_next
# Docs build stuff # Docs build stuff
docs/docsite/build/ docs/docsite/build/


@@ -19,12 +19,6 @@ COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d .
COLLECTION_SANITY_ARGS ?= --docker COLLECTION_SANITY_ARGS ?= --docker
# collection unit testing directories # collection unit testing directories
COLLECTION_TEST_DIRS ?= awx_collection/test/awx COLLECTION_TEST_DIRS ?= awx_collection/test/awx
# pytest added args to collect coverage
COVERAGE_ARGS ?= --cov --cov-report=xml --junitxml=reports/junit.xml
# pytest test directories
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
# pytest args to run tests in parallel
PARALLEL_TESTS ?= -n auto
# collection integration test directories (defaults to all) # collection integration test directories (defaults to all)
COLLECTION_TEST_TARGET ?= COLLECTION_TEST_TARGET ?=
# args for collection install # args for collection install
@@ -77,7 +71,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
# These should be upgraded in the AWX and Ansible venv before attempting # These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements # to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==80.9.0 setuptools_scm[toml]==8.0.4 wheel==0.42.0 cython==3.1.3 VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==70.3.0 setuptools_scm[toml]==8.1.0 wheel==0.45.1 cython==3.0.11
NAME ?= awx NAME ?= awx
@@ -315,14 +309,14 @@ black: reports
@chmod +x .git/hooks/pre-commit @chmod +x .git/hooks/pre-commit
genschema: reports genschema: reports
$(MAKE) swagger PYTEST_ADDOPTS="--genschema --create-db " $(MAKE) swagger PYTEST_ARGS="--genschema --create-db "
mv swagger.json schema.json mv swagger.json schema.json
swagger: reports swagger: reports
@if [ "$(VENV_BASE)" ]; then \ @if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
(set -o pipefail && py.test $(COVERAGE_ARGS) $(PARALLEL_TESTS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report) (set -o pipefail && py.test --cov --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \ @if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \ then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \ echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -340,12 +334,14 @@ api-lint:
awx-link: awx-link:
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev [ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
PYTEST_ARGS ?= -n auto
## Run all API unit tests. ## Run all API unit tests.
test: test:
if [ "$(VENV_BASE)" ]; then \ if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \ . $(VENV_BASE)/awx/bin/activate; \
fi; \ fi; \
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PARALLEL_TESTS) $(TEST_DIRS) PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PYTEST_ARGS) $(TEST_DIRS)
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3 cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file' awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
@@ -354,7 +350,7 @@ live_test:
## Run all API unit tests with coverage enabled. ## Run all API unit tests with coverage enabled.
test_coverage: test_coverage:
$(MAKE) test PYTEST_ADDOPTS="--create-db $(COVERAGE_ARGS)" $(MAKE) test PYTEST_ARGS="--create-db --cov --cov-report=xml --junitxml=reports/junit.xml"
@if [ "${GITHUB_ACTIONS}" = "true" ]; \ @if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \ then \
echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \ echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -362,7 +358,7 @@ test_coverage:
fi fi
test_migrations: test_migrations:
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db $(PARALLEL_TESTS) $(COVERAGE_ARGS) $(TEST_DIRS) PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db --cov=awx --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) $(TEST_DIRS)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \ @if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \ then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \ echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -380,7 +376,7 @@ test_collection:
fi && \ fi && \
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
ansible --version ansible --version
py.test $(COLLECTION_TEST_DIRS) $(COVERAGE_ARGS) -v py.test $(COLLECTION_TEST_DIRS) --cov --cov-report=xml --junitxml=reports/junit.xml -v
@if [ "${GITHUB_ACTIONS}" = "true" ]; \ @if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \ then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \ echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \


@@ -3,17 +3,6 @@
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" /> <img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
> [!CAUTION]
> The last release of this repository was released on Jul 2, 2024.
> **Releases of this project are now paused during a large scale refactoring.**
> For more information, follow [the Forum](https://forum.ansible.com/) and - more specifically - see the various communications on the matter:
>
> * [Blog: Upcoming Changes to the AWX Project](https://www.ansible.com/blog/upcoming-changes-to-the-awx-project/)
> * [Streamlining AWX Releases](https://forum.ansible.com/t/streamlining-awx-releases/6894) Primary update
> * [Refactoring AWX into a Pluggable, Service-Oriented Architecture](https://forum.ansible.com/t/refactoring-awx-into-a-pluggable-service-oriented-architecture/7404)
> * [Upcoming changes to AWX Operator installation methods](https://forum.ansible.com/t/upcoming-changes-to-awx-operator-installation-methods/7598)
> * [AWX UI and credential types transitioning to the new pluggable architecture](https://forum.ansible.com/t/awx-ui-and-credential-types-transitioning-to-the-new-pluggable-architecture/8027)
AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform). AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).
To install AWX, please view the [Install guide](./INSTALL.md). To install AWX, please view the [Install guide](./INSTALL.md).


@@ -844,7 +844,7 @@ class ResourceAccessList(ParentMixin, ListAPIView):
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True)) ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True) qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
auditor_role = RoleDefinition.objects.filter(name="Platform Auditor").first() auditor_role = RoleDefinition.objects.filter(name="Controller System Auditor").first()
if auditor_role: if auditor_role:
qs |= User.objects.filter(role_assignments__role_definition=auditor_role) qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
return qs.distinct() return qs.distinct()


@@ -10,7 +10,7 @@ from rest_framework import permissions
# AWX # AWX
from awx.main.access import check_user_access from awx.main.access import check_user_access
from awx.main.models import Inventory, UnifiedJob, Organization from awx.main.models import Inventory, UnifiedJob
from awx.main.utils import get_object_or_400 from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions') logger = logging.getLogger('awx.api.permissions')
@@ -228,19 +228,12 @@ class InventoryInventorySourcesUpdatePermission(ModelAccessPermission):
class UserPermission(ModelAccessPermission): class UserPermission(ModelAccessPermission):
def check_post_permissions(self, request, view, obj=None): def check_post_permissions(self, request, view, obj=None):
if not request.data: if not request.data:
return Organization.access_qs(request.user, 'change').exists() return request.user.admin_of_organizations.exists()
elif request.user.is_superuser: elif request.user.is_superuser:
return True return True
raise PermissionDenied() raise PermissionDenied()
class IsSystemAdmin(permissions.BasePermission):
def has_permission(self, request, view):
if not (request.user and request.user.is_authenticated):
return False
return request.user.is_superuser
class IsSystemAdminOrAuditor(permissions.BasePermission): class IsSystemAdminOrAuditor(permissions.BasePermission):
""" """
Allows write access only to system admin users. Allows write access only to system admin users.


@@ -6,8 +6,6 @@ import copy
import json import json
import logging import logging
import re import re
import yaml
import urllib.parse
from collections import Counter, OrderedDict from collections import Counter, OrderedDict
from datetime import timedelta from datetime import timedelta
from uuid import uuid4 from uuid import uuid4
@@ -117,7 +115,6 @@ from awx.main.utils import (
from awx.main.utils.filters import SmartFilter from awx.main.utils.filters import SmartFilter
from awx.main.utils.plugins import load_combined_inventory_source_options from awx.main.utils.plugins import load_combined_inventory_source_options
from awx.main.utils.named_url_graph import reset_counters from awx.main.utils.named_url_graph import reset_counters
from awx.main.utils.inventory_vars import update_group_variables
from awx.main.scheduler.task_manager_models import TaskManagerModels from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.redact import UriCleaner, REPLACE_STR from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.signals import update_inventory_computed_fields from awx.main.signals import update_inventory_computed_fields
@@ -629,41 +626,15 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
return exclusions return exclusions
def validate(self, attrs): def validate(self, attrs):
"""
Apply serializer validation. Called by DRF.
Can be extended by subclasses. Or consider overwriting
`validate_with_obj` in subclasses, which provides access to the model
object and exception handling for field validation.
:param dict attrs: The names and values of the model form fields.
:raise rest_framework.exceptions.ValidationError: If the validation
fails.
The exception must contain a dict with the names of the form fields
which failed validation as keys, and a list of error messages as
values. This ensures that the error messages are rendered near the
relevant fields.
:return: The names and values from the model form fields, possibly
modified by the validations.
:rtype: dict
"""
attrs = super(BaseSerializer, self).validate(attrs) attrs = super(BaseSerializer, self).validate(attrs)
# Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
# Create a new model instance or take the existing one if it exists,
# and update its attributes with the respective field values from
# attrs.
obj = self.instance or self.Meta.model()
for k, v in attrs.items():
if k not in exclusions and k != 'canonical_address_port':
setattr(obj, k, v)
try: try:
# Run serializer validators which need the model object for # Create/update a model instance and run its full_clean() method to
# validation. # do any validation implemented on the model class.
self.validate_with_obj(attrs, obj) exclusions = self.get_validation_exclusions(self.instance)
# Apply any validations implemented on the model class. obj = self.instance or self.Meta.model()
for k, v in attrs.items():
if k not in exclusions and k != 'canonical_address_port':
setattr(obj, k, v)
obj.full_clean(exclude=exclusions) obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes # full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved. # back to attrs so they are saved.
@@ -692,32 +663,6 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
raise ValidationError(d) raise ValidationError(d)
return attrs return attrs
def validate_with_obj(self, attrs, obj):
"""
Overwrite this if you need the model instance for your validation.
:param dict attrs: The names and values of the model form fields.
:param obj: An instance of the class's meta model.
If the serializer runs on a newly created object, obj contains only
the attrs from its serializer. If the serializer runs because an
object has been edited, obj is the existing model instance with all
attributes and values available.
:raise django.core.exceptionsValidationError: Raise this if your
validation fails.
To make the error appear at the respective form field, instantiate
the Exception with a dict containing the field name as key and the
error message as value.
Example: ``ValidationError({"password": "Not good enough!"})``
If the exception contains just a string, the message cannot be
related to a field and is rendered at the top of the model form.
:return: None
"""
return
def reverse(self, *args, **kwargs): def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request') kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs) return reverse(*args, **kwargs)
@@ -734,29 +679,15 @@ class EmptySerializer(serializers.Serializer):
pass pass
class OpaQueryPathMixin(serializers.Serializer): class UnifiedJobTemplateSerializer(BaseSerializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def validate_opa_query_path(self, value):
# Decode the URL and re-encode it
decoded_value = urllib.parse.unquote(value)
re_encoded_value = urllib.parse.quote(decoded_value, safe='/')
if value != re_encoded_value:
raise serializers.ValidationError(_("The URL must be properly encoded."))
return value
class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathMixin):
# As a base serializer, the capabilities prefetch is not used directly, # As a base serializer, the capabilities prefetch is not used directly,
# instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively. # instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
priority = serializers.IntegerField(required=False, min_value=0, max_value=32000)
capabilities_prefetch = [] capabilities_prefetch = []
class Meta: class Meta:
model = UnifiedJobTemplate model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed', 'next_job_run', 'status', 'execution_environment') fields = ('*', 'last_job_run', 'last_job_failed', 'next_job_run', 'status', 'priority', 'execution_environment')
def get_related(self, obj): def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj) res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
@@ -1054,6 +985,7 @@ class UserSerializer(BaseSerializer):
return ret return ret
def validate_password(self, value): def validate_password(self, value):
django_validate_password(value)
if not self.instance and value in (None, ''): if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.')) raise serializers.ValidationError(_('Password required for new User.'))
@@ -1076,50 +1008,6 @@ class UserSerializer(BaseSerializer):
return value return value
def validate_with_obj(self, attrs, obj):
"""
Validate the password with the Django password validators
To enable the Django password validators, configure
`settings.AUTH_PASSWORD_VALIDATORS` as described in the [Django
docs](https://docs.djangoproject.com/en/5.1/topics/auth/passwords/#enabling-password-validation)
:param dict attrs: The User form field names and their values as a dict.
Example::
{
'username': 'TestUsername', 'first_name': 'FirstName',
'last_name': 'LastName', 'email': 'First.Last@my.org',
'is_superuser': False, 'is_system_auditor': False,
'password': 'secret123'
}
:param obj: The User model instance.
:raises django.core.exceptions.ValidationError: Raise this if at least
one Django password validator fails.
The exception contains a dict ``{"password": <error-message>``}
which indicates that the password field has failed validation, and
the reason for failure.
:return: None.
"""
# We must do this here instead of in `validate_password` because some
# django password validators need access to other model instance fields,
# e.g. ``username`` for the ``UserAttributeSimilarityValidator``.
password = attrs.get("password")
# Skip validation if no password has been entered. This may happen when
# an existing User is edited.
if password and password != '$encrypted$':
# Apply validators from settings.AUTH_PASSWORD_VALIDATORS. This may
# raise ValidationError.
#
# If the validation fails, re-raise the exception with adjusted
# content to make the error appear near the password field.
try:
django_validate_password(password, user=obj)
except DjangoValidationError as exc:
raise DjangoValidationError({"password": exc.messages})
def _update_password(self, obj, new_password): def _update_password(self, obj, new_password):
if new_password and new_password != '$encrypted$': if new_password and new_password != '$encrypted$':
obj.set_password(new_password) obj.set_password(new_password)
@@ -1182,12 +1070,12 @@ class UserActivityStreamSerializer(UserSerializer):
fields = ('*', '-is_system_auditor') fields = ('*', '-is_system_auditor')
class OrganizationSerializer(BaseSerializer, OpaQueryPathMixin): class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete'] show_capabilities = ['edit', 'delete']
class Meta: class Meta:
model = Organization model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment', 'opa_query_path') fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment')
read_only_fields = ('*', 'custom_virtualenv') read_only_fields = ('*', 'custom_virtualenv')
def get_related(self, obj): def get_related(self, obj):
@@ -1541,7 +1429,7 @@ class LabelsListMixin(object):
return res return res
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathMixin): class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy'] show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}] capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]
@@ -1562,7 +1450,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery
'inventory_sources_with_failures', 'inventory_sources_with_failures',
'pending_deletion', 'pending_deletion',
'prevent_instance_group_fallback', 'prevent_instance_group_fallback',
'opa_query_path',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -1632,68 +1519,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery
if kind == 'smart' and not host_filter: if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')}) raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs) return super(InventorySerializer, self).validate(attrs)
@staticmethod
def _update_variables(variables, inventory_id):
"""
Update the inventory variables of the 'all'-group.
The variables field contains vars from the inventory dialog, hence
representing the "all"-group variables.
Since this is not an update from an inventory source, we update the
variables when the inventory details form is saved.
A user edit on the inventory variables is considered a reset of the
variables update history. Particularly if the user removes a variable by
editing the inventory variables field, the variable is not supposed to
reappear with a value from a previous inventory source update.
We achieve this by forcing `reset=True` on such an update.
As a side-effect, variables which have been set by source updates and
have survived a user-edit (i.e. they have not been deleted from the
variables field) will be assumed to originate from the user edit and are
thus no longer deleted from the inventory when they are removed from
their original source!
Note that we use the inventory source id -1 for user-edit updates
because a regular inventory source cannot have an id of -1 since
PostgreSQL assigns pk's starting from 1 (if this assumption doesn't hold
true, we have to assign another special value for invsrc_id).
:param str variables: The variables as plain text in yaml or json
format.
:param int inventory_id: The primary key of the related inventory
object.
"""
variables_dict = parse_yaml_or_json(variables, silent_failure=False)
logger.debug(f"InventorySerializer._update_variables: {inventory_id=} {variables_dict=}, {variables=}")
update_group_variables(
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
newvars=variables_dict,
dbvars=None,
invsrc_id=-1,
inventory_id=inventory_id,
reset=True,
)
def create(self, validated_data):
"""Called when a new inventory has to be created."""
logger.debug(f"InventorySerializer.create({validated_data=}) >>>>")
obj = super().create(validated_data)
self._update_variables(validated_data.get("variables") or "", obj.id)
return obj
def update(self, obj, validated_data):
"""Called when an existing inventory is updated."""
logger.debug(f"InventorySerializer.update({validated_data=}) >>>>")
obj = super().update(obj, validated_data)
self._update_variables(validated_data.get("variables") or "", obj.id)
return obj
class ConstructedFieldMixin(serializers.Field): class ConstructedFieldMixin(serializers.Field):
def get_attribute(self, instance): def get_attribute(self, instance):
@@ -1983,12 +1810,10 @@ class GroupSerializer(BaseSerializerWithVariables):
return res return res
def validate(self, attrs): def validate(self, attrs):
# Do not allow the group name to conflict with an existing host name.
name = force_str(attrs.get('name', self.instance and self.instance.name or '')) name = force_str(attrs.get('name', self.instance and self.instance.name or ''))
inventory = attrs.get('inventory', self.instance and self.instance.inventory or '') inventory = attrs.get('inventory', self.instance and self.instance.inventory or '')
if Host.objects.filter(name=name, inventory=inventory).exists(): if Host.objects.filter(name=name, inventory=inventory).exists():
raise serializers.ValidationError(_('A Host with that name already exists.')) raise serializers.ValidationError(_('A Host with that name already exists.'))
#
return super(GroupSerializer, self).validate(attrs) return super(GroupSerializer, self).validate(attrs)
def validate_name(self, value): def validate_name(self, value):
@@ -2839,7 +2664,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
{ {
"role": { "role": {
"id": None, "id": None,
"name": _("Platform Auditor"), "name": _("Controller System Auditor"),
"description": _("Can view all aspects of the system"), "description": _("Can view all aspects of the system"),
"user_capabilities": {"unattach": False}, "user_capabilities": {"unattach": False},
}, },
@@ -3027,6 +2852,11 @@ class CredentialSerializer(BaseSerializer):
ret.remove(field) ret.remove(field)
return ret return ret
def validate_organization(self, org):
if self.instance and (not self.instance.managed) and self.instance.credential_type.kind == 'galaxy' and org is None:
raise serializers.ValidationError(_("Galaxy credentials must be owned by an Organization."))
return org
def validate_credential_type(self, credential_type): def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk: if self.instance and credential_type.pk != self.instance.credential_type.pk:
for related_objects in ( for related_objects in (
@@ -3102,6 +2932,9 @@ class CredentialSerializerCreate(CredentialSerializer):
if attrs.get('team'): if attrs.get('team'):
attrs['organization'] = attrs['team'].organization attrs['organization'] = attrs['team'].organization
if 'credential_type' in attrs and attrs['credential_type'].kind == 'galaxy' and list(owner_fields) != ['organization']:
raise serializers.ValidationError({"organization": _("Galaxy credentials must be owned by an Organization.")})
return super(CredentialSerializerCreate, self).validate(attrs) return super(CredentialSerializerCreate, self).validate(attrs)
def create(self, validated_data): def create(self, validated_data):
@@ -3164,6 +2997,7 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
'scm_branch', 'scm_branch',
'forks', 'forks',
'limit', 'limit',
'priority',
'verbosity', 'verbosity',
'extra_vars', 'extra_vars',
'job_tags', 'job_tags',
@@ -3286,6 +3120,7 @@ class JobTemplateMixin(object):
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer): class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete'] show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = ['admin', 'execute', {'copy': ['project.use', 'inventory.use']}] capabilities_prefetch = ['admin', 'execute', {'copy': ['project.use', 'inventory.use']}]
priority = serializers.IntegerField(required=False, min_value=0, max_value=32000)
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False) status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
@@ -3293,6 +3128,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
model = JobTemplate model = JobTemplate
fields = ( fields = (
'*', '*',
'priority',
'host_config_key', 'host_config_key',
'ask_scm_branch_on_launch', 'ask_scm_branch_on_launch',
'ask_diff_mode_on_launch', 'ask_diff_mode_on_launch',
@@ -3319,7 +3155,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'prevent_instance_group_fallback', 'prevent_instance_group_fallback',
'opa_query_path',
) )
read_only_fields = ('*', 'custom_virtualenv') read_only_fields = ('*', 'custom_virtualenv')
@@ -3421,6 +3256,7 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
'diff_mode', 'diff_mode',
'job_slice_number', 'job_slice_number',
'job_slice_count', 'job_slice_count',
'priority',
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'webhook_guid', 'webhook_guid',
@@ -3871,6 +3707,7 @@ class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer): class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
priority = serializers.IntegerField(required=False, min_value=0, max_value=32000)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
@@ -3891,6 +3728,7 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
'-controller_node', '-controller_node',
'inventory', 'inventory',
'limit', 'limit',
'priority',
'scm_branch', 'scm_branch',
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
@@ -4008,6 +3846,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None, choices=NEW_JOB_TYPE_CHOICES) job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None, choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
priority = serializers.IntegerField(required=False, min_value=0, max_value=32000)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None) diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES) verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
@@ -4026,6 +3865,7 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
'job_tags', 'job_tags',
'skip_tags', 'skip_tags',
'limit', 'limit',
'priority',
'skip_tags', 'skip_tags',
'diff_mode', 'diff_mode',
'verbosity', 'verbosity',
@@ -4519,6 +4359,7 @@ class JobLaunchSerializer(BaseSerializer):
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True) job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
priority = serializers.IntegerField(required=False, write_only=False, min_value=0, max_value=32000)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True) verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True) execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True) labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
@@ -4536,6 +4377,7 @@ class JobLaunchSerializer(BaseSerializer):
'inventory', 'inventory',
'scm_branch', 'scm_branch',
'limit', 'limit',
'priority',
'job_tags', 'job_tags',
'skip_tags', 'skip_tags',
'job_type', 'job_type',
@@ -4721,6 +4563,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
extra_vars = VerbatimField(required=False, write_only=True) extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True) inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
priority = serializers.IntegerField(required=False, write_only=False, min_value=0, max_value=32000)
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True) scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
workflow_job_template_data = serializers.SerializerMethodField() workflow_job_template_data = serializers.SerializerMethodField()
@@ -4860,13 +4703,14 @@ class BulkJobLaunchSerializer(serializers.Serializer):
) )
inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True) inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True)
limit = serializers.CharField(write_only=True, required=False, allow_blank=False) limit = serializers.CharField(write_only=True, required=False, allow_blank=False)
# priority = serializers.IntegerField(write_only=True, required=False, min_value=0, max_value=32000)
scm_branch = serializers.CharField(write_only=True, required=False, allow_blank=False) scm_branch = serializers.CharField(write_only=True, required=False, allow_blank=False)
skip_tags = serializers.CharField(write_only=True, required=False, allow_blank=False) skip_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
job_tags = serializers.CharField(write_only=True, required=False, allow_blank=False) job_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
class Meta: class Meta:
model = WorkflowJob model = WorkflowJob
fields = ('name', 'jobs', 'description', 'extra_vars', 'organization', 'inventory', 'limit', 'scm_branch', 'skip_tags', 'job_tags') fields = ('name', 'jobs', 'description', 'extra_vars', 'organization', 'inventory', 'limit', 'priority', 'scm_branch', 'skip_tags', 'job_tags')
read_only_fields = () read_only_fields = ()
def validate(self, attrs): def validate(self, attrs):
@@ -5990,34 +5834,6 @@ class InstanceGroupSerializer(BaseSerializer):
raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group')) raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
return value return value
def validate_pod_spec_override(self, value):
if not value:
return value
# value should be empty for non-container groups
if self.instance and not self.instance.is_container_group:
raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))
pod_spec_override_json = {}
# detect if the value is yaml or json; if yaml, convert to json
try:
# convert yaml to json
pod_spec_override_json = yaml.safe_load(value)
except yaml.YAMLError:
try:
pod_spec_override_json = json.loads(value)
except json.JSONDecodeError:
raise serializers.ValidationError(_('pod_spec_override must be valid yaml or json'))
# validate the
spec = pod_spec_override_json.get('spec', {})
automount_service_account_token = spec.get('automountServiceAccountToken', False)
if automount_service_account_token:
raise serializers.ValidationError(_('automountServiceAccountToken is not allowed for security reasons'))
return value
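One note on the yaml-then-json fallback in the validator removed by this hunk: with standard PyYAML behaviour (nothing specific to this codebase), yaml.safe_load already accepts JSON flow syntax, so the json.loads branch is mostly a safety net for edge cases.

import yaml

# JSON's braces-and-quotes syntax is valid YAML flow style, so this parses
# without ever reaching json.loads.
spec = yaml.safe_load('{"spec": {"automountServiceAccountToken": true}}')
print(spec['spec']['automountServiceAccountToken'])  # True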
def validate(self, attrs): def validate(self, attrs):
attrs = super(InstanceGroupSerializer, self).validate(attrs) attrs = super(InstanceGroupSerializer, self).validate(attrs)


@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList, RoleParentsList, RoleChildrenList
urls = [ urls = [
@@ -11,6 +11,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'), re_path(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'),
re_path(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'), re_path(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'),
re_path(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'), re_path(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'),
re_path(r'^(?P<pk>[0-9]+)/parents/$', RoleParentsList.as_view(), name='role_parents_list'),
re_path(r'^(?P<pk>[0-9]+)/children/$', RoleChildrenList.as_view(), name='role_children_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']


@@ -55,7 +55,8 @@ from wsgiref.util import FileWrapper
# django-ansible-base # django-ansible-base
from ansible_base.lib.utils.requests import get_remote_hosts from ansible_base.lib.utils.requests import get_remote_hosts
from ansible_base.rbac.models import RoleEvaluation from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
# AWX # AWX
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
@@ -84,6 +85,7 @@ from awx.api.generics import (
from awx.api.views.labels import LabelSubListCreateAttachDetachView from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main import models from awx.main import models
from awx.main.models.rbac import get_role_definition
from awx.main.utils import ( from awx.main.utils import (
camelcase_to_underscore, camelcase_to_underscore,
extract_ansible_vars, extract_ansible_vars,
@@ -669,16 +671,81 @@ class ScheduleUnifiedJobsList(SubListAPIView):
name = _('Schedule Jobs List') name = _('Schedule Jobs List')
def immutablesharedfields(cls):
'''
Class decorator to prevent modifying shared resources when ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is set to False.
Works by overriding these view methods:
- create
- delete
- perform_update
create and delete are overridden to raise a PermissionDenied exception.
perform_update is overridden to check if any shared fields are being modified,
and raise a PermissionDenied exception if so.
'''
# create instead of perform_create because some of our views
# override create instead of perform_create
if hasattr(cls, 'create'):
cls.original_create = cls.create
@functools.wraps(cls.create)
def create_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_create(*args, **kwargs)
raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})
cls.create = create_wrapper
if hasattr(cls, 'delete'):
cls.original_delete = cls.delete
@functools.wraps(cls.delete)
def delete_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_delete(*args, **kwargs)
raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})
cls.delete = delete_wrapper
if hasattr(cls, 'perform_update'):
cls.original_perform_update = cls.perform_update
@functools.wraps(cls.perform_update)
def update_wrapper(*args, **kwargs):
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
view, serializer = args
instance = view.get_object()
if instance:
if isinstance(instance, models.Organization):
shared_fields = OrganizationType._declared_fields.keys()
elif isinstance(instance, models.User):
shared_fields = UserType._declared_fields.keys()
elif isinstance(instance, models.Team):
shared_fields = TeamType._declared_fields.keys()
attrs = serializer.validated_data
for field in shared_fields:
if field in attrs and getattr(instance, field) != attrs[field]:
raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
return cls.original_perform_update(*args, **kwargs)
cls.perform_update = update_wrapper
return cls
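For readers unfamiliar with this style of class decorator, the toy sketch below shows the same wrap-and-reassign idea in plain Python; the names and the PermissionError are made up for illustration, and no Django or DRF machinery is involved.

import functools

FEATURE_ENABLED = False  # stand-in for the ALLOW_LOCAL_RESOURCE_MANAGEMENT setting

def guard_create(cls):
    # Capture the original method, wrap it, and reassign it on the class,
    # mirroring how the decorator above overrides create/delete/perform_update.
    if hasattr(cls, 'create'):
        original_create = cls.create

        @functools.wraps(cls.create)
        def create_wrapper(*args, **kwargs):
            if FEATURE_ENABLED:
                return original_create(*args, **kwargs)
            raise PermissionError('Creation disabled; use the platform ingress.')

        cls.create = create_wrapper
    return cls

@guard_create
class TeamView:
    def create(self, data):
        return {'created': data}

try:
    TeamView().create({'name': 'net-ops'})
except PermissionError as exc:
    print(exc)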
@immutablesharedfields
class TeamList(ListCreateAPIView): class TeamList(ListCreateAPIView):
model = models.Team model = models.Team
serializer_class = serializers.TeamSerializer serializer_class = serializers.TeamSerializer
@immutablesharedfields
class TeamDetail(RetrieveUpdateDestroyAPIView): class TeamDetail(RetrieveUpdateDestroyAPIView):
model = models.Team model = models.Team
serializer_class = serializers.TeamSerializer serializer_class = serializers.TeamSerializer
@immutablesharedfields
class TeamUsersList(BaseUsersList): class TeamUsersList(BaseUsersList):
model = models.User model = models.User
serializer_class = serializers.UserSerializer serializer_class = serializers.UserSerializer
@@ -720,19 +787,9 @@ class TeamRolesList(SubListAttachDetachAPIView):
team = get_object_or_404(models.Team, pk=self.kwargs['pk']) team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
credential_content_type = ContentType.objects.get_for_model(models.Credential) credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type: if role.content_type == credential_content_type:
if not role.content_object.organization: if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
data = dict( data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
)
return Response(data, status=status.HTTP_400_BAD_REQUEST) return Response(data, status=status.HTTP_400_BAD_REQUEST)
elif role.content_object.organization.id != team.organization.id:
if not request.user.is_superuser:
data = dict(
msg=_(
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
)
)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(TeamRolesList, self).post(request, *args, **kwargs) return super(TeamRolesList, self).post(request, *args, **kwargs)
@@ -759,9 +816,17 @@ class TeamProjectsList(SubListAPIView):
def get_queryset(self): def get_queryset(self):
team = self.get_parent_object() team = self.get_parent_object()
self.check_parent_access(team) self.check_parent_access(team)
my_qs = self.model.accessible_objects(self.request.user, 'read_role') model_ct = ContentType.objects.get_for_model(self.model)
team_qs = models.Project.accessible_objects(team, 'read_role') parent_ct = ContentType.objects.get_for_model(self.parent_model)
return my_qs & team_qs
rd = get_role_definition(team.member_role)
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()
if role is None:
# Team has no permissions, therefore team has no projects
return self.model.objects.none()
else:
project_qs = self.model.accessible_objects(self.request.user, 'read_role')
return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id'))
class TeamActivityStreamList(SubListAPIView): class TeamActivityStreamList(SubListAPIView):
@@ -876,23 +941,13 @@ class ProjectTeamsList(ListAPIView):
serializer_class = serializers.TeamSerializer serializer_class = serializers.TeamSerializer
def get_queryset(self): def get_queryset(self):
parent = get_object_or_404(models.Project, pk=self.kwargs['pk']) p = get_object_or_404(models.Project, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Project, 'read', parent): if not self.request.user.can_access(models.Project, 'read', p):
raise PermissionDenied() raise PermissionDenied()
project_ct = ContentType.objects.get_for_model(models.Project)
project_ct = ContentType.objects.get_for_model(parent)
team_ct = ContentType.objects.get_for_model(self.model) team_ct = ContentType.objects.get_for_model(self.model)
all_roles = models.Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
roles_on_project = models.Role.objects.filter( return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])
content_type=project_ct,
object_id=parent.pk,
)
team_member_parent_roles = models.Role.objects.filter(children__in=roles_on_project, role_field='member_role', content_type=team_ct).distinct()
team_ids = team_member_parent_roles.values_list('object_id', flat=True)
my_qs = self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=team_ids)
return my_qs
class ProjectSchedulesList(SubListCreateAPIView): class ProjectSchedulesList(SubListCreateAPIView):
@@ -1072,6 +1127,7 @@ class ProjectCopy(CopyAPIView):
copy_return_serializer_class = serializers.ProjectSerializer copy_return_serializer_class = serializers.ProjectSerializer
@immutablesharedfields
class UserList(ListCreateAPIView): class UserList(ListCreateAPIView):
model = models.User model = models.User
serializer_class = serializers.UserSerializer serializer_class = serializers.UserSerializer
@@ -1128,6 +1184,14 @@ class UserRolesList(SubListAttachDetachAPIView):
role = get_object_or_400(models.Role, pk=sub_id) role = get_object_or_400(models.Role, pk=sub_id)
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type} content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
# Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)
credential_content_type = content_types[models.Credential] credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type: if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role: if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
@@ -1162,6 +1226,7 @@ class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization model = models.Organization
serializer_class = serializers.OrganizationSerializer serializer_class = serializers.OrganizationSerializer
parent_model = models.User parent_model = models.User
relationship = 'organizations'
def get_queryset(self): def get_queryset(self):
parent = self.get_parent_object() parent = self.get_parent_object()
@@ -1175,6 +1240,7 @@ class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization model = models.Organization
serializer_class = serializers.OrganizationSerializer serializer_class = serializers.OrganizationSerializer
parent_model = models.User parent_model = models.User
relationship = 'admin_of_organizations'
def get_queryset(self): def get_queryset(self):
parent = self.get_parent_object() parent = self.get_parent_object()
@@ -1198,6 +1264,7 @@ class UserActivityStreamList(SubListAPIView):
return qs.filter(Q(actor=parent) | Q(user__in=[parent])) return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
@immutablesharedfields
class UserDetail(RetrieveUpdateDestroyAPIView): class UserDetail(RetrieveUpdateDestroyAPIView):
model = models.User model = models.User
serializer_class = serializers.UserSerializer serializer_class = serializers.UserSerializer
@@ -4172,6 +4239,13 @@ class RoleUsersList(SubListAttachDetachAPIView):
role = self.get_parent_object() role = self.get_parent_object()
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type} content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)
credential_content_type = content_types[models.Credential] credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type: if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role: if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
@@ -4213,21 +4287,9 @@ class RoleTeamsList(SubListAttachDetachAPIView):
credential_content_type = ContentType.objects.get_for_model(models.Credential) credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type: if role.content_type == credential_content_type:
# Private credentials (no organization) are never allowed for teams if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
if not role.content_object.organization: data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
data = dict(
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
)
return Response(data, status=status.HTTP_400_BAD_REQUEST) return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Cross-organization credentials are only allowed for superusers
elif role.content_object.organization.id != team.organization.id:
if not request.user.is_superuser:
data = dict(
msg=_(
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
)
)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
action = 'attach' action = 'attach'
if request.data.get('disassociate', None): if request.data.get('disassociate', None):
@@ -4247,6 +4309,34 @@ class RoleTeamsList(SubListAttachDetachAPIView):
return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_204_NO_CONTENT)
class RoleParentsList(SubListAPIView):
deprecated = True
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'parents'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.parents.all())
class RoleChildrenList(SubListAPIView):
deprecated = True
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'children'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.children.all())
# Create view functions for all of the class-based views to simplify inclusion # Create view functions for all of the class-based views to simplify inclusion
# in URL patterns and reverse URL lookups, converting CamelCase names to # in URL patterns and reverse URL lookups, converting CamelCase names to
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view). # lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).


@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.utils import get_awx_version from awx.main.utils import get_awx_version
from awx.main.utils.analytics_proxy import OIDCClient from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
from rest_framework import status from rest_framework import status
from collections import OrderedDict from collections import OrderedDict
@@ -202,16 +202,10 @@ class AnalyticsGenericView(APIView):
if method not in ["GET", "POST", "OPTIONS"]: if method not in ["GET", "POST", "OPTIONS"]:
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR) return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
url = self._get_analytics_url(request.path) url = self._get_analytics_url(request.path)
using_subscriptions_credentials = False
try: try:
rh_user = getattr(settings, 'REDHAT_USERNAME', None) rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
rh_password = getattr(settings, 'REDHAT_PASSWORD', None) rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
if not (rh_user and rh_password): client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
rh_user = self._get_setting('SUBSCRIPTIONS_CLIENT_ID', None, ERROR_MISSING_USER)
rh_password = self._get_setting('SUBSCRIPTIONS_CLIENT_SECRET', None, ERROR_MISSING_PASSWORD)
using_subscriptions_credentials = True
client = OIDCClient(rh_user, rh_password)
response = client.make_request( response = client.make_request(
method, method,
url, url,
@@ -222,17 +216,17 @@ class AnalyticsGenericView(APIView):
timeout=(31, 31), timeout=(31, 31),
) )
except requests.RequestException: except requests.RequestException:
# subscriptions credentials are not valid for basic auth, so just return 401 logger.error("Automation Analytics API request failed, trying base auth method")
if using_subscriptions_credentials: response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
response = Response(status=status.HTTP_401_UNAUTHORIZED) except MissingSettings:
else: rh_user = self._get_setting('SUBSCRIPTIONS_USERNAME', None, ERROR_MISSING_USER)
logger.error("Automation Analytics API request failed, trying base auth method") rh_password = self._get_setting('SUBSCRIPTIONS_PASSWORD', None, ERROR_MISSING_PASSWORD)
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers) response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
# #
# Missing or wrong user/pass # Missing or wrong user/pass
# #
if response.status_code == status.HTTP_401_UNAUTHORIZED: if response.status_code == status.HTTP_401_UNAUTHORIZED:
text = response.get('text', '').rstrip("\n") text = (response.text or '').rstrip("\n")
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code) return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
# #
# Not found, No entitlement or No data in Analytics # Not found, No entitlement or No data in Analytics


@@ -12,7 +12,7 @@ import re
import asn1 import asn1
from awx.api import serializers from awx.api import serializers
from awx.api.generics import GenericAPIView, Response from awx.api.generics import GenericAPIView, Response
from awx.api.permissions import IsSystemAdmin from awx.api.permissions import IsSystemAdminOrAuditor
from awx.main import models from awx.main import models
from cryptography import x509 from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives import hashes, serialization
@@ -48,7 +48,7 @@ class InstanceInstallBundle(GenericAPIView):
name = _('Install Bundle') name = _('Install Bundle')
model = models.Instance model = models.Instance
serializer_class = serializers.InstanceSerializer serializer_class = serializers.InstanceSerializer
permission_classes = (IsSystemAdmin,) permission_classes = (IsSystemAdminOrAuditor,)
def get(self, request, *args, **kwargs): def get(self, request, *args, **kwargs):
instance_obj = self.get_object() instance_obj = self.get_object()


@@ -53,15 +53,18 @@ from awx.api.serializers import (
CredentialSerializer, CredentialSerializer,
) )
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin
from awx.api.views import immutablesharedfields
logger = logging.getLogger('awx.api.views.organization') logger = logging.getLogger('awx.api.views.organization')
@immutablesharedfields
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView): class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization model = Organization
serializer_class = OrganizationSerializer serializer_class = OrganizationSerializer
@immutablesharedfields
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView): class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Organization model = Organization
serializer_class = OrganizationSerializer serializer_class = OrganizationSerializer
@@ -104,6 +107,7 @@ class OrganizationInventoriesList(SubListAPIView):
relationship = 'inventories' relationship = 'inventories'
@immutablesharedfields
class OrganizationUsersList(BaseUsersList): class OrganizationUsersList(BaseUsersList):
model = User model = User
serializer_class = UserSerializer serializer_class = UserSerializer
@@ -112,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
ordering = ('username',) ordering = ('username',)
@immutablesharedfields
class OrganizationAdminsList(BaseUsersList): class OrganizationAdminsList(BaseUsersList):
model = User model = User
serializer_class = UserSerializer serializer_class = UserSerializer
@@ -150,6 +155,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
parent_key = 'organization' parent_key = 'organization'
@immutablesharedfields
class OrganizationTeamsList(SubListCreateAttachDetachAPIView): class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
model = Team model = Team
serializer_class = TeamSerializer serializer_class = TeamSerializer


@@ -8,8 +8,6 @@ import operator
from collections import OrderedDict from collections import OrderedDict
from django.conf import settings from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
from django.utils.decorators import method_decorator from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie from django.views.decorators.csrf import ensure_csrf_cookie
@@ -28,14 +26,12 @@ from awx.api.generics import APIView
from awx.conf.registry import settings_registry from awx.conf.registry import settings_registry
from awx.main.analytics import all_collectors from awx.main.analytics import all_collectors
from awx.main.ha import is_ha_environment from awx.main.ha import is_ha_environment
from awx.main.tasks.system import clear_setting_cache
from awx.main.utils import get_awx_version, get_custom_venv_choices from awx.main.utils import get_awx_version, get_custom_venv_choices
from awx.main.utils.licensing import validate_entitlement_manifest from awx.main.utils.licensing import validate_entitlement_manifest
from awx.api.versioning import URLPathVersioning, reverse, drf_reverse from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ from awx.main.utils import set_environ
from awx.main.utils.analytics_proxy import TokenError
from awx.main.utils.licensing import get_licenser from awx.main.utils.licensing import get_licenser
logger = logging.getLogger('awx.api.views.root') logger = logging.getLogger('awx.api.views.root')
@@ -180,52 +176,19 @@ class ApiV2SubscriptionView(APIView):
def post(self, request): def post(self, request):
data = request.data.copy() data = request.data.copy()
if data.get('subscriptions_password') == '$encrypted$':
data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
try: try:
user = None user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
pw = None
basic_auth = False
# determine if the credentials are for basic auth or not
if data.get('subscriptions_client_id'):
user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
if pw == '$encrypted$':
pw = settings.SUBSCRIPTIONS_CLIENT_SECRET
elif data.get('subscriptions_username'):
user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
if pw == '$encrypted$':
pw = settings.SUBSCRIPTIONS_PASSWORD
basic_auth = True
if not user or not pw:
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
with set_environ(**settings.AWX_TASK_ENV): with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw, basic_auth) validated = get_licenser().validate_rh(user, pw)
if user:
# update settings if the credentials were valid settings.SUBSCRIPTIONS_USERNAME = data['subscriptions_username']
if basic_auth: if pw:
if user: settings.SUBSCRIPTIONS_PASSWORD = data['subscriptions_password']
settings.SUBSCRIPTIONS_USERNAME = user
if pw:
settings.SUBSCRIPTIONS_PASSWORD = pw
# mutual exclusion for basic auth and service account
# only one should be set at a given time so that
# config/attach/ knows which credentials to use
settings.SUBSCRIPTIONS_CLIENT_ID = ""
settings.SUBSCRIPTIONS_CLIENT_SECRET = ""
else:
if user:
settings.SUBSCRIPTIONS_CLIENT_ID = user
if pw:
settings.SUBSCRIPTIONS_CLIENT_SECRET = pw
# mutual exclusion for basic auth and service account
settings.SUBSCRIPTIONS_USERNAME = ""
settings.SUBSCRIPTIONS_PASSWORD = ""
except Exception as exc: except Exception as exc:
msg = _("Invalid Subscription") msg = _("Invalid Subscription")
if isinstance(exc, TokenError) or ( if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401
):
msg = _("The provided credentials are invalid (HTTP 401).") msg = _("The provided credentials are invalid (HTTP 401).")
elif isinstance(exc, requests.exceptions.ProxyError): elif isinstance(exc, requests.exceptions.ProxyError):
msg = _("Unable to connect to proxy server.") msg = _("Unable to connect to proxy server.")
@@ -252,25 +215,16 @@ class ApiV2AttachView(APIView):
def post(self, request): def post(self, request):
data = request.data.copy() data = request.data.copy()
subscription_id = data.get('subscription_id', None) pool_id = data.get('pool_id', None)
if not subscription_id: if not pool_id:
return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": _("No subscription pool ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
# Ensure we always use the latest subscription credentials user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
cache.delete_many(['SUBSCRIPTIONS_CLIENT_ID', 'SUBSCRIPTIONS_CLIENT_SECRET', 'SUBSCRIPTIONS_USERNAME', 'SUBSCRIPTIONS_PASSWORD']) pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None) if pool_id and user and pw:
pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
basic_auth = False
if not (user and pw):
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
basic_auth = True
if not (user and pw):
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
if subscription_id and user and pw:
data = request.data.copy() data = request.data.copy()
try: try:
with set_environ(**settings.AWX_TASK_ENV): with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw, basic_auth) validated = get_licenser().validate_rh(user, pw)
except Exception as exc: except Exception as exc:
msg = _("Invalid Subscription") msg = _("Invalid Subscription")
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401: if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
@@ -284,12 +238,10 @@ class ApiV2AttachView(APIView):
else: else:
logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username)) logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
for sub in validated: for sub in validated:
if sub['subscription_id'] == subscription_id: if sub['pool_id'] == pool_id:
sub['valid_key'] = True sub['valid_key'] = True
settings.LICENSE = sub settings.LICENSE = sub
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
return Response(sub) return Response(sub)
return Response({"error": _("Error processing subscription metadata.")}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": _("Error processing subscription metadata.")}, status=status.HTTP_400_BAD_REQUEST)
@@ -309,6 +261,7 @@ class ApiV2ConfigView(APIView):
'''Return various sitewide configuration settings''' '''Return various sitewide configuration settings'''
license_data = get_licenser().validate() license_data = get_licenser().validate()
if not license_data.get('valid_key', False): if not license_data.get('valid_key', False):
license_data = {} license_data = {}
@@ -372,7 +325,6 @@ class ApiV2ConfigView(APIView):
try: try:
license_data_validated = get_licenser().license_from_manifest(license_data) license_data_validated = get_licenser().license_from_manifest(license_data)
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
except Exception: except Exception:
logger.warning(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username)) logger.warning(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST) return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
@@ -391,7 +343,6 @@ class ApiV2ConfigView(APIView):
def delete(self, request): def delete(self, request):
try: try:
settings.LICENSE = {} settings.LICENSE = {}
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
return Response(status=status.HTTP_204_NO_CONTENT) return Response(status=status.HTTP_204_NO_CONTENT)
except Exception: except Exception:
# FIX: Log # FIX: Log


@@ -10,7 +10,7 @@ from django.core.validators import URLValidator, _lazy_re_compile
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
# Django REST Framework # Django REST Framework
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField, FloatField # noqa from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField # noqa
from rest_framework.serializers import PrimaryKeyRelatedField # noqa from rest_framework.serializers import PrimaryKeyRelatedField # noqa
# AWX # AWX
@@ -207,8 +207,7 @@ class URLField(CharField):
if self.allow_plain_hostname: if self.allow_plain_hostname:
try: try:
url_parts = urlparse.urlsplit(value) url_parts = urlparse.urlsplit(value)
looks_like_ipv6 = bool(url_parts.netloc and url_parts.netloc.startswith('[') and url_parts.netloc.endswith(']')) if url_parts.hostname and '.' not in url_parts.hostname:
if not looks_like_ipv6 and url_parts.hostname and '.' not in url_parts.hostname:
netloc = '{}.local'.format(url_parts.hostname) netloc = '{}.local'.format(url_parts.hostname)
if url_parts.port: if url_parts.port:
netloc = '{}:{}'.format(netloc, url_parts.port) netloc = '{}:{}'.format(netloc, url_parts.port)
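A quick standard-library check of the bracket test added in this hunk (the example URLs are arbitrary):

from urllib.parse import urlsplit

for value in ('https://[2001:db8::1]', 'https://tower:8443'):
    parts = urlsplit(value)
    looks_like_ipv6 = bool(parts.netloc and parts.netloc.startswith('[') and parts.netloc.endswith(']'))
    # The bracketed IPv6 literal is left alone; the single-label hostname is the
    # case that would have '.local' appended.
    print(parts.netloc, parts.hostname, looks_like_ipv6, '.' not in (parts.hostname or ''))

In this sketch, a bracketed host with an explicit port no longer ends with ']' and so falls through to the plain-hostname path.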


@@ -27,5 +27,5 @@ def _migrate_setting(apps, old_key, new_key, encrypted=False):
def prefill_rh_credentials(apps, schema_editor): def prefill_rh_credentials(apps, schema_editor):
_migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_CLIENT_ID', encrypted=False) _migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_USERNAME', encrypted=False)
_migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_CLIENT_SECRET', encrypted=True) _migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_PASSWORD', encrypted=True)


@@ -38,7 +38,6 @@ class SettingsRegistry(object):
if setting in self._registry: if setting in self._registry:
raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting)) raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
category = kwargs.setdefault('category', None) category = kwargs.setdefault('category', None)
kwargs.setdefault('required', False) # No setting is ordinarily required
category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None) category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
if category_slug in {'all', 'changed', 'user-defaults'}: if category_slug in {'all', 'changed', 'user-defaults'}:
raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug)) raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))


@@ -128,41 +128,3 @@ class TestURLField:
else: else:
with pytest.raises(ValidationError): with pytest.raises(ValidationError):
field.run_validators(url) field.run_validators(url)
@pytest.mark.parametrize(
"url, expect_error",
[
("https://[1:2:3]", True),
("http://[1:2:3]", True),
("https://[2001:db8:3333:4444:5555:6666:7777:8888", True),
("https://2001:db8:3333:4444:5555:6666:7777:8888", True),
("https://[2001:db8:3333:4444:5555:6666:7777:8888]", False),
("https://[::1]", False),
("https://[::]", False),
("https://[2001:db8::1]", False),
("https://[2001:db8:0:0:0:0:1:1]", False),
("https://[fe80::2%eth0]", True), # ipv6 scope identifier
("https://[fe80:0:0:0:200:f8ff:fe21:67cf]", False),
("https://[::ffff:192.168.1.10]", False),
("https://[0:0:0:0:0:ffff:c000:0201]", False),
("https://[2001:0db8:000a:0001:0000:0000:0000:0000]", False),
("https://[2001:db8:a:1::]", False),
("https://[ff02::1]", False),
("https://[ff02:0:0:0:0:0:0:1]", False),
("https://[fc00::1]", False),
("https://[fd12:3456:789a:1::1]", False),
("https://[2001:db8::abcd:ef12:3456:7890]", False),
("https://[2001:db8:0000:abcd:0000:ef12:0000:3456]", False),
("https://[::ffff:10.0.0.1]", False),
("https://[2001:db8:cafe::]", False),
("https://[2001:db8:cafe:0:0:0:0:0]", False),
("https://[fe80::210:f3ff:fedf:4567%3]", True), # ipv6 scope identifier, numerical interface
],
)
def test_ipv6_urls(self, url, expect_error):
field = URLField()
if expect_error:
with pytest.raises(ValidationError, match="Enter a valid URL"):
field.run_validators(url)
else:
field.run_validators(url)


@@ -639,9 +639,7 @@ class UserAccess(BaseAccess):
prefetch_related = ('resource',) prefetch_related = ('resource',)
def filtered_queryset(self): def filtered_queryset(self):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and ( if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
):
qs = User.objects.all() qs = User.objects.all()
else: else:
qs = ( qs = (
@@ -1226,9 +1224,7 @@ class TeamAccess(BaseAccess):
) )
def filtered_queryset(self): def filtered_queryset(self):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and ( if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
):
return self.model.objects.all() return self.model.objects.all()
return self.model.objects.filter( return self.model.objects.filter(
Q(organization__in=Organization.accessible_pk_qs(self.user, 'member_role')) | Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role')) Q(organization__in=Organization.accessible_pk_qs(self.user, 'member_role')) | Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role'))
@@ -2102,7 +2098,7 @@ class WorkflowJobAccess(BaseAccess):
def filtered_queryset(self): def filtered_queryset(self):
return WorkflowJob.objects.filter( return WorkflowJob.objects.filter(
Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role')) Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role')) | Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
) )
def can_read(self, obj): def can_read(self, obj):
@@ -2500,11 +2496,12 @@ class UnifiedJobAccess(BaseAccess):
def filtered_queryset(self): def filtered_queryset(self):
inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role') inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role')
org_auditor_qs = Organization.objects.filter(Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
qs = self.model.objects.filter( qs = self.model.objects.filter(
Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role')) Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
| Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs) | Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs)
| Q(adhoccommand__inventory__id__in=inv_pk_qs) | Q(adhoccommand__inventory__id__in=inv_pk_qs)
| Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role')) | Q(organization__in=org_auditor_qs)
) )
return qs return qs
@@ -2568,7 +2565,7 @@ class NotificationTemplateAccess(BaseAccess):
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED: if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
return self.model.access_qs(self.user, 'view') return self.model.access_qs(self.user, 'view')
return self.model.objects.filter( return self.model.objects.filter(
Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=Organization.access_qs(self.user, 'audit')) Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=self.user.auditor_of_organizations)
).distinct() ).distinct()
@check_superuser @check_superuser
@@ -2603,7 +2600,7 @@ class NotificationAccess(BaseAccess):
def filtered_queryset(self): def filtered_queryset(self):
return self.model.objects.filter( return self.model.objects.filter(
Q(notification_template__organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) Q(notification_template__organization__in=Organization.access_qs(self.user, 'add_notificationtemplate'))
| Q(notification_template__organization__in=Organization.access_qs(self.user, 'audit')) | Q(notification_template__organization__in=self.user.auditor_of_organizations)
).distinct() ).distinct()
def can_delete(self, obj): def can_delete(self, obj):


@@ -3,13 +3,13 @@ import logging
# AWX # AWX
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
from awx.main.dispatch.publish import task as task_awx from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
logger = logging.getLogger('awx.main.scheduler') logger = logging.getLogger('awx.main.scheduler')
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def send_subsystem_metrics(): def send_subsystem_metrics():
DispatcherMetrics().send_metrics() DispatcherMetrics().send_metrics()
CallbackReceiverMetrics().send_metrics() CallbackReceiverMetrics().send_metrics()


@@ -142,7 +142,7 @@ def config(since, **kwargs):
return { return {
'platform': { 'platform': {
'system': platform.system(), 'system': platform.system(),
'dist': (distro.name(), distro.version(), distro.codename()), 'dist': distro.linux_distribution(),
'release': platform.release(), 'release': platform.release(),
'type': install_type, 'type': install_type,
}, },


@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
from awx.main.models import Job from awx.main.models import Job
from awx.main.access import access_registry from awx.main.access import access_registry
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
from awx.main.utils.analytics_proxy import OIDCClient from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
__all__ = ['register', 'gather', 'ship'] __all__ = ['register', 'gather', 'ship']
@@ -186,7 +186,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
if not ( if not (
settings.AUTOMATION_ANALYTICS_URL settings.AUTOMATION_ANALYTICS_URL
and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_CLIENT_ID and settings.SUBSCRIPTIONS_CLIENT_SECRET)) and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_USERNAME and settings.SUBSCRIPTIONS_PASSWORD))
): ):
logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.") logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
return None return None
@@ -324,10 +324,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder) settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
if collection_type != 'dry-run': if collection_type != 'dry-run':
for fpath in tarfiles: if succeeded:
if os.path.exists(fpath): for fpath in tarfiles:
os.remove(fpath) if os.path.exists(fpath):
os.remove(fpath)
with disable_activity_stream(): with disable_activity_stream():
if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER: if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
# `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails; # `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;
@@ -368,20 +368,8 @@ def ship(path):
logger.error('AUTOMATION_ANALYTICS_URL is not set') logger.error('AUTOMATION_ANALYTICS_URL is not set')
return False return False
rh_id = getattr(settings, 'REDHAT_USERNAME', None) rh_user = getattr(settings, 'REDHAT_USERNAME', None)
rh_secret = getattr(settings, 'REDHAT_PASSWORD', None) rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
if not (rh_id and rh_secret):
rh_id = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
rh_secret = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
if not rh_id:
logger.error('Neither REDHAT_USERNAME nor SUBSCRIPTIONS_CLIENT_ID are set')
return False
if not rh_secret:
logger.error('Neither REDHAT_PASSWORD nor SUBSCRIPTIONS_CLIENT_SECRET are set')
return False
with open(path, 'rb') as f: with open(path, 'rb') as f:
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)} files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
@@ -389,13 +377,25 @@ def ship(path):
s.headers = get_awx_http_client_headers() s.headers = get_awx_http_client_headers()
s.headers.pop('Content-Type') s.headers.pop('Content-Type')
with set_environ(**settings.AWX_TASK_ENV): with set_environ(**settings.AWX_TASK_ENV):
try: if rh_user and rh_password:
client = OIDCClient(rh_id, rh_secret) try:
response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31)) client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
except requests.RequestException: response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
logger.error("Automation Analytics API request failed, trying base auth method") except requests.RequestException:
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_id, rh_secret), headers=s.headers, timeout=(31, 31)) logger.error("Automation Analytics API request failed, trying base auth method")
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
elif not rh_user or not rh_password:
logger.info('REDHAT_USERNAME and REDHAT_PASSWORD are not set, using SUBSCRIPTIONS_USERNAME and SUBSCRIPTIONS_PASSWORD')
rh_user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
rh_password = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
if rh_user and rh_password:
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
elif not rh_user:
logger.error('REDHAT_USERNAME and SUBSCRIPTIONS_USERNAME are not set')
return False
elif not rh_password:
logger.error('REDHAT_PASSWORD and SUBSCRIPTIONS_USERNAME are not set')
return False
# Accept 2XX status_codes # Accept 2XX status_codes
if response.status_code >= 300: if response.status_code >= 300:
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text)) logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))


@@ -128,7 +128,6 @@ def metrics():
registry=REGISTRY, registry=REGISTRY,
) )
LICENSE_EXPIRY = Gauge('awx_license_expiry', 'Time before license expires', registry=REGISTRY)
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY) LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY) LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
@@ -149,7 +148,6 @@ def metrics():
} }
) )
LICENSE_EXPIRY.set(str(license_info.get('time_remaining', 0)))
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0))) LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0))) LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))


@@ -9,7 +9,6 @@ from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
from prometheus_client.registry import CollectorRegistry from prometheus_client.registry import CollectorRegistry
from django.conf import settings from django.conf import settings
from django.http import HttpRequest from django.http import HttpRequest
import redis.exceptions
from rest_framework.request import Request from rest_framework.request import Request
from awx.main.consumers import emit_channel_notification from awx.main.consumers import emit_channel_notification
@@ -44,12 +43,11 @@ class MetricsServer(MetricsServerSettings):
class BaseM: class BaseM:
def __init__(self, field, help_text, labels=None): def __init__(self, field, help_text):
self.field = field self.field = field
self.help_text = help_text self.help_text = help_text
self.current_value = 0 self.current_value = 0
self.metric_has_changed = False self.metric_has_changed = False
self.labels = labels or {}
def reset_value(self, conn): def reset_value(self, conn):
conn.hset(root_key, self.field, 0) conn.hset(root_key, self.field, 0)
@@ -70,16 +68,12 @@ class BaseM:
value = conn.hget(root_key, self.field) value = conn.hget(root_key, self.field)
return self.decode_value(value) return self.decode_value(value)
def to_prometheus(self, instance_data, namespace=None): def to_prometheus(self, instance_data):
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n" output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
for instance in instance_data: for instance in instance_data:
if self.field in instance_data[instance]: if self.field in instance_data[instance]:
# Build label string
labels = f'node="{instance}"'
if namespace:
labels += f',subsystem="{namespace}"'
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present # on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
output_text += f'{self.field}{{{labels}}} {instance_data[instance][self.field]}\n' output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
return output_text return output_text
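To make the Prometheus exposition format concrete, here is a small standalone sketch of the gauge lines a method like this produces; the metric name, node name, and value are invented, and the subsystem label follows the left-hand variant in the hunk above.

instance_data = {'awx-1': {'callback_receiver_events_insert_db': 42}}
field = 'callback_receiver_events_insert_db'
help_text = 'Events inserted into the database'

output_text = f"# HELP {field} {help_text}\n# TYPE {field} gauge\n"
for instance in instance_data:
    # One sample line per node, with node and subsystem labels.
    labels = f'node="{instance}",subsystem="callback_receiver"'
    output_text += f'{field}{{{labels}}} {instance_data[instance][field]}\n'
print(output_text)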
@@ -172,17 +166,14 @@ class HistogramM(BaseM):
self.sum.store_value(conn) self.sum.store_value(conn)
self.inf.store_value(conn) self.inf.store_value(conn)
def to_prometheus(self, instance_data, namespace=None): def to_prometheus(self, instance_data):
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n" output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n"
for instance in instance_data: for instance in instance_data:
# Build label string
node_label = f'node="{instance}"'
subsystem_label = f',subsystem="{namespace}"' if namespace else ''
for i, b in enumerate(self.buckets): for i, b in enumerate(self.buckets):
output_text += f'{self.field}_bucket{{le="{b}",{node_label}{subsystem_label}}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n' output_text += f'{self.field}_bucket{{le="{b}",node="{instance}"}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
output_text += f'{self.field}_bucket{{le="+Inf",{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n' output_text += f'{self.field}_bucket{{le="+Inf",node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_count{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n' output_text += f'{self.field}_count{{node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_sum{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["sum"]}\n' output_text += f'{self.field}_sum{{node="{instance}"}} {instance_data[instance][self.field]["sum"]}\n'
return output_text return output_text
@@ -281,32 +272,26 @@ class Metrics(MetricsNamespace):
def pipe_execute(self): def pipe_execute(self):
if self.metrics_have_changed is True: if self.metrics_have_changed is True:
duration_pipe_exec = time.perf_counter() duration_to_save = time.perf_counter()
for m in self.METRICS: for m in self.METRICS:
self.METRICS[m].store_value(self.pipe) self.METRICS[m].store_value(self.pipe)
self.pipe.execute() self.pipe.execute()
self.last_pipe_execute = time.time() self.last_pipe_execute = time.time()
self.metrics_have_changed = False self.metrics_have_changed = False
duration_pipe_exec = time.perf_counter() - duration_pipe_exec duration_to_save = time.perf_counter() - duration_to_save
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_to_save)
duration_send_metrics = time.perf_counter()
self.send_metrics()
duration_send_metrics = time.perf_counter() - duration_send_metrics
# Increment operational metrics
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_pipe_exec)
self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1) self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1)
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_send_metrics)
duration_to_save = time.perf_counter()
self.send_metrics()
duration_to_save = time.perf_counter() - duration_to_save
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_to_save)
def send_metrics(self): def send_metrics(self):
# more than one thread could be calling this at the same time, so should # more than one thread could be calling this at the same time, so should
# acquire redis lock before sending metrics # acquire redis lock before sending metrics
try: lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock') if not lock.acquire(blocking=False):
if not lock.acquire(blocking=False):
return
except redis.exceptions.ConnectionError as exc:
logger.warning(f'Connection error in send_metrics: {exc}')
return return
try: try:
current_time = time.time() current_time = time.time()
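The comment in this hunk explains why the sender takes a Redis lock without blocking; as a rough standalone sketch of that pattern with redis-py (connection details and key name are placeholders):

import redis

conn = redis.Redis(host='localhost', port=6379)  # placeholder connection
lock = conn.lock('subsystem_metrics-dispatcher_lock')  # placeholder key name
if lock.acquire(blocking=False):
    try:
        pass  # serialize and publish the metrics here
    finally:
        lock.release()
else:
    pass  # another worker is already sending; skip this cycle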
@@ -362,13 +347,7 @@ class Metrics(MetricsNamespace):
if instance_data: if instance_data:
for field in self.METRICS: for field in self.METRICS:
if len(metrics_filter) == 0 or field in metrics_filter: if len(metrics_filter) == 0 or field in metrics_filter:
# Add subsystem label only for operational metrics output_text += self.METRICS[field].to_prometheus(instance_data)
namespace = (
self._namespace
if field in ['subsystem_metrics_pipe_execute_seconds', 'subsystem_metrics_pipe_execute_calls', 'subsystem_metrics_send_metrics_seconds']
else None
)
output_text += self.METRICS[field].to_prometheus(instance_data, namespace)
return output_text return output_text
@@ -456,10 +435,7 @@ class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'") logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
return None return None
if not (host_metrics := instance_data.get(my_hostname)): host_metrics = instance_data.get(my_hostname)
logger.debug(f"Metric data for this node '{my_hostname}' not found in redis for metric namespace '{self._metrics._namespace}'")
return None
for _, metric in self._metrics.METRICS.items(): for _, metric in self._metrics.METRICS.items():
entry = host_metrics.get(metric.field) entry = host_metrics.get(metric.field)
if not entry: if not entry:
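
The send_metrics() path above guards its Redis write with a non-blocking lock (only one process per namespace flushes at a time) and now tolerates connection errors instead of crashing the caller. A minimal sketch of that pattern with redis-py; the function name flush_metrics and the lock key are illustrative, not from the AWX codebase:

import logging

import redis

logger = logging.getLogger(__name__)


def flush_metrics(conn: redis.Redis, payload: dict) -> None:
    # If another process already holds the lock, skip this flush entirely.
    try:
        lock = conn.lock('my-metrics-lock')
        if not lock.acquire(blocking=False):
            return
    except redis.exceptions.ConnectionError as exc:
        # Redis being briefly unavailable should not take the caller down.
        logger.warning('Connection error acquiring metrics lock: %s', exc)
        return
    try:
        for key, value in payload.items():
            conn.set(key, value)
    finally:
        try:
            lock.release()
        except redis.exceptions.RedisError:
            pass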

View File

@@ -1,9 +1,6 @@
import os import os
from dispatcherd.config import setup as dispatcher_setup
from django.apps import AppConfig from django.apps import AppConfig
from django.db import connection
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from awx.main.utils.common import bypass_in_test, load_all_entry_points_for from awx.main.utils.common import bypass_in_test, load_all_entry_points_for
from awx.main.utils.migration import is_database_synchronized from awx.main.utils.migration import is_database_synchronized
@@ -79,28 +76,9 @@ class MainConfig(AppConfig):
cls = entry_point.load() cls = entry_point.load()
InventorySourceOptions.injectors[entry_point_name] = cls InventorySourceOptions.injectors[entry_point_name] = cls
def configure_dispatcherd(self):
"""This implements the default configuration for dispatcherd
If running the tasking service like awx-manage run_dispatcher,
some additional config will be applied on top of this.
This configuration provides the minimum such that code can submit
tasks to pg_notify to run those tasks.
"""
from awx.main.dispatch.config import get_dispatcherd_config
if connection.vendor != 'postgresql':
config_dict = get_dispatcherd_config(mock_publish=True)
else:
config_dict = get_dispatcherd_config()
dispatcher_setup(config_dict)
def ready(self): def ready(self):
super().ready() super().ready()
self.configure_dispatcherd()
""" """
Credential loading triggers database operations. There are cases we want to call Credential loading triggers database operations. There are cases we want to call
awx-manage collectstatic without a database. All management commands invoke the ready() code awx-manage collectstatic without a database. All management commands invoke the ready() code

View File

@@ -12,7 +12,6 @@ from rest_framework import serializers
from awx.conf import fields, register, register_validate from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment from awx.main.models import ExecutionEnvironment
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
from awx.main.tasks.policy import OPA_AUTH_TYPES
logger = logging.getLogger('awx.main.conf') logger = logging.getLogger('awx.main.conf')
@@ -91,6 +90,7 @@ register(
), ),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
) )
register( register(
@@ -105,7 +105,6 @@ register(
), ),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
hidden=True,
) )
register( register(
@@ -125,8 +124,8 @@ register(
allow_blank=True, allow_blank=True,
encrypted=False, encrypted=False,
read_only=False, read_only=False,
label=_('Red Hat Client ID for Analytics'), label=_('Red Hat customer username'),
help_text=_('Client ID used to send data to Automation Analytics'), help_text=_('This username is used to send data to Automation Analytics'),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
) )
@@ -138,8 +137,8 @@ register(
allow_blank=True, allow_blank=True,
encrypted=True, encrypted=True,
read_only=False, read_only=False,
label=_('Red Hat Client Secret for Analytics'), label=_('Red Hat customer password'),
help_text=_('Client secret used to send data to Automation Analytics'), help_text=_('This password is used to send data to Automation Analytics'),
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
) )
@@ -151,11 +150,10 @@ register(
allow_blank=True, allow_blank=True,
encrypted=False, encrypted=False,
read_only=False, read_only=False,
label=_('Red Hat Username for Subscriptions'), label=_('Red Hat or Satellite username'),
help_text=_('Username used to retrieve subscription and content information'), # noqa help_text=_('This username is used to retrieve subscription and content information'), # noqa
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
hidden=True,
) )
register( register(
@@ -165,40 +163,10 @@ register(
allow_blank=True, allow_blank=True,
encrypted=True, encrypted=True,
read_only=False, read_only=False,
label=_('Red Hat Password for Subscriptions'), label=_('Red Hat or Satellite password'),
help_text=_('Password used to retrieve subscription and content information'), # noqa help_text=_('This password is used to retrieve subscription and content information'), # noqa
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
hidden=True,
)
register(
'SUBSCRIPTIONS_CLIENT_ID',
field_class=fields.CharField,
default='',
allow_blank=True,
encrypted=False,
read_only=False,
label=_('Red Hat Client ID for Subscriptions'),
help_text=_('Client ID used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
)
register(
'SUBSCRIPTIONS_CLIENT_SECRET',
field_class=fields.CharField,
default='',
allow_blank=True,
encrypted=True,
read_only=False,
label=_('Red Hat Client Secret for Subscriptions'),
help_text=_('Client secret used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
) )
register( register(
@@ -269,6 +237,7 @@ register(
help_text=_('List of modules allowed to be used by ad-hoc jobs.'), help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
category=_('Jobs'), category=_('Jobs'),
category_slug='jobs', category_slug='jobs',
required=False,
) )
register( register(
@@ -279,6 +248,7 @@ register(
('never', _('Never')), ('never', _('Never')),
('template', _('Only On Job Template Definitions')), ('template', _('Only On Job Template Definitions')),
], ],
required=True,
label=_('When can extra variables contain Jinja templates?'), label=_('When can extra variables contain Jinja templates?'),
help_text=_( help_text=_(
'Ansible allows variable substitution via the Jinja2 templating ' 'Ansible allows variable substitution via the Jinja2 templating '
@@ -303,6 +273,7 @@ register(
register( register(
'AWX_ISOLATION_SHOW_PATHS', 'AWX_ISOLATION_SHOW_PATHS',
field_class=fields.StringListIsolatedPathField, field_class=fields.StringListIsolatedPathField,
required=False,
label=_('Paths to expose to isolated jobs'), label=_('Paths to expose to isolated jobs'),
help_text=_( help_text=_(
'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. ' 'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
@@ -468,6 +439,7 @@ register(
register( register(
'AWX_ANSIBLE_CALLBACK_PLUGINS', 'AWX_ANSIBLE_CALLBACK_PLUGINS',
field_class=fields.StringListField, field_class=fields.StringListField,
required=False,
label=_('Ansible Callback Plugins'), label=_('Ansible Callback Plugins'),
help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'), help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'),
category=_('Jobs'), category=_('Jobs'),
@@ -581,6 +553,7 @@ register(
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'), help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
category=_('Logging'), category=_('Logging'),
category_slug='logging', category_slug='logging',
required=False,
) )
register( register(
'LOG_AGGREGATOR_TYPE', 'LOG_AGGREGATOR_TYPE',
@@ -602,6 +575,7 @@ register(
help_text=_('Username for external log aggregator (if required; HTTP/s only).'), help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'), category=_('Logging'),
category_slug='logging', category_slug='logging',
required=False,
) )
register( register(
'LOG_AGGREGATOR_PASSWORD', 'LOG_AGGREGATOR_PASSWORD',
@@ -613,6 +587,7 @@ register(
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'), help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'), category=_('Logging'),
category_slug='logging', category_slug='logging',
required=False,
) )
register( register(
'LOG_AGGREGATOR_LOGGERS', 'LOG_AGGREGATOR_LOGGERS',
@@ -799,6 +774,7 @@ register(
allow_null=True, allow_null=True,
category=_('System'), category=_('System'),
category_slug='system', category_slug='system',
required=False,
hidden=True, hidden=True,
) )
register( register(
@@ -1004,134 +980,3 @@ def csrf_trusted_origins_validate(serializer, attrs):
register_validate('system', csrf_trusted_origins_validate) register_validate('system', csrf_trusted_origins_validate)
register(
'OPA_HOST',
field_class=fields.CharField,
label=_('OPA server hostname'),
default='',
help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_PORT',
field_class=fields.IntegerField,
label=_('OPA server port'),
default=8181,
help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_SSL',
field_class=fields.BooleanField,
label=_('Use SSL for OPA connection'),
default=False,
help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_TYPE',
field_class=fields.ChoiceField,
label=_('OPA authentication type'),
choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
default=OPA_AUTH_TYPES.NONE,
help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_TOKEN',
field_class=fields.CharField,
label=_('OPA authentication token'),
default='',
help_text=_(
'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_CLIENT_CERT',
field_class=fields.CharField,
label=_('OPA client certificate content'),
default='',
help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CLIENT_KEY',
field_class=fields.CharField,
label=_('OPA client key content'),
default='',
help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_CA_CERT',
field_class=fields.CharField,
label=_('OPA CA certificate content'),
default='',
help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CUSTOM_HEADERS',
field_class=fields.DictField,
label=_('OPA custom authentication headers'),
default={},
help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_REQUEST_TIMEOUT',
field_class=fields.FloatField,
label=_('OPA request timeout'),
default=1.5,
help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_REQUEST_RETRIES',
field_class=fields.IntegerField,
label=_('OPA request retry count'),
default=2,
help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
def policy_as_code_validate(serializer, attrs):
opa_host = attrs.get('OPA_HOST', '')
if opa_host and (opa_host.startswith('http://') or opa_host.startswith('https://')):
raise serializers.ValidationError({'OPA_HOST': _("OPA_HOST should not include 'http://' or 'https://' prefixes. Please enter only the hostname.")})
return attrs
register_validate('policyascode', policy_as_code_validate)
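
The block above removes the Policy-as-Code settings, but the pattern it used, registering a setting and attaching a category-level validator, is the same one the rest of conf.py relies on. A hedged sketch of that pattern reusing the register/register_validate helpers shown above; the EXAMPLE_HOST setting and the 'example' category are hypothetical:

register(
    'EXAMPLE_HOST',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    label=_('Example server hostname'),
    help_text=_('Hostname only; scheme prefixes are rejected by the validator below.'),
    category=_('Example'),
    category_slug='example',
)


def example_validate(serializer, attrs):
    host = attrs.get('EXAMPLE_HOST', '')
    if host.startswith(('http://', 'https://')):
        raise serializers.ValidationError({'EXAMPLE_HOST': _('Enter only the hostname, without a scheme.')})
    return attrs


register_validate('example', example_validate)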

View File

@@ -77,8 +77,6 @@ LOGGER_BLOCKLIST = (
'awx.main.utils.log', 'awx.main.utils.log',
# loggers that may be called getting logging settings # loggers that may be called getting logging settings
'awx.conf', 'awx.conf',
# dispatcherd should only use 1 database connection
'dispatcherd',
) )
# Reported version for node seen in receptor mesh but for which capacity check # Reported version for node seen in receptor mesh but for which capacity check

View File

@@ -1,53 +0,0 @@
from django.conf import settings
from ansible_base.lib.utils.db import get_pg_notify_params
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.pool import get_auto_max_workers
def get_dispatcherd_config(for_service: bool = False, mock_publish: bool = False) -> dict:
"""Return a dictionary config for dispatcherd
Parameters:
for_service: if True, include dynamic options needed for running the dispatcher service
this will require database access, you should delay evaluation until after app setup
"""
config = {
"version": 2,
"service": {
"pool_kwargs": {
"min_workers": settings.JOB_EVENT_WORKERS,
"max_workers": get_auto_max_workers(),
},
"main_kwargs": {"node_id": settings.CLUSTER_HOST_ID},
"process_manager_cls": "ForkServerManager",
"process_manager_kwargs": {"preload_modules": ['awx.main.dispatch.hazmat']},
},
"brokers": {
"socket": {"socket_path": settings.DISPATCHERD_DEBUGGING_SOCKFILE},
},
"publish": {"default_control_broker": "socket"},
"worker": {"worker_cls": "awx.main.dispatch.worker.dispatcherd.AWXTaskWorker"},
}
if mock_publish:
config["brokers"]["noop"] = {}
config["publish"]["default_broker"] = "noop"
else:
config["brokers"]["pg_notify"] = {
"config": get_pg_notify_params(),
"sync_connection_factory": "ansible_base.lib.utils.db.psycopg_connection_from_django",
"default_publish_channel": settings.CLUSTER_HOST_ID, # used for debugging commands
}
config["publish"]["default_broker"] = "pg_notify"
if for_service:
config["producers"] = {
"ScheduledProducer": {"task_schedule": settings.DISPATCHER_SCHEDULE},
"OnStartProducer": {"task_list": {"awx.main.tasks.system.dispatch_startup": {}}},
"ControlProducer": {},
}
config["brokers"]["pg_notify"]["channels"] = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
return config
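
For context, the removed helper above had two consumers visible elsewhere in this diff: app startup applied the minimal config so any process could publish tasks over pg_notify, and run_dispatcher built the fuller service config. A rough usage sketch, assuming the module paths shown in this diff:

from dispatcherd.config import setup as dispatcher_setup
from dispatcherd import run_service

from awx.main.dispatch.config import get_dispatcherd_config


def configure(for_service: bool = False) -> None:
    # Publisher-only processes need just enough config to submit tasks over
    # pg_notify; the tasking service additionally gets producers and channels.
    dispatcher_setup(get_dispatcherd_config(for_service=for_service))


if __name__ == '__main__':
    configure(for_service=True)
    run_service()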

View File

@@ -1,36 +0,0 @@
import django
# dispatcherd publisher logic is likely to be used, but needs manual preload
from dispatcherd.brokers import pg_notify # noqa
# Cache may not be initialized until we are in the worker, so preload here
from channels_redis import core # noqa
from awx import prepare_env
from dispatcherd.utils import resolve_callable
prepare_env()
django.setup() # noqa
from django.conf import settings
# Preload all periodic tasks so their imports will be in shared memory
for name, options in settings.CELERYBEAT_SCHEDULE.items():
resolve_callable(options['task'])
# Preload in-line import from tasks
from awx.main.scheduler.kubernetes import PodManager # noqa
from django.core.cache import cache as django_cache
from django.db import connection
connection.close()
django_cache.close()
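
The removed module above is a forkserver preload hook: it imports everything workers will need up front, then closes the database connection and cache so the forkserver parent holds no live sockets. The same idea exists in the standard library; a small analogy using multiprocessing's forkserver preload, where the preloaded module names are placeholders:

import multiprocessing as mp

# Modules listed here are imported once in the forkserver parent and then
# inherited (copy-on-write) by every worker it forks.
mp.set_forkserver_preload(['json', 'decimal'])


def work(n):
    return n * n


if __name__ == '__main__':
    ctx = mp.get_context('forkserver')
    with ctx.Pool(2) as pool:
        print(pool.map(work, range(4)))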

View File

@@ -88,10 +88,8 @@ class Scheduler:
# internally times are all referenced relative to startup time, add grace period # internally times are all referenced relative to startup time, add grace period
self.global_start = time.time() + 2.0 self.global_start = time.time() + 2.0
def get_and_mark_pending(self, reftime=None): def get_and_mark_pending(self):
if reftime is None: relative_time = time.time() - self.global_start
reftime = time.time() # mostly for tests
relative_time = reftime - self.global_start
to_run = [] to_run = []
for job in self.jobs: for job in self.jobs:
if job.due_to_run(relative_time): if job.due_to_run(relative_time):
@@ -100,10 +98,8 @@ class Scheduler:
job.mark_run(relative_time) job.mark_run(relative_time)
return to_run return to_run
def time_until_next_run(self, reftime=None): def time_until_next_run(self):
if reftime is None: relative_time = time.time() - self.global_start
reftime = time.time() # mostly for tests
relative_time = reftime - self.global_start
next_job = min(self.jobs, key=lambda j: j.next_run) next_job = min(self.jobs, key=lambda j: j.next_run)
delta = next_job.next_run - relative_time delta = next_job.next_run - relative_time
if delta <= 0.1: if delta <= 0.1:
@@ -119,11 +115,10 @@ class Scheduler:
def debug(self, *args, **kwargs): def debug(self, *args, **kwargs):
data = dict() data = dict()
data['title'] = 'Scheduler status' data['title'] = 'Scheduler status'
reftime = time.time()
now = datetime.fromtimestamp(reftime).strftime('%Y-%m-%d %H:%M:%S UTC') now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC') start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
relative_time = reftime - self.global_start relative_time = time.time() - self.global_start
data['started_time'] = start_time data['started_time'] = start_time
data['current_time'] = now data['current_time'] = now
data['current_time_relative'] = round(relative_time, 3) data['current_time_relative'] = round(relative_time, 3)
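
The change above threads an optional reftime through the scheduler so one timestamp, captured before running due jobs, is reused when computing the next sleep, rather than each method calling time.time() on its own. A toy, self-contained version showing why that helps and how a test can pass a fixed reference time; TinyScheduler is illustrative and far simpler than the real Scheduler:

import time


class TinyScheduler:
    """Toy relative-time scheduler with an injectable reference time."""

    def __init__(self, period: float):
        self.global_start = time.time()
        self.period = period
        self.next_run = period

    def get_and_mark_pending(self, reftime=None):
        if reftime is None:
            reftime = time.time()  # mostly for tests
        relative_time = reftime - self.global_start
        due = relative_time >= self.next_run
        if due:
            self.next_run += self.period
        return due

    def time_until_next_run(self, reftime=None):
        if reftime is None:
            reftime = time.time()
        return max(self.next_run - (reftime - self.global_start), 0.1)


# Passing one fixed reftime to both calls makes the pass deterministic:
sched = TinyScheduler(period=20.0)
assert sched.get_and_mark_pending(reftime=sched.global_start + 25.0) is True
assert round(sched.time_until_next_run(reftime=sched.global_start + 25.0)) == 15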

View File

@@ -7,7 +7,6 @@ import time
import traceback import traceback
from datetime import datetime from datetime import datetime
from uuid import uuid4 from uuid import uuid4
import json
import collections import collections
from multiprocessing import Process from multiprocessing import Process
@@ -26,10 +25,7 @@ from ansible_base.lib.logging.runtime import log_excess_runtime
from awx.main.models import UnifiedJob from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper from awx.main.dispatch import reaper
from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
# ansible-runner
from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -37,9 +33,6 @@ else:
logger = logging.getLogger('awx.main.dispatch') logger = logging.getLogger('awx.main.dispatch')
RETIRED_SENTINEL_TASK = "[retired]"
class NoOpResultQueue(object): class NoOpResultQueue(object):
def put(self, item): def put(self, item):
pass pass
@@ -84,17 +77,11 @@ class PoolWorker(object):
self.queue = MPQueue(queue_size) self.queue = MPQueue(queue_size)
self.process = Process(target=target, args=(self.queue, self.finished) + args) self.process = Process(target=target, args=(self.queue, self.finished) + args)
self.process.daemon = True self.process.daemon = True
self.creation_time = time.monotonic()
self.retiring = False
def start(self): def start(self):
self.process.start() self.process.start()
def put(self, body): def put(self, body):
if self.retiring:
uuid = body.get('uuid', 'N/A') if isinstance(body, dict) else 'N/A'
logger.info(f"Worker pid:{self.pid} is retiring. Refusing new task {uuid}.")
raise QueueFull("Worker is retiring and not accepting new tasks") # AutoscalePool.write handles QueueFull
uuid = '?' uuid = '?'
if isinstance(body, dict): if isinstance(body, dict):
if not body.get('uuid'): if not body.get('uuid'):
@@ -113,11 +100,6 @@ class PoolWorker(object):
""" """
self.queue.put('QUIT') self.queue.put('QUIT')
@property
def age(self):
"""Returns the current age of the worker in seconds."""
return time.monotonic() - self.creation_time
@property @property
def pid(self): def pid(self):
return self.process.pid return self.process.pid
@@ -164,8 +146,6 @@ class PoolWorker(object):
# the purpose of self.managed_tasks is to just track internal # the purpose of self.managed_tasks is to just track internal
# state of which events are *currently* being processed. # state of which events are *currently* being processed.
logger.warning('Event UUID {} appears to have been duplicated.'.format(uuid)) logger.warning('Event UUID {} appears to have been duplicated.'.format(uuid))
if self.retiring:
self.managed_tasks[RETIRED_SENTINEL_TASK] = {'task': RETIRED_SENTINEL_TASK}
@property @property
def current_task(self): def current_task(self):
@@ -281,8 +261,6 @@ class WorkerPool(object):
'{% for w in workers %}' '{% for w in workers %}'
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}' '. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
' sent={{ w.messages_sent }}' ' sent={{ w.messages_sent }}'
' age={{ "%.0f"|format(w.age) }}s'
' retiring={{ w.retiring }}'
'{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}' '{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
' qsize={{ w.managed_tasks|length }}' ' qsize={{ w.managed_tasks|length }}'
' rss={{ w.mb }}MB' ' rss={{ w.mb }}MB'
@@ -329,41 +307,6 @@ class WorkerPool(object):
logger.exception('could not kill {}'.format(worker.pid)) logger.exception('could not kill {}'.format(worker.pid))
def get_auto_max_workers():
"""Method we normally rely on to get max_workers
Uses almost same logic as Instance.local_health_check
The important thing is to be MORE than Instance.capacity
so that the task-manager does not over-schedule this node
Ideally we would just use the capacity from the database plus reserve workers,
but this poses some bootstrap problems where OCP task containers
register themselves after startup
"""
# Get memory from ansible-runner
total_memory_gb = get_mem_in_bytes()
# This may replace memory calculation with a user override
corrected_memory = get_corrected_memory(total_memory_gb)
# Get same number as max forks based on memory, this function takes memory as bytes
mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)
# Follow same process for CPU capacity constraint
cpu_count = get_cpu_count()
corrected_cpu = get_corrected_cpu(cpu_count)
cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)
# Here is what is different from health checks,
auto_max = max(mem_capacity, cpu_capacity)
# add magic number of extra workers to ensure
# we have a few extra workers to run the heartbeat
auto_max += 7
return auto_max
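
The removed helper above sizes the dispatcher pool from the larger of the memory-based and CPU-based fork capacities, then adds a small reserve so heartbeat tasks always find a free worker. A rough, hypothetical version of that shape using psutil; the per-fork constants below are made-up illustrations, not the values AWX's capacity functions compute:

import psutil

MEM_PER_FORK_BYTES = 100 * 2**20   # assumed ~100 MB per worker (illustrative)
FORKS_PER_CPU = 4                  # assumed forks per CPU core (illustrative)
RESERVE_WORKERS = 7                # headroom for periodic tasks like heartbeats


def rough_max_workers() -> int:
    mem_capacity = psutil.virtual_memory().total // MEM_PER_FORK_BYTES
    cpu_capacity = (psutil.cpu_count() or 1) * FORKS_PER_CPU
    return max(mem_capacity, cpu_capacity) + RESERVE_WORKERS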
class AutoscalePool(WorkerPool): class AutoscalePool(WorkerPool):
""" """
An extended pool implementation that automatically scales workers up and An extended pool implementation that automatically scales workers up and
@@ -374,13 +317,22 @@ class AutoscalePool(WorkerPool):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
self.max_workers = kwargs.pop('max_workers', None) self.max_workers = kwargs.pop('max_workers', None)
self.max_worker_lifetime_seconds = kwargs.pop(
'max_worker_lifetime_seconds', getattr(settings, 'WORKER_MAX_LIFETIME_SECONDS', 14400)
) # Default to 4 hours
super(AutoscalePool, self).__init__(*args, **kwargs) super(AutoscalePool, self).__init__(*args, **kwargs)
if self.max_workers is None: if self.max_workers is None:
self.max_workers = get_auto_max_workers() settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
if settings_absmem is not None:
# There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
else:
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
# Get same number as max forks based on memory, this function takes memory as bytes
self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
# add magic prime number of extra workers to ensure
# we have a few extra workers to run the heartbeat
self.max_workers += 7
# max workers can't be less than min_workers # max workers can't be less than min_workers
self.max_workers = max(self.min_workers, self.max_workers) self.max_workers = max(self.min_workers, self.max_workers)
@@ -394,9 +346,6 @@ class AutoscalePool(WorkerPool):
self.scale_up_ct = 0 self.scale_up_ct = 0
self.worker_count_max = 0 self.worker_count_max = 0
# last time we wrote current tasks, to avoid too much log spam
self.last_task_list_log = time.monotonic()
def produce_subsystem_metrics(self, metrics_object): def produce_subsystem_metrics(self, metrics_object):
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct) metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers)) metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
@@ -436,7 +385,6 @@ class AutoscalePool(WorkerPool):
""" """
orphaned = [] orphaned = []
for w in self.workers[::]: for w in self.workers[::]:
is_retirement_age = self.max_worker_lifetime_seconds is not None and w.age > self.max_worker_lifetime_seconds
if not w.alive: if not w.alive:
# the worker process has exited # the worker process has exited
# 1. take the task it was running and enqueue the error # 1. take the task it was running and enqueue the error
@@ -445,10 +393,6 @@ class AutoscalePool(WorkerPool):
# send them to another worker # send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode)) logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task: if w.current_task:
if w.current_task == {'task': RETIRED_SENTINEL_TASK}:
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
self.workers.remove(w)
continue
if w.current_task != 'QUIT': if w.current_task != 'QUIT':
try: try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']): for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
@@ -459,7 +403,6 @@ class AutoscalePool(WorkerPool):
logger.warning(f'Worker was told to quit but has not, pid={w.pid}') logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
orphaned.extend(w.orphaned_tasks) orphaned.extend(w.orphaned_tasks)
self.workers.remove(w) self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers: elif w.idle and len(self.workers) > self.min_workers:
# the process has an empty queue (it's idle) and we have # the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min) # more processes in the pool than we need (> min)
@@ -468,22 +411,6 @@ class AutoscalePool(WorkerPool):
logger.debug('scaling down worker pid:{}'.format(w.pid)) logger.debug('scaling down worker pid:{}'.format(w.pid))
w.quit() w.quit()
self.workers.remove(w) self.workers.remove(w)
elif w.idle and is_retirement_age:
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
w.quit()
self.workers.remove(w)
elif is_retirement_age and not w.retiring and not w.idle:
logger.info(
f"Worker pid:{w.pid} (age: {w.age:.0f}s) exceeded max lifetime ({self.max_worker_lifetime_seconds:.0f}s). "
"Signaling for graceful retirement."
)
# Send QUIT signal; worker will finish current task then exit.
w.quit()
# mark as retiring to reject any future tasks that might be assigned in meantime
w.retiring = True
if w.alive: if w.alive:
# if we discover a task manager invocation that's been running # if we discover a task manager invocation that's been running
# too long, reap it (because otherwise it'll just hold the postgres # too long, reap it (because otherwise it'll just hold the postgres
@@ -536,14 +463,6 @@ class AutoscalePool(WorkerPool):
self.worker_count_max = new_worker_ct self.worker_count_max = new_worker_ct
return ret return ret
@staticmethod
def fast_task_serialization(current_task):
try:
return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
except Exception:
# just make sure this does not make things worse
return str(current_task)
def write(self, preferred_queue, body): def write(self, preferred_queue, body):
if 'guid' in body: if 'guid' in body:
set_guid(body['guid']) set_guid(body['guid'])
@@ -565,15 +484,6 @@ class AutoscalePool(WorkerPool):
if isinstance(body, dict): if isinstance(body, dict):
task_name = body.get('task') task_name = body.get('task')
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}') logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
# Once every 10 seconds write out task list for debugging
if time.monotonic() - self.last_task_list_log >= 10.0:
task_counts = {}
for worker in self.workers:
task_slug = self.fast_task_serialization(worker.current_task)
task_counts.setdefault(task_slug, 0)
task_counts[task_slug] += 1
logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
self.last_task_list_log = time.monotonic()
return super(AutoscalePool, self).write(preferred_queue, body) return super(AutoscalePool, self).write(preferred_queue, body)
except Exception: except Exception:
for conn in connections.all(): for conn in connections.all():

View File

@@ -4,9 +4,6 @@ import json
import time import time
from uuid import uuid4 from uuid import uuid4
from dispatcherd.publish import submit_task
from dispatcherd.utils import resolve_callable
from django_guid import get_guid from django_guid import get_guid
from django.conf import settings from django.conf import settings
@@ -96,19 +93,6 @@ class task:
@classmethod @classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw): def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
try:
from flags.state import flag_enabled
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
# At this point we have the import string, and submit_task wants the method, so back to that
actual_task = resolve_callable(cls.name)
return submit_task(actual_task, args=args, kwargs=kwargs, queue=queue, uuid=uuid, **kw)
except Exception:
logger.exception(f"[DISPATCHER] Failed to check for alternative dispatcherd implementation for {cls.name}")
# Continue with original implementation if anything fails
pass
# Original implementation follows
queue = queue or getattr(cls.queue, 'im_func', cls.queue) queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue: if not queue:
msg = f'{cls.name}: Queue value required and may not be None' msg = f'{cls.name}: Queue value required and may not be None'
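
The removed block above is a feature-flag escape hatch: when FEATURE_DISPATCHERD_ENABLED is on, the task goes to dispatcherd's submit_task, and any failure falls back to the legacy queueing path. A condensed sketch of that guard, using only names that appear in this diff; the wrapper function itself is illustrative:

import logging

from flags.state import flag_enabled
from dispatcherd.publish import submit_task
from dispatcherd.utils import resolve_callable

logger = logging.getLogger(__name__)


def maybe_submit_via_dispatcherd(task_name, args=None, kwargs=None, queue=None, uuid=None, **kw):
    """Return the dispatcherd result, or None to signal 'use the legacy path'."""
    try:
        if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
            actual_task = resolve_callable(task_name)
            return submit_task(actual_task, args=args, kwargs=kwargs, queue=queue, uuid=uuid, **kw)
    except Exception:
        logger.exception(f'Failed dispatcherd submission for {task_name}; falling back')
    return None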

View File

@@ -15,7 +15,6 @@ from datetime import timedelta
from django import db from django import db
from django.conf import settings from django.conf import settings
import redis.exceptions
from ansible_base.lib.logging.runtime import log_excess_runtime from ansible_base.lib.logging.runtime import log_excess_runtime
@@ -131,13 +130,10 @@ class AWXConsumerBase(object):
@log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2) @log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
def record_statistics(self): def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second if time.time() - self.last_stats > 1: # buffer stat recording to once per second
save_data = self.pool.debug()
try: try:
self.redis.set(f'awx_{self.name}_statistics', save_data) self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
except redis.exceptions.ConnectionError as exc:
logger.warning(f'Redis connection error saving {self.name} status data:\n{exc}\nmissed data:\n{save_data}')
except Exception: except Exception:
logger.exception(f"Unknown redis error saving {self.name} status data:\nmissed data:\n{save_data}") logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
self.last_stats = time.time() self.last_stats = time.time()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
@@ -193,10 +189,7 @@ class AWXConsumerPG(AWXConsumerBase):
current_time = time.time() current_time = time.time()
self.pool.produce_subsystem_metrics(self.subsystem_metrics) self.pool.produce_subsystem_metrics(self.subsystem_metrics)
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather)) self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
try: self.subsystem_metrics.pipe_execute()
self.subsystem_metrics.pipe_execute()
except redis.exceptions.ConnectionError as exc:
logger.warning(f'Redis connection error saving dispatcher metrics, error:\n{exc}')
self.listen_cumulative_time = 0.0 self.listen_cumulative_time = 0.0
self.last_metrics_gather = current_time self.last_metrics_gather = current_time
@@ -212,11 +205,7 @@ class AWXConsumerPG(AWXConsumerBase):
except Exception as exc: except Exception as exc:
logger.warning(f'Failed to save dispatcher statistics {exc}') logger.warning(f'Failed to save dispatcher statistics {exc}')
# Everything benchmarks to the same original time, so that skews due to for job in self.scheduler.get_and_mark_pending():
# runtime of the actions, themselves, do not mess up scheduling expectations
reftime = time.time()
for job in self.scheduler.get_and_mark_pending(reftime=reftime):
if 'control' in job.data: if 'control' in job.data:
try: try:
job.data['control']() job.data['control']()
@@ -233,12 +222,12 @@ class AWXConsumerPG(AWXConsumerBase):
self.listen_start = time.time() self.listen_start = time.time()
return self.scheduler.time_until_next_run(reftime=reftime) return self.scheduler.time_until_next_run()
def run(self, *args, **kwargs): def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs) super(AWXConsumerPG, self).run(*args, **kwargs)
logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}") logger.info(f"Running worker {self.name} listening to queues {self.queues}")
init = False init = False
while True: while True:

View File

@@ -86,7 +86,6 @@ class CallbackBrokerWorker(BaseWorker):
return os.getpid() return os.getpid()
def read(self, queue): def read(self, queue):
has_redis_error = False
try: try:
res = self.redis.blpop(self.queue_name, timeout=1) res = self.redis.blpop(self.queue_name, timeout=1)
if res is None: if res is None:
@@ -96,21 +95,14 @@ class CallbackBrokerWorker(BaseWorker):
self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1) self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1) self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
return json.loads(res[1]) return json.loads(res[1])
except redis.exceptions.ConnectionError as exc:
# Low noise log, because very common and many workers will write this
logger.error(f"redis connection error: {exc}")
has_redis_error = True
time.sleep(5)
except redis.exceptions.RedisError: except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis") logger.exception("encountered an error communicating with redis")
has_redis_error = True
time.sleep(1) time.sleep(1)
except (json.JSONDecodeError, KeyError): except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis") logger.exception("failed to decode JSON message from redis")
finally: finally:
if not has_redis_error: self.record_statistics()
self.record_statistics() self.record_read_metrics()
self.record_read_metrics()
return {'event': 'FLUSH'} return {'event': 'FLUSH'}
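
The read() change above separates Redis connection errors, which are common and fleet-wide, so they get a short error log, a longer backoff, and skip the per-read statistics, from other Redis errors. A stripped-down sketch of the same blpop loop shape with illustrative names:

import json
import logging
import time

import redis

logger = logging.getLogger(__name__)


def read_one(conn: redis.Redis, queue_name: str) -> dict:
    """Pop one JSON event, or return a FLUSH sentinel on timeout or error."""
    try:
        res = conn.blpop(queue_name, timeout=1)
        if res is not None:
            return json.loads(res[1])
    except redis.exceptions.ConnectionError as exc:
        logger.error(f'redis connection error: {exc}')
        time.sleep(5)   # longer backoff; many workers likely see the same outage
    except redis.exceptions.RedisError:
        logger.exception('encountered an error communicating with redis')
        time.sleep(1)
    except (json.JSONDecodeError, KeyError):
        logger.exception('failed to decode JSON message from redis')
    return {'event': 'FLUSH'}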

View File

@@ -1,14 +0,0 @@
from dispatcherd.worker.task import TaskWorker
from django.db import connection
class AWXTaskWorker(TaskWorker):
def on_start(self) -> None:
"""Get worker connected so that first task it gets will be worked quickly"""
connection.ensure_connection()
def pre_task(self, message) -> None:
"""This should remedy bad connections that can not fix themselves"""
connection.close_if_unusable_or_obsolete()

View File

@@ -38,12 +38,5 @@ class PostRunError(Exception):
super(PostRunError, self).__init__(msg) super(PostRunError, self).__init__(msg)
class PolicyEvaluationError(Exception):
def __init__(self, msg, status='failed', tb=''):
self.status = status
self.tb = tb
super(PolicyEvaluationError, self).__init__(msg)
class ReceptorNodeNotFound(RuntimeError): class ReceptorNodeNotFound(RuntimeError):
pass pass

View File

@@ -14,14 +14,21 @@ from jinja2.exceptions import UndefinedError, TemplateSyntaxError, SecurityError
# Django # Django
from django.core import exceptions as django_exceptions from django.core import exceptions as django_exceptions
from django.core.serializers.json import DjangoJSONEncoder from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.signals import m2m_changed, post_save from django.db.models.signals import (
post_save,
post_delete,
)
from django.db.models.signals import m2m_changed
from django.db import models from django.db import models
from django.db.models.fields.related import lazy_related_operation
from django.db.models.fields.related_descriptors import ( from django.db.models.fields.related_descriptors import (
ReverseOneToOneDescriptor, ReverseOneToOneDescriptor,
ForwardManyToOneDescriptor, ForwardManyToOneDescriptor,
ManyToManyDescriptor, ManyToManyDescriptor,
ReverseManyToOneDescriptor,
create_forward_many_to_many_manager, create_forward_many_to_many_manager,
) )
from django.utils.encoding import smart_str
from django.db.models import JSONField from django.db.models import JSONField
from django.utils.functional import cached_property from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
@@ -47,6 +54,7 @@ __all__ = [
'ImplicitRoleField', 'ImplicitRoleField',
'SmartFilterField', 'SmartFilterField',
'OrderedManyToManyField', 'OrderedManyToManyField',
'update_role_parentage_for_instance',
'is_implicit_parent', 'is_implicit_parent',
] ]
@@ -138,6 +146,34 @@ class AutoOneToOneField(models.OneToOneField):
setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related)) setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related))
def resolve_role_field(obj, field):
ret = []
field_components = field.split('.', 1)
if hasattr(obj, field_components[0]):
obj = getattr(obj, field_components[0])
else:
return []
if obj is None:
return []
if len(field_components) == 1:
# use extremely generous duck typing to accommodate all possible forms
# of the model that may be used during various migrations
if obj._meta.model_name != 'role' or obj._meta.app_label != 'main':
raise Exception(smart_str('{} refers to a {}, not a Role'.format(field, type(obj))))
ret.append(obj.id)
else:
if type(obj) is ManyToManyDescriptor:
for o in obj.all():
ret += resolve_role_field(o, field_components[1])
else:
ret += resolve_role_field(obj, field_components[1])
return ret
def is_implicit_parent(parent_role, child_role): def is_implicit_parent(parent_role, child_role):
""" """
Determine if the parent_role is an implicit parent as defined by Determine if the parent_role is an implicit parent as defined by
@@ -174,6 +210,34 @@ def is_implicit_parent(parent_role, child_role):
return False return False
def update_role_parentage_for_instance(instance):
"""update_role_parentage_for_instance
updates the parents listing for all the roles
of a given instance if they have changed
"""
parents_removed = set()
parents_added = set()
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
cur_role = getattr(instance, implicit_role_field.name)
original_parents = set(json.loads(cur_role.implicit_parents))
new_parents = implicit_role_field._resolve_parent_roles(instance)
removals = original_parents - new_parents
if removals:
cur_role.parents.remove(*list(removals))
parents_removed.add(cur_role.pk)
additions = new_parents - original_parents
if additions:
cur_role.parents.add(*list(additions))
parents_added.add(cur_role.pk)
new_parents_list = list(new_parents)
new_parents_list.sort()
new_parents_json = json.dumps(new_parents_list)
if cur_role.implicit_parents != new_parents_json:
cur_role.implicit_parents = new_parents_json
cur_role.save(update_fields=['implicit_parents'])
return (parents_added, parents_removed)
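
The helper added above syncs a role's parents by set difference: anything in the stored parent set but missing from the recomputed one is removed, anything new is added, and the cached JSON is rewritten only when it actually changed. The core of that reconciliation in isolation, as a generic sketch not tied to the Role model:

import json


def reconcile(stored_json: str, desired: set):
    """Return (to_add, to_remove, new_json_or_None) for a parent-id set."""
    original = set(json.loads(stored_json))
    to_remove = original - desired
    to_add = desired - original
    new_json = json.dumps(sorted(desired))
    return to_add, to_remove, (new_json if new_json != stored_json else None)


# Example: stored [1, 2], recomputed {2, 3} -> add {3}, remove {1}, rewrite cache.
print(reconcile('[1, 2]', {2, 3}))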
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor): class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
pass pass
@@ -205,6 +269,65 @@ class ImplicitRoleField(models.ForeignKey):
getattr(cls, '__implicit_role_fields').append(self) getattr(cls, '__implicit_role_fields').append(self)
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save') post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
post_delete.connect(self._post_delete, cls, True, dispatch_uid='implicit-role-post-delete')
function = lambda local, related, field: self.bind_m2m_changed(field, related, local)
lazy_related_operation(function, cls, "self", field=self)
def bind_m2m_changed(self, _self, _role_class, cls):
if not self.parent_role:
return
field_names = self.parent_role
if type(field_names) is not list:
field_names = [field_names]
for field_name in field_names:
if field_name.startswith('singleton:'):
continue
field_name, sep, field_attr = field_name.partition('.')
# Non existent fields will occur if ever a parent model is
# moved inside a migration, needed for job_template_organization_field
# migration in particular
# consistency is assured by unit test awx.main.tests.functional
field = getattr(cls, field_name, None)
if field and type(field) is ReverseManyToOneDescriptor or type(field) is ManyToManyDescriptor:
if '.' in field_attr:
raise Exception('Referencing deep roles through ManyToMany fields is unsupported.')
if type(field) is ReverseManyToOneDescriptor:
sender = field.through
else:
sender = field.related.through
reverse = type(field) is ManyToManyDescriptor
m2m_changed.connect(self.m2m_update(field_attr, reverse), sender, weak=False)
def m2m_update(self, field_attr, _reverse):
def _m2m_update(instance, action, model, pk_set, reverse, **kwargs):
if action == 'post_add' or action == 'pre_remove':
if _reverse:
reverse = not reverse
if reverse:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, field_attr).children.add(getattr(obj, self.name))
if action == 'pre_remove':
getattr(instance, field_attr).children.remove(getattr(obj, self.name))
else:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, self.name).parents.add(getattr(obj, field_attr))
if action == 'pre_remove':
getattr(instance, self.name).parents.remove(getattr(obj, field_attr))
return _m2m_update
def _post_save(self, instance, created, *args, **kwargs): def _post_save(self, instance, created, *args, **kwargs):
Role_ = utils.get_current_apps().get_model('main', 'Role') Role_ = utils.get_current_apps().get_model('main', 'Role')
@@ -214,24 +337,68 @@ class ImplicitRoleField(models.ForeignKey):
Model = utils.get_current_apps().get_model('main', instance.__class__.__name__) Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
latest_instance = Model.objects.get(pk=instance.pk) latest_instance = Model.objects.get(pk=instance.pk)
# Create any missing role objects # Avoid circular import
missing_roles = [] from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
cur_role = getattr(latest_instance, implicit_role_field.name, None)
if cur_role is None:
missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
if len(missing_roles) > 0: with batch_role_ancestor_rebuilding():
Role_.objects.bulk_create(missing_roles) # Create any missing role objects
updates = {} missing_roles = []
role_ids = [] for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id): cur_role = getattr(latest_instance, implicit_role_field.name, None)
setattr(latest_instance, role.role_field, role) if cur_role is None:
updates[role.role_field] = role.id missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
role_ids.append(role.id)
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
instance.refresh_from_db() if len(missing_roles) > 0:
Role_.objects.bulk_create(missing_roles)
updates = {}
role_ids = []
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
setattr(latest_instance, role.role_field, role)
updates[role.role_field] = role.id
role_ids.append(role.id)
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
Role.rebuild_role_ancestor_list(role_ids, [])
update_role_parentage_for_instance(latest_instance)
instance.refresh_from_db()
def _resolve_parent_roles(self, instance):
if not self.parent_role:
return set()
paths = self.parent_role if type(self.parent_role) is list else [self.parent_role]
parent_roles = set()
for path in paths:
if path.startswith("singleton:"):
singleton_name = path[10:]
Role_ = utils.get_current_apps().get_model('main', 'Role')
qs = Role_.objects.filter(singleton_name=singleton_name)
if qs.count() >= 1:
role = qs[0]
else:
role = Role_.objects.create(singleton_name=singleton_name, role_field=singleton_name)
parents = [role.id]
else:
parents = resolve_role_field(instance, path)
for parent in parents:
parent_roles.add(parent)
return parent_roles
def _post_delete(self, instance, *args, **kwargs):
role_ids = []
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
role_ids.append(getattr(instance, implicit_role_field.name + '_id'))
Role_ = utils.get_current_apps().get_model('main', 'Role')
child_ids = [x for x in Role_.parents.through.objects.filter(to_role_id__in=role_ids).distinct().values_list('from_role_id', flat=True)]
Role_.objects.filter(id__in=role_ids).delete()
# Avoid circular import
from awx.main.models.rbac import Role
Role.rebuild_role_ancestor_list([], child_ids)
class SmartFilterField(models.TextField): class SmartFilterField(models.TextField):

View File

@@ -4,7 +4,6 @@
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from django.db import transaction from django.db import transaction
from crum import impersonate from crum import impersonate
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields from awx.main.signals import disable_computed_fields
@@ -17,9 +16,8 @@ class Command(BaseCommand):
def handle(self, *args, **kwargs): def handle(self, *args, **kwargs):
# Wrap the operation in an atomic block, so we do not on accident # Wrap the operation in an atomic block, so we do not on accident
# create the organization but not create the project, etc. # create the organization but not create the project, etc.
with no_reverse_sync(): with transaction.atomic():
with transaction.atomic(): self._handle()
self._handle()
def _handle(self): def _handle(self):
changed = False changed = False

View File

@@ -33,7 +33,6 @@ from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.models.rbac import batch_role_ancestor_rebuilding from awx.main.models.rbac import batch_role_ancestor_rebuilding
from awx.main.utils import ignore_inventory_computed_fields, get_licenser from awx.main.utils import ignore_inventory_computed_fields, get_licenser
from awx.main.utils.execution_environments import get_default_execution_environment from awx.main.utils.execution_environments import get_default_execution_environment
from awx.main.utils.inventory_vars import update_group_variables
from awx.main.signals import disable_activity_stream from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
@@ -458,19 +457,19 @@ class Command(BaseCommand):
""" """
Update inventory variables from "all" group. Update inventory variables from "all" group.
""" """
# TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other.
# issue for this: https://github.com/ansible/awx/issues/11623
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars: if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
# NOTE: we had to add a exception case to not merge variables # NOTE: we had to add a exception case to not merge variables
# to make constructed inventory coherent # to make constructed inventory coherent
db_variables = self.all_group.variables db_variables = self.all_group.variables
else: else:
db_variables = update_group_variables( db_variables = self.inventory.variables_dict
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk). db_variables.update(self.all_group.variables)
newvars=self.all_group.variables,
dbvars=self.inventory.variables_dict,
invsrc_id=self.inventory_source.id,
inventory_id=self.inventory.id,
overwrite_vars=self.overwrite_vars,
)
if db_variables != self.inventory.variables_dict: if db_variables != self.inventory.variables_dict:
self.inventory.variables = json.dumps(db_variables) self.inventory.variables = json.dumps(db_variables)
self.inventory.save(update_fields=['variables']) self.inventory.save(update_fields=['variables'])
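
The change above routes the "all" group variables through update_group_variables, which appears to track per-source variable history (see the InventoryGroupVariablesWithHistory migration later in this diff), instead of the old behavior of merging the fresh variables over the inventory's stored dict. For reference, the old merge semantics amounted to roughly this simplified sketch; db_vars and incoming are illustrative names:

def merged_all_group_vars(db_vars: dict, incoming: dict, overwrite_vars: bool) -> dict:
    if overwrite_vars:
        # The source wins outright; stored inventory variables are discarded.
        return dict(incoming)
    # Otherwise start from what is stored and let incoming keys override.
    merged = dict(db_vars)
    merged.update(incoming)
    return merged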

View File

@@ -1,13 +1,10 @@
# Copyright (c) 2015 Ansible, Inc. # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import redis
from django.conf import settings from django.conf import settings
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand
import redis.exceptions
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
from awx.main.dispatch.control import Control from awx.main.dispatch.control import Control
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
@@ -30,10 +27,7 @@ class Command(BaseCommand):
return return
consumer = None consumer = None
try: CallbackReceiverMetricsServer().start()
CallbackReceiverMetricsServer().start()
except redis.exceptions.ConnectionError as exc:
raise CommandError(f'Callback receiver could not connect to redis, error: {exc}')
try: try:
consumer = AWXConsumerRedis( consumer = AWXConsumerRedis(

View File

@@ -2,21 +2,11 @@
# All Rights Reserved. # All Rights Reserved.
import logging import logging
import yaml import yaml
import os
import redis
from django.conf import settings from django.conf import settings
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand
from flags.state import flag_enabled
from dispatcherd.factories import get_control_from_settings
from dispatcherd import run_service
from dispatcherd.config import setup as dispatcher_setup
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.config import get_dispatcherd_config
from awx.main.dispatch.control import Control from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -48,44 +38,18 @@ class Command(BaseCommand):
), ),
) )
def verify_dispatcherd_socket(self):
if not os.path.exists(settings.DISPATCHERD_DEBUGGING_SOCKFILE):
raise CommandError('Dispatcher is not running locally')
def handle(self, *arg, **options): def handle(self, *arg, **options):
if options.get('status'): if options.get('status'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): print(Control('dispatcher').status())
ctl = get_control_from_settings() return
running_data = ctl.control_with_reply('status')
if len(running_data) != 1:
raise CommandError('Did not receive expected number of replies')
print(yaml.dump(running_data[0], default_flow_style=False))
return
else:
print(Control('dispatcher').status())
return
if options.get('schedule'): if options.get('schedule'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): print(Control('dispatcher').schedule())
print('NOT YET IMPLEMENTED')
return
else:
print(Control('dispatcher').schedule())
return return
if options.get('running'): if options.get('running'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): print(Control('dispatcher').running())
ctl = get_control_from_settings() return
running_data = ctl.control_with_reply('running')
print(yaml.dump(running_data, default_flow_style=False))
return
else:
print(Control('dispatcher').running())
return
if options.get('reload'): if options.get('reload'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): return Control('dispatcher').control({'control': 'reload'})
print('NOT YET IMPLEMENTED')
return
else:
return Control('dispatcher').control({'control': 'reload'})
if options.get('cancel'): if options.get('cancel'):
cancel_str = options.get('cancel') cancel_str = options.get('cancel')
try: try:
@@ -94,36 +58,18 @@ class Command(BaseCommand):
cancel_data = [cancel_str] cancel_data = [cancel_str]
if not isinstance(cancel_data, list): if not isinstance(cancel_data, list):
cancel_data = [cancel_str] cancel_data = [cancel_str]
print(Control('dispatcher').cancel(cancel_data))
return
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): consumer = None
ctl = get_control_from_settings()
results = []
for task_id in cancel_data:
# For each task UUID, send an individual cancel command
result = ctl.control_with_reply('cancel', data={'uuid': task_id})
results.append(result)
print(yaml.dump(results, default_flow_style=False))
return
else:
print(Control('dispatcher').cancel(cancel_data))
return
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'): DispatcherMetricsServer().start()
dispatcher_setup(get_dispatcherd_config(for_service=True))
run_service()
else:
consumer = None
try: try:
DispatcherMetricsServer().start() queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
except redis.exceptions.ConnectionError as exc: consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}') consumer.run()
except KeyboardInterrupt:
try: logger.debug('Terminating Task Dispatcher')
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()] if consumer:
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE) consumer.stop()
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
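
With the feature flag in place, each management sub-command above gains two paths: dispatcherd's control channel (get_control_from_settings plus control_with_reply) or the legacy pg_notify Control helper. A condensed sketch of the status branch, using only names that appear in this diff; the wrapper function is illustrative:

import yaml
from flags.state import flag_enabled
from dispatcherd.factories import get_control_from_settings

from awx.main.dispatch.control import Control


def print_dispatcher_status() -> None:
    if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
        ctl = get_control_from_settings()
        replies = ctl.control_with_reply('status')
        print(yaml.dump(replies[0] if replies else {}, default_flow_style=False))
    else:
        print(Control('dispatcher').status())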

View File

@@ -1,61 +0,0 @@
# Generated by Django 4.2.18 on 2025-02-27 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('main', '0197_add_opa_query_path')]
operations = [
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('vmware_esxi', 'VMware ESXi'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
('openshift_virtualization', 'OpenShift Virtualization'),
],
default=None,
max_length=32,
),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(
choices=[
('file', 'File, Directory or Script'),
('constructed', 'Template additional groups and hostvars at runtime'),
('scm', 'Sourced from a Project'),
('ec2', 'Amazon EC2'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('vmware', 'VMware vCenter'),
('vmware_esxi', 'VMware ESXi'),
('satellite6', 'Red Hat Satellite 6'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('controller', 'Red Hat Ansible Automation Platform'),
('insights', 'Red Hat Insights'),
('terraform', 'Terraform State'),
('openshift_virtualization', 'OpenShift Virtualization'),
],
default=None,
max_length=32,
),
),
]


@@ -0,0 +1,15 @@
# Generated by Django 4.2.10 on 2024-09-16 10:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0197_add_opa_query_path'),
]
operations = [
migrations.DeleteModel(
name='Profile',
),
]


@@ -1,32 +0,0 @@
# Generated by Django 4.2.20 on 2025-04-24 09:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0198_alter_inventorysource_source_and_more'),
]
operations = [
migrations.CreateModel(
name='InventoryGroupVariablesWithHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variables', models.JSONField()),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.group')),
(
'inventory',
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.inventory'),
),
],
),
migrations.AddConstraint(
model_name='inventorygroupvariableswithhistory',
constraint=models.UniqueConstraint(
fields=('inventory', 'group'), name='unique_inventory_group', violation_error_message='Inventory/Group combination must be unique.'
),
),
]


@@ -0,0 +1,26 @@
# Generated by Django 4.2.10 on 2024-09-16 15:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0198_delete_profile'),
]
operations = [
# delete all sso application migrations
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';"),
# delete all sso application content group permissions
migrations.RunSQL(
"DELETE FROM auth_group_permissions "
"WHERE permission_id IN "
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));"
),
# delete all sso application content permissions
migrations.RunSQL("DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');"),
# delete sso application content type
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';"),
# drop sso application created table
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;"),
]


@@ -0,0 +1,23 @@
# Generated by Django 4.2.10 on 2024-10-22 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0199_remove_sso_app_content'),
]
operations = [
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default=None, max_length=32),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default=None, max_length=32),
),
]


@@ -1,56 +0,0 @@
# Generated by Django 4.2.20 on 2025-04-22 15:54
import logging
from django.db import migrations, models
from awx.main.migrations._db_constraints import _rename_duplicates
logger = logging.getLogger(__name__)
def rename_jts(apps, schema_editor):
cls = apps.get_model('main', 'JobTemplate')
_rename_duplicates(cls)
def rename_projects(apps, schema_editor):
cls = apps.get_model('main', 'Project')
_rename_duplicates(cls)
def change_inventory_source_org_unique(apps, schema_editor):
cls = apps.get_model('main', 'InventorySource')
r = cls.objects.update(org_unique=False)
logger.info(f'Set database constraint rule for {r} inventory source objects')
def rename_wfjt(apps, schema_editor):
cls = apps.get_model('main', 'WorkflowJobTemplate')
_rename_duplicates(cls)
class Migration(migrations.Migration):
dependencies = [
('main', '0199_inventorygroupvariableswithhistory_and_more'),
]
operations = [
migrations.RunPython(rename_jts, migrations.RunPython.noop),
migrations.RunPython(rename_projects, migrations.RunPython.noop),
migrations.AddField(
model_name='unifiedjobtemplate',
name='org_unique',
field=models.BooleanField(blank=True, default=True, editable=False, help_text='Used internally to selectively enforce database constraint on name'),
),
migrations.RunPython(rename_wfjt, migrations.RunPython.noop),
migrations.RunPython(change_inventory_source_org_unique, migrations.RunPython.noop),
migrations.AddConstraint(
model_name='unifiedjobtemplate',
constraint=models.UniqueConstraint(
condition=models.Q(('org_unique', True)), fields=('polymorphic_ctype', 'name', 'organization'), name='ujt_hard_name_constraint'
),
),
]


@@ -0,0 +1,39 @@
# Generated by Django 4.2.10 on 2024-10-24 14:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0200_alter_inventorysource_source_and_more'),
]
operations = [
migrations.AlterUniqueTogether(
name='oauth2application',
unique_together=None,
),
migrations.RemoveField(
model_name='oauth2application',
name='organization',
),
migrations.RemoveField(
model_name='oauth2application',
name='user',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_access_token',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_application',
),
migrations.DeleteModel(
name='OAuth2AccessToken',
),
migrations.DeleteModel(
name='OAuth2Application',
),
]


@@ -1,26 +0,0 @@
from django.db import migrations
# AWX
from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps
def setup_tower_managed_defaults(apps, schema_editor):
set_current_apps(apps)
CredentialType.setup_tower_managed_defaults(apps)
def setup_rbac_role_system_administrator(apps, schema_editor):
Role = apps.get_model('main', 'Role')
Role.objects.get_or_create(singleton_name='system_administrator', role_field='system_administrator')
class Migration(migrations.Migration):
dependencies = [
('main', '0200_template_name_constraint'),
]
operations = [
migrations.RunPython(setup_tower_managed_defaults),
migrations.RunPython(setup_rbac_role_system_administrator),
]


@@ -1,102 +0,0 @@
# Generated by Django migration for converting Controller role definitions
from ansible_base.rbac.migrations._utils import give_permissions
from django.db import migrations
def convert_controller_role_definitions(apps, schema_editor):
"""
Convert Controller role definitions to regular role definitions:
- Controller Organization Admin -> Organization Admin
- Controller Organization Member -> Organization Member
- Controller Team Admin -> Team Admin
- Controller Team Member -> Team Member
- Controller System Auditor -> Platform Auditor
Then delete the old Controller role definitions.
"""
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
Permission = apps.get_model('dab_rbac', 'DABPermission')
# Mapping of old Controller role names to new role names
role_mappings = {
'Controller Organization Admin': 'Organization Admin',
'Controller Organization Member': 'Organization Member',
'Controller Team Admin': 'Team Admin',
'Controller Team Member': 'Team Member',
}
for old_name, new_name in role_mappings.items():
# Find the old Controller role definition
old_role = RoleDefinition.objects.filter(name=old_name).first()
if not old_role:
continue # Skip if the old role doesn't exist
# Find the new role definition
new_role = RoleDefinition.objects.get(name=new_name)
# Collect all the assignments that need to be migrated
# Group by object (content_type + object_id) to batch the give_permissions calls
assignments_by_object = {}
# Get user assignments
user_assignments = RoleUserAssignment.objects.filter(role_definition=old_role).select_related('object_role')
for assignment in user_assignments:
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
if key not in assignments_by_object:
assignments_by_object[key] = {'users': [], 'teams': []}
assignments_by_object[key]['users'].append(assignment.user)
# Get team assignments
team_assignments = RoleTeamAssignment.objects.filter(role_definition=old_role).select_related('object_role')
for assignment in team_assignments:
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
if key not in assignments_by_object:
assignments_by_object[key] = {'users': [], 'teams': []}
assignments_by_object[key]['teams'].append(assignment.team.id)
# Use give_permissions to create new assignments with the new role definition
for (content_type_id, object_id), data in assignments_by_object.items():
if data['users'] or data['teams']:
give_permissions(
apps,
new_role,
users=data['users'],
teams=data['teams'],
object_id=object_id,
content_type_id=content_type_id,
)
# Delete the old role definition (this will cascade to delete old assignments and ObjectRoles)
old_role.delete()
# Create or get Platform Auditor
auditor_rd, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor',
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
)
if created:
auditor_rd.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
old_rd = RoleDefinition.objects.filter(name='Controller System Auditor').first()
if old_rd:
for assignment in RoleUserAssignment.objects.filter(role_definition=old_rd):
RoleUserAssignment.objects.create(
user=assignment.user,
role_definition=auditor_rd,
)
# Delete the Controller System Auditor role
RoleDefinition.objects.filter(name='Controller System Auditor').delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0201_create_managed_creds'),
]
operations = [
migrations.RunPython(convert_controller_role_definitions),
]


@@ -0,0 +1,44 @@
# Generated by Django 4.2.16 on 2024-12-18 16:05
from django.db import migrations, models
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
class Migration(migrations.Migration):
dependencies = [
('main', '0201_alter_oauth2application_unique_together_and_more'),
]
operations = [
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
]


@@ -0,0 +1,27 @@
# Generated by Django 4.2.16 on 2025-03-11 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0201_delete_token_cleanup_job'),
]
operations = [
migrations.AddField(
model_name='unifiedjob',
name='priority',
field=models.PositiveIntegerField(
default=0,
editable=False,
help_text='Relative priority to other jobs. The higher the number, the higher the priority. Jobs with equivalent priority are started based on available capacity and launch time.',
),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='priority',
field=models.PositiveIntegerField(default=0),
),
]
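
A minimal sketch (not the actual task manager code) of how the field added above could be used to pick pending jobs in start order, following its help_text: higher numbers first, with creation time breaking ties.

from awx.main.models import UnifiedJob

def pending_jobs_in_start_order():
    # '-priority' puts the highest number first; 'created' preserves launch
    # order for jobs with equal priority, as described in the help_text.
    return UnifiedJob.objects.filter(status='pending').order_by('-priority', 'created')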


@@ -1,22 +0,0 @@
import logging
from django.db import migrations
from awx.main.migrations._dab_rbac import consolidate_indirect_user_roles
logger = logging.getLogger('awx.main.migrations')
class Migration(migrations.Migration):
dependencies = [
('main', '0202_convert_controller_role_definitions'),
]
# The DAB RBAC app makes substantial model changes which by change-ordering comes after this
# not including run_before might sometimes work but this enforces a more strict and stable order
# for both applying migrations forwards and backwards
run_before = [("dab_rbac", "0004_remote_permissions_additions")]
operations = [
migrations.RunPython(consolidate_indirect_user_roles, migrations.RunPython.noop),
]


@@ -1,124 +0,0 @@
# Generated by Django 4.2.10 on 2024-09-16 10:22
from django.db import migrations, models
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
# --- START of function merged from 0203_rename_github_app_kind.py ---
def update_github_app_kind(apps, schema_editor):
"""
Updates the 'kind' field for CredentialType records
from 'github_app' to 'github_app_lookup'.
This addresses a change in the entry point key for the GitHub App plugin.
"""
CredentialType = apps.get_model('main', 'CredentialType')
db_alias = schema_editor.connection.alias
CredentialType.objects.using(db_alias).filter(kind='github_app').update(kind='github_app_lookup')
# --- END of function merged from 0203_rename_github_app_kind.py ---
class Migration(migrations.Migration):
dependencies = [
('main', '0203_remove_team_of_teams'),
]
operations = [
migrations.DeleteModel(
name='Profile',
),
# Remove SSO app content
# delete all sso application migrations
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';", reverse_sql=migrations.RunSQL.noop),
# delete all sso application content group permissions
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL(
"DELETE FROM auth_group_permissions "
"WHERE permission_id IN "
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));",
reverse_sql=migrations.RunSQL.noop,
),
# delete all sso application content permissions
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL(
"DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');",
reverse_sql=migrations.RunSQL.noop,
),
# delete sso application content type
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';", reverse_sql=migrations.RunSQL.noop),
# drop sso application created table
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;", reverse_sql=migrations.RunSQL.noop),
# Alter inventory source source field
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default=None, max_length=32),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default=None, max_length=32),
),
# Alter OAuth2Application unique together
migrations.AlterUniqueTogether(
name='oauth2application',
unique_together=None,
),
migrations.RemoveField(
model_name='oauth2application',
name='organization',
),
migrations.RemoveField(
model_name='oauth2application',
name='user',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_access_token',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_application',
),
migrations.DeleteModel(
name='OAuth2AccessToken',
),
migrations.DeleteModel(
name='OAuth2Application',
),
# Delete system token cleanup jobs, because tokens were deleted
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
# --- START of operations merged from 0203_rename_github_app_kind.py ---
migrations.RunPython(update_github_app_kind, migrations.RunPython.noop),
# --- END of operations merged from 0203_rename_github_app_kind.py ---
]


@@ -1,6 +1,5 @@
import logging import logging
logger = logging.getLogger('awx.main.migrations') logger = logging.getLogger('awx.main.migrations')


@@ -1,6 +1,5 @@
import json import json
import logging import logging
from collections import defaultdict
from django.apps import apps as global_apps from django.apps import apps as global_apps
from django.db.models import ForeignKey from django.db.models import ForeignKey
@@ -18,14 +17,7 @@ logger = logging.getLogger('awx.main.migrations._dab_rbac')
def create_permissions_as_operation(apps, schema_editor): def create_permissions_as_operation(apps, schema_editor):
logger.info('Running data migration create_permissions_as_operation')
# NOTE: the DAB ContentType changes adjusted how they fire
# before they would fire on every app config, like contenttypes
create_dab_permissions(global_apps.get_app_config("main"), apps=apps) create_dab_permissions(global_apps.get_app_config("main"), apps=apps)
# This changed to only fire once and do a global creation
# so we need to call it for specifically the dab_rbac app
# multiple calls will not hurt anything
create_dab_permissions(global_apps.get_app_config("dab_rbac"), apps=apps)
""" """
@@ -120,12 +112,7 @@ def get_descendents(f, children_map):
def get_permissions_for_role(role_field, children_map, apps): def get_permissions_for_role(role_field, children_map, apps):
Permission = apps.get_model('dab_rbac', 'DABPermission') Permission = apps.get_model('dab_rbac', 'DABPermission')
try: ContentType = apps.get_model('contenttypes', 'ContentType')
# After migration for remote permissions
ContentType = apps.get_model('dab_rbac', 'DABContentType')
except LookupError:
# If using DAB from before remote permissions are implemented
ContentType = apps.get_model('contenttypes', 'ContentType')
perm_list = [] perm_list = []
for child_field in get_descendents(role_field, children_map): for child_field in get_descendents(role_field, children_map):
@@ -168,15 +155,11 @@ def migrate_to_new_rbac(apps, schema_editor):
This method moves the assigned permissions from the old rbac.py models This method moves the assigned permissions from the old rbac.py models
to the new RoleDefinition and ObjectRole models to the new RoleDefinition and ObjectRole models
""" """
logger.info('Running data migration migrate_to_new_rbac')
Role = apps.get_model('main', 'Role') Role = apps.get_model('main', 'Role')
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition') RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment') RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
Permission = apps.get_model('dab_rbac', 'DABPermission') Permission = apps.get_model('dab_rbac', 'DABPermission')
if Permission.objects.count() == 0:
raise RuntimeError('Running migrate_to_new_rbac requires DABPermission objects created first')
# remove add premissions that are not valid for migrations from old versions # remove add premissions that are not valid for migrations from old versions
for perm_str in ('add_organization', 'add_jobtemplate'): for perm_str in ('add_organization', 'add_jobtemplate'):
perm = Permission.objects.filter(codename=perm_str).first() perm = Permission.objects.filter(codename=perm_str).first()
@@ -256,14 +239,11 @@ def migrate_to_new_rbac(apps, schema_editor):
# Create new replacement system auditor role # Create new replacement system auditor role
new_system_auditor, created = RoleDefinition.objects.get_or_create( new_system_auditor, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor', name='Controller System Auditor',
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True}, defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
) )
new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view'))) new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
if created:
logger.info(f'Created RoleDefinition {new_system_auditor.name} pk={new_system_auditor.pk} with {new_system_auditor.permissions.count()} permissions')
# migrate is_system_auditor flag, because it is no longer handled by a system role # migrate is_system_auditor flag, because it is no longer handled by a system role
old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first() old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first()
if old_system_auditor: if old_system_auditor:
@@ -292,9 +272,8 @@ def get_or_create_managed(name, description, ct, permissions, RoleDefinition):
def setup_managed_role_definitions(apps, schema_editor): def setup_managed_role_definitions(apps, schema_editor):
""" """
Idempotent method to create or sync the managed role definitions Idepotent method to create or sync the managed role definitions
""" """
logger.info('Running data migration setup_managed_role_definitions')
to_create = { to_create = {
'object_admin': '{cls.__name__} Admin', 'object_admin': '{cls.__name__} Admin',
'org_admin': 'Organization Admin', 'org_admin': 'Organization Admin',
@@ -302,13 +281,7 @@ def setup_managed_role_definitions(apps, schema_editor):
'special': '{cls.__name__} {action}', 'special': '{cls.__name__} {action}',
} }
try: ContentType = apps.get_model('contenttypes', 'ContentType')
# After migration for remote permissions
ContentType = apps.get_model('dab_rbac', 'DABContentType')
except LookupError:
# If using DAB from before remote permissions are implemented
ContentType = apps.get_model('contenttypes', 'ContentType')
Permission = apps.get_model('dab_rbac', 'DABPermission') Permission = apps.get_model('dab_rbac', 'DABPermission')
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition') RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL) Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
@@ -336,6 +309,16 @@ def setup_managed_role_definitions(apps, schema_editor):
to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition
) )
) )
if cls_name == 'team':
managed_role_definitions.append(
get_or_create_managed(
'Controller Team Admin',
f'Has all permissions to a single {cls._meta.verbose_name}',
ct,
indiv_perms,
RoleDefinition,
)
)
if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')): if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
org_child_perms = object_perms.copy() org_child_perms = object_perms.copy()
@@ -376,6 +359,18 @@ def setup_managed_role_definitions(apps, schema_editor):
RoleDefinition, RoleDefinition,
) )
) )
if action == 'member' and cls_name in ('organization', 'team'):
suffix = to_create['special'].format(cls=cls, action=action.title())
rd_name = f'Controller {suffix}'
managed_role_definitions.append(
get_or_create_managed(
rd_name,
f'Has {action} permissions to a single {cls._meta.verbose_name}',
ct,
perm_list,
RoleDefinition,
)
)
if 'org_admin' in to_create: if 'org_admin' in to_create:
managed_role_definitions.append( managed_role_definitions.append(
@@ -387,6 +382,15 @@ def setup_managed_role_definitions(apps, schema_editor):
RoleDefinition, RoleDefinition,
) )
) )
managed_role_definitions.append(
get_or_create_managed(
'Controller Organization Admin',
'Has all permissions to a single organization and all objects inside of it',
org_ct,
org_perms,
RoleDefinition,
)
)
# Special "organization action" roles # Special "organization action" roles
audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')] audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
@@ -427,115 +431,3 @@ def setup_managed_role_definitions(apps, schema_editor):
for role_definition in unexpected_role_definitions: for role_definition in unexpected_role_definitions:
logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}') logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
role_definition.delete() role_definition.delete()
def get_team_to_team_relationships(apps, team_member_role):
"""
Find all team-to-team relationships where one team is a member of another.
Returns a dict mapping parent_team_id -> [child_team_id, ...]
"""
team_to_team_relationships = defaultdict(list)
# Find all team assignments with the Team Member role
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
team_assignments = RoleTeamAssignment.objects.filter(role_definition=team_member_role).select_related('team')
for assignment in team_assignments:
parent_team_id = int(assignment.object_id)
child_team_id = assignment.team.id
team_to_team_relationships[parent_team_id].append(child_team_id)
return team_to_team_relationships
def get_all_user_members_of_team(apps, team_member_role, team_id, team_to_team_map, visited=None):
"""
Recursively find all users who are members of a team, including through nested teams.
"""
if visited is None:
visited = set()
if team_id in visited:
return set() # Avoid infinite recursion
visited.add(team_id)
all_users = set()
# Get direct user assignments to this team
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
user_assignments = RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_id).select_related('user')
for assignment in user_assignments:
all_users.add(assignment.user)
# Get team-to-team assignments and recursively find their users
child_team_ids = team_to_team_map.get(team_id, [])
for child_team_id in child_team_ids:
nested_users = get_all_user_members_of_team(apps, team_member_role, child_team_id, team_to_team_map, visited.copy())
all_users.update(nested_users)
return all_users
def remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id):
"""
Remove team-to-team memberships.
"""
Team = apps.get_model('main', 'Team')
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
parent_team = Team.objects.get(id=parent_team_id)
child_team = Team.objects.get(id=child_team_id)
# Remove all team-to-team RoleTeamAssignments
RoleTeamAssignment.objects.filter(role_definition=team_member_role, object_id=parent_team_id, team=child_team).delete()
# Check mirroring Team model for children under member_role
parent_team.member_role.children.filter(object_id=child_team_id).delete()
def consolidate_indirect_user_roles(apps, schema_editor):
"""
A user should have a member role for every team they were indirectly
a member of. ex. Team A is a member of Team B. All users in Team A
previously were only members of Team A. They should now be members of
Team A and Team B.
"""
# get models for membership on teams
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
Team = apps.get_model('main', 'Team')
team_member_role = RoleDefinition.objects.get(name='Team Member')
team_to_team_map = get_team_to_team_relationships(apps, team_member_role)
if not team_to_team_map:
return # No team-to-team relationships to consolidate
# Get content type for Team - needed for give_permissions
try:
from django.contrib.contenttypes.models import ContentType
team_content_type = ContentType.objects.get_for_model(Team)
except ImportError:
# Fallback if ContentType is not available
ContentType = apps.get_model('contenttypes', 'ContentType')
team_content_type = ContentType.objects.get_for_model(Team)
# Get all users who should be direct members of a team
for parent_team_id, child_team_ids in team_to_team_map.items():
all_users = get_all_user_members_of_team(apps, team_member_role, parent_team_id, team_to_team_map)
# Create direct RoleUserAssignments for all users
if all_users:
give_permissions(apps=apps, rd=team_member_role, users=list(all_users), object_id=parent_team_id, content_type_id=team_content_type.id)
# Mirror assignments to Team model
parent_team = Team.objects.get(id=parent_team_id)
for user in all_users:
parent_team.member_role.members.add(user.id)
# Remove all team-to-team assignments for parent team
for child_team_id in child_team_ids:
remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id)
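
A small verification sketch of what the data migration above is expected to leave behind, using hypothetical team names: users of a child team become direct members of the parent team, and the team-to-team assignment is removed.

from awx.main.models import Team

parent = Team.objects.get(name='Team B')  # hypothetical parent team
child = Team.objects.get(name='Team A')   # hypothetical team that was a member of Team B

# No remaining team-to-team membership under the parent's member_role
assert not parent.member_role.children.filter(object_id=child.id).exists()
# Every user of the child team is now a direct member of the parent team
for user in child.member_role.members.all():
    assert parent.member_role.members.filter(pk=user.pk).exists()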


@@ -1,25 +0,0 @@
import logging
from django.db.models import Count
logger = logging.getLogger(__name__)
def _rename_duplicates(cls):
field = cls._meta.get_field('name')
max_len = field.max_length
for organization_id in cls.objects.order_by().values_list('organization_id', flat=True).distinct():
duplicate_data = cls.objects.values('name').filter(organization_id=organization_id).annotate(count=Count('name')).order_by().filter(count__gt=1)
for data in duplicate_data:
name = data['name']
for idx, ujt in enumerate(cls.objects.filter(name=name, organization_id=organization_id).order_by('created')):
if idx > 0:
suffix = f'_dup{idx}'
max_chars = max_len - len(suffix)
if len(ujt.name) >= max_chars:
ujt.name = ujt.name[:max_chars] + suffix
else:
ujt.name = ujt.name + suffix
logger.info(f'Renaming duplicate {cls._meta.model_name} to `{ujt.name}` because of duplicate name entry')
ujt.save(update_fields=['name'])
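
An illustration of the renaming rule above (values are made up): the oldest object keeps its name, later duplicates get a _dupN suffix, and the original name is truncated only when adding the suffix would exceed the column's max_length.

max_len = 512  # assumed max_length, for illustration only
name = 'Nightly deploy'
for idx in range(3):
    if idx == 0:
        continue  # the first (oldest) object keeps its name
    suffix = f'_dup{idx}'
    max_chars = max_len - len(suffix)
    renamed = name[:max_chars] + suffix if len(name) >= max_chars else name + suffix
    print(renamed)  # Nightly deploy_dup1, Nightly deploy_dup2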


@@ -3,6 +3,7 @@ from time import time
from django.db.models import Subquery, OuterRef, F from django.db.models import Subquery, OuterRef, F
from awx.main.fields import update_role_parentage_for_instance
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
logger = logging.getLogger('rbac_migrations') logger = logging.getLogger('rbac_migrations')
@@ -237,10 +238,85 @@ def restore_inventory_admins_backward(apps, schema_editor):
def rebuild_role_hierarchy(apps, schema_editor): def rebuild_role_hierarchy(apps, schema_editor):
"""Not used after DAB RBAC migration""" """
pass This should be called in any migration when ownerships are changed.
Ex. I remove a user from the admin_role of a credential.
Ancestors are cached from parents for performance, this re-computes ancestors.
"""
logger.info('Computing role roots..')
start = time()
roots = Role.objects.all().values_list('id', flat=True)
stop = time()
logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start))
start = time()
Role.rebuild_role_ancestor_list(roots, [])
stop = time()
logger.info('Rebuild ancestors completed in %f seconds' % (stop - start))
logger.info('Done.')
def rebuild_role_parentage(apps, schema_editor, models=None): def rebuild_role_parentage(apps, schema_editor, models=None):
"""Not used after DAB RBAC migration""" """
pass This should be called in any migration when any parent_role entry
is modified so that the cached parent fields will be updated. Ex:
foo_role = ImplicitRoleField(
parent_role=['bar_role'] # change to parent_role=['admin_role']
)
This is like rebuild_role_hierarchy, but that method updates ancestors,
whereas this method updates parents.
"""
start = time()
seen_models = set()
model_ct = 0
noop_ct = 0
ContentType = apps.get_model('contenttypes', "ContentType")
additions = set()
removals = set()
role_qs = Role.objects
if models:
# update_role_parentage_for_instance is expensive
# if the models have been downselected, ignore those which are not in the list
ct_ids = list(ContentType.objects.filter(model__in=[name.lower() for name in models]).values_list('id', flat=True))
role_qs = role_qs.filter(content_type__in=ct_ids)
for role in role_qs.iterator():
if not role.object_id:
continue
model_tuple = (role.content_type_id, role.object_id)
if model_tuple in seen_models:
continue
seen_models.add(model_tuple)
# The GenericForeignKey does not work right in migrations
# with the usage as role.content_object
# so we do the lookup ourselves with current migration models
ct = role.content_type
app = ct.app_label
ct_model = apps.get_model(app, ct.model)
content_object = ct_model.objects.get(pk=role.object_id)
parents_added, parents_removed = update_role_parentage_for_instance(content_object)
additions.update(parents_added)
removals.update(parents_removed)
if parents_added:
model_ct += 1
logger.debug('Added to parents of roles {} of {}'.format(parents_added, content_object))
if parents_removed:
model_ct += 1
logger.debug('Removed from parents of roles {} of {}'.format(parents_removed, content_object))
else:
noop_ct += 1
logger.debug('No changes to role parents for {} resources'.format(noop_ct))
logger.debug('Added parents to {} roles'.format(len(additions)))
logger.debug('Removed parents from {} roles'.format(len(removals)))
if model_ct:
logger.info('Updated implicit parents of {} resources'.format(model_ct))
logger.info('Rebuild parentage completed in %f seconds' % (time() - start))
# this is ran because the ordinary signals for
# Role.parents.add and Role.parents.remove not called in migration
Role.rebuild_role_ancestor_list(list(additions), list(removals))


@@ -33,7 +33,6 @@ from awx.main.models.inventory import ( # noqa
InventorySource, InventorySource,
InventoryUpdate, InventoryUpdate,
SmartInventoryMembership, SmartInventoryMembership,
InventoryGroupVariablesWithHistory,
) )
from awx.main.models.jobs import ( # noqa from awx.main.models.jobs import ( # noqa
Job, Job,
@@ -172,17 +171,35 @@ def cleanup_created_modified_by(sender, **kwargs):
pre_delete.connect(cleanup_created_modified_by, sender=User) pre_delete.connect(cleanup_created_modified_by, sender=User)
@property
def user_get_organizations(user):
return Organization.access_qs(user, 'member')
@property
def user_get_admin_of_organizations(user):
return Organization.access_qs(user, 'change')
@property
def user_get_auditor_of_organizations(user):
return Organization.access_qs(user, 'audit')
@property @property
def created(user): def created(user):
return user.date_joined return user.date_joined
User.add_to_class('organizations', user_get_organizations)
User.add_to_class('admin_of_organizations', user_get_admin_of_organizations)
User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations)
User.add_to_class('created', created) User.add_to_class('created', created)
def get_system_auditor_role(): def get_system_auditor_role():
rd, created = RoleDefinition.objects.get_or_create( rd, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'} name='Controller System Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
) )
if created: if created:
rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view'))) rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view')))


@@ -24,7 +24,6 @@ from awx.main.managers import DeferJobCreatedManager
from awx.main.constants import MINIMAL_EVENTS from awx.main.constants import MINIMAL_EVENTS
from awx.main.models.base import CreatedModifiedModel from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
from awx.main.utils.db import bulk_update_sorted_by_id
analytics_logger = logging.getLogger('awx.analytics.job_events') analytics_logger = logging.getLogger('awx.analytics.job_events')
@@ -603,7 +602,7 @@ class JobEvent(BasePlaybookEvent):
h.last_job_host_summary_id = host_mapping[h.id] h.last_job_host_summary_id = host_mapping[h.id]
updated_hosts.add(h) updated_hosts.add(h)
bulk_update_sorted_by_id(Host, updated_hosts, ['last_job_id', 'last_job_host_summary_id']) Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
# Create/update Host Metrics # Create/update Host Metrics
self._update_host_metrics(updated_hosts_list) self._update_host_metrics(updated_hosts_list)
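
One side of the hunk above calls Django's built-in bulk_update directly. A short sketch of that pattern, with an assumed inventory pk: mutate the objects in memory, then persist only the listed fields in batches.

from awx.main.models import Host

hosts = list(Host.objects.filter(inventory_id=1))  # assumed inventory pk
for h in hosts:
    h.last_job_id = None
# Writes last_job_id for all rows in batches of 100, one UPDATE per batch
Host.objects.bulk_update(hosts, ['last_job_id'], batch_size=100)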


@@ -1024,10 +1024,7 @@ class InventorySourceOptions(BaseModel):
# If a credential was provided, it's important that it matches # If a credential was provided, it's important that it matches
# the actual inventory source being used (Amazon requires Amazon # the actual inventory source being used (Amazon requires Amazon
# credentials; Rackspace requires Rackspace credentials; etc...) # credentials; Rackspace requires Rackspace credentials; etc...)
# TODO: AAP-53978 check that this matches new awx-plugin content for ESXI if source.replace('ec2', 'aws') != cred.kind:
if source == 'vmware_esxi' and source.replace('vmware_esxi', 'vmware') != cred.kind:
return _('VMWARE inventory sources (such as %s) require credentials for the matching cloud service.') % source
if source == 'ec2' and source.replace('ec2', 'aws') != cred.kind:
return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source
# Allow an EC2 source to omit the credential. If Tower is running on # Allow an EC2 source to omit the credential. If Tower is running on
# an EC2 instance with an IAM Role assigned, boto will use credentials # an EC2 instance with an IAM Role assigned, boto will use credentials
@@ -1123,10 +1120,8 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
def save(self, *args, **kwargs): def save(self, *args, **kwargs):
# if this is a new object, inherit organization from its inventory # if this is a new object, inherit organization from its inventory
if not self.pk: if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
self.org_unique = False # needed to exclude from unique (name, organization) constraint self.organization_id = self.inventory.organization_id
if self.inventory and self.inventory.organization_id and not self.organization_id:
self.organization_id = self.inventory.organization_id
# If update_fields has been specified, add our field names to it, # If update_fields has been specified, add our field names to it,
# if it hasn't been specified, then we're just doing a normal save. # if it hasn't been specified, then we're just doing a normal save.
@@ -1407,38 +1402,3 @@ class CustomInventoryScript(CommonModelNameNotUnique):
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
class InventoryGroupVariablesWithHistory(models.Model):
"""
Represents the inventory variables of one inventory group.
The purpose of this model is to persist the update history of the group
variables. The update history is maintained in another class
(`InventoryGroupVariables`), this class here is just a container for the
database storage.
"""
class Meta:
constraints = [
# Do not allow the same inventory/group combination more than once.
models.UniqueConstraint(
fields=["inventory", "group"],
name="unique_inventory_group",
violation_error_message=_("Inventory/Group combination must be unique."),
),
]
inventory = models.ForeignKey(
'Inventory',
related_name='inventory_group_variables',
null=True,
on_delete=models.CASCADE,
)
group = models.ForeignKey( # `None` denotes the 'all'-group.
'Group',
related_name='inventory_group_variables',
null=True,
on_delete=models.CASCADE,
)
variables = models.JSONField() # The group variables, including their history.


@@ -298,6 +298,7 @@ class JobTemplate(
'organization', 'organization',
'survey_passwords', 'survey_passwords',
'labels', 'labels',
'priority',
'credentials', 'credentials',
'job_slice_number', 'job_slice_number',
'job_slice_count', 'job_slice_count',
@@ -358,6 +359,26 @@ class JobTemplate(
update_fields.append('organization_id') update_fields.append('organization_id')
return super(JobTemplate, self).save(*args, **kwargs) return super(JobTemplate, self).save(*args, **kwargs)
def validate_unique(self, exclude=None):
"""Custom over-ride for JT specifically
because organization is inferred from project after full_clean is finished
thus the organization field is not yet set when validation happens
"""
errors = []
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
kwargs = {'name': self.name}
if self.project:
kwargs['organization'] = self.project.organization_id
else:
kwargs['organization'] = None
qs = JobTemplate.objects.filter(**kwargs)
if self.pk:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
if errors:
raise ValidationError(errors)
def create_unified_job(self, **kwargs): def create_unified_job(self, **kwargs):
prevent_slicing = kwargs.pop('_prevent_slicing', False) prevent_slicing = kwargs.pop('_prevent_slicing', False)
slice_ct = self.get_effective_slice_ct(kwargs) slice_ct = self.get_effective_slice_ct(kwargs)
@@ -384,26 +405,6 @@ class JobTemplate(
WorkflowJobNode.objects.create(**create_kwargs) WorkflowJobNode.objects.create(**create_kwargs)
return job return job
def validate_unique(self, exclude=None):
"""Custom over-ride for JT specifically
because organization is inferred from project after full_clean is finished
thus the organization field is not yet set when validation happens
"""
errors = []
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
kwargs = {'name': self.name}
if self.project:
kwargs['organization'] = self.project.organization_id
else:
kwargs['organization'] = None
qs = JobTemplate.objects.filter(**kwargs)
if self.pk:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
if errors:
raise ValidationError(errors)
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
@@ -1175,7 +1176,7 @@ class SystemJobTemplate(UnifiedJobTemplate, SystemJobOptions):
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
return ['name', 'description', 'organization', 'job_type', 'extra_vars'] return ['name', 'description', 'organization', 'priority', 'job_type', 'extra_vars']
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:system_job_template_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:system_job_template_detail', kwargs={'pk': self.pk}, request=request)
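
A hypothetical illustration of the JobTemplate.validate_unique override shown earlier in this file's diff: a second template with the same name under the same organization should fail validation before hitting the database. The project name is an assumption for the example.

from django.core.exceptions import ValidationError
from awx.main.models import JobTemplate, Project

proj = Project.objects.get(name='Demo Project')  # assumed to exist
JobTemplate.objects.create(name='Deploy', project=proj)

dup = JobTemplate(name='Deploy', project=proj)
try:
    dup.validate_unique()
except ValidationError as exc:
    print(exc.messages)  # duplicate (name, organization) error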


@@ -86,7 +86,7 @@ class ResourceMixin(models.Model):
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}') raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
if content_types is None: if content_types is None:
ct_kwarg = dict(content_type=ContentType.objects.get_for_model(cls)) ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
else: else:
ct_kwarg = dict(content_type_id__in=content_types) ct_kwarg = dict(content_type_id__in=content_types)


@@ -354,7 +354,7 @@ class Project(UnifiedJobTemplate, ProjectOptions, ResourceMixin, CustomVirtualEn
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
return set(f.name for f in ProjectOptions._meta.fields) | set(['name', 'description', 'organization']) return set(f.name for f in ProjectOptions._meta.fields) | set(['name', 'description', 'priority', 'organization'])
def clean_organization(self): def clean_organization(self):
if self.pk: if self.pk:


@@ -27,9 +27,6 @@ from django.conf import settings
# Ansible_base app # Ansible_base app
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
from ansible_base.rbac.sync import maybe_reverse_sync_assignment, maybe_reverse_sync_unassignment, maybe_reverse_sync_role_definition
from ansible_base.rbac import permission_registry
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
from ansible_base.lib.utils.models import get_type_for_model from ansible_base.lib.utils.models import get_type_for_model
# AWX # AWX
@@ -562,27 +559,34 @@ def get_role_definition(role):
f = obj._meta.get_field(role.role_field) f = obj._meta.get_field(role.role_field)
action_name = f.name.rsplit("_", 1)[0] action_name = f.name.rsplit("_", 1)[0]
model_print = type(obj).__name__ model_print = type(obj).__name__
rd_name = f'{model_print} {action_name.title()} Compat'
perm_list = get_role_codenames(role) perm_list = get_role_codenames(role)
defaults = { defaults = {
'content_type': permission_registry.content_type_model.objects.get_by_natural_key(role.content_type.app_label, role.content_type.model), 'content_type_id': role.content_type_id,
'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility', 'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility',
} }
# use Controller-specific role definitions for Team/Organization and member/admin
# instead of platform role definitions
# these should exist in the system already, so just do a lookup by role definition name
if model_print in ['Team', 'Organization'] and action_name in ['member', 'admin']:
rd_name = f'Controller {model_print} {action_name.title()}'
rd = RoleDefinition.objects.filter(name=rd_name).first()
if rd:
return rd
else:
return RoleDefinition.objects.create_from_permissions(permissions=perm_list, name=rd_name, managed=True, **defaults)
else:
rd_name = f'{model_print} {action_name.title()} Compat'
with impersonate(None): with impersonate(None):
try: try:
with no_reverse_sync(): rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
except ValidationError: except ValidationError:
# This is a tricky case - practically speaking, users should not be allowed to create team roles # This is a tricky case - practically speaking, users should not be allowed to create team roles
# or roles that include the team member permission. # or roles that include the team member permission.
# If we need to create this for compatibility purposes then we will create it as a managed non-editable role # If we need to create this for compatibility purposes then we will create it as a managed non-editable role
defaults['managed'] = True defaults['managed'] = True
with no_reverse_sync(): rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
if created and rbac_sync_enabled.enabled:
maybe_reverse_sync_role_definition(rd, action='create')
return rd return rd
@@ -596,6 +600,12 @@ def get_role_from_object_role(object_role):
model_name, role_name, _ = rd.name.split() model_name, role_name, _ = rd.name.split()
role_name = role_name.lower() role_name = role_name.lower()
role_name += '_role' role_name += '_role'
elif rd.name.startswith('Controller') and rd.name.endswith(' Admin'):
# Controller Organization Admin and Controller Team Admin
role_name = 'admin_role'
elif rd.name.startswith('Controller') and rd.name.endswith(' Member'):
# Controller Organization Member and Controller Team Member
role_name = 'member_role'
elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2: elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2:
# cases like "Organization Project Admin" # cases like "Organization Project Admin"
model_name, target_model_name, role_name = rd.name.split() model_name, target_model_name, role_name = rd.name.split()
@@ -622,14 +632,12 @@ def get_role_from_object_role(object_role):
return getattr(object_role.content_object, role_name) return getattr(object_role.content_object, role_name)
def give_or_remove_permission(role, actor, giving=True, rd=None): def give_or_remove_permission(role, actor, giving=True):
obj = role.content_object obj = role.content_object
if obj is None: if obj is None:
return return
if not rd: rd = get_role_definition(role)
rd = get_role_definition(role) rd.give_or_remove_permission(actor, obj, giving=giving)
assignment = rd.give_or_remove_permission(actor, obj, giving=giving)
return assignment
class SyncEnabled(threading.local): class SyncEnabled(threading.local):
@@ -681,15 +689,7 @@ def sync_members_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
role = Role.objects.get(pk=user_or_role_id) role = Role.objects.get(pk=user_or_role_id)
else: else:
user = get_user_model().objects.get(pk=user_or_role_id) user = get_user_model().objects.get(pk=user_or_role_id)
rd = get_role_definition(role) give_or_remove_permission(role, user, giving=is_giving)
assignment = give_or_remove_permission(role, user, giving=is_giving, rd=rd)
# sync to resource server
if rbac_sync_enabled.enabled:
if is_giving:
maybe_reverse_sync_assignment(assignment)
else:
maybe_reverse_sync_unassignment(rd, user, role.content_object)
def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs): def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs):
@@ -732,19 +732,12 @@ def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
from awx.main.models.organization import Team from awx.main.models.organization import Team
team = Team.objects.get(pk=parent_role.object_id) team = Team.objects.get(pk=parent_role.object_id)
rd = get_role_definition(child_role) give_or_remove_permission(child_role, team, giving=is_giving)
assignment = give_or_remove_permission(child_role, team, giving=is_giving, rd=rd)
# sync to resource server
if rbac_sync_enabled.enabled:
if is_giving:
maybe_reverse_sync_assignment(assignment)
else:
maybe_reverse_sync_unassignment(rd, team, child_role.content_object)
ROLE_DEFINITION_TO_ROLE_FIELD = { ROLE_DEFINITION_TO_ROLE_FIELD = {
'Organization Member': 'member_role', 'Organization Member': 'member_role',
'Controller Organization Member': 'member_role',
'WorkflowJobTemplate Admin': 'admin_role', 'WorkflowJobTemplate Admin': 'admin_role',
'Organization WorkflowJobTemplate Admin': 'workflow_admin_role', 'Organization WorkflowJobTemplate Admin': 'workflow_admin_role',
'WorkflowJobTemplate Execute': 'execute_role', 'WorkflowJobTemplate Execute': 'execute_role',
@@ -769,8 +762,11 @@ ROLE_DEFINITION_TO_ROLE_FIELD = {
'Organization Credential Admin': 'credential_admin_role', 'Organization Credential Admin': 'credential_admin_role',
'Credential Use': 'use_role', 'Credential Use': 'use_role',
'Team Admin': 'admin_role', 'Team Admin': 'admin_role',
'Controller Team Admin': 'admin_role',
'Team Member': 'member_role', 'Team Member': 'member_role',
'Controller Team Member': 'member_role',
'Organization Admin': 'admin_role', 'Organization Admin': 'admin_role',
'Controller Organization Admin': 'admin_role',
'Organization Audit': 'auditor_role', 'Organization Audit': 'auditor_role',
'Organization Execute': 'execute_role', 'Organization Execute': 'execute_role',
'Organization Approval': 'approval_role', 'Organization Approval': 'approval_role',


@@ -18,13 +18,11 @@ from collections import OrderedDict
# Django # Django
from django.conf import settings from django.conf import settings
from django.db import models, connection, transaction from django.db import models, connection, transaction
from django.db.models.constraints import UniqueConstraint
from django.core.exceptions import NON_FIELD_ERRORS from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now from django.utils.timezone import now
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.models import ContentType
from flags.state import flag_enabled
# REST Framework # REST Framework
from rest_framework.exceptions import ParseError from rest_framework.exceptions import ParseError
@@ -34,7 +32,6 @@ from polymorphic.models import PolymorphicModel
from ansible_base.lib.utils.models import prevent_search, get_type_for_model from ansible_base.lib.utils.models import prevent_search, get_type_for_model
from ansible_base.rbac import permission_registry from ansible_base.rbac import permission_registry
from ansible_base.rbac.models import RoleEvaluation
# AWX # AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
@@ -114,16 +111,18 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
ordering = ('name',) ordering = ('name',)
# unique_together here is intentionally commented out. Please make sure sub-classes of this model # unique_together here is intentionally commented out. Please make sure sub-classes of this model
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')] # contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
# Unique name constraint - note that inventory source model is excluded from this constraint entirely # unique_together = [('polymorphic_ctype', 'name', 'organization')]
constraints = [
UniqueConstraint(fields=['polymorphic_ctype', 'name', 'organization'], condition=models.Q(org_unique=True), name='ujt_hard_name_constraint')
]
old_pk = models.PositiveIntegerField( old_pk = models.PositiveIntegerField(
null=True, null=True,
default=None, default=None,
editable=False, editable=False,
) )
priority = models.PositiveIntegerField(
null=False,
default=0,
editable=True,
)
current_job = models.ForeignKey( current_job = models.ForeignKey(
'UnifiedJob', 'UnifiedJob',
null=True, null=True,
@@ -186,9 +185,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
) )
labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels') labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels')
instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership') instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership')
org_unique = models.BooleanField(
blank=True, default=True, editable=False, help_text=_('Used internally to selectively enforce database constraint on name')
)
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
real_instance = self.get_real_instance() real_instance = self.get_real_instance()
@@ -219,21 +215,20 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
# do not use this if in a subclass # do not use this if in a subclass
if cls != UnifiedJobTemplate: if cls != UnifiedJobTemplate:
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field) return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
from ansible_base.rbac.models import RoleEvaluation
action = to_permissions[role_field] action = to_permissions[role_field]
# Special condition for super auditor # Special condition for super auditor
role_subclasses = cls._submodels_with_roles() role_subclasses = cls._submodels_with_roles()
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses} all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
if not (all_codenames - accessor.singleton_permissions()): if not (all_codenames - accessor.singleton_permissions()):
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
qs = cls.objects.filter(polymorphic_ctype__in=role_cts) qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
return qs.values_list('id', flat=True) return qs.values_list('id', flat=True)
dab_role_cts = permission_registry.content_type_model.objects.get_for_models(*role_subclasses).values()
return ( return (
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in dab_role_cts])
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
.values_list('object_id') .values_list('object_id')
.distinct() .distinct()
) )
@@ -595,6 +590,13 @@ class UnifiedJob(
default=None, default=None,
editable=False, editable=False,
) )
priority = models.PositiveIntegerField(
default=0,
editable=False,
help_text=_(
"Relative priority to other jobs. The higher the number, the higher the priority. Jobs with equivalent prioirty are started based on available capacity and launch time."
),
)
emitted_events = models.PositiveIntegerField( emitted_events = models.PositiveIntegerField(
default=0, default=0,
editable=False, editable=False,
@@ -1200,13 +1202,6 @@ class UnifiedJob(
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n')) fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
return fd return fd
def _fix_double_escapes(self, content):
"""
Collapse double-escaped sequences into single-escaped form.
"""
# Replace \\ followed by one of ' " \ n r t
return re.sub(r'\\([\'"\\nrt])', r'\1', content)
def _escape_ascii(self, content): def _escape_ascii(self, content):
# Remove ANSI escape sequences used to embed event data. # Remove ANSI escape sequences used to embed event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content) content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
@@ -1214,14 +1209,12 @@ class UnifiedJob(
content = re.sub(r'\x1b[^m]*m', '', content) content = re.sub(r'\x1b[^m]*m', '', content)
return content return content
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False, fix_escapes=False):
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
content = self.result_stdout_raw_handle().read() content = self.result_stdout_raw_handle().read()
if redact_sensitive: if redact_sensitive:
content = UriCleaner.remove_sensitive(content) content = UriCleaner.remove_sensitive(content)
if escape_ascii: if escape_ascii:
content = self._escape_ascii(content) content = self._escape_ascii(content)
if fix_escapes:
content = self._fix_double_escapes(content)
return content return content
@property @property
@@ -1230,10 +1223,9 @@ class UnifiedJob(
@property @property
def result_stdout(self): def result_stdout(self):
# Human-facing output should fix escapes
return self._result_stdout_raw(escape_ascii=True, fix_escapes=True)
return self._result_stdout_raw(escape_ascii=True)
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False, fix_escapes=False):
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
return_buffer = StringIO() return_buffer = StringIO()
if end_line is not None: if end_line is not None:
end_line = int(end_line) end_line = int(end_line)
@@ -1256,18 +1248,14 @@ class UnifiedJob(
return_buffer = UriCleaner.remove_sensitive(return_buffer) return_buffer = UriCleaner.remove_sensitive(return_buffer)
if escape_ascii: if escape_ascii:
return_buffer = self._escape_ascii(return_buffer) return_buffer = self._escape_ascii(return_buffer)
if fix_escapes:
return_buffer = self._fix_double_escapes(return_buffer)
return return_buffer, start_actual, end_actual, absolute_end return return_buffer, start_actual, end_actual, absolute_end
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False): def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False):
# Raw should NOT fix escapes
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive) return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive)
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False): def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False):
# Human-facing should fix escapes
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True, fix_escapes=True)
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True)
@property @property
def workflow_job_id(self): def workflow_job_id(self):
@@ -1386,30 +1374,7 @@ class UnifiedJob(
traceback=self.result_traceback, traceback=self.result_traceback,
) )
def get_start_kwargs(self):
def pre_start(self, **kwargs):
needed = self.get_passwords_needed_to_start()
decrypted_start_args = decrypt_field(self, 'start_args')
if not decrypted_start_args or decrypted_start_args == '{}':
return None
try:
start_args = json.loads(decrypted_start_args)
except Exception:
logger.exception(f'Unexpected malformed start_args on unified_job={self.id}')
return None
opts = dict([(field, start_args.get(field, '')) for field in needed])
if not all(opts.values()):
missing_fields = ', '.join([k for k, v in opts.items() if not v])
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
self.save(update_fields=['job_explanation'])
return opts
def pre_start(self):
if not self.can_start: if not self.can_start:
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting'))) self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
self.save(update_fields=['job_explanation']) self.save(update_fields=['job_explanation'])
@@ -1430,11 +1395,26 @@ class UnifiedJob(
self.save(update_fields=['job_explanation']) self.save(update_fields=['job_explanation'])
return (False, None) return (False, None)
opts = self.get_start_kwargs()
needed = self.get_passwords_needed_to_start()
try:
start_args = json.loads(decrypt_field(self, 'start_args'))
except Exception:
start_args = None
if opts and (not all(opts.values())):
if start_args in (None, ''):
start_args = kwargs
opts = dict([(field, start_args.get(field, '')) for field in needed])
if not all(opts.values()):
missing_fields = ', '.join([k for k, v in opts.items() if not v])
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
self.save(update_fields=['job_explanation'])
return (False, None) return (False, None)
if 'extra_vars' in kwargs:
self.handle_extra_data(kwargs['extra_vars'])
# remove any job_explanations that may have been set while job was in pending # remove any job_explanations that may have been set while job was in pending
if self.job_explanation != "": if self.job_explanation != "":
self.job_explanation = "" self.job_explanation = ""
@@ -1495,44 +1475,21 @@ class UnifiedJob(
def cancel_dispatcher_process(self): def cancel_dispatcher_process(self):
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM""" """Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
if not self.celery_task_id: if not self.celery_task_id:
return False
return
canceled = [] canceled = []
# Special case for task manager (used during workflow job cancellation)
if not connection.get_autocommit(): if not connection.get_autocommit():
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
try:
from dispatcherd.factories import get_control_from_settings
ctl = get_control_from_settings()
ctl.control('cancel', data={'uuid': self.celery_task_id})
except Exception:
logger.exception("Error sending cancel command to new dispatcher")
else:
try:
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
except Exception:
logger.exception("Error sending cancel command to legacy dispatcher")
return True # task manager itself needs to act under assumption that cancel was received return True # task manager itself needs to act under assumption that cancel was received
# Standard case with reply
try: try:
# Use control and reply mechanism to cancel and obtain confirmation
timeout = 5 timeout = 5
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
from dispatcherd.factories import get_control_from_settings
ctl = get_control_from_settings()
results = ctl.control_with_reply('cancel', data={'uuid': self.celery_task_id}, expected_replies=1, timeout=timeout)
# Check if cancel was successful by checking if we got any results
return bool(results and len(results) > 0)
else:
# Original implementation
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
except socket.timeout: except socket.timeout:
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s') logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
except Exception: except Exception:
logger.exception("error encountered when checking task status") logger.exception("error encountered when checking task status")
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
def cancel(self, job_explanation=None, is_chain=False): def cancel(self, job_explanation=None, is_chain=False):


@@ -416,7 +416,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
r = set(f.name for f in WorkflowJobOptions._meta.fields) | set( r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'job_tags', 'skip_tags'] ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'priority', 'job_tags', 'skip_tags']
) )
r.remove('char_prompts') # needed due to copying launch config to launch config r.remove('char_prompts') # needed due to copying launch config to launch config
return r return r


@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
): ):
super(GrafanaBackend, self).__init__(fail_silently=fail_silently) super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
self.grafana_key = grafana_key self.grafana_key = grafana_key
self.dashboardId = int(dashboardId) if dashboardId != '' else None
self.panelId = int(panelId) if panelId != '' else None
self.dashboardId = int(dashboardId) if dashboardId is not None else None
self.panelId = int(panelId) if panelId is not None else None
self.annotation_tags = annotation_tags if annotation_tags is not None else [] self.annotation_tags = annotation_tags if annotation_tags is not None else []
self.grafana_no_verify_ssl = grafana_no_verify_ssl self.grafana_no_verify_ssl = grafana_no_verify_ssl
self.isRegion = isRegion self.isRegion = isRegion
@@ -97,7 +97,6 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
r = requests.post( r = requests.post(
"{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl) "{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl)
) )
if r.status_code >= 400: if r.status_code >= 400:
logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code))) logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
if not self.fail_silently: if not self.fail_silently:


@@ -5,6 +5,8 @@ import time
import ssl import ssl
import logging import logging
import irc.client
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
@@ -14,19 +16,6 @@ from awx.main.notifications.custom_notification_base import CustomNotificationBa
logger = logging.getLogger('awx.main.notifications.irc_backend') logger = logging.getLogger('awx.main.notifications.irc_backend')
def _irc():
"""
Prime the real jaraco namespace before importing irc.* so that
setuptools' vendored 'setuptools._vendor.jaraco' doesn't shadow
external 'jaraco.*' packages (e.g., jaraco.stream).
"""
import jaraco.stream # ensure the namespace package is established # noqa: F401
import irc.client as irc_client
import irc.connection as irc_connection
return irc_client, irc_connection
class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase): class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
init_parameters = { init_parameters = {
"server": {"label": "IRC Server Address", "type": "string"}, "server": {"label": "IRC Server Address", "type": "string"},
@@ -51,15 +40,12 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
def open(self): def open(self):
if self.connection is not None: if self.connection is not None:
return False return False
irc_client, irc_connection = _irc()
if self.use_ssl: if self.use_ssl:
connection_factory = irc_connection.Factory(wrapper=ssl.wrap_socket)
connection_factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
else:
connection_factory = irc_connection.Factory()
connection_factory = irc.connection.Factory()
try:
self.reactor = irc_client.Reactor()
self.reactor = irc.client.Reactor()
self.connection = self.reactor.server().connect( self.connection = self.reactor.server().connect(
self.server, self.server,
self.port, self.port,
@@ -67,7 +53,7 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
password=self.password, password=self.password,
connect_factory=connection_factory, connect_factory=connection_factory,
) )
except irc_client.ServerConnectionError as e:
except irc.client.ServerConnectionError as e:
logger.error(smart_str(_("Exception connecting to irc server: {}").format(e))) logger.error(smart_str(_("Exception connecting to irc server: {}").format(e)))
if not self.fail_silently: if not self.fail_silently:
raise raise
@@ -79,9 +65,8 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
self.connection = None self.connection = None
def on_connect(self, connection, event): def on_connect(self, connection, event):
irc_client, _ = _irc()
for c in self.channels: for c in self.channels:
if irc_client.is_channel(c):
if irc.client.is_channel(c):
connection.join(c) connection.join(c)
else: else:
for m in self.channels[c]: for m in self.channels[c]:


@@ -174,9 +174,6 @@ class PodManager(object):
) )
pod_spec['spec']['containers'][0]['name'] = self.pod_name pod_spec['spec']['containers'][0]['name'] = self.pod_name
# Prevent mounting of service account token in job pods in order to prevent job pods from accessing the k8s API via in cluster service account auth
pod_spec['spec']['automountServiceAccountToken'] = False
return pod_spec return pod_spec


@@ -10,8 +10,6 @@ import time
import sys import sys
import signal import signal
import redis
# Django # Django
from django.db import transaction from django.db import transaction
from django.utils.translation import gettext_lazy as _, gettext_noop from django.utils.translation import gettext_lazy as _, gettext_noop
@@ -19,9 +17,6 @@ from django.utils.timezone import now as tz_now
from django.conf import settings from django.conf import settings
from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.models import ContentType
# django-flags
from flags.state import flag_enabled
from ansible_base.lib.utils.models import get_type_for_model from ansible_base.lib.utils.models import get_type_for_model
# django-ansible-base # django-ansible-base
@@ -51,7 +46,6 @@ from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.scheduler.task_manager_models import TaskManagerModels from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.tasks.jobs import dispatch_waiting_jobs
import awx.main.analytics.subsystem_metrics as s_metrics import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.utils import decrypt_field from awx.main.utils import decrypt_field
@@ -103,7 +97,7 @@ class TaskBase:
UnifiedJob.objects.filter(**filter_args) UnifiedJob.objects.filter(**filter_args)
.exclude(launch_type='sync') .exclude(launch_type='sync')
.exclude(polymorphic_ctype_id=wf_approval_ctype_id) .exclude(polymorphic_ctype_id=wf_approval_ctype_id)
.order_by('created')
.order_by('-priority', 'created')
.prefetch_related('dependent_jobs') .prefetch_related('dependent_jobs')
) )
self.all_tasks = [t for t in qs] self.all_tasks = [t for t in qs]
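As a side note on the ordering change in this hunk: the queryset now sorts on '-priority' then 'created', so higher numbers are picked up first and ties fall back to oldest-first. A minimal pure-Python sketch of the same rule follows; the dict records and ids are illustrative stand-ins, not objects from the codebase.
from datetime import datetime, timedelta

# stand-in records: only 'priority' and 'created' matter for the ordering
now = datetime.now()
tasks = [
    {'id': 1, 'priority': 0, 'created': now - timedelta(minutes=5)},
    {'id': 2, 'priority': 10, 'created': now},
    {'id': 3, 'priority': 10, 'created': now - timedelta(minutes=1)},
]

# equivalent of .order_by('-priority', 'created'): highest priority first, then oldest first
ordered = sorted(tasks, key=lambda t: (-t['priority'], t['created']))
assert [t['id'] for t in ordered] == [3, 2, 1]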
@@ -126,8 +120,6 @@ class TaskBase:
self.subsystem_metrics.pipe_execute() self.subsystem_metrics.pipe_execute()
else: else:
logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago") logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
except redis.exceptions.ConnectionError as exc:
logger.warning(f"Redis connection error saving metrics for {self.prefix}, error: {exc}")
except Exception: except Exception:
logger.exception(f"Error saving metrics for {self.prefix}") logger.exception(f"Error saving metrics for {self.prefix}")
@@ -294,7 +286,7 @@ class WorkflowManager(TaskBase):
@timeit @timeit
def get_tasks(self, filter_args): def get_tasks(self, filter_args):
self.all_tasks = [wf for wf in WorkflowJob.objects.filter(**filter_args)]
self.all_tasks = [wf for wf in WorkflowJob.objects.filter(**filter_args).order_by('-priority', 'created')]
@timeit @timeit
def _schedule(self): def _schedule(self):
@@ -344,12 +336,14 @@ class DependencyManager(TaskBase):
return bool(((update.finished + timedelta(seconds=cache_timeout))) < tz_now()) return bool(((update.finished + timedelta(seconds=cache_timeout))) < tz_now())
def get_or_create_project_update(self, project_id):
def get_or_create_project_update(self, task):
project_id = task.project_id
priority = task.priority
project = self.all_projects.get(project_id, None) project = self.all_projects.get(project_id, None)
if project is not None: if project is not None:
latest_project_update = project.project_updates.filter(job_type='check').order_by("-created").first() latest_project_update = project.project_updates.filter(job_type='check').order_by("-created").first()
if self.should_update_again(latest_project_update, project.scm_update_cache_timeout): if self.should_update_again(latest_project_update, project.scm_update_cache_timeout):
project_task = project.create_project_update(_eager_fields=dict(launch_type='dependency'))
project_task = project.create_project_update(_eager_fields=dict(launch_type='dependency', priority=priority))
project_task.signal_start() project_task.signal_start()
return [project_task] return [project_task]
else: else:
@@ -357,7 +351,7 @@ class DependencyManager(TaskBase):
return [] return []
def gen_dep_for_job(self, task): def gen_dep_for_job(self, task):
dependencies = self.get_or_create_project_update(task.project_id)
dependencies = self.get_or_create_project_update(task)
try: try:
start_args = json.loads(decrypt_field(task, field_name="start_args")) start_args = json.loads(decrypt_field(task, field_name="start_args"))
@@ -369,7 +363,7 @@ class DependencyManager(TaskBase):
continue continue
latest_inventory_update = inventory_source.inventory_updates.order_by("-created").first() latest_inventory_update = inventory_source.inventory_updates.order_by("-created").first()
if self.should_update_again(latest_inventory_update, inventory_source.update_cache_timeout): if self.should_update_again(latest_inventory_update, inventory_source.update_cache_timeout):
inventory_task = inventory_source.create_inventory_update(_eager_fields=dict(launch_type='dependency'))
inventory_task = inventory_source.create_inventory_update(_eager_fields=dict(launch_type='dependency', priority=task.priority))
inventory_task.signal_start() inventory_task.signal_start()
dependencies.append(inventory_task) dependencies.append(inventory_task)
else: else:
@@ -435,7 +429,6 @@ class TaskManager(TaskBase):
# 5 minutes to start pending jobs. If this limit is reached, pending jobs # 5 minutes to start pending jobs. If this limit is reached, pending jobs
# will no longer be started and will be started on the next task manager cycle. # will no longer be started and will be started on the next task manager cycle.
self.time_delta_job_explanation = timedelta(seconds=30) self.time_delta_job_explanation = timedelta(seconds=30)
self.control_nodes_to_notify: set[str] = set()
super().__init__(prefix="task_manager") super().__init__(prefix="task_manager")
def after_lock_init(self): def after_lock_init(self):
@@ -524,19 +517,16 @@ class TaskManager(TaskBase):
task.save() task.save()
task.log_lifecycle("waiting") task.log_lifecycle("waiting")
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
self.control_nodes_to_notify.add(task.get_queue_name())
else:
# apply_async does a NOTIFY to the channel dispatcher is listening to
# postgres will treat this as part of the transaction, which is what we want
if task.status != 'failed' and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
)
# apply_async does a NOTIFY to the channel dispatcher is listening to
# postgres will treat this as part of the transaction, which is what we want
if task.status != 'failed' and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
)
# In exception cases, like a job failing pre-start checks, we send the websocket status message. # In exception cases, like a job failing pre-start checks, we send the websocket status message.
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly # For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
@@ -729,8 +719,3 @@ class TaskManager(TaskBase):
for workflow_approval in self.get_expired_workflow_approvals(): for workflow_approval in self.get_expired_workflow_approvals():
self.timeout_approval_node(workflow_approval) self.timeout_approval_node(workflow_approval)
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
for controller_node in self.control_nodes_to_notify:
logger.info(f'Notifying node {controller_node} of new waiting jobs.')
dispatch_waiting_jobs.apply_async(queue=controller_node)


@@ -7,7 +7,7 @@ from django.conf import settings
# AWX # AWX
from awx import MODE from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
logger = logging.getLogger('awx.main.scheduler') logger = logging.getLogger('awx.main.scheduler')
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
manager().schedule() manager().schedule()
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def task_manager(): def task_manager():
run_manager(TaskManager, "task") run_manager(TaskManager, "task")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def dependency_manager(): def dependency_manager():
run_manager(DependencyManager, "dependency") run_manager(DependencyManager, "dependency")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def workflow_manager(): def workflow_manager():
run_manager(WorkflowManager, "workflow") run_manager(WorkflowManager, "workflow")


@@ -38,6 +38,7 @@ from awx.main.models import (
InventorySource, InventorySource,
Job, Job,
JobHostSummary, JobHostSummary,
JobTemplate,
Organization, Organization,
Project, Project,
Role, Role,
@@ -55,7 +56,10 @@ from awx.main.models import (
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image
from awx.main.fields import is_implicit_parent
from awx.main.fields import (
is_implicit_parent,
update_role_parentage_for_instance,
)
from awx.main import consumers from awx.main import consumers
@@ -188,6 +192,31 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
label.delete() label.delete()
def save_related_job_templates(sender, instance, **kwargs):
"""save_related_job_templates loops through all of the
job templates that use an Inventory that have had their
Organization updated. This triggers the rebuilding of the RBAC hierarchy
and ensures the proper access restrictions.
"""
if sender is not Inventory:
raise ValueError('This signal callback is only intended for use with Project or Inventory')
update_fields = kwargs.get('update_fields', None)
if (update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or kwargs.get('created', False):
return
if instance._prior_values_store.get('organization_id') != instance.organization_id:
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
for jt in jtq:
parents_added, parents_removed = update_role_parentage_for_instance(jt)
if parents_added or parents_removed:
logger.info(
'Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
)
)
def connect_computed_field_signals(): def connect_computed_field_signals():
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host) post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host) post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
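For orientation on when the new save_related_job_templates handler fires: it only reacts to an Inventory save where the organization actually changed, and it skips newly created objects or saves whose update_fields exclude the organization. A hedged sketch of a triggering call follows; 'inv' and 'new_org' are assumed, pre-existing Inventory and Organization instances, not objects from this diff.
# Illustrative only: the handler compares the prior organization_id against the new one.
inv.organization = new_org
inv.save(update_fields=['organization'])  # post_save fires; JT role parentage is recomputed if the org changed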
@@ -201,6 +230,7 @@ def connect_computed_field_signals():
connect_computed_field_signals() connect_computed_field_signals()
post_save.connect(save_related_job_templates, sender=Inventory)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through) m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through) m2m_changed.connect(rbac_activity_stream, Role.members.through)
m2m_changed.connect(rbac_activity_stream, Role.parents.through) m2m_changed.connect(rbac_activity_stream, Role.parents.through)


@@ -1 +1 @@
from . import callback, facts, helpers, host_indirect, host_metrics, jobs, receptor, system # noqa
from . import host_metrics, jobs, receptor, system # noqa


@@ -6,15 +6,16 @@ import logging
# Django # Django
from django.conf import settings from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
from django.utils.timezone import now from django.utils.timezone import now
from django.db import OperationalError
# django-ansible-base # django-ansible-base
from ansible_base.lib.logging.runtime import log_excess_runtime from ansible_base.lib.logging.runtime import log_excess_runtime
# AWX # AWX
from awx.main.utils.db import bulk_update_sorted_by_id
from awx.main.models.inventory import Host
from awx.main.models import Host
logger = logging.getLogger('awx.main.tasks.facts') logger = logging.getLogger('awx.main.tasks.facts')
@@ -22,51 +23,63 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True) @log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_data=None):
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
log_data = log_data or {}
log_data['inventory_id'] = inventory_id log_data['inventory_id'] = inventory_id
log_data['written_ct'] = 0 log_data['written_ct'] = 0
hosts_cached = [] try:
os.makedirs(destination, mode=0o700)
# Create the fact_cache directory inside artifacts_dir except FileExistsError:
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache') pass
os.makedirs(fact_cache_dir, mode=0o700, exist_ok=True)
if timeout is None: if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
last_write_time = None if isinstance(hosts, QuerySet):
hosts = hosts.iterator()
last_filepath_written = None
for host in hosts: for host in hosts:
hosts_cached.append(host.name) if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
continue # facts are expired - do not write them continue # facts are expired - do not write them
filepath = os.sep.join(map(str, [destination, host.name]))
filepath = os.path.join(fact_cache_dir, host.name) if not os.path.realpath(filepath).startswith(destination):
if not os.path.realpath(filepath).startswith(fact_cache_dir): system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
continue continue
try: try:
with codecs.open(filepath, 'w', encoding='utf-8') as f: with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600) os.chmod(f.name, 0o600)
json.dump(host.ansible_facts, f) json.dump(host.ansible_facts, f)
log_data['written_ct'] += 1 log_data['written_ct'] += 1
last_write_time = os.path.getmtime(filepath) last_filepath_written = filepath
except IOError: except IOError:
logger.error(f'facts for host {smart_str(host.name)} could not be cached') system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written:
return os.path.getmtime(last_filepath_written)
return None
# Write summary file directly to the artifacts_dir
if inventory_id is not None: def raw_update_hosts(host_list):
summary_file = os.path.join(artifacts_dir, 'host_cache_summary.json') Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
summary_data = {
'last_write_time': last_write_time,
'hosts_cached': hosts_cached, def update_hosts(host_list, max_tries=5):
'written_ct': log_data['written_ct'], if not host_list:
} return
with open(summary_file, 'w', encoding='utf-8') as f: for i in range(max_tries):
json.dump(summary_data, f, indent=2) try:
raw_update_hosts(host_list)
except OperationalError as exc:
# Deadlocks can happen if this runs at the same time as another large query
# inventory updates and updating last_job_host_summary are candidates for conflict
# but these would resolve easily on a retry
if i + 1 < max_tries:
logger.info(f'OperationalError (suspected deadlock) saving host facts retry {i}, message: {exc}')
continue
else:
raise
break
@log_excess_runtime( @log_excess_runtime(
@@ -75,54 +88,35 @@ def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s', msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True, add_log_data=True,
) )
def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=None):
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
log_data = log_data or {}
log_data['inventory_id'] = inventory_id log_data['inventory_id'] = inventory_id
log_data['updated_ct'] = 0 log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0 log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0 log_data['cleared_ct'] = 0
# The summary file is directly inside the artifacts dir
summary_path = os.path.join(artifacts_dir, 'host_cache_summary.json')
if not os.path.exists(summary_path):
logger.error(f'Missing summary file at {summary_path}')
return
try: if isinstance(hosts, QuerySet):
with open(summary_path, 'r', encoding='utf-8') as f: hosts = hosts.iterator()
summary = json.load(f)
facts_write_time = os.path.getmtime(summary_path) # After successful read
except (json.JSONDecodeError, OSError) as e:
logger.error(f'Error reading summary file at {summary_path}: {e}')
return
host_names = summary.get('hosts_cached', [])
hosts_cached = Host.objects.filter(name__in=host_names).order_by('id').iterator()
# Path where individual fact files were written
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
hosts_to_update = [] hosts_to_update = []
for host in hosts:
for host in hosts_cached: filepath = os.sep.join(map(str, [destination, host.name]))
filepath = os.path.join(fact_cache_dir, host.name) if not os.path.realpath(filepath).startswith(destination):
if not os.path.realpath(filepath).startswith(fact_cache_dir): system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
logger.error(f'Invalid path for facts file: {filepath}')
continue continue
if os.path.exists(filepath): if os.path.exists(filepath):
# If the file changed since we wrote the last facts file, pre-playbook run... # If the file changed since we wrote the last facts file, pre-playbook run...
modified = os.path.getmtime(filepath) modified = os.path.getmtime(filepath)
if not facts_write_time or modified >= facts_write_time: if (not facts_write_time) or modified > facts_write_time:
try: with codecs.open(filepath, 'r', encoding='utf-8') as f:
with codecs.open(filepath, 'r', encoding='utf-8') as f: try:
ansible_facts = json.load(f) ansible_facts = json.load(f)
except ValueError: except ValueError:
continue continue
if ansible_facts != host.ansible_facts:
host.ansible_facts = ansible_facts host.ansible_facts = ansible_facts
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host) hosts_to_update.append(host)
logger.info( system_tracking_logger.info(
f'New fact for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}', 'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
extra=dict( extra=dict(
inventory_id=host.inventory.id, inventory_id=host.inventory.id,
host_name=host.name, host_name=host.name,
@@ -132,21 +126,16 @@ def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=No
), ),
) )
log_data['updated_ct'] += 1 log_data['updated_ct'] += 1
else:
log_data['unmodified_ct'] += 1
else: else:
log_data['unmodified_ct'] += 1 log_data['unmodified_ct'] += 1
else: else:
# if the file goes missing, ansible removed it (likely via clear_facts) # if the file goes missing, ansible removed it (likely via clear_facts)
# if the file goes missing, but the host has not started facts, then we should not clear the facts
host.ansible_facts = {} host.ansible_facts = {}
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host) hosts_to_update.append(host)
logger.info(f'Facts cleared for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}') system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
log_data['cleared_ct'] += 1 log_data['cleared_ct'] += 1
if len(hosts_to_update) > 100:
if len(hosts_to_update) >= 100: update_hosts(hosts_to_update)
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
hosts_to_update = [] hosts_to_update = []
update_hosts(hosts_to_update)
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])


@@ -45,46 +45,26 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
facts_missing_logged = False facts_missing_logged = False
unhashable_facts_logged = False unhashable_facts_logged = False
job_event_queries_fqcn = {}
for query_k, query_v in job_event_queries.items():
if len(parts := query_k.split('.')) != 3:
logger.info(f"Skiping malformed query '{query_k}'. Expected to be of the form 'a.b.c'")
continue
if parts[2] != '*':
continue
job_event_queries_fqcn['.'.join(parts[0:2])] = query_v
for event in job.job_events.filter(event_data__isnull=False).iterator(): for event in job.job_events.filter(event_data__isnull=False).iterator():
if 'res' not in event.event_data: if 'res' not in event.event_data:
continue continue
if not (resolved_action := event.event_data.get('resolved_action', None)): if 'resolved_action' not in event.event_data or event.event_data['resolved_action'] not in job_event_queries.keys():
continue continue
if len(resolved_action_parts := resolved_action.split('.')) != 3: resolved_action = event.event_data['resolved_action']
logger.debug(f"Malformed invocation module name '{resolved_action}'. Expected to be of the form 'a.b.c'")
continue
resolved_action_fqcn = '.'.join(resolved_action_parts[0:2]) # We expect a dict with a 'query' key for the resolved_action
if 'query' not in job_event_queries[resolved_action]:
# Match module invocation to collection queries
# First match against fully qualified query names i.e. a.b.c
# Then try and match against wildcard queries i.e. a.b.*
if not (jq_str_for_event := job_event_queries.get(resolved_action, job_event_queries_fqcn.get(resolved_action_fqcn, {})).get('query')):
continue continue
# Recall from cache, or process the jq expression, and loop over the jq results # Recall from cache, or process the jq expression, and loop over the jq results
jq_str_for_event = job_event_queries[resolved_action]['query']
if jq_str_for_event not in compiled_jq_expressions: if jq_str_for_event not in compiled_jq_expressions:
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event) compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
compiled_jq = compiled_jq_expressions[resolved_action] compiled_jq = compiled_jq_expressions[resolved_action]
for data in compiled_jq.input(event.event_data['res']).all():
try:
data_source = compiled_jq.input(event.event_data['res']).all()
except Exception as e:
logger.warning(f'error for module {resolved_action} and data {event.event_data["res"]}: {e}')
continue
for data in data_source:
# From this jq result (specific to a single Ansible module), get index information about this host record # From this jq result (specific to a single Ansible module), get index information about this host record
if not data.get('canonical_facts'): if not data.get('canonical_facts'):
if not facts_missing_logged: if not facts_missing_logged:


@@ -7,18 +7,17 @@ from django.db.models import Count, F
from django.db.models.functions import TruncMonth from django.db.models.functions import TruncMonth
from django.utils.timezone import now from django.utils.timezone import now
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.tasks.helpers import is_run_threshold_reached from awx.main.tasks.helpers import is_run_threshold_reached
from awx.conf.license import get_license from awx.conf.license import get_license
from ansible_base.lib.utils.db import advisory_lock from ansible_base.lib.utils.db import advisory_lock
from awx.main.utils.db import bulk_update_sorted_by_id
logger = logging.getLogger('awx.main.tasks.host_metrics') logger = logging.getLogger('awx.main.tasks.host_metrics')
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def cleanup_host_metrics(): def cleanup_host_metrics():
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400): if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}") logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
@@ -29,7 +28,7 @@ def cleanup_host_metrics():
logger.info("Finished cleanup_host_metrics") logger.info("Finished cleanup_host_metrics")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def host_metric_summary_monthly(): def host_metric_summary_monthly():
"""Run cleanup host metrics summary monthly task each week""" """Run cleanup host metrics summary monthly task each week"""
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400): if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
@@ -147,9 +146,8 @@ class HostMetricSummaryMonthlyTask:
month = month + relativedelta(months=1) month = month + relativedelta(months=1)
# Create/Update stats # Create/Update stats
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create)
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)
bulk_update_sorted_by_id(HostMetricSummaryMonthly, self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'])
# Set timestamp of last run # Set timestamp of last run
settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now() settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()


@@ -17,12 +17,10 @@ import urllib.parse as urlparse
# Django # Django
from django.conf import settings from django.conf import settings
from django.db import transaction
# Shared code for the AWX platform # Shared code for the AWX platform
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied
# Runner # Runner
import ansible_runner import ansible_runner
@@ -31,12 +29,9 @@ import ansible_runner
import git import git
from gitdb.exc import BadName as BadGitName from gitdb.exc import BadName as BadGitName
# Dispatcherd
from dispatcherd.publish import task
from dispatcherd.utils import serialize_task
# AWX # AWX
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
from awx.main.constants import ( from awx.main.constants import (
PRIVILEGE_ESCALATION_METHODS, PRIVILEGE_ESCALATION_METHODS,
@@ -44,13 +39,13 @@ from awx.main.constants import (
JOB_FOLDER_PREFIX, JOB_FOLDER_PREFIX,
MAX_ISOLATED_PATH_COLON_DELIMITER, MAX_ISOLATED_PATH_COLON_DELIMITER,
CONTAINER_VOLUMES_MOUNT_TYPES, CONTAINER_VOLUMES_MOUNT_TYPES,
ACTIVE_STATES,
HOST_FACTS_FIELDS, HOST_FACTS_FIELDS,
) )
from awx.main.models import ( from awx.main.models import (
Instance, Instance,
Inventory, Inventory,
InventorySource, InventorySource,
UnifiedJob,
Job, Job,
AdHocCommand, AdHocCommand,
ProjectUpdate, ProjectUpdate,
@@ -70,12 +65,11 @@ from awx.main.tasks.callback import (
RunnerCallbackForProjectUpdate, RunnerCallbackForProjectUpdate,
RunnerCallbackForSystemJob, RunnerCallbackForSystemJob,
) )
from awx.main.tasks.policy import evaluate_policy
from awx.main.tasks.signals import with_signal_handling, signal_callback from awx.main.tasks.signals import with_signal_handling, signal_callback
from awx.main.tasks.receptor import AWXReceptorJob from awx.main.tasks.receptor import AWXReceptorJob
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook
from awx.main.exceptions import AwxTaskError, PolicyEvaluationError, PostRunError, ReceptorNodeNotFound
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
from awx.main.utils.ansible import read_ansible_config from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.common import ( from awx.main.utils.common import (
@@ -89,6 +83,8 @@ from awx.main.utils.common import (
from awx.conf.license import get_license from awx.conf.license import get_license
from awx.main.utils.handlers import SpecialInventoryHandler from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.utils.update_model import update_model from awx.main.utils.update_model import update_model
from rest_framework.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
# Django flags # Django flags
from flags.state import flag_enabled from flags.state import flag_enabled
@@ -115,15 +111,6 @@ def with_path_cleanup(f):
return _wrapped return _wrapped
@task(on_duplicate='queue_one', bind=True, queue=get_task_queuename)
def dispatch_waiting_jobs(binder):
for uj in UnifiedJob.objects.filter(status='waiting', controller_node=settings.CLUSTER_HOST_ID).only('id', 'status', 'polymorphic_ctype', 'celery_task_id'):
kwargs = uj.get_start_kwargs()
if not kwargs:
kwargs = {}
binder.control('run', data={'task': serialize_task(uj._get_task_class()), 'args': [uj.id], 'kwargs': kwargs, 'uuid': uj.celery_task_id})
class BaseTask(object): class BaseTask(object):
model = None model = None
event_model = None event_model = None
@@ -131,7 +118,6 @@ class BaseTask(object):
callback_class = RunnerCallback callback_class = RunnerCallback
def __init__(self): def __init__(self):
self.instance = None
self.cleanup_paths = [] self.cleanup_paths = []
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5) self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.runner_callback = self.callback_class(model=self.model) self.runner_callback = self.callback_class(model=self.model)
@@ -319,8 +305,6 @@ class BaseTask(object):
# Add ANSIBLE_* settings to the subprocess environment. # Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings): for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'): if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'):
if attr == 'ANSIBLE_STANDARD_SETTINGS_FILES':
continue # special case intended only for dynaconf use
env[attr] = str(getattr(settings, attr)) env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting. # Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items(): for key, value in settings.AWX_TASK_ENV.items():
@@ -468,48 +452,27 @@ class BaseTask(object):
def should_use_fact_cache(self): def should_use_fact_cache(self):
return False return False
def transition_status(self, pk: int) -> bool:
"""Atomically transition status to running, if False returned, another process got it"""
with transaction.atomic():
# Explanation of parts for the fetch:
# .values - avoid loading a full object, this is known to lead to deadlocks due to signals
# the signals load other related rows which another process may be locking, and happens in practice
# of=('self',) - keeps FK tables out of the lock list, another way deadlocks can happen
# .get - just load the single job
instance_data = UnifiedJob.objects.select_for_update(of=('self',)).values('status', 'cancel_flag').get(pk=pk)
# If status is not waiting (obtained under lock) then this process does not have clearence to run
if instance_data['status'] == 'waiting':
if instance_data['cancel_flag']:
updated_status = 'canceled'
else:
updated_status = 'running'
# Explanation of the update:
# .filter - again, do not load the full object
# .update - a bulk update on just that one row, avoid loading unintended data
UnifiedJob.objects.filter(pk=pk).update(status=updated_status, start_args='')
elif instance_data['status'] == 'running':
logger.info(f'Job {pk} is being ran by another process, exiting')
return False
return True
@with_path_cleanup @with_path_cleanup
@with_signal_handling @with_signal_handling
def run(self, pk, **kwargs): def run(self, pk, **kwargs):
""" """
Run the job/task and capture its output. Run the job/task and capture its output.
""" """
if not self.instance: # Used to skip fetch for local runs self.instance = self.model.objects.get(pk=pk)
if not self.transition_status(pk): if self.instance.status != 'canceled' and self.instance.cancel_flag:
logger.info(f'Job {pk} is being ran by another process, exiting') self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
return if self.instance.status not in ACTIVE_STATES:
# Prevent starting the job if it has been reaped or handled by another process.
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
# Load the instance if self.instance.execution_environment_id is None:
self.instance = self.update_model(pk) from awx.main.signals import disable_activity_stream
if self.instance.status != 'running':
logger.error(f'Not starting {self.instance.status} task pk={pk} because its status "{self.instance.status}" is not expected')
return
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# self.instance because of the update_model pattern and when it's used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running") self.instance.websocket_emit_status("running")
status, rc = 'error', None status, rc = 'error', None
self.runner_callback.event_ct = 0 self.runner_callback.event_ct = 0
@@ -522,20 +485,12 @@ class BaseTask(object):
private_data_dir = None private_data_dir = None
try: try:
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
self.instance.send_notification_templates("running") self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance) private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir) self.pre_run_hook(self.instance, private_data_dir)
evaluate_policy(self.instance)
self.build_project_dir(self.instance, private_data_dir) self.build_project_dir(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook") self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag or signal_callback(): if self.instance.cancel_flag or signal_callback():
logger.debug(f'detected pre-run cancel flag for {self.instance.log_format}')
self.instance = self.update_model(self.instance.pk, status='canceled') self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running': if self.instance.status != 'running':
@@ -658,11 +613,12 @@ class BaseTask(object):
elif status == 'canceled': elif status == 'canceled':
self.instance = self.update_model(pk) self.instance = self.update_model(pk)
cancel_flag_value = getattr(self.instance, 'cancel_flag', False) cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
if cancel_flag_value is False: if (cancel_flag_value is False) and signal_callback():
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.") self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
status = 'failed' status = 'failed'
except PolicyEvaluationError as exc: elif cancel_flag_value is False:
self.runner_callback.delay_update(job_explanation=str(exc), result_traceback=str(exc)) self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
status = 'failed'
except ReceptorNodeNotFound as exc: except ReceptorNodeNotFound as exc:
self.runner_callback.delay_update(job_explanation=str(exc)) self.runner_callback.delay_update(job_explanation=str(exc))
except Exception: except Exception:
@@ -688,9 +644,6 @@ class BaseTask(object):
# Field host_status_counts is used as a metric to check if event processing is finished # Field host_status_counts is used as a metric to check if event processing is finished
# we send notifications if it is, if not, callback receiver will send them # we send notifications if it is, if not, callback receiver will send them
if not self.instance:
logger.error(f'Unified job pk={pk} appears to be deleted while running')
return
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched): if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
events_processed_hook(self.instance) events_processed_hook(self.instance)
@@ -787,7 +740,6 @@ class SourceControlMixin(BaseTask):
try: try:
# the job private_data_dir is passed so sync can download roles and collections there # the job private_data_dir is passed so sync can download roles and collections there
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir) sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
sync_task.instance = local_project_sync # avoids "waiting" status check, performance
sync_task.run(local_project_sync.id) sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db() local_project_sync.refresh_from_db()
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision) self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
@@ -851,7 +803,7 @@ class SourceControlMixin(BaseTask):
self.release_lock(project) self.release_lock(project)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
class RunJob(SourceControlMixin, BaseTask): class RunJob(SourceControlMixin, BaseTask):
""" """
Run a job using ansible-playbook. Run a job using ansible-playbook.
@@ -1139,8 +1091,8 @@ class RunJob(SourceControlMixin, BaseTask):
# where ansible expects to find it # where ansible expects to find it
if self.should_use_fact_cache(): if self.should_use_fact_cache():
job.log_lifecycle("start_job_fact_cache") job.log_lifecycle("start_job_fact_cache")
self.hosts_with_facts_cached = start_fact_cache( self.facts_write_time = start_fact_cache(
job.get_hosts_for_fact_cache(), artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)), inventory_id=job.inventory_id job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
) )
def build_project_dir(self, job, private_data_dir): def build_project_dir(self, job, private_data_dir):
@@ -1150,7 +1102,7 @@ class RunJob(SourceControlMixin, BaseTask):
super(RunJob, self).post_run_hook(job, status) super(RunJob, self).post_run_hook(job, status)
job.refresh_from_db(fields=['job_env']) job.refresh_from_db(fields=['job_env'])
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR') private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
if not private_data_dir: if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
# If there's no private data dir, that means we didn't get into the # If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in # actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method # the pre_run_hook method
@@ -1158,7 +1110,9 @@ class RunJob(SourceControlMixin, BaseTask):
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed: if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
job.log_lifecycle("finish_job_fact_cache") job.log_lifecycle("finish_job_fact_cache")
finish_fact_cache( finish_fact_cache(
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)), job.get_hosts_for_fact_cache(),
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
facts_write_time=self.facts_write_time,
job_id=job.id, job_id=job.id,
inventory_id=job.inventory_id, inventory_id=job.inventory_id,
) )
@@ -1174,7 +1128,7 @@ class RunJob(SourceControlMixin, BaseTask):
update_inventory_computed_fields.delay(inventory.id) update_inventory_computed_fields.delay(inventory.id)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
class RunProjectUpdate(BaseTask): class RunProjectUpdate(BaseTask):
model = ProjectUpdate model = ProjectUpdate
event_model = ProjectUpdateEvent event_model = ProjectUpdateEvent
@@ -1513,7 +1467,7 @@ class RunProjectUpdate(BaseTask):
return [] return []
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask): class RunInventoryUpdate(SourceControlMixin, BaseTask):
model = InventoryUpdate model = InventoryUpdate
event_model = InventoryUpdateEvent event_model = InventoryUpdateEvent
@@ -1624,7 +1578,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
# Include any facts from input inventories so they can be used in filters # Include any facts from input inventories so they can be used in filters
start_fact_cache( start_fact_cache(
input_inventory.hosts.only(*HOST_FACTS_FIELDS), input_inventory.hosts.only(*HOST_FACTS_FIELDS),
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(inventory_update.id)), os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
inventory_id=input_inventory.id, inventory_id=input_inventory.id,
) )
@@ -1776,7 +1730,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc()) raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
class RunAdHocCommand(BaseTask): class RunAdHocCommand(BaseTask):
""" """
Run an ad hoc command using ansible. Run an ad hoc command using ansible.
@@ -1929,7 +1883,7 @@ class RunAdHocCommand(BaseTask):
return d return d
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
class RunSystemJob(BaseTask): class RunSystemJob(BaseTask):
model = SystemJob model = SystemJob
event_model = SystemJobEvent event_model = SystemJobEvent


@@ -1,458 +0,0 @@
import json
import tempfile
import contextlib
from pprint import pformat
from typing import Optional, Union
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from opa_client import OpaClient
from opa_client.base import BaseClient
from requests import HTTPError
from rest_framework import serializers
from rest_framework import fields
from awx.main import models
from awx.main.exceptions import PolicyEvaluationError
# Monkey patching opa_client.base.BaseClient to fix retries and timeout settings
_original_opa_base_client_init = BaseClient.__init__
def _opa_base_client_init_fix(
self,
host: str = "localhost",
port: int = 8181,
version: str = "v1",
ssl: bool = False,
cert: Optional[Union[str, tuple]] = None,
headers: Optional[dict] = None,
retries: int = 2,
timeout: float = 1.5,
):
_original_opa_base_client_init(self, host, port, version, ssl, cert, headers)
self.retries = retries
self.timeout = timeout
BaseClient.__init__ = _opa_base_client_init_fix
class _TeamSerializer(serializers.ModelSerializer):
class Meta:
model = models.Team
fields = ('id', 'name')
class _UserSerializer(serializers.ModelSerializer):
teams = serializers.SerializerMethodField()
class Meta:
model = models.User
fields = ('id', 'username', 'is_superuser', 'teams')
def get_teams(self, user: models.User):
teams = models.Team.access_qs(user, 'member')
return _TeamSerializer(many=True).to_representation(teams)
class _ExecutionEnvironmentSerializer(serializers.ModelSerializer):
class Meta:
model = models.ExecutionEnvironment
fields = (
'id',
'name',
'image',
'pull',
)
class _InstanceGroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.InstanceGroup
fields = (
'id',
'name',
'capacity',
'jobs_running',
'jobs_total',
'max_concurrent_jobs',
'max_forks',
)
class _InventorySourceSerializer(serializers.ModelSerializer):
class Meta:
model = models.InventorySource
fields = ('id', 'name', 'source', 'status')
class _InventorySerializer(serializers.ModelSerializer):
inventory_sources = _InventorySourceSerializer(many=True)
class Meta:
model = models.Inventory
fields = (
'id',
'name',
'description',
'kind',
'total_hosts',
'total_groups',
'has_inventory_sources',
'total_inventory_sources',
'has_active_failures',
'hosts_with_active_failures',
'inventory_sources',
)
class _JobTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = models.JobTemplate
fields = (
'id',
'name',
'job_type',
)
class _WorkflowJobTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = models.WorkflowJobTemplate
fields = (
'id',
'name',
'job_type',
)
class _WorkflowJobSerializer(serializers.ModelSerializer):
class Meta:
model = models.WorkflowJob
fields = (
'id',
'name',
)
class _OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = models.Organization
fields = (
'id',
'name',
)
class _ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = models.Project
fields = (
'id',
'name',
'status',
'scm_type',
'scm_url',
'scm_branch',
'scm_refspec',
'scm_clean',
'scm_track_submodules',
'scm_delete_on_update',
)
class _CredentialSerializer(serializers.ModelSerializer):
organization = _OrganizationSerializer()
class Meta:
model = models.Credential
fields = (
'id',
'name',
'description',
'organization',
'credential_type',
'managed',
'kind',
'cloud',
'kubernetes',
)
class _LabelSerializer(serializers.ModelSerializer):
organization = _OrganizationSerializer()
class Meta:
model = models.Label
fields = ('id', 'name', 'organization')
class JobSerializer(serializers.ModelSerializer):
created_by = _UserSerializer()
credentials = _CredentialSerializer(many=True)
execution_environment = _ExecutionEnvironmentSerializer()
instance_group = _InstanceGroupSerializer()
inventory = _InventorySerializer()
job_template = _JobTemplateSerializer()
labels = _LabelSerializer(many=True)
organization = _OrganizationSerializer()
project = _ProjectSerializer()
extra_vars = fields.SerializerMethodField()
hosts_count = fields.SerializerMethodField()
workflow_job = fields.SerializerMethodField()
workflow_job_template = fields.SerializerMethodField()
class Meta:
model = models.Job
fields = (
'id',
'name',
'created',
'created_by',
'credentials',
'execution_environment',
'extra_vars',
'forks',
'hosts_count',
'instance_group',
'inventory',
'job_template',
'job_type',
'job_type_name',
'labels',
'launch_type',
'limit',
'launched_by',
'organization',
'playbook',
'project',
'scm_branch',
'scm_revision',
'workflow_job',
'workflow_job_template',
)
def get_extra_vars(self, obj: models.Job):
return json.loads(obj.display_extra_vars())
def get_hosts_count(self, obj: models.Job):
return obj.hosts.count()
def get_workflow_job(self, obj: models.Job):
workflow_job: models.WorkflowJob = obj.get_workflow_job()
if workflow_job is None:
return None
return _WorkflowJobSerializer().to_representation(workflow_job)
def get_workflow_job_template(self, obj: models.Job):
workflow_job: models.WorkflowJob = obj.get_workflow_job()
if workflow_job is None:
return None
workflow_job_template: models.WorkflowJobTemplate = workflow_job.workflow_job_template
if workflow_job_template is None:
return None
return _WorkflowJobTemplateSerializer().to_representation(workflow_job_template)
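# Illustrative only: a trimmed example of the OPA input document this serializer
# produces (values below are made up; the real payload carries every field listed
# in Meta.fields above).
EXAMPLE_OPA_INPUT = {
    'id': 123,
    'name': 'Demo Job',
    'created_by': {'id': 1, 'username': 'admin', 'is_superuser': True, 'teams': []},
    'credentials': [{'id': 7, 'name': 'Demo Credential', 'kind': 'ssh', 'cloud': False}],
    'execution_environment': {'id': 2, 'name': 'Default EE', 'image': 'quay.io/ansible/awx-ee:latest', 'pull': 'missing'},
    'extra_vars': {'sleep_interval': 5},
    'hosts_count': 12,
    'inventory': {'id': 3, 'name': 'Demo Inventory', 'kind': '', 'total_hosts': 12},
    'job_template': {'id': 11, 'name': 'Demo Job Template', 'job_type': 'run'},
    'organization': {'id': 1, 'name': 'Default'},
    'project': {'id': 5, 'name': 'Demo Project', 'scm_type': 'git'},
    'workflow_job': None,
    'workflow_job_template': None,
}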
class OPAResultSerializer(serializers.Serializer):
allowed = fields.BooleanField(required=True)
violations = fields.ListField(child=fields.CharField())
class OPA_AUTH_TYPES:
NONE = 'None'
TOKEN = 'Token'
CERTIFICATE = 'Certificate'
@contextlib.contextmanager
def opa_cert_file():
"""
Context manager that creates temporary certificate files for OPA authentication.
For mTLS (mutual TLS), we need:
- Client certificate and key for client authentication
- CA certificate (optional) for server verification
Returns:
tuple: (client_cert_path, verify_path)
- client_cert_path: Path to client cert file or None if not using client cert
- verify_path: Path to CA cert file, True to use system CA store, or False for no verification
"""
client_cert_temp = None
ca_temp = None
try:
# Case 1: Full mTLS with client cert and optional CA cert
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
# Create client certificate file (required for mTLS)
client_cert_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
client_cert_temp.write(settings.OPA_AUTH_CLIENT_CERT)
client_cert_temp.write("\n")
client_cert_temp.write(settings.OPA_AUTH_CLIENT_KEY)
client_cert_temp.write("\n")
client_cert_temp.flush()
# If CA cert is provided, use it for server verification
# Otherwise, use system CA store (True)
if settings.OPA_AUTH_CA_CERT:
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
ca_temp.write(settings.OPA_AUTH_CA_CERT)
ca_temp.write("\n")
ca_temp.flush()
verify_path = ca_temp.name
else:
verify_path = True # Use system CA store
yield (client_cert_temp.name, verify_path)
# Case 2: TLS with only server verification (no client cert)
elif settings.OPA_SSL:
# If CA cert is provided, use it for server verification
# Otherwise, use system CA store (True)
if settings.OPA_AUTH_CA_CERT:
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
ca_temp.write(settings.OPA_AUTH_CA_CERT)
ca_temp.write("\n")
ca_temp.flush()
verify_path = ca_temp.name
else:
verify_path = True # Use system CA store
yield (None, verify_path)
# Case 3: No TLS
else:
yield (None, False)
finally:
# Clean up temporary files
if client_cert_temp:
client_cert_temp.close()
if ca_temp:
ca_temp.close()
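# A minimal usage sketch (assumes the OPA_* settings are configured): the yielded
# pair maps directly onto requests' cert/verify options, mirroring the workaround
# applied in opa_client() below. The helper name and session here are illustrative.
def _example_requests_session_for_opa():
    import requests

    with opa_cert_file() as (client_cert, verify):
        session = requests.Session()
        session.cert = client_cert  # None unless a client certificate is in use
        session.verify = verify  # CA bundle path, True (system CA store), or False
        # ... perform OPA requests here, while the temporary cert files still exist ...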
@contextlib.contextmanager
def opa_client(headers=None):
with opa_cert_file() as cert_files:
cert, verify = cert_files
with OpaClient(
host=settings.OPA_HOST,
port=settings.OPA_PORT,
headers=headers,
ssl=settings.OPA_SSL,
cert=cert,
timeout=settings.OPA_REQUEST_TIMEOUT,
retries=settings.OPA_REQUEST_RETRIES,
) as client:
# Workaround for https://github.com/Turall/OPA-python-client/issues/32
# by directly setting cert and verify on requests.session
client._session.cert = cert
client._session.verify = verify
yield client
def evaluate_policy(instance):
# Policy evaluation for Policy as Code feature
if not settings.OPA_HOST:
return
if not isinstance(instance, models.Job):
return
instance.log_lifecycle("evaluate_policy")
input_data = JobSerializer(instance=instance).data
headers = settings.OPA_AUTH_CUSTOM_HEADERS
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.TOKEN:
headers.update({'Authorization': 'Bearer {}'.format(settings.OPA_AUTH_TOKEN)})
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE and not settings.OPA_SSL:
raise PolicyEvaluationError(_('OPA_AUTH_TYPE=Certificate requires OPA_SSL to be enabled.'))
cert_settings_missing = []
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
if not settings.OPA_AUTH_CLIENT_CERT:
cert_settings_missing += ['OPA_AUTH_CLIENT_CERT']
if not settings.OPA_AUTH_CLIENT_KEY:
cert_settings_missing += ['OPA_AUTH_CLIENT_KEY']
if not settings.OPA_AUTH_CA_CERT:
cert_settings_missing += ['OPA_AUTH_CA_CERT']
if cert_settings_missing:
raise PolicyEvaluationError(_('Following certificate settings are missing for OPA_AUTH_TYPE=Certificate: {}').format(cert_settings_missing))
query_paths = [
('Organization', instance.organization.opa_query_path),
('Inventory', instance.inventory.opa_query_path),
('Job template', instance.job_template.opa_query_path),
]
violations = dict()
errors = dict()
try:
with opa_client(headers=headers) as client:
for path_type, query_path in query_paths:
response = dict()
try:
if not query_path:
continue
response = client.query_rule(input_data=input_data, package_path=query_path)
except HTTPError as e:
message = _('Call to OPA failed. Exception: {}').format(e)
try:
error_data = e.response.json()
except ValueError:
errors[path_type] = message
continue
error_code = error_data.get("code")
error_message = error_data.get("message")
if error_code or error_message:
message = _('Call to OPA failed. Code: {}, Message: {}').format(error_code, error_message)
errors[path_type] = message
continue
except Exception as e:
errors[path_type] = _('Call to OPA failed. Exception: {}').format(e)
continue
result = response.get('result')
if result is None:
errors[path_type] = _('Call to OPA did not return a "result" property. The path refers to an undefined document.')
continue
result_serializer = OPAResultSerializer(data=result)
if not result_serializer.is_valid():
errors[path_type] = _('OPA policy returned invalid result.')
continue
result_data = result_serializer.validated_data
if not result_data.get("allowed") and (result_violations := result_data.get("violations")):
violations[path_type] = result_violations
format_results = dict()
if any(errors[e] for e in errors):
format_results["Errors"] = errors
if any(violations[v] for v in violations):
format_results["Violations"] = violations
if violations or errors:
raise PolicyEvaluationError(pformat(format_results, width=80))
except Exception as e:
raise PolicyEvaluationError(_('This job cannot be executed due to a policy violation or error. See the following details:\n{}').format(e))
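# A minimal sketch of the result contract evaluate_policy() expects back from OPA:
# the queried rule must yield a boolean `allowed` plus a list of string `violations`
# (values below are illustrative).
_example_opa_result = {'allowed': False, 'violations': ['job must not target the production inventory']}
_example_serializer = OPAResultSerializer(data=_example_opa_result)
assert _example_serializer.is_valid()
assert _example_serializer.validated_data['allowed'] is False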


@@ -32,7 +32,7 @@ from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
from awx.main.dispatch import get_task_queuename from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task as task_awx from awx.main.dispatch.publish import task
# Receptorctl # Receptorctl
from receptorctl.socket_interface import ReceptorControl from receptorctl.socket_interface import ReceptorControl
@@ -852,7 +852,7 @@ def reload_receptor():
raise RuntimeError("Receptor reload failed") raise RuntimeError("Receptor reload failed")
@task_awx() @task()
def write_receptor_config(): def write_receptor_config():
""" """
This task runs async on each control node, K8S only. This task runs async on each control node, K8S only.
@@ -875,7 +875,7 @@ def write_receptor_config():
reload_receptor() reload_receptor()
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def remove_deprovisioned_node(hostname): def remove_deprovisioned_node(hostname):
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING) InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)


@@ -14,21 +14,16 @@ class SignalExit(Exception):
class SignalState: class SignalState:
# SIGTERM: Sent by supervisord to process group on shutdown
# SIGUSR1: The dispatcherd cancel signal
signals = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)
def reset(self): def reset(self):
for for_signal in self.signals: self.sigterm_flag = False
self.signal_flags[for_signal] = False self.sigint_flag = False
self.original_methods[for_signal] = None
self.is_active = False # for nested context managers self.is_active = False # for nested context managers
self.original_sigterm = None
self.original_sigint = None
self.raise_exception = False self.raise_exception = False
def __init__(self): def __init__(self):
self.signal_flags = {}
self.original_methods = {}
self.reset() self.reset()
def raise_if_needed(self): def raise_if_needed(self):
@@ -36,28 +31,31 @@ class SignalState:
self.raise_exception = False # so it is not raised a second time in error handling self.raise_exception = False # so it is not raised a second time in error handling
raise SignalExit() raise SignalExit()
def set_signal_flag(self, *args, for_signal=None): def set_sigterm_flag(self, *args):
self.signal_flags[for_signal] = True self.sigterm_flag = True
logger.info(f'Processed signal {for_signal}, set exit flag') self.raise_if_needed()
def set_sigint_flag(self, *args):
self.sigint_flag = True
self.raise_if_needed() self.raise_if_needed()
def connect_signals(self): def connect_signals(self):
for for_signal in self.signals: self.original_sigterm = signal.getsignal(signal.SIGTERM)
self.original_methods[for_signal] = signal.getsignal(for_signal) self.original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(for_signal, lambda *args, for_signal=for_signal: self.set_signal_flag(*args, for_signal=for_signal)) signal.signal(signal.SIGTERM, self.set_sigterm_flag)
signal.signal(signal.SIGINT, self.set_sigint_flag)
self.is_active = True self.is_active = True
def restore_signals(self): def restore_signals(self):
for for_signal in self.signals: signal.signal(signal.SIGTERM, self.original_sigterm)
original_method = self.original_methods[for_signal] signal.signal(signal.SIGINT, self.original_sigint)
signal.signal(for_signal, original_method) # if we got a signal while context manager was active, call parent methods.
# if we got a signal while context manager was active, call parent methods. if self.sigterm_flag:
if self.signal_flags[for_signal]: if callable(self.original_sigterm):
if callable(original_method): self.original_sigterm()
try: if self.sigint_flag:
original_method() if callable(self.original_sigint):
except Exception as exc: self.original_sigint()
logger.info(f'Error processing original {for_signal} signal, error: {str(exc)}')
self.reset() self.reset()
@@ -65,7 +63,7 @@ signal_state = SignalState()
def signal_callback(): def signal_callback():
return any(signal_state.signal_flags[for_signal] for for_signal in signal_state.signals) return bool(signal_state.sigterm_flag or signal_state.sigint_flag)
def with_signal_handling(f): def with_signal_handling(f):
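# A minimal usage sketch (hypothetical task body, not taken from this diff): code
# wrapped by with_signal_handling() can poll signal_callback() to wind down once a
# handled shutdown/cancel signal has been delivered.
from awx.main.tasks.signals import signal_callback, with_signal_handling


@with_signal_handling
def copy_events(events, handle):
    for event in events:
        if signal_callback():
            break  # a handled signal arrived; stop work and let cleanup run
        handle(event)  # caller-supplied per-event work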


@@ -1,77 +1,78 @@
# Python # Python
from collections import namedtuple
import functools import functools
import importlib import importlib
import itertools import itertools
import json import json
import logging import logging
import os import os
import psycopg
from io import StringIO
from contextlib import redirect_stdout
import shutil import shutil
import time import time
from collections import namedtuple
from contextlib import redirect_stdout
from datetime import datetime
from distutils.version import LooseVersion as Version from distutils.version import LooseVersion as Version
from io import StringIO from datetime import datetime
# Runner # Django
import ansible_runner.cleanup from django.conf import settings
import psycopg from django.db import connection, transaction, DatabaseError, IntegrityError
from ansible_base.lib.utils.db import advisory_lock from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
# django-ansible-base from django.utils.encoding import smart_str
from ansible_base.resource_registry.tasks.sync import SyncExecutor from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.query import QuerySet
# Django-CRUM # Django-CRUM
from crum import impersonate from crum import impersonate
# Django flags
from flags.state import flag_enabled
# Runner
import ansible_runner.cleanup
# dateutil # dateutil
from dateutil.parser import parse as parse_date from dateutil.parser import parse as parse_date
# Django # django-ansible-base
from django.conf import settings from ansible_base.resource_registry.tasks.sync import SyncExecutor
from django.contrib.auth.models import User from ansible_base.lib.utils.db import advisory_lock
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import DatabaseError, IntegrityError, connection, transaction
from django.db.models.fields.related import ForeignKey
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now, timedelta
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext_noop
# Django flags
from flags.state import flag_enabled
from rest_framework.exceptions import PermissionDenied
# AWX # AWX
from awx import __version__ as awx_application_version from awx import __version__ as awx_application_version
from awx.conf import settings_registry
from awx.main import analytics
from awx.main.access import access_registry from awx.main.access import access_registry
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
from awx.main.consumers import emit_channel_notification
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.dispatch.publish import task as task_awx
from awx.main.models import ( from awx.main.models import (
Schedule,
TowerScheduleState,
Instance, Instance,
InstanceGroup, InstanceGroup,
Inventory,
Job,
Notification,
Schedule,
SmartInventoryMembership,
TowerScheduleState,
UnifiedJob, UnifiedJob,
Notification,
Inventory,
SmartInventoryMembership,
Job,
convert_jsonfields, convert_jsonfields,
) )
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
from awx.main.utils.reload import stop_local_services
from awx.main.tasks.helpers import is_run_threshold_reached from awx.main.tasks.helpers import is_run_threshold_reached
from awx.main.tasks.host_indirect import save_indirect_host_entries from awx.main.tasks.host_indirect import save_indirect_host_entries
from awx.main.tasks.receptor import administrative_workunit_reaper, get_receptor_ctl, worker_cleanup, worker_info, write_receptor_config from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal from awx.main.consumers import emit_channel_notification
from awx.main.utils.reload import stop_local_services from awx.main import analytics
from dispatcherd.publish import task from awx.conf import settings_registry
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
from rest_framework.exceptions import PermissionDenied
logger = logging.getLogger('awx.main.tasks.system') logger = logging.getLogger('awx.main.tasks.system')
@@ -82,12 +83,7 @@ Try upgrading OpenSSH or providing your private key in an different format. \
''' '''
def _run_dispatch_startup_common(): def dispatch_startup():
"""
Execute the common startup initialization steps.
This includes updating schedules, syncing instance membership, and starting
local reaping and resetting metrics.
"""
startup_logger = logging.getLogger('awx.main.tasks') startup_logger = logging.getLogger('awx.main.tasks')
# TODO: Enable this on VM installs # TODO: Enable this on VM installs
@@ -97,14 +93,14 @@ def _run_dispatch_startup_common():
try: try:
convert_jsonfields() convert_jsonfields()
except Exception: except Exception:
logger.exception("Failed JSON field conversion, skipping.") logger.exception("Failed json field conversion, skipping.")
startup_logger.debug("Syncing schedules") startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all(): for sch in Schedule.objects.all():
try: try:
sch.update_computed_fields() sch.update_computed_fields()
except Exception: except Exception:
logger.exception("Failed to rebuild schedule %s.", sch) logger.exception("Failed to rebuild schedule {}.".format(sch))
# #
# When the dispatcher starts, if the instance cannot be found in the database, # When the dispatcher starts, if the instance cannot be found in the database,
@@ -124,67 +120,25 @@ def _run_dispatch_startup_common():
apply_cluster_membership_policies() apply_cluster_membership_policies()
cluster_node_heartbeat() cluster_node_heartbeat()
reaper.startup_reaping() reaper.startup_reaping()
reaper.reap_waiting(grace_period=0)
m = DispatcherMetrics() m = DispatcherMetrics()
m.reset_values() m.reset_values()
def _legacy_dispatch_startup():
"""
Legacy branch for startup: simply performs reaping of waiting jobs with a zero grace period.
"""
logger.debug("Legacy dispatcher: calling reaper.reap_waiting with grace_period=0")
reaper.reap_waiting(grace_period=0)
def _dispatcherd_dispatch_startup():
"""
New dispatcherd branch for startup: uses the control API to re-submit waiting jobs.
"""
logger.debug("Dispatcherd enabled: dispatching waiting jobs via control channel")
from awx.main.tasks.jobs import dispatch_waiting_jobs
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
def dispatch_startup():
"""
System initialization at startup.
First, execute the common logic.
Then, if FEATURE_DISPATCHERD_ENABLED is enabled, re-submit waiting jobs via the control API;
otherwise, fall back to legacy reaping of waiting jobs.
"""
_run_dispatch_startup_common()
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
_dispatcherd_dispatch_startup()
else:
_legacy_dispatch_startup()
def inform_cluster_of_shutdown(): def inform_cluster_of_shutdown():
"""
Clean system shutdown that marks the current instance offline.
In legacy mode, it also reaps waiting jobs.
In dispatcherd mode, it relies on dispatcherd's built-in cleanup.
"""
try: try:
inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID) this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal')) this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
except Instance.DoesNotExist:
logger.exception("Cluster host not found: %s", settings.CLUSTER_HOST_ID)
return
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
logger.debug("Dispatcherd mode: no extra reaping required for instance %s", inst.hostname)
else:
try: try:
logger.debug("Legacy mode: reaping waiting jobs for instance %s", inst.hostname) reaper.reap_waiting(this_inst, grace_period=0)
reaper.reap_waiting(inst, grace_period=0)
except Exception: except Exception:
logger.exception("Failed to reap waiting jobs for %s", inst.hostname) logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
logger.warning("Normal shutdown processed for instance %s; instance removed from capacity pool.", inst.hostname) logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def migrate_jsonfield(table, pkfield, columns): def migrate_jsonfield(table, pkfield, columns):
batchsize = 10000 batchsize = 10000
with advisory_lock(f'json_migration_{table}', wait=False) as acquired: with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
@@ -230,7 +184,7 @@ def migrate_jsonfield(table, pkfield, columns):
logger.warning(f"Migration of {table} to jsonb is finished.") logger.warning(f"Migration of {table} to jsonb is finished.")
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def apply_cluster_membership_policies(): def apply_cluster_membership_policies():
from awx.main.signals import disable_activity_stream from awx.main.signals import disable_activity_stream
@@ -342,7 +296,7 @@ def apply_cluster_membership_policies():
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute)) logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task_awx(queue='tower_settings_change') @task(queue='tower_settings_change')
def clear_setting_cache(setting_keys): def clear_setting_cache(setting_keys):
# log that cache is being cleared # log that cache is being cleared
logger.info(f"clear_setting_cache of keys {setting_keys}") logger.info(f"clear_setting_cache of keys {setting_keys}")
@@ -355,7 +309,7 @@ def clear_setting_cache(setting_keys):
cache.delete_many(cache_keys) cache.delete_many(cache_keys)
@task_awx(queue='tower_broadcast_all') @task(queue='tower_broadcast_all')
def delete_project_files(project_path): def delete_project_files(project_path):
# TODO: possibly implement some retry logic # TODO: possibly implement some retry logic
lock_file = project_path + '.lock' lock_file = project_path + '.lock'
@@ -373,7 +327,7 @@ def delete_project_files(project_path):
logger.exception('Could not remove lock file {}'.format(lock_file)) logger.exception('Could not remove lock file {}'.format(lock_file))
@task_awx(queue='tower_broadcast_all') @task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1): def profile_sql(threshold=1, minutes=1):
if threshold <= 0: if threshold <= 0:
cache.delete('awx-profile-sql-threshold') cache.delete('awx-profile-sql-threshold')
@@ -383,7 +337,7 @@ def profile_sql(threshold=1, minutes=1):
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes)) logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def send_notifications(notification_list, job_id=None): def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list): if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list") raise TypeError("notification_list should be of type list")
@@ -428,13 +382,13 @@ def events_processed_hook(unified_job):
save_indirect_host_entries.delay(unified_job.id) save_indirect_host_entries.delay(unified_job.id)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def gather_analytics(): def gather_analytics():
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL): if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather() analytics.gather()
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def purge_old_stdout_files(): def purge_old_stdout_files():
nowtime = time.time() nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT): for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -496,18 +450,18 @@ class CleanupImagesAndFiles:
cls.run_remote(this_inst, **kwargs) cls.run_remote(this_inst, **kwargs)
@task_awx(queue='tower_broadcast_all') @task(queue='tower_broadcast_all')
def handle_removed_image(remove_images=None): def handle_removed_image(remove_images=None):
"""Special broadcast invocation of this method to handle case of deleted EE""" """Special broadcast invocation of this method to handle case of deleted EE"""
CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='') CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='')
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def cleanup_images_and_files(): def cleanup_images_and_files():
CleanupImagesAndFiles.run(image_prune=True) CleanupImagesAndFiles.run(image_prune=True)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def cluster_node_health_check(node): def cluster_node_health_check(node):
""" """
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
@@ -526,7 +480,7 @@ def cluster_node_health_check(node):
this_inst.local_health_check() this_inst.local_health_check()
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def execution_node_health_check(node): def execution_node_health_check(node):
if node == '': if node == '':
logger.warning('Remote health check incorrectly called with blank string') logger.warning('Remote health check incorrectly called with blank string')
@@ -594,16 +548,8 @@ def inspect_established_receptor_connections(mesh_status):
def inspect_execution_and_hop_nodes(instance_list): def inspect_execution_and_hop_nodes(instance_list):
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False): with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
node_lookup = {inst.hostname: inst for inst in instance_list} node_lookup = {inst.hostname: inst for inst in instance_list}
try: ctl = get_receptor_ctl()
ctl = get_receptor_ctl() mesh_status = ctl.simple_command('status')
except FileNotFoundError:
logger.error('Receptor daemon not running, skipping execution node check')
return
try:
mesh_status = ctl.simple_command('status')
except ValueError as exc:
logger.error(f'Error running receptorctl status command, error: {str(exc)}')
return
inspect_established_receptor_connections(mesh_status) inspect_established_receptor_connections(mesh_status)
@@ -651,109 +597,8 @@ def inspect_execution_and_hop_nodes(instance_list):
execution_node_health_check.apply_async([hostname]) execution_node_health_check.apply_async([hostname])
@task_awx(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks']) @task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None): def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
"""
Original implementation for AWX dispatcher.
Uses worker_tasks from bind_kwargs to track running tasks.
"""
# Run common instance management logic
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
if this_inst is None:
return # Early return case from instance management
# Check versions
_heartbeat_check_versions(this_inst, instance_list)
# Handle lost instances
_heartbeat_handle_lost_instances(lost_instances, this_inst)
# Run local reaper - original implementation using worker_tasks
if worker_tasks is not None:
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
# Convert dispatch_time to datetime
ref_time = datetime.fromisoformat(dispatch_time) if dispatch_time else now()
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
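# Illustrative only: worker_tasks (bound in via bind_kwargs) is assumed to map each
# local dispatcher worker to the task UUIDs it currently holds, for example:
_example_worker_tasks = {'worker-0': ['6f0a1c2d', '9c1b2d3e'], 'worker-1': []}
# The reaper then excludes every UUID still claimed by a local worker.
_example_excluded = [uuid for task_list in _example_worker_tasks.values() for uuid in task_list]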
@task(queue=get_task_queuename, bind=True)
def adispatch_cluster_node_heartbeat(binder):
"""
Dispatcherd implementation.
Uses Control API to get running tasks.
"""
# Run common instance management logic
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
if this_inst is None:
return # Early return case from instance management
# Check versions
_heartbeat_check_versions(this_inst, instance_list)
# Handle lost instances
_heartbeat_handle_lost_instances(lost_instances, this_inst)
# Get running tasks using dispatcherd API
active_task_ids = _get_active_task_ids_from_dispatcherd(binder)
if active_task_ids is None:
logger.warning("No active task IDs retrieved from dispatcherd, skipping reaper")
return # Failed to get task IDs, don't attempt reaping
# Run local reaper using tasks from dispatcherd
ref_time = now() # No dispatch_time in dispatcherd version
logger.debug(f"Running reaper with {len(active_task_ids)} excluded UUIDs")
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
# If waiting jobs are hanging out, resubmit them
if UnifiedJob.objects.filter(controller_node=settings.CLUSTER_HOST_ID, status='waiting').exists():
from awx.main.tasks.jobs import dispatch_waiting_jobs
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
def _get_active_task_ids_from_dispatcherd(binder):
"""
Retrieve active task IDs from the dispatcherd control API.
Returns:
list: List of active task UUIDs
None: If there was an error retrieving the data
"""
active_task_ids = []
try:
logger.debug("Querying dispatcherd API for running tasks")
data = binder.control('running')
# Extract UUIDs from the running data
# Process running data: first item is a dict with node_id and task entries
data.pop('node_id', None)
# Extract task UUIDs from data structure
for task_key, task_value in data.items():
if isinstance(task_value, dict) and 'uuid' in task_value:
active_task_ids.append(task_value['uuid'])
logger.debug(f"Found active task with UUID: {task_value['uuid']}")
elif isinstance(task_key, str):
# Handle case where UUID might be the key
active_task_ids.append(task_key)
logger.debug(f"Found active task with key: {task_key}")
logger.debug(f"Retrieved {len(active_task_ids)} active task IDs from dispatcherd")
return active_task_ids
except Exception:
logger.exception("Failed to get running tasks from dispatcherd")
return None
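# Illustrative only: the 'running' control reply unpacked above is assumed to hold a
# node_id entry plus one entry per in-flight task, roughly like the dict below.
_example_running_reply = {
    'node_id': 'awx-1',
    'a1b2c3': {'uuid': 'a1b2c3', 'task': 'awx.main.tasks.jobs.RunJob'},
}
_example_running_reply.pop('node_id', None)
_example_uuids = [v['uuid'] for v in _example_running_reply.values() if isinstance(v, dict) and 'uuid' in v]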
def _heartbeat_instance_management():
"""Common logic for heartbeat instance management."""
logger.debug("Cluster node heartbeat task.") logger.debug("Cluster node heartbeat task.")
nowtime = now() nowtime = now()
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED))) instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
@@ -780,7 +625,7 @@ def _heartbeat_instance_management():
this_inst.local_health_check() this_inst.local_health_check()
if startup_event and this_inst.capacity != 0: if startup_event and this_inst.capacity != 0:
logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}') logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
return None, None, None # Early return case return
elif not last_last_seen: elif not last_last_seen:
logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}') logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2): elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
@@ -792,14 +637,8 @@ def _heartbeat_instance_management():
logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal') logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
this_inst.local_health_check() this_inst.local_health_check()
else: else:
logger.error("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID)) raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
return None, None, None # IFF any node has a greater version than we do, then we'll shutdown services
return this_inst, instance_list, lost_instances
def _heartbeat_check_versions(this_inst, instance_list):
"""Check versions across instances and determine if shutdown is needed."""
for other_inst in instance_list: for other_inst in instance_list:
if other_inst.node_type in ('execution', 'hop'): if other_inst.node_type in ('execution', 'hop'):
continue continue
@@ -816,9 +655,6 @@ def _heartbeat_check_versions(this_inst, instance_list):
stop_local_services(communicate=False) stop_local_services(communicate=False)
raise RuntimeError("Shutting down.") raise RuntimeError("Shutting down.")
def _heartbeat_handle_lost_instances(lost_instances, this_inst):
"""Handle lost instances by reaping their jobs and marking them offline."""
for other_inst in lost_instances: for other_inst in lost_instances:
try: try:
explanation = "Job reaped due to instance shutdown" explanation = "Job reaped due to instance shutdown"
@@ -849,8 +685,17 @@ def _heartbeat_handle_lost_instances(lost_instances, this_inst):
else: else:
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname)) logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
# Run local reaper
if worker_tasks is not None:
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def awx_receptor_workunit_reaper(): def awx_receptor_workunit_reaper():
""" """
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -873,16 +718,8 @@ def awx_receptor_workunit_reaper():
if not settings.RECEPTOR_RELEASE_WORK: if not settings.RECEPTOR_RELEASE_WORK:
return return
logger.debug("Checking for unreleased receptor work units") logger.debug("Checking for unreleased receptor work units")
try: receptor_ctl = get_receptor_ctl()
receptor_ctl = get_receptor_ctl() receptor_work_list = receptor_ctl.simple_command("work list")
except FileNotFoundError:
logger.info('Receptorctl sockfile not found for workunit reaper, doing nothing')
return
try:
receptor_work_list = receptor_ctl.simple_command("work list")
except ValueError as exc:
logger.info(f'Error getting work list for workunit reaper, error: {str(exc)}')
return
unit_ids = [id for id in receptor_work_list] unit_ids = [id for id in receptor_work_list]
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES) jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
@@ -896,7 +733,7 @@ def awx_receptor_workunit_reaper():
administrative_workunit_reaper(receptor_work_list) administrative_workunit_reaper(receptor_work_list)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def awx_k8s_reaper(): def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK: if not settings.RECEPTOR_RELEASE_WORK:
return return
@@ -919,7 +756,7 @@ def awx_k8s_reaper():
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group)) logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def awx_periodic_scheduler(): def awx_periodic_scheduler():
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000 lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired: with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
@@ -978,7 +815,7 @@ def awx_periodic_scheduler():
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules")) emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def handle_failure_notifications(task_ids): def handle_failure_notifications(task_ids):
"""A task-ified version of the method that sends notifications.""" """A task-ified version of the method that sends notifications."""
found_task_ids = set() found_task_ids = set()
@@ -993,7 +830,7 @@ def handle_failure_notifications(task_ids):
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database') logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def update_inventory_computed_fields(inventory_id): def update_inventory_computed_fields(inventory_id):
""" """
Signal handler and wrapper around inventory.update_computed_fields to Signal handler and wrapper around inventory.update_computed_fields to
@@ -1043,7 +880,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
return False return False
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def update_host_smart_inventory_memberships(): def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False) smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([]) changed_inventories = set([])
@@ -1059,7 +896,7 @@ def update_host_smart_inventory_memberships():
smart_inventory.update_computed_fields() smart_inventory.update_computed_fields()
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def delete_inventory(inventory_id, user_id, retries=5): def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user # Delete inventory as user
if user_id is None: if user_id is None:
@@ -1121,7 +958,7 @@ def _reconstruct_relationships(copy_mapping):
new_obj.save() new_obj.save()
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None): def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk)) logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
@@ -1176,7 +1013,7 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
update_inventory_computed_fields.delay(new_obj.id) update_inventory_computed_fields.delay(new_obj.id)
@task_awx(queue=get_task_queuename) @task(queue=get_task_queuename)
def periodic_resource_sync(): def periodic_resource_sync():
if not getattr(settings, 'RESOURCE_SERVER', None): if not getattr(settings, 'RESOURCE_SERVER', None):
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured") logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")


@@ -8,12 +8,5 @@
"CONTROLLER_PASSWORD": "fooo", "CONTROLLER_PASSWORD": "fooo",
"CONTROLLER_USERNAME": "fooo", "CONTROLLER_USERNAME": "fooo",
"CONTROLLER_OAUTH_TOKEN": "", "CONTROLLER_OAUTH_TOKEN": "",
"CONTROLLER_VERIFY_SSL": "False", "CONTROLLER_VERIFY_SSL": "False"
"AAP_HOSTNAME": "https://foo.invalid",
"AAP_PASSWORD": "fooo",
"AAP_USERNAME": "fooo",
"AAP_VALIDATE_CERTS": "False",
"CONTROLLER_REQUEST_TIMEOUT": "fooo",
"AAP_REQUEST_TIMEOUT": "fooo",
"AAP_TOKEN": ""
} }


@@ -1,9 +0,0 @@
---
- hosts: all
gather_facts: false
connection: local
vars:
sleep_interval: 5
tasks:
- name: sleep for a specified interval
command: sleep '{{ sleep_interval }}'


@@ -1,7 +0,0 @@
---
- hosts: all
gather_facts: false
connection: local
tasks:
- meta: clear_facts


@@ -1,17 +0,0 @@
---
- hosts: all
vars:
extra_value: ""
gather_facts: false
connection: local
tasks:
- name: set a custom fact
set_fact:
foo: "bar{{ extra_value }}"
bar:
a:
b:
- "c"
- "d"
cacheable: true

Some files were not shown because too many files have changed in this diff.