Compare commits


3 Commits

Author         SHA1        Message                                                                                Date
jainnikhil30   e5635657d0  add the migration for changing the default capacity adjustment                        2025-05-22 14:36:22 +05:30
jainnikhil30   df8d2740d7  set the memory value to 64 in test so that the overall capacity is not a float value  2025-05-15 16:16:55 +05:30
jainnikhil30   c8efa82aca  adjust the default capacity_adjustment to 0.75                                        2025-05-15 16:03:36 +05:30
221 changed files with 3118 additions and 8224 deletions


@@ -2,7 +2,7 @@
codecov:
notify:
after_n_builds: 9 # Number of test matrix+lint jobs uploading coverage
after_n_builds: 6 # Number of test matrix+lint jobs uploading coverage
wait_for_ci: false
require_ci_to_pass: false


@@ -17,23 +17,6 @@ exclude_also =
[run]
branch = True
# NOTE: `disable_warnings` is needed when `pytest-cov` runs in tandem
# NOTE: with `pytest-xdist`. These warnings are false negative in this
# NOTE: context.
#
# NOTE: It's `coveragepy` that emits the warnings and previously they
# NOTE: wouldn't get on the radar of `pytest`'s `filterwarnings`
# NOTE: mechanism. This changed, however, with `pytest >= 8.4`. And
# NOTE: since we set `filterwarnings = error`, those warnings are being
# NOTE: raised as exceptions, cascading into `pytest`'s internals and
# NOTE: causing tracebacks and crashes of the test sessions.
#
# Ref:
# * https://github.com/pytest-dev/pytest-cov/issues/693
# * https://github.com/pytest-dev/pytest-cov/pull/695
# * https://github.com/pytest-dev/pytest-cov/pull/696
disable_warnings =
module-not-measured
omit =
awx/main/migrations/*
awx/settings/defaults.py
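A minimal Python sketch (illustrative, not from the repo) of the failure mode this NOTE block describes: once an "error" warnings filter is installed, the equivalent of pytest's `filterwarnings = error` setting, any warning coveragepy emits during the session is escalated into an exception.

import warnings

# Equivalent of pytest's `filterwarnings = error` for this sketch.
warnings.simplefilter("error")

try:
    # Stand-in for coveragepy's false-negative "module-not-measured" warning.
    warnings.warn("module-not-measured")
except UserWarning as exc:
    print(f"warning escalated to exception: {exc}")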


@@ -17,6 +17,7 @@ in as the first entry for your PR title.
##### COMPONENT NAME
<!--- Name of the module/plugin/task -->
- API
- UI
- Collection
- CLI
- Docs


@@ -11,6 +11,8 @@ inputs:
runs:
using: composite
steps:
- uses: ./.github/actions/setup-python
- name: Set lower case owner name
shell: bash
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV


@@ -36,7 +36,7 @@ runs:
- name: Upgrade ansible-core
shell: bash
run: python -m pip install --upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core
- name: Install system deps
shell: bash


@@ -8,10 +8,3 @@ updates:
labels:
- "docs"
- "dependencies"
- package-ecosystem: "pip"
directory: "requirements/"
schedule:
interval: "daily" #run daily until we trust it, then back this off to weekly
open-pull-requests-limit: 2
labels:
- "dependencies"


@@ -1,72 +0,0 @@
---
name: API Schema Change Detection
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
UPSTREAM_REPOSITORY_ID: 91594105
on:
pull_request:
branches:
- devel
- release_**
- feature_**
- stable-**
jobs:
api-schema-detection:
name: Detect API Schema Changes
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
packages: write
contents: read
steps:
- uses: actions/checkout@v4
with:
show-progress: false
fetch-depth: 0
- name: Build awx_devel image for schema check
uses: ./.github/actions/awx_devel_image
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
private-github-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
- name: Detect API schema changes
id: schema-check
continue-on-error: true
run: |
AWX_DOCKER_ARGS='-e GITHUB_ACTIONS' \
AWX_DOCKER_CMD='make detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{ github.event.pull_request.base.ref }}' \
make docker-runner 2>&1 | tee schema-diff.txt
exit ${PIPESTATUS[0]}
- name: Add schema diff to job summary
if: always()
# Show the diff text; if it can't be generated for some reason, state that.
run: |
echo "## API Schema Change Detection Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ -f schema-diff.txt ]; then
if grep -q "^+" schema-diff.txt || grep -q "^-" schema-diff.txt; then
echo "### Schema changes detected" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Truncate to first 1000 lines to stay under GitHub's 1MB summary limit
TOTAL_LINES=$(wc -l < schema-diff.txt)
if [ $TOTAL_LINES -gt 1000 ]; then
echo "_Showing first 1000 of ${TOTAL_LINES} lines. See job logs or download artifact for full diff._" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
echo '```diff' >> $GITHUB_STEP_SUMMARY
head -n 1000 schema-diff.txt >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
else
echo "### No schema changes detected" >> $GITHUB_STEP_SUMMARY
fi
else
echo "### Unable to generate schema diff" >> $GITHUB_STEP_SUMMARY
fi


@@ -32,9 +32,18 @@ jobs:
- name: api-lint
command: /var/lib/awx/venv/awx/bin/tox -e linters
coverage-upload-name: ""
- name: api-swagger
command: /start_tests.sh swagger
coverage-upload-name: ""
- name: awx-collection
command: /start_tests.sh test_collection_all
coverage-upload-name: "awx-collection"
- name: api-schema
command: >-
/start_tests.sh detect-schema-change SCHEMA_DIFF_BASE_BRANCH=${{
github.event.pull_request.base.ref || github.ref_name
}}
coverage-upload-name: ""
steps:
- uses: actions/checkout@v4
@@ -54,17 +63,6 @@ jobs:
AWX_DOCKER_CMD='${{ matrix.tests.command }}'
make docker-runner
- name: Inject PR number into coverage.xml
if: >-
!cancelled()
&& github.event_name == 'pull_request'
&& steps.make-run.outputs.cov-report-files != ''
run: |
if [ -f "reports/coverage.xml" ]; then
sed -i '2i<!-- PR ${{ github.event.pull_request.number }} -->' reports/coverage.xml
echo "Injected PR number ${{ github.event.pull_request.number }} into coverage.xml"
fi
- name: Upload test coverage to Codecov
if: >-
!cancelled()
@@ -104,14 +102,6 @@ jobs:
}}
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.tests.name }}-artifacts
path: reports/coverage.xml
retention-days: 5
- name: Upload awx jUnit test reports
if: >-
!cancelled()
@@ -142,7 +132,7 @@ jobs:
- uses: ./.github/actions/setup-python
with:
python-version: '3.13'
python-version: '3.x'
- uses: ./.github/actions/run_awx_devel
id: awx
@@ -182,14 +172,13 @@ jobs:
repository: ansible/awx-operator
path: awx-operator
- name: Setup python, referencing action at awx relative path
uses: ./awx/.github/actions/setup-python
- uses: ./awx/.github/actions/setup-python
with:
python-version: '3.13'
working-directory: awx
- name: Install playbook dependencies
run: |
python -m pip install docker
python3 -m pip install docker
- name: Build AWX image
working-directory: awx
@@ -203,8 +192,8 @@ jobs:
- name: Run test deployment with awx-operator
working-directory: awx-operator
run: |
python -m pip install -r molecule/requirements.txt
python -m pip install PyYAML # for awx/tools/scripts/rewrite-awx-operator-requirements.py
python3 -m pip install -r molecule/requirements.txt
python3 -m pip install PyYAML # for awx/tools/scripts/rewrite-awx-operator-requirements.py
$(realpath ../awx/tools/scripts/rewrite-awx-operator-requirements.py) molecule/requirements.yml $(realpath ../awx)
ansible-galaxy collection install -r molecule/requirements.yml
sudo rm -f $(which kustomize)
@@ -291,11 +280,7 @@ jobs:
- uses: ./.github/actions/setup-python
with:
python-version: '3.13'
- name: Remove system ansible to avoid conflicts
run: |
python -m pip uninstall -y ansible ansible-core || true
python-version: '3.x'
- uses: ./.github/actions/run_awx_devel
id: awx
@@ -306,9 +291,8 @@ jobs:
- name: Install dependencies for running tests
run: |
python -m pip install -e ./awxkit/
python -m pip install -r awx_collection/requirements.txt
hash -r # Rehash to pick up newly installed scripts
python3 -m pip install -e ./awxkit/
python3 -m pip install -r awx_collection/requirements.txt
- name: Run integration tests
id: make-run
@@ -320,7 +304,6 @@ jobs:
echo 'password = password' >> ~/.tower_cli.cfg
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
export PYTHONPATH="$(python -c 'import site; print(":".join(site.getsitepackages()))')${PYTHONPATH:+:$PYTHONPATH}"
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--requirements $TARGETS" test_collection_integration
env:
ANSIBLE_TEST_PREFER_PODMAN: 1
@@ -352,7 +335,6 @@ jobs:
with:
name: coverage-${{ matrix.target-regex.name }}
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
retention-days: 1
- uses: ./.github/actions/upload_awx_devel_logs
if: always()
@@ -370,26 +352,32 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
show-progress: false
- uses: ./.github/actions/setup-python
with:
python-version: '3.13'
- name: Remove system ansible to avoid conflicts
run: |
python -m pip uninstall -y ansible ansible-core || true
python-version: '3.x'
- name: Upgrade ansible-core
run: python -m pip install --upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core
- name: Download coverage artifacts
- name: Download coverage artifacts A to H
uses: actions/download-artifact@v4
with:
merge-multiple: true
name: coverage-a-h
path: coverage
- name: Download coverage artifacts I to P
uses: actions/download-artifact@v4
with:
name: coverage-i-p
path: coverage
- name: Download coverage artifacts R to Z
uses: actions/download-artifact@v4
with:
name: coverage-r-z0-9
path: coverage
pattern: coverage-*
- name: Combine coverage
run: |
@@ -397,17 +385,56 @@ jobs:
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
cp -rv coverage/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
cd ~/.ansible/collections/ansible_collections/awx/awx
hash -r # Rehash to pick up newly installed scripts
PATH="$(python -c 'import sys; import os; print(os.path.dirname(sys.executable))'):$PATH" ansible-test coverage combine --requirements
PATH="$(python -c 'import sys; import os; print(os.path.dirname(sys.executable))'):$PATH" ansible-test coverage html
ansible-test coverage combine --requirements
ansible-test coverage html
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
PATH="$(python -c 'import sys; import os; print(os.path.dirname(sys.executable))'):$PATH" ansible-test coverage report >> $GITHUB_STEP_SUMMARY
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
# This is a huge hack; there's currently no official action for removing artifacts.
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
# steps, so we have to use github-script to get them.
#
# The advantage of doing this, though, is that we save on artifact storage space.
- name: Get secret artifact runtime URL
uses: actions/github-script@v6
id: get-runtime-url
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_URL } = process.env;
return ACTIONS_RUNTIME_URL;
- name: Get secret artifact runtime token
uses: actions/github-script@v6
id: get-runtime-token
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_TOKEN } = process.env;
return ACTIONS_RUNTIME_TOKEN;
- name: Remove intermediary artifacts
env:
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
run: |
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
artifacts=$(
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
)
for artifact in $artifacts; do
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
done
- name: Upload coverage report as artifact
uses: actions/upload-artifact@v4
with:
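For reference, a hypothetical Python equivalent of the intermediary-artifact cleanup above (the endpoint shape and field names are taken from the curl/jq calls shown; everything else is assumed):

import os

import requests

base_url = os.environ["ACTIONS_RUNTIME_URL"]
token = os.environ["ACTIONS_RUNTIME_TOKEN"]
run_id = os.environ["GITHUB_RUN_ID"]
headers = {
    "Authorization": f"Bearer {token}",
    "Accept": "application/json;api-version=6.0-preview",
}

# List this run's artifacts, then delete the intermediary coverage-* ones.
listing = requests.get(
    f"{base_url}_apis/pipelines/workflows/{run_id}/artifacts",
    params={"api-version": "6.0-preview"},
    headers=headers,
)
for artifact in listing.json()["value"]:
    if artifact["name"].startswith("coverage-"):
        requests.delete(artifact["url"], headers=headers)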


@@ -10,7 +10,6 @@ on:
- devel
- release_*
- feature_*
- stable-*
jobs:
push-development-images:
runs-on: ubuntu-latest


@@ -1,248 +0,0 @@
# SonarCloud Analysis Workflow for awx
#
# This workflow runs SonarCloud analysis triggered by CI workflow completion.
# It is split into two separate jobs for clarity and maintainability:
#
# FLOW: CI completes → workflow_run triggers this workflow → appropriate job runs
#
# JOB 1: sonar-pr-analysis (for PRs)
# - Triggered by: workflow_run (CI on pull_request)
# - Steps: Download coverage → Get PR info → Get changed files → Run SonarCloud PR analysis
# - Scans: All changed files in the PR (Python, YAML, JSON, etc.)
# - Quality gate: Focuses on new/changed code in PR only
#
# JOB 2: sonar-branch-analysis (for long-lived branches)
# - Triggered by: workflow_run (CI on push to devel)
# - Steps: Download coverage → Run SonarCloud branch analysis
# - Scans: Full codebase
# - Quality gate: Focuses on overall project health
#
# This ensures coverage data is always available from CI before analysis runs.
#
# What files are scanned:
# - All files in the repository that SonarCloud can analyze
# - Excludes: tests, scripts, dev environments, external collections (see sonar-project.properties)
# With much help from:
# https://community.sonarsource.com/t/how-to-use-sonarcloud-with-a-forked-repository-on-github/7363/30
# https://community.sonarsource.com/t/how-to-use-sonarcloud-with-a-forked-repository-on-github/7363/32
name: SonarCloud
on:
workflow_run: # This is triggered by CI being completed.
workflows:
- CI
types:
- completed
permissions: read-all
jobs:
sonar-pr-analysis:
name: SonarCloud PR Analysis
runs-on: ubuntu-latest
if: |
github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.event == 'pull_request' &&
github.repository == 'ansible/awx'
steps:
- uses: actions/checkout@v4
# Download all individual coverage artifacts from CI workflow
- name: Download coverage artifacts
uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
workflow: CI
run_id: ${{ github.event.workflow_run.id }}
pattern: api-test-artifacts
# Extract PR metadata from workflow_run event
- name: Set PR metadata and prepare files for analysis
env:
COMMIT_SHA: ${{ github.event.workflow_run.head_sha }}
REPO_NAME: ${{ github.event.repository.full_name }}
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Find all downloaded coverage XML files
coverage_files=$(find . -name "coverage.xml" -type f | tr '\n' ',' | sed 's/,$//')
echo "Found coverage files: $coverage_files"
echo "COVERAGE_PATHS=$coverage_files" >> $GITHUB_ENV
# Extract PR number from first coverage.xml file found
first_coverage=$(find . -name "coverage.xml" -type f | head -1)
if [ -f "$first_coverage" ]; then
PR_NUMBER=$(grep -m 1 '<!-- PR' "$first_coverage" | awk '{print $3}' || echo "")
else
PR_NUMBER=""
fi
echo "🔍 SonarCloud Analysis Decision Summary"
echo "========================================"
echo "├── CI Event: ✅ Pull Request"
echo "├── PR Number from coverage.xml: #${PR_NUMBER:-<not found>}"
if [ -z "$PR_NUMBER" ]; then
echo "##[error]❌ FATAL: PR number not found in coverage.xml"
echo "##[error]This job requires a PR number to run PR analysis."
echo "##[error]The ci workflow should have injected the PR number into coverage.xml."
exit 1
fi
# Get PR metadata from GitHub API
PR_DATA=$(gh api "repos/$REPO_NAME/pulls/$PR_NUMBER")
PR_BASE=$(echo "$PR_DATA" | jq -r '.base.ref')
PR_HEAD=$(echo "$PR_DATA" | jq -r '.head.ref')
# Print summary
echo "🔍 SonarCloud Analysis Decision Summary"
echo "========================================"
echo "├── CI Event: ✅ Pull Request"
echo "├── PR Number: #$PR_NUMBER"
echo "├── Base Branch: $PR_BASE"
echo "├── Head Branch: $PR_HEAD"
echo "├── Repo: $REPO_NAME"
# Export to GitHub env for later steps
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV
echo "PR_BASE=$PR_BASE" >> $GITHUB_ENV
echo "PR_HEAD=$PR_HEAD" >> $GITHUB_ENV
echo "COMMIT_SHA=$COMMIT_SHA" >> $GITHUB_ENV
echo "REPO_NAME=$REPO_NAME" >> $GITHUB_ENV
# Get all changed files from PR (with error handling)
files=""
if [ -n "$PR_NUMBER" ]; then
if gh api repos/$REPO_NAME/pulls/$PR_NUMBER/files --jq '.[].filename' > /tmp/pr_files.txt 2>/tmp/pr_error.txt; then
files=$(cat /tmp/pr_files.txt)
else
echo "├── Changed Files: ⚠️ Could not fetch (likely test repo or PR not found)"
if [ -f coverage.xml ] && [ -s coverage.xml ]; then
echo "├── Coverage Data: ✅ Available"
else
echo "├── Coverage Data: ⚠️ Not available"
fi
echo "└── Result: ✅ Running SonarCloud analysis (full scan)"
# No files = no inclusions filter = full scan
exit 0
fi
else
echo "├── PR Number: ⚠️ Not available"
if [ -f coverage.xml ] && [ -s coverage.xml ]; then
echo "├── Coverage Data: ✅ Available"
else
echo "├── Coverage Data: ⚠️ Not available"
fi
echo "└── Result: ✅ Running SonarCloud analysis (full scan)"
exit 0
fi
# Get file extensions and count for summary
extensions=$(echo "$files" | sed 's/.*\.//' | sort | uniq | tr '\n' ',' | sed 's/,$//')
file_count=$(echo "$files" | wc -l)
echo "├── Changed Files: $file_count file(s) (.${extensions})"
# Check if coverage.xml exists and has content
if [ -f coverage.xml ] && [ -s coverage.xml ]; then
echo "├── Coverage Data: ✅ Available"
else
echo "├── Coverage Data: ⚠️ Not available (analysis will proceed without coverage)"
fi
# Prepare file list for Sonar
echo "All changed files in PR:"
echo "$files"
# Filter out files that are excluded by .coveragerc to avoid coverage conflicts
# This prevents SonarCloud from analyzing files that have no coverage data
if [ -n "$files" ]; then
# Filter out files matching .coveragerc omit patterns
filtered_files=$(echo "$files" | grep -v "settings/.*_defaults\.py$" | grep -v "settings/defaults\.py$" | grep -v "main/migrations/")
# Show which files were filtered out for transparency
excluded_files=$(echo "$files" | grep -E "(settings/.*_defaults\.py$|settings/defaults\.py$|main/migrations/)" || true)
if [ -n "$excluded_files" ]; then
echo "├── Filtered out (coverage-excluded): $(echo "$excluded_files" | wc -l) file(s)"
echo "$excluded_files" | sed 's/^/│ - /'
fi
if [ -n "$filtered_files" ]; then
inclusions=$(echo "$filtered_files" | tr '\n' ',' | sed 's/,$//')
echo "SONAR_INCLUSIONS=$inclusions" >> $GITHUB_ENV
echo "└── Result: ✅ Will scan these files (excluding coverage-omitted files): $inclusions"
else
echo "└── Result: ✅ All changed files are excluded by coverage config, running full SonarCloud analysis"
# Don't set SONAR_INCLUSIONS, let it scan everything per sonar-project.properties
fi
else
echo "└── Result: ✅ Running SonarCloud analysis"
fi
- name: Add base branch
if: env.PR_NUMBER != ''
run: |
gh pr checkout ${{ env.PR_NUMBER }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: SonarCloud Scan
uses: SonarSource/sonarqube-scan-action@fd88b7d7ccbaefd23d8f36f73b59db7a3d246602 # v6
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.CICD_ORG_SONAR_TOKEN_CICD_BOT }}
with:
args: >
-Dsonar.scm.revision=${{ env.COMMIT_SHA }}
-Dsonar.pullrequest.key=${{ env.PR_NUMBER }}
-Dsonar.pullrequest.branch=${{ env.PR_HEAD }}
-Dsonar.pullrequest.base=${{ env.PR_BASE }}
-Dsonar.python.coverage.reportPaths=${{ env.COVERAGE_PATHS }}
${{ env.SONAR_INCLUSIONS && format('-Dsonar.inclusions={0}', env.SONAR_INCLUSIONS) || '' }}
sonar-branch-analysis:
name: SonarCloud Branch Analysis
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_run' &&
github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.event == 'push' &&
github.repository == 'ansible/awx'
steps:
- uses: actions/checkout@v4
# Download all individual coverage artifacts from CI workflow (optional for branch pushes)
- name: Download coverage artifacts
continue-on-error: true
uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
workflow: CI
run_id: ${{ github.event.workflow_run.id }}
pattern: api-test-artifacts
- name: Print SonarCloud Analysis Summary
env:
BRANCH_NAME: ${{ github.event.workflow_run.head_branch }}
run: |
# Find all downloaded coverage XML files
coverage_files=$(find . -name "coverage.xml" -type f | tr '\n' ',' | sed 's/,$//')
echo "Found coverage files: $coverage_files"
echo "COVERAGE_PATHS=$coverage_files" >> $GITHUB_ENV
echo "🔍 SonarCloud Analysis Summary"
echo "=============================="
echo "├── CI Event: ✅ Push (via workflow_run)"
echo "├── Branch: $BRANCH_NAME"
echo "├── Coverage Files: ${coverage_files:-none}"
echo "├── Python Changes: N/A (Full codebase scan)"
echo "└── Result: ✅ Proceed - \"Running SonarCloud analysis\""
- name: SonarCloud Scan
uses: SonarSource/sonarqube-scan-action@fd88b7d7ccbaefd23d8f36f73b59db7a3d246602 # v6
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.CICD_ORG_SONAR_TOKEN_CICD_BOT }}
with:
args: >
-Dsonar.scm.revision=${{ github.event.workflow_run.head_sha }}
-Dsonar.branch.name=${{ github.event.workflow_run.head_branch }}
${{ env.COVERAGE_PATHS && format('-Dsonar.python.coverage.reportPaths={0}', env.COVERAGE_PATHS) || '' }}
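A small self-contained sketch (helper names assumed) of the PR-number round-trip these two workflows rely on: CI injects `<!-- PR N -->` as line 2 of coverage.xml, and the PR-analysis job greps it back out.

from pathlib import Path

def inject_pr_number(path: str, pr_number: int) -> None:
    # Same slot as the CI workflow's `sed -i '2i<!-- PR N -->'`.
    lines = Path(path).read_text().splitlines(keepends=True)
    lines.insert(1, f"<!-- PR {pr_number} -->\n")
    Path(path).write_text("".join(lines))

def extract_pr_number(path: str) -> str:
    # Mirrors `grep -m 1 '<!-- PR' | awk '{print $3}'`.
    for line in Path(path).read_text().splitlines():
        if line.lstrip().startswith("<!-- PR"):
            return line.split()[2]
    return ""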


@@ -85,11 +85,9 @@ jobs:
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
- name: Setup node and npm for new UI build
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: '18'
cache: 'npm'
cache-dependency-path: awx/awx/ui/**/package-lock.json
- name: Prebuild new UI for awx image (to speed up build process)
working-directory: awx


@@ -11,7 +11,6 @@ on:
- devel
- release_**
- feature_**
- stable-**
jobs:
push:
runs-on: ubuntu-latest
@@ -24,26 +23,35 @@ jobs:
with:
show-progress: false
- name: Build awx_devel image to use for schema gen
uses: ./.github/actions/awx_devel_image
- uses: ./.github/actions/setup-python
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- uses: ./.github/actions/setup-ssh-agent
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
private-github-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
- name: Pre-pull image to warm build cache
run: |
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
- name: Build image
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
- name: Generate API Schema
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
COMPOSE_TAG=${{ github.base_ref || github.ref_name }} \
docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \
--workdir=/awx_devel `make print-DEVEL_IMAGE_NAME` /start_tests.sh genschema
--workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} /start_tests.sh genschema
- name: Upload API Schema
uses: keithweaver/aws-s3-github-action@4dd5a7b81d54abaa23bbac92b27e85d7f405ae53
with:
command: cp
source: ${{ github.workspace }}/schema.json
destination: s3://awx-public-ci-files/${{ github.ref_name }}/schema.json
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_KEY }}
aws_region: us-east-1
flags: --acl public-read --only-show-errors
env:
AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
AWS_REGION: 'us-east-1'
run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"

.gitignore (vendored): 1 line changed

@@ -1,7 +1,6 @@
# Ignore generated schema
swagger.json
schema.json
schema.yaml
reference-schema.json
# Tags


@@ -19,16 +19,8 @@ COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d .
COLLECTION_SANITY_ARGS ?= --docker
# collection unit testing directories
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
# pytest added args to collect coverage
COVERAGE_ARGS ?= --cov --cov-report=xml --junitxml=reports/junit.xml
# pytest test directories
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
# pytest args to run tests in parallel
PARALLEL_TESTS ?= -n auto
# collection integration test directories (defaults to all)
COLLECTION_TEST_TARGET ?=
# Python version for ansible-test (must be 3.11, 3.12, or 3.13)
ANSIBLE_TEST_PYTHON_VERSION ?= 3.13
# args for collection install
COLLECTION_PACKAGE ?= awx
COLLECTION_NAMESPACE ?= awx
@@ -79,7 +71,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==80.9.0 setuptools_scm[toml]==8.0.4 wheel==0.42.0 cython==3.1.3
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==70.3.0 setuptools_scm[toml]==8.1.0 wheel==0.45.1 cython==3.0.11
NAME ?= awx
@@ -316,17 +308,20 @@ black: reports
@echo "fi" >> .git/hooks/pre-commit
@chmod +x .git/hooks/pre-commit
genschema: awx-link reports
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(MANAGEMENT_COMMAND) spectacular --format openapi-json --file schema.json
genschema: reports
$(MAKE) swagger PYTEST_ARGS="--genschema --create-db "
mv swagger.json schema.json
genschema-yaml: awx-link reports
swagger: reports
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(MANAGEMENT_COMMAND) spectacular --format openapi --file schema.yaml
(set -o pipefail && py.test --cov --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
echo 'test-result-files=reports/junit.xml' >> "${GITHUB_OUTPUT}"; \
fi
check: black
@@ -339,12 +334,14 @@ api-lint:
awx-link:
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
PYTEST_ARGS ?= -n auto
## Run all API unit tests.
test:
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PARALLEL_TESTS) $(TEST_DIRS)
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PYTEST_ARGS) $(TEST_DIRS)
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
@@ -353,7 +350,7 @@ live_test:
## Run all API unit tests with coverage enabled.
test_coverage:
$(MAKE) test PYTEST_ADDOPTS="--create-db $(COVERAGE_ARGS)"
$(MAKE) test PYTEST_ARGS="--create-db --cov --cov-report=xml --junitxml=reports/junit.xml"
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -361,7 +358,7 @@ test_coverage:
fi
test_migrations:
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db $(PARALLEL_TESTS) $(COVERAGE_ARGS) $(TEST_DIRS)
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db --cov=awx --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) $(TEST_DIRS)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -379,7 +376,7 @@ test_collection:
fi && \
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
ansible --version
py.test $(COLLECTION_TEST_DIRS) $(COVERAGE_ARGS) -v
py.test $(COLLECTION_TEST_DIRS) --cov --cov-report=xml --junitxml=reports/junit.xml -v
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
@@ -430,8 +427,8 @@ test_collection_sanity:
test_collection_integration: install_collection
cd $(COLLECTION_INSTALL) && \
PATH="$$($(PYTHON) -c 'import sys; import os; print(os.path.dirname(sys.executable))'):$$PATH" ansible-test integration --python $(ANSIBLE_TEST_PYTHON_VERSION) --coverage -vvv $(COLLECTION_TEST_TARGET) && \
PATH="$$($(PYTHON) -c 'import sys; import os; print(os.path.dirname(sys.executable))'):$$PATH" ansible-test coverage xml --requirements --group-by command --group-by version
ansible-test integration --coverage -vvv $(COLLECTION_TEST_TARGET) && \
ansible-test coverage xml --requirements --group-by command --group-by version
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo cov-report-files="$$(find "$(COLLECTION_INSTALL)/tests/output/reports/" -type f -name 'coverage=integration*.xml' -print0 | tr '\0' ',' | sed 's#,$$##')" >> "${GITHUB_OUTPUT}"; \
@@ -536,15 +533,14 @@ docker-compose-test: awx/projects docker-compose-sources
docker-compose-runtest: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports awx_1 /start_tests.sh
docker-compose-build-schema: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 make genschema
docker-compose-build-swagger: awx/projects docker-compose-sources
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml run --rm --service-ports --no-deps awx_1 /start_tests.sh swagger
SCHEMA_DIFF_BASE_BRANCH ?= devel
detect-schema-change: genschema
curl https://s3.amazonaws.com/awx-public-ci-files/$(SCHEMA_DIFF_BASE_BRANCH)/schema.json -o reference-schema.json
# Ignore differences in whitespace with -b
# diff exits with 1 when files differ - capture but don't fail
-diff -u -b reference-schema.json schema.json
diff -u -b reference-schema.json schema.json
docker-compose-clean: awx/projects
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml rm -sf
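To make the detect-schema-change target concrete, a minimal Python sketch of the same fetch-and-diff flow (assumptions: the default base branch, and a schema.json already generated by genschema):

import subprocess
import sys
import urllib.request

base_branch = "devel"  # SCHEMA_DIFF_BASE_BRANCH default
url = f"https://s3.amazonaws.com/awx-public-ci-files/{base_branch}/schema.json"
urllib.request.urlretrieve(url, "reference-schema.json")

# -b ignores whitespace-only differences; diff exits 1 when the files differ.
result = subprocess.run(["diff", "-u", "-b", "reference-schema.json", "schema.json"])
sys.exit(result.returncode)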


@@ -161,14 +161,16 @@ def get_view_description(view, html=False):
def get_default_schema():
# drf-spectacular is configured via REST_FRAMEWORK['DEFAULT_SCHEMA_CLASS']
# Just use the DRF default, which will pick up our CustomAutoSchema
return views.APIView.schema
if settings.DYNACONF.is_development_mode:
from awx.api.swagger import schema_view
return schema_view
else:
return views.APIView.schema
class APIView(views.APIView):
# Schema is inherited from DRF's APIView, which uses DEFAULT_SCHEMA_CLASS
# No need to override it here - drf-spectacular will handle it
schema = get_default_schema()
versioning_class = URLPathVersioning
def initialize_request(self, request, *args, **kwargs):
@@ -842,7 +844,7 @@ class ResourceAccessList(ParentMixin, ListAPIView):
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
auditor_role = RoleDefinition.objects.filter(name="Platform Auditor").first()
auditor_role = RoleDefinition.objects.filter(name="Controller System Auditor").first()
if auditor_role:
qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
return qs.distinct()


@@ -10,7 +10,7 @@ from rest_framework import permissions
# AWX
from awx.main.access import check_user_access
from awx.main.models import Inventory, UnifiedJob, Organization
from awx.main.models import Inventory, UnifiedJob
from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions')
@@ -228,19 +228,12 @@ class InventoryInventorySourcesUpdatePermission(ModelAccessPermission):
class UserPermission(ModelAccessPermission):
def check_post_permissions(self, request, view, obj=None):
if not request.data:
return Organization.access_qs(request.user, 'change').exists()
return request.user.admin_of_organizations.exists()
elif request.user.is_superuser:
return True
raise PermissionDenied()
class IsSystemAdmin(permissions.BasePermission):
def has_permission(self, request, view):
if not (request.user and request.user.is_authenticated):
return False
return request.user.is_superuser
class IsSystemAdminOrAuditor(permissions.BasePermission):
"""
Allows write access only to system admin users.


@@ -1,75 +0,0 @@
import warnings
from rest_framework.permissions import IsAuthenticated
from drf_spectacular.openapi import AutoSchema
from drf_spectacular.views import (
SpectacularAPIView,
SpectacularSwaggerView,
SpectacularRedocView,
)
class CustomAutoSchema(AutoSchema):
"""Custom AutoSchema to add swagger_topic to tags and handle deprecated endpoints."""
def get_tags(self):
tags = []
try:
if hasattr(self.view, 'get_serializer'):
serializer = self.view.get_serializer()
else:
serializer = None
except Exception:
serializer = None
warnings.warn(
'{}.get_serializer() raised an exception during '
'schema generation. Serializer fields will not be '
'generated for this view.'.format(self.view.__class__.__name__)
)
if hasattr(self.view, 'swagger_topic'):
tags.append(str(self.view.swagger_topic).title())
elif serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
tags.append(str(serializer.Meta.model._meta.verbose_name_plural).title())
elif hasattr(self.view, 'model'):
tags.append(str(self.view.model._meta.verbose_name_plural).title())
else:
tags = super().get_tags() # Use default drf-spectacular behavior
if not tags:
warnings.warn(f'Could not determine tags for {self.view.__class__.__name__}')
tags = ['api'] # Fallback to default value
return tags
def is_deprecated(self):
"""Return `True` if this operation is to be marked as deprecated."""
return getattr(self.view, 'deprecated', False)
class AuthenticatedSpectacularAPIView(SpectacularAPIView):
"""SpectacularAPIView that requires authentication."""
permission_classes = [IsAuthenticated]
class AuthenticatedSpectacularSwaggerView(SpectacularSwaggerView):
"""SpectacularSwaggerView that requires authentication."""
permission_classes = [IsAuthenticated]
class AuthenticatedSpectacularRedocView(SpectacularRedocView):
"""SpectacularRedocView that requires authentication."""
permission_classes = [IsAuthenticated]
# Schema view (returns OpenAPI schema JSON/YAML)
schema_view = AuthenticatedSpectacularAPIView.as_view()
# Swagger UI view
swagger_ui_view = AuthenticatedSpectacularSwaggerView.as_view(url_name='api:schema-json')
# ReDoc UI view
redoc_view = AuthenticatedSpectacularRedocView.as_view(url_name='api:schema-json')
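Per the comment in the generics.py hunk above, drf-spectacular was wired up through REST_FRAMEWORK['DEFAULT_SCHEMA_CLASS']; a minimal settings sketch of that wiring, with the dotted path assumed from this file's location:

# Assumed Django settings fragment; the dotted path points at the
# CustomAutoSchema defined in the (removed) awx/api/schema.py above.
REST_FRAMEWORK = {
    "DEFAULT_SCHEMA_CLASS": "awx.api.schema.CustomAutoSchema",
}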


@@ -46,6 +46,9 @@ from ansible_base.lib.utils.models import get_type_for_model
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.rbac import permission_registry
# django-flags
from flags.state import flag_enabled
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import ACTIVE_STATES, org_role_to_permission
@@ -734,10 +737,13 @@ class EmptySerializer(serializers.Serializer):
pass
class OpaQueryPathMixin(serializers.Serializer):
class OpaQueryPathEnabledMixin(serializers.Serializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not flag_enabled("FEATURE_POLICY_AS_CODE_ENABLED") and 'opa_query_path' in self.fields:
self.fields.pop('opa_query_path')
def validate_opa_query_path(self, value):
# Decode the URL and re-encode it
decoded_value = urllib.parse.unquote(value)
@@ -749,7 +755,7 @@ class OpaQueryPathMixin(serializers.Serializer):
return value
class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathMixin):
class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathEnabledMixin):
# As a base serializer, the capabilities prefetch is not used directly,
# instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
capabilities_prefetch = []
@@ -1182,7 +1188,7 @@ class UserActivityStreamSerializer(UserSerializer):
fields = ('*', '-is_system_auditor')
class OrganizationSerializer(BaseSerializer, OpaQueryPathMixin):
class OrganizationSerializer(BaseSerializer, OpaQueryPathEnabledMixin):
show_capabilities = ['edit', 'delete']
class Meta:
@@ -1541,7 +1547,7 @@ class LabelsListMixin(object):
return res
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathMixin):
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathEnabledMixin):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]
@@ -2839,7 +2845,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
{
"role": {
"id": None,
"name": _("Platform Auditor"),
"name": _("Controller System Auditor"),
"description": _("Can view all aspects of the system"),
"user_capabilities": {"unattach": False},
},
@@ -3027,6 +3033,11 @@ class CredentialSerializer(BaseSerializer):
ret.remove(field)
return ret
def validate_organization(self, org):
if self.instance and (not self.instance.managed) and self.instance.credential_type.kind == 'galaxy' and org is None:
raise serializers.ValidationError(_("Galaxy credentials must be owned by an Organization."))
return org
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for related_objects in (
@@ -3102,6 +3113,9 @@ class CredentialSerializerCreate(CredentialSerializer):
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
if 'credential_type' in attrs and attrs['credential_type'].kind == 'galaxy' and list(owner_fields) != ['organization']:
raise serializers.ValidationError({"organization": _("Galaxy credentials must be owned by an Organization.")})
return super(CredentialSerializerCreate, self).validate(attrs)
def create(self, validated_data):
@@ -5998,7 +6012,7 @@ class InstanceGroupSerializer(BaseSerializer):
if self.instance and not self.instance.is_container_group:
raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))
pod_spec_override_json = {}
pod_spec_override_json = None
# detect if the value is yaml or json; if yaml, convert to json
try:
# convert yaml to json
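The OpaQueryPathEnabledMixin change near the top of this serializers hunk gates a field on a feature flag; a minimal standalone sketch of that pattern (the serializer name is invented):

from flags.state import flag_enabled
from rest_framework import serializers

class ExampleFlaggedSerializer(serializers.Serializer):  # hypothetical
    opa_query_path = serializers.CharField(required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hide the field entirely unless the feature flag is on,
        # mirroring OpaQueryPathEnabledMixin above.
        if not flag_enabled("FEATURE_POLICY_AS_CODE_ENABLED") and 'opa_query_path' in self.fields:
            self.fields.pop('opa_query_path')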

awx/api/swagger.py (new file): 55 lines added

@@ -0,0 +1,55 @@
import warnings
from rest_framework.permissions import AllowAny
from drf_yasg import openapi
from drf_yasg.inspectors import SwaggerAutoSchema
from drf_yasg.views import get_schema_view
class CustomSwaggerAutoSchema(SwaggerAutoSchema):
"""Custom SwaggerAutoSchema to add swagger_topic to tags."""
def get_tags(self, operation_keys=None):
tags = []
try:
if hasattr(self.view, 'get_serializer'):
serializer = self.view.get_serializer()
else:
serializer = None
except Exception:
serializer = None
warnings.warn(
'{}.get_serializer() raised an exception during '
'schema generation. Serializer fields will not be '
'generated for {}.'.format(self.view.__class__.__name__, operation_keys)
)
if hasattr(self.view, 'swagger_topic'):
tags.append(str(self.view.swagger_topic).title())
elif serializer and hasattr(serializer, 'Meta'):
tags.append(str(serializer.Meta.model._meta.verbose_name_plural).title())
elif hasattr(self.view, 'model'):
tags.append(str(self.view.model._meta.verbose_name_plural).title())
else:
tags = ['api'] # Fallback to default value
if not tags:
warnings.warn(f'Could not determine tags for {self.view.__class__.__name__}')
return tags
def is_deprecated(self):
"""Return `True` if this operation is to be marked as deprecated."""
return getattr(self.view, 'deprecated', False)
schema_view = get_schema_view(
openapi.Info(
title='AWX API',
default_version='v2',
description='AWX API Documentation',
terms_of_service='https://www.google.com/policies/terms/',
contact=openapi.Contact(email='contact@snippets.local'),
license=openapi.License(name='Apache License'),
),
public=True,
permission_classes=[AllowAny],
)
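A hypothetical usage sketch (the view is invented) of the hook CustomSwaggerAutoSchema.get_tags() checks first: a view sets swagger_topic, and the schema generator title-cases it into the operation's tag.

from rest_framework.views import APIView

class PingView(APIView):  # hypothetical view, for illustration only
    # Picked up by CustomSwaggerAutoSchema.get_tags() and rendered
    # as the "System Configuration" tag in the generated docs.
    swagger_topic = 'system configuration'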


@@ -1,4 +1,4 @@
---
collections:
- name: ansible.receptor
version: 2.0.6
version: 2.0.3


@@ -3,7 +3,7 @@
from django.urls import re_path
from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList
from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList, RoleParentsList, RoleChildrenList
urls = [
@@ -11,6 +11,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'),
re_path(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'),
re_path(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'),
re_path(r'^(?P<pk>[0-9]+)/parents/$', RoleParentsList.as_view(), name='role_parents_list'),
re_path(r'^(?P<pk>[0-9]+)/children/$', RoleChildrenList.as_view(), name='role_children_list'),
]
__all__ = ['urls']
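A short usage sketch of the two new routes (the pk is invented, and awx's versioned reverse is assumed to fill in the default API version): they resolve to /api/v2/roles/<pk>/parents/ and /api/v2/roles/<pk>/children/.

from awx.api.versioning import reverse  # same helper the API views use

# Assumed to resolve against the default API version (v2).
parents_url = reverse('api:role_parents_list', kwargs={'pk': 42})
children_url = reverse('api:role_children_list', kwargs={'pk': 42})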


@@ -4,6 +4,7 @@
from __future__ import absolute_import, unicode_literals
from django.urls import include, re_path
from awx import MODE
from awx.api.generics import LoggedLoginView, LoggedLogoutView
from awx.api.views.root import (
ApiRootView,
@@ -147,21 +148,21 @@ v2_urls = [
app_name = 'api'
# Import schema views (needed for both development and testing)
from awx.api.schema import schema_view, swagger_ui_view, redoc_view
urlpatterns = [
re_path(r'^$', ApiRootView.as_view(), name='api_root_view'),
re_path(r'^(?P<version>(v2))/', include(v2_urls)),
re_path(r'^login/$', LoggedLoginView.as_view(template_name='rest_framework/login.html', extra_context={'inside_login_context': True}), name='login'),
re_path(r'^logout/$', LoggedLogoutView.as_view(next_page='/api/', redirect_field_name='next'), name='logout'),
# Schema endpoints (available in all modes for API documentation and testing)
re_path(r'^schema/$', schema_view, name='schema-json'),
re_path(r'^docs/$', swagger_ui_view, name='schema-swagger-ui'),
re_path(r'^redoc/$', redoc_view, name='schema-redoc'),
]
if MODE == 'development':
# Only include these if we are in the development environment
from awx.api.swagger import schema_view
from awx.api.urls.debug import urls as debug_urls
from awx.api.urls.debug import urls as debug_urls
urlpatterns += [re_path(r'^debug/', include(debug_urls))]
urlpatterns += [re_path(r'^debug/', include(debug_urls))]
urlpatterns += [
re_path(r'^swagger(?P<format>\.json|\.yaml)/$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]


@@ -55,7 +55,8 @@ from wsgiref.util import FileWrapper
# django-ansible-base
from ansible_base.lib.utils.requests import get_remote_hosts
from ansible_base.rbac.models import RoleEvaluation
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
# AWX
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
@@ -84,6 +85,7 @@ from awx.api.generics import (
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse
from awx.main import models
from awx.main.models.rbac import get_role_definition
from awx.main.utils import (
camelcase_to_underscore,
extract_ansible_vars,
@@ -669,16 +671,81 @@ class ScheduleUnifiedJobsList(SubListAPIView):
name = _('Schedule Jobs List')
def immutablesharedfields(cls):
'''
Class decorator to prevent modifying shared resources when the ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is False.
Works by overriding these view methods:
- create
- delete
- perform_update
create and delete are overridden to raise a PermissionDenied exception.
perform_update is overridden to check if any shared fields are being modified,
and raise a PermissionDenied exception if so.
'''
# create instead of perform_create because some of our views
# override create instead of perform_create
if hasattr(cls, 'create'):
cls.original_create = cls.create
@functools.wraps(cls.create)
def create_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_create(*args, **kwargs)
raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})
cls.create = create_wrapper
if hasattr(cls, 'delete'):
cls.original_delete = cls.delete
@functools.wraps(cls.delete)
def delete_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_delete(*args, **kwargs)
raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})
cls.delete = delete_wrapper
if hasattr(cls, 'perform_update'):
cls.original_perform_update = cls.perform_update
@functools.wraps(cls.perform_update)
def update_wrapper(*args, **kwargs):
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
view, serializer = args
instance = view.get_object()
if instance:
if isinstance(instance, models.Organization):
shared_fields = OrganizationType._declared_fields.keys()
elif isinstance(instance, models.User):
shared_fields = UserType._declared_fields.keys()
elif isinstance(instance, models.Team):
shared_fields = TeamType._declared_fields.keys()
attrs = serializer.validated_data
for field in shared_fields:
if field in attrs and getattr(instance, field) != attrs[field]:
raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
return cls.original_perform_update(*args, **kwargs)
cls.perform_update = update_wrapper
return cls
@immutablesharedfields
class TeamList(ListCreateAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
@immutablesharedfields
class TeamDetail(RetrieveUpdateDestroyAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer
@immutablesharedfields
class TeamUsersList(BaseUsersList):
model = models.User
serializer_class = serializers.UserSerializer
@@ -720,19 +787,9 @@ class TeamRolesList(SubListAttachDetachAPIView):
team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
if not role.content_object.organization:
data = dict(
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
)
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
elif role.content_object.organization.id != team.organization.id:
if not request.user.is_superuser:
data = dict(
msg=_(
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
)
)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
return super(TeamRolesList, self).post(request, *args, **kwargs)
@@ -759,9 +816,17 @@ class TeamProjectsList(SubListAPIView):
def get_queryset(self):
team = self.get_parent_object()
self.check_parent_access(team)
my_qs = self.model.accessible_objects(self.request.user, 'read_role')
team_qs = models.Project.accessible_objects(team, 'read_role')
return my_qs & team_qs
model_ct = ContentType.objects.get_for_model(self.model)
parent_ct = ContentType.objects.get_for_model(self.parent_model)
rd = get_role_definition(team.member_role)
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()
if role is None:
# Team has no permissions, therefore team has no projects
return self.model.objects.none()
else:
project_qs = self.model.accessible_objects(self.request.user, 'read_role')
return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id'))
class TeamActivityStreamList(SubListAPIView):
@@ -876,23 +941,13 @@ class ProjectTeamsList(ListAPIView):
serializer_class = serializers.TeamSerializer
def get_queryset(self):
parent = get_object_or_404(models.Project, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Project, 'read', parent):
p = get_object_or_404(models.Project, pk=self.kwargs['pk'])
if not self.request.user.can_access(models.Project, 'read', p):
raise PermissionDenied()
project_ct = ContentType.objects.get_for_model(parent)
project_ct = ContentType.objects.get_for_model(models.Project)
team_ct = ContentType.objects.get_for_model(self.model)
roles_on_project = models.Role.objects.filter(
content_type=project_ct,
object_id=parent.pk,
)
team_member_parent_roles = models.Role.objects.filter(children__in=roles_on_project, role_field='member_role', content_type=team_ct).distinct()
team_ids = team_member_parent_roles.values_list('object_id', flat=True)
my_qs = self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=team_ids)
return my_qs
all_roles = models.Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])
class ProjectSchedulesList(SubListCreateAPIView):
@@ -1072,6 +1127,7 @@ class ProjectCopy(CopyAPIView):
copy_return_serializer_class = serializers.ProjectSerializer
@immutablesharedfields
class UserList(ListCreateAPIView):
model = models.User
serializer_class = serializers.UserSerializer
@@ -1128,6 +1184,14 @@ class UserRolesList(SubListAttachDetachAPIView):
role = get_object_or_400(models.Role, pk=sub_id)
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
# Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)
credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
@@ -1162,6 +1226,7 @@ class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization
serializer_class = serializers.OrganizationSerializer
parent_model = models.User
relationship = 'organizations'
def get_queryset(self):
parent = self.get_parent_object()
@@ -1175,6 +1240,7 @@ class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView):
model = models.Organization
serializer_class = serializers.OrganizationSerializer
parent_model = models.User
relationship = 'admin_of_organizations'
def get_queryset(self):
parent = self.get_parent_object()
@@ -1198,6 +1264,7 @@ class UserActivityStreamList(SubListAPIView):
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
@immutablesharedfields
class UserDetail(RetrieveUpdateDestroyAPIView):
model = models.User
serializer_class = serializers.UserSerializer
@@ -4172,6 +4239,13 @@ class RoleUsersList(SubListAttachDetachAPIView):
role = self.get_parent_object()
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)
credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
@@ -4213,21 +4287,9 @@ class RoleTeamsList(SubListAttachDetachAPIView):
credential_content_type = ContentType.objects.get_for_model(models.Credential)
if role.content_type == credential_content_type:
# Private credentials (no organization) are never allowed for teams
if not role.content_object.organization:
data = dict(
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
)
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
return Response(data, status=status.HTTP_400_BAD_REQUEST)
# Cross-organization credentials are only allowed for superusers
elif role.content_object.organization.id != team.organization.id:
if not request.user.is_superuser:
data = dict(
msg=_(
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
)
)
return Response(data, status=status.HTTP_400_BAD_REQUEST)
action = 'attach'
if request.data.get('disassociate', None):
@@ -4247,6 +4309,34 @@ class RoleTeamsList(SubListAttachDetachAPIView):
return Response(status=status.HTTP_204_NO_CONTENT)
class RoleParentsList(SubListAPIView):
deprecated = True
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'parents'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.parents.all())
class RoleChildrenList(SubListAPIView):
deprecated = True
model = models.Role
serializer_class = serializers.RoleSerializer
parent_model = models.Role
relationship = 'children'
permission_classes = (IsAuthenticated,)
search_fields = ('role_field', 'content_type__model')
def get_queryset(self):
role = models.Role.objects.get(pk=self.kwargs['pk'])
return models.Role.filter_visible_roles(self.request.user, role.children.all())
# Create view functions for all of the class-based views to simplify inclusion
# in URL patterns and reverse URL lookups, converting CamelCase names to
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).


@@ -12,7 +12,7 @@ import re
import asn1
from awx.api import serializers
from awx.api.generics import GenericAPIView, Response
from awx.api.permissions import IsSystemAdmin
from awx.api.permissions import IsSystemAdminOrAuditor
from awx.main import models
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
@@ -48,7 +48,7 @@ class InstanceInstallBundle(GenericAPIView):
name = _('Install Bundle')
model = models.Instance
serializer_class = serializers.InstanceSerializer
permission_classes = (IsSystemAdmin,)
permission_classes = (IsSystemAdminOrAuditor,)
def get(self, request, *args, **kwargs):
instance_obj = self.get_object()


@@ -53,15 +53,18 @@ from awx.api.serializers import (
CredentialSerializer,
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin
from awx.api.views import immutablesharedfields
logger = logging.getLogger('awx.api.views.organization')
@immutablesharedfields
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization
serializer_class = OrganizationSerializer
@immutablesharedfields
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Organization
serializer_class = OrganizationSerializer
@@ -104,6 +107,7 @@ class OrganizationInventoriesList(SubListAPIView):
relationship = 'inventories'
@immutablesharedfields
class OrganizationUsersList(BaseUsersList):
model = User
serializer_class = UserSerializer
@@ -112,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
ordering = ('username',)
@immutablesharedfields
class OrganizationAdminsList(BaseUsersList):
model = User
serializer_class = UserSerializer
@@ -150,6 +155,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
parent_key = 'organization'
@immutablesharedfields
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
model = Team
serializer_class = TeamSerializer


@@ -8,8 +8,6 @@ import operator
from collections import OrderedDict
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.encoding import smart_str
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
@@ -28,7 +26,6 @@ from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.analytics import all_collectors
from awx.main.ha import is_ha_environment
from awx.main.tasks.system import clear_setting_cache
from awx.main.utils import get_awx_version, get_custom_venv_choices
from awx.main.utils.licensing import validate_entitlement_manifest
from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
@@ -59,7 +56,7 @@ class ApiRootView(APIView):
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
if MODE == 'development':
data['docs'] = drf_reverse('api:schema-swagger-ui')
data['swagger'] = drf_reverse('api:schema-swagger-ui')
return Response(data)
@@ -180,47 +177,16 @@ class ApiV2SubscriptionView(APIView):
def post(self, request):
data = request.data.copy()
if data.get('subscriptions_client_secret') == '$encrypted$':
data['subscriptions_client_secret'] = settings.SUBSCRIPTIONS_CLIENT_SECRET
try:
user = None
pw = None
basic_auth = False
# determine if the credentials are for basic auth or not
if data.get('subscriptions_client_id'):
user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
if pw == '$encrypted$':
pw = settings.SUBSCRIPTIONS_CLIENT_SECRET
elif data.get('subscriptions_username'):
user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
if pw == '$encrypted$':
pw = settings.SUBSCRIPTIONS_PASSWORD
basic_auth = True
if not user or not pw:
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw, basic_auth)
# update settings if the credentials were valid
if basic_auth:
if user:
settings.SUBSCRIPTIONS_USERNAME = user
if pw:
settings.SUBSCRIPTIONS_PASSWORD = pw
# mutual exclusion for basic auth and service account
# only one should be set at a given time so that
# config/attach/ knows which credentials to use
settings.SUBSCRIPTIONS_CLIENT_ID = ""
settings.SUBSCRIPTIONS_CLIENT_SECRET = ""
else:
if user:
settings.SUBSCRIPTIONS_CLIENT_ID = user
if pw:
settings.SUBSCRIPTIONS_CLIENT_SECRET = pw
# mutual exclusion for basic auth and service account
settings.SUBSCRIPTIONS_USERNAME = ""
settings.SUBSCRIPTIONS_PASSWORD = ""
validated = get_licenser().validate_rh(user, pw)
if user:
settings.SUBSCRIPTIONS_CLIENT_ID = data['subscriptions_client_id']
if pw:
settings.SUBSCRIPTIONS_CLIENT_SECRET = data['subscriptions_client_secret']
except Exception as exc:
msg = _("Invalid Subscription")
if isinstance(exc, TokenError) or (
@@ -255,22 +221,13 @@ class ApiV2AttachView(APIView):
subscription_id = data.get('subscription_id', None)
if not subscription_id:
return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
# Ensure we always use the latest subscription credentials
cache.delete_many(['SUBSCRIPTIONS_CLIENT_ID', 'SUBSCRIPTIONS_CLIENT_SECRET', 'SUBSCRIPTIONS_USERNAME', 'SUBSCRIPTIONS_PASSWORD'])
user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
basic_auth = False
if not (user and pw):
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
basic_auth = True
if not (user and pw):
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
if subscription_id and user and pw:
data = request.data.copy()
try:
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw, basic_auth)
validated = get_licenser().validate_rh(user, pw)
except Exception as exc:
msg = _("Invalid Subscription")
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
@@ -284,12 +241,10 @@ class ApiV2AttachView(APIView):
else:
logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
for sub in validated:
if sub['subscription_id'] == subscription_id:
sub['valid_key'] = True
settings.LICENSE = sub
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
return Response(sub)
return Response({"error": _("Error processing subscription metadata.")}, status=status.HTTP_400_BAD_REQUEST)
@@ -309,6 +264,7 @@ class ApiV2ConfigView(APIView):
'''Return various sitewide configuration settings'''
license_data = get_licenser().validate()
if not license_data.get('valid_key', False):
license_data = {}
@@ -372,7 +328,6 @@ class ApiV2ConfigView(APIView):
try:
license_data_validated = get_licenser().license_from_manifest(license_data)
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
except Exception:
logger.warning(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
@@ -391,7 +346,6 @@ class ApiV2ConfigView(APIView):
def delete(self, request):
try:
settings.LICENSE = {}
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception:
# FIX: Log
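A minimal sketch of the credential-selection logic shown in the larger variant above, assuming a plain dict in place of Django settings (the function name and `conf` dict are illustrative, not part of the codebase):

def pick_subscription_credentials(data, conf):
    # Prefer service-account credentials; fall back to basic auth.
    if data.get('subscriptions_client_id'):
        user, pw = data['subscriptions_client_id'], data.get('subscriptions_client_secret')
        if pw == '$encrypted$':
            pw = conf.get('SUBSCRIPTIONS_CLIENT_SECRET')
        return user, pw, False  # basic_auth=False
    if data.get('subscriptions_username'):
        user, pw = data['subscriptions_username'], data.get('subscriptions_password')
        if pw == '$encrypted$':
            pw = conf.get('SUBSCRIPTIONS_PASSWORD')
        return user, pw, True  # basic_auth=True
    return None, None, False  # caller responds with HTTP 400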

View File

@@ -38,7 +38,6 @@ class SettingsRegistry(object):
if setting in self._registry:
raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
category = kwargs.setdefault('category', None)
kwargs.setdefault('required', False) # No setting is ordinarily required
category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
if category_slug in {'all', 'changed', 'user-defaults'}:
raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))

View File

@@ -639,9 +639,7 @@ class UserAccess(BaseAccess):
prefetch_related = ('resource',)
def filtered_queryset(self):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
qs = User.objects.all()
else:
qs = (
@@ -1226,9 +1224,7 @@ class TeamAccess(BaseAccess):
)
def filtered_queryset(self):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
):
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
return self.model.objects.all()
return self.model.objects.filter(
Q(organization__in=Organization.accessible_pk_qs(self.user, 'member_role')) | Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role'))
@@ -2568,7 +2564,7 @@ class NotificationTemplateAccess(BaseAccess):
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
return self.model.access_qs(self.user, 'view')
return self.model.objects.filter(
Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=Organization.access_qs(self.user, 'audit'))
Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=self.user.auditor_of_organizations)
).distinct()
@check_superuser
@@ -2603,7 +2599,7 @@ class NotificationAccess(BaseAccess):
def filtered_queryset(self):
return self.model.objects.filter(
Q(notification_template__organization__in=Organization.access_qs(self.user, 'add_notificationtemplate'))
| Q(notification_template__organization__in=Organization.access_qs(self.user, 'audit'))
| Q(notification_template__organization__in=self.user.auditor_of_organizations)
).distinct()
def can_delete(self, obj):

View File

@@ -3,13 +3,13 @@ import logging
# AWX
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
logger = logging.getLogger('awx.main.scheduler')
@task_awx(queue=get_task_queuename, timeout=300, on_duplicate='discard')
@task(queue=get_task_queuename)
def send_subsystem_metrics():
DispatcherMetrics().send_metrics()
CallbackReceiverMetrics().send_metrics()

View File

@@ -142,7 +142,7 @@ def config(since, **kwargs):
return {
'platform': {
'system': platform.system(),
'dist': (distro.name(), distro.version(), distro.codename()),
'dist': distro.linux_distribution(),
'release': platform.release(),
'type': install_type,
},

View File

@@ -324,10 +324,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
if collection_type != 'dry-run':
for fpath in tarfiles:
if os.path.exists(fpath):
os.remove(fpath)
if succeeded:
for fpath in tarfiles:
if os.path.exists(fpath):
os.remove(fpath)
with disable_activity_stream():
if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
# `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;

View File

@@ -128,7 +128,6 @@ def metrics():
registry=REGISTRY,
)
LICENSE_EXPIRY = Gauge('awx_license_expiry', 'Time before license expires', registry=REGISTRY)
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
@@ -149,7 +148,6 @@ def metrics():
}
)
LICENSE_EXPIRY.set(str(license_info.get('time_remaining', 0)))
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))

View File

@@ -44,12 +44,11 @@ class MetricsServer(MetricsServerSettings):
class BaseM:
def __init__(self, field, help_text, labels=None):
def __init__(self, field, help_text):
self.field = field
self.help_text = help_text
self.current_value = 0
self.metric_has_changed = False
self.labels = labels or {}
def reset_value(self, conn):
conn.hset(root_key, self.field, 0)
@@ -70,16 +69,12 @@ class BaseM:
value = conn.hget(root_key, self.field)
return self.decode_value(value)
def to_prometheus(self, instance_data, namespace=None):
def to_prometheus(self, instance_data):
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
for instance in instance_data:
if self.field in instance_data[instance]:
# Build label string
labels = f'node="{instance}"'
if namespace:
labels += f',subsystem="{namespace}"'
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
output_text += f'{self.field}{{{labels}}} {instance_data[instance][self.field]}\n'
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
return output_text
@@ -172,17 +167,14 @@ class HistogramM(BaseM):
self.sum.store_value(conn)
self.inf.store_value(conn)
def to_prometheus(self, instance_data, namespace=None):
def to_prometheus(self, instance_data):
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n"
for instance in instance_data:
# Build label string
node_label = f'node="{instance}"'
subsystem_label = f',subsystem="{namespace}"' if namespace else ''
for i, b in enumerate(self.buckets):
output_text += f'{self.field}_bucket{{le="{b}",{node_label}{subsystem_label}}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
output_text += f'{self.field}_bucket{{le="+Inf",{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_count{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_sum{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["sum"]}\n'
output_text += f'{self.field}_bucket{{le="{b}",node="{instance}"}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
output_text += f'{self.field}_bucket{{le="+Inf",node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_count{{node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
output_text += f'{self.field}_sum{{node="{instance}"}} {instance_data[instance][self.field]["sum"]}\n'
return output_text
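The two to_prometheus variants above differ only in the optional subsystem label; the exposition output looks roughly like this (metric and node names are hypothetical):

# without a namespace:
#   example_metric{node="awx-1"} 42
# with namespace="callback_receiver":
#   example_metric{node="awx-1",subsystem="callback_receiver"} 42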
@@ -281,22 +273,20 @@ class Metrics(MetricsNamespace):
def pipe_execute(self):
if self.metrics_have_changed is True:
duration_pipe_exec = time.perf_counter()
duration_to_save = time.perf_counter()
for m in self.METRICS:
self.METRICS[m].store_value(self.pipe)
self.pipe.execute()
self.last_pipe_execute = time.time()
self.metrics_have_changed = False
duration_pipe_exec = time.perf_counter() - duration_pipe_exec
duration_send_metrics = time.perf_counter()
self.send_metrics()
duration_send_metrics = time.perf_counter() - duration_send_metrics
# Increment operational metrics
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_pipe_exec)
duration_to_save = time.perf_counter() - duration_to_save
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_to_save)
self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1)
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_send_metrics)
duration_to_save = time.perf_counter()
self.send_metrics()
duration_to_save = time.perf_counter() - duration_to_save
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_to_save)
def send_metrics(self):
# more than one thread could be calling this at the same time, so should
@@ -362,13 +352,7 @@ class Metrics(MetricsNamespace):
if instance_data:
for field in self.METRICS:
if len(metrics_filter) == 0 or field in metrics_filter:
# Add subsystem label only for operational metrics
namespace = (
self._namespace
if field in ['subsystem_metrics_pipe_execute_seconds', 'subsystem_metrics_pipe_execute_calls', 'subsystem_metrics_send_metrics_seconds']
else None
)
output_text += self.METRICS[field].to_prometheus(instance_data, namespace)
output_text += self.METRICS[field].to_prometheus(instance_data)
return output_text
@@ -456,10 +440,7 @@ class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
return None
if not (host_metrics := instance_data.get(my_hostname)):
logger.debug(f"Metric data for this node '{my_hostname}' not found in redis for metric namespace '{self._metrics._namespace}'")
return None
host_metrics = instance_data.get(my_hostname)
for _, metric in self._metrics.METRICS.items():
entry = host_metrics.get(metric.field)
if not entry:

View File

@@ -1,9 +1,6 @@
import os
from dispatcherd.config import setup as dispatcher_setup
from django.apps import AppConfig
from django.db import connection
from django.utils.translation import gettext_lazy as _
from awx.main.utils.common import bypass_in_test, load_all_entry_points_for
from awx.main.utils.migration import is_database_synchronized
@@ -79,28 +76,9 @@ class MainConfig(AppConfig):
cls = entry_point.load()
InventorySourceOptions.injectors[entry_point_name] = cls
def configure_dispatcherd(self):
"""This implements the default configuration for dispatcherd
If running the tasking service like awx-manage run_dispatcher,
some additional config will be applied on top of this.
This configuration provides the minimum such that code can submit
tasks to pg_notify to run those tasks.
"""
from awx.main.dispatch.config import get_dispatcherd_config
if connection.vendor != 'postgresql':
config_dict = get_dispatcherd_config(mock_publish=True)
else:
config_dict = get_dispatcherd_config()
dispatcher_setup(config_dict)
def ready(self):
super().ready()
self.configure_dispatcherd()
"""
Credential loading triggers database operations. There are cases where we want to call
awx-manage collectstatic without a database. All management commands invoke the ready() code

View File

@@ -4,6 +4,7 @@ import logging
# Django
from django.core.checks import Error
from django.utils.translation import gettext_lazy as _
from django.conf import settings
# Django REST Framework
from rest_framework import serializers
@@ -91,6 +92,7 @@ register(
),
category=_('System'),
category_slug='system',
required=False,
)
register(
@@ -105,7 +107,6 @@ register(
),
category=_('System'),
category_slug='system',
hidden=True,
)
register(
@@ -144,35 +145,6 @@ register(
category_slug='system',
)
register(
'SUBSCRIPTIONS_USERNAME',
field_class=fields.CharField,
default='',
allow_blank=True,
encrypted=False,
read_only=False,
label=_('Red Hat Username for Subscriptions'),
help_text=_('Username used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
)
register(
'SUBSCRIPTIONS_PASSWORD',
field_class=fields.CharField,
default='',
allow_blank=True,
encrypted=True,
read_only=False,
label=_('Red Hat Password for Subscriptions'),
help_text=_('Password used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
)
register(
'SUBSCRIPTIONS_CLIENT_ID',
field_class=fields.CharField,
@@ -184,7 +156,6 @@ register(
help_text=_('Client ID used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
)
register(
@@ -198,7 +169,6 @@ register(
help_text=_('Client secret used to retrieve subscription and content information'), # noqa
category=_('System'),
category_slug='system',
hidden=True,
)
register(
@@ -269,6 +239,7 @@ register(
help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
category=_('Jobs'),
category_slug='jobs',
required=False,
)
register(
@@ -279,6 +250,7 @@ register(
('never', _('Never')),
('template', _('Only On Job Template Definitions')),
],
required=True,
label=_('When can extra variables contain Jinja templates?'),
help_text=_(
'Ansible allows variable substitution via the Jinja2 templating '
@@ -303,6 +275,7 @@ register(
register(
'AWX_ISOLATION_SHOW_PATHS',
field_class=fields.StringListIsolatedPathField,
required=False,
label=_('Paths to expose to isolated jobs'),
help_text=_(
'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
@@ -468,6 +441,7 @@ register(
register(
'AWX_ANSIBLE_CALLBACK_PLUGINS',
field_class=fields.StringListField,
required=False,
label=_('Ansible Callback Plugins'),
help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'),
category=_('Jobs'),
@@ -581,6 +555,7 @@ register(
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
category=_('Logging'),
category_slug='logging',
required=False,
)
register(
'LOG_AGGREGATOR_TYPE',
@@ -602,6 +577,7 @@ register(
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'),
category_slug='logging',
required=False,
)
register(
'LOG_AGGREGATOR_PASSWORD',
@@ -613,6 +589,7 @@ register(
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
category=_('Logging'),
category_slug='logging',
required=False,
)
register(
'LOG_AGGREGATOR_LOGGERS',
@@ -799,6 +776,7 @@ register(
allow_null=True,
category=_('System'),
category_slug='system',
required=False,
hidden=True,
)
register(
@@ -1006,132 +984,123 @@ def csrf_trusted_origins_validate(serializer, attrs):
register_validate('system', csrf_trusted_origins_validate)
register(
'OPA_HOST',
field_class=fields.CharField,
label=_('OPA server hostname'),
default='',
help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
if settings.FEATURE_POLICY_AS_CODE_ENABLED: # Unable to use flag_enabled due to AppRegistryNotReady error
register(
'OPA_HOST',
field_class=fields.CharField,
label=_('OPA server hostname'),
default='',
help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_PORT',
field_class=fields.IntegerField,
label=_('OPA server port'),
default=8181,
help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_PORT',
field_class=fields.IntegerField,
label=_('OPA server port'),
default=8181,
help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_SSL',
field_class=fields.BooleanField,
label=_('Use SSL for OPA connection'),
default=False,
help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_SSL',
field_class=fields.BooleanField,
label=_('Use SSL for OPA connection'),
default=False,
help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_TYPE',
field_class=fields.ChoiceField,
label=_('OPA authentication type'),
choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
default=OPA_AUTH_TYPES.NONE,
help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_TYPE',
field_class=fields.ChoiceField,
label=_('OPA authentication type'),
choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
default=OPA_AUTH_TYPES.NONE,
help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_TOKEN',
field_class=fields.CharField,
label=_('OPA authentication token'),
default='',
help_text=_(
'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_TOKEN',
field_class=fields.CharField,
label=_('OPA authentication token'),
default='',
help_text=_(
'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_CLIENT_CERT',
field_class=fields.CharField,
label=_('OPA client certificate content'),
default='',
help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CLIENT_CERT',
field_class=fields.CharField,
label=_('OPA client certificate content'),
default='',
help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CLIENT_KEY',
field_class=fields.CharField,
label=_('OPA client key content'),
default='',
help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_CLIENT_KEY',
field_class=fields.CharField,
label=_('OPA client key content'),
default='',
help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
encrypted=True,
)
register(
'OPA_AUTH_CA_CERT',
field_class=fields.CharField,
label=_('OPA CA certificate content'),
default='',
help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CA_CERT',
field_class=fields.CharField,
label=_('OPA CA certificate content'),
default='',
help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
category=('PolicyAsCode'),
category_slug='policyascode',
allow_blank=True,
)
register(
'OPA_AUTH_CUSTOM_HEADERS',
field_class=fields.DictField,
label=_('OPA custom authentication headers'),
default={},
help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_AUTH_CUSTOM_HEADERS',
field_class=fields.DictField,
label=_('OPA custom authentication headers'),
default={},
help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_REQUEST_TIMEOUT',
field_class=fields.FloatField,
label=_('OPA request timeout'),
default=1.5,
help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_REQUEST_TIMEOUT',
field_class=fields.FloatField,
label=_('OPA request timeout'),
default=1.5,
help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
register(
'OPA_REQUEST_RETRIES',
field_class=fields.IntegerField,
label=_('OPA request retry count'),
default=2,
help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)
def policy_as_code_validate(serializer, attrs):
opa_host = attrs.get('OPA_HOST', '')
if opa_host and (opa_host.startswith('http://') or opa_host.startswith('https://')):
raise serializers.ValidationError({'OPA_HOST': _("OPA_HOST should not include 'http://' or 'https://' prefixes. Please enter only the hostname.")})
return attrs
register_validate('policyascode', policy_as_code_validate)
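Illustrative behavior of the validator registered above (values are hypothetical):

# policy_as_code_validate(serializer, {'OPA_HOST': 'opa.example.com'})        -> returns attrs unchanged
# policy_as_code_validate(serializer, {'OPA_HOST': 'https://opa.example.com'}) -> raises ValidationError on OPA_HOST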
register(
'OPA_REQUEST_RETRIES',
field_class=fields.IntegerField,
label=_('OPA request retry count'),
default=2,
help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
category=('PolicyAsCode'),
category_slug='policyascode',
)

View File

@@ -77,8 +77,6 @@ LOGGER_BLOCKLIST = (
'awx.main.utils.log',
# loggers that may be called while getting logging settings
'awx.conf',
# dispatcherd should only use 1 database connection
'dispatcherd',
)
# Reported version for node seen in receptor mesh but for which capacity check

View File

@@ -1,53 +0,0 @@
from django.conf import settings
from ansible_base.lib.utils.db import get_pg_notify_params
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.pool import get_auto_max_workers
def get_dispatcherd_config(for_service: bool = False, mock_publish: bool = False) -> dict:
"""Return a dictionary config for dispatcherd
Parameters:
for_service: if True, include dynamic options needed for running the dispatcher service
this will require database access, so you should delay evaluation until after app setup
"""
config = {
"version": 2,
"service": {
"pool_kwargs": {
"min_workers": settings.JOB_EVENT_WORKERS,
"max_workers": get_auto_max_workers(),
},
"main_kwargs": {"node_id": settings.CLUSTER_HOST_ID},
"process_manager_cls": "ForkServerManager",
"process_manager_kwargs": {"preload_modules": ['awx.main.dispatch.hazmat']},
},
"brokers": {
"socket": {"socket_path": settings.DISPATCHERD_DEBUGGING_SOCKFILE},
},
"publish": {"default_control_broker": "socket"},
"worker": {"worker_cls": "awx.main.dispatch.worker.dispatcherd.AWXTaskWorker"},
}
if mock_publish:
config["brokers"]["noop"] = {}
config["publish"]["default_broker"] = "noop"
else:
config["brokers"]["pg_notify"] = {
"config": get_pg_notify_params(),
"sync_connection_factory": "ansible_base.lib.utils.db.psycopg_connection_from_django",
"default_publish_channel": settings.CLUSTER_HOST_ID, # used for debugging commands
}
config["publish"]["default_broker"] = "pg_notify"
if for_service:
config["producers"] = {
"ScheduledProducer": {"task_schedule": settings.DISPATCHER_SCHEDULE},
"OnStartProducer": {"task_list": {"awx.main.tasks.system.dispatch_startup": {}}},
"ControlProducer": {},
}
config["brokers"]["pg_notify"]["channels"] = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
return config
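A usage sketch of this config module, mirroring MainConfig.configure_dispatcherd and the run_dispatcher command elsewhere in this diff:

from dispatcherd.config import setup as dispatcher_setup
from awx.main.dispatch.config import get_dispatcherd_config

dispatcher_setup(get_dispatcherd_config())  # publish-only defaults: submit tasks via pg_notify
# the dispatcher service instead evaluates this after app setup:
# dispatcher_setup(get_dispatcherd_config(for_service=True))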

View File

@@ -1,36 +0,0 @@
import django
# dispatcherd publisher logic is likely to be used, but needs manual preload
from dispatcherd.brokers import pg_notify # noqa
# Cache may not be initialized until we are in the worker, so preload here
from channels_redis import core # noqa
from awx import prepare_env
from dispatcherd.utils import resolve_callable
prepare_env()
django.setup() # noqa
from django.conf import settings
# Preload all periodic tasks so their imports will be in shared memory
for name, options in settings.CELERYBEAT_SCHEDULE.items():
resolve_callable(options['task'])
# Preload in-line import from tasks
from awx.main.scheduler.kubernetes import PodManager # noqa
from django.core.cache import cache as django_cache
from django.db import connection
connection.close()
django_cache.close()

View File

@@ -37,9 +37,6 @@ else:
logger = logging.getLogger('awx.main.dispatch')
RETIRED_SENTINEL_TASK = "[retired]"
class NoOpResultQueue(object):
def put(self, item):
pass
@@ -84,17 +81,11 @@ class PoolWorker(object):
self.queue = MPQueue(queue_size)
self.process = Process(target=target, args=(self.queue, self.finished) + args)
self.process.daemon = True
self.creation_time = time.monotonic()
self.retiring = False
def start(self):
self.process.start()
def put(self, body):
if self.retiring:
uuid = body.get('uuid', 'N/A') if isinstance(body, dict) else 'N/A'
logger.info(f"Worker pid:{self.pid} is retiring. Refusing new task {uuid}.")
raise QueueFull("Worker is retiring and not accepting new tasks") # AutoscalePool.write handles QueueFull
uuid = '?'
if isinstance(body, dict):
if not body.get('uuid'):
@@ -113,11 +104,6 @@ class PoolWorker(object):
"""
self.queue.put('QUIT')
@property
def age(self):
"""Returns the current age of the worker in seconds."""
return time.monotonic() - self.creation_time
@property
def pid(self):
return self.process.pid
@@ -164,8 +150,6 @@ class PoolWorker(object):
# the purpose of self.managed_tasks is to just track internal
# state of which events are *currently* being processed.
logger.warning('Event UUID {} appears to have been duplicated.'.format(uuid))
if self.retiring:
self.managed_tasks[RETIRED_SENTINEL_TASK] = {'task': RETIRED_SENTINEL_TASK}
@property
def current_task(self):
@@ -281,8 +265,6 @@ class WorkerPool(object):
'{% for w in workers %}'
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
' sent={{ w.messages_sent }}'
' age={{ "%.0f"|format(w.age) }}s'
' retiring={{ w.retiring }}'
'{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
' qsize={{ w.managed_tasks|length }}'
' rss={{ w.mb }}MB'
@@ -374,9 +356,6 @@ class AutoscalePool(WorkerPool):
def __init__(self, *args, **kwargs):
self.max_workers = kwargs.pop('max_workers', None)
self.max_worker_lifetime_seconds = kwargs.pop(
'max_worker_lifetime_seconds', getattr(settings, 'WORKER_MAX_LIFETIME_SECONDS', 14400)
) # Default to 4 hours
super(AutoscalePool, self).__init__(*args, **kwargs)
if self.max_workers is None:
@@ -436,7 +415,6 @@ class AutoscalePool(WorkerPool):
"""
orphaned = []
for w in self.workers[::]:
is_retirement_age = self.max_worker_lifetime_seconds is not None and w.age > self.max_worker_lifetime_seconds
if not w.alive:
# the worker process has exited
# 1. take the task it was running and enqueue the error
@@ -445,10 +423,6 @@ class AutoscalePool(WorkerPool):
# send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task:
if w.current_task == {'task': RETIRED_SENTINEL_TASK}:
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
self.workers.remove(w)
continue
if w.current_task != 'QUIT':
try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
@@ -459,7 +433,6 @@ class AutoscalePool(WorkerPool):
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
orphaned.extend(w.orphaned_tasks)
self.workers.remove(w)
elif w.idle and len(self.workers) > self.min_workers:
# the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min)
@@ -468,22 +441,6 @@ class AutoscalePool(WorkerPool):
logger.debug('scaling down worker pid:{}'.format(w.pid))
w.quit()
self.workers.remove(w)
elif w.idle and is_retirement_age:
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
w.quit()
self.workers.remove(w)
elif is_retirement_age and not w.retiring and not w.idle:
logger.info(
f"Worker pid:{w.pid} (age: {w.age:.0f}s) exceeded max lifetime ({self.max_worker_lifetime_seconds:.0f}s). "
"Signaling for graceful retirement."
)
# Send QUIT signal; worker will finish current task then exit.
w.quit()
# mark as retiring to reject any future tasks that might be assigned in the meantime
w.retiring = True
if w.alive:
# if we discover a task manager invocation that's been running
# too long, reap it (because otherwise it'll just hold the postgres
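Condensed into one function, the age-based retirement flow in this hunk behaves roughly as follows (a sketch over duck-typed pool/worker objects, not the exact implementation):

def reap_retirement(pool, w, max_lifetime):
    is_retirement_age = max_lifetime is not None and w.age > max_lifetime
    if w.idle and is_retirement_age:
        w.quit()
        pool.workers.remove(w)  # idle and past max lifetime: remove now
    elif is_retirement_age and not w.retiring:
        w.quit()                # busy: finish the current task, then exit
        w.retiring = True       # refuse tasks assigned in the meantime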

View File

@@ -4,10 +4,6 @@ import json
import time
from uuid import uuid4
from dispatcherd.publish import submit_task
from dispatcherd.processors.blocker import Blocker
from dispatcherd.utils import resolve_callable
from django_guid import get_guid
from django.conf import settings
@@ -61,17 +57,13 @@ class task:
print(f"Time I was dispatched: {dispatch_time}")
"""
def __init__(self, queue=None, bind_kwargs=None, timeout=None, on_duplicate=None):
def __init__(self, queue=None, bind_kwargs=None):
self.queue = queue
self.bind_kwargs = bind_kwargs
self.timeout = timeout
self.on_duplicate = on_duplicate
def __call__(self, fn=None):
queue = self.queue
bind_kwargs = self.bind_kwargs
timeout = self.timeout
on_duplicate = self.on_duplicate
class PublisherMixin(object):
queue = None
@@ -101,31 +93,6 @@ class task:
@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
try:
from flags.state import flag_enabled
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
# At this point we have the import string, and submit_task wants the method, so back to that
actual_task = resolve_callable(cls.name)
processor_options = ()
if on_duplicate is not None:
processor_options = (Blocker.Params(on_duplicate=on_duplicate),)
return submit_task(
actual_task,
args=args,
kwargs=kwargs,
queue=queue,
uuid=uuid,
timeout=timeout,
processor_options=processor_options,
**kw,
)
except Exception:
logger.exception(f"[DISPATCHER] Failed to check for alternative dispatcherd implementation for {cls.name}")
# Continue with original implementation if anything fails
pass
# Original implementation follows
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
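For context, the removed decorator options were consumed at call sites like the send_subsystem_metrics change earlier in this diff:

@task_awx(queue=get_task_queuename, timeout=300, on_duplicate='discard')
def send_subsystem_metrics():
    ...

# apply_async() then forwarded timeout and on_duplicate (wrapped in
# Blocker.Params) to dispatcherd's submit_task when the
# FEATURE_DISPATCHERD_ENABLED flag was on.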

View File

@@ -1,14 +0,0 @@
from dispatcherd.worker.task import TaskWorker
from django.db import connection
class AWXTaskWorker(TaskWorker):
def on_start(self) -> None:
"""Get worker connected so that first task it gets will be worked quickly"""
connection.ensure_connection()
def pre_task(self, message) -> None:
"""This should remedy bad connections that can not fix themselves"""
connection.close_if_unusable_or_obsolete()

View File

@@ -14,14 +14,21 @@ from jinja2.exceptions import UndefinedError, TemplateSyntaxError, SecurityError
# Django
from django.core import exceptions as django_exceptions
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.signals import m2m_changed, post_save
from django.db.models.signals import (
post_save,
post_delete,
)
from django.db.models.signals import m2m_changed
from django.db import models
from django.db.models.fields.related import lazy_related_operation
from django.db.models.fields.related_descriptors import (
ReverseOneToOneDescriptor,
ForwardManyToOneDescriptor,
ManyToManyDescriptor,
ReverseManyToOneDescriptor,
create_forward_many_to_many_manager,
)
from django.utils.encoding import smart_str
from django.db.models import JSONField
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
@@ -47,6 +54,7 @@ __all__ = [
'ImplicitRoleField',
'SmartFilterField',
'OrderedManyToManyField',
'update_role_parentage_for_instance',
'is_implicit_parent',
]
@@ -138,6 +146,34 @@ class AutoOneToOneField(models.OneToOneField):
setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related))
def resolve_role_field(obj, field):
ret = []
field_components = field.split('.', 1)
if hasattr(obj, field_components[0]):
obj = getattr(obj, field_components[0])
else:
return []
if obj is None:
return []
if len(field_components) == 1:
# use extremely generous duck typing to accommodate all possible forms
# of the model that may be used during various migrations
if obj._meta.model_name != 'role' or obj._meta.app_label != 'main':
raise Exception(smart_str('{} refers to a {}, not a Role'.format(field, type(obj))))
ret.append(obj.id)
else:
if type(obj) is ManyToManyDescriptor:
for o in obj.all():
ret += resolve_role_field(o, field_components[1])
else:
ret += resolve_role_field(obj, field_components[1])
return ret
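An illustrative walk of the recursion above, assuming hypothetical objects:

# resolve_role_field(job_template, 'project.admin_role')
#   -> resolves job_template.project, recurses with 'admin_role'
#   -> returns [project.admin_role.id]
# A ManyToManyDescriptor hop instead fans out over .all() and
# concatenates the per-object results.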
def is_implicit_parent(parent_role, child_role):
"""
Determine if the parent_role is an implicit parent as defined by
@@ -174,6 +210,34 @@ def is_implicit_parent(parent_role, child_role):
return False
def update_role_parentage_for_instance(instance):
"""update_role_parentage_for_instance
updates the parents listing for all the roles
of a given instance if they have changed
"""
parents_removed = set()
parents_added = set()
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
cur_role = getattr(instance, implicit_role_field.name)
original_parents = set(json.loads(cur_role.implicit_parents))
new_parents = implicit_role_field._resolve_parent_roles(instance)
removals = original_parents - new_parents
if removals:
cur_role.parents.remove(*list(removals))
parents_removed.add(cur_role.pk)
additions = new_parents - original_parents
if additions:
cur_role.parents.add(*list(additions))
parents_added.add(cur_role.pk)
new_parents_list = list(new_parents)
new_parents_list.sort()
new_parents_json = json.dumps(new_parents_list)
if cur_role.implicit_parents != new_parents_json:
cur_role.implicit_parents = new_parents_json
cur_role.save(update_fields=['implicit_parents'])
return (parents_added, parents_removed)
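A usage sketch, with a hypothetical organization instance:

# added, removed = update_role_parentage_for_instance(organization)
# 'added' and 'removed' are sets of Role pks whose parent links changed;
# _post_save below calls this after all implicit roles exist.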
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
pass
@@ -205,6 +269,65 @@ class ImplicitRoleField(models.ForeignKey):
getattr(cls, '__implicit_role_fields').append(self)
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
post_delete.connect(self._post_delete, cls, True, dispatch_uid='implicit-role-post-delete')
function = lambda local, related, field: self.bind_m2m_changed(field, related, local)
lazy_related_operation(function, cls, "self", field=self)
def bind_m2m_changed(self, _self, _role_class, cls):
if not self.parent_role:
return
field_names = self.parent_role
if type(field_names) is not list:
field_names = [field_names]
for field_name in field_names:
if field_name.startswith('singleton:'):
continue
field_name, sep, field_attr = field_name.partition('.')
# Non-existent fields can occur if a parent model is ever
# moved inside a migration; needed for the job_template_organization_field
# migration in particular
# consistency is assured by unit test awx.main.tests.functional
field = getattr(cls, field_name, None)
if field and type(field) is ReverseManyToOneDescriptor or type(field) is ManyToManyDescriptor:
if '.' in field_attr:
raise Exception('Referencing deep roles through ManyToMany fields is unsupported.')
if type(field) is ReverseManyToOneDescriptor:
sender = field.through
else:
sender = field.related.through
reverse = type(field) is ManyToManyDescriptor
m2m_changed.connect(self.m2m_update(field_attr, reverse), sender, weak=False)
def m2m_update(self, field_attr, _reverse):
def _m2m_update(instance, action, model, pk_set, reverse, **kwargs):
if action == 'post_add' or action == 'pre_remove':
if _reverse:
reverse = not reverse
if reverse:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, field_attr).children.add(getattr(obj, self.name))
if action == 'pre_remove':
getattr(instance, field_attr).children.remove(getattr(obj, self.name))
else:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, self.name).parents.add(getattr(obj, field_attr))
if action == 'pre_remove':
getattr(instance, self.name).parents.remove(getattr(obj, field_attr))
return _m2m_update
def _post_save(self, instance, created, *args, **kwargs):
Role_ = utils.get_current_apps().get_model('main', 'Role')
@@ -214,24 +337,68 @@ class ImplicitRoleField(models.ForeignKey):
Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
latest_instance = Model.objects.get(pk=instance.pk)
# Create any missing role objects
missing_roles = []
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
cur_role = getattr(latest_instance, implicit_role_field.name, None)
if cur_role is None:
missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
# Avoid circular import
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
if len(missing_roles) > 0:
Role_.objects.bulk_create(missing_roles)
updates = {}
role_ids = []
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
setattr(latest_instance, role.role_field, role)
updates[role.role_field] = role.id
role_ids.append(role.id)
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
with batch_role_ancestor_rebuilding():
# Create any missing role objects
missing_roles = []
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
cur_role = getattr(latest_instance, implicit_role_field.name, None)
if cur_role is None:
missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
instance.refresh_from_db()
if len(missing_roles) > 0:
Role_.objects.bulk_create(missing_roles)
updates = {}
role_ids = []
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
setattr(latest_instance, role.role_field, role)
updates[role.role_field] = role.id
role_ids.append(role.id)
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
Role.rebuild_role_ancestor_list(role_ids, [])
update_role_parentage_for_instance(latest_instance)
instance.refresh_from_db()
def _resolve_parent_roles(self, instance):
if not self.parent_role:
return set()
paths = self.parent_role if type(self.parent_role) is list else [self.parent_role]
parent_roles = set()
for path in paths:
if path.startswith("singleton:"):
singleton_name = path[10:]
Role_ = utils.get_current_apps().get_model('main', 'Role')
qs = Role_.objects.filter(singleton_name=singleton_name)
if qs.count() >= 1:
role = qs[0]
else:
role = Role_.objects.create(singleton_name=singleton_name, role_field=singleton_name)
parents = [role.id]
else:
parents = resolve_role_field(instance, path)
for parent in parents:
parent_roles.add(parent)
return parent_roles
def _post_delete(self, instance, *args, **kwargs):
role_ids = []
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
role_ids.append(getattr(instance, implicit_role_field.name + '_id'))
Role_ = utils.get_current_apps().get_model('main', 'Role')
child_ids = [x for x in Role_.parents.through.objects.filter(to_role_id__in=role_ids).distinct().values_list('from_role_id', flat=True)]
Role_.objects.filter(id__in=role_ids).delete()
# Avoid circular import
from awx.main.models.rbac import Role
Role.rebuild_role_ancestor_list([], child_ids)
class SmartFilterField(models.TextField):

View File

@@ -4,7 +4,6 @@
from django.core.management.base import BaseCommand
from django.db import transaction
from crum import impersonate
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
from awx.main.signals import disable_computed_fields
@@ -17,9 +16,8 @@ class Command(BaseCommand):
def handle(self, *args, **kwargs):
# Wrap the operation in an atomic block, so we do not accidentally
# create the organization but not the project, etc.
with no_reverse_sync():
with transaction.atomic():
self._handle()
with transaction.atomic():
self._handle()
def _handle(self):
changed = False

View File

@@ -2,21 +2,13 @@
# All Rights Reserved.
import logging
import yaml
import os
import redis
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from flags.state import flag_enabled
from dispatcherd.factories import get_control_from_settings
from dispatcherd import run_service
from dispatcherd.config import setup as dispatcher_setup
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.config import get_dispatcherd_config
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
@@ -48,44 +40,18 @@ class Command(BaseCommand):
),
)
def verify_dispatcherd_socket(self):
if not os.path.exists(settings.DISPATCHERD_DEBUGGING_SOCKFILE):
raise CommandError('Dispatcher is not running locally')
def handle(self, *arg, **options):
if options.get('status'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
ctl = get_control_from_settings()
running_data = ctl.control_with_reply('status')
if len(running_data) != 1:
raise CommandError('Did not receive expected number of replies')
print(yaml.dump(running_data[0], default_flow_style=False))
return
else:
print(Control('dispatcher').status())
return
print(Control('dispatcher').status())
return
if options.get('schedule'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
print('NOT YET IMPLEMENTED')
return
else:
print(Control('dispatcher').schedule())
print(Control('dispatcher').schedule())
return
if options.get('running'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
ctl = get_control_from_settings()
running_data = ctl.control_with_reply('running')
print(yaml.dump(running_data, default_flow_style=False))
return
else:
print(Control('dispatcher').running())
return
print(Control('dispatcher').running())
return
if options.get('reload'):
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
print('NOT YET IMPLEMENTED')
return
else:
return Control('dispatcher').control({'control': 'reload'})
return Control('dispatcher').control({'control': 'reload'})
if options.get('cancel'):
cancel_str = options.get('cancel')
try:
@@ -94,36 +60,21 @@ class Command(BaseCommand):
cancel_data = [cancel_str]
if not isinstance(cancel_data, list):
cancel_data = [cancel_str]
print(Control('dispatcher').cancel(cancel_data))
return
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
ctl = get_control_from_settings()
results = []
for task_id in cancel_data:
# For each task UUID, send an individual cancel command
result = ctl.control_with_reply('cancel', data={'uuid': task_id})
results.append(result)
print(yaml.dump(results, default_flow_style=False))
return
else:
print(Control('dispatcher').cancel(cancel_data))
return
consumer = None
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
dispatcher_setup(get_dispatcherd_config(for_service=True))
run_service()
else:
consumer = None
try:
DispatcherMetricsServer().start()
except redis.exceptions.ConnectionError as exc:
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
try:
DispatcherMetricsServer().start()
except redis.exceptions.ConnectionError as exc:
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
try:
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
try:
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()

View File

@@ -26,11 +26,6 @@ def change_inventory_source_org_unique(apps, schema_editor):
logger.info(f'Set database constraint rule for {r} inventory source objects')
def rename_wfjt(apps, schema_editor):
cls = apps.get_model('main', 'WorkflowJobTemplate')
_rename_duplicates(cls)
class Migration(migrations.Migration):
dependencies = [
@@ -38,14 +33,13 @@ class Migration(migrations.Migration):
]
operations = [
migrations.RunPython(rename_jts, migrations.RunPython.noop),
migrations.RunPython(rename_projects, migrations.RunPython.noop),
migrations.AddField(
model_name='unifiedjobtemplate',
name='org_unique',
field=models.BooleanField(blank=True, default=True, editable=False, help_text='Used internally to selectively enforce database constraint on name'),
),
migrations.RunPython(rename_jts, migrations.RunPython.noop),
migrations.RunPython(rename_projects, migrations.RunPython.noop),
migrations.RunPython(rename_wfjt, migrations.RunPython.noop),
migrations.RunPython(change_inventory_source_org_unique, migrations.RunPython.noop),
migrations.AddConstraint(
model_name='unifiedjobtemplate',

View File

@@ -1,26 +0,0 @@
from django.db import migrations
# AWX
from awx.main.models import CredentialType
from awx.main.utils.common import set_current_apps
def setup_tower_managed_defaults(apps, schema_editor):
set_current_apps(apps)
CredentialType.setup_tower_managed_defaults(apps)
def setup_rbac_role_system_administrator(apps, schema_editor):
Role = apps.get_model('main', 'Role')
Role.objects.get_or_create(singleton_name='system_administrator', role_field='system_administrator')
class Migration(migrations.Migration):
dependencies = [
('main', '0200_template_name_constraint'),
]
operations = [
migrations.RunPython(setup_tower_managed_defaults),
migrations.RunPython(setup_rbac_role_system_administrator),
]

View File

@@ -0,0 +1,15 @@
# Generated by Django 4.2.10 on 2024-09-16 10:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0200_template_name_constraint'),
]
operations = [
migrations.DeleteModel(
name='Profile',
),
]

View File

@@ -1,102 +0,0 @@
# Django migration for converting Controller role definitions
from ansible_base.rbac.migrations._utils import give_permissions
from django.db import migrations
def convert_controller_role_definitions(apps, schema_editor):
"""
Convert Controller role definitions to regular role definitions:
- Controller Organization Admin -> Organization Admin
- Controller Organization Member -> Organization Member
- Controller Team Admin -> Team Admin
- Controller Team Member -> Team Member
- Controller System Auditor -> Platform Auditor
Then delete the old Controller role definitions.
"""
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
Permission = apps.get_model('dab_rbac', 'DABPermission')
# Mapping of old Controller role names to new role names
role_mappings = {
'Controller Organization Admin': 'Organization Admin',
'Controller Organization Member': 'Organization Member',
'Controller Team Admin': 'Team Admin',
'Controller Team Member': 'Team Member',
}
for old_name, new_name in role_mappings.items():
# Find the old Controller role definition
old_role = RoleDefinition.objects.filter(name=old_name).first()
if not old_role:
continue # Skip if the old role doesn't exist
# Find the new role definition
new_role = RoleDefinition.objects.get(name=new_name)
# Collect all the assignments that need to be migrated
# Group by object (content_type + object_id) to batch the give_permissions calls
assignments_by_object = {}
# Get user assignments
user_assignments = RoleUserAssignment.objects.filter(role_definition=old_role).select_related('object_role')
for assignment in user_assignments:
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
if key not in assignments_by_object:
assignments_by_object[key] = {'users': [], 'teams': []}
assignments_by_object[key]['users'].append(assignment.user)
# Get team assignments
team_assignments = RoleTeamAssignment.objects.filter(role_definition=old_role).select_related('object_role')
for assignment in team_assignments:
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
if key not in assignments_by_object:
assignments_by_object[key] = {'users': [], 'teams': []}
assignments_by_object[key]['teams'].append(assignment.team.id)
# Use give_permissions to create new assignments with the new role definition
for (content_type_id, object_id), data in assignments_by_object.items():
if data['users'] or data['teams']:
give_permissions(
apps,
new_role,
users=data['users'],
teams=data['teams'],
object_id=object_id,
content_type_id=content_type_id,
)
# Delete the old role definition (this will cascade to delete old assignments and ObjectRoles)
old_role.delete()
# Create or get Platform Auditor
auditor_rd, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor',
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
)
if created:
auditor_rd.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
old_rd = RoleDefinition.objects.filter(name='Controller System Auditor').first()
if old_rd:
for assignment in RoleUserAssignment.objects.filter(role_definition=old_rd):
RoleUserAssignment.objects.create(
user=assignment.user,
role_definition=auditor_rd,
)
# Delete the Controller System Auditor role
RoleDefinition.objects.filter(name='Controller System Auditor').delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0201_create_managed_creds'),
]
operations = [
migrations.RunPython(convert_controller_role_definitions),
]

View File

@@ -0,0 +1,26 @@
# Generated by Django 4.2.10 on 2024-09-16 15:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0201_delete_profile'),
]
operations = [
# delete all sso application migrations
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';"),
# delete all sso application content group permissions
migrations.RunSQL(
"DELETE FROM auth_group_permissions "
"WHERE permission_id IN "
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));"
),
# delete all sso application content permissions
migrations.RunSQL("DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');"),
# delete sso application content type
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';"),
# drop sso application created table
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;"),
]

View File

@@ -0,0 +1,23 @@
# Generated by Django 4.2.10 on 2024-10-22 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0202_remove_sso_app_content'),
]
operations = [
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default=None, max_length=32),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default=None, max_length=32),
),
]

View File

@@ -1,22 +0,0 @@
import logging
from django.db import migrations
from awx.main.migrations._dab_rbac import consolidate_indirect_user_roles
logger = logging.getLogger('awx.main.migrations')
class Migration(migrations.Migration):
dependencies = [
('main', '0202_convert_controller_role_definitions'),
]
# The DAB RBAC app makes substantial model changes which, by change ordering, come after this;
# not including run_before might sometimes work, but this enforces a stricter and more stable order
# for applying migrations both forwards and backwards

run_before = [("dab_rbac", "0004_remote_permissions_additions")]
operations = [
migrations.RunPython(consolidate_indirect_user_roles, migrations.RunPython.noop),
]

View File

@@ -0,0 +1,39 @@
# Generated by Django 4.2.10 on 2024-10-24 14:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0203_alter_inventorysource_source_and_more'),
]
operations = [
migrations.AlterUniqueTogether(
name='oauth2application',
unique_together=None,
),
migrations.RemoveField(
model_name='oauth2application',
name='organization',
),
migrations.RemoveField(
model_name='oauth2application',
name='user',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_access_token',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_application',
),
migrations.DeleteModel(
name='OAuth2AccessToken',
),
migrations.DeleteModel(
name='OAuth2Application',
),
]

View File

@@ -1,124 +0,0 @@
# Generated by Django 4.2.10 on 2024-09-16 10:22
from django.db import migrations, models
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
# --- START of function merged from 0203_rename_github_app_kind.py ---
def update_github_app_kind(apps, schema_editor):
"""
Updates the 'kind' field for CredentialType records
from 'github_app' to 'github_app_lookup'.
This addresses a change in the entry point key for the GitHub App plugin.
"""
CredentialType = apps.get_model('main', 'CredentialType')
db_alias = schema_editor.connection.alias
CredentialType.objects.using(db_alias).filter(kind='github_app').update(kind='github_app_lookup')
# --- END of function merged from 0203_rename_github_app_kind.py ---
class Migration(migrations.Migration):
dependencies = [
('main', '0203_remove_team_of_teams'),
]
operations = [
migrations.DeleteModel(
name='Profile',
),
# Remove SSO app content
# delete all sso application migrations
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';", reverse_sql=migrations.RunSQL.noop),
# delete all sso application content group permissions
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL(
"DELETE FROM auth_group_permissions "
"WHERE permission_id IN "
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));",
reverse_sql=migrations.RunSQL.noop,
),
# delete all sso application content permissions
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL(
"DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');",
reverse_sql=migrations.RunSQL.noop,
),
# delete sso application content type
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';", reverse_sql=migrations.RunSQL.noop),
# drop sso application created table
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;", reverse_sql=migrations.RunSQL.noop),
# Alter inventory source source field
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default=None, max_length=32),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default=None, max_length=32),
),
# Alter OAuth2Application unique together
migrations.AlterUniqueTogether(
name='oauth2application',
unique_together=None,
),
migrations.RemoveField(
model_name='oauth2application',
name='organization',
),
migrations.RemoveField(
model_name='oauth2application',
name='user',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_access_token',
),
migrations.RemoveField(
model_name='activitystream',
name='o_auth2_application',
),
migrations.DeleteModel(
name='OAuth2AccessToken',
),
migrations.DeleteModel(
name='OAuth2Application',
),
# Delete system token cleanup jobs, because tokens were deleted
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
# --- START of operations merged from 0203_rename_github_app_kind.py ---
migrations.RunPython(update_github_app_kind, migrations.RunPython.noop),
# --- END of operations merged from 0203_rename_github_app_kind.py ---
]

View File

@@ -0,0 +1,44 @@
# Generated by Django 4.2.16 on 2024-12-18 16:05
from django.db import migrations, models
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
class Migration(migrations.Migration):
dependencies = [
('main', '0204_alter_oauth2application_unique_together_and_more'),
]
operations = [
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(
blank=True,
choices=[
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
('cleanup_sessions', 'Removes expired browser sessions from the database'),
],
default='',
max_length=32,
),
),
]

View File

@@ -0,0 +1,22 @@
# Generated by Django 4.2.20 on 2025-05-22 08:57
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0205_delete_token_cleanup_job'),
]
operations = [
migrations.AlterField(
model_name='instance',
name='capacity_adjustment',
field=models.DecimalField(
decimal_places=2, default=Decimal('0.75'), max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('0'))]
),
),
]
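Note that Django applies field defaults at the application layer, so this AlterField only changes what newly created Instance rows receive; existing rows keep their stored capacity_adjustment. A sketch of the value range the field shape allows (at most 3 digits total, 2 after the point, non-negative), simplified from Django's own validator logic:

from decimal import Decimal, InvalidOperation

def fits(value, max_digits=3, decimal_places=2):
    # Mirrors the DecimalField shape check, simplified for illustration.
    try:
        d = Decimal(value)
    except InvalidOperation:
        return False
    sign, digits, exponent = d.as_tuple()
    return sign == 0 and -exponent <= decimal_places and len(digits) <= max_digits

print(fits('0.75'))   # True  -- the new default
print(fits('10.00'))  # False -- four digits in total
print(fits('-0.5'))   # False -- MinValueValidator would reject negatives too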

View File

@@ -1,6 +1,5 @@
import logging
logger = logging.getLogger('awx.main.migrations')

View File

@@ -1,6 +1,5 @@
import json
import logging
from collections import defaultdict
from django.apps import apps as global_apps
from django.db.models import ForeignKey
@@ -18,14 +17,7 @@ logger = logging.getLogger('awx.main.migrations._dab_rbac')
def create_permissions_as_operation(apps, schema_editor):
logger.info('Running data migration create_permissions_as_operation')
# NOTE: the DAB ContentType changes adjusted how they fire
# before they would fire on every app config, like contenttypes
create_dab_permissions(global_apps.get_app_config("main"), apps=apps)
# This changed to only fire once and do a global creation
# so we need to call it for specifically the dab_rbac app
# multiple calls will not hurt anything
create_dab_permissions(global_apps.get_app_config("dab_rbac"), apps=apps)
"""
@@ -120,12 +112,7 @@ def get_descendents(f, children_map):
def get_permissions_for_role(role_field, children_map, apps):
Permission = apps.get_model('dab_rbac', 'DABPermission')
try:
# After migration for remote permissions
ContentType = apps.get_model('dab_rbac', 'DABContentType')
except LookupError:
# If using DAB from before remote permissions are implemented
ContentType = apps.get_model('contenttypes', 'ContentType')
ContentType = apps.get_model('contenttypes', 'ContentType')
perm_list = []
for child_field in get_descendents(role_field, children_map):
@@ -168,15 +155,11 @@ def migrate_to_new_rbac(apps, schema_editor):
This method moves the assigned permissions from the old rbac.py models
to the new RoleDefinition and ObjectRole models
"""
logger.info('Running data migration migrate_to_new_rbac')
Role = apps.get_model('main', 'Role')
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
Permission = apps.get_model('dab_rbac', 'DABPermission')
if Permission.objects.count() == 0:
raise RuntimeError('Running migrate_to_new_rbac requires DABPermission objects created first')
# remove 'add' permissions that are not valid for migrations from old versions
for perm_str in ('add_organization', 'add_jobtemplate'):
perm = Permission.objects.filter(codename=perm_str).first()
@@ -256,14 +239,11 @@ def migrate_to_new_rbac(apps, schema_editor):
# Create new replacement system auditor role
new_system_auditor, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor',
name='Controller System Auditor',
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
)
new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
if created:
logger.info(f'Created RoleDefinition {new_system_auditor.name} pk={new_system_auditor.pk} with {new_system_auditor.permissions.count()} permissions')
# migrate is_system_auditor flag, because it is no longer handled by a system role
old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first()
if old_system_auditor:
@@ -292,9 +272,8 @@ def get_or_create_managed(name, description, ct, permissions, RoleDefinition):
def setup_managed_role_definitions(apps, schema_editor):
"""
Idempotent method to create or sync the managed role definitions
Idepotent method to create or sync the managed role definitions
"""
logger.info('Running data migration setup_managed_role_definitions')
to_create = {
'object_admin': '{cls.__name__} Admin',
'org_admin': 'Organization Admin',
@@ -302,13 +281,7 @@ def setup_managed_role_definitions(apps, schema_editor):
'special': '{cls.__name__} {action}',
}
try:
# After migration for remote permissions
ContentType = apps.get_model('dab_rbac', 'DABContentType')
except LookupError:
# If using DAB from before remote permissions are implemented
ContentType = apps.get_model('contenttypes', 'ContentType')
ContentType = apps.get_model('contenttypes', 'ContentType')
Permission = apps.get_model('dab_rbac', 'DABPermission')
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
@@ -336,6 +309,16 @@ def setup_managed_role_definitions(apps, schema_editor):
to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition
)
)
if cls_name == 'team':
managed_role_definitions.append(
get_or_create_managed(
'Controller Team Admin',
f'Has all permissions to a single {cls._meta.verbose_name}',
ct,
indiv_perms,
RoleDefinition,
)
)
if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
org_child_perms = object_perms.copy()
@@ -376,6 +359,18 @@ def setup_managed_role_definitions(apps, schema_editor):
RoleDefinition,
)
)
if action == 'member' and cls_name in ('organization', 'team'):
suffix = to_create['special'].format(cls=cls, action=action.title())
rd_name = f'Controller {suffix}'
managed_role_definitions.append(
get_or_create_managed(
rd_name,
f'Has {action} permissions to a single {cls._meta.verbose_name}',
ct,
perm_list,
RoleDefinition,
)
)
if 'org_admin' in to_create:
managed_role_definitions.append(
@@ -387,6 +382,15 @@ def setup_managed_role_definitions(apps, schema_editor):
RoleDefinition,
)
)
managed_role_definitions.append(
get_or_create_managed(
'Controller Organization Admin',
'Has all permissions to a single organization and all objects inside of it',
org_ct,
org_perms,
RoleDefinition,
)
)
# Special "organization action" roles
audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
@@ -427,115 +431,3 @@ def setup_managed_role_definitions(apps, schema_editor):
for role_definition in unexpected_role_definitions:
logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
role_definition.delete()
def get_team_to_team_relationships(apps, team_member_role):
"""
Find all team-to-team relationships where one team is a member of another.
Returns a dict mapping parent_team_id -> [child_team_id, ...]
"""
team_to_team_relationships = defaultdict(list)
# Find all team assignments with the Team Member role
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
team_assignments = RoleTeamAssignment.objects.filter(role_definition=team_member_role).select_related('team')
for assignment in team_assignments:
parent_team_id = int(assignment.object_id)
child_team_id = assignment.team.id
team_to_team_relationships[parent_team_id].append(child_team_id)
return team_to_team_relationships
def get_all_user_members_of_team(apps, team_member_role, team_id, team_to_team_map, visited=None):
"""
Recursively find all users who are members of a team, including through nested teams.
"""
if visited is None:
visited = set()
if team_id in visited:
return set() # Avoid infinite recursion
visited.add(team_id)
all_users = set()
# Get direct user assignments to this team
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
user_assignments = RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_id).select_related('user')
for assignment in user_assignments:
all_users.add(assignment.user)
# Get team-to-team assignments and recursively find their users
child_team_ids = team_to_team_map.get(team_id, [])
for child_team_id in child_team_ids:
nested_users = get_all_user_members_of_team(apps, team_member_role, child_team_id, team_to_team_map, visited.copy())
all_users.update(nested_users)
return all_users
def remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id):
"""
Remove team-to-team memberships.
"""
Team = apps.get_model('main', 'Team')
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
parent_team = Team.objects.get(id=parent_team_id)
child_team = Team.objects.get(id=child_team_id)
# Remove all team-to-team RoleTeamAssignments
RoleTeamAssignment.objects.filter(role_definition=team_member_role, object_id=parent_team_id, team=child_team).delete()
# Check mirroring Team model for children under member_role
parent_team.member_role.children.filter(object_id=child_team_id).delete()
def consolidate_indirect_user_roles(apps, schema_editor):
"""
A user should have a member role for every team they were indirectly
a member of. ex. Team A is a member of Team B. All users in Team A
previously were only members of Team A. They should now be members of
Team A and Team B.
"""
# get models for membership on teams
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
Team = apps.get_model('main', 'Team')
team_member_role = RoleDefinition.objects.get(name='Team Member')
team_to_team_map = get_team_to_team_relationships(apps, team_member_role)
if not team_to_team_map:
return # No team-to-team relationships to consolidate
# Get content type for Team - needed for give_permissions
try:
from django.contrib.contenttypes.models import ContentType
team_content_type = ContentType.objects.get_for_model(Team)
except ImportError:
# Fallback if ContentType is not available
ContentType = apps.get_model('contenttypes', 'ContentType')
team_content_type = ContentType.objects.get_for_model(Team)
# Get all users who should be direct members of a team
for parent_team_id, child_team_ids in team_to_team_map.items():
all_users = get_all_user_members_of_team(apps, team_member_role, parent_team_id, team_to_team_map)
# Create direct RoleUserAssignments for all users
if all_users:
give_permissions(apps=apps, rd=team_member_role, users=list(all_users), object_id=parent_team_id, content_type_id=team_content_type.id)
# Mirror assignments to Team model
parent_team = Team.objects.get(id=parent_team_id)
for user in all_users:
parent_team.member_role.members.add(user.id)
# Remove all team-to-team assignments for parent team
for child_team_id in child_team_ids:
remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id)
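The traversal above, restated as a pure-Python sketch with plain dicts in place of the ORM queries; the visited set is what guards against membership cycles such as Team A being a member of Team B and vice versa:

def all_members(team_id, team_to_team, direct_users, visited=None):
    # team_to_team: parent_team_id -> [child_team_id, ...]
    # direct_users: team_id -> {user, ...}
    if visited is None:
        visited = set()
    if team_id in visited:
        return set()  # cycle guard, mirroring get_all_user_members_of_team
    visited.add(team_id)
    users = set(direct_users.get(team_id, set()))
    for child_id in team_to_team.get(team_id, []):
        users |= all_members(child_id, team_to_team, direct_users, visited.copy())
    return users

# Team A (id 1) is a member of Team B (id 2): B's effective members include A's users.
assert all_members(2, {2: [1]}, {1: {'alice'}, 2: {'bob'}}) == {'alice', 'bob'}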

View File

@@ -3,6 +3,7 @@ from time import time
from django.db.models import Subquery, OuterRef, F
from awx.main.fields import update_role_parentage_for_instance
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
logger = logging.getLogger('rbac_migrations')
@@ -237,10 +238,85 @@ def restore_inventory_admins_backward(apps, schema_editor):
def rebuild_role_hierarchy(apps, schema_editor):
"""Not used after DAB RBAC migration"""
pass
"""
This should be called in any migration when ownerships are changed.
Ex. I remove a user from the admin_role of a credential.
Ancestors are cached from parents for performance, this re-computes ancestors.
"""
logger.info('Computing role roots..')
start = time()
roots = Role.objects.all().values_list('id', flat=True)
stop = time()
logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start))
start = time()
Role.rebuild_role_ancestor_list(roots, [])
stop = time()
logger.info('Rebuild ancestors completed in %f seconds' % (stop - start))
logger.info('Done.')
def rebuild_role_parentage(apps, schema_editor, models=None):
"""Not used after DAB RBAC migration"""
pass
"""
This should be called in any migration when any parent_role entry
is modified so that the cached parent fields will be updated. Ex:
foo_role = ImplicitRoleField(
parent_role=['bar_role'] # change to parent_role=['admin_role']
)
This is like rebuild_role_hierarchy, but that method updates ancestors,
whereas this method updates parents.
"""
start = time()
seen_models = set()
model_ct = 0
noop_ct = 0
ContentType = apps.get_model('contenttypes', "ContentType")
additions = set()
removals = set()
role_qs = Role.objects
if models:
# update_role_parentage_for_instance is expensive
# if the models have been downselected, ignore those which are not in the list
ct_ids = list(ContentType.objects.filter(model__in=[name.lower() for name in models]).values_list('id', flat=True))
role_qs = role_qs.filter(content_type__in=ct_ids)
for role in role_qs.iterator():
if not role.object_id:
continue
model_tuple = (role.content_type_id, role.object_id)
if model_tuple in seen_models:
continue
seen_models.add(model_tuple)
# The GenericForeignKey does not work right in migrations
# with the usage as role.content_object
# so we do the lookup ourselves with current migration models
ct = role.content_type
app = ct.app_label
ct_model = apps.get_model(app, ct.model)
content_object = ct_model.objects.get(pk=role.object_id)
parents_added, parents_removed = update_role_parentage_for_instance(content_object)
additions.update(parents_added)
removals.update(parents_removed)
if parents_added:
model_ct += 1
logger.debug('Added to parents of roles {} of {}'.format(parents_added, content_object))
if parents_removed:
model_ct += 1
logger.debug('Removed from parents of roles {} of {}'.format(parents_removed, content_object))
else:
noop_ct += 1
logger.debug('No changes to role parents for {} resources'.format(noop_ct))
logger.debug('Added parents to {} roles'.format(len(additions)))
logger.debug('Removed parents from {} roles'.format(len(removals)))
if model_ct:
logger.info('Updated implicit parents of {} resources'.format(model_ct))
logger.info('Rebuild parentage completed in %f seconds' % (time() - start))
# this is run because the ordinary signals for
# Role.parents.add and Role.parents.remove are not called in migration
Role.rebuild_role_ancestor_list(list(additions), list(removals))

View File

@@ -172,17 +172,35 @@ def cleanup_created_modified_by(sender, **kwargs):
pre_delete.connect(cleanup_created_modified_by, sender=User)
@property
def user_get_organizations(user):
return Organization.access_qs(user, 'member')
@property
def user_get_admin_of_organizations(user):
return Organization.access_qs(user, 'change')
@property
def user_get_auditor_of_organizations(user):
return Organization.access_qs(user, 'audit')
@property
def created(user):
return user.date_joined
User.add_to_class('organizations', user_get_organizations)
User.add_to_class('admin_of_organizations', user_get_admin_of_organizations)
User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations)
User.add_to_class('created', created)
def get_system_auditor_role():
rd, created = RoleDefinition.objects.get_or_create(
name='Platform Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
name='Controller System Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
)
if created:
rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view')))
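A sketch of how the monkey-patched attributes above read at call sites, assuming a configured AWX environment with this module's own User and Organization imports; each property resolves through DAB RBAC's access_qs rather than the old Role model:

user = User.objects.first()
print(user.organizations.count())             # Organization.access_qs(user, 'member')
print(user.admin_of_organizations.count())    # Organization.access_qs(user, 'change')
print(user.auditor_of_organizations.count())  # Organization.access_qs(user, 'audit')
print(user.created)                           # aliases User.date_joined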

View File

@@ -160,7 +160,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
default=100,
editable=False,
)
capacity_adjustment = models.DecimalField(default=Decimal(1.0), max_digits=3, decimal_places=2, validators=[MinValueValidator(Decimal(0.0))])
capacity_adjustment = models.DecimalField(default=Decimal(0.75), max_digits=3, decimal_places=2, validators=[MinValueValidator(Decimal(0.0))])
enabled = models.BooleanField(default=True)
managed_by_policy = models.BooleanField(default=True)
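For context on what this knob does: capacity_adjustment is conventionally read as an interpolation between an instance's conservative and optimistic capacity estimates. A hedged sketch of that relationship (the actual set_capacity_value bookkeeping lives elsewhere in this model and is not part of this diff):

from decimal import Decimal

def effective_capacity(cpu_capacity, mem_capacity, adjustment=Decimal('0.75')):
    # 0 pins capacity to the smaller estimate, 1 to the larger.
    lower, higher = sorted((cpu_capacity, mem_capacity))
    return lower + (higher - lower) * adjustment

print(effective_capacity(36, 64))                  # Decimal('57.00') under the new default
print(effective_capacity(36, 64, Decimal('1.0')))  # Decimal('64.0'), the old default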

View File

@@ -1024,10 +1024,7 @@ class InventorySourceOptions(BaseModel):
# If a credential was provided, it's important that it matches
# the actual inventory source being used (Amazon requires Amazon
# credentials; Rackspace requires Rackspace credentials; etc...)
# TODO: AAP-53978 check that this matches new awx-plugin content for ESXI
if source == 'vmware_esxi' and source.replace('vmware_esxi', 'vmware') != cred.kind:
return _('VMWARE inventory sources (such as %s) require credentials for the matching cloud service.') % source
if source == 'ec2' and source.replace('ec2', 'aws') != cred.kind:
if source.replace('ec2', 'aws') != cred.kind:
return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source
# Allow an EC2 source to omit the credential. If Tower is running on
# an EC2 instance with an IAM Role assigned, boto will use credentials

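The kind-matching rule in this hunk reduces to a string substitution, sketched standalone; 'ec2' sources must pair with 'aws' credentials, and under the TODO above 'vmware_esxi' is provisionally mapped to 'vmware':

def cred_kind_matches(source, cred_kind):
    expected = source.replace('ec2', 'aws').replace('vmware_esxi', 'vmware')
    return expected == cred_kind

assert cred_kind_matches('ec2', 'aws')
assert not cred_kind_matches('ec2', 'gce')
assert cred_kind_matches('vmware_esxi', 'vmware')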
View File

@@ -86,7 +86,7 @@ class ResourceMixin(models.Model):
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
if content_types is None:
ct_kwarg = dict(content_type=ContentType.objects.get_for_model(cls))
ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
else:
ct_kwarg = dict(content_type_id__in=content_types)

View File

@@ -27,9 +27,6 @@ from django.conf import settings
# Ansible_base app
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
from ansible_base.rbac.sync import maybe_reverse_sync_assignment, maybe_reverse_sync_unassignment, maybe_reverse_sync_role_definition
from ansible_base.rbac import permission_registry
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
from ansible_base.lib.utils.models import get_type_for_model
# AWX
@@ -562,27 +559,34 @@ def get_role_definition(role):
f = obj._meta.get_field(role.role_field)
action_name = f.name.rsplit("_", 1)[0]
model_print = type(obj).__name__
rd_name = f'{model_print} {action_name.title()} Compat'
perm_list = get_role_codenames(role)
defaults = {
'content_type': permission_registry.content_type_model.objects.get_by_natural_key(role.content_type.app_label, role.content_type.model),
'content_type_id': role.content_type_id,
'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility',
}
# use Controller-specific role definitions for Team/Organization and member/admin
# instead of platform role definitions
# these should exist in the system already, so just do a lookup by role definition name
if model_print in ['Team', 'Organization'] and action_name in ['member', 'admin']:
rd_name = f'Controller {model_print} {action_name.title()}'
rd = RoleDefinition.objects.filter(name=rd_name).first()
if rd:
return rd
else:
return RoleDefinition.objects.create_from_permissions(permissions=perm_list, name=rd_name, managed=True, **defaults)
else:
rd_name = f'{model_print} {action_name.title()} Compat'
with impersonate(None):
try:
with no_reverse_sync():
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
except ValidationError:
# This is a tricky case - practically speaking, users should not be allowed to create team roles
# or roles that include the team member permission.
# If we need to create this for compatibility purposes then we will create it as a managed non-editable role
defaults['managed'] = True
with no_reverse_sync():
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
if created and rbac_sync_enabled.enabled:
maybe_reverse_sync_role_definition(rd, action='create')
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
return rd
@@ -596,6 +600,12 @@ def get_role_from_object_role(object_role):
model_name, role_name, _ = rd.name.split()
role_name = role_name.lower()
role_name += '_role'
elif rd.name.startswith('Controller') and rd.name.endswith(' Admin'):
# Controller Organization Admin and Controller Team Admin
role_name = 'admin_role'
elif rd.name.startswith('Controller') and rd.name.endswith(' Member'):
# Controller Organization Member and Controller Team Member
role_name = 'member_role'
elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2:
# cases like "Organization Project Admin"
model_name, target_model_name, role_name = rd.name.split()
@@ -622,14 +632,12 @@ def get_role_from_object_role(object_role):
return getattr(object_role.content_object, role_name)
def give_or_remove_permission(role, actor, giving=True, rd=None):
def give_or_remove_permission(role, actor, giving=True):
obj = role.content_object
if obj is None:
return
if not rd:
rd = get_role_definition(role)
assignment = rd.give_or_remove_permission(actor, obj, giving=giving)
return assignment
rd = get_role_definition(role)
rd.give_or_remove_permission(actor, obj, giving=giving)
class SyncEnabled(threading.local):
@@ -681,15 +689,7 @@ def sync_members_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
role = Role.objects.get(pk=user_or_role_id)
else:
user = get_user_model().objects.get(pk=user_or_role_id)
rd = get_role_definition(role)
assignment = give_or_remove_permission(role, user, giving=is_giving, rd=rd)
# sync to resource server
if rbac_sync_enabled.enabled:
if is_giving:
maybe_reverse_sync_assignment(assignment)
else:
maybe_reverse_sync_unassignment(rd, user, role.content_object)
give_or_remove_permission(role, user, giving=is_giving)
def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs):
@@ -732,19 +732,12 @@ def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
from awx.main.models.organization import Team
team = Team.objects.get(pk=parent_role.object_id)
rd = get_role_definition(child_role)
assignment = give_or_remove_permission(child_role, team, giving=is_giving, rd=rd)
# sync to resource server
if rbac_sync_enabled.enabled:
if is_giving:
maybe_reverse_sync_assignment(assignment)
else:
maybe_reverse_sync_unassignment(rd, team, child_role.content_object)
give_or_remove_permission(child_role, team, giving=is_giving)
ROLE_DEFINITION_TO_ROLE_FIELD = {
'Organization Member': 'member_role',
'Controller Organization Member': 'member_role',
'WorkflowJobTemplate Admin': 'admin_role',
'Organization WorkflowJobTemplate Admin': 'workflow_admin_role',
'WorkflowJobTemplate Execute': 'execute_role',
@@ -769,8 +762,11 @@ ROLE_DEFINITION_TO_ROLE_FIELD = {
'Organization Credential Admin': 'credential_admin_role',
'Credential Use': 'use_role',
'Team Admin': 'admin_role',
'Controller Team Admin': 'admin_role',
'Team Member': 'member_role',
'Controller Team Member': 'member_role',
'Organization Admin': 'admin_role',
'Controller Organization Admin': 'admin_role',
'Organization Audit': 'auditor_role',
'Organization Execute': 'execute_role',
'Organization Approval': 'approval_role',
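The naming rule encoded earlier in this file, condensed into a sketch; the point of that hunk is that Team and Organization member/admin roles resolve to pre-existing 'Controller ...' managed definitions, while every other legacy Role field falls back to an on-demand '... Compat' definition:

def compat_rd_name(model_print, action_name):
    if model_print in ('Team', 'Organization') and action_name in ('member', 'admin'):
        return f'Controller {model_print} {action_name.title()}'
    return f'{model_print} {action_name.title()} Compat'

assert compat_rd_name('Team', 'member') == 'Controller Team Member'
assert compat_rd_name('JobTemplate', 'execute') == 'JobTemplate Execute Compat'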

View File

@@ -24,7 +24,6 @@ from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.contenttypes.models import ContentType
from flags.state import flag_enabled
# REST Framework
from rest_framework.exceptions import ParseError
@@ -34,7 +33,6 @@ from polymorphic.models import PolymorphicModel
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
from ansible_base.rbac import permission_registry
from ansible_base.rbac.models import RoleEvaluation
# AWX
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
@@ -219,21 +217,20 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
# do not use this if in a subclass
if cls != UnifiedJobTemplate:
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
from ansible_base.rbac.models import RoleEvaluation
action = to_permissions[role_field]
# Special condition for super auditor
role_subclasses = cls._submodels_with_roles()
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
if not (all_codenames - accessor.singleton_permissions()):
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
return qs.values_list('id', flat=True)
dab_role_cts = permission_registry.content_type_model.objects.get_for_models(*role_subclasses).values()
return (
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in dab_role_cts])
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
.values_list('object_id')
.distinct()
)
@@ -1200,13 +1197,6 @@ class UnifiedJob(
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
return fd
def _fix_double_escapes(self, content):
"""
Collapse double-escaped sequences into single-escaped form.
"""
# Replace \\ followed by one of ' " \ n r t
return re.sub(r'\\([\'"\\nrt])', r'\1', content)
def _escape_ascii(self, content):
# Remove ANSI escape sequences used to embed event data.
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
@@ -1214,14 +1204,12 @@ class UnifiedJob(
content = re.sub(r'\x1b[^m]*m', '', content)
return content
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False, fix_escapes=False):
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
content = self.result_stdout_raw_handle().read()
if redact_sensitive:
content = UriCleaner.remove_sensitive(content)
if escape_ascii:
content = self._escape_ascii(content)
if fix_escapes:
content = self._fix_double_escapes(content)
return content
@property
@@ -1230,10 +1218,9 @@ class UnifiedJob(
@property
def result_stdout(self):
# Human-facing output should fix escapes
return self._result_stdout_raw(escape_ascii=True, fix_escapes=True)
return self._result_stdout_raw(escape_ascii=True)
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False, fix_escapes=False):
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
return_buffer = StringIO()
if end_line is not None:
end_line = int(end_line)
@@ -1256,18 +1243,14 @@ class UnifiedJob(
return_buffer = UriCleaner.remove_sensitive(return_buffer)
if escape_ascii:
return_buffer = self._escape_ascii(return_buffer)
if fix_escapes:
return_buffer = self._fix_double_escapes(return_buffer)
return return_buffer, start_actual, end_actual, absolute_end
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False):
# Raw should NOT fix escapes
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive)
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False):
# Human-facing should fix escapes
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True, fix_escapes=True)
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True)
@property
def workflow_job_id(self):
@@ -1386,30 +1369,7 @@ class UnifiedJob(
traceback=self.result_traceback,
)
def get_start_kwargs(self):
needed = self.get_passwords_needed_to_start()
decrypted_start_args = decrypt_field(self, 'start_args')
if not decrypted_start_args or decrypted_start_args == '{}':
return None
try:
start_args = json.loads(decrypted_start_args)
except Exception:
logger.exception(f'Unexpected malformed start_args on unified_job={self.id}')
return None
opts = dict([(field, start_args.get(field, '')) for field in needed])
if not all(opts.values()):
missing_fields = ', '.join([k for k, v in opts.items() if not v])
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
self.save(update_fields=['job_explanation'])
return opts
def pre_start(self):
def pre_start(self, **kwargs):
if not self.can_start:
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
self.save(update_fields=['job_explanation'])
@@ -1430,11 +1390,26 @@ class UnifiedJob(
self.save(update_fields=['job_explanation'])
return (False, None)
opts = self.get_start_kwargs()
needed = self.get_passwords_needed_to_start()
try:
start_args = json.loads(decrypt_field(self, 'start_args'))
except Exception:
start_args = None
if opts and (not all(opts.values())):
if start_args in (None, ''):
start_args = kwargs
opts = dict([(field, start_args.get(field, '')) for field in needed])
if not all(opts.values()):
missing_fields = ', '.join([k for k, v in opts.items() if not v])
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
self.save(update_fields=['job_explanation'])
return (False, None)
if 'extra_vars' in kwargs:
self.handle_extra_data(kwargs['extra_vars'])
# remove any job_explanations that may have been set while job was in pending
if self.job_explanation != "":
self.job_explanation = ""
@@ -1495,44 +1470,21 @@ class UnifiedJob(
def cancel_dispatcher_process(self):
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
if not self.celery_task_id:
return False
return
canceled = []
# Special case for task manager (used during workflow job cancellation)
if not connection.get_autocommit():
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
try:
from dispatcherd.factories import get_control_from_settings
ctl = get_control_from_settings()
ctl.control('cancel', data={'uuid': self.celery_task_id})
except Exception:
logger.exception("Error sending cancel command to new dispatcher")
else:
try:
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
except Exception:
logger.exception("Error sending cancel command to legacy dispatcher")
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
return True # task manager itself needs to act under assumption that cancel was received
# Standard case with reply
try:
# Use control and reply mechanism to cancel and obtain confirmation
timeout = 5
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
from dispatcherd.factories import get_control_from_settings
ctl = get_control_from_settings()
results = ctl.control_with_reply('cancel', data={'uuid': self.celery_task_id}, expected_replies=1, timeout=timeout)
# Check if cancel was successful by checking if we got any results
return bool(results and len(results) > 0)
else:
# Original implementation
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
except socket.timeout:
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
except Exception:
logger.exception("error encountered when checking task status")
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
def cancel(self, job_explanation=None, is_chain=False):

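A sketch of the passwords-needed check that both versions of pre_start share, isolated from the model (check_start_args is a hypothetical helper, not AWX's): every password field the job requires must be present and non-empty in the decrypted start_args, else the job is refused with a list of what is missing.

def check_start_args(needed, start_args):
    opts = {field: start_args.get(field, '') for field in needed}
    if not all(opts.values()):
        missing = ', '.join(k for k, v in opts.items() if not v)
        return None, f'Missing needed fields: {missing}.'
    return opts, None

assert check_start_args(['ssh_password'], {}) == (None, 'Missing needed fields: ssh_password.')
assert check_start_args(['ssh_password'], {'ssh_password': 's3cret'}) == ({'ssh_password': 's3cret'}, None)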
View File

@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
):
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
self.grafana_key = grafana_key
self.dashboardId = int(dashboardId) if dashboardId != '' else None
self.panelId = int(panelId) if panelId != '' else None
self.dashboardId = int(dashboardId) if dashboardId is not None and dashboardId != "" else None
self.panelId = int(panelId) if panelId is not None and panelId != "" else None
self.annotation_tags = annotation_tags if annotation_tags is not None else []
self.grafana_no_verify_ssl = grafana_no_verify_ssl
self.isRegion = isRegion
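Both branches of this hunk are easy to get subtly wrong, so here is the intended guard spelled out as a standalone sketch: coerce only values that are neither None nor the empty string the form submits when the field is left blank (parse_optional_id is a hypothetical helper, not part of the backend):

def parse_optional_id(value):
    if value is None or value == '':
        return None
    return int(value)

assert parse_optional_id('') is None
assert parse_optional_id(None) is None
assert parse_optional_id('42') == 42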

View File

@@ -5,6 +5,8 @@ import time
import ssl
import logging
import irc.client
from django.utils.encoding import smart_str
from django.utils.translation import gettext_lazy as _
@@ -14,19 +16,6 @@ from awx.main.notifications.custom_notification_base import CustomNotificationBa
logger = logging.getLogger('awx.main.notifications.irc_backend')
def _irc():
"""
Prime the real jaraco namespace before importing irc.* so that
setuptools' vendored 'setuptools._vendor.jaraco' doesn't shadow
external 'jaraco.*' packages (e.g., jaraco.stream).
"""
import jaraco.stream # ensure the namespace package is established # noqa: F401
import irc.client as irc_client
import irc.connection as irc_connection
return irc_client, irc_connection
class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
init_parameters = {
"server": {"label": "IRC Server Address", "type": "string"},
@@ -51,15 +40,12 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
def open(self):
if self.connection is not None:
return False
irc_client, irc_connection = _irc()
if self.use_ssl:
connection_factory = irc_connection.Factory(wrapper=ssl.wrap_socket)
connection_factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
else:
connection_factory = irc_connection.Factory()
connection_factory = irc.connection.Factory()
try:
self.reactor = irc_client.Reactor()
self.reactor = irc.client.Reactor()
self.connection = self.reactor.server().connect(
self.server,
self.port,
@@ -67,7 +53,7 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
password=self.password,
connect_factory=connection_factory,
)
except irc_client.ServerConnectionError as e:
except irc.client.ServerConnectionError as e:
logger.error(smart_str(_("Exception connecting to irc server: {}").format(e)))
if not self.fail_silently:
raise
@@ -79,9 +65,8 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
self.connection = None
def on_connect(self, connection, event):
irc_client, _ = _irc()
for c in self.channels:
if irc_client.is_channel(c):
if irc.client.is_channel(c):
connection.join(c)
else:
for m in self.channels[c]:

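The deferred-import workaround being removed here is a general pattern worth noting; a sketch of its shape, where importing the real jaraco.stream namespace package first keeps setuptools' vendored jaraco copy from shadowing external jaraco.* distributions when irc.* is imported later:

def _lazy_irc():
    import jaraco.stream  # noqa: F401  (side effect: establish the namespace)
    import irc.client as irc_client
    import irc.connection as irc_connection
    return irc_client, irc_connection

# Resolved at use time rather than module import time, so import order
# elsewhere in the process cannot poison the namespace first:
# irc_client, irc_connection = _lazy_irc()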
View File

@@ -19,9 +19,6 @@ from django.utils.timezone import now as tz_now
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
# django-flags
from flags.state import flag_enabled
from ansible_base.lib.utils.models import get_type_for_model
# django-ansible-base
@@ -51,7 +48,6 @@ from awx.main.signals import disable_activity_stream
from awx.main.constants import ACTIVE_STATES
from awx.main.scheduler.dependency_graph import DependencyGraph
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.tasks.jobs import dispatch_waiting_jobs
import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.utils import decrypt_field
@@ -435,7 +431,6 @@ class TaskManager(TaskBase):
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
# will no longer be started and will be started on the next task manager cycle.
self.time_delta_job_explanation = timedelta(seconds=30)
self.control_nodes_to_notify: set[str] = set()
super().__init__(prefix="task_manager")
def after_lock_init(self):
@@ -524,19 +519,16 @@ class TaskManager(TaskBase):
task.save()
task.log_lifecycle("waiting")
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
self.control_nodes_to_notify.add(task.get_queue_name())
else:
# apply_async does a NOTIFY to the channel the dispatcher is listening to
# postgres will treat this as part of the transaction, which is what we want
if task.status != 'failed' and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
)
# apply_async does a NOTIFY to the channel the dispatcher is listening to
# postgres will treat this as part of the transaction, which is what we want
if task.status != 'failed' and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
)
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
@@ -729,8 +721,3 @@ class TaskManager(TaskBase):
for workflow_approval in self.get_expired_workflow_approvals():
self.timeout_approval_node(workflow_approval)
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
for controller_node in self.control_nodes_to_notify:
logger.info(f'Notifying node {controller_node} of new waiting jobs.')
dispatch_waiting_jobs.apply_async(queue=controller_node)
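The notify-once-per-node pattern above in isolation, as a runnable sketch with a stand-in task class; during the scheduling pass only queue names are collected, and the fan-out after the pass sends one message per control node no matter how many jobs went to waiting:

class FakeTask:
    def __init__(self, queue):
        self.queue = queue
    def get_queue_name(self):
        return self.queue

pending = [FakeTask('node1'), FakeTask('node1'), FakeTask('node2')]

# One entry per control node, however many tasks were scheduled there.
control_nodes_to_notify = {t.get_queue_name() for t in pending}

for controller_node in sorted(control_nodes_to_notify):
    # Stand-in for dispatch_waiting_jobs.apply_async(queue=controller_node)
    print(f'Notifying node {controller_node} of new waiting jobs.')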

View File

@@ -7,7 +7,7 @@ from django.conf import settings
# AWX
from awx import MODE
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
logger = logging.getLogger('awx.main.scheduler')
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
manager().schedule()
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def task_manager():
run_manager(TaskManager, "task")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def dependency_manager():
run_manager(DependencyManager, "dependency")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def workflow_manager():
run_manager(WorkflowManager, "workflow")
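The task_awx alias that this hunk reverts (and that other files in this diff adopt) is plain import aliasing; a sketch of why it matters once two libraries both export a decorator named task, using only names that appear elsewhere in this diff and assuming an AWX environment:

from awx.main.dispatch.publish import task as task_awx  # AWX's own decorator
from dispatcherd.publish import task                    # dispatcherd's decorator
from awx.main.dispatch import get_task_queuename

@task_awx(queue=get_task_queuename)
def awx_style_job():
    ...

@task(on_duplicate='queue_one', bind=True, queue=get_task_queuename)
def dispatcherd_style_job(binder):
    ...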

View File

@@ -38,6 +38,7 @@ from awx.main.models import (
InventorySource,
Job,
JobHostSummary,
JobTemplate,
Organization,
Project,
Role,
@@ -55,7 +56,10 @@ from awx.main.models import (
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image
from awx.main.fields import is_implicit_parent
from awx.main.fields import (
is_implicit_parent,
update_role_parentage_for_instance,
)
from awx.main import consumers
@@ -188,6 +192,31 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
label.delete()
def save_related_job_templates(sender, instance, **kwargs):
"""save_related_job_templates loops through all of the
job templates that use an Inventory that have had their
Organization updated. This triggers the rebuilding of the RBAC hierarchy
and ensures the proper access restrictions.
"""
if sender is not Inventory:
raise ValueError('This signal callback is only intended for use with Project or Inventory')
update_fields = kwargs.get('update_fields', None)
if (update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or kwargs.get('created', False):
return
if instance._prior_values_store.get('organization_id') != instance.organization_id:
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
for jt in jtq:
parents_added, parents_removed = update_role_parentage_for_instance(jt)
if parents_added or parents_removed:
logger.info(
'Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
)
)
def connect_computed_field_signals():
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
@@ -201,6 +230,7 @@ def connect_computed_field_signals():
connect_computed_field_signals()
post_save.connect(save_related_job_templates, sender=Inventory)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through)
m2m_changed.connect(rbac_activity_stream, Role.parents.through)

View File

@@ -1 +1 @@
from . import callback, facts, helpers, host_indirect, host_metrics, jobs, receptor, system # noqa
from . import host_metrics, jobs, receptor, system # noqa

View File

@@ -159,7 +159,7 @@ def cleanup_old_indirect_host_entries() -> None:
IndirectManagedNodeAudit.objects.filter(created__lt=limit).delete()
@task(queue=get_task_queuename, timeout=3600 * 5)
@task(queue=get_task_queuename)
def save_indirect_host_entries(job_id: int, wait_for_events: bool = True) -> None:
try:
job = Job.objects.get(id=job_id)
@@ -201,7 +201,7 @@ def save_indirect_host_entries(job_id: int, wait_for_events: bool = True) -> Non
logger.exception(f'Error processing indirect host data for job_id={job_id}')
@task(queue=get_task_queuename, timeout=3600 * 5)
@task(queue=get_task_queuename)
def cleanup_and_save_indirect_host_entries_fallback() -> None:
if not flag_enabled("FEATURE_INDIRECT_NODE_COUNTING_ENABLED"):
return

View File

@@ -7,7 +7,7 @@ from django.db.models import Count, F
from django.db.models.functions import TruncMonth
from django.utils.timezone import now
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.tasks.helpers import is_run_threshold_reached
from awx.conf.license import get_license
@@ -18,7 +18,7 @@ from awx.main.utils.db import bulk_update_sorted_by_id
logger = logging.getLogger('awx.main.tasks.host_metrics')
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def cleanup_host_metrics():
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
@@ -29,7 +29,7 @@ def cleanup_host_metrics():
logger.info("Finished cleanup_host_metrics")
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
def host_metric_summary_monthly():
"""Run cleanup host metrics summary monthly task each week"""
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):

View File

@@ -17,12 +17,9 @@ import urllib.parse as urlparse
# Django
from django.conf import settings
from django.db import transaction
# Shared code for the AWX platform
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import PermissionDenied
# Runner
import ansible_runner
@@ -31,12 +28,8 @@ import ansible_runner
import git
from gitdb.exc import BadName as BadGitName
# Dispatcherd
from dispatcherd.publish import task
from dispatcherd.utils import serialize_task
# AWX
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename
from awx.main.constants import (
PRIVILEGE_ESCALATION_METHODS,
@@ -44,13 +37,13 @@ from awx.main.constants import (
JOB_FOLDER_PREFIX,
MAX_ISOLATED_PATH_COLON_DELIMITER,
CONTAINER_VOLUMES_MOUNT_TYPES,
ACTIVE_STATES,
HOST_FACTS_FIELDS,
)
from awx.main.models import (
Instance,
Inventory,
InventorySource,
UnifiedJob,
Job,
AdHocCommand,
ProjectUpdate,
@@ -89,6 +82,8 @@ from awx.main.utils.common import (
from awx.conf.license import get_license
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.utils.update_model import update_model
from rest_framework.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
# Django flags
from flags.state import flag_enabled
@@ -115,15 +110,6 @@ def with_path_cleanup(f):
return _wrapped
@task(on_duplicate='queue_one', bind=True, queue=get_task_queuename)
def dispatch_waiting_jobs(binder):
for uj in UnifiedJob.objects.filter(status='waiting', controller_node=settings.CLUSTER_HOST_ID).only('id', 'status', 'polymorphic_ctype', 'celery_task_id'):
kwargs = uj.get_start_kwargs()
if not kwargs:
kwargs = {}
binder.control('run', data={'task': serialize_task(uj._get_task_class()), 'args': [uj.id], 'kwargs': kwargs, 'uuid': uj.celery_task_id})
class BaseTask(object):
model = None
event_model = None
@@ -131,7 +117,6 @@ class BaseTask(object):
callback_class = RunnerCallback
def __init__(self):
self.instance = None
self.cleanup_paths = []
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
self.runner_callback = self.callback_class(model=self.model)
@@ -319,8 +304,6 @@ class BaseTask(object):
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'):
if attr == 'ANSIBLE_STANDARD_SETTINGS_FILES':
continue # special case intended only for dynaconf use
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
@@ -468,48 +451,27 @@ class BaseTask(object):
def should_use_fact_cache(self):
return False
def transition_status(self, pk: int) -> bool:
"""Atomically transition status to running, if False returned, another process got it"""
with transaction.atomic():
# Explanation of parts for the fetch:
# .values - avoid loading a full object, this is known to lead to deadlocks due to signals
# the signals load other related rows which another process may be locking, and happens in practice
# of=('self',) - keeps FK tables out of the lock list, another way deadlocks can happen
# .get - just load the single job
instance_data = UnifiedJob.objects.select_for_update(of=('self',)).values('status', 'cancel_flag').get(pk=pk)
# If status is not waiting (obtained under lock) then this process does not have clearance to run
if instance_data['status'] == 'waiting':
if instance_data['cancel_flag']:
updated_status = 'canceled'
else:
updated_status = 'running'
# Explanation of the update:
# .filter - again, do not load the full object
# .update - a bulk update on just that one row, avoid loading unintended data
UnifiedJob.objects.filter(pk=pk).update(status=updated_status, start_args='')
elif instance_data['status'] == 'running':
logger.info(f'Job {pk} is being run by another process, exiting')
return False
return True
@with_path_cleanup
@with_signal_handling
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
if not self.instance: # Used to skip fetch for local runs
if not self.transition_status(pk):
logger.info(f'Job {pk} is being run by another process, exiting')
return
self.instance = self.model.objects.get(pk=pk)
if self.instance.status != 'canceled' and self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
if self.instance.status not in ACTIVE_STATES:
# Prevent starting the job if it has been reaped or handled by another process.
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
# Load the instance
self.instance = self.update_model(pk)
if self.instance.status != 'running':
logger.error(f'Not starting {self.instance.status} task pk={pk} because its status "{self.instance.status}" is not expected')
return
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# self.instance because of the update_model pattern and when it's used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
self.runner_callback.event_ct = 0
@@ -522,12 +484,6 @@ class BaseTask(object):
private_data_dir = None
try:
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
@@ -535,7 +491,6 @@ class BaseTask(object):
self.build_project_dir(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag or signal_callback():
logger.debug(f'detected pre-run cancel flag for {self.instance.log_format}')
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
@@ -658,9 +613,12 @@ class BaseTask(object):
elif status == 'canceled':
self.instance = self.update_model(pk)
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
if cancel_flag_value is False:
if (cancel_flag_value is False) and signal_callback():
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
status = 'failed'
elif cancel_flag_value is False:
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
status = 'failed'
except PolicyEvaluationError as exc:
self.runner_callback.delay_update(job_explanation=str(exc), result_traceback=str(exc))
except ReceptorNodeNotFound as exc:
@@ -688,9 +646,6 @@ class BaseTask(object):
# Field host_status_counts is used as a metric to check if event processing is finished
# we send notifications if it is, if not, callback receiver will send them
if not self.instance:
logger.error(f'Unified job pk={pk} appears to be deleted while running')
return
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
events_processed_hook(self.instance)
@@ -787,7 +742,6 @@ class SourceControlMixin(BaseTask):
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
sync_task.instance = local_project_sync # avoids "waiting" status check, performance
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
@@ -851,7 +805,7 @@ class SourceControlMixin(BaseTask):
self.release_lock(project)
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
class RunJob(SourceControlMixin, BaseTask):
"""
Run a job using ansible-playbook.
@@ -1174,7 +1128,7 @@ class RunJob(SourceControlMixin, BaseTask):
update_inventory_computed_fields.delay(inventory.id)
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
class RunProjectUpdate(BaseTask):
model = ProjectUpdate
event_model = ProjectUpdateEvent
@@ -1321,7 +1275,7 @@ class RunProjectUpdate(BaseTask):
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
logger.warning('Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
@@ -1346,7 +1300,7 @@ class RunProjectUpdate(BaseTask):
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = '+refs/heads/*:refs/remotes/origin/*'
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
@@ -1513,7 +1467,7 @@ class RunProjectUpdate(BaseTask):
return []
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
class RunInventoryUpdate(SourceControlMixin, BaseTask):
model = InventoryUpdate
event_model = InventoryUpdateEvent
@@ -1776,7 +1730,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
@@ -1929,7 +1883,7 @@ class RunAdHocCommand(BaseTask):
return d
@task_awx(queue=get_task_queuename)
@task(queue=get_task_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
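
Throughout this file the diff swaps bare @task(queue=...) registrations for @task_awx(...) and attaches per-task timeout and on_duplicate options. The sketch below illustrates how a registering decorator can carry those options; the registry and the submit() duplicate handling are toy stand-ins, not AWX's or dispatcherd's actual implementation.

TASK_REGISTRY = {}
PENDING = []

def task_awx_sketch(queue=None, timeout=None, on_duplicate='parallel', **kwargs):
    """Record dispatch options for a callable at import time (toy version)."""
    def decorator(fn):
        name = f"{fn.__module__}.{fn.__qualname__}"
        TASK_REGISTRY[name] = {'fn': fn, 'queue': queue, 'timeout': timeout, 'on_duplicate': on_duplicate}
        fn.task_name = name
        return fn
    return decorator

def submit(fn):
    opts = TASK_REGISTRY[fn.task_name]
    # In this toy, both 'discard' and 'queue_one' collapse duplicate queued
    # submissions; the real distinction (behavior with respect to copies that
    # are already running) lives in dispatcherd and is not modeled here.
    if fn.task_name in PENDING and opts['on_duplicate'] in ('discard', 'queue_one'):
        return
    PENDING.append(fn.task_name)

@task_awx_sketch(queue='tower_settings_change', timeout=600, on_duplicate='queue_one')
def clear_cache_sketch():
    pass

submit(clear_cache_sketch)
submit(clear_cache_sketch)  # collapsed as a duplicate
assert PENDING.count(clear_cache_sketch.task_name) == 1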

View File

@@ -8,6 +8,7 @@ from typing import Optional, Union
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from flags.state import flag_enabled
from opa_client import OpaClient
from opa_client.base import BaseClient
from requests import HTTPError
@@ -363,6 +364,9 @@ def opa_client(headers=None):
def evaluate_policy(instance):
# Policy evaluation for Policy as Code feature
if not flag_enabled("FEATURE_POLICY_AS_CODE_ENABLED"):
return
if not settings.OPA_HOST:
return

View File

@@ -32,7 +32,7 @@ from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task as task_awx
from awx.main.dispatch.publish import task
# Receptorctl
from receptorctl.socket_interface import ReceptorControl
@@ -852,7 +852,7 @@ def reload_receptor():
raise RuntimeError("Receptor reload failed")
@task_awx(on_duplicate='queue_one')
@task()
def write_receptor_config():
"""
This task runs async on each control node, K8S only.
@@ -875,7 +875,7 @@ def write_receptor_config():
reload_receptor()
@task_awx(queue=get_task_queuename, on_duplicate='discard')
@task(queue=get_task_queuename)
def remove_deprovisioned_node(hostname):
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

View File

@@ -14,21 +14,16 @@ class SignalExit(Exception):
class SignalState:
# SIGTERM: Sent by supervisord to process group on shutdown
# SIGUSR1: The dispatcherd cancel signal
signals = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)
def reset(self):
for for_signal in self.signals:
self.signal_flags[for_signal] = False
self.original_methods[for_signal] = None
self.sigterm_flag = False
self.sigint_flag = False
self.is_active = False # for nested context managers
self.original_sigterm = None
self.original_sigint = None
self.raise_exception = False
def __init__(self):
self.signal_flags = {}
self.original_methods = {}
self.reset()
def raise_if_needed(self):
@@ -36,28 +31,31 @@ class SignalState:
self.raise_exception = False # so it is not raised a second time in error handling
raise SignalExit()
def set_signal_flag(self, *args, for_signal=None):
self.signal_flags[for_signal] = True
logger.info(f'Processed signal {for_signal}, set exit flag')
def set_sigterm_flag(self, *args):
self.sigterm_flag = True
self.raise_if_needed()
def set_sigint_flag(self, *args):
self.sigint_flag = True
self.raise_if_needed()
def connect_signals(self):
for for_signal in self.signals:
self.original_methods[for_signal] = signal.getsignal(for_signal)
signal.signal(for_signal, lambda *args, for_signal=for_signal: self.set_signal_flag(*args, for_signal=for_signal))
self.original_sigterm = signal.getsignal(signal.SIGTERM)
self.original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGTERM, self.set_sigterm_flag)
signal.signal(signal.SIGINT, self.set_sigint_flag)
self.is_active = True
def restore_signals(self):
for for_signal in self.signals:
original_method = self.original_methods[for_signal]
signal.signal(for_signal, original_method)
# if we got a signal while context manager was active, call parent methods.
if self.signal_flags[for_signal]:
if callable(original_method):
try:
original_method()
except Exception as exc:
logger.info(f'Error processing original {for_signal} signal, error: {str(exc)}')
signal.signal(signal.SIGTERM, self.original_sigterm)
signal.signal(signal.SIGINT, self.original_sigint)
# if we got a signal while context manager was active, call parent methods.
if self.sigterm_flag:
if callable(self.original_sigterm):
self.original_sigterm()
if self.sigint_flag:
if callable(self.original_sigint):
self.original_sigint()
self.reset()
@@ -65,7 +63,7 @@ signal_state = SignalState()
def signal_callback():
return any(signal_state.signal_flags[for_signal] for for_signal in signal_state.signals)
return bool(signal_state.sigterm_flag or signal_state.sigint_flag)
def with_signal_handling(f):
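
The SignalState rewrite above replaces the per-signal booleans (sigterm_flag/sigint_flag) with a single signal_flags dict keyed over a signals tuple, so adding SIGUSR1 for dispatcherd cancellation required no new attributes. A self-contained sketch of that generalized pattern, with illustrative names rather than AWX's:

import signal

class DemoSignalState:
    # POSIX-only sketch: SIGUSR1 is unavailable on Windows.
    signals = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)

    def __init__(self):
        self.signal_flags = {s: False for s in self.signals}
        self.original_methods = {}

    def connect_signals(self):
        for s in self.signals:
            self.original_methods[s] = signal.getsignal(s)
            # the for_signal default binds the loop variable per handler
            signal.signal(s, lambda *args, for_signal=s: self.signal_flags.__setitem__(for_signal, True))

    def restore_signals(self):
        for s in self.signals:
            signal.signal(s, self.original_methods[s])

state = DemoSignalState()
state.connect_signals()
signal.raise_signal(signal.SIGUSR1)  # simulate the dispatcherd cancel signal
assert state.signal_flags[signal.SIGUSR1]
assert not state.signal_flags[signal.SIGTERM]
state.restore_signals()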

View File

@@ -1,77 +1,78 @@
# Python
from collections import namedtuple
import functools
import importlib
import itertools
import json
import logging
import os
import psycopg
from io import StringIO
from contextlib import redirect_stdout
import shutil
import time
from collections import namedtuple
from contextlib import redirect_stdout
from datetime import datetime
from distutils.version import LooseVersion as Version
from io import StringIO
from datetime import datetime
# Runner
import ansible_runner.cleanup
import psycopg
from ansible_base.lib.utils.db import advisory_lock
# django-ansible-base
from ansible_base.resource_registry.tasks.sync import SyncExecutor
# Django
from django.conf import settings
from django.db import connection, transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now, timedelta
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.query import QuerySet
# Django-CRUM
from crum import impersonate
# Django flags
from flags.state import flag_enabled
# Runner
import ansible_runner.cleanup
# dateutil
from dateutil.parser import parse as parse_date
# Django
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db import DatabaseError, IntegrityError, connection, transaction
from django.db.models.fields.related import ForeignKey
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now, timedelta
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext_noop
# Django flags
from flags.state import flag_enabled
from rest_framework.exceptions import PermissionDenied
# django-ansible-base
from ansible_base.resource_registry.tasks.sync import SyncExecutor
from ansible_base.lib.utils.db import advisory_lock
# AWX
from awx import __version__ as awx_application_version
from awx.conf import settings_registry
from awx.main import analytics
from awx.main.access import access_registry
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
from awx.main.consumers import emit_channel_notification
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.dispatch.publish import task as task_awx
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
Inventory,
Job,
Notification,
Schedule,
SmartInventoryMembership,
TowerScheduleState,
UnifiedJob,
Notification,
Inventory,
SmartInventoryMembership,
Job,
convert_jsonfields,
)
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_task_queuename, reaper
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
from awx.main.utils.reload import stop_local_services
from awx.main.tasks.helpers import is_run_threshold_reached
from awx.main.tasks.host_indirect import save_indirect_host_entries
from awx.main.tasks.receptor import administrative_workunit_reaper, get_receptor_ctl, worker_cleanup, worker_info, write_receptor_config
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
from awx.main.utils.reload import stop_local_services
from dispatcherd.publish import task
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
from rest_framework.exceptions import PermissionDenied
logger = logging.getLogger('awx.main.tasks.system')
@@ -82,12 +83,7 @@ Try upgrading OpenSSH or providing your private key in an different format. \
'''
def _run_dispatch_startup_common():
"""
Execute the common startup initialization steps.
This includes updating schedules, syncing instance membership, running
startup reaping, and resetting metrics.
"""
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
# TODO: Enable this on VM installs
@@ -97,14 +93,14 @@ def _run_dispatch_startup_common():
try:
convert_jsonfields()
except Exception:
logger.exception("Failed JSON field conversion, skipping.")
logger.exception("Failed json field conversion, skipping.")
startup_logger.debug("Syncing schedules")
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule %s.", sch)
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
@@ -124,67 +120,25 @@ def _run_dispatch_startup_common():
apply_cluster_membership_policies()
cluster_node_heartbeat()
reaper.startup_reaping()
reaper.reap_waiting(grace_period=0)
m = DispatcherMetrics()
m.reset_values()
def _legacy_dispatch_startup():
"""
Legacy branch for startup: simply performs reaping of waiting jobs with a zero grace period.
"""
logger.debug("Legacy dispatcher: calling reaper.reap_waiting with grace_period=0")
reaper.reap_waiting(grace_period=0)
def _dispatcherd_dispatch_startup():
"""
New dispatcherd branch for startup: uses the control API to re-submit waiting jobs.
"""
logger.debug("Dispatcherd enabled: dispatching waiting jobs via control channel")
from awx.main.tasks.jobs import dispatch_waiting_jobs
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
def dispatch_startup():
"""
System initialization at startup.
First, execute the common logic.
Then, if the FEATURE_DISPATCHERD_ENABLED flag is set, re-submit waiting jobs via the control API;
otherwise, fall back to legacy reaping of waiting jobs.
"""
_run_dispatch_startup_common()
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
_dispatcherd_dispatch_startup()
else:
_legacy_dispatch_startup()
def inform_cluster_of_shutdown():
"""
Clean system shutdown that marks the current instance offline.
In legacy mode, it also reaps waiting jobs.
In dispatcherd mode, it relies on dispatcherd's built-in cleanup.
"""
try:
inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
except Instance.DoesNotExist:
logger.exception("Cluster host not found: %s", settings.CLUSTER_HOST_ID)
return
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
logger.debug("Dispatcherd mode: no extra reaping required for instance %s", inst.hostname)
else:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
try:
logger.debug("Legacy mode: reaping waiting jobs for instance %s", inst.hostname)
reaper.reap_waiting(inst, grace_period=0)
reaper.reap_waiting(this_inst, grace_period=0)
except Exception:
logger.exception("Failed to reap waiting jobs for %s", inst.hostname)
logger.warning("Normal shutdown processed for instance %s; instance removed from capacity pool.", inst.hostname)
logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task_awx(queue=get_task_queuename, timeout=3600 * 5)
@task(queue=get_task_queuename)
def migrate_jsonfield(table, pkfield, columns):
batchsize = 10000
with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
@@ -230,7 +184,7 @@ def migrate_jsonfield(table, pkfield, columns):
logger.warning(f"Migration of {table} to jsonb is finished.")
@task_awx(queue=get_task_queuename, timeout=3600, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def apply_cluster_membership_policies():
from awx.main.signals import disable_activity_stream
@@ -342,7 +296,7 @@ def apply_cluster_membership_policies():
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task_awx(queue='tower_settings_change', timeout=600)
@task(queue='tower_settings_change')
def clear_setting_cache(setting_keys):
# log that cache is being cleared
logger.info(f"clear_setting_cache of keys {setting_keys}")
@@ -355,7 +309,7 @@ def clear_setting_cache(setting_keys):
cache.delete_many(cache_keys)
@task_awx(queue='tower_broadcast_all', timeout=600)
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
@@ -373,7 +327,7 @@ def delete_project_files(project_path):
logger.exception('Could not remove lock file {}'.format(lock_file))
@task_awx(queue='tower_broadcast_all')
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
@@ -383,7 +337,7 @@ def profile_sql(threshold=1, minutes=1):
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task_awx(queue=get_task_queuename, timeout=1800)
@task(queue=get_task_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
@@ -428,13 +382,13 @@ def events_processed_hook(unified_job):
save_indirect_host_entries.delay(unified_job.id)
@task_awx(queue=get_task_queuename, timeout=3600 * 5, on_duplicate='discard')
@task(queue=get_task_queuename)
def gather_analytics():
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task_awx(queue=get_task_queuename, timeout=600, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
@@ -496,18 +450,37 @@ class CleanupImagesAndFiles:
cls.run_remote(this_inst, **kwargs)
@task_awx(queue='tower_broadcast_all', timeout=3600)
@task(queue='tower_broadcast_all')
def handle_removed_image(remove_images=None):
"""Special broadcast invocation of this method to handle case of deleted EE"""
CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='')
@task_awx(queue=get_task_queuename, timeout=3600, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def cleanup_images_and_files():
CleanupImagesAndFiles.run(image_prune=True)
@task_awx(queue=get_task_queuename, timeout=600, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def cluster_node_health_check(node):
"""
Used for the health check endpoint; refreshes the status of the instance, but must be run on the target node
"""
if node == '':
logger.warning('Local health check incorrectly called with blank string')
return
elif node != settings.CLUSTER_HOST_ID:
logger.warning(f'Local health check for {node} incorrectly sent to {settings.CLUSTER_HOST_ID}')
return
try:
this_inst = Instance.objects.me()
except Instance.DoesNotExist:
logger.warning(f'Instance record for {node} missing, could not check capacity.')
return
this_inst.local_health_check()
@task(queue=get_task_queuename)
def execution_node_health_check(node):
if node == '':
logger.warning('Remote health check incorrectly called with blank string')
@@ -575,16 +548,8 @@ def inspect_established_receptor_connections(mesh_status):
def inspect_execution_and_hop_nodes(instance_list):
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
node_lookup = {inst.hostname: inst for inst in instance_list}
try:
ctl = get_receptor_ctl()
except FileNotFoundError:
logger.error('Receptor daemon not running, skipping execution node check')
return
try:
mesh_status = ctl.simple_command('status')
except ValueError as exc:
logger.error(f'Error running receptorctl status command, error: {str(exc)}')
return
ctl = get_receptor_ctl()
mesh_status = ctl.simple_command('status')
inspect_established_receptor_connections(mesh_status)
@@ -632,109 +597,8 @@ def inspect_execution_and_hop_nodes(instance_list):
execution_node_health_check.apply_async([hostname])
@task_awx(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
"""
Original implementation for AWX dispatcher.
Uses worker_tasks from bind_kwargs to track running tasks.
"""
# Run common instance management logic
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
if this_inst is None:
return # Early return case from instance management
# Check versions
_heartbeat_check_versions(this_inst, instance_list)
# Handle lost instances
_heartbeat_handle_lost_instances(lost_instances, this_inst)
# Run local reaper - original implementation using worker_tasks
if worker_tasks is not None:
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
# Convert dispatch_time to datetime
ref_time = datetime.fromisoformat(dispatch_time) if dispatch_time else now()
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
@task(queue=get_task_queuename, bind=True)
def adispatch_cluster_node_heartbeat(binder):
"""
Dispatcherd implementation.
Uses Control API to get running tasks.
"""
# Run common instance management logic
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
if this_inst is None:
return # Early return case from instance management
# Check versions
_heartbeat_check_versions(this_inst, instance_list)
# Handle lost instances
_heartbeat_handle_lost_instances(lost_instances, this_inst)
# Get running tasks using dispatcherd API
active_task_ids = _get_active_task_ids_from_dispatcherd(binder)
if active_task_ids is None:
logger.warning("No active task IDs retrieved from dispatcherd, skipping reaper")
return # Failed to get task IDs, don't attempt reaping
# Run local reaper using tasks from dispatcherd
ref_time = now() # No dispatch_time in dispatcherd version
logger.debug(f"Running reaper with {len(active_task_ids)} excluded UUIDs")
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
# If waiting jobs are hanging out, resubmit them
if UnifiedJob.objects.filter(controller_node=settings.CLUSTER_HOST_ID, status='waiting').exists():
from awx.main.tasks.jobs import dispatch_waiting_jobs
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
def _get_active_task_ids_from_dispatcherd(binder):
"""
Retrieve active task IDs from the dispatcherd control API.
Returns:
list: List of active task UUIDs
None: If there was an error retrieving the data
"""
active_task_ids = []
try:
logger.debug("Querying dispatcherd API for running tasks")
data = binder.control('running')
# The reply is a dict with a node_id entry plus one entry per running task; collect the task UUIDs
data.pop('node_id', None)
# Extract task UUIDs from data structure
for task_key, task_value in data.items():
if isinstance(task_value, dict) and 'uuid' in task_value:
active_task_ids.append(task_value['uuid'])
logger.debug(f"Found active task with UUID: {task_value['uuid']}")
elif isinstance(task_key, str):
# Handle case where UUID might be the key
active_task_ids.append(task_key)
logger.debug(f"Found active task with key: {task_key}")
logger.debug(f"Retrieved {len(active_task_ids)} active task IDs from dispatcherd")
return active_task_ids
except Exception:
logger.exception("Failed to get running tasks from dispatcherd")
return None
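
The parsing above walks whatever binder.control('running') returns; the exact reply shape is not shown in this diff, so the sketch below assumes a dict with a node_id entry plus one entry per task, mirroring how the code pops node_id and then inspects each value for a uuid key. All names here are illustrative.

def extract_task_ids(reply):
    data = dict(reply)          # copy so the caller's reply stays intact
    data.pop('node_id', None)   # metadata entry, not a task
    ids = []
    for key, value in data.items():
        if isinstance(value, dict) and 'uuid' in value:
            ids.append(value['uuid'])
        elif isinstance(key, str):
            ids.append(key)     # fall back to treating the key as the uuid
    return ids

print(extract_task_ids({'node_id': 'awx-1', 'job-42': {'uuid': 'job-42', 'task': 'sleep_task'}}))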
def _heartbeat_instance_management():
"""Common logic for heartbeat instance management."""
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
@@ -761,7 +625,7 @@ def _heartbeat_instance_management():
this_inst.local_health_check()
if startup_event and this_inst.capacity != 0:
logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
return None, None, None # Early return case
return
elif not last_last_seen:
logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
@@ -773,14 +637,8 @@ def _heartbeat_instance_management():
logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
this_inst.local_health_check()
else:
logger.error("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
return None, None, None
return this_inst, instance_list, lost_instances
def _heartbeat_check_versions(this_inst, instance_list):
"""Check versions across instances and determine if shutdown is needed."""
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# IFF any node has a greater version than we do, then we'll shutdown services
for other_inst in instance_list:
if other_inst.node_type in ('execution', 'hop'):
continue
@@ -797,9 +655,6 @@ def _heartbeat_check_versions(this_inst, instance_list):
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
def _heartbeat_handle_lost_instances(lost_instances, this_inst):
"""Handle lost instances by reaping their jobs and marking them offline."""
for other_inst in lost_instances:
try:
explanation = "Job reaped due to instance shutdown"
@@ -830,8 +685,17 @@ def _heartbeat_handle_lost_instances(lost_instances, this_inst):
else:
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
# Run local reaper
if worker_tasks is not None:
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
@task_awx(queue=get_task_queuename, timeout=1800, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def awx_receptor_workunit_reaper():
"""
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
@@ -854,16 +718,8 @@ def awx_receptor_workunit_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
logger.debug("Checking for unreleased receptor work units")
try:
receptor_ctl = get_receptor_ctl()
except FileNotFoundError:
logger.info('Receptorctl sockfile not found for workunit reaper, doing nothing')
return
try:
receptor_work_list = receptor_ctl.simple_command("work list")
except ValueError as exc:
logger.info(f'Error getting work list for workunit reaper, error: {str(exc)}')
return
receptor_ctl = get_receptor_ctl()
receptor_work_list = receptor_ctl.simple_command("work list")
unit_ids = [id for id in receptor_work_list]
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
@@ -877,7 +733,7 @@ def awx_receptor_workunit_reaper():
administrative_workunit_reaper(receptor_work_list)
@task_awx(queue=get_task_queuename, timeout=1800, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
@@ -900,7 +756,7 @@ def awx_k8s_reaper():
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task_awx(queue=get_task_queuename, timeout=3600 * 5, on_duplicate='discard')
@task(queue=get_task_queuename)
def awx_periodic_scheduler():
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
@@ -959,7 +815,7 @@ def awx_periodic_scheduler():
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
@task_awx(queue=get_task_queuename, timeout=3600)
@task(queue=get_task_queuename)
def handle_failure_notifications(task_ids):
"""A task-ified version of the method that sends notifications."""
found_task_ids = set()
@@ -974,7 +830,7 @@ def handle_failure_notifications(task_ids):
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
@task_awx(queue=get_task_queuename, timeout=3600 * 5)
@task(queue=get_task_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
@@ -1024,7 +880,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
return False
@task_awx(queue=get_task_queuename, timeout=3600, on_duplicate='queue_one')
@task(queue=get_task_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
@@ -1040,7 +896,7 @@ def update_host_smart_inventory_memberships():
smart_inventory.update_computed_fields()
@task_awx(queue=get_task_queuename, timeout=3600 * 5)
@task(queue=get_task_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
@@ -1102,7 +958,7 @@ def _reconstruct_relationships(copy_mapping):
new_obj.save()
@task_awx(queue=get_task_queuename, timeout=600)
@task(queue=get_task_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
@@ -1157,7 +1013,7 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
update_inventory_computed_fields.delay(new_obj.id)
@task_awx(queue=get_task_queuename, timeout=3600, on_duplicate='discard')
@task(queue=get_task_queuename)
def periodic_resource_sync():
if not getattr(settings, 'RESOURCE_SERVER', None):
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")

View File

@@ -8,12 +8,5 @@
"CONTROLLER_PASSWORD": "fooo",
"CONTROLLER_USERNAME": "fooo",
"CONTROLLER_OAUTH_TOKEN": "",
"CONTROLLER_VERIFY_SSL": "False",
"AAP_HOSTNAME": "https://foo.invalid",
"AAP_PASSWORD": "fooo",
"AAP_USERNAME": "fooo",
"AAP_VALIDATE_CERTS": "False",
"CONTROLLER_REQUEST_TIMEOUT": "fooo",
"AAP_REQUEST_TIMEOUT": "fooo",
"AAP_TOKEN": ""
"CONTROLLER_VERIFY_SSL": "False"
}

View File

@@ -1,9 +0,0 @@
---
- hosts: all
gather_facts: false
connection: local
vars:
sleep_interval: 5
tasks:
- name: sleep for a specified interval
command: sleep '{{ sleep_interval }}'

View File

@@ -1,57 +1,17 @@
import time
import logging
from dispatcherd.publish import task
from django.db import connection
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task as old_task
from ansible_base.lib.utils.db import advisory_lock
from awx.main.dispatch.publish import task
logger = logging.getLogger(__name__)
@old_task(queue=get_task_queuename)
@task(queue=get_task_queuename)
def sleep_task(seconds=10, log=False):
if log:
logger.info('starting sleep_task')
time.sleep(seconds)
if log:
logger.info('finished sleep_task')
@task()
def sleep_break_connection(seconds=0.2):
"""
Interact with the database in an intentionally breaking way.
After this finishes, queries made by this connection are expected to error
with "the connection is closed"
This is obviously a problem for any task that runs afterwards.
This task deliberately breaks the connection so that the fixes can be demonstrated.
"""
with connection.cursor() as cursor:
cursor.execute(f"SET idle_session_timeout = '{seconds / 2}s';")
logger.info(f'sleeping for {seconds}s > {seconds / 2}s session timeout')
time.sleep(seconds)
for i in range(1, 3):
logger.info(f'\nRunning query number {i}')
try:
with connection.cursor() as cursor:
cursor.execute("SELECT 1;")
logger.info(' query worked, not expected')
except Exception as exc:
logger.info(f' query errored as expected\ntype: {type(exc)}\nstr: {str(exc)}')
logger.info(f'Connection present: {bool(connection.connection)}, reports closed: {getattr(connection.connection, "closed", "not_found")}')
@task()
def advisory_lock_exception():
time.sleep(0.2) # so it can fill up all the workers... hacky for now
with advisory_lock('advisory_lock_exception', lock_session_timeout_milliseconds=20):
raise RuntimeError('this is an intentional error')

View File

@@ -7,6 +7,7 @@ from django.core.serializers.json import DjangoJSONEncoder
from django.utils.functional import Promise
from django.utils.encoding import force_str
from drf_yasg.codecs import OpenAPICodecJson
import pytest
from awx.api.versioning import drf_reverse
@@ -42,10 +43,10 @@ class TestSwaggerGeneration:
@pytest.fixture(autouse=True, scope='function')
def _prepare(self, get, admin):
if not self.__class__.JSON:
# drf-spectacular returns OpenAPI schema directly from schema endpoint
url = drf_reverse('api:schema-json') + '?format=json'
url = drf_reverse('api:schema-swagger-ui') + '?format=openapi'
response = get(url, user=admin)
data = response.data
codec = OpenAPICodecJson([])
data = codec.generate_swagger_object(response.data)
if response.has_header('X-Deprecated-Paths'):
data['deprecated_paths'] = json.loads(response['X-Deprecated-Paths'])

View File

@@ -150,24 +150,3 @@ def test_ship_credential(setting_map, expected_result, expected_auth, temp_analy
assert mock_analytic_post.call_args[1]['auth'] == expected_auth
else:
mock_analytic_post.assert_not_called()
@pytest.mark.django_db
def test_gather_cleanup_on_auth_failure(mock_valid_license, temp_analytic_tar):
settings.INSIGHTS_TRACKING_STATE = True
settings.AUTOMATION_ANALYTICS_URL = 'https://example.com/api'
settings.REDHAT_USERNAME = 'test_user'
settings.REDHAT_PASSWORD = 'test_password'
with tempfile.NamedTemporaryFile(delete=False, suffix='.tar.gz') as temp_file:
temp_file_path = temp_file.name
try:
with mock.patch('awx.main.analytics.core.ship', return_value=False):
with mock.patch('awx.main.analytics.core.package', return_value=temp_file_path):
gather(module=importlib.import_module(__name__), collection_type='scheduled')
assert not os.path.exists(temp_file_path), "Temp file was not cleaned up after ship failure"
finally:
if os.path.exists(temp_file_path):
os.remove(temp_file_path)

View File

@@ -30,7 +30,6 @@ EXPECTED_VALUES = {
'awx_license_instance_free': 0,
'awx_pending_jobs_total': 0,
'awx_database_connections_total': 1,
'awx_license_expiry': 0,
}
@@ -49,7 +48,7 @@ def test_metrics_counts(organization_factory, job_template_factory, workflow_job
for gauge in gauges:
for sample in gauge.samples:
# name, label, value, timestamp, exemplar
name, _, value, _, _, _ = sample
name, _, value, _, _ = sample
assert EXPECTED_VALUES[name] == value
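
The unpack change above (six values down to five) tracks the shape of prometheus_client's Sample namedtuple, which differs across library versions; newer releases append an extra field, so tuple-unpacking must match the installed version. Accessing fields by name, as in the sketch below, avoids pinning the code to one tuple width; this is an alternative approach, not what the test does.

from prometheus_client import Gauge

g = Gauge('awx_demo_gauge', 'illustrative gauge for the Sample shape')
g.set(1)

for metric in g.collect():
    for sample in metric.samples:
        # named-field access works regardless of how many fields Sample grows
        print(sample.name, sample.labels, sample.value)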

View File

@@ -287,72 +287,6 @@ def test_sa_grant_private_credential_to_team_through_role_teams(post, credential
assert response.status_code == 400
@pytest.mark.django_db
def test_grant_credential_to_team_different_organization_through_role_teams(post, get, credential, organizations, admin, org_admin, team, team_member):
# Test that credential from different org can be assigned to team by a superuser through role_teams_list endpoint
orgs = organizations(2)
credential.organization = orgs[0]
credential.save()
team.organization = orgs[1]
team.save()
# Non-superuser (org_admin) trying cross-org assignment should be denied
response = post(reverse('api:role_teams_list', kwargs={'pk': credential.use_role.id}), {'id': team.id}, org_admin)
assert response.status_code == 400
assert (
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
in response.data['msg']
)
# Superuser (admin) can do cross-org assignment
response = post(reverse('api:role_teams_list', kwargs={'pk': credential.use_role.id}), {'id': team.id}, admin)
assert response.status_code == 204
assert credential.use_role in team.member_role.children.all()
assert team_member in credential.read_role
assert team_member in credential.use_role
assert team_member not in credential.admin_role
@pytest.mark.django_db
def test_grant_credential_to_team_different_organization(post, get, credential, organizations, admin, org_admin, team, team_member):
# Test that credential from different org can be assigned to team by a superuser
orgs = organizations(2)
credential.organization = orgs[0]
credential.save()
team.organization = orgs[1]
team.save()
# Non-superuser (org_admin, ...) trying cross-org assignment should be denied
response = post(reverse('api:team_roles_list', kwargs={'pk': team.id}), {'id': credential.use_role.id}, org_admin)
assert response.status_code == 400
assert (
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
in response.data['msg']
)
# Superuser (system admin) can do cross-org assignment
response = post(reverse('api:team_roles_list', kwargs={'pk': team.id}), {'id': credential.use_role.id}, admin)
assert response.status_code == 204
assert credential.use_role in team.member_role.children.all()
assert team_member in credential.read_role
assert team_member in credential.use_role
assert team_member not in credential.admin_role
# Team member can see the credential in API
response = get(reverse('api:team_credentials_list', kwargs={'pk': team.id}), team_member)
assert response.status_code == 200
assert response.data['count'] == 1
assert response.data['results'][0]['id'] == credential.id
# Team member can see the credential in general credentials API
response = get(reverse('api:credential_list'), team_member)
assert response.status_code == 200
assert any(cred['id'] == credential.id for cred in response.data['results'])
@pytest.mark.django_db
def test_sa_grant_private_credential_to_team_through_team_roles(post, credential, admin, team):
# not even a system admin can grant a private cred to a team though
@@ -1290,30 +1224,6 @@ def test_custom_credential_type_create(get, post, organization, admin):
assert decrypt_field(cred, 'api_token') == 'secret'
@pytest.mark.django_db
def test_galaxy_create_ok(post, organization, admin):
params = {
'credential_type': 1,
'name': 'Galaxy credential',
'inputs': {
'url': 'https://galaxy.ansible.com',
'token': 'some_galaxy_token',
},
}
galaxy = CredentialType.defaults['galaxy_api_token']()
galaxy.save()
params['user'] = admin.id
params['credential_type'] = galaxy.pk
response = post(reverse('api:credential_list'), params, admin)
assert response.status_code == 201
assert Credential.objects.count() == 1
cred = Credential.objects.all()[:1].get()
assert cred.credential_type == galaxy
assert cred.inputs['url'] == 'https://galaxy.ansible.com'
assert decrypt_field(cred, 'token') == 'some_galaxy_token'
#
# misc xfail conditions
#

View File

@@ -0,0 +1,64 @@
import pytest
from awx.api.versioning import reverse
from awx.main.models import Organization
@pytest.mark.django_db
class TestImmutableSharedFields:
@pytest.fixture(autouse=True)
def configure_settings(self, settings):
settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT = False
def test_create_raises_permission_denied(self, admin_user, post):
orgA = Organization.objects.create(name='orgA')
resp = post(
url=reverse('api:team_list'),
data={'name': 'teamA', 'organization': orgA.id},
user=admin_user,
expect=403,
)
assert "Creation of this resource is not allowed" in resp.data['detail']
def test_perform_delete_raises_permission_denied(self, admin_user, delete):
orgA = Organization.objects.create(name='orgA')
team = orgA.teams.create(name='teamA')
resp = delete(
url=reverse('api:team_detail', kwargs={'pk': team.id}),
user=admin_user,
expect=403,
)
assert "Deletion of this resource is not allowed" in resp.data['detail']
def test_perform_update(self, admin_user, patch):
orgA = Organization.objects.create(name='orgA')
# allow patching non-shared fields
patch(
url=reverse('api:organization_detail', kwargs={'pk': orgA.id}),
data={"max_hosts": 76},
user=admin_user,
expect=200,
)
# prevent patching shared fields
resp = patch(url=reverse('api:organization_detail', kwargs={'pk': orgA.id}), data={"name": "orgB"}, user=admin_user, expect=403)
assert "Cannot change shared field" in resp.data['name']
@pytest.mark.parametrize(
'role',
['admin_role', 'member_role'],
)
@pytest.mark.parametrize('resource', ['organization', 'team'])
def test_prevent_assigning_member_to_organization_or_team(self, admin_user, post, resource, role):
orgA = Organization.objects.create(name='orgA')
if resource == 'organization':
role = getattr(orgA, role)
elif resource == 'team':
teamA = orgA.teams.create(name='teamA')
role = getattr(teamA, role)
resp = post(
url=reverse('api:user_roles_list', kwargs={'pk': admin_user.id}),
data={'id': role.id},
user=admin_user,
expect=403,
)
assert f"Cannot directly modify user membership to {resource}." in resp.data['msg']

View File

@@ -1,5 +1,3 @@
from unittest import mock
import pytest
from awx.api.versioning import reverse
@@ -7,9 +5,6 @@ from awx.main.models.activity_stream import ActivityStream
from awx.main.models.ha import Instance
from django.test.utils import override_settings
from django.http import HttpResponse
from rest_framework import status
INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, node_type='execution', memory=36000000000, cpu_capacity=6, mem_capacity=42)
@@ -92,11 +87,3 @@ def test_custom_hostname_regex(post, admin_user):
"peers": [],
}
post(url=url, user=admin_user, data=data, expect=value[1])
def test_instance_install_bundle(get, admin_user, system_auditor):
instance = Instance.objects.create(**INSTANCE_KWARGS)
url = reverse('api:instance_install_bundle', kwargs={'pk': instance.pk})
with mock.patch('awx.api.views.instance_install_bundle.InstanceInstallBundle.get', return_value=HttpResponse({'test': 'data'}, status=status.HTTP_200_OK)):
get(url=url, user=admin_user, expect=200)
get(url=url, user=system_auditor, expect=403)

View File

@@ -521,20 +521,6 @@ class TestInventorySourceCredential:
patch(url=inv_src.get_absolute_url(), data={'credential': aws_cred.pk}, expect=200, user=admin_user)
assert list(inv_src.credentials.values_list('id', flat=True)) == [aws_cred.pk]
@pytest.mark.skip(reason="Delay until AAP-53978 completed")
def test_vmware_cred_create_esxi_source(self, inventory, admin_user, organization, post, get):
"""Test that a vmware esxi source can be added with a vmware credential"""
from awx.main.models.credential import Credential, CredentialType
vmware = CredentialType.defaults['vmware']()
vmware.save()
vmware_cred = Credential.objects.create(credential_type=vmware, name="bar", organization=organization)
inv_src = InventorySource.objects.create(inventory=inventory, name='foobar', source='vmware_esxi')
r = post(url=reverse('api:inventory_source_credentials_list', kwargs={'pk': inv_src.pk}), data={'id': vmware_cred.pk}, expect=204, user=admin_user)
g = get(inv_src.get_absolute_url(), admin_user)
assert r.status_code == 204
assert g.data['credential'] == vmware_cred.pk
@pytest.mark.django_db
class TestControlledBySCM:

View File

@@ -1,191 +0,0 @@
import pytest
from unittest.mock import patch, MagicMock
from awx.api.versioning import reverse
# Generated by Cursor (claude-4-sonnet)
@pytest.mark.django_db
class TestLicenseCacheClearing:
"""Test cache clearing for LICENSE setting changes"""
def test_license_from_manifest_clears_cache(self, admin_user, post):
"""Test that posting a manifest to /api/v2/config/ clears the LICENSE cache"""
# Mock the licenser and clear_setting_cache
with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
'awx.api.views.root.clear_setting_cache'
) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:
# Set up mock license data
mock_license_data = {'valid_key': True, 'license_type': 'enterprise', 'instance_count': 100, 'subscription_name': 'Test Enterprise License'}
# Mock the validation and license processing
mock_validate.return_value = [{'some': 'manifest_data'}]
mock_licenser = MagicMock()
mock_licenser.license_from_manifest.return_value = mock_license_data
mock_get_licenser.return_value = mock_licenser
# Prepare the request data (base64 encoded manifest)
manifest_data = {'manifest': 'ZmFrZS1tYW5pZmVzdC1kYXRh'} # base64 for "fake-manifest-data"
# Make the POST request
url = reverse('api:api_v2_config_view')
response = post(url, manifest_data, admin_user, expect=200)
# Verify the response
assert response.data == mock_license_data
# Verify license_from_manifest was called
mock_licenser.license_from_manifest.assert_called_once()
# Verify on_commit was called (may be multiple times due to other settings)
assert mock_on_commit.call_count >= 1
# Execute all on_commit callbacks to trigger cache clearing
for call_args in mock_on_commit.call_args_list:
callback = call_args[0][0]
callback()
# Verify that clear_setting_cache.delay was called with ['LICENSE']
mock_clear_cache.delay.assert_any_call(['LICENSE'])
def test_config_delete_clears_cache(self, admin_user, delete):
"""Test that DELETE /api/v2/config/ clears the LICENSE cache"""
with patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:
# Make the DELETE request
url = reverse('api:api_v2_config_view')
delete(url, admin_user, expect=204)
# Verify on_commit was called at least once
assert mock_on_commit.call_count >= 1
# Execute all on_commit callbacks to trigger cache clearing
for call_args in mock_on_commit.call_args_list:
callback = call_args[0][0]
callback()
mock_clear_cache.delay.assert_called_once_with(['LICENSE'])
def test_attach_view_clears_cache(self, admin_user, post):
"""Test that posting to /api/v2/config/attach/ clears the LICENSE cache"""
with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch(
'django.db.connection.on_commit'
) as mock_on_commit, patch('awx.api.views.root.settings') as mock_settings:
# Set up subscription credentials in settings
mock_settings.SUBSCRIPTIONS_CLIENT_ID = 'test-client-id'
mock_settings.SUBSCRIPTIONS_CLIENT_SECRET = 'test-client-secret'
# Set up mock licenser with validated subscriptions
mock_licenser = MagicMock()
subscription_data = {'subscription_id': 'test-subscription-123', 'valid_key': False, 'license_type': 'enterprise', 'instance_count': 50}
mock_licenser.validate_rh.return_value = [subscription_data]
mock_get_licenser.return_value = mock_licenser
# Prepare request data
request_data = {'subscription_id': 'test-subscription-123'}
# Make the POST request
url = reverse('api:api_v2_attach_view')
response = post(url, request_data, admin_user, expect=200)
# Verify the response includes valid_key=True
assert response.data['valid_key'] is True
assert response.data['subscription_id'] == 'test-subscription-123'
# Verify settings.LICENSE was set
expected_license = subscription_data.copy()
expected_license['valid_key'] = True
assert mock_settings.LICENSE == expected_license
# Verify cache clearing was scheduled
mock_on_commit.assert_called_once()
call_args = mock_on_commit.call_args[0][0] # Get the lambda function
# Execute the lambda to verify it calls clear_setting_cache
call_args()
mock_clear_cache.delay.assert_called_once_with(['LICENSE'])
def test_attach_view_subscription_not_found_no_cache_clear(self, admin_user, post):
"""Test that attach view doesn't clear cache when subscription is not found"""
with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch(
'django.db.connection.on_commit'
) as mock_on_commit:
# Set up mock licenser with different subscription
mock_licenser = MagicMock()
subscription_data = {'subscription_id': 'different-subscription-456', 'valid_key': False, 'license_type': 'enterprise'} # Different ID
mock_licenser.validate_rh.return_value = [subscription_data]
mock_get_licenser.return_value = mock_licenser
# Request data with non-matching subscription ID
request_data = {
'subscription_id': 'test-subscription-123', # This won't match
}
# Make the POST request
url = reverse('api:api_v2_attach_view')
response = post(url, request_data, admin_user, expect=400)
# Verify error response
assert 'error' in response.data
# Verify cache clearing was NOT called (no matching subscription)
mock_on_commit.assert_not_called()
mock_clear_cache.delay.assert_not_called()
def test_manifest_validation_error_no_cache_clear(self, admin_user, post):
"""Test that config view doesn't clear cache when manifest validation fails"""
with patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
'awx.api.views.root.clear_setting_cache'
) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:
# Mock validation to raise ValueError
mock_validate.side_effect = ValueError("Invalid manifest")
# Prepare request data
manifest_data = {'manifest': 'aW52YWxpZC1tYW5pZmVzdA=='} # base64 for "invalid-manifest"
# Make the POST request
url = reverse('api:api_v2_config_view')
response = post(url, manifest_data, admin_user, expect=400)
# Verify error response
assert response.data['error'] == 'Invalid manifest'
# Verify cache clearing was NOT called (validation failed)
mock_on_commit.assert_not_called()
mock_clear_cache.delay.assert_not_called()
def test_license_processing_error_no_cache_clear(self, admin_user, post):
"""Test that config view doesn't clear cache when license processing fails"""
with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
'awx.api.views.root.clear_setting_cache'
) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:
# Mock validation to succeed but license processing to fail
mock_validate.return_value = [{'some': 'manifest_data'}]
mock_licenser = MagicMock()
mock_licenser.license_from_manifest.side_effect = Exception("License processing failed")
mock_get_licenser.return_value = mock_licenser
# Prepare request data
manifest_data = {'manifest': 'ZmFrZS1tYW5pZmVzdA=='} # base64 for "fake-manifest"
# Make the POST request
url = reverse('api:api_v2_config_view')
response = post(url, manifest_data, admin_user, expect=400)
# Verify error response
assert response.data['error'] == 'Invalid License'
# Verify cache clearing was NOT called (license processing failed)
mock_on_commit.assert_not_called()
mock_clear_cache.delay.assert_not_called()

View File

@@ -1,244 +0,0 @@
from unittest.mock import patch, MagicMock
import pytest
from awx.api.versioning import reverse
from rest_framework import status
@pytest.mark.django_db
class TestApiV2SubscriptionView:
"""Test cases for the /api/v2/config/subscriptions/ endpoint"""
def test_basic_auth(self, post, admin):
"""Test POST with subscriptions_username and subscriptions_password calls validate_rh with basic_auth=True"""
data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
mock_licenser.validate_rh.assert_called_once_with('test_user', 'test_password', True)
def test_service_account(self, post, admin):
"""Test POST with subscriptions_client_id and subscriptions_client_secret calls validate_rh with basic_auth=False"""
data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': 'test_client_secret'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'test_client_secret', False)
def test_encrypted_password_basic_auth(self, post, admin, settings):
"""Test POST with $encrypted$ password uses settings value for basic auth"""
data = {'subscriptions_username': 'test_user', 'subscriptions_password': '$encrypted$'}
settings.SUBSCRIPTIONS_PASSWORD = 'actual_password_from_settings'
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
mock_licenser.validate_rh.assert_called_once_with('test_user', 'actual_password_from_settings', True)
def test_encrypted_client_secret_service_account(self, post, admin, settings):
"""Test POST with $encrypted$ client_secret uses settings value for service_account"""
data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': '$encrypted$'}
settings.SUBSCRIPTIONS_CLIENT_SECRET = 'actual_secret_from_settings'
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'actual_secret_from_settings', False)
def test_missing_username_returns_error(self, post, admin):
"""Test POST with missing username returns 400 error"""
data = {'subscriptions_password': 'test_password'}
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_missing_password_returns_error(self, post, admin, settings):
"""Test POST with missing password returns 400 error"""
data = {'subscriptions_username': 'test_user'}
settings.SUBSCRIPTIONS_PASSWORD = None
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_missing_client_id_returns_error(self, post, admin):
"""Test POST with missing client_id returns 400 error"""
data = {'subscriptions_client_secret': 'test_secret'}
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_missing_client_secret_returns_error(self, post, admin, settings):
"""Test POST with missing client_secret returns 400 error"""
data = {'subscriptions_client_id': 'test_client_id'}
settings.SUBSCRIPTIONS_CLIENT_SECRET = None
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_empty_username_returns_error(self, post, admin):
"""Test POST with empty username returns 400 error"""
data = {'subscriptions_username': '', 'subscriptions_password': 'test_password'}
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_empty_password_returns_error(self, post, admin, settings):
"""Test POST with empty password returns 400 error"""
data = {'subscriptions_username': 'test_user', 'subscriptions_password': ''}
settings.SUBSCRIPTIONS_PASSWORD = None
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert 'Missing subscription credentials' in response.data['error']
def test_non_superuser_permission_denied(self, post, rando):
"""Test that non-superuser cannot access the endpoint"""
data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}
response = post(reverse('api:api_v2_subscription_view'), data, rando)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_settings_updated_on_successful_basic_auth(self, post, admin, settings):
"""Test that settings are updated when basic auth validation succeeds"""
data = {'subscriptions_username': 'new_username', 'subscriptions_password': 'new_password'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
assert settings.SUBSCRIPTIONS_USERNAME == 'new_username'
assert settings.SUBSCRIPTIONS_PASSWORD == 'new_password'
def test_settings_updated_on_successful_service_account(self, post, admin, settings):
"""Test that settings are updated when service account validation succeeds"""
data = {'subscriptions_client_id': 'new_client_id', 'subscriptions_client_secret': 'new_client_secret'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
assert settings.SUBSCRIPTIONS_CLIENT_ID == 'new_client_id'
assert settings.SUBSCRIPTIONS_CLIENT_SECRET == 'new_client_secret'
def test_validate_rh_exception_handling(self, post, admin):
"""Test that exceptions from validate_rh are properly handled"""
data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.side_effect = Exception("Connection error")
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_mixed_credentials_prioritizes_client_id(self, post, admin):
"""Test that when both username and client_id are provided, client_id takes precedence"""
data = {
'subscriptions_username': 'test_user',
'subscriptions_password': 'test_password',
'subscriptions_client_id': 'test_client_id',
'subscriptions_client_secret': 'test_client_secret',
}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
# Should use service account (basic_auth=False) since client_id is present
mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'test_client_secret', False)
def test_basic_auth_clears_service_account_settings(self, post, admin, settings):
"""Test that setting basic auth credentials clears service account settings"""
# Pre-populate service account settings
settings.SUBSCRIPTIONS_CLIENT_ID = 'existing_client_id'
settings.SUBSCRIPTIONS_CLIENT_SECRET = 'existing_client_secret'
data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
# Basic auth settings should be set
assert settings.SUBSCRIPTIONS_USERNAME == 'test_user'
assert settings.SUBSCRIPTIONS_PASSWORD == 'test_password'
# Service account settings should be cleared
assert settings.SUBSCRIPTIONS_CLIENT_ID == ""
assert settings.SUBSCRIPTIONS_CLIENT_SECRET == ""
def test_service_account_clears_basic_auth_settings(self, post, admin, settings):
"""Test that setting service account credentials clears basic auth settings"""
# Pre-populate basic auth settings
settings.SUBSCRIPTIONS_USERNAME = 'existing_username'
settings.SUBSCRIPTIONS_PASSWORD = 'existing_password'
data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': 'test_client_secret'}
with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
mock_licenser = MagicMock()
mock_licenser.validate_rh.return_value = []
mock_get_licenser.return_value = mock_licenser
response = post(reverse('api:api_v2_subscription_view'), data, admin)
assert response.status_code == status.HTTP_200_OK
# Service account settings should be set
assert settings.SUBSCRIPTIONS_CLIENT_ID == 'test_client_id'
assert settings.SUBSCRIPTIONS_CLIENT_SECRET == 'test_client_secret'
# Basic auth settings should be cleared
assert settings.SUBSCRIPTIONS_USERNAME == ""
assert settings.SUBSCRIPTIONS_PASSWORD == ""
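The six near-identical credential checks above differ only in their request payloads, so they are a natural fit for pytest parametrization. A minimal sketch of that consolidation (hypothetical, not part of this diff; the settings-clearing variants are omitted and the same post/admin fixtures are assumed):

import pytest
from rest_framework import status

from awx.api.versioning import reverse


@pytest.mark.django_db
@pytest.mark.parametrize(
    'data',
    [
        {'subscriptions_password': 'test_password'},  # missing username
        {'subscriptions_username': 'test_user'},  # missing password
        {'subscriptions_client_secret': 'test_secret'},  # missing client_id
        {'subscriptions_client_id': 'test_client_id'},  # missing client_secret
        {'subscriptions_username': '', 'subscriptions_password': 'test_password'},  # empty username
        {'subscriptions_username': 'test_user', 'subscriptions_password': ''},  # empty password
    ],
)
def test_incomplete_subscription_credentials(post, admin, data):
    # Every incomplete payload should yield the same 400 response.
    response = post(reverse('api:api_v2_subscription_view'), data, admin)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert 'Missing subscription credentials' in response.data['error']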

View File

@@ -5,6 +5,10 @@ import pytest
from django.contrib.sessions.middleware import SessionMiddleware
from django.test.utils import override_settings
from django.contrib.auth.models import AnonymousUser
from ansible_base.lib.utils.response import get_relative_url
from ansible_base.lib.testing.fixtures import settings_override_mutable # NOQA: F401 imported to be a pytest fixture
from awx.main.models import User
from awx.api.versioning import reverse
@@ -17,6 +21,33 @@ from awx.api.versioning import reverse
EXAMPLE_USER_DATA = {"username": "affable", "first_name": "a", "last_name": "a", "email": "a@a.com", "is_superuser": False, "password": "r$TyKiOCb#ED"}
@pytest.mark.django_db
def test_validate_local_user(post, admin_user, settings, settings_override_mutable): # NOQA: F811 this is how you use a pytest fixture
"Copy of the test by same name in django-ansible-base for integration and compatibility testing"
url = get_relative_url('validate-local-account')
admin_user.set_password('password')
admin_user.save()
data = {
"username": admin_user.username,
"password": "password",
}
with override_settings(RESOURCE_SERVER={"URL": "https://foo.invalid", "SECRET_KEY": "foobar"}):
response = post(url=url, data=data, user=AnonymousUser(), expect=200)
assert 'ansible_id' in response.data
assert response.data['auth_code'] is not None, response.data
# No resource server, return coherent response but cannot provide auth code
response = post(url=url, data=data, user=AnonymousUser(), expect=200)
assert 'ansible_id' in response.data
assert response.data['auth_code'] is None
# wrong password
data['password'] = 'foobar'
response = post(url=url, data=data, user=AnonymousUser(), expect=401)
# response.data may be None here; this is just testing that we get no server error
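For illustration, the same flow written as an external client: POST credentials to the validate-local-account endpoint and read back the auth code. A hypothetical sketch (the URL path and response fields are assumed from the test above, not from any published client):

import requests
from typing import Optional


def fetch_auth_code(base_url: str, username: str, password: str) -> Optional[str]:
    # Returns the auth code when a resource server is configured, else None;
    # a wrong password surfaces as HTTP 401 via raise_for_status().
    resp = requests.post(
        f'{base_url}/api/v2/validate-local-account/',  # path assumed from the view name
        json={'username': username, 'password': password},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get('auth_code')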
@pytest.mark.django_db
def test_user_create(post, admin):
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
@@ -258,19 +289,3 @@ def test_user_verify_attribute_created(admin, get):
for op, count in (('gt', 1), ('lt', 0)):
resp = get(reverse('api:user_list') + f'?created__{op}={past}', admin)
assert resp.data['count'] == count
@pytest.mark.django_db
def test_org_not_shown_in_admin_user_sublists(admin_user, get, organization):
for view_name in ('user_admin_of_organizations_list', 'user_organizations_list'):
url = reverse(f'api:{view_name}', kwargs={'pk': admin_user.pk})
r = get(url, user=admin_user, expect=200)
assert organization.pk not in [org['id'] for org in r.data['results']]
@pytest.mark.django_db
def test_admin_user_not_shown_in_org_users(admin_user, get, organization):
for view_name in ('organization_users_list', 'organization_admins_list'):
url = reverse(f'api:{view_name}', kwargs={'pk': organization.pk})
r = get(url, user=admin_user, expect=200)
assert admin_user.pk not in [u['id'] for u in r.data['results']]

View File

@@ -1,5 +1,3 @@
import logging
# Python
import pytest
from unittest import mock
@@ -10,7 +8,7 @@ import importlib
# Django
from django.urls import resolve
from django.http import Http404
from django.apps import apps as global_apps
from django.apps import apps
from django.core.handlers.exception import response_for_exception
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
@@ -49,8 +47,6 @@ from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.utils import is_testing
logger = logging.getLogger(__name__)
__SWAGGER_REQUESTS__ = {}
@@ -58,17 +54,8 @@ __SWAGGER_REQUESTS__ = {}
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
def create_service_id(app_config, apps=global_apps, **kwargs):
try:
apps.get_model("dab_resource_registry", "ServiceID")
except LookupError:
logger.info('Looks like reverse migration, not creating resource registry ServiceID')
return
dab_rr_initial.create_service_id(apps, None)
if is_testing():
post_migrate.connect(create_service_id)
post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
@pytest.fixture(scope="session")
@@ -139,7 +126,7 @@ def execution_environment():
@pytest.fixture
def setup_managed_roles():
"Run the migration script to pre-create managed role definitions"
setup_managed_role_definitions(global_apps, None)
setup_managed_role_definitions(apps, None)
@pytest.fixture

View File

@@ -1,30 +1,36 @@
import pytest
from flags.state import get_flags, flag_state
from ansible_base.feature_flags.models import AAPFlag
from ansible_base.feature_flags.utils import create_initial_data as seed_feature_flags
from django.conf import settings
from django.test import override_settings
from awx.main.models import User
@override_settings(FLAGS={})
@pytest.mark.django_db
def test_feature_flags_list_endpoint(get):
bob = User.objects.create(username='bob', password='test_user', is_superuser=True)
url = "/api/v2/feature_flags/states/"
bob = User.objects.create(username='bob', password='test_user', is_superuser=False)
url = "/api/v2/feature_flags_state/"
response = get(url, user=bob, expect=200)
assert len(get_flags()) > 0
assert len(response.data["results"]) == len(get_flags())
assert len(response.data) == 0
@override_settings(
FLAGS={
"FEATURE_SOME_PLATFORM_FLAG_ENABLED": [
{"condition": "boolean", "value": False},
{"condition": "before date", "value": "2022-06-01T12:00Z"},
],
"FEATURE_SOME_PLATFORM_FLAG_FOO_ENABLED": [
{"condition": "boolean", "value": True},
],
}
)
@pytest.mark.django_db
@pytest.mark.parametrize('flag_val', (True, False))
def test_feature_flags_list_endpoint_override(get, flag_val):
bob = User.objects.create(username='bob', password='test_user', is_superuser=True)
def test_feature_flags_list_endpoint_override(get):
bob = User.objects.create(username='bob', password='test_user', is_superuser=False)
AAPFlag.objects.all().delete()
flag_name = "FEATURE_DISPATCHERD_ENABLED"
setattr(settings, flag_name, flag_val)
seed_feature_flags()
url = "/api/v2/feature_flags/states/"
url = "/api/v2/feature_flags_state/"
response = get(url, user=bob, expect=200)
assert len(response.data["results"]) == 6
assert flag_state(flag_name) == flag_val
assert len(response.data) == 2
assert response.data["FEATURE_SOME_PLATFORM_FLAG_ENABLED"] is False
assert response.data["FEATURE_SOME_PLATFORM_FLAG_FOO_ENABLED"] is True

View File

@@ -1,147 +0,0 @@
import pytest
from django.contrib.contenttypes.models import ContentType
from django.test import override_settings
from django.apps import apps
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
from ansible_base.rbac.migrations._utils import give_permissions
from awx.main.models import User, Team
from awx.main.migrations._dab_rbac import consolidate_indirect_user_roles
@pytest.mark.django_db
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
def test_consolidate_indirect_user_roles_with_nested_teams(setup_managed_roles, organization):
"""
Test the consolidate_indirect_user_roles function with a nested team hierarchy.
Setup:
- Users: A, B, C, D
- Teams: E, F, G
- Direct assignments: A→(E,F,G), B→E, C→F, D→G
- Team hierarchy: F→E (F is member of E), G→F (G is member of F)
Expected result after consolidation:
- Team E should have users: A, B, C, D (A directly, B directly, C through F, D through G→F)
- Team F should have users: A, C, D (A directly, C directly, D through G)
- Team G should have users: A, D (A directly, D directly)
"""
user_a = User.objects.create_user(username='user_a')
user_b = User.objects.create_user(username='user_b')
user_c = User.objects.create_user(username='user_c')
user_d = User.objects.create_user(username='user_d')
team_e = Team.objects.create(name='Team E', organization=organization)
team_f = Team.objects.create(name='Team F', organization=organization)
team_g = Team.objects.create(name='Team G', organization=organization)
# Get role definition and content type for give_permissions
team_member_role = RoleDefinition.objects.get(name='Team Member')
team_content_type = ContentType.objects.get_for_model(Team)
# Assign users to teams
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_e.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_f.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_g.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, users=[user_b], object_id=team_e.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, users=[user_c], object_id=team_f.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, users=[user_d], object_id=team_g.id, content_type_id=team_content_type.id)
# Mirror user assignments in the old RBAC system because signals don't run in tests
team_e.member_role.members.add(user_a.id, user_b.id)
team_f.member_role.members.add(user_a.id, user_c.id)
team_g.member_role.members.add(user_a.id, user_d.id)
# Setup team-to-team relationships
give_permissions(apps=apps, rd=team_member_role, teams=[team_f], object_id=team_e.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, teams=[team_g], object_id=team_f.id, content_type_id=team_content_type.id)
# Verify initial direct assignments
team_e_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_e.id).values_list('user_id', flat=True))
assert team_e_users_before == {user_a.id, user_b.id}
team_f_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_f.id).values_list('user_id', flat=True))
assert team_f_users_before == {user_a.id, user_c.id}
team_g_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_g.id).values_list('user_id', flat=True))
assert team_g_users_before == {user_a.id, user_d.id}
# Verify team-to-team relationships exist
assert RoleTeamAssignment.objects.filter(role_definition=team_member_role, team=team_f, object_id=team_e.id).exists()
assert RoleTeamAssignment.objects.filter(role_definition=team_member_role, team=team_g, object_id=team_f.id).exists()
# Run the consolidation function
consolidate_indirect_user_roles(apps, None)
# Verify consolidation
team_e_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_e.id).values_list('user_id', flat=True))
assert team_e_users_after == {user_a.id, user_b.id, user_c.id, user_d.id}, f"Team E should have users A, B, C, D but has {team_e_users_after}"
team_f_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_f.id).values_list('user_id', flat=True))
assert team_f_users_after == {user_a.id, user_c.id, user_d.id}, f"Team F should have users A, C, D but has {team_f_users_after}"
team_g_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_g.id).values_list('user_id', flat=True))
assert team_g_users_after == {user_a.id, user_d.id}, f"Team G should have users A, D but has {team_g_users_after}"
# Verify team member changes are mirrored to the old RBAC system
assert team_e_users_after == set(team_e.member_role.members.all().values_list('id', flat=True))
assert team_f_users_after == set(team_f.member_role.members.all().values_list('id', flat=True))
assert team_g_users_after == set(team_g.member_role.members.all().values_list('id', flat=True))
# Verify team-to-team relationships are removed after consolidation
assert not RoleTeamAssignment.objects.filter(
role_definition=team_member_role, team=team_f, object_id=team_e.id
).exists(), "Team-to-team relationship F→E should be removed"
assert not RoleTeamAssignment.objects.filter(
role_definition=team_member_role, team=team_g, object_id=team_f.id
).exists(), "Team-to-team relationship G→F should be removed"
@pytest.mark.django_db
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
def test_consolidate_indirect_user_roles_no_team_relationships(setup_managed_roles, organization):
"""
Test that the function handles the case where there are no team-to-team relationships.
It should return early without making any changes.
"""
# Create a user and team with direct assignment
user = User.objects.create_user(username='test_user')
team = Team.objects.create(name='Test Team', organization=organization)
team_member_role = RoleDefinition.objects.get(name='Team Member')
team_content_type = ContentType.objects.get_for_model(Team)
give_permissions(apps=apps, rd=team_member_role, users=[user], object_id=team.id, content_type_id=team_content_type.id)
# Compare count of assignments before and after consolidation
assignments_before = RoleUserAssignment.objects.filter(role_definition=team_member_role).count()
consolidate_indirect_user_roles(apps, None)
assignments_after = RoleUserAssignment.objects.filter(role_definition=team_member_role).count()
assert assignments_before == assignments_after, "Number of assignments should not change when there are no team-to-team relationships"
@pytest.mark.django_db
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
def test_consolidate_indirect_user_roles_circular_reference(setup_managed_roles, organization):
"""
Test that the function handles circular team references without infinite recursion.
"""
team_a = Team.objects.create(name='Team A', organization=organization)
team_b = Team.objects.create(name='Team B', organization=organization)
# Create a user assigned to team A
user = User.objects.create_user(username='test_user')
team_member_role = RoleDefinition.objects.get(name='Team Member')
team_content_type = ContentType.objects.get_for_model(Team)
give_permissions(apps=apps, rd=team_member_role, users=[user], object_id=team_a.id, content_type_id=team_content_type.id)
# Create circular team relationships: A → B → A
give_permissions(apps=apps, rd=team_member_role, teams=[team_b], object_id=team_a.id, content_type_id=team_content_type.id)
give_permissions(apps=apps, rd=team_member_role, teams=[team_a], object_id=team_b.id, content_type_id=team_content_type.id)
# Run the consolidation function - should not raise an exception
consolidate_indirect_user_roles(apps, None)
# Both teams should have the user assigned
team_a_users = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_a.id).values_list('user_id', flat=True))
team_b_users = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_b.id).values_list('user_id', flat=True))
assert user.id in team_a_users, "User should be assigned to team A"
assert user.id in team_b_users, "User should be assigned to team B"
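The expected memberships in the nested-team docstring above amount to a transitive closure over the team-to-team edges, with a visited set guarding against the circular case tested last. A standalone sketch of that computation (pure Python, independent of the Django models):

def effective_members(direct_users, child_teams):
    # direct_users: {team: {user, ...}}; child_teams: {team: {member_team, ...}}
    # where a member team's users also count as members of the parent team.
    result = {}
    for team in direct_users:
        seen, stack, users = set(), [team], set()
        while stack:
            current = stack.pop()
            if current in seen:
                continue  # guards against circular references like A -> B -> A
            seen.add(current)
            users |= direct_users.get(current, set())
            stack.extend(child_teams.get(current, ()))
        result[team] = users
    return result

# Mirrors the docstring: F is a member of E, G is a member of F.
direct = {'E': {'A', 'B'}, 'F': {'A', 'C'}, 'G': {'A', 'D'}}
children = {'E': {'F'}, 'F': {'G'}, 'G': set()}
assert effective_members(direct, children)['E'] == {'A', 'B', 'C', 'D'}
assert effective_members(direct, children)['F'] == {'A', 'C', 'D'}
assert effective_members(direct, children)['G'] == {'A', 'D'}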

View File

@@ -1,5 +1,6 @@
import pytest
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse as django_reverse
from awx.api.versioning import reverse
@@ -7,14 +8,13 @@ from awx.main.models import JobTemplate, Inventory, Organization
from awx.main.access import JobTemplateAccess, WorkflowJobTemplateAccess
from ansible_base.rbac.models import RoleDefinition
from ansible_base.rbac import permission_registry
@pytest.mark.django_db
def test_managed_roles_created(setup_managed_roles):
"Managed RoleDefinitions are created in post_migration signal, we expect to see them here"
for cls in (JobTemplate, Inventory):
ct = permission_registry.content_type_model.objects.get_for_model(cls)
ct = ContentType.objects.get_for_model(cls)
rds = list(RoleDefinition.objects.filter(content_type=ct))
assert len(rds) > 1
assert f'{cls.__name__} Admin' in [rd.name for rd in rds]
@@ -26,20 +26,17 @@ def test_managed_roles_created(setup_managed_roles):
def test_custom_read_role(admin_user, post, setup_managed_roles):
rd_url = django_reverse('roledefinition-list')
resp = post(
url=rd_url,
data={"name": "read role made for test", "content_type": "awx.inventory", "permissions": ['awx.view_inventory']},
user=admin_user,
expect=201,
url=rd_url, data={"name": "read role made for test", "content_type": "awx.inventory", "permissions": ['view_inventory']}, user=admin_user, expect=201
)
rd_id = resp.data['id']
rd = RoleDefinition.objects.get(id=rd_id)
assert rd.content_type == permission_registry.content_type_model.objects.get_for_model(Inventory)
assert rd.content_type == ContentType.objects.get_for_model(Inventory)
@pytest.mark.django_db
def test_custom_system_roles_prohibited(admin_user, post):
rd_url = django_reverse('roledefinition-list')
resp = post(url=rd_url, data={"name": "read role made for test", "content_type": None, "permissions": ['awx.view_inventory']}, user=admin_user, expect=400)
resp = post(url=rd_url, data={"name": "read role made for test", "content_type": None, "permissions": ['view_inventory']}, user=admin_user, expect=400)
assert 'System-wide roles are not enabled' in str(resp.data)
@@ -74,7 +71,7 @@ def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
rd, _ = RoleDefinition.objects.get_or_create(
name='inventory-delete',
permissions=['delete_inventory', 'view_inventory', 'change_inventory'],
content_type=permission_registry.content_type_model.objects.get_for_model(Inventory),
content_type=ContentType.objects.get_for_model(Inventory),
)
rd.give_permission(rando, inventory)
inv_id = inventory.pk
@@ -88,9 +85,7 @@ def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
@pytest.mark.django_db
def test_assign_custom_add_role(admin_user, rando, organization, post, setup_managed_roles):
rd, _ = RoleDefinition.objects.get_or_create(
name='inventory-add',
permissions=['add_inventory', 'view_organization'],
content_type=permission_registry.content_type_model.objects.get_for_model(Organization),
name='inventory-add', permissions=['add_inventory', 'view_organization'], content_type=ContentType.objects.get_for_model(Organization)
)
rd.give_permission(rando, organization)
url = reverse('api:inventory_list')
@@ -151,6 +146,14 @@ def test_assign_credential_to_user_of_another_org(setup_managed_roles, credentia
post(url=url, data={"user": org_admin.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
@pytest.mark.django_db
def test_team_member_role_not_assignable(team, rando, post, admin_user, setup_managed_roles):
member_rd = RoleDefinition.objects.get(name='Organization Member')
url = django_reverse('roleuserassignment-list')
r = post(url, data={'object_id': team.id, 'role_definition': member_rd.id, 'user': rando.id}, user=admin_user, expect=400)
assert 'Not managed locally' in str(r.data)
@pytest.mark.django_db
def test_adding_user_to_org_member_role(setup_managed_roles, organization, admin, bob, post, get):
'''
@@ -170,17 +173,10 @@ def test_adding_user_to_org_member_role(setup_managed_roles, organization, admin
@pytest.mark.django_db
@pytest.mark.parametrize('actor', ['user', 'team'])
@pytest.mark.parametrize('role_name', ['Organization Admin', 'Organization Member', 'Team Admin', 'Team Member'])
def test_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, organization, team, admin, bob, post):
def test_prevent_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, organization, team, admin, bob, post):
'''
Allow user to be added to platform-level roles
Exceptions:
- Team cannot be added to Organization Member or Admin role
- Team cannot be added to Team Admin or Team Member role
Prevent user or team from being added to platform-level roles
'''
if actor == 'team':
expect = 400
else:
expect = 201
rd = RoleDefinition.objects.get(name=role_name)
endpoint = 'roleuserassignment-list' if actor == 'user' else 'roleteamassignment-list'
url = django_reverse(endpoint)
@@ -188,9 +184,37 @@ def test_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, o
data = {'object_id': object_id, 'role_definition': rd.id}
actor_id = bob.id if actor == 'user' else team.id
data[actor] = actor_id
r = post(url, data=data, user=admin, expect=expect)
if expect == 400:
if 'Organization' in role_name:
assert 'Assigning organization member permission to teams is not allowed' in str(r.data)
if 'Team' in role_name:
assert 'Assigning team permissions to other teams is not allowed' in str(r.data)
r = post(url, data=data, user=admin, expect=400)
assert 'Not managed locally' in str(r.data)
@pytest.mark.django_db
@pytest.mark.parametrize('role_name', ['Controller Team Admin', 'Controller Team Member'])
def test_adding_user_to_controller_team_roles(setup_managed_roles, role_name, team, admin, bob, post, get):
'''
Allow user to be added to Controller Team Admin or Controller Team Member
'''
url_detail = reverse('api:team_detail', kwargs={'pk': team.id})
get(url_detail, user=bob, expect=403)
rd = RoleDefinition.objects.get(name=role_name)
url = django_reverse('roleuserassignment-list')
post(url, data={'object_id': team.id, 'role_definition': rd.id, 'user': bob.id}, user=admin, expect=201)
get(url_detail, user=bob, expect=200)
@pytest.mark.django_db
@pytest.mark.parametrize('role_name', ['Controller Organization Admin', 'Controller Organization Member'])
def test_adding_user_to_controller_organization_roles(setup_managed_roles, role_name, organization, admin, bob, post, get):
'''
Allow user to be added to Controller Organization Admin or Controller Organization Member
'''
url_detail = reverse('api:organization_detail', kwargs={'pk': organization.id})
get(url_detail, user=bob, expect=403)
rd = RoleDefinition.objects.get(name=role_name)
url = django_reverse('roleuserassignment-list')
post(url, data={'object_id': organization.id, 'role_definition': rd.id, 'user': bob.id}, user=admin, expect=201)
get(url, user=bob, expect=200)
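The Controller-role tests above all drive the same DAB assignment endpoints with the same payload shape; only the actor key and endpoint name change. A hypothetical helper capturing that pattern (endpoint names taken from the tests; the helper itself is not in this diff):

from django.urls import reverse as django_reverse


def assign_role(post, requesting_user, role_definition, actor_key, actor, obj, expect=201):
    # actor_key is 'user' or 'team'; DAB exposes a separate assignment list for each.
    endpoint = 'roleuserassignment-list' if actor_key == 'user' else 'roleteamassignment-list'
    data = {'object_id': obj.id, 'role_definition': role_definition.id, actor_key: actor.id}
    return post(django_reverse(endpoint), data=data, user=requesting_user, expect=expect)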

View File

@@ -15,14 +15,6 @@ def test_roles_to_not_create(setup_managed_roles):
raise Exception(f'Found RoleDefinitions that should not exist: {bad_names}')
@pytest.mark.django_db
def test_org_admin_role(setup_managed_roles):
rd = RoleDefinition.objects.get(name='Organization Admin')
codenames = list(rd.permissions.values_list('codename', flat=True))
assert 'view_inventory' in codenames
assert 'change_inventory' in codenames
@pytest.mark.django_db
def test_project_update_role(setup_managed_roles):
"""Role to allow updating a project on the object-level should exist"""
@@ -39,18 +31,32 @@ def test_org_child_add_permission(setup_managed_roles):
assert not DABPermission.objects.filter(codename='add_jobtemplate').exists()
@pytest.mark.django_db
def test_controller_specific_roles_have_correct_permissions(setup_managed_roles):
'''
Controller specific roles should have the same permissions as the platform roles
e.g. Controller Team Admin should have the same permission set as Team Admin
'''
for rd_name in ['Controller Team Admin', 'Controller Team Member', 'Controller Organization Member', 'Controller Organization Admin']:
rd = RoleDefinition.objects.get(name=rd_name)
rd_platform = RoleDefinition.objects.get(name=rd_name.split('Controller ')[1])
assert set(rd.permissions.all()) == set(rd_platform.permissions.all())
@pytest.mark.django_db
@pytest.mark.parametrize('resource_name', ['Team', 'Organization'])
@pytest.mark.parametrize('action', ['Member', 'Admin'])
def test_legacy_RBAC_uses_platform_roles(setup_managed_roles, resource_name, action, team, bob, organization):
def test_legacy_RBAC_uses_controller_specific_roles(setup_managed_roles, resource_name, action, team, bob, organization):
'''
Assignment to legacy RBAC roles should use platform role definitions
e.g. Team Admin, Team Member, Organization Member, Organization Admin
Assignment to legacy RBAC roles should use controller specific role definitions
e.g. Controller Team Admin, Controller Team Member, Controller Organization Member, Controller Organization Admin
'''
resource = team if resource_name == 'Team' else organization
if action == 'Member':
resource.member_role.members.add(bob)
else:
resource.admin_role.members.add(bob)
rd = RoleDefinition.objects.get(name=f'{resource_name} {action}')
rd = RoleDefinition.objects.get(name=f'Controller {resource_name} {action}')
rd_platform = RoleDefinition.objects.get(name=f'{resource_name} {action}')
assert RoleUserAssignment.objects.filter(role_definition=rd, user=bob, object_id=resource.id).exists()
assert not RoleUserAssignment.objects.filter(role_definition=rd_platform, user=bob, object_id=resource.id).exists()

View File

@@ -3,6 +3,8 @@ import json
import pytest
from django.contrib.contenttypes.models import ContentType
from crum import impersonate
from awx.main.fields import ImplicitRoleField
@@ -58,7 +60,7 @@ def test_role_migration_matches(request, model, setup_managed_roles):
new_codenames = set(rd.permissions.values_list('codename', flat=True))
# all the old roles should map to a non-Compat role definition
if 'Compat' not in rd.name:
model_rds = RoleDefinition.objects.filter(content_type=permission_registry.content_type_model.objects.get_for_model(obj))
model_rds = RoleDefinition.objects.filter(content_type=ContentType.objects.get_for_model(obj))
rd_data = {}
for rd in model_rds:
rd_data[rd.name] = list(rd.permissions.values_list('codename', flat=True))
@@ -74,7 +76,7 @@ def test_role_migration_matches(request, model, setup_managed_roles):
@pytest.mark.django_db
def test_role_naming(setup_managed_roles):
qs = RoleDefinition.objects.filter(content_type=permission_registry.content_type_model.objects.get(model='jobtemplate'), name__endswith='dmin')
qs = RoleDefinition.objects.filter(content_type=ContentType.objects.get(model='jobtemplate'), name__endswith='dmin')
assert qs.count() == 1 # sanity
rd = qs.first()
assert rd.name == 'JobTemplate Admin'
@@ -84,7 +86,7 @@ def test_role_naming(setup_managed_roles):
@pytest.mark.django_db
def test_action_role_naming(setup_managed_roles):
qs = RoleDefinition.objects.filter(content_type=permission_registry.content_type_model.objects.get(model='jobtemplate'), name__endswith='ecute')
qs = RoleDefinition.objects.filter(content_type=ContentType.objects.get(model='jobtemplate'), name__endswith='ecute')
assert qs.count() == 1 # sanity
rd = qs.first()
assert rd.name == 'JobTemplate Execute'
@@ -96,7 +98,7 @@ def test_action_role_naming(setup_managed_roles):
def test_compat_role_naming(setup_managed_roles, job_template, rando, alice):
with impersonate(alice):
job_template.read_role.members.add(rando)
qs = RoleDefinition.objects.filter(content_type=permission_registry.content_type_model.objects.get(model='jobtemplate'), name__endswith='ompat')
qs = RoleDefinition.objects.filter(content_type=ContentType.objects.get(model='jobtemplate'), name__endswith='ompat')
assert qs.count() == 1 # sanity
rd = qs.first()
assert rd.name == 'JobTemplate Read Compat'
@@ -173,6 +175,20 @@ def test_creator_permission(rando, admin_user, inventory, setup_managed_roles):
assert rando in inventory.admin_role.members.all()
@pytest.mark.django_db
def test_team_team_read_role(rando, team, admin_user, post, setup_managed_roles):
orgs = [Organization.objects.create(name=f'foo-{i}') for i in range(2)]
teams = [Team.objects.create(name=f'foo-{i}', organization=orgs[i]) for i in range(2)]
teams[1].member_role.members.add(rando)
# give second team read permission to first team through the API for regression testing
url = reverse('api:role_teams_list', kwargs={'pk': teams[0].read_role.pk, 'version': 'v2'})
post(url, {'id': teams[1].id}, user=admin_user)
# user should be able to view the first team
assert rando in teams[0].read_role
@pytest.mark.django_db
def test_implicit_parents_no_assignments(organization):
"""Through the normal course of creating models, we should not be changing DAB RBAC permissions"""
@@ -186,25 +202,25 @@ def test_user_auditor_rel(organization, rando, setup_managed_roles):
assert rando not in organization.auditor_role
audit_rd = RoleDefinition.objects.get(name='Organization Audit')
audit_rd.give_permission(rando, organization)
assert list(Organization.access_qs(rando, 'audit')) == [organization]
assert list(rando.auditor_of_organizations) == [organization]
@pytest.mark.django_db
@pytest.mark.parametrize('resource_name', ['Organization', 'Team'])
@pytest.mark.parametrize('role_name', ['Member', 'Admin'])
def test_mapping_from_role_definitions_to_roles(organization, team, rando, role_name, resource_name, setup_managed_roles):
def test_mapping_from_controller_role_definitions_to_roles(organization, team, rando, role_name, resource_name, setup_managed_roles):
"""
ensure mappings for platform roles are correct
ensure mappings for controller roles are correct
e.g.
Organization Member > organization.member_role
Organization Admin > organization.admin_role
Team Member > team.member_role
Team Admin > team.admin_role
Controller Organization Member > organization.member_role
Controller Organization Admin > organization.admin_role
Controller Team Member > team.member_role
Controller Team Admin > team.admin_role
"""
resource = organization if resource_name == 'Organization' else team
old_role_name = f"{role_name.lower()}_role"
getattr(resource, old_role_name).members.add(rando)
assignment = RoleUserAssignment.objects.get(user=rando)
assert assignment.role_definition.name == f'{resource_name} {role_name}'
assert assignment.role_definition.name == f'Controller {resource_name} {role_name}'
old_role = get_role_from_object_role(assignment.object_role)
assert old_role.id == getattr(resource, old_role_name).id
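The mapping asserted above follows a simple naming convention: the last word of the Controller role definition name selects the legacy role attribute. A hypothetical helper illustrating that convention (not part of this diff):

def legacy_role_attr(role_definition_name: str) -> str:
    # 'Controller Organization Member' -> 'member_role', 'Controller Team Admin' -> 'admin_role'
    action = role_definition_name.rsplit(' ', 1)[-1].lower()
    return f'{action}_role'

assert legacy_role_attr('Controller Organization Member') == 'member_role'
assert legacy_role_attr('Controller Team Admin') == 'admin_role'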

View File

@@ -35,21 +35,21 @@ class TestNewToOld:
def test_new_to_old_rbac_team_member_addition(self, admin, post, team, bob, setup_managed_roles):
'''
Assign user to Team Member role definition, should be added to team.member_role.members
Assign user to Controller Team Member role definition, should be added to team.member_role.members
'''
rd = RoleDefinition.objects.get(name='Team Member')
rd = RoleDefinition.objects.get(name='Controller Team Member')
url = get_relative_url('roleuserassignment-list')
post(url, user=admin, data={'role_definition': rd.id, 'user': bob.id, 'object_id': team.id}, expect=201)
assert bob in team.member_role.members.all()
def test_new_to_old_rbac_team_member_removal(self, admin, delete, team, bob, setup_managed_roles):
def test_new_to_old_rbac_team_member_removal(self, admin, delete, team, bob):
'''
Remove user from Team Member role definition, should be deleted from team.member_role.members
Remove user from Controller Team Member role definition, should be deleted from team.member_role.members
'''
team.member_role.members.add(bob)
rd = RoleDefinition.objects.get(name='Team Member')
rd = RoleDefinition.objects.get(name='Controller Team Member')
user_assignment = RoleUserAssignment.objects.get(user=bob, role_definition=rd, object_id=team.id)
url = get_relative_url('roleuserassignment-detail', kwargs={'pk': user_assignment.id})

View File

@@ -39,7 +39,7 @@ def test_dispatcher_max_workers_reserve(settings, fake_redis):
plus reserve worker count
"""
with override_settings(**settings):
i = Instance.objects.create(hostname='test-1', node_type='hybrid')
i = Instance.objects.create(hostname='test-1', node_type='hybrid', capacity_adjustment=1.0)
i.local_health_check()
assert get_auto_max_workers() == i.capacity + 7, (i.cpu, i.memory, i.cpu_capacity, i.mem_capacity)
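Pinning capacity_adjustment=1.0 above keeps the computed capacity an integer. As a rough mental model only (the exact AWX formula is not shown in this diff, so treat the details as an assumption), capacity interpolates between memory-based and CPU-based capacity:

def interpolated_capacity(cpu_capacity: float, mem_capacity: float, adjustment: float) -> float:
    # Assumption: adjustment=1.0 selects one bound outright, while a fractional
    # adjustment such as 0.75 blends between the two bounds, which is how a
    # fractional capacity can arise.
    return mem_capacity + (cpu_capacity - mem_capacity) * adjustment

assert interpolated_capacity(8, 4, 1.0) == 8.0    # integer at the extremes
assert interpolated_capacity(8, 5, 0.75) == 7.25  # fractional in between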

View File

@@ -387,6 +387,36 @@ def test_remove_team_from_role(post, team, admin, role):
assert role.parents.filter(id=team.member_role.id).count() == 0
#
# /roles/<id>/parents/
#
@pytest.mark.django_db
def test_role_parents(get, team, admin, role):
role.parents.add(team.member_role)
url = reverse('api:role_parents_list', kwargs={'pk': role.id})
response = get(url, admin)
assert response.status_code == 200
assert response.data['count'] == 1
assert response.data['results'][0]['id'] == team.member_role.id
#
# /roles/<id>/children/
#
@pytest.mark.django_db
def test_role_children(get, team, admin, role):
role.parents.add(team.member_role)
url = reverse('api:role_children_list', kwargs={'pk': team.member_role.id})
response = get(url, admin)
assert response.status_code == 200
assert response.data['count'] == 2
assert response.data['results'][0]['id'] == role.id or response.data['results'][1]['id'] == role.id
#
# Generics
#

View File

@@ -1,5 +1,7 @@
import pytest
from django.contrib.contenttypes.models import ContentType
from awx.main.access import ExecutionEnvironmentAccess
from awx.main.models import ExecutionEnvironment, Organization, Team
from awx.main.models.rbac import get_role_codenames
@@ -8,7 +10,6 @@ from awx.api.versioning import reverse
from django.urls import reverse as django_reverse
from ansible_base.rbac.models import RoleDefinition
from ansible_base.rbac import permission_registry
@pytest.fixture
@@ -16,7 +17,7 @@ def ee_rd():
return RoleDefinition.objects.create_from_permissions(
name='EE object admin',
permissions=['change_executionenvironment', 'delete_executionenvironment'],
content_type=permission_registry.content_type_model.objects.get_for_model(ExecutionEnvironment),
content_type=ContentType.objects.get_for_model(ExecutionEnvironment),
)
@@ -25,7 +26,7 @@ def org_ee_rd():
return RoleDefinition.objects.create_from_permissions(
name='EE org admin',
permissions=['add_executionenvironment', 'change_executionenvironment', 'delete_executionenvironment', 'view_organization'],
content_type=permission_registry.content_type_model.objects.get_for_model(Organization),
content_type=ContentType.objects.get_for_model(Organization),
)

Some files were not shown because too many files have changed in this diff.