mirror of
https://github.com/ansible/awx.git
synced 2026-02-09 21:54:43 -03:30
Compare commits
2 Commits
thedoubl3j
...
upgrade-sq
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
543d3f940b | ||
|
|
ee7edb9179 |
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
codecov:
|
codecov:
|
||||||
notify:
|
notify:
|
||||||
after_n_builds: 9 # Number of test matrix+lint jobs uploading coverage
|
after_n_builds: 6 # Number of test matrix+lint jobs uploading coverage
|
||||||
wait_for_ci: false
|
wait_for_ci: false
|
||||||
|
|
||||||
require_ci_to_pass: false
|
require_ci_to_pass: false
|
||||||
|
|||||||
17
.coveragerc
17
.coveragerc
@@ -17,23 +17,6 @@ exclude_also =
|
|||||||
|
|
||||||
[run]
|
[run]
|
||||||
branch = True
|
branch = True
|
||||||
# NOTE: `disable_warnings` is needed when `pytest-cov` runs in tandem
|
|
||||||
# NOTE: with `pytest-xdist`. These warnings are false negative in this
|
|
||||||
# NOTE: context.
|
|
||||||
#
|
|
||||||
# NOTE: It's `coveragepy` that emits the warnings and previously they
|
|
||||||
# NOTE: wouldn't get on the radar of `pytest`'s `filterwarnings`
|
|
||||||
# NOTE: mechanism. This changed, however, with `pytest >= 8.4`. And
|
|
||||||
# NOTE: since we set `filterwarnings = error`, those warnings are being
|
|
||||||
# NOTE: raised as exceptions, cascading into `pytest`'s internals and
|
|
||||||
# NOTE: causing tracebacks and crashes of the test sessions.
|
|
||||||
#
|
|
||||||
# Ref:
|
|
||||||
# * https://github.com/pytest-dev/pytest-cov/issues/693
|
|
||||||
# * https://github.com/pytest-dev/pytest-cov/pull/695
|
|
||||||
# * https://github.com/pytest-dev/pytest-cov/pull/696
|
|
||||||
disable_warnings =
|
|
||||||
module-not-measured
|
|
||||||
omit =
|
omit =
|
||||||
awx/main/migrations/*
|
awx/main/migrations/*
|
||||||
awx/settings/defaults.py
|
awx/settings/defaults.py
|
||||||
|
|||||||
9
.github/PULL_REQUEST_TEMPLATE.md
vendored
9
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -4,8 +4,7 @@
|
|||||||
<!---
|
<!---
|
||||||
If you are fixing an existing issue, please include "related #nnn" in your
|
If you are fixing an existing issue, please include "related #nnn" in your
|
||||||
commit message and your description; but you should still explain what
|
commit message and your description; but you should still explain what
|
||||||
the change does. Also please make sure that if this PR has an attached JIRA, put AAP-<number>
|
the change does.
|
||||||
in as the first entry for your PR title.
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
##### ISSUE TYPE
|
##### ISSUE TYPE
|
||||||
@@ -17,11 +16,17 @@ in as the first entry for your PR title.
|
|||||||
##### COMPONENT NAME
|
##### COMPONENT NAME
|
||||||
<!--- Name of the module/plugin/module/task -->
|
<!--- Name of the module/plugin/module/task -->
|
||||||
- API
|
- API
|
||||||
|
- UI
|
||||||
- Collection
|
- Collection
|
||||||
- CLI
|
- CLI
|
||||||
- Docs
|
- Docs
|
||||||
- Other
|
- Other
|
||||||
|
|
||||||
|
##### AWX VERSION
|
||||||
|
<!--- Paste verbatim output from `make VERSION` between quotes below -->
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
##### ADDITIONAL INFORMATION
|
##### ADDITIONAL INFORMATION
|
||||||
|
|||||||
7
.github/dependabot.yml
vendored
7
.github/dependabot.yml
vendored
@@ -8,10 +8,3 @@ updates:
|
|||||||
labels:
|
labels:
|
||||||
- "docs"
|
- "docs"
|
||||||
- "dependencies"
|
- "dependencies"
|
||||||
- package-ecosystem: "pip"
|
|
||||||
directory: "requirements/"
|
|
||||||
schedule:
|
|
||||||
interval: "daily" #run daily until we trust it, then back this off to weekly
|
|
||||||
open-pull-requests-limit: 2
|
|
||||||
labels:
|
|
||||||
- "dependencies"
|
|
||||||
|
|||||||
64
.github/workflows/ci.yml
vendored
64
.github/workflows/ci.yml
vendored
@@ -172,10 +172,9 @@ jobs:
|
|||||||
repository: ansible/awx-operator
|
repository: ansible/awx-operator
|
||||||
path: awx-operator
|
path: awx-operator
|
||||||
|
|
||||||
- name: Setup python, referencing action at awx relative path
|
- uses: ./awx/.github/actions/setup-python
|
||||||
uses: ./awx/.github/actions/setup-python
|
|
||||||
with:
|
with:
|
||||||
python-version: '3.x'
|
working-directory: awx
|
||||||
|
|
||||||
- name: Install playbook dependencies
|
- name: Install playbook dependencies
|
||||||
run: |
|
run: |
|
||||||
@@ -336,7 +335,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
name: coverage-${{ matrix.target-regex.name }}
|
name: coverage-${{ matrix.target-regex.name }}
|
||||||
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
- uses: ./.github/actions/upload_awx_devel_logs
|
- uses: ./.github/actions/upload_awx_devel_logs
|
||||||
if: always()
|
if: always()
|
||||||
@@ -354,7 +352,6 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
persist-credentials: false
|
|
||||||
show-progress: false
|
show-progress: false
|
||||||
|
|
||||||
- uses: ./.github/actions/setup-python
|
- uses: ./.github/actions/setup-python
|
||||||
@@ -364,12 +361,23 @@ jobs:
|
|||||||
- name: Upgrade ansible-core
|
- name: Upgrade ansible-core
|
||||||
run: python3 -m pip install --upgrade ansible-core
|
run: python3 -m pip install --upgrade ansible-core
|
||||||
|
|
||||||
- name: Download coverage artifacts
|
- name: Download coverage artifacts A to H
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
merge-multiple: true
|
name: coverage-a-h
|
||||||
|
path: coverage
|
||||||
|
|
||||||
|
- name: Download coverage artifacts I to P
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: coverage-i-p
|
||||||
|
path: coverage
|
||||||
|
|
||||||
|
- name: Download coverage artifacts Z to Z
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: coverage-r-z0-9
|
||||||
path: coverage
|
path: coverage
|
||||||
pattern: coverage-*
|
|
||||||
|
|
||||||
- name: Combine coverage
|
- name: Combine coverage
|
||||||
run: |
|
run: |
|
||||||
@@ -387,6 +395,46 @@ jobs:
|
|||||||
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
|
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
|
||||||
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
|
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
# This is a huge hack, there's no official action for removing artifacts currently.
|
||||||
|
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
|
||||||
|
# steps, so we have to use github-script to get them.
|
||||||
|
#
|
||||||
|
# The advantage of doing this, though, is that we save on artifact storage space.
|
||||||
|
|
||||||
|
- name: Get secret artifact runtime URL
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
id: get-runtime-url
|
||||||
|
with:
|
||||||
|
result-encoding: string
|
||||||
|
script: |
|
||||||
|
const { ACTIONS_RUNTIME_URL } = process.env;
|
||||||
|
return ACTIONS_RUNTIME_URL;
|
||||||
|
|
||||||
|
- name: Get secret artifact runtime token
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
id: get-runtime-token
|
||||||
|
with:
|
||||||
|
result-encoding: string
|
||||||
|
script: |
|
||||||
|
const { ACTIONS_RUNTIME_TOKEN } = process.env;
|
||||||
|
return ACTIONS_RUNTIME_TOKEN;
|
||||||
|
|
||||||
|
- name: Remove intermediary artifacts
|
||||||
|
env:
|
||||||
|
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
|
||||||
|
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
|
||||||
|
run: |
|
||||||
|
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
|
||||||
|
artifacts=$(
|
||||||
|
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
|
||||||
|
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
|
||||||
|
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
|
||||||
|
)
|
||||||
|
|
||||||
|
for artifact in $artifacts; do
|
||||||
|
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
|
||||||
|
done
|
||||||
|
|
||||||
- name: Upload coverage report as artifact
|
- name: Upload coverage report as artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
|
|||||||
1
.github/workflows/devel_images.yml
vendored
1
.github/workflows/devel_images.yml
vendored
@@ -10,7 +10,6 @@ on:
|
|||||||
- devel
|
- devel
|
||||||
- release_*
|
- release_*
|
||||||
- feature_*
|
- feature_*
|
||||||
- stable-*
|
|
||||||
jobs:
|
jobs:
|
||||||
push-development-images:
|
push-development-images:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|||||||
85
.github/workflows/sonarcloud_pr.yml
vendored
85
.github/workflows/sonarcloud_pr.yml
vendored
@@ -1,85 +0,0 @@
|
|||||||
---
|
|
||||||
name: SonarQube
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_run:
|
|
||||||
workflows:
|
|
||||||
- CI
|
|
||||||
types:
|
|
||||||
- completed
|
|
||||||
|
|
||||||
permissions: read-all
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
sonarqube:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
|
|
||||||
steps:
|
|
||||||
- name: Checkout Code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
show-progress: false
|
|
||||||
|
|
||||||
- name: Download coverage report artifact
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: coverage-report
|
|
||||||
path: reports/
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run-id: ${{ github.event.workflow_run.id }}
|
|
||||||
|
|
||||||
- name: Download PR number artifact
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: pr-number
|
|
||||||
path: .
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run-id: ${{ github.event.workflow_run.id }}
|
|
||||||
|
|
||||||
- name: Extract PR number
|
|
||||||
run: |
|
|
||||||
cat pr-number.txt
|
|
||||||
echo "PR_NUMBER=$(cat pr-number.txt)" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Get PR info
|
|
||||||
uses: octokit/request-action@v2.x
|
|
||||||
id: pr_info
|
|
||||||
with:
|
|
||||||
route: GET /repos/{repo}/pulls/{number}
|
|
||||||
repo: ${{ github.event.repository.full_name }}
|
|
||||||
number: ${{ env.PR_NUMBER }}
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Set PR info into env
|
|
||||||
run: |
|
|
||||||
echo "PR_BASE=${{ fromJson(steps.pr_info.outputs.data).base.ref }}" >> $GITHUB_ENV
|
|
||||||
echo "PR_HEAD=${{ fromJson(steps.pr_info.outputs.data).head.ref }}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Add base branch
|
|
||||||
run: |
|
|
||||||
gh pr checkout ${{ env.PR_NUMBER }}
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Extract and export repo owner/name
|
|
||||||
run: |
|
|
||||||
REPO_SLUG="${GITHUB_REPOSITORY}"
|
|
||||||
IFS="/" read -r REPO_OWNER REPO_NAME <<< "$REPO_SLUG"
|
|
||||||
echo "REPO_OWNER=$REPO_OWNER" >> $GITHUB_ENV
|
|
||||||
echo "REPO_NAME=$REPO_NAME" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: SonarQube scan
|
|
||||||
uses: SonarSource/sonarqube-scan-action@v5
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
SONAR_TOKEN: ${{ secrets[format('{0}', vars.SONAR_TOKEN_SECRET_NAME)] }}
|
|
||||||
with:
|
|
||||||
args: >
|
|
||||||
-Dsonar.organization=${{ env.REPO_OWNER }}
|
|
||||||
-Dsonar.projectKey=${{ env.REPO_OWNER }}_${{ env.REPO_NAME }}
|
|
||||||
-Dsonar.pullrequest.key=${{ env.PR_NUMBER }}
|
|
||||||
-Dsonar.pullrequest.branch=${{ env.PR_HEAD }}
|
|
||||||
-Dsonar.pullrequest.base=${{ env.PR_BASE }}
|
|
||||||
-Dsonar.scm.revision=${{ github.event.workflow_run.head_sha }}
|
|
||||||
4
.github/workflows/stage.yml
vendored
4
.github/workflows/stage.yml
vendored
@@ -85,11 +85,9 @@ jobs:
|
|||||||
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
|
cp ../awx-logos/awx/ui/client/assets/* awx/ui/public/static/media/
|
||||||
|
|
||||||
- name: Setup node and npm for new UI build
|
- name: Setup node and npm for new UI build
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v2
|
||||||
with:
|
with:
|
||||||
node-version: '18'
|
node-version: '18'
|
||||||
cache: 'npm'
|
|
||||||
cache-dependency-path: awx/awx/ui/**/package-lock.json
|
|
||||||
|
|
||||||
- name: Prebuild new UI for awx image (to speed up build process)
|
- name: Prebuild new UI for awx image (to speed up build process)
|
||||||
working-directory: awx
|
working-directory: awx
|
||||||
|
|||||||
42
.github/workflows/upload_schema.yml
vendored
42
.github/workflows/upload_schema.yml
vendored
@@ -11,7 +11,6 @@ on:
|
|||||||
- devel
|
- devel
|
||||||
- release_**
|
- release_**
|
||||||
- feature_**
|
- feature_**
|
||||||
- stable-**
|
|
||||||
jobs:
|
jobs:
|
||||||
push:
|
push:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -24,26 +23,35 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
show-progress: false
|
show-progress: false
|
||||||
|
|
||||||
- name: Build awx_devel image to use for schema gen
|
- uses: ./.github/actions/setup-python
|
||||||
uses: ./.github/actions/awx_devel_image
|
|
||||||
|
- name: Log in to registry
|
||||||
|
run: |
|
||||||
|
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||||
|
|
||||||
|
- uses: ./.github/actions/setup-ssh-agent
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
|
||||||
private-github-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
|
|
||||||
|
- name: Pre-pull image to warm build cache
|
||||||
|
run: |
|
||||||
|
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
|
||||||
|
|
||||||
|
- name: Build image
|
||||||
|
run: |
|
||||||
|
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
|
||||||
|
|
||||||
- name: Generate API Schema
|
- name: Generate API Schema
|
||||||
run: |
|
run: |
|
||||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
|
|
||||||
COMPOSE_TAG=${{ github.base_ref || github.ref_name }} \
|
|
||||||
docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \
|
docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \
|
||||||
--workdir=/awx_devel `make print-DEVEL_IMAGE_NAME` /start_tests.sh genschema
|
--workdir=/awx_devel ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} /start_tests.sh genschema
|
||||||
|
|
||||||
- name: Upload API Schema
|
- name: Upload API Schema
|
||||||
uses: keithweaver/aws-s3-github-action@4dd5a7b81d54abaa23bbac92b27e85d7f405ae53
|
env:
|
||||||
with:
|
AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
|
||||||
command: cp
|
AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
|
||||||
source: ${{ github.workspace }}/schema.json
|
AWS_REGION: 'us-east-1'
|
||||||
destination: s3://awx-public-ci-files/${{ github.ref_name }}/schema.json
|
run: |
|
||||||
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY }}
|
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
|
||||||
aws_secret_access_key: ${{ secrets.AWS_SECRET_KEY }}
|
ansible localhost -c local -m aws_s3 \
|
||||||
aws_region: us-east-1
|
-a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"
|
||||||
flags: --acl public-read --only-show-errors
|
|
||||||
|
|||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -150,8 +150,6 @@ use_dev_supervisor.txt
|
|||||||
|
|
||||||
awx/ui/src
|
awx/ui/src
|
||||||
awx/ui/build
|
awx/ui/build
|
||||||
awx/ui/.ui-built
|
|
||||||
awx/ui_next
|
|
||||||
|
|
||||||
# Docs build stuff
|
# Docs build stuff
|
||||||
docs/docsite/build/
|
docs/docsite/build/
|
||||||
|
|||||||
22
Makefile
22
Makefile
@@ -19,12 +19,6 @@ COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d .
|
|||||||
COLLECTION_SANITY_ARGS ?= --docker
|
COLLECTION_SANITY_ARGS ?= --docker
|
||||||
# collection unit testing directories
|
# collection unit testing directories
|
||||||
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
|
||||||
# pytest added args to collect coverage
|
|
||||||
COVERAGE_ARGS ?= --cov --cov-report=xml --junitxml=reports/junit.xml
|
|
||||||
# pytest test directories
|
|
||||||
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
|
|
||||||
# pytest args to run tests in parallel
|
|
||||||
PARALLEL_TESTS ?= -n auto
|
|
||||||
# collection integration test directories (defaults to all)
|
# collection integration test directories (defaults to all)
|
||||||
COLLECTION_TEST_TARGET ?=
|
COLLECTION_TEST_TARGET ?=
|
||||||
# args for collection install
|
# args for collection install
|
||||||
@@ -77,7 +71,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
|||||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
|
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
|
||||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||||
# to install the actual requirements
|
# to install the actual requirements
|
||||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==80.9.0 setuptools_scm[toml]==8.0.4 wheel==0.42.0 cython==3.1.3
|
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==70.3.0 setuptools_scm[toml]==8.1.0 wheel==0.45.1 cython==3.0.11
|
||||||
|
|
||||||
NAME ?= awx
|
NAME ?= awx
|
||||||
|
|
||||||
@@ -315,14 +309,14 @@ black: reports
|
|||||||
@chmod +x .git/hooks/pre-commit
|
@chmod +x .git/hooks/pre-commit
|
||||||
|
|
||||||
genschema: reports
|
genschema: reports
|
||||||
$(MAKE) swagger PYTEST_ADDOPTS="--genschema --create-db "
|
$(MAKE) swagger PYTEST_ARGS="--genschema --create-db "
|
||||||
mv swagger.json schema.json
|
mv swagger.json schema.json
|
||||||
|
|
||||||
swagger: reports
|
swagger: reports
|
||||||
@if [ "$(VENV_BASE)" ]; then \
|
@if [ "$(VENV_BASE)" ]; then \
|
||||||
. $(VENV_BASE)/awx/bin/activate; \
|
. $(VENV_BASE)/awx/bin/activate; \
|
||||||
fi; \
|
fi; \
|
||||||
(set -o pipefail && py.test $(COVERAGE_ARGS) $(PARALLEL_TESTS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
|
(set -o pipefail && py.test --cov --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
|
||||||
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
||||||
then \
|
then \
|
||||||
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
||||||
@@ -340,12 +334,14 @@ api-lint:
|
|||||||
awx-link:
|
awx-link:
|
||||||
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev
|
||||||
|
|
||||||
|
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
|
||||||
|
PYTEST_ARGS ?= -n auto
|
||||||
## Run all API unit tests.
|
## Run all API unit tests.
|
||||||
test:
|
test:
|
||||||
if [ "$(VENV_BASE)" ]; then \
|
if [ "$(VENV_BASE)" ]; then \
|
||||||
. $(VENV_BASE)/awx/bin/activate; \
|
. $(VENV_BASE)/awx/bin/activate; \
|
||||||
fi; \
|
fi; \
|
||||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PARALLEL_TESTS) $(TEST_DIRS)
|
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PYTEST_ARGS) $(TEST_DIRS)
|
||||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||||
|
|
||||||
@@ -354,7 +350,7 @@ live_test:
|
|||||||
|
|
||||||
## Run all API unit tests with coverage enabled.
|
## Run all API unit tests with coverage enabled.
|
||||||
test_coverage:
|
test_coverage:
|
||||||
$(MAKE) test PYTEST_ADDOPTS="--create-db $(COVERAGE_ARGS)"
|
$(MAKE) test PYTEST_ARGS="--create-db --cov --cov-report=xml --junitxml=reports/junit.xml"
|
||||||
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
||||||
then \
|
then \
|
||||||
echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
||||||
@@ -362,7 +358,7 @@ test_coverage:
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
test_migrations:
|
test_migrations:
|
||||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db $(PARALLEL_TESTS) $(COVERAGE_ARGS) $(TEST_DIRS)
|
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db --cov=awx --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) $(TEST_DIRS)
|
||||||
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
||||||
then \
|
then \
|
||||||
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
||||||
@@ -380,7 +376,7 @@ test_collection:
|
|||||||
fi && \
|
fi && \
|
||||||
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
|
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
|
||||||
ansible --version
|
ansible --version
|
||||||
py.test $(COLLECTION_TEST_DIRS) $(COVERAGE_ARGS) -v
|
py.test $(COLLECTION_TEST_DIRS) --cov --cov-report=xml --junitxml=reports/junit.xml -v
|
||||||
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
|
||||||
then \
|
then \
|
||||||
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \
|
||||||
|
|||||||
@@ -844,7 +844,7 @@ class ResourceAccessList(ParentMixin, ListAPIView):
|
|||||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||||
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
|
ancestors = set(RoleEvaluation.objects.filter(content_type_id=content_type.id, object_id=obj.id).values_list('role_id', flat=True))
|
||||||
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
|
qs = User.objects.filter(has_roles__in=ancestors) | User.objects.filter(is_superuser=True)
|
||||||
auditor_role = RoleDefinition.objects.filter(name="Platform Auditor").first()
|
auditor_role = RoleDefinition.objects.filter(name="Controller System Auditor").first()
|
||||||
if auditor_role:
|
if auditor_role:
|
||||||
qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
|
qs |= User.objects.filter(role_assignments__role_definition=auditor_role)
|
||||||
return qs.distinct()
|
return qs.distinct()
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ from rest_framework import permissions
|
|||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.access import check_user_access
|
from awx.main.access import check_user_access
|
||||||
from awx.main.models import Inventory, UnifiedJob, Organization
|
from awx.main.models import Inventory, UnifiedJob
|
||||||
from awx.main.utils import get_object_or_400
|
from awx.main.utils import get_object_or_400
|
||||||
|
|
||||||
logger = logging.getLogger('awx.api.permissions')
|
logger = logging.getLogger('awx.api.permissions')
|
||||||
@@ -228,19 +228,12 @@ class InventoryInventorySourcesUpdatePermission(ModelAccessPermission):
|
|||||||
class UserPermission(ModelAccessPermission):
|
class UserPermission(ModelAccessPermission):
|
||||||
def check_post_permissions(self, request, view, obj=None):
|
def check_post_permissions(self, request, view, obj=None):
|
||||||
if not request.data:
|
if not request.data:
|
||||||
return Organization.access_qs(request.user, 'change').exists()
|
return request.user.admin_of_organizations.exists()
|
||||||
elif request.user.is_superuser:
|
elif request.user.is_superuser:
|
||||||
return True
|
return True
|
||||||
raise PermissionDenied()
|
raise PermissionDenied()
|
||||||
|
|
||||||
|
|
||||||
class IsSystemAdmin(permissions.BasePermission):
|
|
||||||
def has_permission(self, request, view):
|
|
||||||
if not (request.user and request.user.is_authenticated):
|
|
||||||
return False
|
|
||||||
return request.user.is_superuser
|
|
||||||
|
|
||||||
|
|
||||||
class IsSystemAdminOrAuditor(permissions.BasePermission):
|
class IsSystemAdminOrAuditor(permissions.BasePermission):
|
||||||
"""
|
"""
|
||||||
Allows write access only to system admin users.
|
Allows write access only to system admin users.
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import json
|
|||||||
import logging
|
import logging
|
||||||
import re
|
import re
|
||||||
import yaml
|
import yaml
|
||||||
import urllib.parse
|
|
||||||
from collections import Counter, OrderedDict
|
from collections import Counter, OrderedDict
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
@@ -117,7 +116,6 @@ from awx.main.utils import (
|
|||||||
from awx.main.utils.filters import SmartFilter
|
from awx.main.utils.filters import SmartFilter
|
||||||
from awx.main.utils.plugins import load_combined_inventory_source_options
|
from awx.main.utils.plugins import load_combined_inventory_source_options
|
||||||
from awx.main.utils.named_url_graph import reset_counters
|
from awx.main.utils.named_url_graph import reset_counters
|
||||||
from awx.main.utils.inventory_vars import update_group_variables
|
|
||||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||||
from awx.main.signals import update_inventory_computed_fields
|
from awx.main.signals import update_inventory_computed_fields
|
||||||
@@ -734,22 +732,7 @@ class EmptySerializer(serializers.Serializer):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
class OpaQueryPathMixin(serializers.Serializer):
|
class UnifiedJobTemplateSerializer(BaseSerializer):
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super().__init__(*args, **kwargs)
|
|
||||||
|
|
||||||
def validate_opa_query_path(self, value):
|
|
||||||
# Decode the URL and re-encode it
|
|
||||||
decoded_value = urllib.parse.unquote(value)
|
|
||||||
re_encoded_value = urllib.parse.quote(decoded_value, safe='/')
|
|
||||||
|
|
||||||
if value != re_encoded_value:
|
|
||||||
raise serializers.ValidationError(_("The URL must be properly encoded."))
|
|
||||||
|
|
||||||
return value
|
|
||||||
|
|
||||||
|
|
||||||
class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathMixin):
|
|
||||||
# As a base serializer, the capabilities prefetch is not used directly,
|
# As a base serializer, the capabilities prefetch is not used directly,
|
||||||
# instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
|
# instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
|
||||||
capabilities_prefetch = []
|
capabilities_prefetch = []
|
||||||
@@ -1182,12 +1165,12 @@ class UserActivityStreamSerializer(UserSerializer):
|
|||||||
fields = ('*', '-is_system_auditor')
|
fields = ('*', '-is_system_auditor')
|
||||||
|
|
||||||
|
|
||||||
class OrganizationSerializer(BaseSerializer, OpaQueryPathMixin):
|
class OrganizationSerializer(BaseSerializer):
|
||||||
show_capabilities = ['edit', 'delete']
|
show_capabilities = ['edit', 'delete']
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
model = Organization
|
model = Organization
|
||||||
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment', 'opa_query_path')
|
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment')
|
||||||
read_only_fields = ('*', 'custom_virtualenv')
|
read_only_fields = ('*', 'custom_virtualenv')
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -1541,7 +1524,7 @@ class LabelsListMixin(object):
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathMixin):
|
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
|
||||||
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
|
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
|
||||||
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]
|
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]
|
||||||
|
|
||||||
@@ -1562,7 +1545,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery
|
|||||||
'inventory_sources_with_failures',
|
'inventory_sources_with_failures',
|
||||||
'pending_deletion',
|
'pending_deletion',
|
||||||
'prevent_instance_group_fallback',
|
'prevent_instance_group_fallback',
|
||||||
'opa_query_path',
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_related(self, obj):
|
def get_related(self, obj):
|
||||||
@@ -1632,68 +1614,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery
|
|||||||
|
|
||||||
if kind == 'smart' and not host_filter:
|
if kind == 'smart' and not host_filter:
|
||||||
raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')})
|
raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')})
|
||||||
|
|
||||||
return super(InventorySerializer, self).validate(attrs)
|
return super(InventorySerializer, self).validate(attrs)
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _update_variables(variables, inventory_id):
|
|
||||||
"""
|
|
||||||
Update the inventory variables of the 'all'-group.
|
|
||||||
|
|
||||||
The variables field contains vars from the inventory dialog, hence
|
|
||||||
representing the "all"-group variables.
|
|
||||||
|
|
||||||
Since this is not an update from an inventory source, we update the
|
|
||||||
variables when the inventory details form is saved.
|
|
||||||
|
|
||||||
A user edit on the inventory variables is considered a reset of the
|
|
||||||
variables update history. Particularly if the user removes a variable by
|
|
||||||
editing the inventory variables field, the variable is not supposed to
|
|
||||||
reappear with a value from a previous inventory source update.
|
|
||||||
|
|
||||||
We achieve this by forcing `reset=True` on such an update.
|
|
||||||
|
|
||||||
As a side-effect, variables which have been set by source updates and
|
|
||||||
have survived a user-edit (i.e. they have not been deleted from the
|
|
||||||
variables field) will be assumed to originate from the user edit and are
|
|
||||||
thus no longer deleted from the inventory when they are removed from
|
|
||||||
their original source!
|
|
||||||
|
|
||||||
Note that we use the inventory source id -1 for user-edit updates
|
|
||||||
because a regular inventory source cannot have an id of -1 since
|
|
||||||
PostgreSQL assigns pk's starting from 1 (if this assumption doesn't hold
|
|
||||||
true, we have to assign another special value for invsrc_id).
|
|
||||||
|
|
||||||
:param str variables: The variables as plain text in yaml or json
|
|
||||||
format.
|
|
||||||
:param int inventory_id: The primary key of the related inventory
|
|
||||||
object.
|
|
||||||
"""
|
|
||||||
variables_dict = parse_yaml_or_json(variables, silent_failure=False)
|
|
||||||
logger.debug(f"InventorySerializer._update_variables: {inventory_id=} {variables_dict=}, {variables=}")
|
|
||||||
update_group_variables(
|
|
||||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
|
||||||
newvars=variables_dict,
|
|
||||||
dbvars=None,
|
|
||||||
invsrc_id=-1,
|
|
||||||
inventory_id=inventory_id,
|
|
||||||
reset=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
def create(self, validated_data):
|
|
||||||
"""Called when a new inventory has to be created."""
|
|
||||||
logger.debug(f"InventorySerializer.create({validated_data=}) >>>>")
|
|
||||||
obj = super().create(validated_data)
|
|
||||||
self._update_variables(validated_data.get("variables") or "", obj.id)
|
|
||||||
return obj
|
|
||||||
|
|
||||||
def update(self, obj, validated_data):
|
|
||||||
"""Called when an existing inventory is updated."""
|
|
||||||
logger.debug(f"InventorySerializer.update({validated_data=}) >>>>")
|
|
||||||
obj = super().update(obj, validated_data)
|
|
||||||
self._update_variables(validated_data.get("variables") or "", obj.id)
|
|
||||||
return obj
|
|
||||||
|
|
||||||
|
|
||||||
class ConstructedFieldMixin(serializers.Field):
|
class ConstructedFieldMixin(serializers.Field):
|
||||||
def get_attribute(self, instance):
|
def get_attribute(self, instance):
|
||||||
@@ -1983,12 +1905,10 @@ class GroupSerializer(BaseSerializerWithVariables):
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
def validate(self, attrs):
|
def validate(self, attrs):
|
||||||
# Do not allow the group name to conflict with an existing host name.
|
|
||||||
name = force_str(attrs.get('name', self.instance and self.instance.name or ''))
|
name = force_str(attrs.get('name', self.instance and self.instance.name or ''))
|
||||||
inventory = attrs.get('inventory', self.instance and self.instance.inventory or '')
|
inventory = attrs.get('inventory', self.instance and self.instance.inventory or '')
|
||||||
if Host.objects.filter(name=name, inventory=inventory).exists():
|
if Host.objects.filter(name=name, inventory=inventory).exists():
|
||||||
raise serializers.ValidationError(_('A Host with that name already exists.'))
|
raise serializers.ValidationError(_('A Host with that name already exists.'))
|
||||||
#
|
|
||||||
return super(GroupSerializer, self).validate(attrs)
|
return super(GroupSerializer, self).validate(attrs)
|
||||||
|
|
||||||
def validate_name(self, value):
|
def validate_name(self, value):
|
||||||
@@ -2839,7 +2759,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
|||||||
{
|
{
|
||||||
"role": {
|
"role": {
|
||||||
"id": None,
|
"id": None,
|
||||||
"name": _("Platform Auditor"),
|
"name": _("Controller System Auditor"),
|
||||||
"description": _("Can view all aspects of the system"),
|
"description": _("Can view all aspects of the system"),
|
||||||
"user_capabilities": {"unattach": False},
|
"user_capabilities": {"unattach": False},
|
||||||
},
|
},
|
||||||
@@ -3027,6 +2947,11 @@ class CredentialSerializer(BaseSerializer):
|
|||||||
ret.remove(field)
|
ret.remove(field)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
def validate_organization(self, org):
|
||||||
|
if self.instance and (not self.instance.managed) and self.instance.credential_type.kind == 'galaxy' and org is None:
|
||||||
|
raise serializers.ValidationError(_("Galaxy credentials must be owned by an Organization."))
|
||||||
|
return org
|
||||||
|
|
||||||
def validate_credential_type(self, credential_type):
|
def validate_credential_type(self, credential_type):
|
||||||
if self.instance and credential_type.pk != self.instance.credential_type.pk:
|
if self.instance and credential_type.pk != self.instance.credential_type.pk:
|
||||||
for related_objects in (
|
for related_objects in (
|
||||||
@@ -3102,6 +3027,9 @@ class CredentialSerializerCreate(CredentialSerializer):
|
|||||||
if attrs.get('team'):
|
if attrs.get('team'):
|
||||||
attrs['organization'] = attrs['team'].organization
|
attrs['organization'] = attrs['team'].organization
|
||||||
|
|
||||||
|
if 'credential_type' in attrs and attrs['credential_type'].kind == 'galaxy' and list(owner_fields) != ['organization']:
|
||||||
|
raise serializers.ValidationError({"organization": _("Galaxy credentials must be owned by an Organization.")})
|
||||||
|
|
||||||
return super(CredentialSerializerCreate, self).validate(attrs)
|
return super(CredentialSerializerCreate, self).validate(attrs)
|
||||||
|
|
||||||
def create(self, validated_data):
|
def create(self, validated_data):
|
||||||
@@ -3319,7 +3247,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
|||||||
'webhook_service',
|
'webhook_service',
|
||||||
'webhook_credential',
|
'webhook_credential',
|
||||||
'prevent_instance_group_fallback',
|
'prevent_instance_group_fallback',
|
||||||
'opa_query_path',
|
|
||||||
)
|
)
|
||||||
read_only_fields = ('*', 'custom_virtualenv')
|
read_only_fields = ('*', 'custom_virtualenv')
|
||||||
|
|
||||||
@@ -5998,7 +5925,7 @@ class InstanceGroupSerializer(BaseSerializer):
|
|||||||
if self.instance and not self.instance.is_container_group:
|
if self.instance and not self.instance.is_container_group:
|
||||||
raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))
|
raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))
|
||||||
|
|
||||||
pod_spec_override_json = {}
|
pod_spec_override_json = None
|
||||||
# defect if the value is yaml or json if yaml convert to json
|
# defect if the value is yaml or json if yaml convert to json
|
||||||
try:
|
try:
|
||||||
# convert yaml to json
|
# convert yaml to json
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
|
|
||||||
from django.urls import re_path
|
from django.urls import re_path
|
||||||
|
|
||||||
from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList
|
from awx.api.views import RoleList, RoleDetail, RoleUsersList, RoleTeamsList, RoleParentsList, RoleChildrenList
|
||||||
|
|
||||||
|
|
||||||
urls = [
|
urls = [
|
||||||
@@ -11,6 +11,8 @@ urls = [
|
|||||||
re_path(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'),
|
re_path(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'),
|
||||||
re_path(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'),
|
re_path(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'),
|
||||||
re_path(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'),
|
re_path(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'),
|
||||||
|
re_path(r'^(?P<pk>[0-9]+)/parents/$', RoleParentsList.as_view(), name='role_parents_list'),
|
||||||
|
re_path(r'^(?P<pk>[0-9]+)/children/$', RoleChildrenList.as_view(), name='role_children_list'),
|
||||||
]
|
]
|
||||||
|
|
||||||
__all__ = ['urls']
|
__all__ = ['urls']
|
||||||
|
|||||||
@@ -55,7 +55,8 @@ from wsgiref.util import FileWrapper
|
|||||||
|
|
||||||
# django-ansible-base
|
# django-ansible-base
|
||||||
from ansible_base.lib.utils.requests import get_remote_hosts
|
from ansible_base.lib.utils.requests import get_remote_hosts
|
||||||
from ansible_base.rbac.models import RoleEvaluation
|
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
|
||||||
|
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields
|
||||||
@@ -84,6 +85,7 @@ from awx.api.generics import (
|
|||||||
from awx.api.views.labels import LabelSubListCreateAttachDetachView
|
from awx.api.views.labels import LabelSubListCreateAttachDetachView
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
from awx.main import models
|
from awx.main import models
|
||||||
|
from awx.main.models.rbac import get_role_definition
|
||||||
from awx.main.utils import (
|
from awx.main.utils import (
|
||||||
camelcase_to_underscore,
|
camelcase_to_underscore,
|
||||||
extract_ansible_vars,
|
extract_ansible_vars,
|
||||||
@@ -669,16 +671,81 @@ class ScheduleUnifiedJobsList(SubListAPIView):
|
|||||||
name = _('Schedule Jobs List')
|
name = _('Schedule Jobs List')
|
||||||
|
|
||||||
|
|
||||||
|
def immutablesharedfields(cls):
|
||||||
|
'''
|
||||||
|
Class decorator to prevent modifying shared resources when ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is set to False.
|
||||||
|
|
||||||
|
Works by overriding these view methods:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- perform_update
|
||||||
|
create and delete are overridden to raise a PermissionDenied exception.
|
||||||
|
perform_update is overridden to check if any shared fields are being modified,
|
||||||
|
and raise a PermissionDenied exception if so.
|
||||||
|
'''
|
||||||
|
# create instead of perform_create because some of our views
|
||||||
|
# override create instead of perform_create
|
||||||
|
if hasattr(cls, 'create'):
|
||||||
|
cls.original_create = cls.create
|
||||||
|
|
||||||
|
@functools.wraps(cls.create)
|
||||||
|
def create_wrapper(*args, **kwargs):
|
||||||
|
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||||
|
return cls.original_create(*args, **kwargs)
|
||||||
|
raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})
|
||||||
|
|
||||||
|
cls.create = create_wrapper
|
||||||
|
|
||||||
|
if hasattr(cls, 'delete'):
|
||||||
|
cls.original_delete = cls.delete
|
||||||
|
|
||||||
|
@functools.wraps(cls.delete)
|
||||||
|
def delete_wrapper(*args, **kwargs):
|
||||||
|
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||||
|
return cls.original_delete(*args, **kwargs)
|
||||||
|
raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})
|
||||||
|
|
||||||
|
cls.delete = delete_wrapper
|
||||||
|
|
||||||
|
if hasattr(cls, 'perform_update'):
|
||||||
|
cls.original_perform_update = cls.perform_update
|
||||||
|
|
||||||
|
@functools.wraps(cls.perform_update)
|
||||||
|
def update_wrapper(*args, **kwargs):
|
||||||
|
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||||
|
view, serializer = args
|
||||||
|
instance = view.get_object()
|
||||||
|
if instance:
|
||||||
|
if isinstance(instance, models.Organization):
|
||||||
|
shared_fields = OrganizationType._declared_fields.keys()
|
||||||
|
elif isinstance(instance, models.User):
|
||||||
|
shared_fields = UserType._declared_fields.keys()
|
||||||
|
elif isinstance(instance, models.Team):
|
||||||
|
shared_fields = TeamType._declared_fields.keys()
|
||||||
|
attrs = serializer.validated_data
|
||||||
|
for field in shared_fields:
|
||||||
|
if field in attrs and getattr(instance, field) != attrs[field]:
|
||||||
|
raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
|
||||||
|
return cls.original_perform_update(*args, **kwargs)
|
||||||
|
|
||||||
|
cls.perform_update = update_wrapper
|
||||||
|
|
||||||
|
return cls
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class TeamList(ListCreateAPIView):
|
class TeamList(ListCreateAPIView):
|
||||||
model = models.Team
|
model = models.Team
|
||||||
serializer_class = serializers.TeamSerializer
|
serializer_class = serializers.TeamSerializer
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class TeamDetail(RetrieveUpdateDestroyAPIView):
|
class TeamDetail(RetrieveUpdateDestroyAPIView):
|
||||||
model = models.Team
|
model = models.Team
|
||||||
serializer_class = serializers.TeamSerializer
|
serializer_class = serializers.TeamSerializer
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class TeamUsersList(BaseUsersList):
|
class TeamUsersList(BaseUsersList):
|
||||||
model = models.User
|
model = models.User
|
||||||
serializer_class = serializers.UserSerializer
|
serializer_class = serializers.UserSerializer
|
||||||
@@ -720,19 +787,9 @@ class TeamRolesList(SubListAttachDetachAPIView):
|
|||||||
team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
|
team = get_object_or_404(models.Team, pk=self.kwargs['pk'])
|
||||||
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
||||||
if role.content_type == credential_content_type:
|
if role.content_type == credential_content_type:
|
||||||
if not role.content_object.organization:
|
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
|
||||||
data = dict(
|
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
|
||||||
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
|
|
||||||
)
|
|
||||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||||
elif role.content_object.organization.id != team.organization.id:
|
|
||||||
if not request.user.is_superuser:
|
|
||||||
data = dict(
|
|
||||||
msg=_(
|
|
||||||
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
|
||||||
|
|
||||||
return super(TeamRolesList, self).post(request, *args, **kwargs)
|
return super(TeamRolesList, self).post(request, *args, **kwargs)
|
||||||
|
|
||||||
@@ -759,9 +816,17 @@ class TeamProjectsList(SubListAPIView):
|
|||||||
def get_queryset(self):
|
def get_queryset(self):
|
||||||
team = self.get_parent_object()
|
team = self.get_parent_object()
|
||||||
self.check_parent_access(team)
|
self.check_parent_access(team)
|
||||||
my_qs = self.model.accessible_objects(self.request.user, 'read_role')
|
model_ct = ContentType.objects.get_for_model(self.model)
|
||||||
team_qs = models.Project.accessible_objects(team, 'read_role')
|
parent_ct = ContentType.objects.get_for_model(self.parent_model)
|
||||||
return my_qs & team_qs
|
|
||||||
|
rd = get_role_definition(team.member_role)
|
||||||
|
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()
|
||||||
|
if role is None:
|
||||||
|
# Team has no permissions, therefore team has no projects
|
||||||
|
return self.model.objects.none()
|
||||||
|
else:
|
||||||
|
project_qs = self.model.accessible_objects(self.request.user, 'read_role')
|
||||||
|
return project_qs.filter(id__in=RoleEvaluation.objects.filter(content_type_id=model_ct.id, role=role).values_list('object_id'))
|
||||||
|
|
||||||
|
|
||||||
class TeamActivityStreamList(SubListAPIView):
|
class TeamActivityStreamList(SubListAPIView):
|
||||||
@@ -876,23 +941,13 @@ class ProjectTeamsList(ListAPIView):
|
|||||||
serializer_class = serializers.TeamSerializer
|
serializer_class = serializers.TeamSerializer
|
||||||
|
|
||||||
def get_queryset(self):
|
def get_queryset(self):
|
||||||
parent = get_object_or_404(models.Project, pk=self.kwargs['pk'])
|
p = get_object_or_404(models.Project, pk=self.kwargs['pk'])
|
||||||
if not self.request.user.can_access(models.Project, 'read', parent):
|
if not self.request.user.can_access(models.Project, 'read', p):
|
||||||
raise PermissionDenied()
|
raise PermissionDenied()
|
||||||
|
project_ct = ContentType.objects.get_for_model(models.Project)
|
||||||
project_ct = ContentType.objects.get_for_model(parent)
|
|
||||||
team_ct = ContentType.objects.get_for_model(self.model)
|
team_ct = ContentType.objects.get_for_model(self.model)
|
||||||
|
all_roles = models.Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
|
||||||
roles_on_project = models.Role.objects.filter(
|
return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])
|
||||||
content_type=project_ct,
|
|
||||||
object_id=parent.pk,
|
|
||||||
)
|
|
||||||
|
|
||||||
team_member_parent_roles = models.Role.objects.filter(children__in=roles_on_project, role_field='member_role', content_type=team_ct).distinct()
|
|
||||||
|
|
||||||
team_ids = team_member_parent_roles.values_list('object_id', flat=True)
|
|
||||||
my_qs = self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=team_ids)
|
|
||||||
return my_qs
|
|
||||||
|
|
||||||
|
|
||||||
class ProjectSchedulesList(SubListCreateAPIView):
|
class ProjectSchedulesList(SubListCreateAPIView):
|
||||||
@@ -1072,6 +1127,7 @@ class ProjectCopy(CopyAPIView):
|
|||||||
copy_return_serializer_class = serializers.ProjectSerializer
|
copy_return_serializer_class = serializers.ProjectSerializer
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class UserList(ListCreateAPIView):
|
class UserList(ListCreateAPIView):
|
||||||
model = models.User
|
model = models.User
|
||||||
serializer_class = serializers.UserSerializer
|
serializer_class = serializers.UserSerializer
|
||||||
@@ -1128,6 +1184,14 @@ class UserRolesList(SubListAttachDetachAPIView):
|
|||||||
role = get_object_or_400(models.Role, pk=sub_id)
|
role = get_object_or_400(models.Role, pk=sub_id)
|
||||||
|
|
||||||
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
|
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
|
||||||
|
# Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
|
||||||
|
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||||
|
for model in [models.Organization, models.Team]:
|
||||||
|
ct = content_types[model]
|
||||||
|
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
|
||||||
|
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
|
||||||
|
return Response(data, status=status.HTTP_403_FORBIDDEN)
|
||||||
|
|
||||||
credential_content_type = content_types[models.Credential]
|
credential_content_type = content_types[models.Credential]
|
||||||
if role.content_type == credential_content_type:
|
if role.content_type == credential_content_type:
|
||||||
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||||
@@ -1162,6 +1226,7 @@ class UserOrganizationsList(OrganizationCountsMixin, SubListAPIView):
|
|||||||
model = models.Organization
|
model = models.Organization
|
||||||
serializer_class = serializers.OrganizationSerializer
|
serializer_class = serializers.OrganizationSerializer
|
||||||
parent_model = models.User
|
parent_model = models.User
|
||||||
|
relationship = 'organizations'
|
||||||
|
|
||||||
def get_queryset(self):
|
def get_queryset(self):
|
||||||
parent = self.get_parent_object()
|
parent = self.get_parent_object()
|
||||||
@@ -1175,6 +1240,7 @@ class UserAdminOfOrganizationsList(OrganizationCountsMixin, SubListAPIView):
|
|||||||
model = models.Organization
|
model = models.Organization
|
||||||
serializer_class = serializers.OrganizationSerializer
|
serializer_class = serializers.OrganizationSerializer
|
||||||
parent_model = models.User
|
parent_model = models.User
|
||||||
|
relationship = 'admin_of_organizations'
|
||||||
|
|
||||||
def get_queryset(self):
|
def get_queryset(self):
|
||||||
parent = self.get_parent_object()
|
parent = self.get_parent_object()
|
||||||
@@ -1198,6 +1264,7 @@ class UserActivityStreamList(SubListAPIView):
|
|||||||
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
|
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class UserDetail(RetrieveUpdateDestroyAPIView):
|
class UserDetail(RetrieveUpdateDestroyAPIView):
|
||||||
model = models.User
|
model = models.User
|
||||||
serializer_class = serializers.UserSerializer
|
serializer_class = serializers.UserSerializer
|
||||||
@@ -4172,6 +4239,13 @@ class RoleUsersList(SubListAttachDetachAPIView):
|
|||||||
role = self.get_parent_object()
|
role = self.get_parent_object()
|
||||||
|
|
||||||
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
|
content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
|
||||||
|
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
|
||||||
|
for model in [models.Organization, models.Team]:
|
||||||
|
ct = content_types[model]
|
||||||
|
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
|
||||||
|
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
|
||||||
|
return Response(data, status=status.HTTP_403_FORBIDDEN)
|
||||||
|
|
||||||
credential_content_type = content_types[models.Credential]
|
credential_content_type = content_types[models.Credential]
|
||||||
if role.content_type == credential_content_type:
|
if role.content_type == credential_content_type:
|
||||||
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
|
||||||
@@ -4213,21 +4287,9 @@ class RoleTeamsList(SubListAttachDetachAPIView):
|
|||||||
|
|
||||||
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
credential_content_type = ContentType.objects.get_for_model(models.Credential)
|
||||||
if role.content_type == credential_content_type:
|
if role.content_type == credential_content_type:
|
||||||
# Private credentials (no organization) are never allowed for teams
|
if not role.content_object.organization or role.content_object.organization.id != team.organization.id:
|
||||||
if not role.content_object.organization:
|
data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization"))
|
||||||
data = dict(
|
|
||||||
msg=_("You cannot grant access to a credential that is not assigned to an organization (private credentials cannot be assigned to teams)")
|
|
||||||
)
|
|
||||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
||||||
# Cross-organization credentials are only allowed for superusers
|
|
||||||
elif role.content_object.organization.id != team.organization.id:
|
|
||||||
if not request.user.is_superuser:
|
|
||||||
data = dict(
|
|
||||||
msg=_(
|
|
||||||
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return Response(data, status=status.HTTP_400_BAD_REQUEST)
|
|
||||||
|
|
||||||
action = 'attach'
|
action = 'attach'
|
||||||
if request.data.get('disassociate', None):
|
if request.data.get('disassociate', None):
|
||||||
@@ -4247,6 +4309,34 @@ class RoleTeamsList(SubListAttachDetachAPIView):
|
|||||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||||
|
|
||||||
|
|
||||||
|
class RoleParentsList(SubListAPIView):
|
||||||
|
deprecated = True
|
||||||
|
model = models.Role
|
||||||
|
serializer_class = serializers.RoleSerializer
|
||||||
|
parent_model = models.Role
|
||||||
|
relationship = 'parents'
|
||||||
|
permission_classes = (IsAuthenticated,)
|
||||||
|
search_fields = ('role_field', 'content_type__model')
|
||||||
|
|
||||||
|
def get_queryset(self):
|
||||||
|
role = models.Role.objects.get(pk=self.kwargs['pk'])
|
||||||
|
return models.Role.filter_visible_roles(self.request.user, role.parents.all())
|
||||||
|
|
||||||
|
|
||||||
|
class RoleChildrenList(SubListAPIView):
|
||||||
|
deprecated = True
|
||||||
|
model = models.Role
|
||||||
|
serializer_class = serializers.RoleSerializer
|
||||||
|
parent_model = models.Role
|
||||||
|
relationship = 'children'
|
||||||
|
permission_classes = (IsAuthenticated,)
|
||||||
|
search_fields = ('role_field', 'content_type__model')
|
||||||
|
|
||||||
|
def get_queryset(self):
|
||||||
|
role = models.Role.objects.get(pk=self.kwargs['pk'])
|
||||||
|
return models.Role.filter_visible_roles(self.request.user, role.children.all())
|
||||||
|
|
||||||
|
|
||||||
# Create view functions for all of the class-based views to simplify inclusion
|
# Create view functions for all of the class-based views to simplify inclusion
|
||||||
# in URL patterns and reverse URL lookups, converting CamelCase names to
|
# in URL patterns and reverse URL lookups, converting CamelCase names to
|
||||||
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).
|
# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view).
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
|
|||||||
from awx.api.permissions import AnalyticsPermission
|
from awx.api.permissions import AnalyticsPermission
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
from awx.main.utils import get_awx_version
|
from awx.main.utils import get_awx_version
|
||||||
from awx.main.utils.analytics_proxy import OIDCClient
|
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
|
||||||
from rest_framework import status
|
from rest_framework import status
|
||||||
|
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
@@ -202,16 +202,10 @@ class AnalyticsGenericView(APIView):
|
|||||||
if method not in ["GET", "POST", "OPTIONS"]:
|
if method not in ["GET", "POST", "OPTIONS"]:
|
||||||
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
||||||
url = self._get_analytics_url(request.path)
|
url = self._get_analytics_url(request.path)
|
||||||
using_subscriptions_credentials = False
|
|
||||||
try:
|
try:
|
||||||
rh_user = getattr(settings, 'REDHAT_USERNAME', None)
|
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
|
||||||
rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
|
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
|
||||||
if not (rh_user and rh_password):
|
client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
|
||||||
rh_user = self._get_setting('SUBSCRIPTIONS_CLIENT_ID', None, ERROR_MISSING_USER)
|
|
||||||
rh_password = self._get_setting('SUBSCRIPTIONS_CLIENT_SECRET', None, ERROR_MISSING_PASSWORD)
|
|
||||||
using_subscriptions_credentials = True
|
|
||||||
|
|
||||||
client = OIDCClient(rh_user, rh_password)
|
|
||||||
response = client.make_request(
|
response = client.make_request(
|
||||||
method,
|
method,
|
||||||
url,
|
url,
|
||||||
@@ -222,17 +216,17 @@ class AnalyticsGenericView(APIView):
|
|||||||
timeout=(31, 31),
|
timeout=(31, 31),
|
||||||
)
|
)
|
||||||
except requests.RequestException:
|
except requests.RequestException:
|
||||||
# subscriptions credentials are not valid for basic auth, so just return 401
|
logger.error("Automation Analytics API request failed, trying base auth method")
|
||||||
if using_subscriptions_credentials:
|
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
|
||||||
response = Response(status=status.HTTP_401_UNAUTHORIZED)
|
except MissingSettings:
|
||||||
else:
|
rh_user = self._get_setting('SUBSCRIPTIONS_USERNAME', None, ERROR_MISSING_USER)
|
||||||
logger.error("Automation Analytics API request failed, trying base auth method")
|
rh_password = self._get_setting('SUBSCRIPTIONS_PASSWORD', None, ERROR_MISSING_PASSWORD)
|
||||||
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
|
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
|
||||||
#
|
#
|
||||||
# Missing or wrong user/pass
|
# Missing or wrong user/pass
|
||||||
#
|
#
|
||||||
if response.status_code == status.HTTP_401_UNAUTHORIZED:
|
if response.status_code == status.HTTP_401_UNAUTHORIZED:
|
||||||
text = response.get('text', '').rstrip("\n")
|
text = (response.text or '').rstrip("\n")
|
||||||
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
|
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
|
||||||
#
|
#
|
||||||
# Not found, No entitlement or No data in Analytics
|
# Not found, No entitlement or No data in Analytics
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import re
|
|||||||
import asn1
|
import asn1
|
||||||
from awx.api import serializers
|
from awx.api import serializers
|
||||||
from awx.api.generics import GenericAPIView, Response
|
from awx.api.generics import GenericAPIView, Response
|
||||||
from awx.api.permissions import IsSystemAdmin
|
from awx.api.permissions import IsSystemAdminOrAuditor
|
||||||
from awx.main import models
|
from awx.main import models
|
||||||
from cryptography import x509
|
from cryptography import x509
|
||||||
from cryptography.hazmat.primitives import hashes, serialization
|
from cryptography.hazmat.primitives import hashes, serialization
|
||||||
@@ -48,7 +48,7 @@ class InstanceInstallBundle(GenericAPIView):
|
|||||||
name = _('Install Bundle')
|
name = _('Install Bundle')
|
||||||
model = models.Instance
|
model = models.Instance
|
||||||
serializer_class = serializers.InstanceSerializer
|
serializer_class = serializers.InstanceSerializer
|
||||||
permission_classes = (IsSystemAdmin,)
|
permission_classes = (IsSystemAdminOrAuditor,)
|
||||||
|
|
||||||
def get(self, request, *args, **kwargs):
|
def get(self, request, *args, **kwargs):
|
||||||
instance_obj = self.get_object()
|
instance_obj = self.get_object()
|
||||||
|
|||||||
@@ -53,15 +53,18 @@ from awx.api.serializers import (
|
|||||||
CredentialSerializer,
|
CredentialSerializer,
|
||||||
)
|
)
|
||||||
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin
|
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin
|
||||||
|
from awx.api.views import immutablesharedfields
|
||||||
|
|
||||||
logger = logging.getLogger('awx.api.views.organization')
|
logger = logging.getLogger('awx.api.views.organization')
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
|
||||||
model = Organization
|
model = Organization
|
||||||
serializer_class = OrganizationSerializer
|
serializer_class = OrganizationSerializer
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
|
||||||
model = Organization
|
model = Organization
|
||||||
serializer_class = OrganizationSerializer
|
serializer_class = OrganizationSerializer
|
||||||
@@ -104,6 +107,7 @@ class OrganizationInventoriesList(SubListAPIView):
|
|||||||
relationship = 'inventories'
|
relationship = 'inventories'
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class OrganizationUsersList(BaseUsersList):
|
class OrganizationUsersList(BaseUsersList):
|
||||||
model = User
|
model = User
|
||||||
serializer_class = UserSerializer
|
serializer_class = UserSerializer
|
||||||
@@ -112,6 +116,7 @@ class OrganizationUsersList(BaseUsersList):
|
|||||||
ordering = ('username',)
|
ordering = ('username',)
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class OrganizationAdminsList(BaseUsersList):
|
class OrganizationAdminsList(BaseUsersList):
|
||||||
model = User
|
model = User
|
||||||
serializer_class = UserSerializer
|
serializer_class = UserSerializer
|
||||||
@@ -150,6 +155,7 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
|
|||||||
parent_key = 'organization'
|
parent_key = 'organization'
|
||||||
|
|
||||||
|
|
||||||
|
@immutablesharedfields
|
||||||
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
|
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
|
||||||
model = Team
|
model = Team
|
||||||
serializer_class = TeamSerializer
|
serializer_class = TeamSerializer
|
||||||
|
|||||||
@@ -8,8 +8,6 @@ import operator
|
|||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
|
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.core.cache import cache
|
|
||||||
from django.db import connection
|
|
||||||
from django.utils.encoding import smart_str
|
from django.utils.encoding import smart_str
|
||||||
from django.utils.decorators import method_decorator
|
from django.utils.decorators import method_decorator
|
||||||
from django.views.decorators.csrf import ensure_csrf_cookie
|
from django.views.decorators.csrf import ensure_csrf_cookie
|
||||||
@@ -28,14 +26,12 @@ from awx.api.generics import APIView
|
|||||||
from awx.conf.registry import settings_registry
|
from awx.conf.registry import settings_registry
|
||||||
from awx.main.analytics import all_collectors
|
from awx.main.analytics import all_collectors
|
||||||
from awx.main.ha import is_ha_environment
|
from awx.main.ha import is_ha_environment
|
||||||
from awx.main.tasks.system import clear_setting_cache
|
|
||||||
from awx.main.utils import get_awx_version, get_custom_venv_choices
|
from awx.main.utils import get_awx_version, get_custom_venv_choices
|
||||||
from awx.main.utils.licensing import validate_entitlement_manifest
|
from awx.main.utils.licensing import validate_entitlement_manifest
|
||||||
from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
|
from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
|
||||||
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
|
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
|
||||||
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
|
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
|
||||||
from awx.main.utils import set_environ
|
from awx.main.utils import set_environ
|
||||||
from awx.main.utils.analytics_proxy import TokenError
|
|
||||||
from awx.main.utils.licensing import get_licenser
|
from awx.main.utils.licensing import get_licenser
|
||||||
|
|
||||||
logger = logging.getLogger('awx.api.views.root')
|
logger = logging.getLogger('awx.api.views.root')
|
||||||
@@ -180,52 +176,19 @@ class ApiV2SubscriptionView(APIView):
|
|||||||
|
|
||||||
def post(self, request):
|
def post(self, request):
|
||||||
data = request.data.copy()
|
data = request.data.copy()
|
||||||
|
if data.get('subscriptions_password') == '$encrypted$':
|
||||||
|
data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
|
||||||
try:
|
try:
|
||||||
user = None
|
user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
|
||||||
pw = None
|
|
||||||
basic_auth = False
|
|
||||||
# determine if the credentials are for basic auth or not
|
|
||||||
if data.get('subscriptions_client_id'):
|
|
||||||
user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
|
|
||||||
if pw == '$encrypted$':
|
|
||||||
pw = settings.SUBSCRIPTIONS_CLIENT_SECRET
|
|
||||||
elif data.get('subscriptions_username'):
|
|
||||||
user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
|
|
||||||
if pw == '$encrypted$':
|
|
||||||
pw = settings.SUBSCRIPTIONS_PASSWORD
|
|
||||||
basic_auth = True
|
|
||||||
|
|
||||||
if not user or not pw:
|
|
||||||
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
|
|
||||||
|
|
||||||
with set_environ(**settings.AWX_TASK_ENV):
|
with set_environ(**settings.AWX_TASK_ENV):
|
||||||
validated = get_licenser().validate_rh(user, pw, basic_auth)
|
validated = get_licenser().validate_rh(user, pw)
|
||||||
|
if user:
|
||||||
# update settings if the credentials were valid
|
settings.SUBSCRIPTIONS_USERNAME = data['subscriptions_username']
|
||||||
if basic_auth:
|
if pw:
|
||||||
if user:
|
settings.SUBSCRIPTIONS_PASSWORD = data['subscriptions_password']
|
||||||
settings.SUBSCRIPTIONS_USERNAME = user
|
|
||||||
if pw:
|
|
||||||
settings.SUBSCRIPTIONS_PASSWORD = pw
|
|
||||||
# mutual exclusion for basic auth and service account
|
|
||||||
# only one should be set at a given time so that
|
|
||||||
# config/attach/ knows which credentials to use
|
|
||||||
settings.SUBSCRIPTIONS_CLIENT_ID = ""
|
|
||||||
settings.SUBSCRIPTIONS_CLIENT_SECRET = ""
|
|
||||||
else:
|
|
||||||
if user:
|
|
||||||
settings.SUBSCRIPTIONS_CLIENT_ID = user
|
|
||||||
if pw:
|
|
||||||
settings.SUBSCRIPTIONS_CLIENT_SECRET = pw
|
|
||||||
# mutual exclusion for basic auth and service account
|
|
||||||
settings.SUBSCRIPTIONS_USERNAME = ""
|
|
||||||
settings.SUBSCRIPTIONS_PASSWORD = ""
|
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
msg = _("Invalid Subscription")
|
msg = _("Invalid Subscription")
|
||||||
if isinstance(exc, TokenError) or (
|
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
|
||||||
isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401
|
|
||||||
):
|
|
||||||
msg = _("The provided credentials are invalid (HTTP 401).")
|
msg = _("The provided credentials are invalid (HTTP 401).")
|
||||||
elif isinstance(exc, requests.exceptions.ProxyError):
|
elif isinstance(exc, requests.exceptions.ProxyError):
|
||||||
msg = _("Unable to connect to proxy server.")
|
msg = _("Unable to connect to proxy server.")
|
||||||
@@ -252,25 +215,16 @@ class ApiV2AttachView(APIView):
|
|||||||
|
|
||||||
def post(self, request):
|
def post(self, request):
|
||||||
data = request.data.copy()
|
data = request.data.copy()
|
||||||
subscription_id = data.get('subscription_id', None)
|
pool_id = data.get('pool_id', None)
|
||||||
if not subscription_id:
|
if not pool_id:
|
||||||
return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
|
return Response({"error": _("No subscription pool ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||||
# Ensure we always use the latest subscription credentials
|
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
|
||||||
cache.delete_many(['SUBSCRIPTIONS_CLIENT_ID', 'SUBSCRIPTIONS_CLIENT_SECRET', 'SUBSCRIPTIONS_USERNAME', 'SUBSCRIPTIONS_PASSWORD'])
|
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
|
||||||
user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
|
if pool_id and user and pw:
|
||||||
pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
|
|
||||||
basic_auth = False
|
|
||||||
if not (user and pw):
|
|
||||||
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
|
|
||||||
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
|
|
||||||
basic_auth = True
|
|
||||||
if not (user and pw):
|
|
||||||
return Response({"error": _("Missing subscription credentials")}, status=status.HTTP_400_BAD_REQUEST)
|
|
||||||
if subscription_id and user and pw:
|
|
||||||
data = request.data.copy()
|
data = request.data.copy()
|
||||||
try:
|
try:
|
||||||
with set_environ(**settings.AWX_TASK_ENV):
|
with set_environ(**settings.AWX_TASK_ENV):
|
||||||
validated = get_licenser().validate_rh(user, pw, basic_auth)
|
validated = get_licenser().validate_rh(user, pw)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
msg = _("Invalid Subscription")
|
msg = _("Invalid Subscription")
|
||||||
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
|
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
|
||||||
@@ -284,12 +238,10 @@ class ApiV2AttachView(APIView):
|
|||||||
else:
|
else:
|
||||||
logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
|
logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
|
||||||
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
|
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
|
||||||
|
|
||||||
for sub in validated:
|
for sub in validated:
|
||||||
if sub['subscription_id'] == subscription_id:
|
if sub['pool_id'] == pool_id:
|
||||||
sub['valid_key'] = True
|
sub['valid_key'] = True
|
||||||
settings.LICENSE = sub
|
settings.LICENSE = sub
|
||||||
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
|
|
||||||
return Response(sub)
|
return Response(sub)
|
||||||
|
|
||||||
return Response({"error": _("Error processing subscription metadata.")}, status=status.HTTP_400_BAD_REQUEST)
|
return Response({"error": _("Error processing subscription metadata.")}, status=status.HTTP_400_BAD_REQUEST)
|
||||||
@@ -309,6 +261,7 @@ class ApiV2ConfigView(APIView):
|
|||||||
'''Return various sitewide configuration settings'''
|
'''Return various sitewide configuration settings'''
|
||||||
|
|
||||||
license_data = get_licenser().validate()
|
license_data = get_licenser().validate()
|
||||||
|
|
||||||
if not license_data.get('valid_key', False):
|
if not license_data.get('valid_key', False):
|
||||||
license_data = {}
|
license_data = {}
|
||||||
|
|
||||||
@@ -372,7 +325,6 @@ class ApiV2ConfigView(APIView):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
license_data_validated = get_licenser().license_from_manifest(license_data)
|
license_data_validated = get_licenser().license_from_manifest(license_data)
|
||||||
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
|
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.warning(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
|
logger.warning(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
|
||||||
return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
|
return Response({"error": _("Invalid License")}, status=status.HTTP_400_BAD_REQUEST)
|
||||||
@@ -391,7 +343,6 @@ class ApiV2ConfigView(APIView):
|
|||||||
def delete(self, request):
|
def delete(self, request):
|
||||||
try:
|
try:
|
||||||
settings.LICENSE = {}
|
settings.LICENSE = {}
|
||||||
connection.on_commit(lambda: clear_setting_cache.delay(['LICENSE']))
|
|
||||||
return Response(status=status.HTTP_204_NO_CONTENT)
|
return Response(status=status.HTTP_204_NO_CONTENT)
|
||||||
except Exception:
|
except Exception:
|
||||||
# FIX: Log
|
# FIX: Log
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ from django.core.validators import URLValidator, _lazy_re_compile
|
|||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
|
|
||||||
# Django REST Framework
|
# Django REST Framework
|
||||||
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField, FloatField # noqa
|
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField # noqa
|
||||||
from rest_framework.serializers import PrimaryKeyRelatedField # noqa
|
from rest_framework.serializers import PrimaryKeyRelatedField # noqa
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -207,8 +207,7 @@ class URLField(CharField):
|
|||||||
if self.allow_plain_hostname:
|
if self.allow_plain_hostname:
|
||||||
try:
|
try:
|
||||||
url_parts = urlparse.urlsplit(value)
|
url_parts = urlparse.urlsplit(value)
|
||||||
looks_like_ipv6 = bool(url_parts.netloc and url_parts.netloc.startswith('[') and url_parts.netloc.endswith(']'))
|
if url_parts.hostname and '.' not in url_parts.hostname:
|
||||||
if not looks_like_ipv6 and url_parts.hostname and '.' not in url_parts.hostname:
|
|
||||||
netloc = '{}.local'.format(url_parts.hostname)
|
netloc = '{}.local'.format(url_parts.hostname)
|
||||||
if url_parts.port:
|
if url_parts.port:
|
||||||
netloc = '{}:{}'.format(netloc, url_parts.port)
|
netloc = '{}:{}'.format(netloc, url_parts.port)
|
||||||
|
|||||||
@@ -27,5 +27,5 @@ def _migrate_setting(apps, old_key, new_key, encrypted=False):
|
|||||||
|
|
||||||
|
|
||||||
def prefill_rh_credentials(apps, schema_editor):
|
def prefill_rh_credentials(apps, schema_editor):
|
||||||
_migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_CLIENT_ID', encrypted=False)
|
_migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_USERNAME', encrypted=False)
|
||||||
_migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_CLIENT_SECRET', encrypted=True)
|
_migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_PASSWORD', encrypted=True)
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ class SettingsRegistry(object):
|
|||||||
if setting in self._registry:
|
if setting in self._registry:
|
||||||
raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
|
raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
|
||||||
category = kwargs.setdefault('category', None)
|
category = kwargs.setdefault('category', None)
|
||||||
kwargs.setdefault('required', False) # No setting is ordinarily required
|
|
||||||
category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
|
category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
|
||||||
if category_slug in {'all', 'changed', 'user-defaults'}:
|
if category_slug in {'all', 'changed', 'user-defaults'}:
|
||||||
raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))
|
raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))
|
||||||
|
|||||||
@@ -128,41 +128,3 @@ class TestURLField:
|
|||||||
else:
|
else:
|
||||||
with pytest.raises(ValidationError):
|
with pytest.raises(ValidationError):
|
||||||
field.run_validators(url)
|
field.run_validators(url)
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
|
||||||
"url, expect_error",
|
|
||||||
[
|
|
||||||
("https://[1:2:3]", True),
|
|
||||||
("http://[1:2:3]", True),
|
|
||||||
("https://[2001:db8:3333:4444:5555:6666:7777:8888", True),
|
|
||||||
("https://2001:db8:3333:4444:5555:6666:7777:8888", True),
|
|
||||||
("https://[2001:db8:3333:4444:5555:6666:7777:8888]", False),
|
|
||||||
("https://[::1]", False),
|
|
||||||
("https://[::]", False),
|
|
||||||
("https://[2001:db8::1]", False),
|
|
||||||
("https://[2001:db8:0:0:0:0:1:1]", False),
|
|
||||||
("https://[fe80::2%eth0]", True), # ipv6 scope identifier
|
|
||||||
("https://[fe80:0:0:0:200:f8ff:fe21:67cf]", False),
|
|
||||||
("https://[::ffff:192.168.1.10]", False),
|
|
||||||
("https://[0:0:0:0:0:ffff:c000:0201]", False),
|
|
||||||
("https://[2001:0db8:000a:0001:0000:0000:0000:0000]", False),
|
|
||||||
("https://[2001:db8:a:1::]", False),
|
|
||||||
("https://[ff02::1]", False),
|
|
||||||
("https://[ff02:0:0:0:0:0:0:1]", False),
|
|
||||||
("https://[fc00::1]", False),
|
|
||||||
("https://[fd12:3456:789a:1::1]", False),
|
|
||||||
("https://[2001:db8::abcd:ef12:3456:7890]", False),
|
|
||||||
("https://[2001:db8:0000:abcd:0000:ef12:0000:3456]", False),
|
|
||||||
("https://[::ffff:10.0.0.1]", False),
|
|
||||||
("https://[2001:db8:cafe::]", False),
|
|
||||||
("https://[2001:db8:cafe:0:0:0:0:0]", False),
|
|
||||||
("https://[fe80::210:f3ff:fedf:4567%3]", True), # ipv6 scope identifier, numerical interface
|
|
||||||
],
|
|
||||||
)
|
|
||||||
def test_ipv6_urls(self, url, expect_error):
|
|
||||||
field = URLField()
|
|
||||||
if expect_error:
|
|
||||||
with pytest.raises(ValidationError, match="Enter a valid URL"):
|
|
||||||
field.run_validators(url)
|
|
||||||
else:
|
|
||||||
field.run_validators(url)
|
|
||||||
|
|||||||
@@ -639,9 +639,7 @@ class UserAccess(BaseAccess):
|
|||||||
prefetch_related = ('resource',)
|
prefetch_related = ('resource',)
|
||||||
|
|
||||||
def filtered_queryset(self):
|
def filtered_queryset(self):
|
||||||
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (
|
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
|
||||||
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
|
|
||||||
):
|
|
||||||
qs = User.objects.all()
|
qs = User.objects.all()
|
||||||
else:
|
else:
|
||||||
qs = (
|
qs = (
|
||||||
@@ -1226,9 +1224,7 @@ class TeamAccess(BaseAccess):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def filtered_queryset(self):
|
def filtered_queryset(self):
|
||||||
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (
|
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
|
||||||
Organization.access_qs(self.user, 'change').exists() or Organization.access_qs(self.user, 'audit').exists()
|
|
||||||
):
|
|
||||||
return self.model.objects.all()
|
return self.model.objects.all()
|
||||||
return self.model.objects.filter(
|
return self.model.objects.filter(
|
||||||
Q(organization__in=Organization.accessible_pk_qs(self.user, 'member_role')) | Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role'))
|
Q(organization__in=Organization.accessible_pk_qs(self.user, 'member_role')) | Q(pk__in=self.model.accessible_pk_qs(self.user, 'read_role'))
|
||||||
@@ -2568,7 +2564,7 @@ class NotificationTemplateAccess(BaseAccess):
|
|||||||
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
if settings.ANSIBLE_BASE_ROLE_SYSTEM_ACTIVATED:
|
||||||
return self.model.access_qs(self.user, 'view')
|
return self.model.access_qs(self.user, 'view')
|
||||||
return self.model.objects.filter(
|
return self.model.objects.filter(
|
||||||
Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=Organization.access_qs(self.user, 'audit'))
|
Q(organization__in=Organization.access_qs(self.user, 'add_notificationtemplate')) | Q(organization__in=self.user.auditor_of_organizations)
|
||||||
).distinct()
|
).distinct()
|
||||||
|
|
||||||
@check_superuser
|
@check_superuser
|
||||||
@@ -2603,7 +2599,7 @@ class NotificationAccess(BaseAccess):
|
|||||||
def filtered_queryset(self):
|
def filtered_queryset(self):
|
||||||
return self.model.objects.filter(
|
return self.model.objects.filter(
|
||||||
Q(notification_template__organization__in=Organization.access_qs(self.user, 'add_notificationtemplate'))
|
Q(notification_template__organization__in=Organization.access_qs(self.user, 'add_notificationtemplate'))
|
||||||
| Q(notification_template__organization__in=Organization.access_qs(self.user, 'audit'))
|
| Q(notification_template__organization__in=self.user.auditor_of_organizations)
|
||||||
).distinct()
|
).distinct()
|
||||||
|
|
||||||
def can_delete(self, obj):
|
def can_delete(self, obj):
|
||||||
|
|||||||
@@ -3,13 +3,13 @@ import logging
|
|||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
|
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
from awx.main.dispatch.publish import task
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.scheduler')
|
logger = logging.getLogger('awx.main.scheduler')
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def send_subsystem_metrics():
|
def send_subsystem_metrics():
|
||||||
DispatcherMetrics().send_metrics()
|
DispatcherMetrics().send_metrics()
|
||||||
CallbackReceiverMetrics().send_metrics()
|
CallbackReceiverMetrics().send_metrics()
|
||||||
|
|||||||
@@ -142,7 +142,7 @@ def config(since, **kwargs):
|
|||||||
return {
|
return {
|
||||||
'platform': {
|
'platform': {
|
||||||
'system': platform.system(),
|
'system': platform.system(),
|
||||||
'dist': (distro.name(), distro.version(), distro.codename()),
|
'dist': distro.linux_distribution(),
|
||||||
'release': platform.release(),
|
'release': platform.release(),
|
||||||
'type': install_type,
|
'type': install_type,
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
|
|||||||
from awx.main.models import Job
|
from awx.main.models import Job
|
||||||
from awx.main.access import access_registry
|
from awx.main.access import access_registry
|
||||||
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
|
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
|
||||||
from awx.main.utils.analytics_proxy import OIDCClient
|
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
|
||||||
|
|
||||||
__all__ = ['register', 'gather', 'ship']
|
__all__ = ['register', 'gather', 'ship']
|
||||||
|
|
||||||
@@ -186,7 +186,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
|||||||
|
|
||||||
if not (
|
if not (
|
||||||
settings.AUTOMATION_ANALYTICS_URL
|
settings.AUTOMATION_ANALYTICS_URL
|
||||||
and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_CLIENT_ID and settings.SUBSCRIPTIONS_CLIENT_SECRET))
|
and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_USERNAME and settings.SUBSCRIPTIONS_PASSWORD))
|
||||||
):
|
):
|
||||||
logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
|
logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
|
||||||
return None
|
return None
|
||||||
@@ -324,10 +324,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
|||||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||||
|
|
||||||
if collection_type != 'dry-run':
|
if collection_type != 'dry-run':
|
||||||
for fpath in tarfiles:
|
if succeeded:
|
||||||
if os.path.exists(fpath):
|
for fpath in tarfiles:
|
||||||
os.remove(fpath)
|
if os.path.exists(fpath):
|
||||||
|
os.remove(fpath)
|
||||||
with disable_activity_stream():
|
with disable_activity_stream():
|
||||||
if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
|
if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
|
||||||
# `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;
|
# `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;
|
||||||
@@ -368,20 +368,8 @@ def ship(path):
|
|||||||
logger.error('AUTOMATION_ANALYTICS_URL is not set')
|
logger.error('AUTOMATION_ANALYTICS_URL is not set')
|
||||||
return False
|
return False
|
||||||
|
|
||||||
rh_id = getattr(settings, 'REDHAT_USERNAME', None)
|
rh_user = getattr(settings, 'REDHAT_USERNAME', None)
|
||||||
rh_secret = getattr(settings, 'REDHAT_PASSWORD', None)
|
rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
|
||||||
|
|
||||||
if not (rh_id and rh_secret):
|
|
||||||
rh_id = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
|
|
||||||
rh_secret = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
|
|
||||||
|
|
||||||
if not rh_id:
|
|
||||||
logger.error('Neither REDHAT_USERNAME nor SUBSCRIPTIONS_CLIENT_ID are set')
|
|
||||||
return False
|
|
||||||
|
|
||||||
if not rh_secret:
|
|
||||||
logger.error('Neither REDHAT_PASSWORD nor SUBSCRIPTIONS_CLIENT_SECRET are set')
|
|
||||||
return False
|
|
||||||
|
|
||||||
with open(path, 'rb') as f:
|
with open(path, 'rb') as f:
|
||||||
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
|
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
|
||||||
@@ -389,13 +377,25 @@ def ship(path):
|
|||||||
s.headers = get_awx_http_client_headers()
|
s.headers = get_awx_http_client_headers()
|
||||||
s.headers.pop('Content-Type')
|
s.headers.pop('Content-Type')
|
||||||
with set_environ(**settings.AWX_TASK_ENV):
|
with set_environ(**settings.AWX_TASK_ENV):
|
||||||
try:
|
if rh_user and rh_password:
|
||||||
client = OIDCClient(rh_id, rh_secret)
|
try:
|
||||||
response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
|
client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
|
||||||
except requests.RequestException:
|
response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
|
||||||
logger.error("Automation Analytics API request failed, trying base auth method")
|
except requests.RequestException:
|
||||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_id, rh_secret), headers=s.headers, timeout=(31, 31))
|
logger.error("Automation Analytics API request failed, trying base auth method")
|
||||||
|
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||||
|
elif not rh_user or not rh_password:
|
||||||
|
logger.info('REDHAT_USERNAME and REDHAT_PASSWORD are not set, using SUBSCRIPTIONS_USERNAME and SUBSCRIPTIONS_PASSWORD')
|
||||||
|
rh_user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
|
||||||
|
rh_password = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
|
||||||
|
if rh_user and rh_password:
|
||||||
|
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||||
|
elif not rh_user:
|
||||||
|
logger.error('REDHAT_USERNAME and SUBSCRIPTIONS_USERNAME are not set')
|
||||||
|
return False
|
||||||
|
elif not rh_password:
|
||||||
|
logger.error('REDHAT_PASSWORD and SUBSCRIPTIONS_USERNAME are not set')
|
||||||
|
return False
|
||||||
# Accept 2XX status_codes
|
# Accept 2XX status_codes
|
||||||
if response.status_code >= 300:
|
if response.status_code >= 300:
|
||||||
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
||||||
|
|||||||
@@ -128,7 +128,6 @@ def metrics():
|
|||||||
registry=REGISTRY,
|
registry=REGISTRY,
|
||||||
)
|
)
|
||||||
|
|
||||||
LICENSE_EXPIRY = Gauge('awx_license_expiry', 'Time before license expires', registry=REGISTRY)
|
|
||||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
|
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
|
||||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
|
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
|
||||||
|
|
||||||
@@ -149,7 +148,6 @@ def metrics():
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
LICENSE_EXPIRY.set(str(license_info.get('time_remaining', 0)))
|
|
||||||
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
|
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
|
||||||
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))
|
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))
|
||||||
|
|
||||||
|
|||||||
@@ -44,12 +44,11 @@ class MetricsServer(MetricsServerSettings):
|
|||||||
|
|
||||||
|
|
||||||
class BaseM:
|
class BaseM:
|
||||||
def __init__(self, field, help_text, labels=None):
|
def __init__(self, field, help_text):
|
||||||
self.field = field
|
self.field = field
|
||||||
self.help_text = help_text
|
self.help_text = help_text
|
||||||
self.current_value = 0
|
self.current_value = 0
|
||||||
self.metric_has_changed = False
|
self.metric_has_changed = False
|
||||||
self.labels = labels or {}
|
|
||||||
|
|
||||||
def reset_value(self, conn):
|
def reset_value(self, conn):
|
||||||
conn.hset(root_key, self.field, 0)
|
conn.hset(root_key, self.field, 0)
|
||||||
@@ -70,16 +69,12 @@ class BaseM:
|
|||||||
value = conn.hget(root_key, self.field)
|
value = conn.hget(root_key, self.field)
|
||||||
return self.decode_value(value)
|
return self.decode_value(value)
|
||||||
|
|
||||||
def to_prometheus(self, instance_data, namespace=None):
|
def to_prometheus(self, instance_data):
|
||||||
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
|
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} gauge\n"
|
||||||
for instance in instance_data:
|
for instance in instance_data:
|
||||||
if self.field in instance_data[instance]:
|
if self.field in instance_data[instance]:
|
||||||
# Build label string
|
|
||||||
labels = f'node="{instance}"'
|
|
||||||
if namespace:
|
|
||||||
labels += f',subsystem="{namespace}"'
|
|
||||||
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
|
# on upgrade, if there are stale instances, we can end up with issues where new metrics are not present
|
||||||
output_text += f'{self.field}{{{labels}}} {instance_data[instance][self.field]}\n'
|
output_text += f'{self.field}{{node="{instance}"}} {instance_data[instance][self.field]}\n'
|
||||||
return output_text
|
return output_text
|
||||||
|
|
||||||
|
|
||||||
@@ -172,17 +167,14 @@ class HistogramM(BaseM):
|
|||||||
self.sum.store_value(conn)
|
self.sum.store_value(conn)
|
||||||
self.inf.store_value(conn)
|
self.inf.store_value(conn)
|
||||||
|
|
||||||
def to_prometheus(self, instance_data, namespace=None):
|
def to_prometheus(self, instance_data):
|
||||||
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n"
|
output_text = f"# HELP {self.field} {self.help_text}\n# TYPE {self.field} histogram\n"
|
||||||
for instance in instance_data:
|
for instance in instance_data:
|
||||||
# Build label string
|
|
||||||
node_label = f'node="{instance}"'
|
|
||||||
subsystem_label = f',subsystem="{namespace}"' if namespace else ''
|
|
||||||
for i, b in enumerate(self.buckets):
|
for i, b in enumerate(self.buckets):
|
||||||
output_text += f'{self.field}_bucket{{le="{b}",{node_label}{subsystem_label}}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
|
output_text += f'{self.field}_bucket{{le="{b}",node="{instance}"}} {sum(instance_data[instance][self.field]["counts"][0:i+1])}\n'
|
||||||
output_text += f'{self.field}_bucket{{le="+Inf",{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n'
|
output_text += f'{self.field}_bucket{{le="+Inf",node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
|
||||||
output_text += f'{self.field}_count{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["inf"]}\n'
|
output_text += f'{self.field}_count{{node="{instance}"}} {instance_data[instance][self.field]["inf"]}\n'
|
||||||
output_text += f'{self.field}_sum{{{node_label}{subsystem_label}}} {instance_data[instance][self.field]["sum"]}\n'
|
output_text += f'{self.field}_sum{{node="{instance}"}} {instance_data[instance][self.field]["sum"]}\n'
|
||||||
return output_text
|
return output_text
|
||||||
|
|
||||||
|
|
||||||
@@ -281,22 +273,20 @@ class Metrics(MetricsNamespace):
|
|||||||
|
|
||||||
def pipe_execute(self):
|
def pipe_execute(self):
|
||||||
if self.metrics_have_changed is True:
|
if self.metrics_have_changed is True:
|
||||||
duration_pipe_exec = time.perf_counter()
|
duration_to_save = time.perf_counter()
|
||||||
for m in self.METRICS:
|
for m in self.METRICS:
|
||||||
self.METRICS[m].store_value(self.pipe)
|
self.METRICS[m].store_value(self.pipe)
|
||||||
self.pipe.execute()
|
self.pipe.execute()
|
||||||
self.last_pipe_execute = time.time()
|
self.last_pipe_execute = time.time()
|
||||||
self.metrics_have_changed = False
|
self.metrics_have_changed = False
|
||||||
duration_pipe_exec = time.perf_counter() - duration_pipe_exec
|
duration_to_save = time.perf_counter() - duration_to_save
|
||||||
|
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_to_save)
|
||||||
duration_send_metrics = time.perf_counter()
|
|
||||||
self.send_metrics()
|
|
||||||
duration_send_metrics = time.perf_counter() - duration_send_metrics
|
|
||||||
|
|
||||||
# Increment operational metrics
|
|
||||||
self.METRICS['subsystem_metrics_pipe_execute_seconds'].inc(duration_pipe_exec)
|
|
||||||
self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1)
|
self.METRICS['subsystem_metrics_pipe_execute_calls'].inc(1)
|
||||||
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_send_metrics)
|
|
||||||
|
duration_to_save = time.perf_counter()
|
||||||
|
self.send_metrics()
|
||||||
|
duration_to_save = time.perf_counter() - duration_to_save
|
||||||
|
self.METRICS['subsystem_metrics_send_metrics_seconds'].inc(duration_to_save)
|
||||||
|
|
||||||
def send_metrics(self):
|
def send_metrics(self):
|
||||||
# more than one thread could be calling this at the same time, so should
|
# more than one thread could be calling this at the same time, so should
|
||||||
@@ -362,13 +352,7 @@ class Metrics(MetricsNamespace):
|
|||||||
if instance_data:
|
if instance_data:
|
||||||
for field in self.METRICS:
|
for field in self.METRICS:
|
||||||
if len(metrics_filter) == 0 or field in metrics_filter:
|
if len(metrics_filter) == 0 or field in metrics_filter:
|
||||||
# Add subsystem label only for operational metrics
|
output_text += self.METRICS[field].to_prometheus(instance_data)
|
||||||
namespace = (
|
|
||||||
self._namespace
|
|
||||||
if field in ['subsystem_metrics_pipe_execute_seconds', 'subsystem_metrics_pipe_execute_calls', 'subsystem_metrics_send_metrics_seconds']
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
output_text += self.METRICS[field].to_prometheus(instance_data, namespace)
|
|
||||||
return output_text
|
return output_text
|
||||||
|
|
||||||
|
|
||||||
@@ -456,10 +440,7 @@ class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
|
|||||||
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
|
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
if not (host_metrics := instance_data.get(my_hostname)):
|
host_metrics = instance_data.get(my_hostname)
|
||||||
logger.debug(f"Metric data for this node '{my_hostname}' not found in redis for metric namespace '{self._metrics._namespace}'")
|
|
||||||
return None
|
|
||||||
|
|
||||||
for _, metric in self._metrics.METRICS.items():
|
for _, metric in self._metrics.METRICS.items():
|
||||||
entry = host_metrics.get(metric.field)
|
entry = host_metrics.get(metric.field)
|
||||||
if not entry:
|
if not entry:
|
||||||
|
|||||||
@@ -1,9 +1,6 @@
|
|||||||
import os
|
import os
|
||||||
|
|
||||||
from dispatcherd.config import setup as dispatcher_setup
|
|
||||||
|
|
||||||
from django.apps import AppConfig
|
from django.apps import AppConfig
|
||||||
from django.db import connection
|
|
||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
from awx.main.utils.common import bypass_in_test, load_all_entry_points_for
|
from awx.main.utils.common import bypass_in_test, load_all_entry_points_for
|
||||||
from awx.main.utils.migration import is_database_synchronized
|
from awx.main.utils.migration import is_database_synchronized
|
||||||
@@ -79,28 +76,9 @@ class MainConfig(AppConfig):
|
|||||||
cls = entry_point.load()
|
cls = entry_point.load()
|
||||||
InventorySourceOptions.injectors[entry_point_name] = cls
|
InventorySourceOptions.injectors[entry_point_name] = cls
|
||||||
|
|
||||||
def configure_dispatcherd(self):
|
|
||||||
"""This implements the default configuration for dispatcherd
|
|
||||||
|
|
||||||
If running the tasking service like awx-manage run_dispatcher,
|
|
||||||
some additional config will be applied on top of this.
|
|
||||||
This configuration provides the minimum such that code can submit
|
|
||||||
tasks to pg_notify to run those tasks.
|
|
||||||
"""
|
|
||||||
from awx.main.dispatch.config import get_dispatcherd_config
|
|
||||||
|
|
||||||
if connection.vendor != 'postgresql':
|
|
||||||
config_dict = get_dispatcherd_config(mock_publish=True)
|
|
||||||
else:
|
|
||||||
config_dict = get_dispatcherd_config()
|
|
||||||
|
|
||||||
dispatcher_setup(config_dict)
|
|
||||||
|
|
||||||
def ready(self):
|
def ready(self):
|
||||||
super().ready()
|
super().ready()
|
||||||
|
|
||||||
self.configure_dispatcherd()
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Credential loading triggers database operations. There are cases we want to call
|
Credential loading triggers database operations. There are cases we want to call
|
||||||
awx-manage collectstatic without a database. All management commands invoke the ready() code
|
awx-manage collectstatic without a database. All management commands invoke the ready() code
|
||||||
|
|||||||
189
awx/main/conf.py
189
awx/main/conf.py
@@ -12,7 +12,6 @@ from rest_framework import serializers
|
|||||||
from awx.conf import fields, register, register_validate
|
from awx.conf import fields, register, register_validate
|
||||||
from awx.main.models import ExecutionEnvironment
|
from awx.main.models import ExecutionEnvironment
|
||||||
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
|
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
|
||||||
from awx.main.tasks.policy import OPA_AUTH_TYPES
|
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.conf')
|
logger = logging.getLogger('awx.main.conf')
|
||||||
|
|
||||||
@@ -91,6 +90,7 @@ register(
|
|||||||
),
|
),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
|
required=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
@@ -105,7 +105,6 @@ register(
|
|||||||
),
|
),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
hidden=True,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
@@ -125,8 +124,8 @@ register(
|
|||||||
allow_blank=True,
|
allow_blank=True,
|
||||||
encrypted=False,
|
encrypted=False,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat Client ID for Analytics'),
|
label=_('Red Hat customer username'),
|
||||||
help_text=_('Client ID used to send data to Automation Analytics'),
|
help_text=_('This username is used to send data to Automation Analytics'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -138,8 +137,8 @@ register(
|
|||||||
allow_blank=True,
|
allow_blank=True,
|
||||||
encrypted=True,
|
encrypted=True,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat Client Secret for Analytics'),
|
label=_('Red Hat customer password'),
|
||||||
help_text=_('Client secret used to send data to Automation Analytics'),
|
help_text=_('This password is used to send data to Automation Analytics'),
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
)
|
)
|
||||||
@@ -151,11 +150,10 @@ register(
|
|||||||
allow_blank=True,
|
allow_blank=True,
|
||||||
encrypted=False,
|
encrypted=False,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat Username for Subscriptions'),
|
label=_('Red Hat or Satellite username'),
|
||||||
help_text=_('Username used to retrieve subscription and content information'), # noqa
|
help_text=_('This username is used to retrieve subscription and content information'), # noqa
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
hidden=True,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
@@ -165,40 +163,10 @@ register(
|
|||||||
allow_blank=True,
|
allow_blank=True,
|
||||||
encrypted=True,
|
encrypted=True,
|
||||||
read_only=False,
|
read_only=False,
|
||||||
label=_('Red Hat Password for Subscriptions'),
|
label=_('Red Hat or Satellite password'),
|
||||||
help_text=_('Password used to retrieve subscription and content information'), # noqa
|
help_text=_('This password is used to retrieve subscription and content information'), # noqa
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
hidden=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
register(
|
|
||||||
'SUBSCRIPTIONS_CLIENT_ID',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
default='',
|
|
||||||
allow_blank=True,
|
|
||||||
encrypted=False,
|
|
||||||
read_only=False,
|
|
||||||
label=_('Red Hat Client ID for Subscriptions'),
|
|
||||||
help_text=_('Client ID used to retrieve subscription and content information'), # noqa
|
|
||||||
category=_('System'),
|
|
||||||
category_slug='system',
|
|
||||||
hidden=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
default='',
|
|
||||||
allow_blank=True,
|
|
||||||
encrypted=True,
|
|
||||||
read_only=False,
|
|
||||||
label=_('Red Hat Client Secret for Subscriptions'),
|
|
||||||
help_text=_('Client secret used to retrieve subscription and content information'), # noqa
|
|
||||||
category=_('System'),
|
|
||||||
category_slug='system',
|
|
||||||
hidden=True,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
@@ -269,6 +237,7 @@ register(
|
|||||||
help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
|
help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
|
||||||
category=_('Jobs'),
|
category=_('Jobs'),
|
||||||
category_slug='jobs',
|
category_slug='jobs',
|
||||||
|
required=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
register(
|
register(
|
||||||
@@ -279,6 +248,7 @@ register(
|
|||||||
('never', _('Never')),
|
('never', _('Never')),
|
||||||
('template', _('Only On Job Template Definitions')),
|
('template', _('Only On Job Template Definitions')),
|
||||||
],
|
],
|
||||||
|
required=True,
|
||||||
label=_('When can extra variables contain Jinja templates?'),
|
label=_('When can extra variables contain Jinja templates?'),
|
||||||
help_text=_(
|
help_text=_(
|
||||||
'Ansible allows variable substitution via the Jinja2 templating '
|
'Ansible allows variable substitution via the Jinja2 templating '
|
||||||
@@ -303,6 +273,7 @@ register(
|
|||||||
register(
|
register(
|
||||||
'AWX_ISOLATION_SHOW_PATHS',
|
'AWX_ISOLATION_SHOW_PATHS',
|
||||||
field_class=fields.StringListIsolatedPathField,
|
field_class=fields.StringListIsolatedPathField,
|
||||||
|
required=False,
|
||||||
label=_('Paths to expose to isolated jobs'),
|
label=_('Paths to expose to isolated jobs'),
|
||||||
help_text=_(
|
help_text=_(
|
||||||
'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
|
'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
|
||||||
@@ -468,6 +439,7 @@ register(
|
|||||||
register(
|
register(
|
||||||
'AWX_ANSIBLE_CALLBACK_PLUGINS',
|
'AWX_ANSIBLE_CALLBACK_PLUGINS',
|
||||||
field_class=fields.StringListField,
|
field_class=fields.StringListField,
|
||||||
|
required=False,
|
||||||
label=_('Ansible Callback Plugins'),
|
label=_('Ansible Callback Plugins'),
|
||||||
help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'),
|
help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'),
|
||||||
category=_('Jobs'),
|
category=_('Jobs'),
|
||||||
@@ -581,6 +553,7 @@ register(
|
|||||||
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
|
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
|
required=False,
|
||||||
)
|
)
|
||||||
register(
|
register(
|
||||||
'LOG_AGGREGATOR_TYPE',
|
'LOG_AGGREGATOR_TYPE',
|
||||||
@@ -602,6 +575,7 @@ register(
|
|||||||
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
|
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
|
required=False,
|
||||||
)
|
)
|
||||||
register(
|
register(
|
||||||
'LOG_AGGREGATOR_PASSWORD',
|
'LOG_AGGREGATOR_PASSWORD',
|
||||||
@@ -613,6 +587,7 @@ register(
|
|||||||
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
|
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
|
||||||
category=_('Logging'),
|
category=_('Logging'),
|
||||||
category_slug='logging',
|
category_slug='logging',
|
||||||
|
required=False,
|
||||||
)
|
)
|
||||||
register(
|
register(
|
||||||
'LOG_AGGREGATOR_LOGGERS',
|
'LOG_AGGREGATOR_LOGGERS',
|
||||||
@@ -799,6 +774,7 @@ register(
|
|||||||
allow_null=True,
|
allow_null=True,
|
||||||
category=_('System'),
|
category=_('System'),
|
||||||
category_slug='system',
|
category_slug='system',
|
||||||
|
required=False,
|
||||||
hidden=True,
|
hidden=True,
|
||||||
)
|
)
|
||||||
register(
|
register(
|
||||||
@@ -1004,134 +980,3 @@ def csrf_trusted_origins_validate(serializer, attrs):
|
|||||||
|
|
||||||
|
|
||||||
register_validate('system', csrf_trusted_origins_validate)
|
register_validate('system', csrf_trusted_origins_validate)
|
||||||
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_HOST',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
label=_('OPA server hostname'),
|
|
||||||
default='',
|
|
||||||
help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
allow_blank=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_PORT',
|
|
||||||
field_class=fields.IntegerField,
|
|
||||||
label=_('OPA server port'),
|
|
||||||
default=8181,
|
|
||||||
help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_SSL',
|
|
||||||
field_class=fields.BooleanField,
|
|
||||||
label=_('Use SSL for OPA connection'),
|
|
||||||
default=False,
|
|
||||||
help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_TYPE',
|
|
||||||
field_class=fields.ChoiceField,
|
|
||||||
label=_('OPA authentication type'),
|
|
||||||
choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
|
|
||||||
default=OPA_AUTH_TYPES.NONE,
|
|
||||||
help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_TOKEN',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
label=_('OPA authentication token'),
|
|
||||||
default='',
|
|
||||||
help_text=_(
|
|
||||||
'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
|
|
||||||
),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
allow_blank=True,
|
|
||||||
encrypted=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_CLIENT_CERT',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
label=_('OPA client certificate content'),
|
|
||||||
default='',
|
|
||||||
help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
allow_blank=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_CLIENT_KEY',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
label=_('OPA client key content'),
|
|
||||||
default='',
|
|
||||||
help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
allow_blank=True,
|
|
||||||
encrypted=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_CA_CERT',
|
|
||||||
field_class=fields.CharField,
|
|
||||||
label=_('OPA CA certificate content'),
|
|
||||||
default='',
|
|
||||||
help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
allow_blank=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_AUTH_CUSTOM_HEADERS',
|
|
||||||
field_class=fields.DictField,
|
|
||||||
label=_('OPA custom authentication headers'),
|
|
||||||
default={},
|
|
||||||
help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_REQUEST_TIMEOUT',
|
|
||||||
field_class=fields.FloatField,
|
|
||||||
label=_('OPA request timeout'),
|
|
||||||
default=1.5,
|
|
||||||
help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
register(
|
|
||||||
'OPA_REQUEST_RETRIES',
|
|
||||||
field_class=fields.IntegerField,
|
|
||||||
label=_('OPA request retry count'),
|
|
||||||
default=2,
|
|
||||||
help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
|
|
||||||
category=('PolicyAsCode'),
|
|
||||||
category_slug='policyascode',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def policy_as_code_validate(serializer, attrs):
|
|
||||||
opa_host = attrs.get('OPA_HOST', '')
|
|
||||||
if opa_host and (opa_host.startswith('http://') or opa_host.startswith('https://')):
|
|
||||||
raise serializers.ValidationError({'OPA_HOST': _("OPA_HOST should not include 'http://' or 'https://' prefixes. Please enter only the hostname.")})
|
|
||||||
return attrs
|
|
||||||
|
|
||||||
|
|
||||||
register_validate('policyascode', policy_as_code_validate)
|
|
||||||
|
|||||||
@@ -77,8 +77,6 @@ LOGGER_BLOCKLIST = (
|
|||||||
'awx.main.utils.log',
|
'awx.main.utils.log',
|
||||||
# loggers that may be called getting logging settings
|
# loggers that may be called getting logging settings
|
||||||
'awx.conf',
|
'awx.conf',
|
||||||
# dispatcherd should only use 1 database connection
|
|
||||||
'dispatcherd',
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Reported version for node seen in receptor mesh but for which capacity check
|
# Reported version for node seen in receptor mesh but for which capacity check
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
from django.conf import settings
|
|
||||||
|
|
||||||
from ansible_base.lib.utils.db import get_pg_notify_params
|
|
||||||
from awx.main.dispatch import get_task_queuename
|
|
||||||
from awx.main.dispatch.pool import get_auto_max_workers
|
|
||||||
|
|
||||||
|
|
||||||
def get_dispatcherd_config(for_service: bool = False, mock_publish: bool = False) -> dict:
|
|
||||||
"""Return a dictionary config for dispatcherd
|
|
||||||
|
|
||||||
Parameters:
|
|
||||||
for_service: if True, include dynamic options needed for running the dispatcher service
|
|
||||||
this will require database access, you should delay evaluation until after app setup
|
|
||||||
"""
|
|
||||||
config = {
|
|
||||||
"version": 2,
|
|
||||||
"service": {
|
|
||||||
"pool_kwargs": {
|
|
||||||
"min_workers": settings.JOB_EVENT_WORKERS,
|
|
||||||
"max_workers": get_auto_max_workers(),
|
|
||||||
},
|
|
||||||
"main_kwargs": {"node_id": settings.CLUSTER_HOST_ID},
|
|
||||||
"process_manager_cls": "ForkServerManager",
|
|
||||||
"process_manager_kwargs": {"preload_modules": ['awx.main.dispatch.hazmat']},
|
|
||||||
},
|
|
||||||
"brokers": {
|
|
||||||
"socket": {"socket_path": settings.DISPATCHERD_DEBUGGING_SOCKFILE},
|
|
||||||
},
|
|
||||||
"publish": {"default_control_broker": "socket"},
|
|
||||||
"worker": {"worker_cls": "awx.main.dispatch.worker.dispatcherd.AWXTaskWorker"},
|
|
||||||
}
|
|
||||||
|
|
||||||
if mock_publish:
|
|
||||||
config["brokers"]["noop"] = {}
|
|
||||||
config["publish"]["default_broker"] = "noop"
|
|
||||||
else:
|
|
||||||
config["brokers"]["pg_notify"] = {
|
|
||||||
"config": get_pg_notify_params(),
|
|
||||||
"sync_connection_factory": "ansible_base.lib.utils.db.psycopg_connection_from_django",
|
|
||||||
"default_publish_channel": settings.CLUSTER_HOST_ID, # used for debugging commands
|
|
||||||
}
|
|
||||||
config["publish"]["default_broker"] = "pg_notify"
|
|
||||||
|
|
||||||
if for_service:
|
|
||||||
config["producers"] = {
|
|
||||||
"ScheduledProducer": {"task_schedule": settings.DISPATCHER_SCHEDULE},
|
|
||||||
"OnStartProducer": {"task_list": {"awx.main.tasks.system.dispatch_startup": {}}},
|
|
||||||
"ControlProducer": {},
|
|
||||||
}
|
|
||||||
|
|
||||||
config["brokers"]["pg_notify"]["channels"] = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
|
||||||
|
|
||||||
return config
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
import django
|
|
||||||
|
|
||||||
# dispatcherd publisher logic is likely to be used, but needs manual preload
|
|
||||||
from dispatcherd.brokers import pg_notify # noqa
|
|
||||||
|
|
||||||
# Cache may not be initialized until we are in the worker, so preload here
|
|
||||||
from channels_redis import core # noqa
|
|
||||||
|
|
||||||
from awx import prepare_env
|
|
||||||
|
|
||||||
from dispatcherd.utils import resolve_callable
|
|
||||||
|
|
||||||
|
|
||||||
prepare_env()
|
|
||||||
|
|
||||||
django.setup() # noqa
|
|
||||||
|
|
||||||
|
|
||||||
from django.conf import settings
|
|
||||||
|
|
||||||
|
|
||||||
# Preload all periodic tasks so their imports will be in shared memory
|
|
||||||
for name, options in settings.CELERYBEAT_SCHEDULE.items():
|
|
||||||
resolve_callable(options['task'])
|
|
||||||
|
|
||||||
|
|
||||||
# Preload in-line import from tasks
|
|
||||||
from awx.main.scheduler.kubernetes import PodManager # noqa
|
|
||||||
|
|
||||||
|
|
||||||
from django.core.cache import cache as django_cache
|
|
||||||
from django.db import connection
|
|
||||||
|
|
||||||
|
|
||||||
connection.close()
|
|
||||||
django_cache.close()
|
|
||||||
@@ -7,7 +7,6 @@ import time
|
|||||||
import traceback
|
import traceback
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
import json
|
|
||||||
|
|
||||||
import collections
|
import collections
|
||||||
from multiprocessing import Process
|
from multiprocessing import Process
|
||||||
@@ -26,10 +25,7 @@ from ansible_base.lib.logging.runtime import log_excess_runtime
|
|||||||
|
|
||||||
from awx.main.models import UnifiedJob
|
from awx.main.models import UnifiedJob
|
||||||
from awx.main.dispatch import reaper
|
from awx.main.dispatch import reaper
|
||||||
from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity
|
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
|
||||||
|
|
||||||
# ansible-runner
|
|
||||||
from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
|
|
||||||
|
|
||||||
if 'run_callback_receiver' in sys.argv:
|
if 'run_callback_receiver' in sys.argv:
|
||||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||||
@@ -37,9 +33,6 @@ else:
|
|||||||
logger = logging.getLogger('awx.main.dispatch')
|
logger = logging.getLogger('awx.main.dispatch')
|
||||||
|
|
||||||
|
|
||||||
RETIRED_SENTINEL_TASK = "[retired]"
|
|
||||||
|
|
||||||
|
|
||||||
class NoOpResultQueue(object):
|
class NoOpResultQueue(object):
|
||||||
def put(self, item):
|
def put(self, item):
|
||||||
pass
|
pass
|
||||||
@@ -84,17 +77,11 @@ class PoolWorker(object):
|
|||||||
self.queue = MPQueue(queue_size)
|
self.queue = MPQueue(queue_size)
|
||||||
self.process = Process(target=target, args=(self.queue, self.finished) + args)
|
self.process = Process(target=target, args=(self.queue, self.finished) + args)
|
||||||
self.process.daemon = True
|
self.process.daemon = True
|
||||||
self.creation_time = time.monotonic()
|
|
||||||
self.retiring = False
|
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
self.process.start()
|
self.process.start()
|
||||||
|
|
||||||
def put(self, body):
|
def put(self, body):
|
||||||
if self.retiring:
|
|
||||||
uuid = body.get('uuid', 'N/A') if isinstance(body, dict) else 'N/A'
|
|
||||||
logger.info(f"Worker pid:{self.pid} is retiring. Refusing new task {uuid}.")
|
|
||||||
raise QueueFull("Worker is retiring and not accepting new tasks") # AutoscalePool.write handles QueueFull
|
|
||||||
uuid = '?'
|
uuid = '?'
|
||||||
if isinstance(body, dict):
|
if isinstance(body, dict):
|
||||||
if not body.get('uuid'):
|
if not body.get('uuid'):
|
||||||
@@ -113,11 +100,6 @@ class PoolWorker(object):
|
|||||||
"""
|
"""
|
||||||
self.queue.put('QUIT')
|
self.queue.put('QUIT')
|
||||||
|
|
||||||
@property
|
|
||||||
def age(self):
|
|
||||||
"""Returns the current age of the worker in seconds."""
|
|
||||||
return time.monotonic() - self.creation_time
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def pid(self):
|
def pid(self):
|
||||||
return self.process.pid
|
return self.process.pid
|
||||||
@@ -164,8 +146,6 @@ class PoolWorker(object):
|
|||||||
# the purpose of self.managed_tasks is to just track internal
|
# the purpose of self.managed_tasks is to just track internal
|
||||||
# state of which events are *currently* being processed.
|
# state of which events are *currently* being processed.
|
||||||
logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))
|
logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))
|
||||||
if self.retiring:
|
|
||||||
self.managed_tasks[RETIRED_SENTINEL_TASK] = {'task': RETIRED_SENTINEL_TASK}
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def current_task(self):
|
def current_task(self):
|
||||||
@@ -281,8 +261,6 @@ class WorkerPool(object):
|
|||||||
'{% for w in workers %}'
|
'{% for w in workers %}'
|
||||||
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
|
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
|
||||||
' sent={{ w.messages_sent }}'
|
' sent={{ w.messages_sent }}'
|
||||||
' age={{ "%.0f"|format(w.age) }}s'
|
|
||||||
' retiring={{ w.retiring }}'
|
|
||||||
'{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
|
'{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
|
||||||
' qsize={{ w.managed_tasks|length }}'
|
' qsize={{ w.managed_tasks|length }}'
|
||||||
' rss={{ w.mb }}MB'
|
' rss={{ w.mb }}MB'
|
||||||
@@ -329,41 +307,6 @@ class WorkerPool(object):
|
|||||||
logger.exception('could not kill {}'.format(worker.pid))
|
logger.exception('could not kill {}'.format(worker.pid))
|
||||||
|
|
||||||
|
|
||||||
def get_auto_max_workers():
|
|
||||||
"""Method we normally rely on to get max_workers
|
|
||||||
|
|
||||||
Uses almost same logic as Instance.local_health_check
|
|
||||||
The important thing is to be MORE than Instance.capacity
|
|
||||||
so that the task-manager does not over-schedule this node
|
|
||||||
|
|
||||||
Ideally we would just use the capacity from the database plus reserve workers,
|
|
||||||
but this poses some bootstrap problems where OCP task containers
|
|
||||||
register themselves after startup
|
|
||||||
"""
|
|
||||||
# Get memory from ansible-runner
|
|
||||||
total_memory_gb = get_mem_in_bytes()
|
|
||||||
|
|
||||||
# This may replace memory calculation with a user override
|
|
||||||
corrected_memory = get_corrected_memory(total_memory_gb)
|
|
||||||
|
|
||||||
# Get same number as max forks based on memory, this function takes memory as bytes
|
|
||||||
mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)
|
|
||||||
|
|
||||||
# Follow same process for CPU capacity constraint
|
|
||||||
cpu_count = get_cpu_count()
|
|
||||||
corrected_cpu = get_corrected_cpu(cpu_count)
|
|
||||||
cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)
|
|
||||||
|
|
||||||
# Here is what is different from health checks,
|
|
||||||
auto_max = max(mem_capacity, cpu_capacity)
|
|
||||||
|
|
||||||
# add magic number of extra workers to ensure
|
|
||||||
# we have a few extra workers to run the heartbeat
|
|
||||||
auto_max += 7
|
|
||||||
|
|
||||||
return auto_max
|
|
||||||
|
|
||||||
|
|
||||||
class AutoscalePool(WorkerPool):
|
class AutoscalePool(WorkerPool):
|
||||||
"""
|
"""
|
||||||
An extended pool implementation that automatically scales workers up and
|
An extended pool implementation that automatically scales workers up and
|
||||||
@@ -374,13 +317,22 @@ class AutoscalePool(WorkerPool):
|
|||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
self.max_workers = kwargs.pop('max_workers', None)
|
self.max_workers = kwargs.pop('max_workers', None)
|
||||||
self.max_worker_lifetime_seconds = kwargs.pop(
|
|
||||||
'max_worker_lifetime_seconds', getattr(settings, 'WORKER_MAX_LIFETIME_SECONDS', 14400)
|
|
||||||
) # Default to 4 hours
|
|
||||||
super(AutoscalePool, self).__init__(*args, **kwargs)
|
super(AutoscalePool, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
if self.max_workers is None:
|
if self.max_workers is None:
|
||||||
self.max_workers = get_auto_max_workers()
|
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
|
||||||
|
if settings_absmem is not None:
|
||||||
|
# There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
|
||||||
|
total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
|
||||||
|
else:
|
||||||
|
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
|
||||||
|
|
||||||
|
# Get same number as max forks based on memory, this function takes memory as bytes
|
||||||
|
self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
|
||||||
|
|
||||||
|
# add magic prime number of extra workers to ensure
|
||||||
|
# we have a few extra workers to run the heartbeat
|
||||||
|
self.max_workers += 7
|
||||||
|
|
||||||
# max workers can't be less than min_workers
|
# max workers can't be less than min_workers
|
||||||
self.max_workers = max(self.min_workers, self.max_workers)
|
self.max_workers = max(self.min_workers, self.max_workers)
|
||||||
@@ -394,9 +346,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
self.scale_up_ct = 0
|
self.scale_up_ct = 0
|
||||||
self.worker_count_max = 0
|
self.worker_count_max = 0
|
||||||
|
|
||||||
# last time we wrote current tasks, to avoid too much log spam
|
|
||||||
self.last_task_list_log = time.monotonic()
|
|
||||||
|
|
||||||
def produce_subsystem_metrics(self, metrics_object):
|
def produce_subsystem_metrics(self, metrics_object):
|
||||||
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
|
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
|
||||||
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
|
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
|
||||||
@@ -436,7 +385,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
"""
|
"""
|
||||||
orphaned = []
|
orphaned = []
|
||||||
for w in self.workers[::]:
|
for w in self.workers[::]:
|
||||||
is_retirement_age = self.max_worker_lifetime_seconds is not None and w.age > self.max_worker_lifetime_seconds
|
|
||||||
if not w.alive:
|
if not w.alive:
|
||||||
# the worker process has exited
|
# the worker process has exited
|
||||||
# 1. take the task it was running and enqueue the error
|
# 1. take the task it was running and enqueue the error
|
||||||
@@ -445,10 +393,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
# send them to another worker
|
# send them to another worker
|
||||||
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
|
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
|
||||||
if w.current_task:
|
if w.current_task:
|
||||||
if w.current_task == {'task': RETIRED_SENTINEL_TASK}:
|
|
||||||
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
|
|
||||||
self.workers.remove(w)
|
|
||||||
continue
|
|
||||||
if w.current_task != 'QUIT':
|
if w.current_task != 'QUIT':
|
||||||
try:
|
try:
|
||||||
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
|
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
|
||||||
@@ -459,7 +403,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
|
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
|
||||||
orphaned.extend(w.orphaned_tasks)
|
orphaned.extend(w.orphaned_tasks)
|
||||||
self.workers.remove(w)
|
self.workers.remove(w)
|
||||||
|
|
||||||
elif w.idle and len(self.workers) > self.min_workers:
|
elif w.idle and len(self.workers) > self.min_workers:
|
||||||
# the process has an empty queue (it's idle) and we have
|
# the process has an empty queue (it's idle) and we have
|
||||||
# more processes in the pool than we need (> min)
|
# more processes in the pool than we need (> min)
|
||||||
@@ -468,22 +411,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
logger.debug('scaling down worker pid:{}'.format(w.pid))
|
logger.debug('scaling down worker pid:{}'.format(w.pid))
|
||||||
w.quit()
|
w.quit()
|
||||||
self.workers.remove(w)
|
self.workers.remove(w)
|
||||||
|
|
||||||
elif w.idle and is_retirement_age:
|
|
||||||
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
|
|
||||||
w.quit()
|
|
||||||
self.workers.remove(w)
|
|
||||||
|
|
||||||
elif is_retirement_age and not w.retiring and not w.idle:
|
|
||||||
logger.info(
|
|
||||||
f"Worker pid:{w.pid} (age: {w.age:.0f}s) exceeded max lifetime ({self.max_worker_lifetime_seconds:.0f}s). "
|
|
||||||
"Signaling for graceful retirement."
|
|
||||||
)
|
|
||||||
# Send QUIT signal; worker will finish current task then exit.
|
|
||||||
w.quit()
|
|
||||||
# mark as retiring to reject any future tasks that might be assigned in meantime
|
|
||||||
w.retiring = True
|
|
||||||
|
|
||||||
if w.alive:
|
if w.alive:
|
||||||
# if we discover a task manager invocation that's been running
|
# if we discover a task manager invocation that's been running
|
||||||
# too long, reap it (because otherwise it'll just hold the postgres
|
# too long, reap it (because otherwise it'll just hold the postgres
|
||||||
@@ -536,14 +463,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
self.worker_count_max = new_worker_ct
|
self.worker_count_max = new_worker_ct
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def fast_task_serialization(current_task):
|
|
||||||
try:
|
|
||||||
return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
|
|
||||||
except Exception:
|
|
||||||
# just make sure this does not make things worse
|
|
||||||
return str(current_task)
|
|
||||||
|
|
||||||
def write(self, preferred_queue, body):
|
def write(self, preferred_queue, body):
|
||||||
if 'guid' in body:
|
if 'guid' in body:
|
||||||
set_guid(body['guid'])
|
set_guid(body['guid'])
|
||||||
@@ -565,15 +484,6 @@ class AutoscalePool(WorkerPool):
|
|||||||
if isinstance(body, dict):
|
if isinstance(body, dict):
|
||||||
task_name = body.get('task')
|
task_name = body.get('task')
|
||||||
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
|
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
|
||||||
# Once every 10 seconds write out task list for debugging
|
|
||||||
if time.monotonic() - self.last_task_list_log >= 10.0:
|
|
||||||
task_counts = {}
|
|
||||||
for worker in self.workers:
|
|
||||||
task_slug = self.fast_task_serialization(worker.current_task)
|
|
||||||
task_counts.setdefault(task_slug, 0)
|
|
||||||
task_counts[task_slug] += 1
|
|
||||||
logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
|
|
||||||
self.last_task_list_log = time.monotonic()
|
|
||||||
return super(AutoscalePool, self).write(preferred_queue, body)
|
return super(AutoscalePool, self).write(preferred_queue, body)
|
||||||
except Exception:
|
except Exception:
|
||||||
for conn in connections.all():
|
for conn in connections.all():
|
||||||
|
|||||||
@@ -4,9 +4,6 @@ import json
|
|||||||
import time
|
import time
|
||||||
from uuid import uuid4
|
from uuid import uuid4
|
||||||
|
|
||||||
from dispatcherd.publish import submit_task
|
|
||||||
from dispatcherd.utils import resolve_callable
|
|
||||||
|
|
||||||
from django_guid import get_guid
|
from django_guid import get_guid
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
|
||||||
@@ -96,19 +93,6 @@ class task:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
||||||
try:
|
|
||||||
from flags.state import flag_enabled
|
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
|
||||||
# At this point we have the import string, and submit_task wants the method, so back to that
|
|
||||||
actual_task = resolve_callable(cls.name)
|
|
||||||
return submit_task(actual_task, args=args, kwargs=kwargs, queue=queue, uuid=uuid, **kw)
|
|
||||||
except Exception:
|
|
||||||
logger.exception(f"[DISPATCHER] Failed to check for alternative dispatcherd implementation for {cls.name}")
|
|
||||||
# Continue with original implementation if anything fails
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Original implementation follows
|
|
||||||
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
|
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
|
||||||
if not queue:
|
if not queue:
|
||||||
msg = f'{cls.name}: Queue value required and may not be None'
|
msg = f'{cls.name}: Queue value required and may not be None'
|
||||||
|
|||||||
@@ -238,7 +238,7 @@ class AWXConsumerPG(AWXConsumerBase):
|
|||||||
def run(self, *args, **kwargs):
|
def run(self, *args, **kwargs):
|
||||||
super(AWXConsumerPG, self).run(*args, **kwargs)
|
super(AWXConsumerPG, self).run(*args, **kwargs)
|
||||||
|
|
||||||
logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
|
logger.info(f"Running worker {self.name} listening to queues {self.queues}")
|
||||||
init = False
|
init = False
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
from dispatcherd.worker.task import TaskWorker
|
|
||||||
|
|
||||||
from django.db import connection
|
|
||||||
|
|
||||||
|
|
||||||
class AWXTaskWorker(TaskWorker):
|
|
||||||
|
|
||||||
def on_start(self) -> None:
|
|
||||||
"""Get worker connected so that first task it gets will be worked quickly"""
|
|
||||||
connection.ensure_connection()
|
|
||||||
|
|
||||||
def pre_task(self, message) -> None:
|
|
||||||
"""This should remedy bad connections that can not fix themselves"""
|
|
||||||
connection.close_if_unusable_or_obsolete()
|
|
||||||
@@ -38,12 +38,5 @@ class PostRunError(Exception):
|
|||||||
super(PostRunError, self).__init__(msg)
|
super(PostRunError, self).__init__(msg)
|
||||||
|
|
||||||
|
|
||||||
class PolicyEvaluationError(Exception):
|
|
||||||
def __init__(self, msg, status='failed', tb=''):
|
|
||||||
self.status = status
|
|
||||||
self.tb = tb
|
|
||||||
super(PolicyEvaluationError, self).__init__(msg)
|
|
||||||
|
|
||||||
|
|
||||||
class ReceptorNodeNotFound(RuntimeError):
|
class ReceptorNodeNotFound(RuntimeError):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -14,14 +14,21 @@ from jinja2.exceptions import UndefinedError, TemplateSyntaxError, SecurityError
|
|||||||
# Django
|
# Django
|
||||||
from django.core import exceptions as django_exceptions
|
from django.core import exceptions as django_exceptions
|
||||||
from django.core.serializers.json import DjangoJSONEncoder
|
from django.core.serializers.json import DjangoJSONEncoder
|
||||||
from django.db.models.signals import m2m_changed, post_save
|
from django.db.models.signals import (
|
||||||
|
post_save,
|
||||||
|
post_delete,
|
||||||
|
)
|
||||||
|
from django.db.models.signals import m2m_changed
|
||||||
from django.db import models
|
from django.db import models
|
||||||
|
from django.db.models.fields.related import lazy_related_operation
|
||||||
from django.db.models.fields.related_descriptors import (
|
from django.db.models.fields.related_descriptors import (
|
||||||
ReverseOneToOneDescriptor,
|
ReverseOneToOneDescriptor,
|
||||||
ForwardManyToOneDescriptor,
|
ForwardManyToOneDescriptor,
|
||||||
ManyToManyDescriptor,
|
ManyToManyDescriptor,
|
||||||
|
ReverseManyToOneDescriptor,
|
||||||
create_forward_many_to_many_manager,
|
create_forward_many_to_many_manager,
|
||||||
)
|
)
|
||||||
|
from django.utils.encoding import smart_str
|
||||||
from django.db.models import JSONField
|
from django.db.models import JSONField
|
||||||
from django.utils.functional import cached_property
|
from django.utils.functional import cached_property
|
||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
@@ -47,6 +54,7 @@ __all__ = [
|
|||||||
'ImplicitRoleField',
|
'ImplicitRoleField',
|
||||||
'SmartFilterField',
|
'SmartFilterField',
|
||||||
'OrderedManyToManyField',
|
'OrderedManyToManyField',
|
||||||
|
'update_role_parentage_for_instance',
|
||||||
'is_implicit_parent',
|
'is_implicit_parent',
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -138,6 +146,34 @@ class AutoOneToOneField(models.OneToOneField):
|
|||||||
setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related))
|
setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related))
|
||||||
|
|
||||||
|
|
||||||
|
def resolve_role_field(obj, field):
|
||||||
|
ret = []
|
||||||
|
|
||||||
|
field_components = field.split('.', 1)
|
||||||
|
if hasattr(obj, field_components[0]):
|
||||||
|
obj = getattr(obj, field_components[0])
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
|
||||||
|
if obj is None:
|
||||||
|
return []
|
||||||
|
|
||||||
|
if len(field_components) == 1:
|
||||||
|
# use extremely generous duck typing to accomidate all possible forms
|
||||||
|
# of the model that may be used during various migrations
|
||||||
|
if obj._meta.model_name != 'role' or obj._meta.app_label != 'main':
|
||||||
|
raise Exception(smart_str('{} refers to a {}, not a Role'.format(field, type(obj))))
|
||||||
|
ret.append(obj.id)
|
||||||
|
else:
|
||||||
|
if type(obj) is ManyToManyDescriptor:
|
||||||
|
for o in obj.all():
|
||||||
|
ret += resolve_role_field(o, field_components[1])
|
||||||
|
else:
|
||||||
|
ret += resolve_role_field(obj, field_components[1])
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
def is_implicit_parent(parent_role, child_role):
|
def is_implicit_parent(parent_role, child_role):
|
||||||
"""
|
"""
|
||||||
Determine if the parent_role is an implicit parent as defined by
|
Determine if the parent_role is an implicit parent as defined by
|
||||||
@@ -174,6 +210,34 @@ def is_implicit_parent(parent_role, child_role):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def update_role_parentage_for_instance(instance):
|
||||||
|
"""update_role_parentage_for_instance
|
||||||
|
updates the parents listing for all the roles
|
||||||
|
of a given instance if they have changed
|
||||||
|
"""
|
||||||
|
parents_removed = set()
|
||||||
|
parents_added = set()
|
||||||
|
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
|
||||||
|
cur_role = getattr(instance, implicit_role_field.name)
|
||||||
|
original_parents = set(json.loads(cur_role.implicit_parents))
|
||||||
|
new_parents = implicit_role_field._resolve_parent_roles(instance)
|
||||||
|
removals = original_parents - new_parents
|
||||||
|
if removals:
|
||||||
|
cur_role.parents.remove(*list(removals))
|
||||||
|
parents_removed.add(cur_role.pk)
|
||||||
|
additions = new_parents - original_parents
|
||||||
|
if additions:
|
||||||
|
cur_role.parents.add(*list(additions))
|
||||||
|
parents_added.add(cur_role.pk)
|
||||||
|
new_parents_list = list(new_parents)
|
||||||
|
new_parents_list.sort()
|
||||||
|
new_parents_json = json.dumps(new_parents_list)
|
||||||
|
if cur_role.implicit_parents != new_parents_json:
|
||||||
|
cur_role.implicit_parents = new_parents_json
|
||||||
|
cur_role.save(update_fields=['implicit_parents'])
|
||||||
|
return (parents_added, parents_removed)
|
||||||
|
|
||||||
|
|
||||||
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
|
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -205,6 +269,65 @@ class ImplicitRoleField(models.ForeignKey):
|
|||||||
getattr(cls, '__implicit_role_fields').append(self)
|
getattr(cls, '__implicit_role_fields').append(self)
|
||||||
|
|
||||||
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
|
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
|
||||||
|
post_delete.connect(self._post_delete, cls, True, dispatch_uid='implicit-role-post-delete')
|
||||||
|
|
||||||
|
function = lambda local, related, field: self.bind_m2m_changed(field, related, local)
|
||||||
|
lazy_related_operation(function, cls, "self", field=self)
|
||||||
|
|
||||||
|
def bind_m2m_changed(self, _self, _role_class, cls):
|
||||||
|
if not self.parent_role:
|
||||||
|
return
|
||||||
|
|
||||||
|
field_names = self.parent_role
|
||||||
|
if type(field_names) is not list:
|
||||||
|
field_names = [field_names]
|
||||||
|
|
||||||
|
for field_name in field_names:
|
||||||
|
if field_name.startswith('singleton:'):
|
||||||
|
continue
|
||||||
|
|
||||||
|
field_name, sep, field_attr = field_name.partition('.')
|
||||||
|
# Non existent fields will occur if ever a parent model is
|
||||||
|
# moved inside a migration, needed for job_template_organization_field
|
||||||
|
# migration in particular
|
||||||
|
# consistency is assured by unit test awx.main.tests.functional
|
||||||
|
field = getattr(cls, field_name, None)
|
||||||
|
|
||||||
|
if field and type(field) is ReverseManyToOneDescriptor or type(field) is ManyToManyDescriptor:
|
||||||
|
if '.' in field_attr:
|
||||||
|
raise Exception('Referencing deep roles through ManyToMany fields is unsupported.')
|
||||||
|
|
||||||
|
if type(field) is ReverseManyToOneDescriptor:
|
||||||
|
sender = field.through
|
||||||
|
else:
|
||||||
|
sender = field.related.through
|
||||||
|
|
||||||
|
reverse = type(field) is ManyToManyDescriptor
|
||||||
|
m2m_changed.connect(self.m2m_update(field_attr, reverse), sender, weak=False)
|
||||||
|
|
||||||
|
def m2m_update(self, field_attr, _reverse):
|
||||||
|
def _m2m_update(instance, action, model, pk_set, reverse, **kwargs):
|
||||||
|
if action == 'post_add' or action == 'pre_remove':
|
||||||
|
if _reverse:
|
||||||
|
reverse = not reverse
|
||||||
|
|
||||||
|
if reverse:
|
||||||
|
for pk in pk_set:
|
||||||
|
obj = model.objects.get(pk=pk)
|
||||||
|
if action == 'post_add':
|
||||||
|
getattr(instance, field_attr).children.add(getattr(obj, self.name))
|
||||||
|
if action == 'pre_remove':
|
||||||
|
getattr(instance, field_attr).children.remove(getattr(obj, self.name))
|
||||||
|
|
||||||
|
else:
|
||||||
|
for pk in pk_set:
|
||||||
|
obj = model.objects.get(pk=pk)
|
||||||
|
if action == 'post_add':
|
||||||
|
getattr(instance, self.name).parents.add(getattr(obj, field_attr))
|
||||||
|
if action == 'pre_remove':
|
||||||
|
getattr(instance, self.name).parents.remove(getattr(obj, field_attr))
|
||||||
|
|
||||||
|
return _m2m_update
|
||||||
|
|
||||||
def _post_save(self, instance, created, *args, **kwargs):
|
def _post_save(self, instance, created, *args, **kwargs):
|
||||||
Role_ = utils.get_current_apps().get_model('main', 'Role')
|
Role_ = utils.get_current_apps().get_model('main', 'Role')
|
||||||
@@ -214,24 +337,68 @@ class ImplicitRoleField(models.ForeignKey):
|
|||||||
Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
|
Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
|
||||||
latest_instance = Model.objects.get(pk=instance.pk)
|
latest_instance = Model.objects.get(pk=instance.pk)
|
||||||
|
|
||||||
# Create any missing role objects
|
# Avoid circular import
|
||||||
missing_roles = []
|
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
|
||||||
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
|
|
||||||
cur_role = getattr(latest_instance, implicit_role_field.name, None)
|
|
||||||
if cur_role is None:
|
|
||||||
missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
|
|
||||||
|
|
||||||
if len(missing_roles) > 0:
|
with batch_role_ancestor_rebuilding():
|
||||||
Role_.objects.bulk_create(missing_roles)
|
# Create any missing role objects
|
||||||
updates = {}
|
missing_roles = []
|
||||||
role_ids = []
|
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
|
||||||
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
|
cur_role = getattr(latest_instance, implicit_role_field.name, None)
|
||||||
setattr(latest_instance, role.role_field, role)
|
if cur_role is None:
|
||||||
updates[role.role_field] = role.id
|
missing_roles.append(Role_(role_field=implicit_role_field.name, content_type_id=ct_id, object_id=latest_instance.id))
|
||||||
role_ids.append(role.id)
|
|
||||||
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
|
|
||||||
|
|
||||||
instance.refresh_from_db()
|
if len(missing_roles) > 0:
|
||||||
|
Role_.objects.bulk_create(missing_roles)
|
||||||
|
updates = {}
|
||||||
|
role_ids = []
|
||||||
|
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
|
||||||
|
setattr(latest_instance, role.role_field, role)
|
||||||
|
updates[role.role_field] = role.id
|
||||||
|
role_ids.append(role.id)
|
||||||
|
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
|
||||||
|
Role.rebuild_role_ancestor_list(role_ids, [])
|
||||||
|
|
||||||
|
update_role_parentage_for_instance(latest_instance)
|
||||||
|
instance.refresh_from_db()
|
||||||
|
|
||||||
|
def _resolve_parent_roles(self, instance):
|
||||||
|
if not self.parent_role:
|
||||||
|
return set()
|
||||||
|
|
||||||
|
paths = self.parent_role if type(self.parent_role) is list else [self.parent_role]
|
||||||
|
parent_roles = set()
|
||||||
|
|
||||||
|
for path in paths:
|
||||||
|
if path.startswith("singleton:"):
|
||||||
|
singleton_name = path[10:]
|
||||||
|
Role_ = utils.get_current_apps().get_model('main', 'Role')
|
||||||
|
qs = Role_.objects.filter(singleton_name=singleton_name)
|
||||||
|
if qs.count() >= 1:
|
||||||
|
role = qs[0]
|
||||||
|
else:
|
||||||
|
role = Role_.objects.create(singleton_name=singleton_name, role_field=singleton_name)
|
||||||
|
parents = [role.id]
|
||||||
|
else:
|
||||||
|
parents = resolve_role_field(instance, path)
|
||||||
|
|
||||||
|
for parent in parents:
|
||||||
|
parent_roles.add(parent)
|
||||||
|
return parent_roles
|
||||||
|
|
||||||
|
def _post_delete(self, instance, *args, **kwargs):
|
||||||
|
role_ids = []
|
||||||
|
for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields'):
|
||||||
|
role_ids.append(getattr(instance, implicit_role_field.name + '_id'))
|
||||||
|
|
||||||
|
Role_ = utils.get_current_apps().get_model('main', 'Role')
|
||||||
|
child_ids = [x for x in Role_.parents.through.objects.filter(to_role_id__in=role_ids).distinct().values_list('from_role_id', flat=True)]
|
||||||
|
Role_.objects.filter(id__in=role_ids).delete()
|
||||||
|
|
||||||
|
# Avoid circular import
|
||||||
|
from awx.main.models.rbac import Role
|
||||||
|
|
||||||
|
Role.rebuild_role_ancestor_list([], child_ids)
|
||||||
|
|
||||||
|
|
||||||
class SmartFilterField(models.TextField):
|
class SmartFilterField(models.TextField):
|
||||||
|
|||||||
@@ -4,7 +4,6 @@
|
|||||||
from django.core.management.base import BaseCommand
|
from django.core.management.base import BaseCommand
|
||||||
from django.db import transaction
|
from django.db import transaction
|
||||||
from crum import impersonate
|
from crum import impersonate
|
||||||
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
|
|
||||||
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
|
from awx.main.models import User, Organization, Project, Inventory, CredentialType, Credential, Host, JobTemplate
|
||||||
from awx.main.signals import disable_computed_fields
|
from awx.main.signals import disable_computed_fields
|
||||||
|
|
||||||
@@ -17,9 +16,8 @@ class Command(BaseCommand):
|
|||||||
def handle(self, *args, **kwargs):
|
def handle(self, *args, **kwargs):
|
||||||
# Wrap the operation in an atomic block, so we do not on accident
|
# Wrap the operation in an atomic block, so we do not on accident
|
||||||
# create the organization but not create the project, etc.
|
# create the organization but not create the project, etc.
|
||||||
with no_reverse_sync():
|
with transaction.atomic():
|
||||||
with transaction.atomic():
|
self._handle()
|
||||||
self._handle()
|
|
||||||
|
|
||||||
def _handle(self):
|
def _handle(self):
|
||||||
changed = False
|
changed = False
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ from awx.main.utils.safe_yaml import sanitize_jinja
|
|||||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding
|
from awx.main.models.rbac import batch_role_ancestor_rebuilding
|
||||||
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
|
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
|
||||||
from awx.main.utils.execution_environments import get_default_execution_environment
|
from awx.main.utils.execution_environments import get_default_execution_environment
|
||||||
from awx.main.utils.inventory_vars import update_group_variables
|
|
||||||
from awx.main.signals import disable_activity_stream
|
from awx.main.signals import disable_activity_stream
|
||||||
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
|
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
|
||||||
|
|
||||||
@@ -458,19 +457,19 @@ class Command(BaseCommand):
|
|||||||
"""
|
"""
|
||||||
Update inventory variables from "all" group.
|
Update inventory variables from "all" group.
|
||||||
"""
|
"""
|
||||||
|
# TODO: We disable variable overwrite here in case user-defined inventory variables get
|
||||||
|
# mangled. But we still need to figure out a better way of processing multiple inventory
|
||||||
|
# update variables mixing with each other.
|
||||||
|
# issue for this: https://github.com/ansible/awx/issues/11623
|
||||||
|
|
||||||
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
|
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
|
||||||
# NOTE: we had to add a exception case to not merge variables
|
# NOTE: we had to add a exception case to not merge variables
|
||||||
# to make constructed inventory coherent
|
# to make constructed inventory coherent
|
||||||
db_variables = self.all_group.variables
|
db_variables = self.all_group.variables
|
||||||
else:
|
else:
|
||||||
db_variables = update_group_variables(
|
db_variables = self.inventory.variables_dict
|
||||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
db_variables.update(self.all_group.variables)
|
||||||
newvars=self.all_group.variables,
|
|
||||||
dbvars=self.inventory.variables_dict,
|
|
||||||
invsrc_id=self.inventory_source.id,
|
|
||||||
inventory_id=self.inventory.id,
|
|
||||||
overwrite_vars=self.overwrite_vars,
|
|
||||||
)
|
|
||||||
if db_variables != self.inventory.variables_dict:
|
if db_variables != self.inventory.variables_dict:
|
||||||
self.inventory.variables = json.dumps(db_variables)
|
self.inventory.variables = json.dumps(db_variables)
|
||||||
self.inventory.save(update_fields=['variables'])
|
self.inventory.save(update_fields=['variables'])
|
||||||
|
|||||||
@@ -2,21 +2,13 @@
|
|||||||
# All Rights Reserved.
|
# All Rights Reserved.
|
||||||
import logging
|
import logging
|
||||||
import yaml
|
import yaml
|
||||||
import os
|
|
||||||
|
|
||||||
import redis
|
import redis
|
||||||
|
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.core.management.base import BaseCommand, CommandError
|
from django.core.management.base import BaseCommand, CommandError
|
||||||
|
|
||||||
from flags.state import flag_enabled
|
|
||||||
|
|
||||||
from dispatcherd.factories import get_control_from_settings
|
|
||||||
from dispatcherd import run_service
|
|
||||||
from dispatcherd.config import setup as dispatcher_setup
|
|
||||||
|
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
from awx.main.dispatch.config import get_dispatcherd_config
|
|
||||||
from awx.main.dispatch.control import Control
|
from awx.main.dispatch.control import Control
|
||||||
from awx.main.dispatch.pool import AutoscalePool
|
from awx.main.dispatch.pool import AutoscalePool
|
||||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||||
@@ -48,44 +40,18 @@ class Command(BaseCommand):
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
def verify_dispatcherd_socket(self):
|
|
||||||
if not os.path.exists(settings.DISPATCHERD_DEBUGGING_SOCKFILE):
|
|
||||||
raise CommandError('Dispatcher is not running locally')
|
|
||||||
|
|
||||||
def handle(self, *arg, **options):
|
def handle(self, *arg, **options):
|
||||||
if options.get('status'):
|
if options.get('status'):
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
print(Control('dispatcher').status())
|
||||||
ctl = get_control_from_settings()
|
return
|
||||||
running_data = ctl.control_with_reply('status')
|
|
||||||
if len(running_data) != 1:
|
|
||||||
raise CommandError('Did not receive expected number of replies')
|
|
||||||
print(yaml.dump(running_data[0], default_flow_style=False))
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(Control('dispatcher').status())
|
|
||||||
return
|
|
||||||
if options.get('schedule'):
|
if options.get('schedule'):
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
print(Control('dispatcher').schedule())
|
||||||
print('NOT YET IMPLEMENTED')
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(Control('dispatcher').schedule())
|
|
||||||
return
|
return
|
||||||
if options.get('running'):
|
if options.get('running'):
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
print(Control('dispatcher').running())
|
||||||
ctl = get_control_from_settings()
|
return
|
||||||
running_data = ctl.control_with_reply('running')
|
|
||||||
print(yaml.dump(running_data, default_flow_style=False))
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(Control('dispatcher').running())
|
|
||||||
return
|
|
||||||
if options.get('reload'):
|
if options.get('reload'):
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
return Control('dispatcher').control({'control': 'reload'})
|
||||||
print('NOT YET IMPLEMENTED')
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
return Control('dispatcher').control({'control': 'reload'})
|
|
||||||
if options.get('cancel'):
|
if options.get('cancel'):
|
||||||
cancel_str = options.get('cancel')
|
cancel_str = options.get('cancel')
|
||||||
try:
|
try:
|
||||||
@@ -94,36 +60,21 @@ class Command(BaseCommand):
|
|||||||
cancel_data = [cancel_str]
|
cancel_data = [cancel_str]
|
||||||
if not isinstance(cancel_data, list):
|
if not isinstance(cancel_data, list):
|
||||||
cancel_data = [cancel_str]
|
cancel_data = [cancel_str]
|
||||||
|
print(Control('dispatcher').cancel(cancel_data))
|
||||||
|
return
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
consumer = None
|
||||||
ctl = get_control_from_settings()
|
|
||||||
results = []
|
|
||||||
for task_id in cancel_data:
|
|
||||||
# For each task UUID, send an individual cancel command
|
|
||||||
result = ctl.control_with_reply('cancel', data={'uuid': task_id})
|
|
||||||
results.append(result)
|
|
||||||
print(yaml.dump(results, default_flow_style=False))
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(Control('dispatcher').cancel(cancel_data))
|
|
||||||
return
|
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
try:
|
||||||
dispatcher_setup(get_dispatcherd_config(for_service=True))
|
DispatcherMetricsServer().start()
|
||||||
run_service()
|
except redis.exceptions.ConnectionError as exc:
|
||||||
else:
|
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
|
||||||
consumer = None
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
DispatcherMetricsServer().start()
|
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||||
except redis.exceptions.ConnectionError as exc:
|
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
|
||||||
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
|
consumer.run()
|
||||||
|
except KeyboardInterrupt:
|
||||||
try:
|
logger.debug('Terminating Task Dispatcher')
|
||||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
if consumer:
|
||||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
|
consumer.stop()
|
||||||
consumer.run()
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
logger.debug('Terminating Task Dispatcher')
|
|
||||||
if consumer:
|
|
||||||
consumer.stop()
|
|
||||||
|
|||||||
@@ -1,61 +0,0 @@
|
|||||||
# Generated by Django 4.2.18 on 2025-02-27 20:35
|
|
||||||
|
|
||||||
from django.db import migrations, models
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
|
|
||||||
dependencies = [('main', '0197_add_opa_query_path')]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='inventorysource',
|
|
||||||
name='source',
|
|
||||||
field=models.CharField(
|
|
||||||
choices=[
|
|
||||||
('file', 'File, Directory or Script'),
|
|
||||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
|
||||||
('scm', 'Sourced from a Project'),
|
|
||||||
('ec2', 'Amazon EC2'),
|
|
||||||
('gce', 'Google Compute Engine'),
|
|
||||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
|
||||||
('vmware', 'VMware vCenter'),
|
|
||||||
('vmware_esxi', 'VMware ESXi'),
|
|
||||||
('satellite6', 'Red Hat Satellite 6'),
|
|
||||||
('openstack', 'OpenStack'),
|
|
||||||
('rhv', 'Red Hat Virtualization'),
|
|
||||||
('controller', 'Red Hat Ansible Automation Platform'),
|
|
||||||
('insights', 'Red Hat Insights'),
|
|
||||||
('terraform', 'Terraform State'),
|
|
||||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
|
||||||
],
|
|
||||||
default=None,
|
|
||||||
max_length=32,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='inventoryupdate',
|
|
||||||
name='source',
|
|
||||||
field=models.CharField(
|
|
||||||
choices=[
|
|
||||||
('file', 'File, Directory or Script'),
|
|
||||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
|
||||||
('scm', 'Sourced from a Project'),
|
|
||||||
('ec2', 'Amazon EC2'),
|
|
||||||
('gce', 'Google Compute Engine'),
|
|
||||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
|
||||||
('vmware', 'VMware vCenter'),
|
|
||||||
('vmware_esxi', 'VMware ESXi'),
|
|
||||||
('satellite6', 'Red Hat Satellite 6'),
|
|
||||||
('openstack', 'OpenStack'),
|
|
||||||
('rhv', 'Red Hat Virtualization'),
|
|
||||||
('controller', 'Red Hat Ansible Automation Platform'),
|
|
||||||
('insights', 'Red Hat Insights'),
|
|
||||||
('terraform', 'Terraform State'),
|
|
||||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
|
||||||
],
|
|
||||||
default=None,
|
|
||||||
max_length=32,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
15
awx/main/migrations/0198_delete_profile.py
Normal file
15
awx/main/migrations/0198_delete_profile.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Generated by Django 4.2.10 on 2024-09-16 10:22
|
||||||
|
|
||||||
|
from django.db import migrations
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
dependencies = [
|
||||||
|
('main', '0197_add_opa_query_path'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.DeleteModel(
|
||||||
|
name='Profile',
|
||||||
|
),
|
||||||
|
]
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
# Generated by Django 4.2.20 on 2025-04-24 09:08
|
|
||||||
|
|
||||||
from django.db import migrations, models
|
|
||||||
import django.db.models.deletion
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
|
|
||||||
dependencies = [
|
|
||||||
('main', '0198_alter_inventorysource_source_and_more'),
|
|
||||||
]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.CreateModel(
|
|
||||||
name='InventoryGroupVariablesWithHistory',
|
|
||||||
fields=[
|
|
||||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
|
||||||
('variables', models.JSONField()),
|
|
||||||
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.group')),
|
|
||||||
(
|
|
||||||
'inventory',
|
|
||||||
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.inventory'),
|
|
||||||
),
|
|
||||||
],
|
|
||||||
),
|
|
||||||
migrations.AddConstraint(
|
|
||||||
model_name='inventorygroupvariableswithhistory',
|
|
||||||
constraint=models.UniqueConstraint(
|
|
||||||
fields=('inventory', 'group'), name='unique_inventory_group', violation_error_message='Inventory/Group combination must be unique.'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
26
awx/main/migrations/0199_remove_sso_app_content.py
Normal file
26
awx/main/migrations/0199_remove_sso_app_content.py
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# Generated by Django 4.2.10 on 2024-09-16 15:21
|
||||||
|
|
||||||
|
from django.db import migrations
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
dependencies = [
|
||||||
|
('main', '0198_delete_profile'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
# delete all sso application migrations
|
||||||
|
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';"),
|
||||||
|
# delete all sso application content group permissions
|
||||||
|
migrations.RunSQL(
|
||||||
|
"DELETE FROM auth_group_permissions "
|
||||||
|
"WHERE permission_id IN "
|
||||||
|
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));"
|
||||||
|
),
|
||||||
|
# delete all sso application content permissions
|
||||||
|
migrations.RunSQL("DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');"),
|
||||||
|
# delete sso application content type
|
||||||
|
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';"),
|
||||||
|
# drop sso application created table
|
||||||
|
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;"),
|
||||||
|
]
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
# Generated by Django 4.2.10 on 2024-10-22 15:58
|
||||||
|
|
||||||
|
from django.db import migrations, models
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0199_remove_sso_app_content'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='inventorysource',
|
||||||
|
name='source',
|
||||||
|
field=models.CharField(default=None, max_length=32),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='inventoryupdate',
|
||||||
|
name='source',
|
||||||
|
field=models.CharField(default=None, max_length=32),
|
||||||
|
),
|
||||||
|
]
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
# Generated by Django 4.2.20 on 2025-04-22 15:54
|
|
||||||
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from django.db import migrations, models
|
|
||||||
|
|
||||||
from awx.main.migrations._db_constraints import _rename_duplicates
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def rename_jts(apps, schema_editor):
|
|
||||||
cls = apps.get_model('main', 'JobTemplate')
|
|
||||||
_rename_duplicates(cls)
|
|
||||||
|
|
||||||
|
|
||||||
def rename_projects(apps, schema_editor):
|
|
||||||
cls = apps.get_model('main', 'Project')
|
|
||||||
_rename_duplicates(cls)
|
|
||||||
|
|
||||||
|
|
||||||
def change_inventory_source_org_unique(apps, schema_editor):
|
|
||||||
cls = apps.get_model('main', 'InventorySource')
|
|
||||||
r = cls.objects.update(org_unique=False)
|
|
||||||
logger.info(f'Set database constraint rule for {r} inventory source objects')
|
|
||||||
|
|
||||||
|
|
||||||
def rename_wfjt(apps, schema_editor):
|
|
||||||
cls = apps.get_model('main', 'WorkflowJobTemplate')
|
|
||||||
_rename_duplicates(cls)
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
|
|
||||||
dependencies = [
|
|
||||||
('main', '0199_inventorygroupvariableswithhistory_and_more'),
|
|
||||||
]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.RunPython(rename_jts, migrations.RunPython.noop),
|
|
||||||
migrations.RunPython(rename_projects, migrations.RunPython.noop),
|
|
||||||
migrations.AddField(
|
|
||||||
model_name='unifiedjobtemplate',
|
|
||||||
name='org_unique',
|
|
||||||
field=models.BooleanField(blank=True, default=True, editable=False, help_text='Used internally to selectively enforce database constraint on name'),
|
|
||||||
),
|
|
||||||
migrations.RunPython(rename_wfjt, migrations.RunPython.noop),
|
|
||||||
migrations.RunPython(change_inventory_source_org_unique, migrations.RunPython.noop),
|
|
||||||
migrations.AddConstraint(
|
|
||||||
model_name='unifiedjobtemplate',
|
|
||||||
constraint=models.UniqueConstraint(
|
|
||||||
condition=models.Q(('org_unique', True)), fields=('polymorphic_ctype', 'name', 'organization'), name='ujt_hard_name_constraint'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
# Generated by Django 4.2.10 on 2024-10-24 14:06
|
||||||
|
|
||||||
|
from django.db import migrations
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0200_alter_inventorysource_source_and_more'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.AlterUniqueTogether(
|
||||||
|
name='oauth2application',
|
||||||
|
unique_together=None,
|
||||||
|
),
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='oauth2application',
|
||||||
|
name='organization',
|
||||||
|
),
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='oauth2application',
|
||||||
|
name='user',
|
||||||
|
),
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='o_auth2_access_token',
|
||||||
|
),
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='o_auth2_application',
|
||||||
|
),
|
||||||
|
migrations.DeleteModel(
|
||||||
|
name='OAuth2AccessToken',
|
||||||
|
),
|
||||||
|
migrations.DeleteModel(
|
||||||
|
name='OAuth2Application',
|
||||||
|
),
|
||||||
|
]
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
from django.db import migrations
|
|
||||||
|
|
||||||
# AWX
|
|
||||||
from awx.main.models import CredentialType
|
|
||||||
from awx.main.utils.common import set_current_apps
|
|
||||||
|
|
||||||
|
|
||||||
def setup_tower_managed_defaults(apps, schema_editor):
|
|
||||||
set_current_apps(apps)
|
|
||||||
CredentialType.setup_tower_managed_defaults(apps)
|
|
||||||
|
|
||||||
|
|
||||||
def setup_rbac_role_system_administrator(apps, schema_editor):
|
|
||||||
Role = apps.get_model('main', 'Role')
|
|
||||||
Role.objects.get_or_create(singleton_name='system_administrator', role_field='system_administrator')
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
dependencies = [
|
|
||||||
('main', '0200_template_name_constraint'),
|
|
||||||
]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.RunPython(setup_tower_managed_defaults),
|
|
||||||
migrations.RunPython(setup_rbac_role_system_administrator),
|
|
||||||
]
|
|
||||||
@@ -1,102 +0,0 @@
|
|||||||
# Generated by Django migration for converting Controller role definitions
|
|
||||||
|
|
||||||
from ansible_base.rbac.migrations._utils import give_permissions
|
|
||||||
from django.db import migrations
|
|
||||||
|
|
||||||
|
|
||||||
def convert_controller_role_definitions(apps, schema_editor):
|
|
||||||
"""
|
|
||||||
Convert Controller role definitions to regular role definitions:
|
|
||||||
- Controller Organization Admin -> Organization Admin
|
|
||||||
- Controller Organization Member -> Organization Member
|
|
||||||
- Controller Team Admin -> Team Admin
|
|
||||||
- Controller Team Member -> Team Member
|
|
||||||
- Controller System Auditor -> Platform Auditor
|
|
||||||
|
|
||||||
Then delete the old Controller role definitions.
|
|
||||||
"""
|
|
||||||
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
|
||||||
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
|
|
||||||
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
|
|
||||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
|
||||||
|
|
||||||
# Mapping of old Controller role names to new role names
|
|
||||||
role_mappings = {
|
|
||||||
'Controller Organization Admin': 'Organization Admin',
|
|
||||||
'Controller Organization Member': 'Organization Member',
|
|
||||||
'Controller Team Admin': 'Team Admin',
|
|
||||||
'Controller Team Member': 'Team Member',
|
|
||||||
}
|
|
||||||
|
|
||||||
for old_name, new_name in role_mappings.items():
|
|
||||||
# Find the old Controller role definition
|
|
||||||
old_role = RoleDefinition.objects.filter(name=old_name).first()
|
|
||||||
if not old_role:
|
|
||||||
continue # Skip if the old role doesn't exist
|
|
||||||
|
|
||||||
# Find the new role definition
|
|
||||||
new_role = RoleDefinition.objects.get(name=new_name)
|
|
||||||
|
|
||||||
# Collect all the assignments that need to be migrated
|
|
||||||
# Group by object (content_type + object_id) to batch the give_permissions calls
|
|
||||||
assignments_by_object = {}
|
|
||||||
|
|
||||||
# Get user assignments
|
|
||||||
user_assignments = RoleUserAssignment.objects.filter(role_definition=old_role).select_related('object_role')
|
|
||||||
for assignment in user_assignments:
|
|
||||||
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
|
|
||||||
if key not in assignments_by_object:
|
|
||||||
assignments_by_object[key] = {'users': [], 'teams': []}
|
|
||||||
assignments_by_object[key]['users'].append(assignment.user)
|
|
||||||
|
|
||||||
# Get team assignments
|
|
||||||
team_assignments = RoleTeamAssignment.objects.filter(role_definition=old_role).select_related('object_role')
|
|
||||||
for assignment in team_assignments:
|
|
||||||
key = (assignment.object_role.content_type_id, assignment.object_role.object_id)
|
|
||||||
if key not in assignments_by_object:
|
|
||||||
assignments_by_object[key] = {'users': [], 'teams': []}
|
|
||||||
assignments_by_object[key]['teams'].append(assignment.team.id)
|
|
||||||
|
|
||||||
# Use give_permissions to create new assignments with the new role definition
|
|
||||||
for (content_type_id, object_id), data in assignments_by_object.items():
|
|
||||||
if data['users'] or data['teams']:
|
|
||||||
give_permissions(
|
|
||||||
apps,
|
|
||||||
new_role,
|
|
||||||
users=data['users'],
|
|
||||||
teams=data['teams'],
|
|
||||||
object_id=object_id,
|
|
||||||
content_type_id=content_type_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Delete the old role definition (this will cascade to delete old assignments and ObjectRoles)
|
|
||||||
old_role.delete()
|
|
||||||
|
|
||||||
# Create or get Platform Auditor
|
|
||||||
auditor_rd, created = RoleDefinition.objects.get_or_create(
|
|
||||||
name='Platform Auditor',
|
|
||||||
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
|
|
||||||
)
|
|
||||||
if created:
|
|
||||||
auditor_rd.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
|
|
||||||
|
|
||||||
old_rd = RoleDefinition.objects.filter(name='Controller System Auditor').first()
|
|
||||||
if old_rd:
|
|
||||||
for assignment in RoleUserAssignment.objects.filter(role_definition=old_rd):
|
|
||||||
RoleUserAssignment.objects.create(
|
|
||||||
user=assignment.user,
|
|
||||||
role_definition=auditor_rd,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Delete the Controller System Auditor role
|
|
||||||
RoleDefinition.objects.filter(name='Controller System Auditor').delete()
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
dependencies = [
|
|
||||||
('main', '0201_create_managed_creds'),
|
|
||||||
]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.RunPython(convert_controller_role_definitions),
|
|
||||||
]
|
|
||||||
44
awx/main/migrations/0202_delete_token_cleanup_job.py
Normal file
44
awx/main/migrations/0202_delete_token_cleanup_job.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
# Generated by Django 4.2.16 on 2024-12-18 16:05
|
||||||
|
|
||||||
|
from django.db import migrations, models
|
||||||
|
|
||||||
|
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
|
||||||
|
|
||||||
|
|
||||||
|
class Migration(migrations.Migration):
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
('main', '0201_alter_oauth2application_unique_together_and_more'),
|
||||||
|
]
|
||||||
|
|
||||||
|
operations = [
|
||||||
|
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='systemjob',
|
||||||
|
name='job_type',
|
||||||
|
field=models.CharField(
|
||||||
|
blank=True,
|
||||||
|
choices=[
|
||||||
|
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||||
|
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||||
|
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||||
|
],
|
||||||
|
default='',
|
||||||
|
max_length=32,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
migrations.AlterField(
|
||||||
|
model_name='systemjobtemplate',
|
||||||
|
name='job_type',
|
||||||
|
field=models.CharField(
|
||||||
|
blank=True,
|
||||||
|
choices=[
|
||||||
|
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||||
|
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||||
|
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||||
|
],
|
||||||
|
default='',
|
||||||
|
max_length=32,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
]
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
import logging
|
|
||||||
|
|
||||||
from django.db import migrations
|
|
||||||
|
|
||||||
from awx.main.migrations._dab_rbac import consolidate_indirect_user_roles
|
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.migrations')
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
|
|
||||||
dependencies = [
|
|
||||||
('main', '0202_convert_controller_role_definitions'),
|
|
||||||
]
|
|
||||||
# The DAB RBAC app makes substantial model changes which by change-ordering comes after this
|
|
||||||
# not including run_before might sometimes work but this enforces a more strict and stable order
|
|
||||||
# for both applying migrations forwards and backwards
|
|
||||||
run_before = [("dab_rbac", "0004_remote_permissions_additions")]
|
|
||||||
|
|
||||||
operations = [
|
|
||||||
migrations.RunPython(consolidate_indirect_user_roles, migrations.RunPython.noop),
|
|
||||||
]
|
|
||||||
@@ -1,124 +0,0 @@
|
|||||||
# Generated by Django 4.2.10 on 2024-09-16 10:22
|
|
||||||
from django.db import migrations, models
|
|
||||||
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
|
|
||||||
|
|
||||||
|
|
||||||
# --- START of function merged from 0203_rename_github_app_kind.py ---
|
|
||||||
def update_github_app_kind(apps, schema_editor):
|
|
||||||
"""
|
|
||||||
Updates the 'kind' field for CredentialType records
|
|
||||||
from 'github_app' to 'github_app_lookup'.
|
|
||||||
This addresses a change in the entry point key for the GitHub App plugin.
|
|
||||||
"""
|
|
||||||
CredentialType = apps.get_model('main', 'CredentialType')
|
|
||||||
db_alias = schema_editor.connection.alias
|
|
||||||
CredentialType.objects.using(db_alias).filter(kind='github_app').update(kind='github_app_lookup')
|
|
||||||
|
|
||||||
|
|
||||||
# --- END of function merged from 0203_rename_github_app_kind.py ---
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
|
||||||
dependencies = [
|
|
||||||
('main', '0203_remove_team_of_teams'),
|
|
||||||
]
|
|
||||||
operations = [
|
|
||||||
migrations.DeleteModel(
|
|
||||||
name='Profile',
|
|
||||||
),
|
|
||||||
# Remove SSO app content
|
|
||||||
# delete all sso application migrations
|
|
||||||
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
|
|
||||||
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';", reverse_sql=migrations.RunSQL.noop),
|
|
||||||
# delete all sso application content group permissions
|
|
||||||
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
|
|
||||||
migrations.RunSQL(
|
|
||||||
"DELETE FROM auth_group_permissions "
|
|
||||||
"WHERE permission_id IN "
|
|
||||||
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));",
|
|
||||||
reverse_sql=migrations.RunSQL.noop,
|
|
||||||
),
|
|
||||||
# delete all sso application content permissions
|
|
||||||
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
|
|
||||||
migrations.RunSQL(
|
|
||||||
"DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');",
|
|
||||||
reverse_sql=migrations.RunSQL.noop,
|
|
||||||
),
|
|
||||||
# delete sso application content type
|
|
||||||
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
|
|
||||||
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';", reverse_sql=migrations.RunSQL.noop),
|
|
||||||
# drop sso application created table
|
|
||||||
# Added reverse_sql=migrations.RunSQL.noop to make this reversible for tests
|
|
||||||
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;", reverse_sql=migrations.RunSQL.noop),
|
|
||||||
# Alter inventory source source field
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='inventorysource',
|
|
||||||
name='source',
|
|
||||||
field=models.CharField(default=None, max_length=32),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='inventoryupdate',
|
|
||||||
name='source',
|
|
||||||
field=models.CharField(default=None, max_length=32),
|
|
||||||
),
|
|
||||||
# Alter OAuth2Application unique together
|
|
||||||
migrations.AlterUniqueTogether(
|
|
||||||
name='oauth2application',
|
|
||||||
unique_together=None,
|
|
||||||
),
|
|
||||||
migrations.RemoveField(
|
|
||||||
model_name='oauth2application',
|
|
||||||
name='organization',
|
|
||||||
),
|
|
||||||
migrations.RemoveField(
|
|
||||||
model_name='oauth2application',
|
|
||||||
name='user',
|
|
||||||
),
|
|
||||||
migrations.RemoveField(
|
|
||||||
model_name='activitystream',
|
|
||||||
name='o_auth2_access_token',
|
|
||||||
),
|
|
||||||
migrations.RemoveField(
|
|
||||||
model_name='activitystream',
|
|
||||||
name='o_auth2_application',
|
|
||||||
),
|
|
||||||
migrations.DeleteModel(
|
|
||||||
name='OAuth2AccessToken',
|
|
||||||
),
|
|
||||||
migrations.DeleteModel(
|
|
||||||
name='OAuth2Application',
|
|
||||||
),
|
|
||||||
# Delete system token cleanup jobs, because tokens were deleted
|
|
||||||
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='systemjob',
|
|
||||||
name='job_type',
|
|
||||||
field=models.CharField(
|
|
||||||
blank=True,
|
|
||||||
choices=[
|
|
||||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
|
||||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
|
||||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
|
||||||
],
|
|
||||||
default='',
|
|
||||||
max_length=32,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='systemjobtemplate',
|
|
||||||
name='job_type',
|
|
||||||
field=models.CharField(
|
|
||||||
blank=True,
|
|
||||||
choices=[
|
|
||||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
|
||||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
|
||||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
|
||||||
],
|
|
||||||
default='',
|
|
||||||
max_length=32,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
# --- START of operations merged from 0203_rename_github_app_kind.py ---
|
|
||||||
migrations.RunPython(update_github_app_kind, migrations.RunPython.noop),
|
|
||||||
# --- END of operations merged from 0203_rename_github_app_kind.py ---
|
|
||||||
]
|
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.migrations')
|
logger = logging.getLogger('awx.main.migrations')
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
from collections import defaultdict
|
|
||||||
|
|
||||||
from django.apps import apps as global_apps
|
from django.apps import apps as global_apps
|
||||||
from django.db.models import ForeignKey
|
from django.db.models import ForeignKey
|
||||||
@@ -18,14 +17,7 @@ logger = logging.getLogger('awx.main.migrations._dab_rbac')
|
|||||||
|
|
||||||
|
|
||||||
def create_permissions_as_operation(apps, schema_editor):
|
def create_permissions_as_operation(apps, schema_editor):
|
||||||
logger.info('Running data migration create_permissions_as_operation')
|
|
||||||
# NOTE: the DAB ContentType changes adjusted how they fire
|
|
||||||
# before they would fire on every app config, like contenttypes
|
|
||||||
create_dab_permissions(global_apps.get_app_config("main"), apps=apps)
|
create_dab_permissions(global_apps.get_app_config("main"), apps=apps)
|
||||||
# This changed to only fire once and do a global creation
|
|
||||||
# so we need to call it for specifically the dab_rbac app
|
|
||||||
# multiple calls will not hurt anything
|
|
||||||
create_dab_permissions(global_apps.get_app_config("dab_rbac"), apps=apps)
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
@@ -120,12 +112,7 @@ def get_descendents(f, children_map):
|
|||||||
|
|
||||||
def get_permissions_for_role(role_field, children_map, apps):
|
def get_permissions_for_role(role_field, children_map, apps):
|
||||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
||||||
try:
|
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||||
# After migration for remote permissions
|
|
||||||
ContentType = apps.get_model('dab_rbac', 'DABContentType')
|
|
||||||
except LookupError:
|
|
||||||
# If using DAB from before remote permissions are implemented
|
|
||||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
|
||||||
|
|
||||||
perm_list = []
|
perm_list = []
|
||||||
for child_field in get_descendents(role_field, children_map):
|
for child_field in get_descendents(role_field, children_map):
|
||||||
@@ -168,15 +155,11 @@ def migrate_to_new_rbac(apps, schema_editor):
|
|||||||
This method moves the assigned permissions from the old rbac.py models
|
This method moves the assigned permissions from the old rbac.py models
|
||||||
to the new RoleDefinition and ObjectRole models
|
to the new RoleDefinition and ObjectRole models
|
||||||
"""
|
"""
|
||||||
logger.info('Running data migration migrate_to_new_rbac')
|
|
||||||
Role = apps.get_model('main', 'Role')
|
Role = apps.get_model('main', 'Role')
|
||||||
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
||||||
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
|
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
|
||||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
||||||
|
|
||||||
if Permission.objects.count() == 0:
|
|
||||||
raise RuntimeError('Running migrate_to_new_rbac requires DABPermission objects created first')
|
|
||||||
|
|
||||||
# remove add premissions that are not valid for migrations from old versions
|
# remove add premissions that are not valid for migrations from old versions
|
||||||
for perm_str in ('add_organization', 'add_jobtemplate'):
|
for perm_str in ('add_organization', 'add_jobtemplate'):
|
||||||
perm = Permission.objects.filter(codename=perm_str).first()
|
perm = Permission.objects.filter(codename=perm_str).first()
|
||||||
@@ -256,14 +239,11 @@ def migrate_to_new_rbac(apps, schema_editor):
|
|||||||
|
|
||||||
# Create new replacement system auditor role
|
# Create new replacement system auditor role
|
||||||
new_system_auditor, created = RoleDefinition.objects.get_or_create(
|
new_system_auditor, created = RoleDefinition.objects.get_or_create(
|
||||||
name='Platform Auditor',
|
name='Controller System Auditor',
|
||||||
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
|
defaults={'description': 'Migrated singleton role giving read permission to everything', 'managed': True},
|
||||||
)
|
)
|
||||||
new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
|
new_system_auditor.permissions.add(*list(Permission.objects.filter(codename__startswith='view')))
|
||||||
|
|
||||||
if created:
|
|
||||||
logger.info(f'Created RoleDefinition {new_system_auditor.name} pk={new_system_auditor.pk} with {new_system_auditor.permissions.count()} permissions')
|
|
||||||
|
|
||||||
# migrate is_system_auditor flag, because it is no longer handled by a system role
|
# migrate is_system_auditor flag, because it is no longer handled by a system role
|
||||||
old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first()
|
old_system_auditor = Role.objects.filter(singleton_name='system_auditor').first()
|
||||||
if old_system_auditor:
|
if old_system_auditor:
|
||||||
@@ -292,9 +272,8 @@ def get_or_create_managed(name, description, ct, permissions, RoleDefinition):
|
|||||||
|
|
||||||
def setup_managed_role_definitions(apps, schema_editor):
|
def setup_managed_role_definitions(apps, schema_editor):
|
||||||
"""
|
"""
|
||||||
Idempotent method to create or sync the managed role definitions
|
Idepotent method to create or sync the managed role definitions
|
||||||
"""
|
"""
|
||||||
logger.info('Running data migration setup_managed_role_definitions')
|
|
||||||
to_create = {
|
to_create = {
|
||||||
'object_admin': '{cls.__name__} Admin',
|
'object_admin': '{cls.__name__} Admin',
|
||||||
'org_admin': 'Organization Admin',
|
'org_admin': 'Organization Admin',
|
||||||
@@ -302,13 +281,7 @@ def setup_managed_role_definitions(apps, schema_editor):
|
|||||||
'special': '{cls.__name__} {action}',
|
'special': '{cls.__name__} {action}',
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||||
# After migration for remote permissions
|
|
||||||
ContentType = apps.get_model('dab_rbac', 'DABContentType')
|
|
||||||
except LookupError:
|
|
||||||
# If using DAB from before remote permissions are implemented
|
|
||||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
|
||||||
|
|
||||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
||||||
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
||||||
Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
|
Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
|
||||||
@@ -336,6 +309,16 @@ def setup_managed_role_definitions(apps, schema_editor):
|
|||||||
to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition
|
to_create['object_admin'].format(cls=cls), f'Has all permissions to a single {cls._meta.verbose_name}', ct, indiv_perms, RoleDefinition
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
if cls_name == 'team':
|
||||||
|
managed_role_definitions.append(
|
||||||
|
get_or_create_managed(
|
||||||
|
'Controller Team Admin',
|
||||||
|
f'Has all permissions to a single {cls._meta.verbose_name}',
|
||||||
|
ct,
|
||||||
|
indiv_perms,
|
||||||
|
RoleDefinition,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
|
if 'org_children' in to_create and (cls_name not in ('organization', 'instancegroup', 'team')):
|
||||||
org_child_perms = object_perms.copy()
|
org_child_perms = object_perms.copy()
|
||||||
@@ -376,6 +359,18 @@ def setup_managed_role_definitions(apps, schema_editor):
|
|||||||
RoleDefinition,
|
RoleDefinition,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
if action == 'member' and cls_name in ('organization', 'team'):
|
||||||
|
suffix = to_create['special'].format(cls=cls, action=action.title())
|
||||||
|
rd_name = f'Controller {suffix}'
|
||||||
|
managed_role_definitions.append(
|
||||||
|
get_or_create_managed(
|
||||||
|
rd_name,
|
||||||
|
f'Has {action} permissions to a single {cls._meta.verbose_name}',
|
||||||
|
ct,
|
||||||
|
perm_list,
|
||||||
|
RoleDefinition,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if 'org_admin' in to_create:
|
if 'org_admin' in to_create:
|
||||||
managed_role_definitions.append(
|
managed_role_definitions.append(
|
||||||
@@ -387,6 +382,15 @@ def setup_managed_role_definitions(apps, schema_editor):
|
|||||||
RoleDefinition,
|
RoleDefinition,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
managed_role_definitions.append(
|
||||||
|
get_or_create_managed(
|
||||||
|
'Controller Organization Admin',
|
||||||
|
'Has all permissions to a single organization and all objects inside of it',
|
||||||
|
org_ct,
|
||||||
|
org_perms,
|
||||||
|
RoleDefinition,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
# Special "organization action" roles
|
# Special "organization action" roles
|
||||||
audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
|
audit_permissions = [perm for perm in org_perms if perm.codename.startswith('view_')]
|
||||||
@@ -427,115 +431,3 @@ def setup_managed_role_definitions(apps, schema_editor):
|
|||||||
for role_definition in unexpected_role_definitions:
|
for role_definition in unexpected_role_definitions:
|
||||||
logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
|
logger.info(f'Deleting old managed role definition {role_definition.name}, pk={role_definition.pk}')
|
||||||
role_definition.delete()
|
role_definition.delete()
|
||||||
|
|
||||||
|
|
||||||
def get_team_to_team_relationships(apps, team_member_role):
|
|
||||||
"""
|
|
||||||
Find all team-to-team relationships where one team is a member of another.
|
|
||||||
Returns a dict mapping parent_team_id -> [child_team_id, ...]
|
|
||||||
"""
|
|
||||||
team_to_team_relationships = defaultdict(list)
|
|
||||||
|
|
||||||
# Find all team assignments with the Team Member role
|
|
||||||
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
|
|
||||||
team_assignments = RoleTeamAssignment.objects.filter(role_definition=team_member_role).select_related('team')
|
|
||||||
|
|
||||||
for assignment in team_assignments:
|
|
||||||
parent_team_id = int(assignment.object_id)
|
|
||||||
child_team_id = assignment.team.id
|
|
||||||
team_to_team_relationships[parent_team_id].append(child_team_id)
|
|
||||||
|
|
||||||
return team_to_team_relationships
|
|
||||||
|
|
||||||
|
|
||||||
def get_all_user_members_of_team(apps, team_member_role, team_id, team_to_team_map, visited=None):
|
|
||||||
"""
|
|
||||||
Recursively find all users who are members of a team, including through nested teams.
|
|
||||||
"""
|
|
||||||
if visited is None:
|
|
||||||
visited = set()
|
|
||||||
|
|
||||||
if team_id in visited:
|
|
||||||
return set() # Avoid infinite recursion
|
|
||||||
|
|
||||||
visited.add(team_id)
|
|
||||||
all_users = set()
|
|
||||||
|
|
||||||
# Get direct user assignments to this team
|
|
||||||
RoleUserAssignment = apps.get_model('dab_rbac', 'RoleUserAssignment')
|
|
||||||
user_assignments = RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_id).select_related('user')
|
|
||||||
|
|
||||||
for assignment in user_assignments:
|
|
||||||
all_users.add(assignment.user)
|
|
||||||
|
|
||||||
# Get team-to-team assignments and recursively find their users
|
|
||||||
child_team_ids = team_to_team_map.get(team_id, [])
|
|
||||||
for child_team_id in child_team_ids:
|
|
||||||
nested_users = get_all_user_members_of_team(apps, team_member_role, child_team_id, team_to_team_map, visited.copy())
|
|
||||||
all_users.update(nested_users)
|
|
||||||
|
|
||||||
return all_users
|
|
||||||
|
|
||||||
|
|
||||||
def remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id):
|
|
||||||
"""
|
|
||||||
Remove team-to-team memberships.
|
|
||||||
"""
|
|
||||||
Team = apps.get_model('main', 'Team')
|
|
||||||
RoleTeamAssignment = apps.get_model('dab_rbac', 'RoleTeamAssignment')
|
|
||||||
|
|
||||||
parent_team = Team.objects.get(id=parent_team_id)
|
|
||||||
child_team = Team.objects.get(id=child_team_id)
|
|
||||||
|
|
||||||
# Remove all team-to-team RoleTeamAssignments
|
|
||||||
RoleTeamAssignment.objects.filter(role_definition=team_member_role, object_id=parent_team_id, team=child_team).delete()
|
|
||||||
|
|
||||||
# Check mirroring Team model for children under member_role
|
|
||||||
parent_team.member_role.children.filter(object_id=child_team_id).delete()
|
|
||||||
|
|
||||||
|
|
||||||
def consolidate_indirect_user_roles(apps, schema_editor):
|
|
||||||
"""
|
|
||||||
A user should have a member role for every team they were indirectly
|
|
||||||
a member of. ex. Team A is a member of Team B. All users in Team A
|
|
||||||
previously were only members of Team A. They should now be members of
|
|
||||||
Team A and Team B.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# get models for membership on teams
|
|
||||||
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
|
||||||
Team = apps.get_model('main', 'Team')
|
|
||||||
|
|
||||||
team_member_role = RoleDefinition.objects.get(name='Team Member')
|
|
||||||
|
|
||||||
team_to_team_map = get_team_to_team_relationships(apps, team_member_role)
|
|
||||||
|
|
||||||
if not team_to_team_map:
|
|
||||||
return # No team-to-team relationships to consolidate
|
|
||||||
|
|
||||||
# Get content type for Team - needed for give_permissions
|
|
||||||
try:
|
|
||||||
from django.contrib.contenttypes.models import ContentType
|
|
||||||
|
|
||||||
team_content_type = ContentType.objects.get_for_model(Team)
|
|
||||||
except ImportError:
|
|
||||||
# Fallback if ContentType is not available
|
|
||||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
|
||||||
team_content_type = ContentType.objects.get_for_model(Team)
|
|
||||||
|
|
||||||
# Get all users who should be direct members of a team
|
|
||||||
for parent_team_id, child_team_ids in team_to_team_map.items():
|
|
||||||
all_users = get_all_user_members_of_team(apps, team_member_role, parent_team_id, team_to_team_map)
|
|
||||||
|
|
||||||
# Create direct RoleUserAssignments for all users
|
|
||||||
if all_users:
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=list(all_users), object_id=parent_team_id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Mirror assignments to Team model
|
|
||||||
parent_team = Team.objects.get(id=parent_team_id)
|
|
||||||
for user in all_users:
|
|
||||||
parent_team.member_role.members.add(user.id)
|
|
||||||
|
|
||||||
# Remove all team-to-team assignments for parent team
|
|
||||||
for child_team_id in child_team_ids:
|
|
||||||
remove_team_to_team_assignment(apps, team_member_role, parent_team_id, child_team_id)
|
|
||||||
|
|||||||
@@ -1,25 +0,0 @@
|
|||||||
import logging
|
|
||||||
|
|
||||||
from django.db.models import Count
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def _rename_duplicates(cls):
|
|
||||||
field = cls._meta.get_field('name')
|
|
||||||
max_len = field.max_length
|
|
||||||
for organization_id in cls.objects.order_by().values_list('organization_id', flat=True).distinct():
|
|
||||||
duplicate_data = cls.objects.values('name').filter(organization_id=organization_id).annotate(count=Count('name')).order_by().filter(count__gt=1)
|
|
||||||
for data in duplicate_data:
|
|
||||||
name = data['name']
|
|
||||||
for idx, ujt in enumerate(cls.objects.filter(name=name, organization_id=organization_id).order_by('created')):
|
|
||||||
if idx > 0:
|
|
||||||
suffix = f'_dup{idx}'
|
|
||||||
max_chars = max_len - len(suffix)
|
|
||||||
if len(ujt.name) >= max_chars:
|
|
||||||
ujt.name = ujt.name[:max_chars] + suffix
|
|
||||||
else:
|
|
||||||
ujt.name = ujt.name + suffix
|
|
||||||
logger.info(f'Renaming duplicate {cls._meta.model_name} to `{ujt.name}` because of duplicate name entry')
|
|
||||||
ujt.save(update_fields=['name'])
|
|
||||||
@@ -3,6 +3,7 @@ from time import time
|
|||||||
|
|
||||||
from django.db.models import Subquery, OuterRef, F
|
from django.db.models import Subquery, OuterRef, F
|
||||||
|
|
||||||
|
from awx.main.fields import update_role_parentage_for_instance
|
||||||
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
|
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
|
||||||
|
|
||||||
logger = logging.getLogger('rbac_migrations')
|
logger = logging.getLogger('rbac_migrations')
|
||||||
@@ -237,10 +238,85 @@ def restore_inventory_admins_backward(apps, schema_editor):
|
|||||||
|
|
||||||
|
|
||||||
def rebuild_role_hierarchy(apps, schema_editor):
|
def rebuild_role_hierarchy(apps, schema_editor):
|
||||||
"""Not used after DAB RBAC migration"""
|
"""
|
||||||
pass
|
This should be called in any migration when ownerships are changed.
|
||||||
|
Ex. I remove a user from the admin_role of a credential.
|
||||||
|
Ancestors are cached from parents for performance, this re-computes ancestors.
|
||||||
|
"""
|
||||||
|
logger.info('Computing role roots..')
|
||||||
|
start = time()
|
||||||
|
roots = Role.objects.all().values_list('id', flat=True)
|
||||||
|
stop = time()
|
||||||
|
logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start))
|
||||||
|
start = time()
|
||||||
|
Role.rebuild_role_ancestor_list(roots, [])
|
||||||
|
stop = time()
|
||||||
|
logger.info('Rebuild ancestors completed in %f seconds' % (stop - start))
|
||||||
|
logger.info('Done.')
|
||||||
|
|
||||||
|
|
||||||
def rebuild_role_parentage(apps, schema_editor, models=None):
|
def rebuild_role_parentage(apps, schema_editor, models=None):
|
||||||
"""Not used after DAB RBAC migration"""
|
"""
|
||||||
pass
|
This should be called in any migration when any parent_role entry
|
||||||
|
is modified so that the cached parent fields will be updated. Ex:
|
||||||
|
foo_role = ImplicitRoleField(
|
||||||
|
parent_role=['bar_role'] # change to parent_role=['admin_role']
|
||||||
|
)
|
||||||
|
|
||||||
|
This is like rebuild_role_hierarchy, but that method updates ancestors,
|
||||||
|
whereas this method updates parents.
|
||||||
|
"""
|
||||||
|
start = time()
|
||||||
|
seen_models = set()
|
||||||
|
model_ct = 0
|
||||||
|
noop_ct = 0
|
||||||
|
ContentType = apps.get_model('contenttypes', "ContentType")
|
||||||
|
additions = set()
|
||||||
|
removals = set()
|
||||||
|
|
||||||
|
role_qs = Role.objects
|
||||||
|
if models:
|
||||||
|
# update_role_parentage_for_instance is expensive
|
||||||
|
# if the models have been downselected, ignore those which are not in the list
|
||||||
|
ct_ids = list(ContentType.objects.filter(model__in=[name.lower() for name in models]).values_list('id', flat=True))
|
||||||
|
role_qs = role_qs.filter(content_type__in=ct_ids)
|
||||||
|
|
||||||
|
for role in role_qs.iterator():
|
||||||
|
if not role.object_id:
|
||||||
|
continue
|
||||||
|
model_tuple = (role.content_type_id, role.object_id)
|
||||||
|
if model_tuple in seen_models:
|
||||||
|
continue
|
||||||
|
seen_models.add(model_tuple)
|
||||||
|
|
||||||
|
# The GenericForeignKey does not work right in migrations
|
||||||
|
# with the usage as role.content_object
|
||||||
|
# so we do the lookup ourselves with current migration models
|
||||||
|
ct = role.content_type
|
||||||
|
app = ct.app_label
|
||||||
|
ct_model = apps.get_model(app, ct.model)
|
||||||
|
content_object = ct_model.objects.get(pk=role.object_id)
|
||||||
|
|
||||||
|
parents_added, parents_removed = update_role_parentage_for_instance(content_object)
|
||||||
|
additions.update(parents_added)
|
||||||
|
removals.update(parents_removed)
|
||||||
|
if parents_added:
|
||||||
|
model_ct += 1
|
||||||
|
logger.debug('Added to parents of roles {} of {}'.format(parents_added, content_object))
|
||||||
|
if parents_removed:
|
||||||
|
model_ct += 1
|
||||||
|
logger.debug('Removed from parents of roles {} of {}'.format(parents_removed, content_object))
|
||||||
|
else:
|
||||||
|
noop_ct += 1
|
||||||
|
|
||||||
|
logger.debug('No changes to role parents for {} resources'.format(noop_ct))
|
||||||
|
logger.debug('Added parents to {} roles'.format(len(additions)))
|
||||||
|
logger.debug('Removed parents from {} roles'.format(len(removals)))
|
||||||
|
if model_ct:
|
||||||
|
logger.info('Updated implicit parents of {} resources'.format(model_ct))
|
||||||
|
|
||||||
|
logger.info('Rebuild parentage completed in %f seconds' % (time() - start))
|
||||||
|
|
||||||
|
# this is ran because the ordinary signals for
|
||||||
|
# Role.parents.add and Role.parents.remove not called in migration
|
||||||
|
Role.rebuild_role_ancestor_list(list(additions), list(removals))
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ from awx.main.models.inventory import ( # noqa
|
|||||||
InventorySource,
|
InventorySource,
|
||||||
InventoryUpdate,
|
InventoryUpdate,
|
||||||
SmartInventoryMembership,
|
SmartInventoryMembership,
|
||||||
InventoryGroupVariablesWithHistory,
|
|
||||||
)
|
)
|
||||||
from awx.main.models.jobs import ( # noqa
|
from awx.main.models.jobs import ( # noqa
|
||||||
Job,
|
Job,
|
||||||
@@ -172,17 +171,35 @@ def cleanup_created_modified_by(sender, **kwargs):
|
|||||||
pre_delete.connect(cleanup_created_modified_by, sender=User)
|
pre_delete.connect(cleanup_created_modified_by, sender=User)
|
||||||
|
|
||||||
|
|
||||||
|
@property
|
||||||
|
def user_get_organizations(user):
|
||||||
|
return Organization.access_qs(user, 'member')
|
||||||
|
|
||||||
|
|
||||||
|
@property
|
||||||
|
def user_get_admin_of_organizations(user):
|
||||||
|
return Organization.access_qs(user, 'change')
|
||||||
|
|
||||||
|
|
||||||
|
@property
|
||||||
|
def user_get_auditor_of_organizations(user):
|
||||||
|
return Organization.access_qs(user, 'audit')
|
||||||
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def created(user):
|
def created(user):
|
||||||
return user.date_joined
|
return user.date_joined
|
||||||
|
|
||||||
|
|
||||||
|
User.add_to_class('organizations', user_get_organizations)
|
||||||
|
User.add_to_class('admin_of_organizations', user_get_admin_of_organizations)
|
||||||
|
User.add_to_class('auditor_of_organizations', user_get_auditor_of_organizations)
|
||||||
User.add_to_class('created', created)
|
User.add_to_class('created', created)
|
||||||
|
|
||||||
|
|
||||||
def get_system_auditor_role():
|
def get_system_auditor_role():
|
||||||
rd, created = RoleDefinition.objects.get_or_create(
|
rd, created = RoleDefinition.objects.get_or_create(
|
||||||
name='Platform Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
|
name='Controller System Auditor', defaults={'description': 'Migrated singleton role giving read permission to everything'}
|
||||||
)
|
)
|
||||||
if created:
|
if created:
|
||||||
rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view')))
|
rd.permissions.add(*list(permission_registry.permission_qs.filter(codename__startswith='view')))
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ from awx.main.managers import DeferJobCreatedManager
|
|||||||
from awx.main.constants import MINIMAL_EVENTS
|
from awx.main.constants import MINIMAL_EVENTS
|
||||||
from awx.main.models.base import CreatedModifiedModel
|
from awx.main.models.base import CreatedModifiedModel
|
||||||
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
||||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
|
||||||
|
|
||||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||||
|
|
||||||
@@ -603,7 +602,7 @@ class JobEvent(BasePlaybookEvent):
|
|||||||
h.last_job_host_summary_id = host_mapping[h.id]
|
h.last_job_host_summary_id = host_mapping[h.id]
|
||||||
updated_hosts.add(h)
|
updated_hosts.add(h)
|
||||||
|
|
||||||
bulk_update_sorted_by_id(Host, updated_hosts, ['last_job_id', 'last_job_host_summary_id'])
|
Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
|
||||||
|
|
||||||
# Create/update Host Metrics
|
# Create/update Host Metrics
|
||||||
self._update_host_metrics(updated_hosts_list)
|
self._update_host_metrics(updated_hosts_list)
|
||||||
|
|||||||
@@ -1024,10 +1024,7 @@ class InventorySourceOptions(BaseModel):
|
|||||||
# If a credential was provided, it's important that it matches
|
# If a credential was provided, it's important that it matches
|
||||||
# the actual inventory source being used (Amazon requires Amazon
|
# the actual inventory source being used (Amazon requires Amazon
|
||||||
# credentials; Rackspace requires Rackspace credentials; etc...)
|
# credentials; Rackspace requires Rackspace credentials; etc...)
|
||||||
# TODO: AAP-53978 check that this matches new awx-plugin content for ESXI
|
if source.replace('ec2', 'aws') != cred.kind:
|
||||||
if source == 'vmware_esxi' and source.replace('vmware_esxi', 'vmware') != cred.kind:
|
|
||||||
return _('VMWARE inventory sources (such as %s) require credentials for the matching cloud service.') % source
|
|
||||||
if source == 'ec2' and source.replace('ec2', 'aws') != cred.kind:
|
|
||||||
return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source
|
return _('Cloud-based inventory sources (such as %s) require credentials for the matching cloud service.') % source
|
||||||
# Allow an EC2 source to omit the credential. If Tower is running on
|
# Allow an EC2 source to omit the credential. If Tower is running on
|
||||||
# an EC2 instance with an IAM Role assigned, boto will use credentials
|
# an EC2 instance with an IAM Role assigned, boto will use credentials
|
||||||
@@ -1123,10 +1120,8 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
|||||||
|
|
||||||
def save(self, *args, **kwargs):
|
def save(self, *args, **kwargs):
|
||||||
# if this is a new object, inherit organization from its inventory
|
# if this is a new object, inherit organization from its inventory
|
||||||
if not self.pk:
|
if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
|
||||||
self.org_unique = False # needed to exclude from unique (name, organization) constraint
|
self.organization_id = self.inventory.organization_id
|
||||||
if self.inventory and self.inventory.organization_id and not self.organization_id:
|
|
||||||
self.organization_id = self.inventory.organization_id
|
|
||||||
|
|
||||||
# If update_fields has been specified, add our field names to it,
|
# If update_fields has been specified, add our field names to it,
|
||||||
# if it hasn't been specified, then we're just doing a normal save.
|
# if it hasn't been specified, then we're just doing a normal save.
|
||||||
@@ -1407,38 +1402,3 @@ class CustomInventoryScript(CommonModelNameNotUnique):
|
|||||||
|
|
||||||
def get_absolute_url(self, request=None):
|
def get_absolute_url(self, request=None):
|
||||||
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
|
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
|
||||||
|
|
||||||
|
|
||||||
class InventoryGroupVariablesWithHistory(models.Model):
|
|
||||||
"""
|
|
||||||
Represents the inventory variables of one inventory group.
|
|
||||||
|
|
||||||
The purpose of this model is to persist the update history of the group
|
|
||||||
variables. The update history is maintained in another class
|
|
||||||
(`InventoryGroupVariables`), this class here is just a container for the
|
|
||||||
database storage.
|
|
||||||
"""
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
constraints = [
|
|
||||||
# Do not allow the same inventory/group combination more than once.
|
|
||||||
models.UniqueConstraint(
|
|
||||||
fields=["inventory", "group"],
|
|
||||||
name="unique_inventory_group",
|
|
||||||
violation_error_message=_("Inventory/Group combination must be unique."),
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
inventory = models.ForeignKey(
|
|
||||||
'Inventory',
|
|
||||||
related_name='inventory_group_variables',
|
|
||||||
null=True,
|
|
||||||
on_delete=models.CASCADE,
|
|
||||||
)
|
|
||||||
group = models.ForeignKey( # `None` denotes the 'all'-group.
|
|
||||||
'Group',
|
|
||||||
related_name='inventory_group_variables',
|
|
||||||
null=True,
|
|
||||||
on_delete=models.CASCADE,
|
|
||||||
)
|
|
||||||
variables = models.JSONField() # The group variables, including their history.
|
|
||||||
|
|||||||
@@ -358,6 +358,26 @@ class JobTemplate(
|
|||||||
update_fields.append('organization_id')
|
update_fields.append('organization_id')
|
||||||
return super(JobTemplate, self).save(*args, **kwargs)
|
return super(JobTemplate, self).save(*args, **kwargs)
|
||||||
|
|
||||||
|
def validate_unique(self, exclude=None):
|
||||||
|
"""Custom over-ride for JT specifically
|
||||||
|
because organization is inferred from project after full_clean is finished
|
||||||
|
thus the organization field is not yet set when validation happens
|
||||||
|
"""
|
||||||
|
errors = []
|
||||||
|
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
|
||||||
|
kwargs = {'name': self.name}
|
||||||
|
if self.project:
|
||||||
|
kwargs['organization'] = self.project.organization_id
|
||||||
|
else:
|
||||||
|
kwargs['organization'] = None
|
||||||
|
qs = JobTemplate.objects.filter(**kwargs)
|
||||||
|
if self.pk:
|
||||||
|
qs = qs.exclude(pk=self.pk)
|
||||||
|
if qs.exists():
|
||||||
|
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
|
||||||
|
if errors:
|
||||||
|
raise ValidationError(errors)
|
||||||
|
|
||||||
def create_unified_job(self, **kwargs):
|
def create_unified_job(self, **kwargs):
|
||||||
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
||||||
slice_ct = self.get_effective_slice_ct(kwargs)
|
slice_ct = self.get_effective_slice_ct(kwargs)
|
||||||
@@ -384,26 +404,6 @@ class JobTemplate(
|
|||||||
WorkflowJobNode.objects.create(**create_kwargs)
|
WorkflowJobNode.objects.create(**create_kwargs)
|
||||||
return job
|
return job
|
||||||
|
|
||||||
def validate_unique(self, exclude=None):
|
|
||||||
"""Custom over-ride for JT specifically
|
|
||||||
because organization is inferred from project after full_clean is finished
|
|
||||||
thus the organization field is not yet set when validation happens
|
|
||||||
"""
|
|
||||||
errors = []
|
|
||||||
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
|
|
||||||
kwargs = {'name': self.name}
|
|
||||||
if self.project:
|
|
||||||
kwargs['organization'] = self.project.organization_id
|
|
||||||
else:
|
|
||||||
kwargs['organization'] = None
|
|
||||||
qs = JobTemplate.objects.filter(**kwargs)
|
|
||||||
if self.pk:
|
|
||||||
qs = qs.exclude(pk=self.pk)
|
|
||||||
if qs.exists():
|
|
||||||
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
|
|
||||||
if errors:
|
|
||||||
raise ValidationError(errors)
|
|
||||||
|
|
||||||
def get_absolute_url(self, request=None):
|
def get_absolute_url(self, request=None):
|
||||||
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
|
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||||
|
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ class ResourceMixin(models.Model):
|
|||||||
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
|
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
|
||||||
|
|
||||||
if content_types is None:
|
if content_types is None:
|
||||||
ct_kwarg = dict(content_type=ContentType.objects.get_for_model(cls))
|
ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
|
||||||
else:
|
else:
|
||||||
ct_kwarg = dict(content_type_id__in=content_types)
|
ct_kwarg = dict(content_type_id__in=content_types)
|
||||||
|
|
||||||
|
|||||||
@@ -27,9 +27,6 @@ from django.conf import settings
|
|||||||
|
|
||||||
# Ansible_base app
|
# Ansible_base app
|
||||||
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
|
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
|
||||||
from ansible_base.rbac.sync import maybe_reverse_sync_assignment, maybe_reverse_sync_unassignment, maybe_reverse_sync_role_definition
|
|
||||||
from ansible_base.rbac import permission_registry
|
|
||||||
from ansible_base.resource_registry.signals.handlers import no_reverse_sync
|
|
||||||
from ansible_base.lib.utils.models import get_type_for_model
|
from ansible_base.lib.utils.models import get_type_for_model
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -562,27 +559,34 @@ def get_role_definition(role):
|
|||||||
f = obj._meta.get_field(role.role_field)
|
f = obj._meta.get_field(role.role_field)
|
||||||
action_name = f.name.rsplit("_", 1)[0]
|
action_name = f.name.rsplit("_", 1)[0]
|
||||||
model_print = type(obj).__name__
|
model_print = type(obj).__name__
|
||||||
rd_name = f'{model_print} {action_name.title()} Compat'
|
|
||||||
perm_list = get_role_codenames(role)
|
perm_list = get_role_codenames(role)
|
||||||
defaults = {
|
defaults = {
|
||||||
'content_type': permission_registry.content_type_model.objects.get_by_natural_key(role.content_type.app_label, role.content_type.model),
|
'content_type_id': role.content_type_id,
|
||||||
'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility',
|
'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility',
|
||||||
}
|
}
|
||||||
|
# use Controller-specific role definitions for Team/Organization and member/admin
|
||||||
|
# instead of platform role definitions
|
||||||
|
# these should exist in the system already, so just do a lookup by role definition name
|
||||||
|
if model_print in ['Team', 'Organization'] and action_name in ['member', 'admin']:
|
||||||
|
rd_name = f'Controller {model_print} {action_name.title()}'
|
||||||
|
rd = RoleDefinition.objects.filter(name=rd_name).first()
|
||||||
|
if rd:
|
||||||
|
return rd
|
||||||
|
else:
|
||||||
|
return RoleDefinition.objects.create_from_permissions(permissions=perm_list, name=rd_name, managed=True, **defaults)
|
||||||
|
|
||||||
|
else:
|
||||||
|
rd_name = f'{model_print} {action_name.title()} Compat'
|
||||||
|
|
||||||
with impersonate(None):
|
with impersonate(None):
|
||||||
try:
|
try:
|
||||||
with no_reverse_sync():
|
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
|
||||||
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
|
|
||||||
except ValidationError:
|
except ValidationError:
|
||||||
# This is a tricky case - practically speaking, users should not be allowed to create team roles
|
# This is a tricky case - practically speaking, users should not be allowed to create team roles
|
||||||
# or roles that include the team member permission.
|
# or roles that include the team member permission.
|
||||||
# If we need to create this for compatibility purposes then we will create it as a managed non-editable role
|
# If we need to create this for compatibility purposes then we will create it as a managed non-editable role
|
||||||
defaults['managed'] = True
|
defaults['managed'] = True
|
||||||
with no_reverse_sync():
|
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
|
||||||
rd, created = RoleDefinition.objects.get_or_create(name=rd_name, permissions=perm_list, defaults=defaults)
|
|
||||||
|
|
||||||
if created and rbac_sync_enabled.enabled:
|
|
||||||
maybe_reverse_sync_role_definition(rd, action='create')
|
|
||||||
return rd
|
return rd
|
||||||
|
|
||||||
|
|
||||||
@@ -596,6 +600,12 @@ def get_role_from_object_role(object_role):
|
|||||||
model_name, role_name, _ = rd.name.split()
|
model_name, role_name, _ = rd.name.split()
|
||||||
role_name = role_name.lower()
|
role_name = role_name.lower()
|
||||||
role_name += '_role'
|
role_name += '_role'
|
||||||
|
elif rd.name.startswith('Controller') and rd.name.endswith(' Admin'):
|
||||||
|
# Controller Organization Admin and Controller Team Admin
|
||||||
|
role_name = 'admin_role'
|
||||||
|
elif rd.name.startswith('Controller') and rd.name.endswith(' Member'):
|
||||||
|
# Controller Organization Member and Controller Team Member
|
||||||
|
role_name = 'member_role'
|
||||||
elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2:
|
elif rd.name.endswith(' Admin') and rd.name.count(' ') == 2:
|
||||||
# cases like "Organization Project Admin"
|
# cases like "Organization Project Admin"
|
||||||
model_name, target_model_name, role_name = rd.name.split()
|
model_name, target_model_name, role_name = rd.name.split()
|
||||||
@@ -622,14 +632,12 @@ def get_role_from_object_role(object_role):
|
|||||||
return getattr(object_role.content_object, role_name)
|
return getattr(object_role.content_object, role_name)
|
||||||
|
|
||||||
|
|
||||||
def give_or_remove_permission(role, actor, giving=True, rd=None):
|
def give_or_remove_permission(role, actor, giving=True):
|
||||||
obj = role.content_object
|
obj = role.content_object
|
||||||
if obj is None:
|
if obj is None:
|
||||||
return
|
return
|
||||||
if not rd:
|
rd = get_role_definition(role)
|
||||||
rd = get_role_definition(role)
|
rd.give_or_remove_permission(actor, obj, giving=giving)
|
||||||
assignment = rd.give_or_remove_permission(actor, obj, giving=giving)
|
|
||||||
return assignment
|
|
||||||
|
|
||||||
|
|
||||||
class SyncEnabled(threading.local):
|
class SyncEnabled(threading.local):
|
||||||
@@ -681,15 +689,7 @@ def sync_members_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
|
|||||||
role = Role.objects.get(pk=user_or_role_id)
|
role = Role.objects.get(pk=user_or_role_id)
|
||||||
else:
|
else:
|
||||||
user = get_user_model().objects.get(pk=user_or_role_id)
|
user = get_user_model().objects.get(pk=user_or_role_id)
|
||||||
rd = get_role_definition(role)
|
give_or_remove_permission(role, user, giving=is_giving)
|
||||||
assignment = give_or_remove_permission(role, user, giving=is_giving, rd=rd)
|
|
||||||
|
|
||||||
# sync to resource server
|
|
||||||
if rbac_sync_enabled.enabled:
|
|
||||||
if is_giving:
|
|
||||||
maybe_reverse_sync_assignment(assignment)
|
|
||||||
else:
|
|
||||||
maybe_reverse_sync_unassignment(rd, user, role.content_object)
|
|
||||||
|
|
||||||
|
|
||||||
def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs):
|
def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs):
|
||||||
@@ -732,19 +732,12 @@ def sync_parents_to_new_rbac(instance, action, model, pk_set, reverse, **kwargs)
|
|||||||
from awx.main.models.organization import Team
|
from awx.main.models.organization import Team
|
||||||
|
|
||||||
team = Team.objects.get(pk=parent_role.object_id)
|
team = Team.objects.get(pk=parent_role.object_id)
|
||||||
rd = get_role_definition(child_role)
|
give_or_remove_permission(child_role, team, giving=is_giving)
|
||||||
assignment = give_or_remove_permission(child_role, team, giving=is_giving, rd=rd)
|
|
||||||
|
|
||||||
# sync to resource server
|
|
||||||
if rbac_sync_enabled.enabled:
|
|
||||||
if is_giving:
|
|
||||||
maybe_reverse_sync_assignment(assignment)
|
|
||||||
else:
|
|
||||||
maybe_reverse_sync_unassignment(rd, team, child_role.content_object)
|
|
||||||
|
|
||||||
|
|
||||||
ROLE_DEFINITION_TO_ROLE_FIELD = {
|
ROLE_DEFINITION_TO_ROLE_FIELD = {
|
||||||
'Organization Member': 'member_role',
|
'Organization Member': 'member_role',
|
||||||
|
'Controller Organization Member': 'member_role',
|
||||||
'WorkflowJobTemplate Admin': 'admin_role',
|
'WorkflowJobTemplate Admin': 'admin_role',
|
||||||
'Organization WorkflowJobTemplate Admin': 'workflow_admin_role',
|
'Organization WorkflowJobTemplate Admin': 'workflow_admin_role',
|
||||||
'WorkflowJobTemplate Execute': 'execute_role',
|
'WorkflowJobTemplate Execute': 'execute_role',
|
||||||
@@ -769,8 +762,11 @@ ROLE_DEFINITION_TO_ROLE_FIELD = {
|
|||||||
'Organization Credential Admin': 'credential_admin_role',
|
'Organization Credential Admin': 'credential_admin_role',
|
||||||
'Credential Use': 'use_role',
|
'Credential Use': 'use_role',
|
||||||
'Team Admin': 'admin_role',
|
'Team Admin': 'admin_role',
|
||||||
|
'Controller Team Admin': 'admin_role',
|
||||||
'Team Member': 'member_role',
|
'Team Member': 'member_role',
|
||||||
|
'Controller Team Member': 'member_role',
|
||||||
'Organization Admin': 'admin_role',
|
'Organization Admin': 'admin_role',
|
||||||
|
'Controller Organization Admin': 'admin_role',
|
||||||
'Organization Audit': 'auditor_role',
|
'Organization Audit': 'auditor_role',
|
||||||
'Organization Execute': 'execute_role',
|
'Organization Execute': 'execute_role',
|
||||||
'Organization Approval': 'approval_role',
|
'Organization Approval': 'approval_role',
|
||||||
|
|||||||
@@ -18,13 +18,11 @@ from collections import OrderedDict
|
|||||||
# Django
|
# Django
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.db import models, connection, transaction
|
from django.db import models, connection, transaction
|
||||||
from django.db.models.constraints import UniqueConstraint
|
|
||||||
from django.core.exceptions import NON_FIELD_ERRORS
|
from django.core.exceptions import NON_FIELD_ERRORS
|
||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
from django.utils.encoding import smart_str
|
from django.utils.encoding import smart_str
|
||||||
from django.contrib.contenttypes.models import ContentType
|
from django.contrib.contenttypes.models import ContentType
|
||||||
from flags.state import flag_enabled
|
|
||||||
|
|
||||||
# REST Framework
|
# REST Framework
|
||||||
from rest_framework.exceptions import ParseError
|
from rest_framework.exceptions import ParseError
|
||||||
@@ -34,7 +32,6 @@ from polymorphic.models import PolymorphicModel
|
|||||||
|
|
||||||
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
|
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
|
||||||
from ansible_base.rbac import permission_registry
|
from ansible_base.rbac import permission_registry
|
||||||
from ansible_base.rbac.models import RoleEvaluation
|
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
|
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
|
||||||
@@ -114,10 +111,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
ordering = ('name',)
|
ordering = ('name',)
|
||||||
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
|
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
|
||||||
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
||||||
# Unique name constraint - note that inventory source model is excluded from this constraint entirely
|
# unique_together = [('polymorphic_ctype', 'name', 'organization')]
|
||||||
constraints = [
|
|
||||||
UniqueConstraint(fields=['polymorphic_ctype', 'name', 'organization'], condition=models.Q(org_unique=True), name='ujt_hard_name_constraint')
|
|
||||||
]
|
|
||||||
|
|
||||||
old_pk = models.PositiveIntegerField(
|
old_pk = models.PositiveIntegerField(
|
||||||
null=True,
|
null=True,
|
||||||
@@ -186,9 +180,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
)
|
)
|
||||||
labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels')
|
labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels')
|
||||||
instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership')
|
instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership')
|
||||||
org_unique = models.BooleanField(
|
|
||||||
blank=True, default=True, editable=False, help_text=_('Used internally to selectively enforce database constraint on name')
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_absolute_url(self, request=None):
|
def get_absolute_url(self, request=None):
|
||||||
real_instance = self.get_real_instance()
|
real_instance = self.get_real_instance()
|
||||||
@@ -219,21 +210,20 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
|||||||
# do not use this if in a subclass
|
# do not use this if in a subclass
|
||||||
if cls != UnifiedJobTemplate:
|
if cls != UnifiedJobTemplate:
|
||||||
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
|
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
|
||||||
|
from ansible_base.rbac.models import RoleEvaluation
|
||||||
|
|
||||||
action = to_permissions[role_field]
|
action = to_permissions[role_field]
|
||||||
|
|
||||||
# Special condition for super auditor
|
# Special condition for super auditor
|
||||||
role_subclasses = cls._submodels_with_roles()
|
role_subclasses = cls._submodels_with_roles()
|
||||||
|
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
|
||||||
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
|
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
|
||||||
if not (all_codenames - accessor.singleton_permissions()):
|
if not (all_codenames - accessor.singleton_permissions()):
|
||||||
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
|
|
||||||
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
|
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
|
||||||
return qs.values_list('id', flat=True)
|
return qs.values_list('id', flat=True)
|
||||||
|
|
||||||
dab_role_cts = permission_registry.content_type_model.objects.get_for_models(*role_subclasses).values()
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in dab_role_cts])
|
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
|
||||||
.values_list('object_id')
|
.values_list('object_id')
|
||||||
.distinct()
|
.distinct()
|
||||||
)
|
)
|
||||||
@@ -1200,13 +1190,6 @@ class UnifiedJob(
|
|||||||
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
|
fd = StringIO(fd.getvalue().replace('\\r\\n', '\n'))
|
||||||
return fd
|
return fd
|
||||||
|
|
||||||
def _fix_double_escapes(self, content):
|
|
||||||
"""
|
|
||||||
Collapse double-escaped sequences into single-escaped form.
|
|
||||||
"""
|
|
||||||
# Replace \\ followed by one of ' " \ n r t
|
|
||||||
return re.sub(r'\\([\'"\\nrt])', r'\1', content)
|
|
||||||
|
|
||||||
def _escape_ascii(self, content):
|
def _escape_ascii(self, content):
|
||||||
# Remove ANSI escape sequences used to embed event data.
|
# Remove ANSI escape sequences used to embed event data.
|
||||||
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
|
content = re.sub(r'\x1b\[K(?:[A-Za-z0-9+/=]+\x1b\[\d+D)+\x1b\[K', '', content)
|
||||||
@@ -1214,14 +1197,12 @@ class UnifiedJob(
|
|||||||
content = re.sub(r'\x1b[^m]*m', '', content)
|
content = re.sub(r'\x1b[^m]*m', '', content)
|
||||||
return content
|
return content
|
||||||
|
|
||||||
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False, fix_escapes=False):
|
def _result_stdout_raw(self, redact_sensitive=False, escape_ascii=False):
|
||||||
content = self.result_stdout_raw_handle().read()
|
content = self.result_stdout_raw_handle().read()
|
||||||
if redact_sensitive:
|
if redact_sensitive:
|
||||||
content = UriCleaner.remove_sensitive(content)
|
content = UriCleaner.remove_sensitive(content)
|
||||||
if escape_ascii:
|
if escape_ascii:
|
||||||
content = self._escape_ascii(content)
|
content = self._escape_ascii(content)
|
||||||
if fix_escapes:
|
|
||||||
content = self._fix_double_escapes(content)
|
|
||||||
return content
|
return content
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@@ -1230,10 +1211,9 @@ class UnifiedJob(
|
|||||||
|
|
||||||
@property
|
@property
|
||||||
def result_stdout(self):
|
def result_stdout(self):
|
||||||
# Human-facing output should fix escapes
|
return self._result_stdout_raw(escape_ascii=True)
|
||||||
return self._result_stdout_raw(escape_ascii=True, fix_escapes=True)
|
|
||||||
|
|
||||||
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False, fix_escapes=False):
|
def _result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=True, escape_ascii=False):
|
||||||
return_buffer = StringIO()
|
return_buffer = StringIO()
|
||||||
if end_line is not None:
|
if end_line is not None:
|
||||||
end_line = int(end_line)
|
end_line = int(end_line)
|
||||||
@@ -1256,18 +1236,14 @@ class UnifiedJob(
|
|||||||
return_buffer = UriCleaner.remove_sensitive(return_buffer)
|
return_buffer = UriCleaner.remove_sensitive(return_buffer)
|
||||||
if escape_ascii:
|
if escape_ascii:
|
||||||
return_buffer = self._escape_ascii(return_buffer)
|
return_buffer = self._escape_ascii(return_buffer)
|
||||||
if fix_escapes:
|
|
||||||
return_buffer = self._fix_double_escapes(return_buffer)
|
|
||||||
|
|
||||||
return return_buffer, start_actual, end_actual, absolute_end
|
return return_buffer, start_actual, end_actual, absolute_end
|
||||||
|
|
||||||
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False):
|
def result_stdout_raw_limited(self, start_line=0, end_line=None, redact_sensitive=False):
|
||||||
# Raw should NOT fix escapes
|
|
||||||
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive)
|
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive)
|
||||||
|
|
||||||
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False):
|
def result_stdout_limited(self, start_line=0, end_line=None, redact_sensitive=False):
|
||||||
# Human-facing should fix escapes
|
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True)
|
||||||
return self._result_stdout_raw_limited(start_line, end_line, redact_sensitive, escape_ascii=True, fix_escapes=True)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def workflow_job_id(self):
|
def workflow_job_id(self):
|
||||||
@@ -1386,30 +1362,7 @@ class UnifiedJob(
|
|||||||
traceback=self.result_traceback,
|
traceback=self.result_traceback,
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_start_kwargs(self):
|
def pre_start(self, **kwargs):
|
||||||
needed = self.get_passwords_needed_to_start()
|
|
||||||
|
|
||||||
decrypted_start_args = decrypt_field(self, 'start_args')
|
|
||||||
|
|
||||||
if not decrypted_start_args or decrypted_start_args == '{}':
|
|
||||||
return None
|
|
||||||
|
|
||||||
try:
|
|
||||||
start_args = json.loads(decrypted_start_args)
|
|
||||||
except Exception:
|
|
||||||
logger.exception(f'Unexpected malformed start_args on unified_job={self.id}')
|
|
||||||
return None
|
|
||||||
|
|
||||||
opts = dict([(field, start_args.get(field, '')) for field in needed])
|
|
||||||
|
|
||||||
if not all(opts.values()):
|
|
||||||
missing_fields = ', '.join([k for k, v in opts.items() if not v])
|
|
||||||
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
|
|
||||||
self.save(update_fields=['job_explanation'])
|
|
||||||
|
|
||||||
return opts
|
|
||||||
|
|
||||||
def pre_start(self):
|
|
||||||
if not self.can_start:
|
if not self.can_start:
|
||||||
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
|
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
|
||||||
self.save(update_fields=['job_explanation'])
|
self.save(update_fields=['job_explanation'])
|
||||||
@@ -1430,11 +1383,26 @@ class UnifiedJob(
|
|||||||
self.save(update_fields=['job_explanation'])
|
self.save(update_fields=['job_explanation'])
|
||||||
return (False, None)
|
return (False, None)
|
||||||
|
|
||||||
opts = self.get_start_kwargs()
|
needed = self.get_passwords_needed_to_start()
|
||||||
|
try:
|
||||||
|
start_args = json.loads(decrypt_field(self, 'start_args'))
|
||||||
|
except Exception:
|
||||||
|
start_args = None
|
||||||
|
|
||||||
if opts and (not all(opts.values())):
|
if start_args in (None, ''):
|
||||||
|
start_args = kwargs
|
||||||
|
|
||||||
|
opts = dict([(field, start_args.get(field, '')) for field in needed])
|
||||||
|
|
||||||
|
if not all(opts.values()):
|
||||||
|
missing_fields = ', '.join([k for k, v in opts.items() if not v])
|
||||||
|
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
|
||||||
|
self.save(update_fields=['job_explanation'])
|
||||||
return (False, None)
|
return (False, None)
|
||||||
|
|
||||||
|
if 'extra_vars' in kwargs:
|
||||||
|
self.handle_extra_data(kwargs['extra_vars'])
|
||||||
|
|
||||||
# remove any job_explanations that may have been set while job was in pending
|
# remove any job_explanations that may have been set while job was in pending
|
||||||
if self.job_explanation != "":
|
if self.job_explanation != "":
|
||||||
self.job_explanation = ""
|
self.job_explanation = ""
|
||||||
@@ -1495,44 +1463,21 @@ class UnifiedJob(
|
|||||||
def cancel_dispatcher_process(self):
|
def cancel_dispatcher_process(self):
|
||||||
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
|
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
|
||||||
if not self.celery_task_id:
|
if not self.celery_task_id:
|
||||||
return False
|
return
|
||||||
|
|
||||||
canceled = []
|
canceled = []
|
||||||
# Special case for task manager (used during workflow job cancellation)
|
|
||||||
if not connection.get_autocommit():
|
if not connection.get_autocommit():
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
|
||||||
try:
|
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
||||||
from dispatcherd.factories import get_control_from_settings
|
|
||||||
|
|
||||||
ctl = get_control_from_settings()
|
|
||||||
ctl.control('cancel', data={'uuid': self.celery_task_id})
|
|
||||||
except Exception:
|
|
||||||
logger.exception("Error sending cancel command to new dispatcher")
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
|
||||||
except Exception:
|
|
||||||
logger.exception("Error sending cancel command to legacy dispatcher")
|
|
||||||
return True # task manager itself needs to act under assumption that cancel was received
|
return True # task manager itself needs to act under assumption that cancel was received
|
||||||
|
|
||||||
# Standard case with reply
|
|
||||||
try:
|
try:
|
||||||
|
# Use control and reply mechanism to cancel and obtain confirmation
|
||||||
timeout = 5
|
timeout = 5
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
|
||||||
from dispatcherd.factories import get_control_from_settings
|
|
||||||
|
|
||||||
ctl = get_control_from_settings()
|
|
||||||
results = ctl.control_with_reply('cancel', data={'uuid': self.celery_task_id}, expected_replies=1, timeout=timeout)
|
|
||||||
# Check if cancel was successful by checking if we got any results
|
|
||||||
return bool(results and len(results) > 0)
|
|
||||||
else:
|
|
||||||
# Original implementation
|
|
||||||
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
|
|
||||||
except socket.timeout:
|
except socket.timeout:
|
||||||
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
|
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("error encountered when checking task status")
|
logger.exception("error encountered when checking task status")
|
||||||
|
|
||||||
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
|
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
|
||||||
|
|
||||||
def cancel(self, job_explanation=None, is_chain=False):
|
def cancel(self, job_explanation=None, is_chain=False):
|
||||||
|
|||||||
@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
|||||||
):
|
):
|
||||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||||
self.grafana_key = grafana_key
|
self.grafana_key = grafana_key
|
||||||
self.dashboardId = int(dashboardId) if dashboardId != '' else None
|
self.dashboardId = int(dashboardId) if dashboardId is not None and panelId != "" else None
|
||||||
self.panelId = int(panelId) if panelId != '' else None
|
self.panelId = int(panelId) if panelId is not None and panelId != "" else None
|
||||||
self.annotation_tags = annotation_tags if annotation_tags is not None else []
|
self.annotation_tags = annotation_tags if annotation_tags is not None else []
|
||||||
self.grafana_no_verify_ssl = grafana_no_verify_ssl
|
self.grafana_no_verify_ssl = grafana_no_verify_ssl
|
||||||
self.isRegion = isRegion
|
self.isRegion = isRegion
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ import time
|
|||||||
import ssl
|
import ssl
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
import irc.client
|
||||||
|
|
||||||
from django.utils.encoding import smart_str
|
from django.utils.encoding import smart_str
|
||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
|
|
||||||
@@ -14,19 +16,6 @@ from awx.main.notifications.custom_notification_base import CustomNotificationBa
|
|||||||
logger = logging.getLogger('awx.main.notifications.irc_backend')
|
logger = logging.getLogger('awx.main.notifications.irc_backend')
|
||||||
|
|
||||||
|
|
||||||
def _irc():
|
|
||||||
"""
|
|
||||||
Prime the real jaraco namespace before importing irc.* so that
|
|
||||||
setuptools' vendored 'setuptools._vendor.jaraco' doesn't shadow
|
|
||||||
external 'jaraco.*' packages (e.g., jaraco.stream).
|
|
||||||
"""
|
|
||||||
import jaraco.stream # ensure the namespace package is established # noqa: F401
|
|
||||||
import irc.client as irc_client
|
|
||||||
import irc.connection as irc_connection
|
|
||||||
|
|
||||||
return irc_client, irc_connection
|
|
||||||
|
|
||||||
|
|
||||||
class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||||
init_parameters = {
|
init_parameters = {
|
||||||
"server": {"label": "IRC Server Address", "type": "string"},
|
"server": {"label": "IRC Server Address", "type": "string"},
|
||||||
@@ -51,15 +40,12 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
|||||||
def open(self):
|
def open(self):
|
||||||
if self.connection is not None:
|
if self.connection is not None:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
irc_client, irc_connection = _irc()
|
|
||||||
|
|
||||||
if self.use_ssl:
|
if self.use_ssl:
|
||||||
connection_factory = irc_connection.Factory(wrapper=ssl.wrap_socket)
|
connection_factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
|
||||||
else:
|
else:
|
||||||
connection_factory = irc_connection.Factory()
|
connection_factory = irc.connection.Factory()
|
||||||
try:
|
try:
|
||||||
self.reactor = irc_client.Reactor()
|
self.reactor = irc.client.Reactor()
|
||||||
self.connection = self.reactor.server().connect(
|
self.connection = self.reactor.server().connect(
|
||||||
self.server,
|
self.server,
|
||||||
self.port,
|
self.port,
|
||||||
@@ -67,7 +53,7 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
|||||||
password=self.password,
|
password=self.password,
|
||||||
connect_factory=connection_factory,
|
connect_factory=connection_factory,
|
||||||
)
|
)
|
||||||
except irc_client.ServerConnectionError as e:
|
except irc.client.ServerConnectionError as e:
|
||||||
logger.error(smart_str(_("Exception connecting to irc server: {}").format(e)))
|
logger.error(smart_str(_("Exception connecting to irc server: {}").format(e)))
|
||||||
if not self.fail_silently:
|
if not self.fail_silently:
|
||||||
raise
|
raise
|
||||||
@@ -79,9 +65,8 @@ class IrcBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
|||||||
self.connection = None
|
self.connection = None
|
||||||
|
|
||||||
def on_connect(self, connection, event):
|
def on_connect(self, connection, event):
|
||||||
irc_client, _ = _irc()
|
|
||||||
for c in self.channels:
|
for c in self.channels:
|
||||||
if irc_client.is_channel(c):
|
if irc.client.is_channel(c):
|
||||||
connection.join(c)
|
connection.join(c)
|
||||||
else:
|
else:
|
||||||
for m in self.channels[c]:
|
for m in self.channels[c]:
|
||||||
|
|||||||
@@ -19,9 +19,6 @@ from django.utils.timezone import now as tz_now
|
|||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.contrib.contenttypes.models import ContentType
|
from django.contrib.contenttypes.models import ContentType
|
||||||
|
|
||||||
# django-flags
|
|
||||||
from flags.state import flag_enabled
|
|
||||||
|
|
||||||
from ansible_base.lib.utils.models import get_type_for_model
|
from ansible_base.lib.utils.models import get_type_for_model
|
||||||
|
|
||||||
# django-ansible-base
|
# django-ansible-base
|
||||||
@@ -51,7 +48,6 @@ from awx.main.signals import disable_activity_stream
|
|||||||
from awx.main.constants import ACTIVE_STATES
|
from awx.main.constants import ACTIVE_STATES
|
||||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
|
||||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||||
from awx.main.utils import decrypt_field
|
from awx.main.utils import decrypt_field
|
||||||
|
|
||||||
@@ -435,7 +431,6 @@ class TaskManager(TaskBase):
|
|||||||
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
|
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
|
||||||
# will no longer be started and will be started on the next task manager cycle.
|
# will no longer be started and will be started on the next task manager cycle.
|
||||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||||
self.control_nodes_to_notify: set[str] = set()
|
|
||||||
super().__init__(prefix="task_manager")
|
super().__init__(prefix="task_manager")
|
||||||
|
|
||||||
def after_lock_init(self):
|
def after_lock_init(self):
|
||||||
@@ -524,19 +519,16 @@ class TaskManager(TaskBase):
|
|||||||
task.save()
|
task.save()
|
||||||
task.log_lifecycle("waiting")
|
task.log_lifecycle("waiting")
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
||||||
self.control_nodes_to_notify.add(task.get_queue_name())
|
# postgres will treat this as part of the transaction, which is what we want
|
||||||
else:
|
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||||
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
task_cls = task._get_task_class()
|
||||||
# postgres will treat this as part of the transaction, which is what we want
|
task_cls.apply_async(
|
||||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
[task.pk],
|
||||||
task_cls = task._get_task_class()
|
opts,
|
||||||
task_cls.apply_async(
|
queue=task.get_queue_name(),
|
||||||
[task.pk],
|
uuid=task.celery_task_id,
|
||||||
opts,
|
)
|
||||||
queue=task.get_queue_name(),
|
|
||||||
uuid=task.celery_task_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
|
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
|
||||||
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||||
@@ -729,8 +721,3 @@ class TaskManager(TaskBase):
|
|||||||
|
|
||||||
for workflow_approval in self.get_expired_workflow_approvals():
|
for workflow_approval in self.get_expired_workflow_approvals():
|
||||||
self.timeout_approval_node(workflow_approval)
|
self.timeout_approval_node(workflow_approval)
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
|
||||||
for controller_node in self.control_nodes_to_notify:
|
|
||||||
logger.info(f'Notifying node {controller_node} of new waiting jobs.')
|
|
||||||
dispatch_waiting_jobs.apply_async(queue=controller_node)
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from django.conf import settings
|
|||||||
# AWX
|
# AWX
|
||||||
from awx import MODE
|
from awx import MODE
|
||||||
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
|
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
from awx.main.dispatch.publish import task
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.scheduler')
|
logger = logging.getLogger('awx.main.scheduler')
|
||||||
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
|
|||||||
manager().schedule()
|
manager().schedule()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def task_manager():
|
def task_manager():
|
||||||
run_manager(TaskManager, "task")
|
run_manager(TaskManager, "task")
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def dependency_manager():
|
def dependency_manager():
|
||||||
run_manager(DependencyManager, "dependency")
|
run_manager(DependencyManager, "dependency")
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def workflow_manager():
|
def workflow_manager():
|
||||||
run_manager(WorkflowManager, "workflow")
|
run_manager(WorkflowManager, "workflow")
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ from awx.main.models import (
|
|||||||
InventorySource,
|
InventorySource,
|
||||||
Job,
|
Job,
|
||||||
JobHostSummary,
|
JobHostSummary,
|
||||||
|
JobTemplate,
|
||||||
Organization,
|
Organization,
|
||||||
Project,
|
Project,
|
||||||
Role,
|
Role,
|
||||||
@@ -55,7 +56,10 @@ from awx.main.models import (
|
|||||||
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
|
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
|
||||||
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
|
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
|
||||||
from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image
|
from awx.main.tasks.system import update_inventory_computed_fields, handle_removed_image
|
||||||
from awx.main.fields import is_implicit_parent
|
from awx.main.fields import (
|
||||||
|
is_implicit_parent,
|
||||||
|
update_role_parentage_for_instance,
|
||||||
|
)
|
||||||
|
|
||||||
from awx.main import consumers
|
from awx.main import consumers
|
||||||
|
|
||||||
@@ -188,6 +192,31 @@ def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
|
|||||||
label.delete()
|
label.delete()
|
||||||
|
|
||||||
|
|
||||||
|
def save_related_job_templates(sender, instance, **kwargs):
|
||||||
|
"""save_related_job_templates loops through all of the
|
||||||
|
job templates that use an Inventory that have had their
|
||||||
|
Organization updated. This triggers the rebuilding of the RBAC hierarchy
|
||||||
|
and ensures the proper access restrictions.
|
||||||
|
"""
|
||||||
|
if sender is not Inventory:
|
||||||
|
raise ValueError('This signal callback is only intended for use with Project or Inventory')
|
||||||
|
|
||||||
|
update_fields = kwargs.get('update_fields', None)
|
||||||
|
if (update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or kwargs.get('created', False):
|
||||||
|
return
|
||||||
|
|
||||||
|
if instance._prior_values_store.get('organization_id') != instance.organization_id:
|
||||||
|
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
|
||||||
|
for jt in jtq:
|
||||||
|
parents_added, parents_removed = update_role_parentage_for_instance(jt)
|
||||||
|
if parents_added or parents_removed:
|
||||||
|
logger.info(
|
||||||
|
'Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
|
||||||
|
jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def connect_computed_field_signals():
|
def connect_computed_field_signals():
|
||||||
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||||
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
|
||||||
@@ -201,6 +230,7 @@ def connect_computed_field_signals():
|
|||||||
|
|
||||||
connect_computed_field_signals()
|
connect_computed_field_signals()
|
||||||
|
|
||||||
|
post_save.connect(save_related_job_templates, sender=Inventory)
|
||||||
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
|
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
|
||||||
m2m_changed.connect(rbac_activity_stream, Role.members.through)
|
m2m_changed.connect(rbac_activity_stream, Role.members.through)
|
||||||
m2m_changed.connect(rbac_activity_stream, Role.parents.through)
|
m2m_changed.connect(rbac_activity_stream, Role.parents.through)
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
from . import callback, facts, helpers, host_indirect, host_metrics, jobs, receptor, system # noqa
|
from . import host_metrics, jobs, receptor, system # noqa
|
||||||
|
|||||||
@@ -8,13 +8,13 @@ import logging
|
|||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.utils.encoding import smart_str
|
from django.utils.encoding import smart_str
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
|
from django.db import OperationalError
|
||||||
|
|
||||||
# django-ansible-base
|
# django-ansible-base
|
||||||
from ansible_base.lib.logging.runtime import log_excess_runtime
|
from ansible_base.lib.logging.runtime import log_excess_runtime
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
from awx.main.models.inventory import Host
|
||||||
from awx.main.models import Host
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.tasks.facts')
|
logger = logging.getLogger('awx.main.tasks.facts')
|
||||||
@@ -22,29 +22,27 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
|||||||
|
|
||||||
|
|
||||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||||
def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_data=None):
|
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
|
||||||
log_data = log_data or {}
|
|
||||||
log_data['inventory_id'] = inventory_id
|
log_data['inventory_id'] = inventory_id
|
||||||
log_data['written_ct'] = 0
|
log_data['written_ct'] = 0
|
||||||
hosts_cached = []
|
hosts_cached = list()
|
||||||
|
try:
|
||||||
# Create the fact_cache directory inside artifacts_dir
|
os.makedirs(destination, mode=0o700)
|
||||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
except FileExistsError:
|
||||||
os.makedirs(fact_cache_dir, mode=0o700, exist_ok=True)
|
pass
|
||||||
|
|
||||||
if timeout is None:
|
if timeout is None:
|
||||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||||
|
|
||||||
last_write_time = None
|
last_filepath_written = None
|
||||||
|
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
hosts_cached.append(host.name)
|
hosts_cached.append(host)
|
||||||
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
||||||
continue # facts are expired - do not write them
|
continue # facts are expired - do not write them
|
||||||
|
|
||||||
filepath = os.path.join(fact_cache_dir, host.name)
|
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
if not os.path.realpath(filepath).startswith(destination):
|
||||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -52,21 +50,37 @@ def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_
|
|||||||
os.chmod(f.name, 0o600)
|
os.chmod(f.name, 0o600)
|
||||||
json.dump(host.ansible_facts, f)
|
json.dump(host.ansible_facts, f)
|
||||||
log_data['written_ct'] += 1
|
log_data['written_ct'] += 1
|
||||||
last_write_time = os.path.getmtime(filepath)
|
last_filepath_written = filepath
|
||||||
except IOError:
|
except IOError:
|
||||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Write summary file directly to the artifacts_dir
|
if last_filepath_written:
|
||||||
if inventory_id is not None:
|
return os.path.getmtime(last_filepath_written), hosts_cached
|
||||||
summary_file = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
|
||||||
summary_data = {
|
return None, hosts_cached
|
||||||
'last_write_time': last_write_time,
|
|
||||||
'hosts_cached': hosts_cached,
|
|
||||||
'written_ct': log_data['written_ct'],
|
def raw_update_hosts(host_list):
|
||||||
}
|
Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
|
||||||
with open(summary_file, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(summary_data, f, indent=2)
|
|
||||||
|
def update_hosts(host_list, max_tries=5):
|
||||||
|
if not host_list:
|
||||||
|
return
|
||||||
|
for i in range(max_tries):
|
||||||
|
try:
|
||||||
|
raw_update_hosts(host_list)
|
||||||
|
except OperationalError as exc:
|
||||||
|
# Deadlocks can happen if this runs at the same time as another large query
|
||||||
|
# inventory updates and updating last_job_host_summary are candidates for conflict
|
||||||
|
# but these would resolve easily on a retry
|
||||||
|
if i + 1 < max_tries:
|
||||||
|
logger.info(f'OperationalError (suspected deadlock) saving host facts retry {i}, message: {exc}')
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
break
|
||||||
|
|
||||||
|
|
||||||
@log_excess_runtime(
|
@log_excess_runtime(
|
||||||
@@ -75,54 +89,32 @@ def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_
|
|||||||
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||||
add_log_data=True,
|
add_log_data=True,
|
||||||
)
|
)
|
||||||
def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=None):
|
def finish_fact_cache(hosts_cached, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
|
||||||
log_data = log_data or {}
|
|
||||||
log_data['inventory_id'] = inventory_id
|
log_data['inventory_id'] = inventory_id
|
||||||
log_data['updated_ct'] = 0
|
log_data['updated_ct'] = 0
|
||||||
log_data['unmodified_ct'] = 0
|
log_data['unmodified_ct'] = 0
|
||||||
log_data['cleared_ct'] = 0
|
log_data['cleared_ct'] = 0
|
||||||
# The summary file is directly inside the artifacts dir
|
|
||||||
summary_path = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
|
||||||
if not os.path.exists(summary_path):
|
|
||||||
logger.error(f'Missing summary file at {summary_path}')
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open(summary_path, 'r', encoding='utf-8') as f:
|
|
||||||
summary = json.load(f)
|
|
||||||
facts_write_time = os.path.getmtime(summary_path) # After successful read
|
|
||||||
except (json.JSONDecodeError, OSError) as e:
|
|
||||||
logger.error(f'Error reading summary file at {summary_path}: {e}')
|
|
||||||
return
|
|
||||||
|
|
||||||
host_names = summary.get('hosts_cached', [])
|
|
||||||
hosts_cached = Host.objects.filter(name__in=host_names).order_by('id').iterator()
|
|
||||||
# Path where individual fact files were written
|
|
||||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
|
||||||
hosts_to_update = []
|
hosts_to_update = []
|
||||||
|
|
||||||
for host in hosts_cached:
|
for host in hosts_cached:
|
||||||
filepath = os.path.join(fact_cache_dir, host.name)
|
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
if not os.path.realpath(filepath).startswith(destination):
|
||||||
logger.error(f'Invalid path for facts file: {filepath}')
|
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if os.path.exists(filepath):
|
if os.path.exists(filepath):
|
||||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||||
modified = os.path.getmtime(filepath)
|
modified = os.path.getmtime(filepath)
|
||||||
if not facts_write_time or modified >= facts_write_time:
|
if (not facts_write_time) or modified > facts_write_time:
|
||||||
try:
|
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
try:
|
||||||
ansible_facts = json.load(f)
|
ansible_facts = json.load(f)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if ansible_facts != host.ansible_facts:
|
|
||||||
host.ansible_facts = ansible_facts
|
host.ansible_facts = ansible_facts
|
||||||
host.ansible_facts_modified = now()
|
host.ansible_facts_modified = now()
|
||||||
hosts_to_update.append(host)
|
hosts_to_update.append(host)
|
||||||
logger.info(
|
system_tracking_logger.info(
|
||||||
f'New fact for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}',
|
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||||
extra=dict(
|
extra=dict(
|
||||||
inventory_id=host.inventory.id,
|
inventory_id=host.inventory.id,
|
||||||
host_name=host.name,
|
host_name=host.name,
|
||||||
@@ -132,8 +124,6 @@ def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=No
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
log_data['updated_ct'] += 1
|
log_data['updated_ct'] += 1
|
||||||
else:
|
|
||||||
log_data['unmodified_ct'] += 1
|
|
||||||
else:
|
else:
|
||||||
log_data['unmodified_ct'] += 1
|
log_data['unmodified_ct'] += 1
|
||||||
else:
|
else:
|
||||||
@@ -142,11 +132,9 @@ def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=No
|
|||||||
host.ansible_facts = {}
|
host.ansible_facts = {}
|
||||||
host.ansible_facts_modified = now()
|
host.ansible_facts_modified = now()
|
||||||
hosts_to_update.append(host)
|
hosts_to_update.append(host)
|
||||||
logger.info(f'Facts cleared for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}')
|
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||||
log_data['cleared_ct'] += 1
|
log_data['cleared_ct'] += 1
|
||||||
|
if len(hosts_to_update) > 100:
|
||||||
if len(hosts_to_update) >= 100:
|
update_hosts(hosts_to_update)
|
||||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
|
||||||
hosts_to_update = []
|
hosts_to_update = []
|
||||||
|
update_hosts(hosts_to_update)
|
||||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
|
||||||
|
|||||||
@@ -77,14 +77,7 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
|
|||||||
if jq_str_for_event not in compiled_jq_expressions:
|
if jq_str_for_event not in compiled_jq_expressions:
|
||||||
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
|
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
|
||||||
compiled_jq = compiled_jq_expressions[resolved_action]
|
compiled_jq = compiled_jq_expressions[resolved_action]
|
||||||
|
for data in compiled_jq.input(event.event_data['res']).all():
|
||||||
try:
|
|
||||||
data_source = compiled_jq.input(event.event_data['res']).all()
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning(f'error for module {resolved_action} and data {event.event_data["res"]}: {e}')
|
|
||||||
continue
|
|
||||||
|
|
||||||
for data in data_source:
|
|
||||||
# From this jq result (specific to a single Ansible module), get index information about this host record
|
# From this jq result (specific to a single Ansible module), get index information about this host record
|
||||||
if not data.get('canonical_facts'):
|
if not data.get('canonical_facts'):
|
||||||
if not facts_missing_logged:
|
if not facts_missing_logged:
|
||||||
|
|||||||
@@ -7,18 +7,17 @@ from django.db.models import Count, F
|
|||||||
from django.db.models.functions import TruncMonth
|
from django.db.models.functions import TruncMonth
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
from awx.main.dispatch.publish import task
|
||||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||||
from awx.conf.license import get_license
|
from awx.conf.license import get_license
|
||||||
from ansible_base.lib.utils.db import advisory_lock
|
from ansible_base.lib.utils.db import advisory_lock
|
||||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.tasks.host_metrics')
|
logger = logging.getLogger('awx.main.tasks.host_metrics')
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def cleanup_host_metrics():
|
def cleanup_host_metrics():
|
||||||
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
|
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
|
||||||
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
|
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
|
||||||
@@ -29,7 +28,7 @@ def cleanup_host_metrics():
|
|||||||
logger.info("Finished cleanup_host_metrics")
|
logger.info("Finished cleanup_host_metrics")
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def host_metric_summary_monthly():
|
def host_metric_summary_monthly():
|
||||||
"""Run cleanup host metrics summary monthly task each week"""
|
"""Run cleanup host metrics summary monthly task each week"""
|
||||||
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
|
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
|
||||||
@@ -147,9 +146,8 @@ class HostMetricSummaryMonthlyTask:
|
|||||||
month = month + relativedelta(months=1)
|
month = month + relativedelta(months=1)
|
||||||
|
|
||||||
# Create/Update stats
|
# Create/Update stats
|
||||||
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create)
|
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
|
||||||
|
HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)
|
||||||
bulk_update_sorted_by_id(HostMetricSummaryMonthly, self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'])
|
|
||||||
|
|
||||||
# Set timestamp of last run
|
# Set timestamp of last run
|
||||||
settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()
|
settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()
|
||||||
|
|||||||
@@ -17,12 +17,10 @@ import urllib.parse as urlparse
|
|||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.db import transaction
|
|
||||||
|
|
||||||
# Shared code for the AWX platform
|
# Shared code for the AWX platform
|
||||||
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
|
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
|
||||||
from django.utils.translation import gettext_lazy as _
|
|
||||||
from rest_framework.exceptions import PermissionDenied
|
|
||||||
|
|
||||||
# Runner
|
# Runner
|
||||||
import ansible_runner
|
import ansible_runner
|
||||||
@@ -31,12 +29,9 @@ import ansible_runner
|
|||||||
import git
|
import git
|
||||||
from gitdb.exc import BadName as BadGitName
|
from gitdb.exc import BadName as BadGitName
|
||||||
|
|
||||||
# Dispatcherd
|
|
||||||
from dispatcherd.publish import task
|
|
||||||
from dispatcherd.utils import serialize_task
|
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
from awx.main.dispatch.publish import task
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
from awx.main.constants import (
|
from awx.main.constants import (
|
||||||
PRIVILEGE_ESCALATION_METHODS,
|
PRIVILEGE_ESCALATION_METHODS,
|
||||||
@@ -44,13 +39,13 @@ from awx.main.constants import (
|
|||||||
JOB_FOLDER_PREFIX,
|
JOB_FOLDER_PREFIX,
|
||||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||||
CONTAINER_VOLUMES_MOUNT_TYPES,
|
CONTAINER_VOLUMES_MOUNT_TYPES,
|
||||||
|
ACTIVE_STATES,
|
||||||
HOST_FACTS_FIELDS,
|
HOST_FACTS_FIELDS,
|
||||||
)
|
)
|
||||||
from awx.main.models import (
|
from awx.main.models import (
|
||||||
Instance,
|
Instance,
|
||||||
Inventory,
|
Inventory,
|
||||||
InventorySource,
|
InventorySource,
|
||||||
UnifiedJob,
|
|
||||||
Job,
|
Job,
|
||||||
AdHocCommand,
|
AdHocCommand,
|
||||||
ProjectUpdate,
|
ProjectUpdate,
|
||||||
@@ -70,12 +65,11 @@ from awx.main.tasks.callback import (
|
|||||||
RunnerCallbackForProjectUpdate,
|
RunnerCallbackForProjectUpdate,
|
||||||
RunnerCallbackForSystemJob,
|
RunnerCallbackForSystemJob,
|
||||||
)
|
)
|
||||||
from awx.main.tasks.policy import evaluate_policy
|
|
||||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||||
from awx.main.tasks.receptor import AWXReceptorJob
|
from awx.main.tasks.receptor import AWXReceptorJob
|
||||||
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
||||||
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook
|
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook
|
||||||
from awx.main.exceptions import AwxTaskError, PolicyEvaluationError, PostRunError, ReceptorNodeNotFound
|
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||||
from awx.main.utils.ansible import read_ansible_config
|
from awx.main.utils.ansible import read_ansible_config
|
||||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||||
from awx.main.utils.common import (
|
from awx.main.utils.common import (
|
||||||
@@ -89,6 +83,8 @@ from awx.main.utils.common import (
|
|||||||
from awx.conf.license import get_license
|
from awx.conf.license import get_license
|
||||||
from awx.main.utils.handlers import SpecialInventoryHandler
|
from awx.main.utils.handlers import SpecialInventoryHandler
|
||||||
from awx.main.utils.update_model import update_model
|
from awx.main.utils.update_model import update_model
|
||||||
|
from rest_framework.exceptions import PermissionDenied
|
||||||
|
from django.utils.translation import gettext_lazy as _
|
||||||
|
|
||||||
# Django flags
|
# Django flags
|
||||||
from flags.state import flag_enabled
|
from flags.state import flag_enabled
|
||||||
@@ -115,15 +111,6 @@ def with_path_cleanup(f):
|
|||||||
return _wrapped
|
return _wrapped
|
||||||
|
|
||||||
|
|
||||||
@task(on_duplicate='queue_one', bind=True, queue=get_task_queuename)
|
|
||||||
def dispatch_waiting_jobs(binder):
|
|
||||||
for uj in UnifiedJob.objects.filter(status='waiting', controller_node=settings.CLUSTER_HOST_ID).only('id', 'status', 'polymorphic_ctype', 'celery_task_id'):
|
|
||||||
kwargs = uj.get_start_kwargs()
|
|
||||||
if not kwargs:
|
|
||||||
kwargs = {}
|
|
||||||
binder.control('run', data={'task': serialize_task(uj._get_task_class()), 'args': [uj.id], 'kwargs': kwargs, 'uuid': uj.celery_task_id})
|
|
||||||
|
|
||||||
|
|
||||||
class BaseTask(object):
|
class BaseTask(object):
|
||||||
model = None
|
model = None
|
||||||
event_model = None
|
event_model = None
|
||||||
@@ -131,7 +118,6 @@ class BaseTask(object):
|
|||||||
callback_class = RunnerCallback
|
callback_class = RunnerCallback
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.instance = None
|
|
||||||
self.cleanup_paths = []
|
self.cleanup_paths = []
|
||||||
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
|
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
|
||||||
self.runner_callback = self.callback_class(model=self.model)
|
self.runner_callback = self.callback_class(model=self.model)
|
||||||
@@ -319,8 +305,6 @@ class BaseTask(object):
|
|||||||
# Add ANSIBLE_* settings to the subprocess environment.
|
# Add ANSIBLE_* settings to the subprocess environment.
|
||||||
for attr in dir(settings):
|
for attr in dir(settings):
|
||||||
if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'):
|
if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'):
|
||||||
if attr == 'ANSIBLE_STANDARD_SETTINGS_FILES':
|
|
||||||
continue # special case intended only for dynaconf use
|
|
||||||
env[attr] = str(getattr(settings, attr))
|
env[attr] = str(getattr(settings, attr))
|
||||||
# Also set environment variables configured in AWX_TASK_ENV setting.
|
# Also set environment variables configured in AWX_TASK_ENV setting.
|
||||||
for key, value in settings.AWX_TASK_ENV.items():
|
for key, value in settings.AWX_TASK_ENV.items():
|
||||||
@@ -468,48 +452,27 @@ class BaseTask(object):
|
|||||||
def should_use_fact_cache(self):
|
def should_use_fact_cache(self):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def transition_status(self, pk: int) -> bool:
|
|
||||||
"""Atomically transition status to running, if False returned, another process got it"""
|
|
||||||
with transaction.atomic():
|
|
||||||
# Explanation of parts for the fetch:
|
|
||||||
# .values - avoid loading a full object, this is known to lead to deadlocks due to signals
|
|
||||||
# the signals load other related rows which another process may be locking, and happens in practice
|
|
||||||
# of=('self',) - keeps FK tables out of the lock list, another way deadlocks can happen
|
|
||||||
# .get - just load the single job
|
|
||||||
instance_data = UnifiedJob.objects.select_for_update(of=('self',)).values('status', 'cancel_flag').get(pk=pk)
|
|
||||||
|
|
||||||
# If status is not waiting (obtained under lock) then this process does not have clearence to run
|
|
||||||
if instance_data['status'] == 'waiting':
|
|
||||||
if instance_data['cancel_flag']:
|
|
||||||
updated_status = 'canceled'
|
|
||||||
else:
|
|
||||||
updated_status = 'running'
|
|
||||||
# Explanation of the update:
|
|
||||||
# .filter - again, do not load the full object
|
|
||||||
# .update - a bulk update on just that one row, avoid loading unintended data
|
|
||||||
UnifiedJob.objects.filter(pk=pk).update(status=updated_status, start_args='')
|
|
||||||
elif instance_data['status'] == 'running':
|
|
||||||
logger.info(f'Job {pk} is being ran by another process, exiting')
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
@with_path_cleanup
|
@with_path_cleanup
|
||||||
@with_signal_handling
|
@with_signal_handling
|
||||||
def run(self, pk, **kwargs):
|
def run(self, pk, **kwargs):
|
||||||
"""
|
"""
|
||||||
Run the job/task and capture its output.
|
Run the job/task and capture its output.
|
||||||
"""
|
"""
|
||||||
if not self.instance: # Used to skip fetch for local runs
|
self.instance = self.model.objects.get(pk=pk)
|
||||||
if not self.transition_status(pk):
|
if self.instance.status != 'canceled' and self.instance.cancel_flag:
|
||||||
logger.info(f'Job {pk} is being ran by another process, exiting')
|
self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
|
||||||
return
|
if self.instance.status not in ACTIVE_STATES:
|
||||||
|
# Prevent starting the job if it has been reaped or handled by another process.
|
||||||
|
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
|
||||||
|
|
||||||
# Load the instance
|
if self.instance.execution_environment_id is None:
|
||||||
self.instance = self.update_model(pk)
|
from awx.main.signals import disable_activity_stream
|
||||||
if self.instance.status != 'running':
|
|
||||||
logger.error(f'Not starting {self.instance.status} task pk={pk} because its status "{self.instance.status}" is not expected')
|
|
||||||
return
|
|
||||||
|
|
||||||
|
with disable_activity_stream():
|
||||||
|
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
|
||||||
|
|
||||||
|
# self.instance because of the update_model pattern and when it's used in callback handlers
|
||||||
|
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
|
||||||
self.instance.websocket_emit_status("running")
|
self.instance.websocket_emit_status("running")
|
||||||
status, rc = 'error', None
|
status, rc = 'error', None
|
||||||
self.runner_callback.event_ct = 0
|
self.runner_callback.event_ct = 0
|
||||||
@@ -522,20 +485,12 @@ class BaseTask(object):
|
|||||||
private_data_dir = None
|
private_data_dir = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if self.instance.execution_environment_id is None:
|
|
||||||
from awx.main.signals import disable_activity_stream
|
|
||||||
|
|
||||||
with disable_activity_stream():
|
|
||||||
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
|
|
||||||
|
|
||||||
self.instance.send_notification_templates("running")
|
self.instance.send_notification_templates("running")
|
||||||
private_data_dir = self.build_private_data_dir(self.instance)
|
private_data_dir = self.build_private_data_dir(self.instance)
|
||||||
self.pre_run_hook(self.instance, private_data_dir)
|
self.pre_run_hook(self.instance, private_data_dir)
|
||||||
evaluate_policy(self.instance)
|
|
||||||
self.build_project_dir(self.instance, private_data_dir)
|
self.build_project_dir(self.instance, private_data_dir)
|
||||||
self.instance.log_lifecycle("preparing_playbook")
|
self.instance.log_lifecycle("preparing_playbook")
|
||||||
if self.instance.cancel_flag or signal_callback():
|
if self.instance.cancel_flag or signal_callback():
|
||||||
logger.debug(f'detected pre-run cancel flag for {self.instance.log_format}')
|
|
||||||
self.instance = self.update_model(self.instance.pk, status='canceled')
|
self.instance = self.update_model(self.instance.pk, status='canceled')
|
||||||
|
|
||||||
if self.instance.status != 'running':
|
if self.instance.status != 'running':
|
||||||
@@ -658,11 +613,12 @@ class BaseTask(object):
|
|||||||
elif status == 'canceled':
|
elif status == 'canceled':
|
||||||
self.instance = self.update_model(pk)
|
self.instance = self.update_model(pk)
|
||||||
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
|
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
|
||||||
if cancel_flag_value is False:
|
if (cancel_flag_value is False) and signal_callback():
|
||||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
|
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
|
||||||
status = 'failed'
|
status = 'failed'
|
||||||
except PolicyEvaluationError as exc:
|
elif cancel_flag_value is False:
|
||||||
self.runner_callback.delay_update(job_explanation=str(exc), result_traceback=str(exc))
|
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
|
||||||
|
status = 'failed'
|
||||||
except ReceptorNodeNotFound as exc:
|
except ReceptorNodeNotFound as exc:
|
||||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||||
except Exception:
|
except Exception:
|
||||||
@@ -688,9 +644,6 @@ class BaseTask(object):
|
|||||||
|
|
||||||
# Field host_status_counts is used as a metric to check if event processing is finished
|
# Field host_status_counts is used as a metric to check if event processing is finished
|
||||||
# we send notifications if it is, if not, callback receiver will send them
|
# we send notifications if it is, if not, callback receiver will send them
|
||||||
if not self.instance:
|
|
||||||
logger.error(f'Unified job pk={pk} appears to be deleted while running')
|
|
||||||
return
|
|
||||||
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
|
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
|
||||||
events_processed_hook(self.instance)
|
events_processed_hook(self.instance)
|
||||||
|
|
||||||
@@ -787,7 +740,6 @@ class SourceControlMixin(BaseTask):
|
|||||||
try:
|
try:
|
||||||
# the job private_data_dir is passed so sync can download roles and collections there
|
# the job private_data_dir is passed so sync can download roles and collections there
|
||||||
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
|
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
|
||||||
sync_task.instance = local_project_sync # avoids "waiting" status check, performance
|
|
||||||
sync_task.run(local_project_sync.id)
|
sync_task.run(local_project_sync.id)
|
||||||
local_project_sync.refresh_from_db()
|
local_project_sync.refresh_from_db()
|
||||||
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
|
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
|
||||||
@@ -851,7 +803,7 @@ class SourceControlMixin(BaseTask):
|
|||||||
self.release_lock(project)
|
self.release_lock(project)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
class RunJob(SourceControlMixin, BaseTask):
|
class RunJob(SourceControlMixin, BaseTask):
|
||||||
"""
|
"""
|
||||||
Run a job using ansible-playbook.
|
Run a job using ansible-playbook.
|
||||||
@@ -1139,8 +1091,8 @@ class RunJob(SourceControlMixin, BaseTask):
|
|||||||
# where ansible expects to find it
|
# where ansible expects to find it
|
||||||
if self.should_use_fact_cache():
|
if self.should_use_fact_cache():
|
||||||
job.log_lifecycle("start_job_fact_cache")
|
job.log_lifecycle("start_job_fact_cache")
|
||||||
self.hosts_with_facts_cached = start_fact_cache(
|
self.facts_write_time, self.hosts_with_facts_cached = start_fact_cache(
|
||||||
job.get_hosts_for_fact_cache(), artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)), inventory_id=job.inventory_id
|
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
|
||||||
)
|
)
|
||||||
|
|
||||||
def build_project_dir(self, job, private_data_dir):
|
def build_project_dir(self, job, private_data_dir):
|
||||||
@@ -1150,7 +1102,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
|||||||
super(RunJob, self).post_run_hook(job, status)
|
super(RunJob, self).post_run_hook(job, status)
|
||||||
job.refresh_from_db(fields=['job_env'])
|
job.refresh_from_db(fields=['job_env'])
|
||||||
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
|
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
|
||||||
if not private_data_dir:
|
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
|
||||||
# If there's no private data dir, that means we didn't get into the
|
# If there's no private data dir, that means we didn't get into the
|
||||||
# actual `run()` call; this _usually_ means something failed in
|
# actual `run()` call; this _usually_ means something failed in
|
||||||
# the pre_run_hook method
|
# the pre_run_hook method
|
||||||
@@ -1158,7 +1110,9 @@ class RunJob(SourceControlMixin, BaseTask):
|
|||||||
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
|
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
|
||||||
job.log_lifecycle("finish_job_fact_cache")
|
job.log_lifecycle("finish_job_fact_cache")
|
||||||
finish_fact_cache(
|
finish_fact_cache(
|
||||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)),
|
self.hosts_with_facts_cached,
|
||||||
|
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
|
||||||
|
facts_write_time=self.facts_write_time,
|
||||||
job_id=job.id,
|
job_id=job.id,
|
||||||
inventory_id=job.inventory_id,
|
inventory_id=job.inventory_id,
|
||||||
)
|
)
|
||||||
@@ -1174,7 +1128,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
|||||||
update_inventory_computed_fields.delay(inventory.id)
|
update_inventory_computed_fields.delay(inventory.id)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
class RunProjectUpdate(BaseTask):
|
class RunProjectUpdate(BaseTask):
|
||||||
model = ProjectUpdate
|
model = ProjectUpdate
|
||||||
event_model = ProjectUpdateEvent
|
event_model = ProjectUpdateEvent
|
||||||
@@ -1513,7 +1467,7 @@ class RunProjectUpdate(BaseTask):
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||||
model = InventoryUpdate
|
model = InventoryUpdate
|
||||||
event_model = InventoryUpdateEvent
|
event_model = InventoryUpdateEvent
|
||||||
@@ -1624,7 +1578,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
|||||||
# Include any facts from input inventories so they can be used in filters
|
# Include any facts from input inventories so they can be used in filters
|
||||||
start_fact_cache(
|
start_fact_cache(
|
||||||
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
|
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
|
||||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(inventory_update.id)),
|
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
|
||||||
inventory_id=input_inventory.id,
|
inventory_id=input_inventory.id,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1776,7 +1730,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
|||||||
raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
|
raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
class RunAdHocCommand(BaseTask):
|
class RunAdHocCommand(BaseTask):
|
||||||
"""
|
"""
|
||||||
Run an ad hoc command using ansible.
|
Run an ad hoc command using ansible.
|
||||||
@@ -1929,7 +1883,7 @@ class RunAdHocCommand(BaseTask):
|
|||||||
return d
|
return d
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
class RunSystemJob(BaseTask):
|
class RunSystemJob(BaseTask):
|
||||||
model = SystemJob
|
model = SystemJob
|
||||||
event_model = SystemJobEvent
|
event_model = SystemJobEvent
|
||||||
|
|||||||
@@ -1,458 +0,0 @@
|
|||||||
import json
|
|
||||||
import tempfile
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
from pprint import pformat
|
|
||||||
|
|
||||||
from typing import Optional, Union
|
|
||||||
|
|
||||||
from django.conf import settings
|
|
||||||
from django.utils.translation import gettext_lazy as _
|
|
||||||
from opa_client import OpaClient
|
|
||||||
from opa_client.base import BaseClient
|
|
||||||
from requests import HTTPError
|
|
||||||
from rest_framework import serializers
|
|
||||||
from rest_framework import fields
|
|
||||||
|
|
||||||
from awx.main import models
|
|
||||||
from awx.main.exceptions import PolicyEvaluationError
|
|
||||||
|
|
||||||
|
|
||||||
# Monkey patching opa_client.base.BaseClient to fix retries and timeout settings
|
|
||||||
_original_opa_base_client_init = BaseClient.__init__
|
|
||||||
|
|
||||||
|
|
||||||
def _opa_base_client_init_fix(
|
|
||||||
self,
|
|
||||||
host: str = "localhost",
|
|
||||||
port: int = 8181,
|
|
||||||
version: str = "v1",
|
|
||||||
ssl: bool = False,
|
|
||||||
cert: Optional[Union[str, tuple]] = None,
|
|
||||||
headers: Optional[dict] = None,
|
|
||||||
retries: int = 2,
|
|
||||||
timeout: float = 1.5,
|
|
||||||
):
|
|
||||||
_original_opa_base_client_init(self, host, port, version, ssl, cert, headers)
|
|
||||||
self.retries = retries
|
|
||||||
self.timeout = timeout
|
|
||||||
|
|
||||||
|
|
||||||
BaseClient.__init__ = _opa_base_client_init_fix
|
|
||||||
|
|
||||||
|
|
||||||
class _TeamSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.Team
|
|
||||||
fields = ('id', 'name')
|
|
||||||
|
|
||||||
|
|
||||||
class _UserSerializer(serializers.ModelSerializer):
|
|
||||||
teams = serializers.SerializerMethodField()
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
model = models.User
|
|
||||||
fields = ('id', 'username', 'is_superuser', 'teams')
|
|
||||||
|
|
||||||
def get_teams(self, user: models.User):
|
|
||||||
teams = models.Team.access_qs(user, 'member')
|
|
||||||
return _TeamSerializer(many=True).to_representation(teams)
|
|
||||||
|
|
||||||
|
|
||||||
class _ExecutionEnvironmentSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.ExecutionEnvironment
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'image',
|
|
||||||
'pull',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _InstanceGroupSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.InstanceGroup
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'capacity',
|
|
||||||
'jobs_running',
|
|
||||||
'jobs_total',
|
|
||||||
'max_concurrent_jobs',
|
|
||||||
'max_forks',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _InventorySourceSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.InventorySource
|
|
||||||
fields = ('id', 'name', 'source', 'status')
|
|
||||||
|
|
||||||
|
|
||||||
class _InventorySerializer(serializers.ModelSerializer):
|
|
||||||
inventory_sources = _InventorySourceSerializer(many=True)
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
model = models.Inventory
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'description',
|
|
||||||
'kind',
|
|
||||||
'total_hosts',
|
|
||||||
'total_groups',
|
|
||||||
'has_inventory_sources',
|
|
||||||
'total_inventory_sources',
|
|
||||||
'has_active_failures',
|
|
||||||
'hosts_with_active_failures',
|
|
||||||
'inventory_sources',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _JobTemplateSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.JobTemplate
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'job_type',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _WorkflowJobTemplateSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.WorkflowJobTemplate
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'job_type',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _WorkflowJobSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.WorkflowJob
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _OrganizationSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.Organization
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _ProjectSerializer(serializers.ModelSerializer):
|
|
||||||
class Meta:
|
|
||||||
model = models.Project
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'status',
|
|
||||||
'scm_type',
|
|
||||||
'scm_url',
|
|
||||||
'scm_branch',
|
|
||||||
'scm_refspec',
|
|
||||||
'scm_clean',
|
|
||||||
'scm_track_submodules',
|
|
||||||
'scm_delete_on_update',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _CredentialSerializer(serializers.ModelSerializer):
|
|
||||||
organization = _OrganizationSerializer()
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
model = models.Credential
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'description',
|
|
||||||
'organization',
|
|
||||||
'credential_type',
|
|
||||||
'managed',
|
|
||||||
'kind',
|
|
||||||
'cloud',
|
|
||||||
'kubernetes',
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class _LabelSerializer(serializers.ModelSerializer):
|
|
||||||
organization = _OrganizationSerializer()
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
model = models.Label
|
|
||||||
fields = ('id', 'name', 'organization')
|
|
||||||
|
|
||||||
|
|
||||||
class JobSerializer(serializers.ModelSerializer):
|
|
||||||
created_by = _UserSerializer()
|
|
||||||
credentials = _CredentialSerializer(many=True)
|
|
||||||
execution_environment = _ExecutionEnvironmentSerializer()
|
|
||||||
instance_group = _InstanceGroupSerializer()
|
|
||||||
inventory = _InventorySerializer()
|
|
||||||
job_template = _JobTemplateSerializer()
|
|
||||||
labels = _LabelSerializer(many=True)
|
|
||||||
organization = _OrganizationSerializer()
|
|
||||||
project = _ProjectSerializer()
|
|
||||||
extra_vars = fields.SerializerMethodField()
|
|
||||||
hosts_count = fields.SerializerMethodField()
|
|
||||||
workflow_job = fields.SerializerMethodField()
|
|
||||||
workflow_job_template = fields.SerializerMethodField()
|
|
||||||
|
|
||||||
class Meta:
|
|
||||||
model = models.Job
|
|
||||||
fields = (
|
|
||||||
'id',
|
|
||||||
'name',
|
|
||||||
'created',
|
|
||||||
'created_by',
|
|
||||||
'credentials',
|
|
||||||
'execution_environment',
|
|
||||||
'extra_vars',
|
|
||||||
'forks',
|
|
||||||
'hosts_count',
|
|
||||||
'instance_group',
|
|
||||||
'inventory',
|
|
||||||
'job_template',
|
|
||||||
'job_type',
|
|
||||||
'job_type_name',
|
|
||||||
'labels',
|
|
||||||
'launch_type',
|
|
||||||
'limit',
|
|
||||||
'launched_by',
|
|
||||||
'organization',
|
|
||||||
'playbook',
|
|
||||||
'project',
|
|
||||||
'scm_branch',
|
|
||||||
'scm_revision',
|
|
||||||
'workflow_job',
|
|
||||||
'workflow_job_template',
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_extra_vars(self, obj: models.Job):
|
|
||||||
return json.loads(obj.display_extra_vars())
|
|
||||||
|
|
||||||
def get_hosts_count(self, obj: models.Job):
|
|
||||||
return obj.hosts.count()
|
|
||||||
|
|
||||||
def get_workflow_job(self, obj: models.Job):
|
|
||||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
|
||||||
if workflow_job is None:
|
|
||||||
return None
|
|
||||||
return _WorkflowJobSerializer().to_representation(workflow_job)
|
|
||||||
|
|
||||||
def get_workflow_job_template(self, obj: models.Job):
|
|
||||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
|
||||||
if workflow_job is None:
|
|
||||||
return None
|
|
||||||
|
|
||||||
workflow_job_template: models.WorkflowJobTemplate = workflow_job.workflow_job_template
|
|
||||||
if workflow_job_template is None:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return _WorkflowJobTemplateSerializer().to_representation(workflow_job_template)
|
|
||||||
|
|
||||||
|
|
||||||
class OPAResultSerializer(serializers.Serializer):
|
|
||||||
allowed = fields.BooleanField(required=True)
|
|
||||||
violations = fields.ListField(child=fields.CharField())
|
|
||||||
|
|
||||||
|
|
||||||
class OPA_AUTH_TYPES:
|
|
||||||
NONE = 'None'
|
|
||||||
TOKEN = 'Token'
|
|
||||||
CERTIFICATE = 'Certificate'
|
|
||||||
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def opa_cert_file():
|
|
||||||
"""
|
|
||||||
Context manager that creates temporary certificate files for OPA authentication.
|
|
||||||
|
|
||||||
For mTLS (mutual TLS), we need:
|
|
||||||
- Client certificate and key for client authentication
|
|
||||||
- CA certificate (optional) for server verification
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
tuple: (client_cert_path, verify_path)
|
|
||||||
- client_cert_path: Path to client cert file or None if not using client cert
|
|
||||||
- verify_path: Path to CA cert file, True to use system CA store, or False for no verification
|
|
||||||
"""
|
|
||||||
client_cert_temp = None
|
|
||||||
ca_temp = None
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Case 1: Full mTLS with client cert and optional CA cert
|
|
||||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
|
||||||
# Create client certificate file (required for mTLS)
|
|
||||||
client_cert_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
|
||||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_CERT)
|
|
||||||
client_cert_temp.write("\n")
|
|
||||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_KEY)
|
|
||||||
client_cert_temp.write("\n")
|
|
||||||
client_cert_temp.flush()
|
|
||||||
|
|
||||||
# If CA cert is provided, use it for server verification
|
|
||||||
# Otherwise, use system CA store (True)
|
|
||||||
if settings.OPA_AUTH_CA_CERT:
|
|
||||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
|
||||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
|
||||||
ca_temp.write("\n")
|
|
||||||
ca_temp.flush()
|
|
||||||
verify_path = ca_temp.name
|
|
||||||
else:
|
|
||||||
verify_path = True # Use system CA store
|
|
||||||
|
|
||||||
yield (client_cert_temp.name, verify_path)
|
|
||||||
|
|
||||||
# Case 2: TLS with only server verification (no client cert)
|
|
||||||
elif settings.OPA_SSL:
|
|
||||||
# If CA cert is provided, use it for server verification
|
|
||||||
# Otherwise, use system CA store (True)
|
|
||||||
if settings.OPA_AUTH_CA_CERT:
|
|
||||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
|
||||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
|
||||||
ca_temp.write("\n")
|
|
||||||
ca_temp.flush()
|
|
||||||
verify_path = ca_temp.name
|
|
||||||
else:
|
|
||||||
verify_path = True # Use system CA store
|
|
||||||
|
|
||||||
yield (None, verify_path)
|
|
||||||
|
|
||||||
# Case 3: No TLS
|
|
||||||
else:
|
|
||||||
yield (None, False)
|
|
||||||
|
|
||||||
finally:
|
|
||||||
# Clean up temporary files
|
|
||||||
if client_cert_temp:
|
|
||||||
client_cert_temp.close()
|
|
||||||
if ca_temp:
|
|
||||||
ca_temp.close()
|
|
||||||
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def opa_client(headers=None):
|
|
||||||
with opa_cert_file() as cert_files:
|
|
||||||
cert, verify = cert_files
|
|
||||||
|
|
||||||
with OpaClient(
|
|
||||||
host=settings.OPA_HOST,
|
|
||||||
port=settings.OPA_PORT,
|
|
||||||
headers=headers,
|
|
||||||
ssl=settings.OPA_SSL,
|
|
||||||
cert=cert,
|
|
||||||
timeout=settings.OPA_REQUEST_TIMEOUT,
|
|
||||||
retries=settings.OPA_REQUEST_RETRIES,
|
|
||||||
) as client:
|
|
||||||
# Workaround for https://github.com/Turall/OPA-python-client/issues/32
|
|
||||||
# by directly setting cert and verify on requests.session
|
|
||||||
client._session.cert = cert
|
|
||||||
client._session.verify = verify
|
|
||||||
|
|
||||||
yield client
|
|
||||||
|
|
||||||
|
|
||||||
def evaluate_policy(instance):
|
|
||||||
# Policy evaluation for Policy as Code feature
|
|
||||||
if not settings.OPA_HOST:
|
|
||||||
return
|
|
||||||
|
|
||||||
if not isinstance(instance, models.Job):
|
|
||||||
return
|
|
||||||
|
|
||||||
instance.log_lifecycle("evaluate_policy")
|
|
||||||
|
|
||||||
input_data = JobSerializer(instance=instance).data
|
|
||||||
|
|
||||||
headers = settings.OPA_AUTH_CUSTOM_HEADERS
|
|
||||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.TOKEN:
|
|
||||||
headers.update({'Authorization': 'Bearer {}'.format(settings.OPA_AUTH_TOKEN)})
|
|
||||||
|
|
||||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE and not settings.OPA_SSL:
|
|
||||||
raise PolicyEvaluationError(_('OPA_AUTH_TYPE=Certificate requires OPA_SSL to be enabled.'))
|
|
||||||
|
|
||||||
cert_settings_missing = []
|
|
||||||
|
|
||||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
|
||||||
if not settings.OPA_AUTH_CLIENT_CERT:
|
|
||||||
cert_settings_missing += ['OPA_AUTH_CLIENT_CERT']
|
|
||||||
if not settings.OPA_AUTH_CLIENT_KEY:
|
|
||||||
cert_settings_missing += ['OPA_AUTH_CLIENT_KEY']
|
|
||||||
if not settings.OPA_AUTH_CA_CERT:
|
|
||||||
cert_settings_missing += ['OPA_AUTH_CA_CERT']
|
|
||||||
|
|
||||||
if cert_settings_missing:
|
|
||||||
raise PolicyEvaluationError(_('Following certificate settings are missing for OPA_AUTH_TYPE=Certificate: {}').format(cert_settings_missing))
|
|
||||||
|
|
||||||
query_paths = [
|
|
||||||
('Organization', instance.organization.opa_query_path),
|
|
||||||
('Inventory', instance.inventory.opa_query_path),
|
|
||||||
('Job template', instance.job_template.opa_query_path),
|
|
||||||
]
|
|
||||||
violations = dict()
|
|
||||||
errors = dict()
|
|
||||||
|
|
||||||
try:
|
|
||||||
with opa_client(headers=headers) as client:
|
|
||||||
for path_type, query_path in query_paths:
|
|
||||||
response = dict()
|
|
||||||
try:
|
|
||||||
if not query_path:
|
|
||||||
continue
|
|
||||||
|
|
||||||
response = client.query_rule(input_data=input_data, package_path=query_path)
|
|
||||||
|
|
||||||
except HTTPError as e:
|
|
||||||
message = _('Call to OPA failed. Exception: {}').format(e)
|
|
||||||
try:
|
|
||||||
error_data = e.response.json()
|
|
||||||
except ValueError:
|
|
||||||
errors[path_type] = message
|
|
||||||
continue
|
|
||||||
|
|
||||||
error_code = error_data.get("code")
|
|
||||||
error_message = error_data.get("message")
|
|
||||||
if error_code or error_message:
|
|
||||||
message = _('Call to OPA failed. Code: {}, Message: {}').format(error_code, error_message)
|
|
||||||
errors[path_type] = message
|
|
||||||
continue
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
errors[path_type] = _('Call to OPA failed. Exception: {}').format(e)
|
|
||||||
continue
|
|
||||||
|
|
||||||
result = response.get('result')
|
|
||||||
if result is None:
|
|
||||||
errors[path_type] = _('Call to OPA did not return a "result" property. The path refers to an undefined document.')
|
|
||||||
continue
|
|
||||||
|
|
||||||
result_serializer = OPAResultSerializer(data=result)
|
|
||||||
if not result_serializer.is_valid():
|
|
||||||
errors[path_type] = _('OPA policy returned invalid result.')
|
|
||||||
continue
|
|
||||||
|
|
||||||
result_data = result_serializer.validated_data
|
|
||||||
if not result_data.get("allowed") and (result_violations := result_data.get("violations")):
|
|
||||||
violations[path_type] = result_violations
|
|
||||||
|
|
||||||
format_results = dict()
|
|
||||||
if any(errors[e] for e in errors):
|
|
||||||
format_results["Errors"] = errors
|
|
||||||
|
|
||||||
if any(violations[v] for v in violations):
|
|
||||||
format_results["Violations"] = violations
|
|
||||||
|
|
||||||
if violations or errors:
|
|
||||||
raise PolicyEvaluationError(pformat(format_results, width=80))
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
raise PolicyEvaluationError(_('This job cannot be executed due to a policy violation or error. See the following details:\n{}').format(e))
|
|
||||||
@@ -32,7 +32,7 @@ from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
|
|||||||
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
||||||
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
|
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
from awx.main.dispatch.publish import task
|
||||||
|
|
||||||
# Receptorctl
|
# Receptorctl
|
||||||
from receptorctl.socket_interface import ReceptorControl
|
from receptorctl.socket_interface import ReceptorControl
|
||||||
@@ -852,7 +852,7 @@ def reload_receptor():
|
|||||||
raise RuntimeError("Receptor reload failed")
|
raise RuntimeError("Receptor reload failed")
|
||||||
|
|
||||||
|
|
||||||
@task_awx()
|
@task()
|
||||||
def write_receptor_config():
|
def write_receptor_config():
|
||||||
"""
|
"""
|
||||||
This task runs async on each control node, K8S only.
|
This task runs async on each control node, K8S only.
|
||||||
@@ -875,7 +875,7 @@ def write_receptor_config():
|
|||||||
reload_receptor()
|
reload_receptor()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def remove_deprovisioned_node(hostname):
|
def remove_deprovisioned_node(hostname):
|
||||||
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||||
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||||
|
|||||||
@@ -14,21 +14,16 @@ class SignalExit(Exception):
|
|||||||
|
|
||||||
|
|
||||||
class SignalState:
|
class SignalState:
|
||||||
# SIGTERM: Sent by supervisord to process group on shutdown
|
|
||||||
# SIGUSR1: The dispatcherd cancel signal
|
|
||||||
signals = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)
|
|
||||||
|
|
||||||
def reset(self):
|
def reset(self):
|
||||||
for for_signal in self.signals:
|
self.sigterm_flag = False
|
||||||
self.signal_flags[for_signal] = False
|
self.sigint_flag = False
|
||||||
self.original_methods[for_signal] = None
|
|
||||||
|
|
||||||
self.is_active = False # for nested context managers
|
self.is_active = False # for nested context managers
|
||||||
|
self.original_sigterm = None
|
||||||
|
self.original_sigint = None
|
||||||
self.raise_exception = False
|
self.raise_exception = False
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.signal_flags = {}
|
|
||||||
self.original_methods = {}
|
|
||||||
self.reset()
|
self.reset()
|
||||||
|
|
||||||
def raise_if_needed(self):
|
def raise_if_needed(self):
|
||||||
@@ -36,28 +31,31 @@ class SignalState:
|
|||||||
self.raise_exception = False # so it is not raised a second time in error handling
|
self.raise_exception = False # so it is not raised a second time in error handling
|
||||||
raise SignalExit()
|
raise SignalExit()
|
||||||
|
|
||||||
def set_signal_flag(self, *args, for_signal=None):
|
def set_sigterm_flag(self, *args):
|
||||||
self.signal_flags[for_signal] = True
|
self.sigterm_flag = True
|
||||||
logger.info(f'Processed signal {for_signal}, set exit flag')
|
self.raise_if_needed()
|
||||||
|
|
||||||
|
def set_sigint_flag(self, *args):
|
||||||
|
self.sigint_flag = True
|
||||||
self.raise_if_needed()
|
self.raise_if_needed()
|
||||||
|
|
||||||
def connect_signals(self):
|
def connect_signals(self):
|
||||||
for for_signal in self.signals:
|
self.original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||||
self.original_methods[for_signal] = signal.getsignal(for_signal)
|
self.original_sigint = signal.getsignal(signal.SIGINT)
|
||||||
signal.signal(for_signal, lambda *args, for_signal=for_signal: self.set_signal_flag(*args, for_signal=for_signal))
|
signal.signal(signal.SIGTERM, self.set_sigterm_flag)
|
||||||
|
signal.signal(signal.SIGINT, self.set_sigint_flag)
|
||||||
self.is_active = True
|
self.is_active = True
|
||||||
|
|
||||||
def restore_signals(self):
|
def restore_signals(self):
|
||||||
for for_signal in self.signals:
|
signal.signal(signal.SIGTERM, self.original_sigterm)
|
||||||
original_method = self.original_methods[for_signal]
|
signal.signal(signal.SIGINT, self.original_sigint)
|
||||||
signal.signal(for_signal, original_method)
|
# if we got a signal while context manager was active, call parent methods.
|
||||||
# if we got a signal while context manager was active, call parent methods.
|
if self.sigterm_flag:
|
||||||
if self.signal_flags[for_signal]:
|
if callable(self.original_sigterm):
|
||||||
if callable(original_method):
|
self.original_sigterm()
|
||||||
try:
|
if self.sigint_flag:
|
||||||
original_method()
|
if callable(self.original_sigint):
|
||||||
except Exception as exc:
|
self.original_sigint()
|
||||||
logger.info(f'Error processing original {for_signal} signal, error: {str(exc)}')
|
|
||||||
self.reset()
|
self.reset()
|
||||||
|
|
||||||
|
|
||||||
@@ -65,7 +63,7 @@ signal_state = SignalState()
|
|||||||
|
|
||||||
|
|
||||||
def signal_callback():
|
def signal_callback():
|
||||||
return any(signal_state.signal_flags[for_signal] for for_signal in signal_state.signals)
|
return bool(signal_state.sigterm_flag or signal_state.sigint_flag)
|
||||||
|
|
||||||
|
|
||||||
def with_signal_handling(f):
|
def with_signal_handling(f):
|
||||||
|
|||||||
@@ -1,77 +1,78 @@
|
|||||||
# Python
|
# Python
|
||||||
|
from collections import namedtuple
|
||||||
import functools
|
import functools
|
||||||
import importlib
|
import importlib
|
||||||
import itertools
|
import itertools
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import psycopg
|
||||||
|
from io import StringIO
|
||||||
|
from contextlib import redirect_stdout
|
||||||
import shutil
|
import shutil
|
||||||
import time
|
import time
|
||||||
from collections import namedtuple
|
|
||||||
from contextlib import redirect_stdout
|
|
||||||
from datetime import datetime
|
|
||||||
from distutils.version import LooseVersion as Version
|
from distutils.version import LooseVersion as Version
|
||||||
from io import StringIO
|
from datetime import datetime
|
||||||
|
|
||||||
# Runner
|
# Django
|
||||||
import ansible_runner.cleanup
|
from django.conf import settings
|
||||||
import psycopg
|
from django.db import connection, transaction, DatabaseError, IntegrityError
|
||||||
from ansible_base.lib.utils.db import advisory_lock
|
from django.db.models.fields.related import ForeignKey
|
||||||
|
from django.utils.timezone import now, timedelta
|
||||||
# django-ansible-base
|
from django.utils.encoding import smart_str
|
||||||
from ansible_base.resource_registry.tasks.sync import SyncExecutor
|
from django.contrib.auth.models import User
|
||||||
|
from django.utils.translation import gettext_lazy as _
|
||||||
|
from django.utils.translation import gettext_noop
|
||||||
|
from django.core.cache import cache
|
||||||
|
from django.core.exceptions import ObjectDoesNotExist
|
||||||
|
from django.db.models.query import QuerySet
|
||||||
|
|
||||||
# Django-CRUM
|
# Django-CRUM
|
||||||
from crum import impersonate
|
from crum import impersonate
|
||||||
|
|
||||||
|
# Django flags
|
||||||
|
from flags.state import flag_enabled
|
||||||
|
|
||||||
|
# Runner
|
||||||
|
import ansible_runner.cleanup
|
||||||
|
|
||||||
# dateutil
|
# dateutil
|
||||||
from dateutil.parser import parse as parse_date
|
from dateutil.parser import parse as parse_date
|
||||||
|
|
||||||
# Django
|
# django-ansible-base
|
||||||
from django.conf import settings
|
from ansible_base.resource_registry.tasks.sync import SyncExecutor
|
||||||
from django.contrib.auth.models import User
|
from ansible_base.lib.utils.db import advisory_lock
|
||||||
from django.core.cache import cache
|
|
||||||
from django.core.exceptions import ObjectDoesNotExist
|
|
||||||
from django.db import DatabaseError, IntegrityError, connection, transaction
|
|
||||||
from django.db.models.fields.related import ForeignKey
|
|
||||||
from django.db.models.query import QuerySet
|
|
||||||
from django.utils.encoding import smart_str
|
|
||||||
from django.utils.timezone import now, timedelta
|
|
||||||
from django.utils.translation import gettext_lazy as _
|
|
||||||
from django.utils.translation import gettext_noop
|
|
||||||
|
|
||||||
# Django flags
|
|
||||||
from flags.state import flag_enabled
|
|
||||||
from rest_framework.exceptions import PermissionDenied
|
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx import __version__ as awx_application_version
|
from awx import __version__ as awx_application_version
|
||||||
from awx.conf import settings_registry
|
|
||||||
from awx.main import analytics
|
|
||||||
from awx.main.access import access_registry
|
from awx.main.access import access_registry
|
||||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
|
|
||||||
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
|
|
||||||
from awx.main.consumers import emit_channel_notification
|
|
||||||
from awx.main.dispatch import get_task_queuename, reaper
|
|
||||||
from awx.main.dispatch.publish import task as task_awx
|
|
||||||
from awx.main.models import (
|
from awx.main.models import (
|
||||||
|
Schedule,
|
||||||
|
TowerScheduleState,
|
||||||
Instance,
|
Instance,
|
||||||
InstanceGroup,
|
InstanceGroup,
|
||||||
Inventory,
|
|
||||||
Job,
|
|
||||||
Notification,
|
|
||||||
Schedule,
|
|
||||||
SmartInventoryMembership,
|
|
||||||
TowerScheduleState,
|
|
||||||
UnifiedJob,
|
UnifiedJob,
|
||||||
|
Notification,
|
||||||
|
Inventory,
|
||||||
|
SmartInventoryMembership,
|
||||||
|
Job,
|
||||||
convert_jsonfields,
|
convert_jsonfields,
|
||||||
)
|
)
|
||||||
|
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
|
||||||
|
from awx.main.dispatch.publish import task
|
||||||
|
from awx.main.dispatch import get_task_queuename, reaper
|
||||||
|
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
||||||
|
|
||||||
|
from awx.main.utils.reload import stop_local_services
|
||||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||||
from awx.main.tasks.host_indirect import save_indirect_host_entries
|
from awx.main.tasks.host_indirect import save_indirect_host_entries
|
||||||
from awx.main.tasks.receptor import administrative_workunit_reaper, get_receptor_ctl, worker_cleanup, worker_info, write_receptor_config
|
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
|
||||||
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
from awx.main.consumers import emit_channel_notification
|
||||||
from awx.main.utils.reload import stop_local_services
|
from awx.main import analytics
|
||||||
from dispatcherd.publish import task
|
from awx.conf import settings_registry
|
||||||
|
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
|
||||||
|
|
||||||
|
from rest_framework.exceptions import PermissionDenied
|
||||||
|
|
||||||
logger = logging.getLogger('awx.main.tasks.system')
|
logger = logging.getLogger('awx.main.tasks.system')
|
||||||
|
|
||||||
@@ -82,12 +83,7 @@ Try upgrading OpenSSH or providing your private key in an different format. \
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
|
|
||||||
def _run_dispatch_startup_common():
|
def dispatch_startup():
|
||||||
"""
|
|
||||||
Execute the common startup initialization steps.
|
|
||||||
This includes updating schedules, syncing instance membership, and starting
|
|
||||||
local reaping and resetting metrics.
|
|
||||||
"""
|
|
||||||
startup_logger = logging.getLogger('awx.main.tasks')
|
startup_logger = logging.getLogger('awx.main.tasks')
|
||||||
|
|
||||||
# TODO: Enable this on VM installs
|
# TODO: Enable this on VM installs
|
||||||
@@ -97,14 +93,14 @@ def _run_dispatch_startup_common():
|
|||||||
try:
|
try:
|
||||||
convert_jsonfields()
|
convert_jsonfields()
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Failed JSON field conversion, skipping.")
|
logger.exception("Failed json field conversion, skipping.")
|
||||||
|
|
||||||
startup_logger.debug("Syncing schedules")
|
startup_logger.debug("Syncing Schedules")
|
||||||
for sch in Schedule.objects.all():
|
for sch in Schedule.objects.all():
|
||||||
try:
|
try:
|
||||||
sch.update_computed_fields()
|
sch.update_computed_fields()
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Failed to rebuild schedule %s.", sch)
|
logger.exception("Failed to rebuild schedule {}.".format(sch))
|
||||||
|
|
||||||
#
|
#
|
||||||
# When the dispatcher starts, if the instance cannot be found in the database,
|
# When the dispatcher starts, if the instance cannot be found in the database,
|
||||||
@@ -124,67 +120,25 @@ def _run_dispatch_startup_common():
|
|||||||
apply_cluster_membership_policies()
|
apply_cluster_membership_policies()
|
||||||
cluster_node_heartbeat()
|
cluster_node_heartbeat()
|
||||||
reaper.startup_reaping()
|
reaper.startup_reaping()
|
||||||
|
reaper.reap_waiting(grace_period=0)
|
||||||
m = DispatcherMetrics()
|
m = DispatcherMetrics()
|
||||||
m.reset_values()
|
m.reset_values()
|
||||||
|
|
||||||
|
|
||||||
def _legacy_dispatch_startup():
|
|
||||||
"""
|
|
||||||
Legacy branch for startup: simply performs reaping of waiting jobs with a zero grace period.
|
|
||||||
"""
|
|
||||||
logger.debug("Legacy dispatcher: calling reaper.reap_waiting with grace_period=0")
|
|
||||||
reaper.reap_waiting(grace_period=0)
|
|
||||||
|
|
||||||
|
|
||||||
def _dispatcherd_dispatch_startup():
|
|
||||||
"""
|
|
||||||
New dispatcherd branch for startup: uses the control API to re-submit waiting jobs.
|
|
||||||
"""
|
|
||||||
logger.debug("Dispatcherd enabled: dispatching waiting jobs via control channel")
|
|
||||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
|
||||||
|
|
||||||
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
|
|
||||||
|
|
||||||
|
|
||||||
def dispatch_startup():
|
|
||||||
"""
|
|
||||||
System initialization at startup.
|
|
||||||
First, execute the common logic.
|
|
||||||
Then, if FEATURE_DISPATCHERD_ENABLED is enabled, re-submit waiting jobs via the control API;
|
|
||||||
otherwise, fall back to legacy reaping of waiting jobs.
|
|
||||||
"""
|
|
||||||
_run_dispatch_startup_common()
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
|
||||||
_dispatcherd_dispatch_startup()
|
|
||||||
else:
|
|
||||||
_legacy_dispatch_startup()
|
|
||||||
|
|
||||||
|
|
||||||
def inform_cluster_of_shutdown():
|
def inform_cluster_of_shutdown():
|
||||||
"""
|
|
||||||
Clean system shutdown that marks the current instance offline.
|
|
||||||
In legacy mode, it also reaps waiting jobs.
|
|
||||||
In dispatcherd mode, it relies on dispatcherd's built-in cleanup.
|
|
||||||
"""
|
|
||||||
try:
|
try:
|
||||||
inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
||||||
inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
|
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
|
||||||
except Instance.DoesNotExist:
|
|
||||||
logger.exception("Cluster host not found: %s", settings.CLUSTER_HOST_ID)
|
|
||||||
return
|
|
||||||
|
|
||||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
|
||||||
logger.debug("Dispatcherd mode: no extra reaping required for instance %s", inst.hostname)
|
|
||||||
else:
|
|
||||||
try:
|
try:
|
||||||
logger.debug("Legacy mode: reaping waiting jobs for instance %s", inst.hostname)
|
reaper.reap_waiting(this_inst, grace_period=0)
|
||||||
reaper.reap_waiting(inst, grace_period=0)
|
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Failed to reap waiting jobs for %s", inst.hostname)
|
logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
|
||||||
logger.warning("Normal shutdown processed for instance %s; instance removed from capacity pool.", inst.hostname)
|
logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
|
||||||
|
except Exception:
|
||||||
|
logger.exception('Encountered problem with normal shutdown signal.')
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def migrate_jsonfield(table, pkfield, columns):
|
def migrate_jsonfield(table, pkfield, columns):
|
||||||
batchsize = 10000
|
batchsize = 10000
|
||||||
with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
|
with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
|
||||||
@@ -230,7 +184,7 @@ def migrate_jsonfield(table, pkfield, columns):
|
|||||||
logger.warning(f"Migration of {table} to jsonb is finished.")
|
logger.warning(f"Migration of {table} to jsonb is finished.")
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def apply_cluster_membership_policies():
|
def apply_cluster_membership_policies():
|
||||||
from awx.main.signals import disable_activity_stream
|
from awx.main.signals import disable_activity_stream
|
||||||
|
|
||||||
@@ -342,7 +296,7 @@ def apply_cluster_membership_policies():
|
|||||||
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
|
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue='tower_settings_change')
|
@task(queue='tower_settings_change')
|
||||||
def clear_setting_cache(setting_keys):
|
def clear_setting_cache(setting_keys):
|
||||||
# log that cache is being cleared
|
# log that cache is being cleared
|
||||||
logger.info(f"clear_setting_cache of keys {setting_keys}")
|
logger.info(f"clear_setting_cache of keys {setting_keys}")
|
||||||
@@ -355,7 +309,7 @@ def clear_setting_cache(setting_keys):
|
|||||||
cache.delete_many(cache_keys)
|
cache.delete_many(cache_keys)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue='tower_broadcast_all')
|
@task(queue='tower_broadcast_all')
|
||||||
def delete_project_files(project_path):
|
def delete_project_files(project_path):
|
||||||
# TODO: possibly implement some retry logic
|
# TODO: possibly implement some retry logic
|
||||||
lock_file = project_path + '.lock'
|
lock_file = project_path + '.lock'
|
||||||
@@ -373,7 +327,7 @@ def delete_project_files(project_path):
|
|||||||
logger.exception('Could not remove lock file {}'.format(lock_file))
|
logger.exception('Could not remove lock file {}'.format(lock_file))
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue='tower_broadcast_all')
|
@task(queue='tower_broadcast_all')
|
||||||
def profile_sql(threshold=1, minutes=1):
|
def profile_sql(threshold=1, minutes=1):
|
||||||
if threshold <= 0:
|
if threshold <= 0:
|
||||||
cache.delete('awx-profile-sql-threshold')
|
cache.delete('awx-profile-sql-threshold')
|
||||||
@@ -383,7 +337,7 @@ def profile_sql(threshold=1, minutes=1):
|
|||||||
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
|
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def send_notifications(notification_list, job_id=None):
|
def send_notifications(notification_list, job_id=None):
|
||||||
if not isinstance(notification_list, list):
|
if not isinstance(notification_list, list):
|
||||||
raise TypeError("notification_list should be of type list")
|
raise TypeError("notification_list should be of type list")
|
||||||
@@ -428,13 +382,13 @@ def events_processed_hook(unified_job):
|
|||||||
save_indirect_host_entries.delay(unified_job.id)
|
save_indirect_host_entries.delay(unified_job.id)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def gather_analytics():
|
def gather_analytics():
|
||||||
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||||
analytics.gather()
|
analytics.gather()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def purge_old_stdout_files():
|
def purge_old_stdout_files():
|
||||||
nowtime = time.time()
|
nowtime = time.time()
|
||||||
for f in os.listdir(settings.JOBOUTPUT_ROOT):
|
for f in os.listdir(settings.JOBOUTPUT_ROOT):
|
||||||
@@ -496,18 +450,18 @@ class CleanupImagesAndFiles:
|
|||||||
cls.run_remote(this_inst, **kwargs)
|
cls.run_remote(this_inst, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue='tower_broadcast_all')
|
@task(queue='tower_broadcast_all')
|
||||||
def handle_removed_image(remove_images=None):
|
def handle_removed_image(remove_images=None):
|
||||||
"""Special broadcast invocation of this method to handle case of deleted EE"""
|
"""Special broadcast invocation of this method to handle case of deleted EE"""
|
||||||
CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='')
|
CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='')
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def cleanup_images_and_files():
|
def cleanup_images_and_files():
|
||||||
CleanupImagesAndFiles.run(image_prune=True)
|
CleanupImagesAndFiles.run(image_prune=True)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def cluster_node_health_check(node):
|
def cluster_node_health_check(node):
|
||||||
"""
|
"""
|
||||||
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
|
Used for the health check endpoint, refreshes the status of the instance, but must be ran on target node
|
||||||
@@ -526,7 +480,7 @@ def cluster_node_health_check(node):
|
|||||||
this_inst.local_health_check()
|
this_inst.local_health_check()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def execution_node_health_check(node):
|
def execution_node_health_check(node):
|
||||||
if node == '':
|
if node == '':
|
||||||
logger.warning('Remote health check incorrectly called with blank string')
|
logger.warning('Remote health check incorrectly called with blank string')
|
||||||
@@ -594,16 +548,8 @@ def inspect_established_receptor_connections(mesh_status):
|
|||||||
def inspect_execution_and_hop_nodes(instance_list):
|
def inspect_execution_and_hop_nodes(instance_list):
|
||||||
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
|
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
|
||||||
node_lookup = {inst.hostname: inst for inst in instance_list}
|
node_lookup = {inst.hostname: inst for inst in instance_list}
|
||||||
try:
|
ctl = get_receptor_ctl()
|
||||||
ctl = get_receptor_ctl()
|
mesh_status = ctl.simple_command('status')
|
||||||
except FileNotFoundError:
|
|
||||||
logger.error('Receptor daemon not running, skipping execution node check')
|
|
||||||
return
|
|
||||||
try:
|
|
||||||
mesh_status = ctl.simple_command('status')
|
|
||||||
except ValueError as exc:
|
|
||||||
logger.error(f'Error running receptorctl status command, error: {str(exc)}')
|
|
||||||
return
|
|
||||||
|
|
||||||
inspect_established_receptor_connections(mesh_status)
|
inspect_established_receptor_connections(mesh_status)
|
||||||
|
|
||||||
@@ -651,109 +597,8 @@ def inspect_execution_and_hop_nodes(instance_list):
|
|||||||
execution_node_health_check.apply_async([hostname])
|
execution_node_health_check.apply_async([hostname])
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||||
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||||
"""
|
|
||||||
Original implementation for AWX dispatcher.
|
|
||||||
Uses worker_tasks from bind_kwargs to track running tasks.
|
|
||||||
"""
|
|
||||||
# Run common instance management logic
|
|
||||||
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
|
|
||||||
if this_inst is None:
|
|
||||||
return # Early return case from instance management
|
|
||||||
|
|
||||||
# Check versions
|
|
||||||
_heartbeat_check_versions(this_inst, instance_list)
|
|
||||||
|
|
||||||
# Handle lost instances
|
|
||||||
_heartbeat_handle_lost_instances(lost_instances, this_inst)
|
|
||||||
|
|
||||||
# Run local reaper - original implementation using worker_tasks
|
|
||||||
if worker_tasks is not None:
|
|
||||||
active_task_ids = []
|
|
||||||
for task_list in worker_tasks.values():
|
|
||||||
active_task_ids.extend(task_list)
|
|
||||||
|
|
||||||
# Convert dispatch_time to datetime
|
|
||||||
ref_time = datetime.fromisoformat(dispatch_time) if dispatch_time else now()
|
|
||||||
|
|
||||||
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
|
||||||
|
|
||||||
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
|
|
||||||
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
|
||||||
|
|
||||||
|
|
||||||
@task(queue=get_task_queuename, bind=True)
|
|
||||||
def adispatch_cluster_node_heartbeat(binder):
|
|
||||||
"""
|
|
||||||
Dispatcherd implementation.
|
|
||||||
Uses Control API to get running tasks.
|
|
||||||
"""
|
|
||||||
# Run common instance management logic
|
|
||||||
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
|
|
||||||
if this_inst is None:
|
|
||||||
return # Early return case from instance management
|
|
||||||
|
|
||||||
# Check versions
|
|
||||||
_heartbeat_check_versions(this_inst, instance_list)
|
|
||||||
|
|
||||||
# Handle lost instances
|
|
||||||
_heartbeat_handle_lost_instances(lost_instances, this_inst)
|
|
||||||
|
|
||||||
# Get running tasks using dispatcherd API
|
|
||||||
active_task_ids = _get_active_task_ids_from_dispatcherd(binder)
|
|
||||||
if active_task_ids is None:
|
|
||||||
logger.warning("No active task IDs retrieved from dispatcherd, skipping reaper")
|
|
||||||
return # Failed to get task IDs, don't attempt reaping
|
|
||||||
|
|
||||||
# Run local reaper using tasks from dispatcherd
|
|
||||||
ref_time = now() # No dispatch_time in dispatcherd version
|
|
||||||
logger.debug(f"Running reaper with {len(active_task_ids)} excluded UUIDs")
|
|
||||||
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
|
||||||
# If waiting jobs are hanging out, resubmit them
|
|
||||||
if UnifiedJob.objects.filter(controller_node=settings.CLUSTER_HOST_ID, status='waiting').exists():
|
|
||||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
|
||||||
|
|
||||||
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
|
|
||||||
|
|
||||||
|
|
||||||
def _get_active_task_ids_from_dispatcherd(binder):
|
|
||||||
"""
|
|
||||||
Retrieve active task IDs from the dispatcherd control API.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
list: List of active task UUIDs
|
|
||||||
None: If there was an error retrieving the data
|
|
||||||
"""
|
|
||||||
active_task_ids = []
|
|
||||||
try:
|
|
||||||
|
|
||||||
logger.debug("Querying dispatcherd API for running tasks")
|
|
||||||
data = binder.control('running')
|
|
||||||
|
|
||||||
# Extract UUIDs from the running data
|
|
||||||
# Process running data: first item is a dict with node_id and task entries
|
|
||||||
data.pop('node_id', None)
|
|
||||||
|
|
||||||
# Extract task UUIDs from data structure
|
|
||||||
for task_key, task_value in data.items():
|
|
||||||
if isinstance(task_value, dict) and 'uuid' in task_value:
|
|
||||||
active_task_ids.append(task_value['uuid'])
|
|
||||||
logger.debug(f"Found active task with UUID: {task_value['uuid']}")
|
|
||||||
elif isinstance(task_key, str):
|
|
||||||
# Handle case where UUID might be the key
|
|
||||||
active_task_ids.append(task_key)
|
|
||||||
logger.debug(f"Found active task with key: {task_key}")
|
|
||||||
|
|
||||||
logger.debug(f"Retrieved {len(active_task_ids)} active task IDs from dispatcherd")
|
|
||||||
return active_task_ids
|
|
||||||
except Exception:
|
|
||||||
logger.exception("Failed to get running tasks from dispatcherd")
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _heartbeat_instance_management():
|
|
||||||
"""Common logic for heartbeat instance management."""
|
|
||||||
logger.debug("Cluster node heartbeat task.")
|
logger.debug("Cluster node heartbeat task.")
|
||||||
nowtime = now()
|
nowtime = now()
|
||||||
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
|
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
|
||||||
@@ -780,7 +625,7 @@ def _heartbeat_instance_management():
|
|||||||
this_inst.local_health_check()
|
this_inst.local_health_check()
|
||||||
if startup_event and this_inst.capacity != 0:
|
if startup_event and this_inst.capacity != 0:
|
||||||
logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
|
logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
|
||||||
return None, None, None # Early return case
|
return
|
||||||
elif not last_last_seen:
|
elif not last_last_seen:
|
||||||
logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
|
logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
|
||||||
elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
|
elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
|
||||||
@@ -792,14 +637,8 @@ def _heartbeat_instance_management():
|
|||||||
logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
|
logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
|
||||||
this_inst.local_health_check()
|
this_inst.local_health_check()
|
||||||
else:
|
else:
|
||||||
logger.error("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
|
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
|
||||||
return None, None, None
|
# IFF any node has a greater version than we do, then we'll shutdown services
|
||||||
|
|
||||||
return this_inst, instance_list, lost_instances
|
|
||||||
|
|
||||||
|
|
||||||
def _heartbeat_check_versions(this_inst, instance_list):
|
|
||||||
"""Check versions across instances and determine if shutdown is needed."""
|
|
||||||
for other_inst in instance_list:
|
for other_inst in instance_list:
|
||||||
if other_inst.node_type in ('execution', 'hop'):
|
if other_inst.node_type in ('execution', 'hop'):
|
||||||
continue
|
continue
|
||||||
@@ -816,9 +655,6 @@ def _heartbeat_check_versions(this_inst, instance_list):
|
|||||||
stop_local_services(communicate=False)
|
stop_local_services(communicate=False)
|
||||||
raise RuntimeError("Shutting down.")
|
raise RuntimeError("Shutting down.")
|
||||||
|
|
||||||
|
|
||||||
def _heartbeat_handle_lost_instances(lost_instances, this_inst):
|
|
||||||
"""Handle lost instances by reaping their jobs and marking them offline."""
|
|
||||||
for other_inst in lost_instances:
|
for other_inst in lost_instances:
|
||||||
try:
|
try:
|
||||||
explanation = "Job reaped due to instance shutdown"
|
explanation = "Job reaped due to instance shutdown"
|
||||||
@@ -849,8 +685,17 @@ def _heartbeat_handle_lost_instances(lost_instances, this_inst):
|
|||||||
else:
|
else:
|
||||||
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
|
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
|
||||||
|
|
||||||
|
# Run local reaper
|
||||||
|
if worker_tasks is not None:
|
||||||
|
active_task_ids = []
|
||||||
|
for task_list in worker_tasks.values():
|
||||||
|
active_task_ids.extend(task_list)
|
||||||
|
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||||
|
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
|
||||||
|
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
|
||||||
|
@task(queue=get_task_queuename)
|
||||||
def awx_receptor_workunit_reaper():
|
def awx_receptor_workunit_reaper():
|
||||||
"""
|
"""
|
||||||
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
|
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
|
||||||
@@ -873,16 +718,8 @@ def awx_receptor_workunit_reaper():
|
|||||||
if not settings.RECEPTOR_RELEASE_WORK:
|
if not settings.RECEPTOR_RELEASE_WORK:
|
||||||
return
|
return
|
||||||
logger.debug("Checking for unreleased receptor work units")
|
logger.debug("Checking for unreleased receptor work units")
|
||||||
try:
|
receptor_ctl = get_receptor_ctl()
|
||||||
receptor_ctl = get_receptor_ctl()
|
receptor_work_list = receptor_ctl.simple_command("work list")
|
||||||
except FileNotFoundError:
|
|
||||||
logger.info('Receptorctl sockfile not found for workunit reaper, doing nothing')
|
|
||||||
return
|
|
||||||
try:
|
|
||||||
receptor_work_list = receptor_ctl.simple_command("work list")
|
|
||||||
except ValueError as exc:
|
|
||||||
logger.info(f'Error getting work list for workunit reaper, error: {str(exc)}')
|
|
||||||
return
|
|
||||||
|
|
||||||
unit_ids = [id for id in receptor_work_list]
|
unit_ids = [id for id in receptor_work_list]
|
||||||
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
|
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
|
||||||
@@ -896,7 +733,7 @@ def awx_receptor_workunit_reaper():
|
|||||||
administrative_workunit_reaper(receptor_work_list)
|
administrative_workunit_reaper(receptor_work_list)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def awx_k8s_reaper():
|
def awx_k8s_reaper():
|
||||||
if not settings.RECEPTOR_RELEASE_WORK:
|
if not settings.RECEPTOR_RELEASE_WORK:
|
||||||
return
|
return
|
||||||
@@ -919,7 +756,7 @@ def awx_k8s_reaper():
|
|||||||
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
|
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def awx_periodic_scheduler():
|
def awx_periodic_scheduler():
|
||||||
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
|
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
|
||||||
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
|
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
|
||||||
@@ -978,7 +815,7 @@ def awx_periodic_scheduler():
|
|||||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def handle_failure_notifications(task_ids):
|
def handle_failure_notifications(task_ids):
|
||||||
"""A task-ified version of the method that sends notifications."""
|
"""A task-ified version of the method that sends notifications."""
|
||||||
found_task_ids = set()
|
found_task_ids = set()
|
||||||
@@ -993,7 +830,7 @@ def handle_failure_notifications(task_ids):
|
|||||||
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
|
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def update_inventory_computed_fields(inventory_id):
|
def update_inventory_computed_fields(inventory_id):
|
||||||
"""
|
"""
|
||||||
Signal handler and wrapper around inventory.update_computed_fields to
|
Signal handler and wrapper around inventory.update_computed_fields to
|
||||||
@@ -1043,7 +880,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def update_host_smart_inventory_memberships():
|
def update_host_smart_inventory_memberships():
|
||||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||||
changed_inventories = set([])
|
changed_inventories = set([])
|
||||||
@@ -1059,7 +896,7 @@ def update_host_smart_inventory_memberships():
|
|||||||
smart_inventory.update_computed_fields()
|
smart_inventory.update_computed_fields()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def delete_inventory(inventory_id, user_id, retries=5):
|
def delete_inventory(inventory_id, user_id, retries=5):
|
||||||
# Delete inventory as user
|
# Delete inventory as user
|
||||||
if user_id is None:
|
if user_id is None:
|
||||||
@@ -1121,7 +958,7 @@ def _reconstruct_relationships(copy_mapping):
|
|||||||
new_obj.save()
|
new_obj.save()
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
|
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
|
||||||
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
|
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
|
||||||
|
|
||||||
@@ -1176,7 +1013,7 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
|
|||||||
update_inventory_computed_fields.delay(new_obj.id)
|
update_inventory_computed_fields.delay(new_obj.id)
|
||||||
|
|
||||||
|
|
||||||
@task_awx(queue=get_task_queuename)
|
@task(queue=get_task_queuename)
|
||||||
def periodic_resource_sync():
|
def periodic_resource_sync():
|
||||||
if not getattr(settings, 'RESOURCE_SERVER', None):
|
if not getattr(settings, 'RESOURCE_SERVER', None):
|
||||||
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")
|
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")
|
||||||
|
|||||||
@@ -8,12 +8,5 @@
|
|||||||
"CONTROLLER_PASSWORD": "fooo",
|
"CONTROLLER_PASSWORD": "fooo",
|
||||||
"CONTROLLER_USERNAME": "fooo",
|
"CONTROLLER_USERNAME": "fooo",
|
||||||
"CONTROLLER_OAUTH_TOKEN": "",
|
"CONTROLLER_OAUTH_TOKEN": "",
|
||||||
"CONTROLLER_VERIFY_SSL": "False",
|
"CONTROLLER_VERIFY_SSL": "False"
|
||||||
"AAP_HOSTNAME": "https://foo.invalid",
|
|
||||||
"AAP_PASSWORD": "fooo",
|
|
||||||
"AAP_USERNAME": "fooo",
|
|
||||||
"AAP_VALIDATE_CERTS": "False",
|
|
||||||
"CONTROLLER_REQUEST_TIMEOUT": "fooo",
|
|
||||||
"AAP_REQUEST_TIMEOUT": "fooo",
|
|
||||||
"AAP_TOKEN": ""
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts: all
|
|
||||||
gather_facts: false
|
|
||||||
connection: local
|
|
||||||
vars:
|
|
||||||
sleep_interval: 5
|
|
||||||
tasks:
|
|
||||||
- name: sleep for a specified interval
|
|
||||||
command: sleep '{{ sleep_interval }}'
|
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
[all:vars]
|
|
||||||
a=value_a
|
|
||||||
b=value_b
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
import time
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from dispatcherd.publish import task
|
|
||||||
|
|
||||||
from django.db import connection
|
|
||||||
|
|
||||||
from awx.main.dispatch import get_task_queuename
|
|
||||||
from awx.main.dispatch.publish import task as old_task
|
|
||||||
|
|
||||||
from ansible_base.lib.utils.db import advisory_lock
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@old_task(queue=get_task_queuename)
|
|
||||||
def sleep_task(seconds=10, log=False):
|
|
||||||
if log:
|
|
||||||
logger.info('starting sleep_task')
|
|
||||||
time.sleep(seconds)
|
|
||||||
if log:
|
|
||||||
logger.info('finished sleep_task')
|
|
||||||
|
|
||||||
|
|
||||||
@task()
|
|
||||||
def sleep_break_connection(seconds=0.2):
|
|
||||||
"""
|
|
||||||
Interact with the database in an intentionally breaking way.
|
|
||||||
After this finishes, queries made by this connection are expected to error
|
|
||||||
with "the connection is closed"
|
|
||||||
This is obviously a problem for any task that comes afterwards.
|
|
||||||
So this is used to break things so that the fixes may be demonstrated.
|
|
||||||
"""
|
|
||||||
with connection.cursor() as cursor:
|
|
||||||
cursor.execute(f"SET idle_session_timeout = '{seconds / 2}s';")
|
|
||||||
|
|
||||||
logger.info(f'sleeping for {seconds}s > {seconds / 2}s session timeout')
|
|
||||||
time.sleep(seconds)
|
|
||||||
|
|
||||||
for i in range(1, 3):
|
|
||||||
logger.info(f'\nRunning query number {i}')
|
|
||||||
try:
|
|
||||||
with connection.cursor() as cursor:
|
|
||||||
cursor.execute("SELECT 1;")
|
|
||||||
logger.info(' query worked, not expected')
|
|
||||||
except Exception as exc:
|
|
||||||
logger.info(f' query errored as expected\ntype: {type(exc)}\nstr: {str(exc)}')
|
|
||||||
|
|
||||||
logger.info(f'Connection present: {bool(connection.connection)}, reports closed: {getattr(connection.connection, "closed", "not_found")}')
|
|
||||||
|
|
||||||
|
|
||||||
@task()
|
|
||||||
def advisory_lock_exception():
|
|
||||||
time.sleep(0.2) # so it can fill up all the workers... hacky for now
|
|
||||||
with advisory_lock('advisory_lock_exception', lock_session_timeout_milliseconds=20):
|
|
||||||
raise RuntimeError('this is an intentional error')
|
|
||||||
@@ -87,8 +87,8 @@ def mock_analytic_post():
|
|||||||
{
|
{
|
||||||
'REDHAT_USERNAME': 'redhat_user',
|
'REDHAT_USERNAME': 'redhat_user',
|
||||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
'SUBSCRIPTIONS_USERNAME': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
True,
|
True,
|
||||||
('redhat_user', 'redhat_pass'),
|
('redhat_user', 'redhat_pass'),
|
||||||
@@ -98,8 +98,8 @@ def mock_analytic_post():
|
|||||||
{
|
{
|
||||||
'REDHAT_USERNAME': None,
|
'REDHAT_USERNAME': None,
|
||||||
'REDHAT_PASSWORD': None,
|
'REDHAT_PASSWORD': None,
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||||
},
|
},
|
||||||
True,
|
True,
|
||||||
('subs_user', 'subs_pass'),
|
('subs_user', 'subs_pass'),
|
||||||
@@ -109,8 +109,8 @@ def mock_analytic_post():
|
|||||||
{
|
{
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': '',
|
'REDHAT_PASSWORD': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||||
},
|
},
|
||||||
True,
|
True,
|
||||||
('subs_user', 'subs_pass'),
|
('subs_user', 'subs_pass'),
|
||||||
@@ -120,8 +120,8 @@ def mock_analytic_post():
|
|||||||
{
|
{
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': '',
|
'REDHAT_PASSWORD': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
'SUBSCRIPTIONS_USERNAME': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
False,
|
False,
|
||||||
None, # No request should be made
|
None, # No request should be made
|
||||||
@@ -131,8 +131,8 @@ def mock_analytic_post():
|
|||||||
{
|
{
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
False,
|
False,
|
||||||
None, # Invalid, no request should be made
|
None, # Invalid, no request should be made
|
||||||
@@ -150,24 +150,3 @@ def test_ship_credential(setting_map, expected_result, expected_auth, temp_analy
|
|||||||
assert mock_analytic_post.call_args[1]['auth'] == expected_auth
|
assert mock_analytic_post.call_args[1]['auth'] == expected_auth
|
||||||
else:
|
else:
|
||||||
mock_analytic_post.assert_not_called()
|
mock_analytic_post.assert_not_called()
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_gather_cleanup_on_auth_failure(mock_valid_license, temp_analytic_tar):
|
|
||||||
settings.INSIGHTS_TRACKING_STATE = True
|
|
||||||
settings.AUTOMATION_ANALYTICS_URL = 'https://example.com/api'
|
|
||||||
settings.REDHAT_USERNAME = 'test_user'
|
|
||||||
settings.REDHAT_PASSWORD = 'test_password'
|
|
||||||
|
|
||||||
with tempfile.NamedTemporaryFile(delete=False, suffix='.tar.gz') as temp_file:
|
|
||||||
temp_file_path = temp_file.name
|
|
||||||
|
|
||||||
try:
|
|
||||||
with mock.patch('awx.main.analytics.core.ship', return_value=False):
|
|
||||||
with mock.patch('awx.main.analytics.core.package', return_value=temp_file_path):
|
|
||||||
gather(module=importlib.import_module(__name__), collection_type='scheduled')
|
|
||||||
|
|
||||||
assert not os.path.exists(temp_file_path), "Temp file was not cleaned up after ship failure"
|
|
||||||
finally:
|
|
||||||
if os.path.exists(temp_file_path):
|
|
||||||
os.remove(temp_file_path)
|
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ EXPECTED_VALUES = {
|
|||||||
'awx_license_instance_free': 0,
|
'awx_license_instance_free': 0,
|
||||||
'awx_pending_jobs_total': 0,
|
'awx_pending_jobs_total': 0,
|
||||||
'awx_database_connections_total': 1,
|
'awx_database_connections_total': 1,
|
||||||
'awx_license_expiry': 0,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -97,8 +97,8 @@ class TestAnalyticsGenericView:
|
|||||||
'INSIGHTS_TRACKING_STATE': True,
|
'INSIGHTS_TRACKING_STATE': True,
|
||||||
'REDHAT_USERNAME': 'redhat_user',
|
'REDHAT_USERNAME': 'redhat_user',
|
||||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
'SUBSCRIPTIONS_USERNAME': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
('redhat_user', 'redhat_pass'),
|
('redhat_user', 'redhat_pass'),
|
||||||
None,
|
None,
|
||||||
@@ -109,8 +109,8 @@ class TestAnalyticsGenericView:
|
|||||||
'INSIGHTS_TRACKING_STATE': True,
|
'INSIGHTS_TRACKING_STATE': True,
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': '',
|
'REDHAT_PASSWORD': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||||
},
|
},
|
||||||
('subs_user', 'subs_pass'),
|
('subs_user', 'subs_pass'),
|
||||||
None,
|
None,
|
||||||
@@ -121,8 +121,8 @@ class TestAnalyticsGenericView:
|
|||||||
'INSIGHTS_TRACKING_STATE': True,
|
'INSIGHTS_TRACKING_STATE': True,
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': '',
|
'REDHAT_PASSWORD': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
'SUBSCRIPTIONS_USERNAME': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
ERROR_MISSING_USER,
|
ERROR_MISSING_USER,
|
||||||
@@ -133,8 +133,8 @@ class TestAnalyticsGenericView:
|
|||||||
'INSIGHTS_TRACKING_STATE': True,
|
'INSIGHTS_TRACKING_STATE': True,
|
||||||
'REDHAT_USERNAME': 'redhat_user',
|
'REDHAT_USERNAME': 'redhat_user',
|
||||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||||
},
|
},
|
||||||
('redhat_user', 'redhat_pass'),
|
('redhat_user', 'redhat_pass'),
|
||||||
None,
|
None,
|
||||||
@@ -145,8 +145,8 @@ class TestAnalyticsGenericView:
|
|||||||
'INSIGHTS_TRACKING_STATE': True,
|
'INSIGHTS_TRACKING_STATE': True,
|
||||||
'REDHAT_USERNAME': '',
|
'REDHAT_USERNAME': '',
|
||||||
'REDHAT_PASSWORD': '',
|
'REDHAT_PASSWORD': '',
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user', # NOSONAR
|
'SUBSCRIPTIONS_USERNAME': 'subs_user', # NOSONAR
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
'SUBSCRIPTIONS_PASSWORD': '',
|
||||||
},
|
},
|
||||||
None,
|
None,
|
||||||
ERROR_MISSING_PASSWORD,
|
ERROR_MISSING_PASSWORD,
|
||||||
@@ -155,36 +155,26 @@ class TestAnalyticsGenericView:
|
|||||||
)
|
)
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test__send_to_analytics_credentials(self, settings_map, expected_auth, expected_error_keyword):
|
def test__send_to_analytics_credentials(self, settings_map, expected_auth, expected_error_keyword):
|
||||||
"""
|
|
||||||
Test _send_to_analytics with various combinations of credentials.
|
|
||||||
"""
|
|
||||||
with override_settings(**settings_map):
|
with override_settings(**settings_map):
|
||||||
request = RequestFactory().post('/some/path')
|
request = RequestFactory().post('/some/path')
|
||||||
view = AnalyticsGenericView()
|
view = AnalyticsGenericView()
|
||||||
|
|
||||||
if expected_auth:
|
if expected_auth:
|
||||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client:
|
with mock.patch('requests.request') as mock_request:
|
||||||
# Configure the mock OIDCClient instance and its make_request method
|
mock_request.return_value = mock.Mock(status_code=200)
|
||||||
mock_client_instance = mock.Mock()
|
|
||||||
mock_oidc_client.return_value = mock_client_instance
|
|
||||||
mock_client_instance.make_request.return_value = mock.Mock(status_code=200)
|
|
||||||
|
|
||||||
analytic_url = view._get_analytics_url(request.path)
|
analytic_url = view._get_analytics_url(request.path)
|
||||||
response = view._send_to_analytics(request, 'POST')
|
response = view._send_to_analytics(request, 'POST')
|
||||||
|
|
||||||
# Assertions
|
# Assertions
|
||||||
# Assert OIDCClient instantiation
|
mock_request.assert_called_once_with(
|
||||||
expected_client_id, expected_client_secret = expected_auth
|
|
||||||
mock_oidc_client.assert_called_once_with(expected_client_id, expected_client_secret)
|
|
||||||
|
|
||||||
# Assert make_request call
|
|
||||||
mock_client_instance.make_request.assert_called_once_with(
|
|
||||||
'POST',
|
'POST',
|
||||||
analytic_url,
|
analytic_url,
|
||||||
headers=mock.ANY,
|
auth=expected_auth,
|
||||||
verify=mock.ANY,
|
verify=mock.ANY,
|
||||||
params=mock.ANY,
|
headers=mock.ANY,
|
||||||
json=mock.ANY,
|
json=mock.ANY,
|
||||||
|
params=mock.ANY,
|
||||||
timeout=mock.ANY,
|
timeout=mock.ANY,
|
||||||
)
|
)
|
||||||
assert response.status_code == 200
|
assert response.status_code == 200
|
||||||
@@ -196,64 +186,3 @@ class TestAnalyticsGenericView:
|
|||||||
# mock_error_response.assert_called_once_with(expected_error_keyword, remote=False)
|
# mock_error_response.assert_called_once_with(expected_error_keyword, remote=False)
|
||||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||||
assert response.data['error']['keyword'] == expected_error_keyword
|
assert response.data['error']['keyword'] == expected_error_keyword
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
@pytest.mark.parametrize(
|
|
||||||
"settings_map, expected_auth",
|
|
||||||
[
|
|
||||||
# Test case 1: Username and password should be used for basic auth
|
|
||||||
(
|
|
||||||
{
|
|
||||||
'INSIGHTS_TRACKING_STATE': True,
|
|
||||||
'REDHAT_USERNAME': 'redhat_user',
|
|
||||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
|
||||||
},
|
|
||||||
('redhat_user', 'redhat_pass'),
|
|
||||||
),
|
|
||||||
# Test case 2: Client ID and secret should be used for basic auth
|
|
||||||
(
|
|
||||||
{
|
|
||||||
'INSIGHTS_TRACKING_STATE': True,
|
|
||||||
'REDHAT_USERNAME': '',
|
|
||||||
'REDHAT_PASSWORD': '',
|
|
||||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
|
||||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
|
||||||
},
|
|
||||||
None,
|
|
||||||
),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
def test__send_to_analytics_fallback_to_basic_auth(self, settings_map, expected_auth):
|
|
||||||
"""
|
|
||||||
Test _send_to_analytics with basic auth fallback.
|
|
||||||
"""
|
|
||||||
with override_settings(**settings_map):
|
|
||||||
request = RequestFactory().post('/some/path')
|
|
||||||
view = AnalyticsGenericView()
|
|
||||||
|
|
||||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client, mock.patch(
|
|
||||||
'awx.api.views.analytics.AnalyticsGenericView._base_auth_request'
|
|
||||||
) as mock_base_auth_request:
|
|
||||||
# Configure the mock OIDCClient instance and its make_request method
|
|
||||||
mock_client_instance = mock.Mock()
|
|
||||||
mock_oidc_client.return_value = mock_client_instance
|
|
||||||
mock_client_instance.make_request.side_effect = requests.RequestException("Incorrect credentials")
|
|
||||||
|
|
||||||
analytic_url = view._get_analytics_url(request.path)
|
|
||||||
view._send_to_analytics(request, 'POST')
|
|
||||||
|
|
||||||
if expected_auth:
|
|
||||||
# assert mock_base_auth_request called with expected_auth
|
|
||||||
mock_base_auth_request.assert_called_once_with(
|
|
||||||
request,
|
|
||||||
'POST',
|
|
||||||
analytic_url,
|
|
||||||
expected_auth[0],
|
|
||||||
expected_auth[1],
|
|
||||||
mock.ANY,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# assert mock_base_auth_request not called
|
|
||||||
mock_base_auth_request.assert_not_called()
|
|
||||||
|
|||||||
@@ -287,72 +287,6 @@ def test_sa_grant_private_credential_to_team_through_role_teams(post, credential
|
|||||||
assert response.status_code == 400
|
assert response.status_code == 400
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_grant_credential_to_team_different_organization_through_role_teams(post, get, credential, organizations, admin, org_admin, team, team_member):
|
|
||||||
# # Test that credential from different org can be assigned to team by a superuser through role_teams_list endpoint
|
|
||||||
orgs = organizations(2)
|
|
||||||
credential.organization = orgs[0]
|
|
||||||
credential.save()
|
|
||||||
team.organization = orgs[1]
|
|
||||||
team.save()
|
|
||||||
|
|
||||||
# Non-superuser (org_admin) trying cross-org assignment should be denied
|
|
||||||
response = post(reverse('api:role_teams_list', kwargs={'pk': credential.use_role.id}), {'id': team.id}, org_admin)
|
|
||||||
assert response.status_code == 400
|
|
||||||
assert (
|
|
||||||
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
|
|
||||||
in response.data['msg']
|
|
||||||
)
|
|
||||||
|
|
||||||
# Superuser (admin) can do cross-org assignment
|
|
||||||
response = post(reverse('api:role_teams_list', kwargs={'pk': credential.use_role.id}), {'id': team.id}, admin)
|
|
||||||
assert response.status_code == 204
|
|
||||||
|
|
||||||
assert credential.use_role in team.member_role.children.all()
|
|
||||||
assert team_member in credential.read_role
|
|
||||||
assert team_member in credential.use_role
|
|
||||||
assert team_member not in credential.admin_role
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_grant_credential_to_team_different_organization(post, get, credential, organizations, admin, org_admin, team, team_member):
|
|
||||||
# Test that credential from different org can be assigned to team by a superuser
|
|
||||||
orgs = organizations(2)
|
|
||||||
credential.organization = orgs[0]
|
|
||||||
credential.save()
|
|
||||||
team.organization = orgs[1]
|
|
||||||
team.save()
|
|
||||||
|
|
||||||
# Non-superuser (org_admin, ...) trying cross-org assignment should be denied
|
|
||||||
response = post(reverse('api:team_roles_list', kwargs={'pk': team.id}), {'id': credential.use_role.id}, org_admin)
|
|
||||||
assert response.status_code == 400
|
|
||||||
assert (
|
|
||||||
"You cannot grant a team access to a credential in a different organization. Only superusers can grant cross-organization credential access to teams"
|
|
||||||
in response.data['msg']
|
|
||||||
)
|
|
||||||
|
|
||||||
# Superuser (system admin) can do cross-org assignment
|
|
||||||
response = post(reverse('api:team_roles_list', kwargs={'pk': team.id}), {'id': credential.use_role.id}, admin)
|
|
||||||
assert response.status_code == 204
|
|
||||||
|
|
||||||
assert credential.use_role in team.member_role.children.all()
|
|
||||||
|
|
||||||
assert team_member in credential.read_role
|
|
||||||
assert team_member in credential.use_role
|
|
||||||
assert team_member not in credential.admin_role
|
|
||||||
|
|
||||||
# Team member can see the credential in API
|
|
||||||
response = get(reverse('api:team_credentials_list', kwargs={'pk': team.id}), team_member)
|
|
||||||
assert response.status_code == 200
|
|
||||||
assert response.data['count'] == 1
|
|
||||||
assert response.data['results'][0]['id'] == credential.id
|
|
||||||
|
|
||||||
# Team member can see the credential in general credentials API
|
|
||||||
response = get(reverse('api:credential_list'), team_member)
|
|
||||||
assert response.status_code == 200
|
|
||||||
assert any(cred['id'] == credential.id for cred in response.data['results'])
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_sa_grant_private_credential_to_team_through_team_roles(post, credential, admin, team):
|
def test_sa_grant_private_credential_to_team_through_team_roles(post, credential, admin, team):
|
||||||
# not even a system admin can grant a private cred to a team though
|
# not even a system admin can grant a private cred to a team though
|
||||||
@@ -1290,30 +1224,6 @@ def test_custom_credential_type_create(get, post, organization, admin):
|
|||||||
assert decrypt_field(cred, 'api_token') == 'secret'
|
assert decrypt_field(cred, 'api_token') == 'secret'
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_galaxy_create_ok(post, organization, admin):
|
|
||||||
params = {
|
|
||||||
'credential_type': 1,
|
|
||||||
'name': 'Galaxy credential',
|
|
||||||
'inputs': {
|
|
||||||
'url': 'https://galaxy.ansible.com',
|
|
||||||
'token': 'some_galaxy_token',
|
|
||||||
},
|
|
||||||
}
|
|
||||||
galaxy = CredentialType.defaults['galaxy_api_token']()
|
|
||||||
galaxy.save()
|
|
||||||
params['user'] = admin.id
|
|
||||||
params['credential_type'] = galaxy.pk
|
|
||||||
response = post(reverse('api:credential_list'), params, admin)
|
|
||||||
assert response.status_code == 201
|
|
||||||
|
|
||||||
assert Credential.objects.count() == 1
|
|
||||||
cred = Credential.objects.all()[:1].get()
|
|
||||||
assert cred.credential_type == galaxy
|
|
||||||
assert cred.inputs['url'] == 'https://galaxy.ansible.com'
|
|
||||||
assert decrypt_field(cred, 'token') == 'some_galaxy_token'
|
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# misc xfail conditions
|
# misc xfail conditions
|
||||||
#
|
#
|
||||||
|
|||||||
64
awx/main/tests/functional/api/test_immutablesharedfields.py
Normal file
64
awx/main/tests/functional/api/test_immutablesharedfields.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from awx.api.versioning import reverse
|
||||||
|
from awx.main.models import Organization
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.django_db
|
||||||
|
class TestImmutableSharedFields:
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def configure_settings(self, settings):
|
||||||
|
settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT = False
|
||||||
|
|
||||||
|
def test_create_raises_permission_denied(self, admin_user, post):
|
||||||
|
orgA = Organization.objects.create(name='orgA')
|
||||||
|
resp = post(
|
||||||
|
url=reverse('api:team_list'),
|
||||||
|
data={'name': 'teamA', 'organization': orgA.id},
|
||||||
|
user=admin_user,
|
||||||
|
expect=403,
|
||||||
|
)
|
||||||
|
assert "Creation of this resource is not allowed" in resp.data['detail']
|
||||||
|
|
||||||
|
def test_perform_delete_raises_permission_denied(self, admin_user, delete):
|
||||||
|
orgA = Organization.objects.create(name='orgA')
|
||||||
|
team = orgA.teams.create(name='teamA')
|
||||||
|
resp = delete(
|
||||||
|
url=reverse('api:team_detail', kwargs={'pk': team.id}),
|
||||||
|
user=admin_user,
|
||||||
|
expect=403,
|
||||||
|
)
|
||||||
|
assert "Deletion of this resource is not allowed" in resp.data['detail']
|
||||||
|
|
||||||
|
def test_perform_update(self, admin_user, patch):
|
||||||
|
orgA = Organization.objects.create(name='orgA')
|
||||||
|
# allow patching non-shared fields
|
||||||
|
patch(
|
||||||
|
url=reverse('api:organization_detail', kwargs={'pk': orgA.id}),
|
||||||
|
data={"max_hosts": 76},
|
||||||
|
user=admin_user,
|
||||||
|
expect=200,
|
||||||
|
)
|
||||||
|
# prevent patching shared fields
|
||||||
|
resp = patch(url=reverse('api:organization_detail', kwargs={'pk': orgA.id}), data={"name": "orgB"}, user=admin_user, expect=403)
|
||||||
|
assert "Cannot change shared field" in resp.data['name']
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
'role',
|
||||||
|
['admin_role', 'member_role'],
|
||||||
|
)
|
||||||
|
@pytest.mark.parametrize('resource', ['organization', 'team'])
|
||||||
|
def test_prevent_assigning_member_to_organization_or_team(self, admin_user, post, resource, role):
|
||||||
|
orgA = Organization.objects.create(name='orgA')
|
||||||
|
if resource == 'organization':
|
||||||
|
role = getattr(orgA, role)
|
||||||
|
elif resource == 'team':
|
||||||
|
teamA = orgA.teams.create(name='teamA')
|
||||||
|
role = getattr(teamA, role)
|
||||||
|
resp = post(
|
||||||
|
url=reverse('api:user_roles_list', kwargs={'pk': admin_user.id}),
|
||||||
|
data={'id': role.id},
|
||||||
|
user=admin_user,
|
||||||
|
expect=403,
|
||||||
|
)
|
||||||
|
assert f"Cannot directly modify user membership to {resource}." in resp.data['msg']
|
||||||
@@ -1,5 +1,3 @@
|
|||||||
from unittest import mock
|
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
@@ -7,9 +5,6 @@ from awx.main.models.activity_stream import ActivityStream
|
|||||||
from awx.main.models.ha import Instance
|
from awx.main.models.ha import Instance
|
||||||
|
|
||||||
from django.test.utils import override_settings
|
from django.test.utils import override_settings
|
||||||
from django.http import HttpResponse
|
|
||||||
|
|
||||||
from rest_framework import status
|
|
||||||
|
|
||||||
|
|
||||||
INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, node_type='execution', memory=36000000000, cpu_capacity=6, mem_capacity=42)
|
INSTANCE_KWARGS = dict(hostname='example-host', cpu=6, node_type='execution', memory=36000000000, cpu_capacity=6, mem_capacity=42)
|
||||||
@@ -92,11 +87,3 @@ def test_custom_hostname_regex(post, admin_user):
|
|||||||
"peers": [],
|
"peers": [],
|
||||||
}
|
}
|
||||||
post(url=url, user=admin_user, data=data, expect=value[1])
|
post(url=url, user=admin_user, data=data, expect=value[1])
|
||||||
|
|
||||||
|
|
||||||
def test_instance_install_bundle(get, admin_user, system_auditor):
|
|
||||||
instance = Instance.objects.create(**INSTANCE_KWARGS)
|
|
||||||
url = reverse('api:instance_install_bundle', kwargs={'pk': instance.pk})
|
|
||||||
with mock.patch('awx.api.views.instance_install_bundle.InstanceInstallBundle.get', return_value=HttpResponse({'test': 'data'}, status=status.HTTP_200_OK)):
|
|
||||||
get(url=url, user=admin_user, expect=200)
|
|
||||||
get(url=url, user=system_auditor, expect=403)
|
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ from django.core.exceptions import ValidationError
|
|||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
|
|
||||||
from awx.main.models import InventorySource, Inventory, ActivityStream
|
from awx.main.models import InventorySource, Inventory, ActivityStream
|
||||||
from awx.main.utils.inventory_vars import update_group_variables
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
@@ -521,20 +520,6 @@ class TestInventorySourceCredential:
|
|||||||
patch(url=inv_src.get_absolute_url(), data={'credential': aws_cred.pk}, expect=200, user=admin_user)
|
patch(url=inv_src.get_absolute_url(), data={'credential': aws_cred.pk}, expect=200, user=admin_user)
|
||||||
assert list(inv_src.credentials.values_list('id', flat=True)) == [aws_cred.pk]
|
assert list(inv_src.credentials.values_list('id', flat=True)) == [aws_cred.pk]
|
||||||
|
|
||||||
@pytest.mark.skip(reason="Delay until AAP-53978 completed")
|
|
||||||
def test_vmware_cred_create_esxi_source(self, inventory, admin_user, organization, post, get):
|
|
||||||
"""Test that a vmware esxi source can be added with a vmware credential"""
|
|
||||||
from awx.main.models.credential import Credential, CredentialType
|
|
||||||
|
|
||||||
vmware = CredentialType.defaults['vmware']()
|
|
||||||
vmware.save()
|
|
||||||
vmware_cred = Credential.objects.create(credential_type=vmware, name="bar", organization=organization)
|
|
||||||
inv_src = InventorySource.objects.create(inventory=inventory, name='foobar', source='vmware_esxi')
|
|
||||||
r = post(url=reverse('api:inventory_source_credentials_list', kwargs={'pk': inv_src.pk}), data={'id': vmware_cred.pk}, expect=204, user=admin_user)
|
|
||||||
g = get(inv_src.get_absolute_url(), admin_user)
|
|
||||||
assert r.status_code == 204
|
|
||||||
assert g.data['credential'] == vmware_cred.pk
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
class TestControlledBySCM:
|
class TestControlledBySCM:
|
||||||
@@ -705,241 +690,3 @@ class TestConstructedInventory:
|
|||||||
assert inv_r.data['url'] != const_r.data['url']
|
assert inv_r.data['url'] != const_r.data['url']
|
||||||
assert inv_r.data['related']['constructed_url'] == url_const
|
assert inv_r.data['related']['constructed_url'] == url_const
|
||||||
assert const_r.data['related']['constructed_url'] == url_const
|
assert const_r.data['related']['constructed_url'] == url_const
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
class TestInventoryAllVariables:
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def simulate_update_from_source(inv_src, variables_dict, overwrite_vars=True):
|
|
||||||
"""
|
|
||||||
Update `inventory` with variables `variables_dict` from source
|
|
||||||
`inv_src`.
|
|
||||||
"""
|
|
||||||
# Perform an update from source the same way it is done in
|
|
||||||
# `inventory_import.Command._update_inventory`.
|
|
||||||
new_vars = update_group_variables(
|
|
||||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
|
||||||
newvars=variables_dict,
|
|
||||||
dbvars=inv_src.inventory.variables_dict,
|
|
||||||
invsrc_id=inv_src.id,
|
|
||||||
inventory_id=inv_src.inventory.id,
|
|
||||||
overwrite_vars=overwrite_vars,
|
|
||||||
)
|
|
||||||
inv_src.inventory.variables = json.dumps(new_vars)
|
|
||||||
inv_src.inventory.save(update_fields=["variables"])
|
|
||||||
return new_vars
|
|
||||||
|
|
||||||
def update_and_verify(self, inv_src, new_vars, expect=None, overwrite_vars=True, teststep=None):
|
|
||||||
"""
|
|
||||||
Helper: Update from source and verify the new inventory variables.
|
|
||||||
|
|
||||||
:param inv_src: An inventory source object with its inventory property
|
|
||||||
set to the inventory fixture of the called.
|
|
||||||
:param dict new_vars: The variables of the inventory source `inv_src`.
|
|
||||||
:param dict expect: (optional) The expected variables state of the
|
|
||||||
inventory after the update. If not set or None, expect `new_vars`.
|
|
||||||
:param bool overwrite_vars: The status of the inventory source option
|
|
||||||
'overwrite variables'. Default is `True`.
|
|
||||||
:raise AssertionError: If the inventory does not contain the expected
|
|
||||||
variables after the update.
|
|
||||||
"""
|
|
||||||
self.simulate_update_from_source(inv_src, new_vars, overwrite_vars=overwrite_vars)
|
|
||||||
if teststep is not None:
|
|
||||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars), f"Test step {teststep}"
|
|
||||||
else:
|
|
||||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars)
|
|
||||||
|
|
||||||
def test_set_variables_through_inventory_details_update(self, inventory, patch, admin_user):
|
|
||||||
"""
|
|
||||||
Set an inventory variable by changing the inventory details, simulating
|
|
||||||
a user edit.
|
|
||||||
"""
|
|
||||||
# a: x
|
|
||||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
|
||||||
inventory.refresh_from_db()
|
|
||||||
assert inventory.variables_dict == {"a": "x"}
|
|
||||||
|
|
||||||
def test_variables_set_by_user_persist_update_from_src(self, inventory, inventory_source, patch, admin_user):
|
|
||||||
"""
|
|
||||||
Verify the special behavior that a variable which originates from a user
|
|
||||||
edit (instead of a source update), is not removed from the inventory
|
|
||||||
when a source update with overwrite_vars=True does not contain that
|
|
||||||
variable. This behavior is considered special because a variable which
|
|
||||||
originates from a source would actually be deleted.
|
|
||||||
|
|
||||||
In addition, verify that an existing variable which was set by a user
|
|
||||||
edit can be overwritten by a source update.
|
|
||||||
"""
|
|
||||||
# Set two variables via user edit.
|
|
||||||
patch(
|
|
||||||
url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}),
|
|
||||||
data={'variables': '{"a": "a_from_user", "b": "b_from_user"}'},
|
|
||||||
user=admin_user,
|
|
||||||
expect=200,
|
|
||||||
)
|
|
||||||
inventory.refresh_from_db()
|
|
||||||
assert inventory.variables_dict == {'a': 'a_from_user', 'b': 'b_from_user'}
|
|
||||||
# Update from a source which contains only one of the two variables from
|
|
||||||
# the previous update.
|
|
||||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source'})
|
|
||||||
# Verify inventory variables.
|
|
||||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_user'}
|
|
||||||
|
|
||||||
def test_variables_set_through_src_get_removed_on_update_from_same_src(self, inventory, inventory_source, patch, admin_user):
|
|
||||||
"""
|
|
||||||
Verify that a variable which originates from a source update, is removed
|
|
||||||
from the inventory when a source update with overwrite_vars=True does
|
|
||||||
not contain that variable.
|
|
||||||
|
|
||||||
In addition, verify that an existing variable which was set by a user
|
|
||||||
edit can be overwritten by a source update.
|
|
||||||
"""
|
|
||||||
# Set two variables via update from source.
|
|
||||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source', 'b': 'b_from_source'})
|
|
||||||
# Verify inventory variables.
|
|
||||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_source'}
|
|
||||||
# Update from the same source which now contains only one of the two
|
|
||||||
# variables from the previous update.
|
|
||||||
self.simulate_update_from_source(inventory_source, {'b': 'b_from_source'})
|
|
||||||
# Verify the variable has been deleted from the inventory.
|
|
||||||
assert inventory.variables_dict == {'b': 'b_from_source'}
|
|
||||||
|
|
||||||
def test_overwrite_variables_through_inventory_details_update(self, inventory, patch, admin_user):
    """
    Set and update the inventory variables multiple times by changing the
    inventory details via api, simulating user edits.

    Any variables update by means of an inventory details update shall
    overwrite all existing inventory variables (replace, not merge).
    """
    # a: x
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    assert inventory.variables_dict == {"a": "x"}
    # a: x2 -- same key, the new value replaces the old one.
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x2'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    assert inventory.variables_dict == {"a": "x2"}
    # b: y -- a disjoint mapping fully replaces the previous one ('a' is gone).
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    assert inventory.variables_dict == {"b": "y"}
|
|
||||||
|
|
||||||
def test_inventory_group_variables_internal_data(self, inventory, patch, admin_user):
    """
    Basic verification of how variable updates are stored internally.

    .. Warning::

        This test verifies a specific implementation of the inventory
        variables update business logic. It may deliver false negatives if
        the implementation changes.
    """
    # a: x  (fixed comment: previously read "x: a", the reverse of the payload)
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
    igv = inventory.inventory_group_variables.first()
    # -1 marks a user edit; source updates store the inventory source id
    # instead (see the sibling test exercising simulate_update_from_source).
    assert igv.variables == {'a': [[-1, 'x']]}
    # b: y
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
    igv = inventory.inventory_group_variables.first()
    assert igv.variables == {'b': [[-1, 'y']]}
|
|
||||||
|
|
||||||
def test_update_then_user_change(self, inventory, patch, admin_user, inventory_source):
    """
    1. Update inventory vars by means of an inventory source update.
    2. Update inventory vars by editing the inventory details (aka a 'user
       update'), thereby changing variables values and deleting variables
       from the inventory.

    .. Warning::

        This test partly relies on a specific implementation of the
        inventory variables update business logic. It may deliver false
        negatives if the implementation changes.
    """
    assert inventory_source.inventory_id == inventory.pk  # sanity
    # ---- Test step 1: Set variables by updating from an inventory source.
    self.simulate_update_from_source(inventory_source, {'foo': 'foo_from_source', 'bar': 'bar_from_source'})
    # Verify inventory variables.
    assert inventory.variables_dict == {'foo': 'foo_from_source', 'bar': 'bar_from_source'}
    # Verify internal storage of variables data. Note that this is
    # implementation specific.
    assert inventory.inventory_group_variables.count() == 1
    igv = inventory.inventory_group_variables.first()
    assert igv.variables == {'foo': [[inventory_source.id, 'foo_from_source']], 'bar': [[inventory_source.id, 'bar_from_source']]}
    # ---- Test step 2: Change the variables by editing the inventory details.
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'foo: foo_from_user'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    # Verify that variable `foo` contains the new value, and that variable
    # `bar` has been deleted from the inventory.
    assert inventory.variables_dict == {"foo": "foo_from_user"}
    # Verify internal storage of variables data. Note that this is
    # implementation specific.
    # BUGFIX: this count() comparison was missing its `assert` keyword, so
    # it was a no-op expression and never actually checked anything.
    assert inventory.inventory_group_variables.count() == 1
    igv = inventory.inventory_group_variables.first()
    assert igv.variables == {'foo': [[-1, 'foo_from_user']]}
|
|
||||||
|
|
||||||
def test_monotonic_deletions(self, inventory, patch, admin_user):
    """
    Verify the variables history logic for monotonic deletions.

    Monotonic in this context means that the variables are deleted in the
    reverse order of their creation.

    1. Set inventory variable x: 0, expect INV={x: 0}

    (The following steps use overwrite_variables=False)

    2. Update from source A={x: 1}, expect INV={x: 1}
    3. Update from source B={x: 2}, expect INV={x: 2}
    4. Update from source B={}, expect INV={x: 1}
    5. Update from source A={}, expect INV={x: 0}
    """
    inv_src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
    inv_src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
    # Test step 1: seed the history with a user edit.
    patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'x: 0'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    assert inventory.variables_dict == {"x": 0}
    # Test step 2: Source A overwrites value of var x
    self.update_and_verify(inv_src_a, {"x": 1}, teststep=2)
    # Test step 3: Source B overwrites value of var x
    # (fixed comment: previously said "Source A", but the update comes from B)
    self.update_and_verify(inv_src_b, {"x": 2}, teststep=3)
    # Test step 4: Value of var x from source A reappears
    self.update_and_verify(inv_src_b, {}, expect={"x": 1}, teststep=4)
    # Test step 5: Value of var x from initial user edit reappears
    self.update_and_verify(inv_src_a, {}, expect={"x": 0}, teststep=5)
|
|
||||||
|
|
||||||
def test_interleaved_deletions(self, inventory, patch, admin_user, inventory_source):
    """
    Verify the variables history logic for interleaved deletions.

    Interleaved in this context means that the variables are deleted in a
    different order than the sequence of their creation.

    1. Set inventory variable x: 0, expect INV={x: 0}
    2. Update from source A={x: 1}, expect INV={x: 1}
    3. Update from source B={x: 2}, expect INV={x: 2}
    4. Update from source C={x: 3}, expect INV={x: 3}
    5. Update from source B={}, expect INV={x: 3}
    6. Update from source C={}, expect INV={x: 1}
    """
    # Three independent sources attached to the same inventory.
    src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
    src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
    src_c = InventorySource.objects.create(name="inv-src-C", inventory=inventory, source="ec2")

    # Step 1: a user edit seeds the variable history with x=0.
    detail_url = reverse('api:inventory_detail', kwargs={'pk': inventory.pk})
    patch(url=detail_url, data={'variables': 'x: 0'}, user=admin_user, expect=200)
    inventory.refresh_from_db()
    assert inventory.variables_dict == {"x": 0}

    # Steps 2-4: each source in turn overwrites the value of x.
    self.update_and_verify(src_a, {"x": 1}, teststep=2)
    self.update_and_verify(src_b, {"x": 2}, teststep=3)
    self.update_and_verify(src_c, {"x": 3}, teststep=4)

    # Step 5: dropping x from source B leaves C's (most recent) value intact.
    self.update_and_verify(src_b, {}, expect={"x": 3}, teststep=5)

    # Step 6: dropping x from source C falls back to A's value, because
    # source B's latest update no longer contains x.
    self.update_and_verify(src_c, {}, expect={"x": 1}, teststep=6)
|
|
||||||
|
|||||||
@@ -1,191 +0,0 @@
|
|||||||
import pytest
|
|
||||||
from unittest.mock import patch, MagicMock
|
|
||||||
|
|
||||||
from awx.api.versioning import reverse
|
|
||||||
|
|
||||||
|
|
||||||
# Generated by Cursor (claude-4-sonnet)
|
|
||||||
@pytest.mark.django_db
class TestLicenseCacheClearing:
    """Test cache clearing for LICENSE setting changes.

    Each test mocks ``django.db.connection.on_commit`` so that the callbacks
    the view schedules can be executed synchronously within the test and
    then inspected, instead of waiting for a real transaction commit.
    """

    def test_license_from_manifest_clears_cache(self, admin_user, post):
        """Test that posting a manifest to /api/v2/config/ clears the LICENSE cache"""

        # Mock the licenser and clear_setting_cache
        with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
            'awx.api.views.root.clear_setting_cache'
        ) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:

            # Set up mock license data
            mock_license_data = {'valid_key': True, 'license_type': 'enterprise', 'instance_count': 100, 'subscription_name': 'Test Enterprise License'}

            # Mock the validation and license processing
            mock_validate.return_value = [{'some': 'manifest_data'}]
            mock_licenser = MagicMock()
            mock_licenser.license_from_manifest.return_value = mock_license_data
            mock_get_licenser.return_value = mock_licenser

            # Prepare the request data (base64 encoded manifest)
            manifest_data = {'manifest': 'ZmFrZS1tYW5pZmVzdC1kYXRh'}  # base64 for "fake-manifest-data"

            # Make the POST request
            url = reverse('api:api_v2_config_view')
            response = post(url, manifest_data, admin_user, expect=200)

            # Verify the response
            assert response.data == mock_license_data

            # Verify license_from_manifest was called
            mock_licenser.license_from_manifest.assert_called_once()

            # Verify on_commit was called (may be multiple times due to other settings)
            assert mock_on_commit.call_count >= 1

            # Execute all on_commit callbacks to trigger cache clearing
            for call_args in mock_on_commit.call_args_list:
                callback = call_args[0][0]
                callback()

            # Verify that clear_setting_cache.delay was called with ['LICENSE']
            mock_clear_cache.delay.assert_any_call(['LICENSE'])

    def test_config_delete_clears_cache(self, admin_user, delete):
        """Test that DELETE /api/v2/config/ clears the LICENSE cache"""

        with patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:

            # Make the DELETE request
            url = reverse('api:api_v2_config_view')
            delete(url, admin_user, expect=204)

            # Verify on_commit was called at least once
            assert mock_on_commit.call_count >= 1

            # Execute all on_commit callbacks to trigger cache clearing
            for call_args in mock_on_commit.call_args_list:
                callback = call_args[0][0]
                callback()

            mock_clear_cache.delay.assert_called_once_with(['LICENSE'])

    def test_attach_view_clears_cache(self, admin_user, post):
        """Test that posting to /api/v2/config/attach/ clears the LICENSE cache"""

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch(
            'django.db.connection.on_commit'
        ) as mock_on_commit, patch('awx.api.views.root.settings') as mock_settings:

            # Set up subscription credentials in settings
            mock_settings.SUBSCRIPTIONS_CLIENT_ID = 'test-client-id'
            mock_settings.SUBSCRIPTIONS_CLIENT_SECRET = 'test-client-secret'

            # Set up mock licenser with validated subscriptions
            mock_licenser = MagicMock()
            subscription_data = {'subscription_id': 'test-subscription-123', 'valid_key': False, 'license_type': 'enterprise', 'instance_count': 50}
            mock_licenser.validate_rh.return_value = [subscription_data]
            mock_get_licenser.return_value = mock_licenser

            # Prepare request data
            request_data = {'subscription_id': 'test-subscription-123'}

            # Make the POST request
            url = reverse('api:api_v2_attach_view')
            response = post(url, request_data, admin_user, expect=200)

            # Verify the response includes valid_key=True
            assert response.data['valid_key'] is True
            assert response.data['subscription_id'] == 'test-subscription-123'

            # Verify settings.LICENSE was set
            expected_license = subscription_data.copy()
            expected_license['valid_key'] = True
            assert mock_settings.LICENSE == expected_license

            # Verify cache clearing was scheduled
            mock_on_commit.assert_called_once()
            call_args = mock_on_commit.call_args[0][0]  # Get the lambda function

            # Execute the lambda to verify it calls clear_setting_cache
            call_args()
            mock_clear_cache.delay.assert_called_once_with(['LICENSE'])

    def test_attach_view_subscription_not_found_no_cache_clear(self, admin_user, post):
        """Test that attach view doesn't clear cache when subscription is not found"""

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.clear_setting_cache') as mock_clear_cache, patch(
            'django.db.connection.on_commit'
        ) as mock_on_commit:

            # Set up mock licenser with different subscription
            mock_licenser = MagicMock()
            subscription_data = {'subscription_id': 'different-subscription-456', 'valid_key': False, 'license_type': 'enterprise'}  # Different ID
            mock_licenser.validate_rh.return_value = [subscription_data]
            mock_get_licenser.return_value = mock_licenser

            # Request data with non-matching subscription ID
            request_data = {
                'subscription_id': 'test-subscription-123',  # This won't match
            }

            # Make the POST request
            url = reverse('api:api_v2_attach_view')
            response = post(url, request_data, admin_user, expect=400)

            # Verify error response
            assert 'error' in response.data

            # Verify cache clearing was NOT called (no matching subscription)
            mock_on_commit.assert_not_called()
            mock_clear_cache.delay.assert_not_called()

    def test_manifest_validation_error_no_cache_clear(self, admin_user, post):
        """Test that config view doesn't clear cache when manifest validation fails"""

        with patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
            'awx.api.views.root.clear_setting_cache'
        ) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:

            # Mock validation to raise ValueError
            mock_validate.side_effect = ValueError("Invalid manifest")

            # Prepare request data
            manifest_data = {'manifest': 'aW52YWxpZC1tYW5pZmVzdA=='}  # base64 for "invalid-manifest"

            # Make the POST request
            url = reverse('api:api_v2_config_view')
            response = post(url, manifest_data, admin_user, expect=400)

            # Verify error response
            assert response.data['error'] == 'Invalid manifest'

            # Verify cache clearing was NOT called (validation failed)
            mock_on_commit.assert_not_called()
            mock_clear_cache.delay.assert_not_called()

    def test_license_processing_error_no_cache_clear(self, admin_user, post):
        """Test that config view doesn't clear cache when license processing fails"""

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser, patch('awx.api.views.root.validate_entitlement_manifest') as mock_validate, patch(
            'awx.api.views.root.clear_setting_cache'
        ) as mock_clear_cache, patch('django.db.connection.on_commit') as mock_on_commit:

            # Mock validation to succeed but license processing to fail
            mock_validate.return_value = [{'some': 'manifest_data'}]
            mock_licenser = MagicMock()
            mock_licenser.license_from_manifest.side_effect = Exception("License processing failed")
            mock_get_licenser.return_value = mock_licenser

            # Prepare request data
            manifest_data = {'manifest': 'ZmFrZS1tYW5pZmVzdA=='}  # base64 for "fake-manifest"

            # Make the POST request
            url = reverse('api:api_v2_config_view')
            response = post(url, manifest_data, admin_user, expect=400)

            # Verify error response
            assert response.data['error'] == 'Invalid License'

            # Verify cache clearing was NOT called (license processing failed)
            mock_on_commit.assert_not_called()
            mock_clear_cache.delay.assert_not_called()
|
|
||||||
@@ -1,244 +0,0 @@
|
|||||||
from unittest.mock import patch, MagicMock
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
from awx.api.versioning import reverse
|
|
||||||
from rest_framework import status
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
class TestApiV2SubscriptionView:
    """Test cases for the /api/v2/config/subscriptions/ endpoint.

    validate_rh is asserted with positional args (identifier, secret,
    basic_auth): basic_auth=True for username/password credentials and
    basic_auth=False for service-account (client id/secret) credentials.
    """

    def test_basic_auth(self, post, admin):
        """Test POST with subscriptions_username and subscriptions_password calls validate_rh with basic_auth=True"""
        data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            mock_licenser.validate_rh.assert_called_once_with('test_user', 'test_password', True)

    def test_service_account(self, post, admin):
        """Test POST with subscriptions_client_id and subscriptions_client_secret calls validate_rh with basic_auth=False"""
        data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': 'test_client_secret'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'test_client_secret', False)

    def test_encrypted_password_basic_auth(self, post, admin, settings):
        """Test POST with $encrypted$ password uses settings value for basic auth"""
        # The '$encrypted$' placeholder makes the view fall back to the
        # secret already stored in settings (asserted below).
        data = {'subscriptions_username': 'test_user', 'subscriptions_password': '$encrypted$'}

        settings.SUBSCRIPTIONS_PASSWORD = 'actual_password_from_settings'

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            mock_licenser.validate_rh.assert_called_once_with('test_user', 'actual_password_from_settings', True)

    def test_encrypted_client_secret_service_account(self, post, admin, settings):
        """Test POST with $encrypted$ client_secret uses settings value for service_account"""
        data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': '$encrypted$'}

        settings.SUBSCRIPTIONS_CLIENT_SECRET = 'actual_secret_from_settings'

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'actual_secret_from_settings', False)

    def test_missing_username_returns_error(self, post, admin):
        """Test POST with missing username returns 400 error"""
        data = {'subscriptions_password': 'test_password'}

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_missing_password_returns_error(self, post, admin, settings):
        """Test POST with missing password returns 400 error"""
        data = {'subscriptions_username': 'test_user'}
        # Clear the stored password so there is nothing to fall back to.
        settings.SUBSCRIPTIONS_PASSWORD = None

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_missing_client_id_returns_error(self, post, admin):
        """Test POST with missing client_id returns 400 error"""
        data = {'subscriptions_client_secret': 'test_secret'}

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_missing_client_secret_returns_error(self, post, admin, settings):
        """Test POST with missing client_secret returns 400 error"""
        data = {'subscriptions_client_id': 'test_client_id'}
        settings.SUBSCRIPTIONS_CLIENT_SECRET = None

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_empty_username_returns_error(self, post, admin):
        """Test POST with empty username returns 400 error"""
        data = {'subscriptions_username': '', 'subscriptions_password': 'test_password'}

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_empty_password_returns_error(self, post, admin, settings):
        """Test POST with empty password returns 400 error"""
        data = {'subscriptions_username': 'test_user', 'subscriptions_password': ''}
        settings.SUBSCRIPTIONS_PASSWORD = None

        response = post(reverse('api:api_v2_subscription_view'), data, admin)

        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert 'Missing subscription credentials' in response.data['error']

    def test_non_superuser_permission_denied(self, post, rando):
        """Test that non-superuser cannot access the endpoint"""
        data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}

        response = post(reverse('api:api_v2_subscription_view'), data, rando)

        assert response.status_code == status.HTTP_403_FORBIDDEN

    def test_settings_updated_on_successful_basic_auth(self, post, admin, settings):
        """Test that settings are updated when basic auth validation succeeds"""
        data = {'subscriptions_username': 'new_username', 'subscriptions_password': 'new_password'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            assert settings.SUBSCRIPTIONS_USERNAME == 'new_username'
            assert settings.SUBSCRIPTIONS_PASSWORD == 'new_password'

    def test_settings_updated_on_successful_service_account(self, post, admin, settings):
        """Test that settings are updated when service account validation succeeds"""
        data = {'subscriptions_client_id': 'new_client_id', 'subscriptions_client_secret': 'new_client_secret'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            assert settings.SUBSCRIPTIONS_CLIENT_ID == 'new_client_id'
            assert settings.SUBSCRIPTIONS_CLIENT_SECRET == 'new_client_secret'

    def test_validate_rh_exception_handling(self, post, admin):
        """Test that exceptions from validate_rh are properly handled"""
        data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.side_effect = Exception("Connection error")
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_400_BAD_REQUEST

    def test_mixed_credentials_prioritizes_client_id(self, post, admin):
        """Test that when both username and client_id are provided, client_id takes precedence"""
        data = {
            'subscriptions_username': 'test_user',
            'subscriptions_password': 'test_password',
            'subscriptions_client_id': 'test_client_id',
            'subscriptions_client_secret': 'test_client_secret',
        }

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            # Should use service account (basic_auth=False) since client_id is present
            mock_licenser.validate_rh.assert_called_once_with('test_client_id', 'test_client_secret', False)

    def test_basic_auth_clears_service_account_settings(self, post, admin, settings):
        """Test that setting basic auth credentials clears service account settings"""
        # Pre-populate service account settings
        settings.SUBSCRIPTIONS_CLIENT_ID = 'existing_client_id'
        settings.SUBSCRIPTIONS_CLIENT_SECRET = 'existing_client_secret'

        data = {'subscriptions_username': 'test_user', 'subscriptions_password': 'test_password'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            # Basic auth settings should be set
            assert settings.SUBSCRIPTIONS_USERNAME == 'test_user'
            assert settings.SUBSCRIPTIONS_PASSWORD == 'test_password'
            # Service account settings should be cleared
            assert settings.SUBSCRIPTIONS_CLIENT_ID == ""
            assert settings.SUBSCRIPTIONS_CLIENT_SECRET == ""

    def test_service_account_clears_basic_auth_settings(self, post, admin, settings):
        """Test that setting service account credentials clears basic auth settings"""
        # Pre-populate basic auth settings
        settings.SUBSCRIPTIONS_USERNAME = 'existing_username'
        settings.SUBSCRIPTIONS_PASSWORD = 'existing_password'

        data = {'subscriptions_client_id': 'test_client_id', 'subscriptions_client_secret': 'test_client_secret'}

        with patch('awx.api.views.root.get_licenser') as mock_get_licenser:
            mock_licenser = MagicMock()
            mock_licenser.validate_rh.return_value = []
            mock_get_licenser.return_value = mock_licenser

            response = post(reverse('api:api_v2_subscription_view'), data, admin)

            assert response.status_code == status.HTTP_200_OK
            # Service account settings should be set
            assert settings.SUBSCRIPTIONS_CLIENT_ID == 'test_client_id'
            assert settings.SUBSCRIPTIONS_CLIENT_SECRET == 'test_client_secret'
            # Basic auth settings should be cleared
            assert settings.SUBSCRIPTIONS_USERNAME == ""
            assert settings.SUBSCRIPTIONS_PASSWORD == ""
|
|
||||||
@@ -5,6 +5,10 @@ import pytest
|
|||||||
|
|
||||||
from django.contrib.sessions.middleware import SessionMiddleware
|
from django.contrib.sessions.middleware import SessionMiddleware
|
||||||
from django.test.utils import override_settings
|
from django.test.utils import override_settings
|
||||||
|
from django.contrib.auth.models import AnonymousUser
|
||||||
|
|
||||||
|
from ansible_base.lib.utils.response import get_relative_url
|
||||||
|
from ansible_base.lib.testing.fixtures import settings_override_mutable # NOQA: F401 imported to be a pytest fixture
|
||||||
|
|
||||||
from awx.main.models import User
|
from awx.main.models import User
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
@@ -17,6 +21,33 @@ from awx.api.versioning import reverse
|
|||||||
EXAMPLE_USER_DATA = {"username": "affable", "first_name": "a", "last_name": "a", "email": "a@a.com", "is_superuser": False, "password": "r$TyKiOCb#ED"}
|
EXAMPLE_USER_DATA = {"username": "affable", "first_name": "a", "last_name": "a", "email": "a@a.com", "is_superuser": False, "password": "r$TyKiOCb#ED"}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.django_db
|
||||||
|
def test_validate_local_user(post, admin_user, settings, settings_override_mutable): # NOQA: F811 this is how you use a pytest fixture
|
||||||
|
"Copy of the test by same name in django-ansible-base for integration and compatibility testing"
|
||||||
|
url = get_relative_url('validate-local-account')
|
||||||
|
admin_user.set_password('password')
|
||||||
|
admin_user.save()
|
||||||
|
data = {
|
||||||
|
"username": admin_user.username,
|
||||||
|
"password": "password",
|
||||||
|
}
|
||||||
|
with override_settings(RESOURCE_SERVER={"URL": "https://foo.invalid", "SECRET_KEY": "foobar"}):
|
||||||
|
response = post(url=url, data=data, user=AnonymousUser(), expect=200)
|
||||||
|
|
||||||
|
assert 'ansible_id' in response.data
|
||||||
|
assert response.data['auth_code'] is not None, response.data
|
||||||
|
|
||||||
|
# No resource server, return coherent response but can not provide auth code
|
||||||
|
response = post(url=url, data=data, user=AnonymousUser(), expect=200)
|
||||||
|
assert 'ansible_id' in response.data
|
||||||
|
assert response.data['auth_code'] is None
|
||||||
|
|
||||||
|
# wrong password
|
||||||
|
data['password'] = 'foobar'
|
||||||
|
response = post(url=url, data=data, user=AnonymousUser(), expect=401)
|
||||||
|
# response.data may be none here, this is just testing that we get no server error
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_user_create(post, admin):
|
def test_user_create(post, admin):
|
||||||
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
|
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||||
@@ -258,19 +289,3 @@ def test_user_verify_attribute_created(admin, get):
|
|||||||
for op, count in (('gt', 1), ('lt', 0)):
|
for op, count in (('gt', 1), ('lt', 0)):
|
||||||
resp = get(reverse('api:user_list') + f'?created__{op}={past}', admin)
|
resp = get(reverse('api:user_list') + f'?created__{op}={past}', admin)
|
||||||
assert resp.data['count'] == count
|
assert resp.data['count'] == count
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_org_not_shown_in_admin_user_sublists(admin_user, get, organization):
|
|
||||||
for view_name in ('user_admin_of_organizations_list', 'user_organizations_list'):
|
|
||||||
url = reverse(f'api:{view_name}', kwargs={'pk': admin_user.pk})
|
|
||||||
r = get(url, user=admin_user, expect=200)
|
|
||||||
assert organization.pk not in [org['id'] for org in r.data['results']]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_admin_user_not_shown_in_org_users(admin_user, get, organization):
|
|
||||||
for view_name in ('organization_users_list', 'organization_admins_list'):
|
|
||||||
url = reverse(f'api:{view_name}', kwargs={'pk': organization.pk})
|
|
||||||
r = get(url, user=admin_user, expect=200)
|
|
||||||
assert admin_user.pk not in [u['id'] for u in r.data['results']]
|
|
||||||
|
|||||||
@@ -34,18 +34,40 @@ def test_wrapup_does_send_notifications(mocker):
|
|||||||
mock.assert_called_once_with('succeeded')
|
mock.assert_called_once_with('succeeded')
|
||||||
|
|
||||||
|
|
||||||
|
class FakeRedis:
|
||||||
|
def keys(self, *args, **kwargs):
|
||||||
|
return []
|
||||||
|
|
||||||
|
def set(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get(self):
|
||||||
|
return None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_url(cls, *args, **kwargs):
|
||||||
|
return cls()
|
||||||
|
|
||||||
|
def pipeline(self):
|
||||||
|
return self
|
||||||
|
|
||||||
|
|
||||||
class TestCallbackBrokerWorker(TransactionTestCase):
|
class TestCallbackBrokerWorker(TransactionTestCase):
|
||||||
@pytest.fixture(autouse=True)
|
@pytest.fixture(autouse=True)
|
||||||
def turn_off_websockets_and_redis(self, fake_redis):
|
def turn_off_websockets(self):
|
||||||
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
|
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
|
||||||
yield
|
yield
|
||||||
|
|
||||||
|
def get_worker(self):
|
||||||
|
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
||||||
|
return CallbackBrokerWorker()
|
||||||
|
|
||||||
def event_create_kwargs(self):
|
def event_create_kwargs(self):
|
||||||
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
|
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
|
||||||
return dict(inventory_update=inventory_update, created=inventory_update.created)
|
return dict(inventory_update=inventory_update, created=inventory_update.created)
|
||||||
|
|
||||||
def test_flush_with_valid_event(self):
|
def test_flush_with_valid_event(self):
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||||
worker.buff = {InventoryUpdateEvent: events}
|
worker.buff = {InventoryUpdateEvent: events}
|
||||||
worker.flush()
|
worker.flush()
|
||||||
@@ -53,7 +75,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
|||||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
|
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
|
||||||
|
|
||||||
def test_flush_with_invalid_event(self):
|
def test_flush_with_invalid_event(self):
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
kwargs = self.event_create_kwargs()
|
kwargs = self.event_create_kwargs()
|
||||||
events = [
|
events = [
|
||||||
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
|
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
|
||||||
@@ -68,7 +90,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
|||||||
assert worker.buff == {InventoryUpdateEvent: [events[1]]}
|
assert worker.buff == {InventoryUpdateEvent: [events[1]]}
|
||||||
|
|
||||||
def test_duplicate_key_not_saved_twice(self):
|
def test_duplicate_key_not_saved_twice(self):
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||||
worker.flush()
|
worker.flush()
|
||||||
@@ -82,7 +104,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
|||||||
assert worker.buff.get(InventoryUpdateEvent, []) == []
|
assert worker.buff.get(InventoryUpdateEvent, []) == []
|
||||||
|
|
||||||
def test_give_up_on_bad_event(self):
|
def test_give_up_on_bad_event(self):
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
|
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
|
||||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||||
|
|
||||||
@@ -95,7 +117,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
|||||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
|
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
|
||||||
|
|
||||||
def test_flush_with_empty_buffer(self):
|
def test_flush_with_empty_buffer(self):
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
worker.buff = {InventoryUpdateEvent: []}
|
worker.buff = {InventoryUpdateEvent: []}
|
||||||
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
|
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
|
||||||
worker.flush()
|
worker.flush()
|
||||||
@@ -105,7 +127,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
|||||||
# In postgres, text fields reject NUL character, 0x00
|
# In postgres, text fields reject NUL character, 0x00
|
||||||
# tests use sqlite3 which will not raise an error
|
# tests use sqlite3 which will not raise an error
|
||||||
# but we can still test that it is sanitized before saving
|
# but we can still test that it is sanitized before saving
|
||||||
worker = CallbackBrokerWorker()
|
worker = self.get_worker()
|
||||||
kwargs = self.event_create_kwargs()
|
kwargs = self.event_create_kwargs()
|
||||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
|
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
|
||||||
assert "\x00" in events[0].stdout # sanity
|
assert "\x00" in events[0].stdout # sanity
|
||||||
|
|||||||
@@ -1,5 +1,3 @@
|
|||||||
import logging
|
|
||||||
|
|
||||||
# Python
|
# Python
|
||||||
import pytest
|
import pytest
|
||||||
from unittest import mock
|
from unittest import mock
|
||||||
@@ -10,7 +8,7 @@ import importlib
|
|||||||
# Django
|
# Django
|
||||||
from django.urls import resolve
|
from django.urls import resolve
|
||||||
from django.http import Http404
|
from django.http import Http404
|
||||||
from django.apps import apps as global_apps
|
from django.apps import apps
|
||||||
from django.core.handlers.exception import response_for_exception
|
from django.core.handlers.exception import response_for_exception
|
||||||
from django.contrib.auth.models import User
|
from django.contrib.auth.models import User
|
||||||
from django.core.serializers.json import DjangoJSONEncoder
|
from django.core.serializers.json import DjangoJSONEncoder
|
||||||
@@ -49,8 +47,6 @@ from awx.main.models.ad_hoc_commands import AdHocCommand
|
|||||||
from awx.main.models.execution_environments import ExecutionEnvironment
|
from awx.main.models.execution_environments import ExecutionEnvironment
|
||||||
from awx.main.utils import is_testing
|
from awx.main.utils import is_testing
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
__SWAGGER_REQUESTS__ = {}
|
__SWAGGER_REQUESTS__ = {}
|
||||||
|
|
||||||
|
|
||||||
@@ -58,17 +54,8 @@ __SWAGGER_REQUESTS__ = {}
|
|||||||
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
|
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')
|
||||||
|
|
||||||
|
|
||||||
def create_service_id(app_config, apps=global_apps, **kwargs):
|
|
||||||
try:
|
|
||||||
apps.get_model("dab_resource_registry", "ServiceID")
|
|
||||||
except LookupError:
|
|
||||||
logger.info('Looks like reverse migration, not creating resource registry ServiceID')
|
|
||||||
return
|
|
||||||
dab_rr_initial.create_service_id(apps, None)
|
|
||||||
|
|
||||||
|
|
||||||
if is_testing():
|
if is_testing():
|
||||||
post_migrate.connect(create_service_id)
|
post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
@@ -76,33 +63,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
|
|||||||
return requests
|
return requests
|
||||||
|
|
||||||
|
|
||||||
class FakeRedis:
|
|
||||||
def keys(self, *args, **kwargs):
|
|
||||||
return []
|
|
||||||
|
|
||||||
def set(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_url(cls, *args, **kwargs):
|
|
||||||
return cls()
|
|
||||||
|
|
||||||
def pipeline(self):
|
|
||||||
return self
|
|
||||||
|
|
||||||
def ping(self):
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def fake_redis():
|
|
||||||
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
|
||||||
yield
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def user():
|
def user():
|
||||||
def u(name, is_superuser=False):
|
def u(name, is_superuser=False):
|
||||||
@@ -139,7 +99,7 @@ def execution_environment():
|
|||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def setup_managed_roles():
|
def setup_managed_roles():
|
||||||
"Run the migration script to pre-create managed role definitions"
|
"Run the migration script to pre-create managed role definitions"
|
||||||
setup_managed_role_definitions(global_apps, None)
|
setup_managed_role_definitions(apps, None)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
|
|||||||
@@ -1,147 +0,0 @@
|
|||||||
import pytest
|
|
||||||
|
|
||||||
from django.contrib.contenttypes.models import ContentType
|
|
||||||
from django.test import override_settings
|
|
||||||
from django.apps import apps
|
|
||||||
|
|
||||||
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
|
|
||||||
from ansible_base.rbac.migrations._utils import give_permissions
|
|
||||||
|
|
||||||
from awx.main.models import User, Team
|
|
||||||
from awx.main.migrations._dab_rbac import consolidate_indirect_user_roles
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
|
|
||||||
def test_consolidate_indirect_user_roles_with_nested_teams(setup_managed_roles, organization):
|
|
||||||
"""
|
|
||||||
Test the consolidate_indirect_user_roles function with a nested team hierarchy.
|
|
||||||
Setup:
|
|
||||||
- Users: A, B, C, D
|
|
||||||
- Teams: E, F, G
|
|
||||||
- Direct assignments: A→(E,F,G), B→E, C→F, D→G
|
|
||||||
- Team hierarchy: F→E (F is member of E), G→F (G is member of F)
|
|
||||||
Expected result after consolidation:
|
|
||||||
- Team E should have users: A, B, C, D (A directly, B directly, C through F, D through G→F)
|
|
||||||
- Team F should have users: A, C, D (A directly, C directly, D through G)
|
|
||||||
- Team G should have users: A, D (A directly, D directly)
|
|
||||||
"""
|
|
||||||
user_a = User.objects.create_user(username='user_a')
|
|
||||||
user_b = User.objects.create_user(username='user_b')
|
|
||||||
user_c = User.objects.create_user(username='user_c')
|
|
||||||
user_d = User.objects.create_user(username='user_d')
|
|
||||||
|
|
||||||
team_e = Team.objects.create(name='Team E', organization=organization)
|
|
||||||
team_f = Team.objects.create(name='Team F', organization=organization)
|
|
||||||
team_g = Team.objects.create(name='Team G', organization=organization)
|
|
||||||
|
|
||||||
# Get role definition and content type for give_permissions
|
|
||||||
team_member_role = RoleDefinition.objects.get(name='Team Member')
|
|
||||||
team_content_type = ContentType.objects.get_for_model(Team)
|
|
||||||
|
|
||||||
# Assign users to teams
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_e.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_f.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_a], object_id=team_g.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_b], object_id=team_e.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_c], object_id=team_f.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user_d], object_id=team_g.id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Mirror user assignments in the old RBAC system because signals don't run in tests
|
|
||||||
team_e.member_role.members.add(user_a.id, user_b.id)
|
|
||||||
team_f.member_role.members.add(user_a.id, user_c.id)
|
|
||||||
team_g.member_role.members.add(user_a.id, user_d.id)
|
|
||||||
|
|
||||||
# Setup team-to-team relationships
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, teams=[team_f], object_id=team_e.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, teams=[team_g], object_id=team_f.id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Verify initial direct assignments
|
|
||||||
team_e_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_e.id).values_list('user_id', flat=True))
|
|
||||||
assert team_e_users_before == {user_a.id, user_b.id}
|
|
||||||
team_f_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_f.id).values_list('user_id', flat=True))
|
|
||||||
assert team_f_users_before == {user_a.id, user_c.id}
|
|
||||||
team_g_users_before = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_g.id).values_list('user_id', flat=True))
|
|
||||||
assert team_g_users_before == {user_a.id, user_d.id}
|
|
||||||
|
|
||||||
# Verify team-to-team relationships exist
|
|
||||||
assert RoleTeamAssignment.objects.filter(role_definition=team_member_role, team=team_f, object_id=team_e.id).exists()
|
|
||||||
assert RoleTeamAssignment.objects.filter(role_definition=team_member_role, team=team_g, object_id=team_f.id).exists()
|
|
||||||
|
|
||||||
# Run the consolidation function
|
|
||||||
consolidate_indirect_user_roles(apps, None)
|
|
||||||
|
|
||||||
# Verify consolidation
|
|
||||||
team_e_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_e.id).values_list('user_id', flat=True))
|
|
||||||
assert team_e_users_after == {user_a.id, user_b.id, user_c.id, user_d.id}, f"Team E should have users A, B, C, D but has {team_e_users_after}"
|
|
||||||
team_f_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_f.id).values_list('user_id', flat=True))
|
|
||||||
assert team_f_users_after == {user_a.id, user_c.id, user_d.id}, f"Team F should have users A, C, D but has {team_f_users_after}"
|
|
||||||
team_g_users_after = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_g.id).values_list('user_id', flat=True))
|
|
||||||
assert team_g_users_after == {user_a.id, user_d.id}, f"Team G should have users A, D but has {team_g_users_after}"
|
|
||||||
|
|
||||||
# Verify team member changes are mirrored to the old RBAC system
|
|
||||||
assert team_e_users_after == set(team_e.member_role.members.all().values_list('id', flat=True))
|
|
||||||
assert team_f_users_after == set(team_f.member_role.members.all().values_list('id', flat=True))
|
|
||||||
assert team_g_users_after == set(team_g.member_role.members.all().values_list('id', flat=True))
|
|
||||||
|
|
||||||
# Verify team-to-team relationships are removed after consolidation
|
|
||||||
assert not RoleTeamAssignment.objects.filter(
|
|
||||||
role_definition=team_member_role, team=team_f, object_id=team_e.id
|
|
||||||
).exists(), "Team-to-team relationship F→E should be removed"
|
|
||||||
assert not RoleTeamAssignment.objects.filter(
|
|
||||||
role_definition=team_member_role, team=team_g, object_id=team_f.id
|
|
||||||
).exists(), "Team-to-team relationship G→F should be removed"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
|
|
||||||
def test_consolidate_indirect_user_roles_no_team_relationships(setup_managed_roles, organization):
|
|
||||||
"""
|
|
||||||
Test that the function handles the case where there are no team-to-team relationships.
|
|
||||||
It should return early without making any changes.
|
|
||||||
"""
|
|
||||||
# Create a user and team with direct assignment
|
|
||||||
user = User.objects.create_user(username='test_user')
|
|
||||||
team = Team.objects.create(name='Test Team', organization=organization)
|
|
||||||
|
|
||||||
team_member_role = RoleDefinition.objects.get(name='Team Member')
|
|
||||||
team_content_type = ContentType.objects.get_for_model(Team)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user], object_id=team.id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Compare count of assignments before and after consolidation
|
|
||||||
assignments_before = RoleUserAssignment.objects.filter(role_definition=team_member_role).count()
|
|
||||||
consolidate_indirect_user_roles(apps, None)
|
|
||||||
assignments_after = RoleUserAssignment.objects.filter(role_definition=team_member_role).count()
|
|
||||||
|
|
||||||
assert assignments_before == assignments_after, "Number of assignments should not change when there are no team-to-team relationships"
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
@override_settings(ANSIBLE_BASE_ALLOW_TEAM_PARENTS=True)
|
|
||||||
def test_consolidate_indirect_user_roles_circular_reference(setup_managed_roles, organization):
|
|
||||||
"""
|
|
||||||
Test that the function handles circular team references without infinite recursion.
|
|
||||||
"""
|
|
||||||
team_a = Team.objects.create(name='Team A', organization=organization)
|
|
||||||
team_b = Team.objects.create(name='Team B', organization=organization)
|
|
||||||
|
|
||||||
# Create a user assigned to team A
|
|
||||||
user = User.objects.create_user(username='test_user')
|
|
||||||
|
|
||||||
team_member_role = RoleDefinition.objects.get(name='Team Member')
|
|
||||||
team_content_type = ContentType.objects.get_for_model(Team)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, users=[user], object_id=team_a.id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Create circular team relationships: A → B → A
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, teams=[team_b], object_id=team_a.id, content_type_id=team_content_type.id)
|
|
||||||
give_permissions(apps=apps, rd=team_member_role, teams=[team_a], object_id=team_b.id, content_type_id=team_content_type.id)
|
|
||||||
|
|
||||||
# Run the consolidation function - should not raise an exception
|
|
||||||
consolidate_indirect_user_roles(apps, None)
|
|
||||||
|
|
||||||
# Both teams should have the user assigned
|
|
||||||
team_a_users = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_a.id).values_list('user_id', flat=True))
|
|
||||||
team_b_users = set(RoleUserAssignment.objects.filter(role_definition=team_member_role, object_id=team_b.id).values_list('user_id', flat=True))
|
|
||||||
|
|
||||||
assert user.id in team_a_users, "User should be assigned to team A"
|
|
||||||
assert user.id in team_b_users, "User should be assigned to team B"
|
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
|
from django.contrib.contenttypes.models import ContentType
|
||||||
from django.urls import reverse as django_reverse
|
from django.urls import reverse as django_reverse
|
||||||
|
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
@@ -7,14 +8,13 @@ from awx.main.models import JobTemplate, Inventory, Organization
|
|||||||
from awx.main.access import JobTemplateAccess, WorkflowJobTemplateAccess
|
from awx.main.access import JobTemplateAccess, WorkflowJobTemplateAccess
|
||||||
|
|
||||||
from ansible_base.rbac.models import RoleDefinition
|
from ansible_base.rbac.models import RoleDefinition
|
||||||
from ansible_base.rbac import permission_registry
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_managed_roles_created(setup_managed_roles):
|
def test_managed_roles_created(setup_managed_roles):
|
||||||
"Managed RoleDefinitions are created in post_migration signal, we expect to see them here"
|
"Managed RoleDefinitions are created in post_migration signal, we expect to see them here"
|
||||||
for cls in (JobTemplate, Inventory):
|
for cls in (JobTemplate, Inventory):
|
||||||
ct = permission_registry.content_type_model.objects.get_for_model(cls)
|
ct = ContentType.objects.get_for_model(cls)
|
||||||
rds = list(RoleDefinition.objects.filter(content_type=ct))
|
rds = list(RoleDefinition.objects.filter(content_type=ct))
|
||||||
assert len(rds) > 1
|
assert len(rds) > 1
|
||||||
assert f'{cls.__name__} Admin' in [rd.name for rd in rds]
|
assert f'{cls.__name__} Admin' in [rd.name for rd in rds]
|
||||||
@@ -26,20 +26,17 @@ def test_managed_roles_created(setup_managed_roles):
|
|||||||
def test_custom_read_role(admin_user, post, setup_managed_roles):
|
def test_custom_read_role(admin_user, post, setup_managed_roles):
|
||||||
rd_url = django_reverse('roledefinition-list')
|
rd_url = django_reverse('roledefinition-list')
|
||||||
resp = post(
|
resp = post(
|
||||||
url=rd_url,
|
url=rd_url, data={"name": "read role made for test", "content_type": "awx.inventory", "permissions": ['view_inventory']}, user=admin_user, expect=201
|
||||||
data={"name": "read role made for test", "content_type": "awx.inventory", "permissions": ['awx.view_inventory']},
|
|
||||||
user=admin_user,
|
|
||||||
expect=201,
|
|
||||||
)
|
)
|
||||||
rd_id = resp.data['id']
|
rd_id = resp.data['id']
|
||||||
rd = RoleDefinition.objects.get(id=rd_id)
|
rd = RoleDefinition.objects.get(id=rd_id)
|
||||||
assert rd.content_type == permission_registry.content_type_model.objects.get_for_model(Inventory)
|
assert rd.content_type == ContentType.objects.get_for_model(Inventory)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_custom_system_roles_prohibited(admin_user, post):
|
def test_custom_system_roles_prohibited(admin_user, post):
|
||||||
rd_url = django_reverse('roledefinition-list')
|
rd_url = django_reverse('roledefinition-list')
|
||||||
resp = post(url=rd_url, data={"name": "read role made for test", "content_type": None, "permissions": ['awx.view_inventory']}, user=admin_user, expect=400)
|
resp = post(url=rd_url, data={"name": "read role made for test", "content_type": None, "permissions": ['view_inventory']}, user=admin_user, expect=400)
|
||||||
assert 'System-wide roles are not enabled' in str(resp.data)
|
assert 'System-wide roles are not enabled' in str(resp.data)
|
||||||
|
|
||||||
|
|
||||||
@@ -74,7 +71,7 @@ def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
|
|||||||
rd, _ = RoleDefinition.objects.get_or_create(
|
rd, _ = RoleDefinition.objects.get_or_create(
|
||||||
name='inventory-delete',
|
name='inventory-delete',
|
||||||
permissions=['delete_inventory', 'view_inventory', 'change_inventory'],
|
permissions=['delete_inventory', 'view_inventory', 'change_inventory'],
|
||||||
content_type=permission_registry.content_type_model.objects.get_for_model(Inventory),
|
content_type=ContentType.objects.get_for_model(Inventory),
|
||||||
)
|
)
|
||||||
rd.give_permission(rando, inventory)
|
rd.give_permission(rando, inventory)
|
||||||
inv_id = inventory.pk
|
inv_id = inventory.pk
|
||||||
@@ -88,9 +85,7 @@ def test_assign_custom_delete_role(admin_user, rando, inventory, delete, patch):
|
|||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_assign_custom_add_role(admin_user, rando, organization, post, setup_managed_roles):
|
def test_assign_custom_add_role(admin_user, rando, organization, post, setup_managed_roles):
|
||||||
rd, _ = RoleDefinition.objects.get_or_create(
|
rd, _ = RoleDefinition.objects.get_or_create(
|
||||||
name='inventory-add',
|
name='inventory-add', permissions=['add_inventory', 'view_organization'], content_type=ContentType.objects.get_for_model(Organization)
|
||||||
permissions=['add_inventory', 'view_organization'],
|
|
||||||
content_type=permission_registry.content_type_model.objects.get_for_model(Organization),
|
|
||||||
)
|
)
|
||||||
rd.give_permission(rando, organization)
|
rd.give_permission(rando, organization)
|
||||||
url = reverse('api:inventory_list')
|
url = reverse('api:inventory_list')
|
||||||
@@ -151,6 +146,14 @@ def test_assign_credential_to_user_of_another_org(setup_managed_roles, credentia
|
|||||||
post(url=url, data={"user": org_admin.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
|
post(url=url, data={"user": org_admin.id, "role_definition": rd.id, "object_id": credential.id}, user=admin_user, expect=201)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.django_db
|
||||||
|
def test_team_member_role_not_assignable(team, rando, post, admin_user, setup_managed_roles):
|
||||||
|
member_rd = RoleDefinition.objects.get(name='Organization Member')
|
||||||
|
url = django_reverse('roleuserassignment-list')
|
||||||
|
r = post(url, data={'object_id': team.id, 'role_definition': member_rd.id, 'user': rando.id}, user=admin_user, expect=400)
|
||||||
|
assert 'Not managed locally' in str(r.data)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_adding_user_to_org_member_role(setup_managed_roles, organization, admin, bob, post, get):
|
def test_adding_user_to_org_member_role(setup_managed_roles, organization, admin, bob, post, get):
|
||||||
'''
|
'''
|
||||||
@@ -170,17 +173,10 @@ def test_adding_user_to_org_member_role(setup_managed_roles, organization, admin
|
|||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
@pytest.mark.parametrize('actor', ['user', 'team'])
|
@pytest.mark.parametrize('actor', ['user', 'team'])
|
||||||
@pytest.mark.parametrize('role_name', ['Organization Admin', 'Organization Member', 'Team Admin', 'Team Member'])
|
@pytest.mark.parametrize('role_name', ['Organization Admin', 'Organization Member', 'Team Admin', 'Team Member'])
|
||||||
def test_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, organization, team, admin, bob, post):
|
def test_prevent_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, organization, team, admin, bob, post):
|
||||||
'''
|
'''
|
||||||
Allow user to be added to platform-level roles
|
Prevent user or team from being added to platform-level roles
|
||||||
Exceptions:
|
|
||||||
- Team cannot be added to Organization Member or Admin role
|
|
||||||
- Team cannot be added to Team Admin or Team Member role
|
|
||||||
'''
|
'''
|
||||||
if actor == 'team':
|
|
||||||
expect = 400
|
|
||||||
else:
|
|
||||||
expect = 201
|
|
||||||
rd = RoleDefinition.objects.get(name=role_name)
|
rd = RoleDefinition.objects.get(name=role_name)
|
||||||
endpoint = 'roleuserassignment-list' if actor == 'user' else 'roleteamassignment-list'
|
endpoint = 'roleuserassignment-list' if actor == 'user' else 'roleteamassignment-list'
|
||||||
url = django_reverse(endpoint)
|
url = django_reverse(endpoint)
|
||||||
@@ -188,9 +184,37 @@ def test_adding_actor_to_platform_roles(setup_managed_roles, role_name, actor, o
|
|||||||
data = {'object_id': object_id, 'role_definition': rd.id}
|
data = {'object_id': object_id, 'role_definition': rd.id}
|
||||||
actor_id = bob.id if actor == 'user' else team.id
|
actor_id = bob.id if actor == 'user' else team.id
|
||||||
data[actor] = actor_id
|
data[actor] = actor_id
|
||||||
r = post(url, data=data, user=admin, expect=expect)
|
r = post(url, data=data, user=admin, expect=400)
|
||||||
if expect == 400:
|
assert 'Not managed locally' in str(r.data)
|
||||||
if 'Organization' in role_name:
|
|
||||||
assert 'Assigning organization member permission to teams is not allowed' in str(r.data)
|
|
||||||
if 'Team' in role_name:
|
@pytest.mark.django_db
|
||||||
assert 'Assigning team permissions to other teams is not allowed' in str(r.data)
|
@pytest.mark.parametrize('role_name', ['Controller Team Admin', 'Controller Team Member'])
|
||||||
|
def test_adding_user_to_controller_team_roles(setup_managed_roles, role_name, team, admin, bob, post, get):
|
||||||
|
'''
|
||||||
|
Allow user to be added to Controller Team Admin or Controller Team Member
|
||||||
|
'''
|
||||||
|
url_detail = reverse('api:team_detail', kwargs={'pk': team.id})
|
||||||
|
get(url_detail, user=bob, expect=403)
|
||||||
|
|
||||||
|
rd = RoleDefinition.objects.get(name=role_name)
|
||||||
|
url = django_reverse('roleuserassignment-list')
|
||||||
|
post(url, data={'object_id': team.id, 'role_definition': rd.id, 'user': bob.id}, user=admin, expect=201)
|
||||||
|
|
||||||
|
get(url_detail, user=bob, expect=200)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.django_db
|
||||||
|
@pytest.mark.parametrize('role_name', ['Controller Organization Admin', 'Controller Organization Member'])
|
||||||
|
def test_adding_user_to_controller_organization_roles(setup_managed_roles, role_name, organization, admin, bob, post, get):
|
||||||
|
'''
|
||||||
|
Allow user to be added to Controller Organization Admin or Controller Organization Member
|
||||||
|
'''
|
||||||
|
url_detail = reverse('api:organization_detail', kwargs={'pk': organization.id})
|
||||||
|
get(url_detail, user=bob, expect=403)
|
||||||
|
|
||||||
|
rd = RoleDefinition.objects.get(name=role_name)
|
||||||
|
url = django_reverse('roleuserassignment-list')
|
||||||
|
post(url, data={'object_id': organization.id, 'role_definition': rd.id, 'user': bob.id}, user=admin, expect=201)
|
||||||
|
|
||||||
|
get(url, user=bob, expect=200)
|
||||||
|
|||||||
@@ -15,14 +15,6 @@ def test_roles_to_not_create(setup_managed_roles):
|
|||||||
raise Exception(f'Found RoleDefinitions that should not exist: {bad_names}')
|
raise Exception(f'Found RoleDefinitions that should not exist: {bad_names}')
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
|
||||||
def test_org_admin_role(setup_managed_roles):
|
|
||||||
rd = RoleDefinition.objects.get(name='Organization Admin')
|
|
||||||
codenames = list(rd.permissions.values_list('codename', flat=True))
|
|
||||||
assert 'view_inventory' in codenames
|
|
||||||
assert 'change_inventory' in codenames
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
def test_project_update_role(setup_managed_roles):
|
def test_project_update_role(setup_managed_roles):
|
||||||
"""Role to allow updating a project on the object-level should exist"""
|
"""Role to allow updating a project on the object-level should exist"""
|
||||||
@@ -39,18 +31,32 @@ def test_org_child_add_permission(setup_managed_roles):
|
|||||||
assert not DABPermission.objects.filter(codename='add_jobtemplate').exists()
|
assert not DABPermission.objects.filter(codename='add_jobtemplate').exists()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.django_db
|
||||||
|
def test_controller_specific_roles_have_correct_permissions(setup_managed_roles):
|
||||||
|
'''
|
||||||
|
Controller specific roles should have the same permissions as the platform roles
|
||||||
|
e.g. Controller Team Admin should have same permission set as Team Admin
|
||||||
|
'''
|
||||||
|
for rd_name in ['Controller Team Admin', 'Controller Team Member', 'Controller Organization Member', 'Controller Organization Admin']:
|
||||||
|
rd = RoleDefinition.objects.get(name=rd_name)
|
||||||
|
rd_platform = RoleDefinition.objects.get(name=rd_name.split('Controller ')[1])
|
||||||
|
assert set(rd.permissions.all()) == set(rd_platform.permissions.all())
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.django_db
|
@pytest.mark.django_db
|
||||||
@pytest.mark.parametrize('resource_name', ['Team', 'Organization'])
|
@pytest.mark.parametrize('resource_name', ['Team', 'Organization'])
|
||||||
@pytest.mark.parametrize('action', ['Member', 'Admin'])
|
@pytest.mark.parametrize('action', ['Member', 'Admin'])
|
||||||
def test_legacy_RBAC_uses_platform_roles(setup_managed_roles, resource_name, action, team, bob, organization):
|
def test_legacy_RBAC_uses_controller_specific_roles(setup_managed_roles, resource_name, action, team, bob, organization):
|
||||||
'''
|
'''
|
||||||
Assignment to legacy RBAC roles should use platform role definitions
|
Assignment to legacy RBAC roles should use controller specific role definitions
|
||||||
e.g. Team Admin, Team Member, Organization Member, Organization Admin
|
e.g. Controller Team Admin, Controller Team Member, Controller Organization Member, Controller Organization Admin
|
||||||
'''
|
'''
|
||||||
resource = team if resource_name == 'Team' else organization
|
resource = team if resource_name == 'Team' else organization
|
||||||
if action == 'Member':
|
if action == 'Member':
|
||||||
resource.member_role.members.add(bob)
|
resource.member_role.members.add(bob)
|
||||||
else:
|
else:
|
||||||
resource.admin_role.members.add(bob)
|
resource.admin_role.members.add(bob)
|
||||||
rd = RoleDefinition.objects.get(name=f'{resource_name} {action}')
|
rd = RoleDefinition.objects.get(name=f'Controller {resource_name} {action}')
|
||||||
|
rd_platform = RoleDefinition.objects.get(name=f'{resource_name} {action}')
|
||||||
assert RoleUserAssignment.objects.filter(role_definition=rd, user=bob, object_id=resource.id).exists()
|
assert RoleUserAssignment.objects.filter(role_definition=rd, user=bob, object_id=resource.id).exists()
|
||||||
|
assert not RoleUserAssignment.objects.filter(role_definition=rd_platform, user=bob, object_id=resource.id).exists()
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user