Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: test-ansib ... pin-ansibl (90 commits)
| SHA1 |
|---|
| 0928571777 |
| 7977e8639c |
| bf0567ca41 |
| d6482d3898 |
| 20b203ea8e |
| 1330a1b353 |
| 022314b542 |
| 3db2e04efe |
| db874f5aea |
| c975b1aa22 |
| d005402205 |
| 635e947413 |
| 024fe55047 |
| a909083792 |
| 873e6a084c |
| 6182d68b74 |
| 1a4dbcfe2e |
| c449c4c41a |
| 31ee509dd5 |
| 222f387d65 |
| d7ca19f9f0 |
| a655a3f127 |
| 9520c83da9 |
| 144f08f762 |
| 6aea699284 |
| 7ee0aab856 |
| 3eb809696a |
| 7995196cff |
| eb96d5d984 |
| 94764a1f17 |
| c1b6f9a786 |
| 32bbf3a0c3 |
| c76ae8a2ac |
| 6accd1e5e6 |
| 01eb162378 |
| 20a512bdd9 |
| f734d8bf19 |
| 872349ac75 |
| 6377824af5 |
| 537850c650 |
| 0d85dc5fc5 |
| 2ba6603436 |
| 21c463c0dd |
| c3bf843ad7 |
| de4e707bb2 |
| 95289ff28c |
| 000f6b0708 |
| c799d51ec8 |
| db6e8b9bad |
| 483417762f |
| 49240ca8e8 |
| 5ff3d4b2fc |
| 3f96ea17d6 |
| f59ad4f39c |
| c3ee0c2d8a |
| 7a3010f0e6 |
| 05dc9bad1c |
| 38f0f8d45f |
| d3ee9a1bfd |
| 438aa463d5 |
| 51f9160654 |
| ac3123a2ac |
| c4ee5127c5 |
| 9ec7540c4b |
| 2389fc691e |
| 567f5a2476 |
| e837535396 |
| 1d57f1c355 |
| 7676f14114 |
| 182e5cfaa4 |
| 99be91e939 |
| 9ff163b919 |
| 5d0d0404c7 |
| 5d53821ce5 |
| 39cd09ce19 |
| cd0e27446a |
| 628a0e6a36 |
| 8fb5862223 |
| 6f7d5ca8a3 |
| 0f0f5aa289 |
| bc12fa2283 |
| 03b37037d6 |
| 5668973d70 |
| e6434454ce |
| 3ba9c026ea |
| a206ca22ec |
| e961cbe46f |
| 0ffe04ed9c |
| ee739b5fd9 |
| abc04e5c88 |
@@ -2,7 +2,7 @@
codecov:
notify:
after_n_builds: 6 # Number of test matrix+lint jobs uploading coverage
after_n_builds: 9 # Number of test matrix+lint jobs uploading coverage
wait_for_ci: false
require_ci_to_pass: false
.coveragerc (19 changed lines)

@@ -17,8 +17,27 @@ exclude_also =
[run]
branch = True
# NOTE: `disable_warnings` is needed when `pytest-cov` runs in tandem
# NOTE: with `pytest-xdist`. These warnings are false negative in this
# NOTE: context.
#
# NOTE: It's `coveragepy` that emits the warnings and previously they
# NOTE: wouldn't get on the radar of `pytest`'s `filterwarnings`
# NOTE: mechanism. This changed, however, with `pytest >= 8.4`. And
# NOTE: since we set `filterwarnings = error`, those warnings are being
# NOTE: raised as exceptions, cascading into `pytest`'s internals and
# NOTE: causing tracebacks and crashes of the test sessions.
#
# Ref:
# * https://github.com/pytest-dev/pytest-cov/issues/693
# * https://github.com/pytest-dev/pytest-cov/pull/695
# * https://github.com/pytest-dev/pytest-cov/pull/696
disable_warnings =
    module-not-measured
omit =
    awx/main/migrations/*
    awx/settings/defaults.py
    awx/settings/*_defaults.py
source =
    .
source_pkgs =
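The NOTE block above explains why `disable_warnings = module-not-measured` is needed. A minimal sketch of the underlying mechanism, not part of the diff, using only the standard `warnings` module (the plain `UserWarning` here is a stand-in for the `CoverageWarning` that coverage.py emits under `pytest-xdist`):

```python
# Illustration only: with an "error" warnings filter, any warning emitted
# during the run is raised as an exception, which is how an otherwise
# harmless coverage warning can crash a test session.
import warnings

warnings.simplefilter("error")  # analogous to pytest's `filterwarnings = error`

try:
    # Stand-in for coverage.py's "module-not-measured" warning.
    warnings.warn("Module awx was never imported (module-not-measured)")
except UserWarning as exc:
    print(f"escalated to an exception: {exc}")
```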
.github/PULL_REQUEST_TEMPLATE.md (8 changed lines)

@@ -4,7 +4,8 @@
<!---
If you are fixing an existing issue, please include "related #nnn" in your
commit message and your description; but you should still explain what
the change does.
the change does. Also please make sure that if this PR has an attached JIRA, put AAP-<number>
in as the first entry for your PR title.
-->

##### ISSUE TYPE

@@ -22,11 +23,6 @@ the change does.
- Docs
- Other

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
```
```

##### ADDITIONAL INFORMATION
.github/actions/awx_devel_image/action.yml (25 changed lines)

@@ -11,9 +11,7 @@ inputs:
runs:
using: composite
steps:
- name: Get python version from Makefile
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- uses: ./.github/actions/setup-python

- name: Set lower case owner name
shell: bash

@@ -26,26 +24,9 @@ runs:
run: |
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ inputs.private-github-key }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ inputs.private-github-key }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi

- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
- uses: ./.github/actions/setup-ssh-agent
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
ssh-private-key: ${{ inputs.private-github-key }}

- name: Pre-pull latest devel image to warm cache
shell: bash
.github/actions/run_awx_devel/action.yml (2 changed lines)

@@ -36,7 +36,7 @@ runs:
- name: Upgrade ansible-core
shell: bash
run: python3 -m pip install --upgrade ansible-core
run: python3 -m pip install --upgrade 'ansible-core<2.18'

- name: Install system deps
shell: bash
.github/actions/setup-python/action.yml (27 changed lines, new file)

@@ -0,0 +1,27 @@
name: 'Setup Python from Makefile'
description: 'Extract and set up Python version from Makefile'
inputs:
  python-version:
    description: 'Override Python version (optional)'
    required: false
    default: ''
  working-directory:
    description: 'Directory containing the Makefile'
    required: false
    default: '.'
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: |
        if [ -n "${{ inputs.python-version }}" ]; then
          echo "py_version=${{ inputs.python-version }}" >> $GITHUB_ENV
        else
          cd ${{ inputs.working-directory }}
          echo "py_version=`make PYTHON_VERSION`" >> $GITHUB_ENV
        fi
    - name: Install python
      uses: actions/setup-python@v5
      with:
        python-version: ${{ env.py_version }}
.github/actions/setup-ssh-agent/action.yml (29 changed lines, new file)

@@ -0,0 +1,29 @@
name: 'Setup SSH for GitHub'
description: 'Configure SSH for private repository access'
inputs:
  ssh-private-key:
    description: 'SSH private key for repository access'
    required: false
    default: ''
runs:
  using: composite
  steps:
    - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
      id: generate_key
      shell: bash
      run: |
        if [[ -z "${{ inputs.ssh-private-key }}" ]]; then
          ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        else
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          echo "${{ inputs.ssh-private-key }}" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        fi

    - name: Add private GitHub key to SSH agent
      uses: webfactory/ssh-agent@v0.9.0
      with:
        ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
.github/workflows/ci.yml (100 changed lines)

@@ -130,7 +130,7 @@ jobs:
with:
show-progress: false

- uses: actions/setup-python@v5
- uses: ./.github/actions/setup-python
with:
python-version: '3.x'

@@ -161,6 +161,10 @@ jobs:
show-progress: false
path: awx

- uses: ./awx/.github/actions/setup-ssh-agent
with:
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

- name: Checkout awx-operator
uses: actions/checkout@v4
with:

@@ -168,39 +172,14 @@ jobs:
repository: ansible/awx-operator
path: awx-operator

- name: Get python version from Makefile
working-directory: awx
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
- uses: ./awx/.github/actions/setup-python
with:
python-version: ${{ env.py_version }}
working-directory: awx

- name: Install playbook dependencies
run: |
python3 -m pip install docker

- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi

- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}

- name: Build AWX image
working-directory: awx
run: |

@@ -299,7 +278,7 @@ jobs:
with:
show-progress: false

- uses: actions/setup-python@v5
- uses: ./.github/actions/setup-python
with:
python-version: '3.x'

@@ -356,6 +335,7 @@ jobs:
with:
name: coverage-${{ matrix.target-regex.name }}
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
retention-days: 1

- uses: ./.github/actions/upload_awx_devel_logs
if: always()

@@ -373,32 +353,22 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
show-progress: false

- uses: actions/setup-python@v5
- uses: ./.github/actions/setup-python
with:
python-version: '3.x'

- name: Upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core
run: python3 -m pip install --upgrade 'ansible-core<2.18'

- name: Download coverage artifacts A to H
- name: Download coverage artifacts
uses: actions/download-artifact@v4
with:
name: coverage-a-h
path: coverage

- name: Download coverage artifacts I to P
uses: actions/download-artifact@v4
with:
name: coverage-i-p
path: coverage

- name: Download coverage artifacts Z to Z
uses: actions/download-artifact@v4
with:
name: coverage-r-z0-9
merge-multiple: true
path: coverage
pattern: coverage-*

- name: Combine coverage
run: |

@@ -416,46 +386,6 @@ jobs:
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY

# This is a huge hack, there's no official action for removing artifacts currently.
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
# steps, so we have to use github-script to get them.
#
# The advantage of doing this, though, is that we save on artifact storage space.

- name: Get secret artifact runtime URL
uses: actions/github-script@v6
id: get-runtime-url
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_URL } = process.env;
return ACTIONS_RUNTIME_URL;

- name: Get secret artifact runtime token
uses: actions/github-script@v6
id: get-runtime-token
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_TOKEN } = process.env;
return ACTIONS_RUNTIME_TOKEN;

- name: Remove intermediary artifacts
env:
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
run: |
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
artifacts=$(
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
)

for artifact in $artifacts; do
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
done

- name: Upload coverage report as artifact
uses: actions/upload-artifact@v4
with:
.github/workflows/devel_images.yml (26 changed lines)

@@ -49,14 +49,10 @@ jobs:
run: |
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.py_version }}
- uses: ./.github/actions/setup-python

- name: Log in to registry
run: |

@@ -73,25 +69,9 @@ jobs:
make ui
if: matrix.build-targets.image-name == 'awx'

- name: Generate placeholder SSH private key if SSH auth for private repos is not needed
id: generate_key
shell: bash
run: |
if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
else
echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
fi

- name: Add private GitHub key to SSH agent
uses: webfactory/ssh-agent@v0.9.0
- uses: ./.github/actions/setup-ssh-agent
with:
ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

- name: Build and push AWX devel images
run: |
.github/workflows/docs.yml (2 changed lines)

@@ -12,7 +12,7 @@ jobs:
with:
show-progress: false

- uses: actions/setup-python@v5
- uses: ./.github/actions/setup-python
with:
python-version: '3.x'
.github/workflows/label_issue.yml (4 changed lines)

@@ -34,9 +34,11 @@ jobs:
with:
show-progress: false

- uses: actions/setup-python@v4
- uses: ./.github/actions/setup-python

- name: Install python requests
run: pip install requests

- name: Check if user is a member of Ansible org
uses: jannekem/run-python-script-action@v1
id: check_user
.github/workflows/label_pr.yml (2 changed lines)

@@ -33,7 +33,7 @@ jobs:
with:
show-progress: false

- uses: actions/setup-python@v5
- uses: ./.github/actions/setup-python
with:
python-version: '3.x'
.github/workflows/promote.yml (8 changed lines)

@@ -36,13 +36,7 @@ jobs:
with:
show-progress: false

- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.py_version }}
- uses: ./.github/actions/setup-python

- name: Install dependencies
run: |
.github/workflows/stage.yml (9 changed lines)

@@ -64,14 +64,9 @@ jobs:
repository: ansible/awx-logos
path: awx-logos

- name: Get python version from Makefile
working-directory: awx
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
- uses: ./awx/.github/actions/setup-python
with:
python-version: ${{ env.py_version }}
working-directory: awx

- name: Install playbook dependencies
run: |
.github/workflows/upload_schema.yml (15 changed lines)

@@ -5,6 +5,7 @@ env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting

on:
workflow_dispatch:
push:
branches:
- devel

@@ -22,18 +23,16 @@ jobs:
with:
show-progress: false

- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
with:
python-version: ${{ env.py_version }}
- uses: ./.github/actions/setup-python

- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

- uses: ./.github/actions/setup-ssh-agent
with:
ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

- name: Pre-pull image to warm build cache
run: |
docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :

@@ -56,5 +55,3 @@ jobs:
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"
.gitignore (2 changed lines)

@@ -150,6 +150,8 @@ use_dev_supervisor.txt
awx/ui/src
awx/ui/build
awx/ui/.ui-built
awx/ui_next

# Docs build stuff
docs/docsite/build/
Makefile (24 changed lines)

@@ -19,6 +19,12 @@ COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d .
COLLECTION_SANITY_ARGS ?= --docker
# collection unit testing directories
COLLECTION_TEST_DIRS ?= awx_collection/test/awx
# pytest added args to collect coverage
COVERAGE_ARGS ?= --cov --cov-report=xml --junitxml=reports/junit.xml
# pytest test directories
TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
# pytest args to run tests in parallel
PARALLEL_TESTS ?= -n auto
# collection integration test directories (defaults to all)
COLLECTION_TEST_TARGET ?=
# args for collection install

@@ -309,14 +315,14 @@ black: reports
@chmod +x .git/hooks/pre-commit

genschema: reports
$(MAKE) swagger PYTEST_ARGS="--genschema --create-db "
$(MAKE) swagger PYTEST_ADDOPTS="--genschema --create-db "
mv swagger.json schema.json

swagger: reports
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
(set -o pipefail && py.test --cov --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
(set -o pipefail && py.test $(COVERAGE_ARGS) $(PARALLEL_TESTS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \

@@ -334,14 +340,12 @@ api-lint:
awx-link:
[ -d "/awx_devel/awx.egg-info" ] || $(PYTHON) /awx_devel/tools/scripts/egg_info_dev

TEST_DIRS ?= awx/main/tests/unit awx/main/tests/functional awx/conf/tests
PYTEST_ARGS ?= -n auto
## Run all API unit tests.
test:
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PYTEST_ARGS) $(TEST_DIRS)
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider $(PARALLEL_TESTS) $(TEST_DIRS)
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'

@@ -350,7 +354,7 @@ live_test:
## Run all API unit tests with coverage enabled.
test_coverage:
$(MAKE) test PYTEST_ARGS="--create-db --cov --cov-report=xml --junitxml=reports/junit.xml"
$(MAKE) test PYTEST_ADDOPTS="--create-db $(COVERAGE_ARGS)"
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=awxkit/coverage.xml,reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \

@@ -358,7 +362,7 @@ test_coverage:
fi

test_migrations:
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db --cov=awx --cov-report=xml --junitxml=reports/junit.xml $(PYTEST_ARGS) $(TEST_DIRS)
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test --create-db $(PARALLEL_TESTS) $(COVERAGE_ARGS) $(TEST_DIRS)
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \

@@ -374,9 +378,9 @@ test_collection:
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi && \
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install ansible-core; fi
if ! [ -x "$(shell command -v ansible-playbook)" ]; then pip install "ansible-core<2.19"; fi
ansible --version
py.test $(COLLECTION_TEST_DIRS) --cov --cov-report=xml --junitxml=reports/junit.xml -v
py.test $(COLLECTION_TEST_DIRS) $(COVERAGE_ARGS) -v
@if [ "${GITHUB_ACTIONS}" = "true" ]; \
then \
echo 'cov-report-files=reports/coverage.xml' >> "${GITHUB_OUTPUT}"; \

@@ -413,7 +417,7 @@ install_collection: build_collection
test_collection_sanity:
rm -rf awx_collection_build/
rm -rf $(COLLECTION_INSTALL)
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install ansible-core; fi
if ! [ -x "$(shell command -v ansible-test)" ]; then pip install "ansible-core<2.19"; fi
ansible --version
COLLECTION_VERSION=1.0.0 $(MAKE) install_collection
cd $(COLLECTION_INSTALL) && \
README.md (11 changed lines)

@@ -3,6 +3,17 @@
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

> [!CAUTION]
> The last release of this repository was released on Jul 2, 2024.
> **Releases of this project are now paused during a large scale refactoring.**
> For more information, follow [the Forum](https://forum.ansible.com/) and - more specifically - see the various communications on the matter:
>
> * [Blog: Upcoming Changes to the AWX Project](https://www.ansible.com/blog/upcoming-changes-to-the-awx-project/)
> * [Streamlining AWX Releases](https://forum.ansible.com/t/streamlining-awx-releases/6894) Primary update
> * [Refactoring AWX into a Pluggable, Service-Oriented Architecture](https://forum.ansible.com/t/refactoring-awx-into-a-pluggable-service-oriented-architecture/7404)
> * [Upcoming changes to AWX Operator installation methods](https://forum.ansible.com/t/upcoming-changes-to-awx-operator-installation-methods/7598)
> * [AWX UI and credential types transitioning to the new pluggable architecture](https://forum.ansible.com/t/awx-ui-and-credential-types-transitioning-to-the-new-pluggable-architecture/8027)

AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).

To install AWX, please view the [Install guide](./INSTALL.md).
@@ -62,7 +62,8 @@
def prepare_env():
# Update the default settings environment variable based on current mode.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings')
os.environ.setdefault('AWX_MODE', MODE)
# Hide DeprecationWarnings when running in production. Need to first load
# settings to apply our filter after Django's own warnings filter.
from django.conf import settings
@@ -161,7 +161,7 @@ def get_view_description(view, html=False):
def get_default_schema():
if settings.SETTINGS_MODULE == 'awx.settings.development':
if settings.DYNACONF.is_development_mode:
from awx.api.swagger import schema_view

return schema_view
@@ -6,6 +6,8 @@ import copy
import json
import logging
import re
import yaml
import urllib.parse
from collections import Counter, OrderedDict
from datetime import timedelta
from uuid import uuid4

@@ -115,6 +117,7 @@ from awx.main.utils import (
from awx.main.utils.filters import SmartFilter
from awx.main.utils.plugins import load_combined_inventory_source_options
from awx.main.utils.named_url_graph import reset_counters
from awx.main.utils.inventory_vars import update_group_variables
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.signals import update_inventory_computed_fields
@@ -626,15 +629,41 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
return exclusions

def validate(self, attrs):
"""
Apply serializer validation. Called by DRF.

Can be extended by subclasses. Or consider overwriting
`validate_with_obj` in subclasses, which provides access to the model
object and exception handling for field validation.

:param dict attrs: The names and values of the model form fields.
:raise rest_framework.exceptions.ValidationError: If the validation
fails.

The exception must contain a dict with the names of the form fields
which failed validation as keys, and a list of error messages as
values. This ensures that the error messages are rendered near the
relevant fields.
:return: The names and values from the model form fields, possibly
modified by the validations.
:rtype: dict
"""
attrs = super(BaseSerializer, self).validate(attrs)
# Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
# Create a new model instance or take the existing one if it exists,
# and update its attributes with the respective field values from
# attrs.
obj = self.instance or self.Meta.model()
for k, v in attrs.items():
if k not in exclusions and k != 'canonical_address_port':
setattr(obj, k, v)
try:
# Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k, v in attrs.items():
if k not in exclusions and k != 'canonical_address_port':
setattr(obj, k, v)
# Run serializer validators which need the model object for
# validation.
self.validate_with_obj(attrs, obj)
# Apply any validations implemented on the model class.
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.

@@ -663,6 +692,32 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
raise ValidationError(d)
return attrs

def validate_with_obj(self, attrs, obj):
"""
Overwrite this if you need the model instance for your validation.

:param dict attrs: The names and values of the model form fields.
:param obj: An instance of the class's meta model.

If the serializer runs on a newly created object, obj contains only
the attrs from its serializer. If the serializer runs because an
object has been edited, obj is the existing model instance with all
attributes and values available.
:raise django.core.exceptionsValidationError: Raise this if your
validation fails.

To make the error appear at the respective form field, instantiate
the Exception with a dict containing the field name as key and the
error message as value.

Example: ``ValidationError({"password": "Not good enough!"})``

If the exception contains just a string, the message cannot be
related to a field and is rendered at the top of the model form.
:return: None
"""
return

def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
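A minimal sketch, not taken from the diff, of how a subclass could hook into the `validate_with_obj` extension point documented above. `MyModelSerializer` and its `name`/`description` fields are hypothetical and exist only for illustration:

```python
from django.core.exceptions import ValidationError as DjangoValidationError


class MyModelSerializer(BaseSerializer):  # hypothetical subclass
    def validate_with_obj(self, attrs, obj):
        # `obj` is the new or existing model instance with attrs applied,
        # so cross-field checks against stored values are possible here.
        if attrs.get('name') and attrs['name'] == getattr(obj, 'description', None):
            # Keying the error by field name renders it next to that field.
            raise DjangoValidationError({'name': 'Name must differ from the description.'})
```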
@@ -679,7 +734,22 @@ class EmptySerializer(serializers.Serializer):
pass


class UnifiedJobTemplateSerializer(BaseSerializer):
class OpaQueryPathMixin(serializers.Serializer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

def validate_opa_query_path(self, value):
# Decode the URL and re-encode it
decoded_value = urllib.parse.unquote(value)
re_encoded_value = urllib.parse.quote(decoded_value, safe='/')

if value != re_encoded_value:
raise serializers.ValidationError(_("The URL must be properly encoded."))

return value


class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathMixin):
# As a base serializer, the capabilities prefetch is not used directly,
# instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
capabilities_prefetch = []
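A standalone sketch (illustration only; the example paths are made up) of the decode/re-encode round trip that `validate_opa_query_path` uses to reject improperly encoded values:

```python
import urllib.parse


def is_properly_encoded(value: str) -> bool:
    # Decode, then re-encode keeping '/' intact; a properly encoded value
    # survives the round trip unchanged.
    decoded = urllib.parse.unquote(value)
    return value == urllib.parse.quote(decoded, safe='/')


print(is_properly_encoded("policies/awx/allow"))   # True
print(is_properly_encoded("policies/my%20rule"))   # True  (space already encoded)
print(is_properly_encoded("policies/my rule"))     # False (raw space, would raise ValidationError)
```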
@@ -984,7 +1054,6 @@ class UserSerializer(BaseSerializer):
return ret

def validate_password(self, value):
django_validate_password(value)
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))

@@ -1007,6 +1076,50 @@ class UserSerializer(BaseSerializer):
return value

def validate_with_obj(self, attrs, obj):
"""
Validate the password with the Django password validators

To enable the Django password validators, configure
`settings.AUTH_PASSWORD_VALIDATORS` as described in the [Django
docs](https://docs.djangoproject.com/en/5.1/topics/auth/passwords/#enabling-password-validation)

:param dict attrs: The User form field names and their values as a dict.
Example::

{
'username': 'TestUsername', 'first_name': 'FirstName',
'last_name': 'LastName', 'email': 'First.Last@my.org',
'is_superuser': False, 'is_system_auditor': False,
'password': 'secret123'
}

:param obj: The User model instance.
:raises django.core.exceptions.ValidationError: Raise this if at least
one Django password validator fails.

The exception contains a dict ``{"password": <error-message>``}
which indicates that the password field has failed validation, and
the reason for failure.
:return: None.
"""
# We must do this here instead of in `validate_password` bacause some
# django password validators need access to other model instance fields,
# e.g. ``username`` for the ``UserAttributeSimilarityValidator``.
password = attrs.get("password")
# Skip validation if no password has been entered. This may happen when
# an existing User is edited.
if password and password != '$encrypted$':
# Apply validators from settings.AUTH_PASSWORD_VALIDATORS. This may
# raise ValidationError.
#
# If the validation fails, re-raise the exception with adjusted
# content to make the error appear near the password field.
try:
django_validate_password(password, user=obj)
except DjangoValidationError as exc:
raise DjangoValidationError({"password": exc.messages})

def _update_password(self, obj, new_password):
if new_password and new_password != '$encrypted$':
obj.set_password(new_password)

@@ -1069,12 +1182,12 @@ class UserActivityStreamSerializer(UserSerializer):
fields = ('*', '-is_system_auditor')


class OrganizationSerializer(BaseSerializer):
class OrganizationSerializer(BaseSerializer, OpaQueryPathMixin):
show_capabilities = ['edit', 'delete']

class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment')
fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment', 'opa_query_path')
read_only_fields = ('*', 'custom_virtualenv')

def get_related(self, obj):

@@ -1428,7 +1541,7 @@ class LabelsListMixin(object):
return res


class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathMixin):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]

@@ -1449,6 +1562,7 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
'inventory_sources_with_failures',
'pending_deletion',
'prevent_instance_group_fallback',
'opa_query_path',
)

def get_related(self, obj):

@@ -1518,8 +1632,68 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')})

return super(InventorySerializer, self).validate(attrs)

@staticmethod
def _update_variables(variables, inventory_id):
"""
Update the inventory variables of the 'all'-group.

The variables field contains vars from the inventory dialog, hence
representing the "all"-group variables.

Since this is not an update from an inventory source, we update the
variables when the inventory details form is saved.

A user edit on the inventory variables is considered a reset of the
variables update history. Particularly if the user removes a variable by
editing the inventory variables field, the variable is not supposed to
reappear with a value from a previous inventory source update.

We achieve this by forcing `reset=True` on such an update.

As a side-effect, variables which have been set by source updates and
have survived a user-edit (i.e. they have not been deleted from the
variables field) will be assumed to originate from the user edit and are
thus no longer deleted from the inventory when they are removed from
their original source!

Note that we use the inventory source id -1 for user-edit updates
because a regular inventory source cannot have an id of -1 since
PostgreSQL assigns pk's starting from 1 (if this assumption doesn't hold
true, we have to assign another special value for invsrc_id).

:param str variables: The variables as plain text in yaml or json
format.
:param int inventory_id: The primary key of the related inventory
object.
"""
variables_dict = parse_yaml_or_json(variables, silent_failure=False)
logger.debug(f"InventorySerializer._update_variables: {inventory_id=} {variables_dict=}, {variables=}")
update_group_variables(
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
newvars=variables_dict,
dbvars=None,
invsrc_id=-1,
inventory_id=inventory_id,
reset=True,
)

def create(self, validated_data):
"""Called when a new inventory has to be created."""
logger.debug(f"InventorySerializer.create({validated_data=}) >>>>")
obj = super().create(validated_data)
self._update_variables(validated_data.get("variables") or "", obj.id)
return obj

def update(self, obj, validated_data):
"""Called when an existing inventory is updated."""
logger.debug(f"InventorySerializer.update({validated_data=}) >>>>")
obj = super().update(obj, validated_data)
self._update_variables(validated_data.get("variables") or "", obj.id)
return obj


class ConstructedFieldMixin(serializers.Field):
def get_attribute(self, instance):

@@ -1809,10 +1983,12 @@ class GroupSerializer(BaseSerializerWithVariables):
return res

def validate(self, attrs):
# Do not allow the group name to conflict with an existing host name.
name = force_str(attrs.get('name', self.instance and self.instance.name or ''))
inventory = attrs.get('inventory', self.instance and self.instance.inventory or '')
if Host.objects.filter(name=name, inventory=inventory).exists():
raise serializers.ValidationError(_('A Host with that name already exists.'))
#
return super(GroupSerializer, self).validate(attrs)

def validate_name(self, value):

@@ -3151,6 +3327,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'webhook_service',
'webhook_credential',
'prevent_instance_group_fallback',
'opa_query_path',
)
read_only_fields = ('*', 'custom_virtualenv')

@@ -3352,11 +3529,17 @@ class JobRelaunchSerializer(BaseSerializer):
choices=[('all', _('No change to job limit')), ('failed', _('All failed and unreachable hosts'))],
write_only=True,
)
job_type = serializers.ChoiceField(
required=False,
allow_null=True,
choices=NEW_JOB_TYPE_CHOICES,
write_only=True,
)
credential_passwords = VerbatimField(required=True, write_only=True)

class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords')
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'job_type', 'credential_passwords')

def validate_credential_passwords(self, value):
pnts = self.instance.passwords_needed_to_start

@@ -5815,6 +5998,34 @@ class InstanceGroupSerializer(BaseSerializer):
raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
return value

def validate_pod_spec_override(self, value):
if not value:
return value

# value should be empty for non-container groups
if self.instance and not self.instance.is_container_group:
raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))

pod_spec_override_json = None
# defect if the value is yaml or json if yaml convert to json
try:
# convert yaml to json
pod_spec_override_json = yaml.safe_load(value)
except yaml.YAMLError:
try:
pod_spec_override_json = json.loads(value)
except json.JSONDecodeError:
raise serializers.ValidationError(_('pod_spec_override must be valid yaml or json'))

# validate the
spec = pod_spec_override_json.get('spec', {})
automount_service_account_token = spec.get('automountServiceAccountToken', False)

if automount_service_account_token:
raise serializers.ValidationError(_('automountServiceAccountToken is not allowed for security reasons'))

return value

def validate(self, attrs):
attrs = super(InstanceGroupSerializer, self).validate(attrs)
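A standalone sketch (illustration only, not part of the diff) of the check performed by `validate_pod_spec_override` above: the override is parsed as YAML, falling back to JSON, and rejected if it tries to enable `automountServiceAccountToken`:

```python
import json

import yaml  # PyYAML


def check_pod_spec_override(value: str) -> dict:
    # Try YAML first, then JSON; both failing would be a validation error.
    try:
        parsed = yaml.safe_load(value)
    except yaml.YAMLError:
        parsed = json.loads(value)  # raises json.JSONDecodeError if also invalid
    if (parsed or {}).get('spec', {}).get('automountServiceAccountToken', False):
        raise ValueError('automountServiceAccountToken is not allowed')
    return parsed


print(check_pod_spec_override("spec:\n  automountServiceAccountToken: false"))
```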
@@ -56,7 +56,7 @@ from wsgiref.util import FileWrapper
# django-ansible-base
from ansible_base.lib.utils.requests import get_remote_hosts
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType
from ansible_base.rbac import permission_registry

# AWX
from awx.main.tasks.system import send_notifications, update_inventory_computed_fields

@@ -671,81 +671,16 @@ class ScheduleUnifiedJobsList(SubListAPIView):
name = _('Schedule Jobs List')


def immutablesharedfields(cls):
'''
Class decorator to prevent modifying shared resources when ALLOW_LOCAL_RESOURCE_MANAGEMENT setting is set to False.

Works by overriding these view methods:
- create
- delete
- perform_update
create and delete are overridden to raise a PermissionDenied exception.
perform_update is overridden to check if any shared fields are being modified,
and raise a PermissionDenied exception if so.
'''
# create instead of perform_create because some of our views
# override create instead of perform_create
if hasattr(cls, 'create'):
cls.original_create = cls.create

@functools.wraps(cls.create)
def create_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_create(*args, **kwargs)
raise PermissionDenied({'detail': _('Creation of this resource is not allowed. Create this resource via the platform ingress.')})

cls.create = create_wrapper

if hasattr(cls, 'delete'):
cls.original_delete = cls.delete

@functools.wraps(cls.delete)
def delete_wrapper(*args, **kwargs):
if settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
return cls.original_delete(*args, **kwargs)
raise PermissionDenied({'detail': _('Deletion of this resource is not allowed. Delete this resource via the platform ingress.')})

cls.delete = delete_wrapper

if hasattr(cls, 'perform_update'):
cls.original_perform_update = cls.perform_update

@functools.wraps(cls.perform_update)
def update_wrapper(*args, **kwargs):
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
view, serializer = args
instance = view.get_object()
if instance:
if isinstance(instance, models.Organization):
shared_fields = OrganizationType._declared_fields.keys()
elif isinstance(instance, models.User):
shared_fields = UserType._declared_fields.keys()
elif isinstance(instance, models.Team):
shared_fields = TeamType._declared_fields.keys()
attrs = serializer.validated_data
for field in shared_fields:
if field in attrs and getattr(instance, field) != attrs[field]:
raise PermissionDenied({field: _(f"Cannot change shared field '{field}'. Alter this field via the platform ingress.")})
return cls.original_perform_update(*args, **kwargs)

cls.perform_update = update_wrapper

return cls


@immutablesharedfields
class TeamList(ListCreateAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer


@immutablesharedfields
class TeamDetail(RetrieveUpdateDestroyAPIView):
model = models.Team
serializer_class = serializers.TeamSerializer


@immutablesharedfields
class TeamUsersList(BaseUsersList):
model = models.User
serializer_class = serializers.UserSerializer

@@ -816,8 +751,8 @@ class TeamProjectsList(SubListAPIView):
def get_queryset(self):
team = self.get_parent_object()
self.check_parent_access(team)
model_ct = ContentType.objects.get_for_model(self.model)
parent_ct = ContentType.objects.get_for_model(self.parent_model)
model_ct = permission_registry.content_type_model.objects.get_for_model(self.model)
parent_ct = permission_registry.content_type_model.objects.get_for_model(self.parent_model)

rd = get_role_definition(team.member_role)
role = ObjectRole.objects.filter(object_id=team.id, content_type=parent_ct, role_definition=rd).first()

@@ -1127,7 +1062,6 @@ class ProjectCopy(CopyAPIView):
copy_return_serializer_class = serializers.ProjectSerializer


@immutablesharedfields
class UserList(ListCreateAPIView):
model = models.User
serializer_class = serializers.UserSerializer

@@ -1184,14 +1118,6 @@ class UserRolesList(SubListAttachDetachAPIView):
role = get_object_or_400(models.Role, pk=sub_id)

content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
# Prevent user to be associated with team/org when ALLOW_LOCAL_RESOURCE_MANAGEMENT is False
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)

credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:

@@ -1264,7 +1190,6 @@ class UserActivityStreamList(SubListAPIView):
return qs.filter(Q(actor=parent) | Q(user__in=[parent]))


@immutablesharedfields
class UserDetail(RetrieveUpdateDestroyAPIView):
model = models.User
serializer_class = serializers.UserSerializer

@@ -3435,6 +3360,7 @@ class JobRelaunch(RetrieveAPIView):
copy_kwargs = {}
retry_hosts = serializer.validated_data.get('hosts', None)
job_type = serializer.validated_data.get('job_type', None)
if retry_hosts and retry_hosts != 'all':
if obj.status in ACTIVE_STATES:
return Response(

@@ -3455,6 +3381,8 @@ class JobRelaunch(RetrieveAPIView):
)
copy_kwargs['limit'] = ','.join(retry_host_list)

if job_type:
copy_kwargs['job_type'] = job_type
new_job = obj.copy_unified_job(**copy_kwargs)
result = new_job.signal_start(**serializer.validated_data['credential_passwords'])
if not result:

@@ -4236,13 +4164,6 @@ class RoleUsersList(SubListAttachDetachAPIView):
role = self.get_parent_object()

content_types = ContentType.objects.get_for_models(models.Organization, models.Team, models.Credential) # dict of {model: content_type}
if not settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT:
for model in [models.Organization, models.Team]:
ct = content_types[model]
if role.content_type == ct and role.role_field in ['member_role', 'admin_role']:
data = dict(msg=_(f"Cannot directly modify user membership to {ct.model}. Direct shared resource management disabled"))
return Response(data, status=status.HTTP_403_FORBIDDEN)

credential_content_type = content_types[models.Credential]
if role.content_type == credential_content_type:
if 'disassociate' not in request.data and role.content_object.organization and user not in role.content_object.organization.member_role:
@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_ENDPOINT
from awx.main.utils.analytics_proxy import OIDCClient
from rest_framework import status

from collections import OrderedDict

@@ -202,10 +202,16 @@ class AnalyticsGenericView(APIView):
if method not in ["GET", "POST", "OPTIONS"]:
return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
url = self._get_analytics_url(request.path)
using_subscriptions_credentials = False
try:
rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_ENDPOINT, ['api.console'])
rh_user = getattr(settings, 'REDHAT_USERNAME', None)
rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
if not (rh_user and rh_password):
rh_user = self._get_setting('SUBSCRIPTIONS_CLIENT_ID', None, ERROR_MISSING_USER)
rh_password = self._get_setting('SUBSCRIPTIONS_CLIENT_SECRET', None, ERROR_MISSING_PASSWORD)
using_subscriptions_credentials = True

client = OIDCClient(rh_user, rh_password)
response = client.make_request(
method,
url,

@@ -216,17 +222,17 @@ class AnalyticsGenericView(APIView):
timeout=(31, 31),
)
except requests.RequestException:
logger.error("Automation Analytics API request failed, trying base auth method")
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
except MissingSettings:
rh_user = self._get_setting('SUBSCRIPTIONS_USERNAME', None, ERROR_MISSING_USER)
rh_password = self._get_setting('SUBSCRIPTIONS_PASSWORD', None, ERROR_MISSING_PASSWORD)
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
# subscriptions credentials are not valid for basic auth, so just return 401
if using_subscriptions_credentials:
response = Response(status=status.HTTP_401_UNAUTHORIZED)
else:
logger.error("Automation Analytics API request failed, trying base auth method")
response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
#
# Missing or wrong user/pass
#
if response.status_code == status.HTTP_401_UNAUTHORIZED:
text = (response.text or '').rstrip("\n")
text = response.get('text', '').rstrip("\n")
return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
#
# Not found, No entitlement or No data in Analytics
@@ -53,18 +53,15 @@ from awx.api.serializers import (
CredentialSerializer,
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin, OrganizationInstanceGroupMembershipMixin
from awx.api.views import immutablesharedfields

logger = logging.getLogger('awx.api.views.organization')


@immutablesharedfields
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
model = Organization
serializer_class = OrganizationSerializer


@immutablesharedfields
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
model = Organization
serializer_class = OrganizationSerializer

@@ -107,7 +104,6 @@ class OrganizationInventoriesList(SubListAPIView):
relationship = 'inventories'


@immutablesharedfields
class OrganizationUsersList(BaseUsersList):
model = User
serializer_class = UserSerializer

@@ -116,7 +112,6 @@ class OrganizationUsersList(BaseUsersList):
ordering = ('username',)


@immutablesharedfields
class OrganizationAdminsList(BaseUsersList):
model = User
serializer_class = UserSerializer

@@ -155,7 +150,6 @@ class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
parent_key = 'organization'


@immutablesharedfields
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
model = Team
serializer_class = TeamSerializer
@@ -32,6 +32,7 @@ from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ
from awx.main.utils.analytics_proxy import TokenError
from awx.main.utils.licensing import get_licenser

logger = logging.getLogger('awx.api.views.root')

@@ -176,19 +177,21 @@ class ApiV2SubscriptionView(APIView):
def post(self, request):
data = request.data.copy()
if data.get('subscriptions_password') == '$encrypted$':
data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
if data.get('subscriptions_client_secret') == '$encrypted$':
data['subscriptions_client_secret'] = settings.SUBSCRIPTIONS_CLIENT_SECRET
try:
user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
with set_environ(**settings.AWX_TASK_ENV):
validated = get_licenser().validate_rh(user, pw)
if user:
settings.SUBSCRIPTIONS_USERNAME = data['subscriptions_username']
settings.SUBSCRIPTIONS_CLIENT_ID = data['subscriptions_client_id']
if pw:
settings.SUBSCRIPTIONS_PASSWORD = data['subscriptions_password']
settings.SUBSCRIPTIONS_CLIENT_SECRET = data['subscriptions_client_secret']
except Exception as exc:
msg = _("Invalid Subscription")
if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
if isinstance(exc, TokenError) or (
isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401
):
msg = _("The provided credentials are invalid (HTTP 401).")
elif isinstance(exc, requests.exceptions.ProxyError):
msg = _("Unable to connect to proxy server.")

@@ -215,12 +218,12 @@ class ApiV2AttachView(APIView):
def post(self, request):
data = request.data.copy()
pool_id = data.get('pool_id', None)
if not pool_id:
return Response({"error": _("No subscription pool ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
if pool_id and user and pw:
subscription_id = data.get('subscription_id', None)
if not subscription_id:
return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
if subscription_id and user and pw:
data = request.data.copy()
try:
with set_environ(**settings.AWX_TASK_ENV):

@@ -239,7 +242,7 @@ class ApiV2AttachView(APIView):
logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
for sub in validated:
if sub['pool_id'] == pool_id:
if sub['subscription_id'] == subscription_id:
sub['valid_key'] = True
settings.LICENSE = sub
return Response(sub)
@@ -10,7 +10,7 @@ from django.core.validators import URLValidator, _lazy_re_compile
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField # noqa
|
||||
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField, FloatField # noqa
|
||||
from rest_framework.serializers import PrimaryKeyRelatedField # noqa
|
||||
|
||||
# AWX
|
||||
@@ -207,7 +207,8 @@ class URLField(CharField):
|
||||
if self.allow_plain_hostname:
|
||||
try:
|
||||
url_parts = urlparse.urlsplit(value)
|
||||
if url_parts.hostname and '.' not in url_parts.hostname:
|
||||
looks_like_ipv6 = bool(url_parts.netloc and url_parts.netloc.startswith('[') and url_parts.netloc.endswith(']'))
|
||||
if not looks_like_ipv6 and url_parts.hostname and '.' not in url_parts.hostname:
|
||||
netloc = '{}.local'.format(url_parts.hostname)
|
||||
if url_parts.port:
|
||||
netloc = '{}:{}'.format(netloc, url_parts.port)
|
||||
|
||||
@@ -27,5 +27,5 @@ def _migrate_setting(apps, old_key, new_key, encrypted=False):
|
||||
|
||||
|
||||
def prefill_rh_credentials(apps, schema_editor):
|
||||
_migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_USERNAME', encrypted=False)
|
||||
_migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_PASSWORD', encrypted=True)
|
||||
_migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_CLIENT_ID', encrypted=False)
|
||||
_migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_CLIENT_SECRET', encrypted=True)
|
||||
|
||||
@@ -38,6 +38,7 @@ class SettingsRegistry(object):
|
||||
if setting in self._registry:
|
||||
raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting))
|
||||
category = kwargs.setdefault('category', None)
|
||||
kwargs.setdefault('required', False) # No setting is ordinarily required
|
||||
category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None)
|
||||
if category_slug in {'all', 'changed', 'user-defaults'}:
|
||||
raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug))
|
||||
|
||||
@@ -128,3 +128,41 @@ class TestURLField:
|
||||
else:
|
||||
with pytest.raises(ValidationError):
|
||||
field.run_validators(url)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"url, expect_error",
|
||||
[
|
||||
("https://[1:2:3]", True),
|
||||
("http://[1:2:3]", True),
|
||||
("https://[2001:db8:3333:4444:5555:6666:7777:8888", True),
|
||||
("https://2001:db8:3333:4444:5555:6666:7777:8888", True),
|
||||
("https://[2001:db8:3333:4444:5555:6666:7777:8888]", False),
|
||||
("https://[::1]", False),
|
||||
("https://[::]", False),
|
||||
("https://[2001:db8::1]", False),
|
||||
("https://[2001:db8:0:0:0:0:1:1]", False),
|
||||
("https://[fe80::2%eth0]", True), # ipv6 scope identifier
|
||||
("https://[fe80:0:0:0:200:f8ff:fe21:67cf]", False),
|
||||
("https://[::ffff:192.168.1.10]", False),
|
||||
("https://[0:0:0:0:0:ffff:c000:0201]", False),
|
||||
("https://[2001:0db8:000a:0001:0000:0000:0000:0000]", False),
|
||||
("https://[2001:db8:a:1::]", False),
|
||||
("https://[ff02::1]", False),
|
||||
("https://[ff02:0:0:0:0:0:0:1]", False),
|
||||
("https://[fc00::1]", False),
|
||||
("https://[fd12:3456:789a:1::1]", False),
|
||||
("https://[2001:db8::abcd:ef12:3456:7890]", False),
|
||||
("https://[2001:db8:0000:abcd:0000:ef12:0000:3456]", False),
|
||||
("https://[::ffff:10.0.0.1]", False),
|
||||
("https://[2001:db8:cafe::]", False),
|
||||
("https://[2001:db8:cafe:0:0:0:0:0]", False),
|
||||
("https://[fe80::210:f3ff:fedf:4567%3]", True), # ipv6 scope identifier, numerical interface
|
||||
],
|
||||
)
|
||||
def test_ipv6_urls(self, url, expect_error):
|
||||
field = URLField()
|
||||
if expect_error:
|
||||
with pytest.raises(ValidationError, match="Enter a valid URL"):
|
||||
field.run_validators(url)
|
||||
else:
|
||||
field.run_validators(url)
|
||||
|
||||
@@ -2098,7 +2098,7 @@ class WorkflowJobAccess(BaseAccess):
|
||||
def filtered_queryset(self):
|
||||
return WorkflowJob.objects.filter(
|
||||
Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
| Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
|
||||
| Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
|
||||
)
|
||||
|
||||
def can_read(self, obj):
|
||||
@@ -2496,12 +2496,11 @@ class UnifiedJobAccess(BaseAccess):
|
||||
|
||||
def filtered_queryset(self):
|
||||
inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role')
|
||||
org_auditor_qs = Organization.objects.filter(Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
|
||||
qs = self.model.objects.filter(
|
||||
Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
| Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs)
|
||||
| Q(adhoccommand__inventory__id__in=inv_pk_qs)
|
||||
| Q(organization__in=org_auditor_qs)
|
||||
| Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
|
||||
)
|
||||
return qs
|
||||
|
||||
|
||||
@@ -3,13 +3,13 @@ import logging
|
||||
|
||||
# AWX
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def send_subsystem_metrics():
|
||||
DispatcherMetrics().send_metrics()
|
||||
CallbackReceiverMetrics().send_metrics()
|
||||
|
||||
@@ -142,7 +142,7 @@ def config(since, **kwargs):
|
||||
return {
|
||||
'platform': {
|
||||
'system': platform.system(),
|
||||
'dist': distro.linux_distribution(),
|
||||
'dist': (distro.name(), distro.version(), distro.codename()),
|
||||
'release': platform.release(),
|
||||
'type': install_type,
|
||||
},
|
||||
|
||||
@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
|
||||
from awx.main.models import Job
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
|
||||
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_ENDPOINT
|
||||
from awx.main.utils.analytics_proxy import OIDCClient
|
||||
|
||||
__all__ = ['register', 'gather', 'ship']
|
||||
|
||||
@@ -186,7 +186,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
|
||||
if not (
|
||||
settings.AUTOMATION_ANALYTICS_URL
|
||||
and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_USERNAME and settings.SUBSCRIPTIONS_PASSWORD))
|
||||
and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_CLIENT_ID and settings.SUBSCRIPTIONS_CLIENT_SECRET))
|
||||
):
|
||||
logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
|
||||
return None
|
||||
@@ -324,10 +324,10 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti
|
||||
settings.AUTOMATION_ANALYTICS_LAST_ENTRIES = json.dumps(last_entries, cls=DjangoJSONEncoder)
|
||||
|
||||
if collection_type != 'dry-run':
|
||||
if succeeded:
|
||||
for fpath in tarfiles:
|
||||
if os.path.exists(fpath):
|
||||
os.remove(fpath)
|
||||
for fpath in tarfiles:
|
||||
if os.path.exists(fpath):
|
||||
os.remove(fpath)
|
||||
|
||||
with disable_activity_stream():
|
||||
if not settings.AUTOMATION_ANALYTICS_LAST_GATHER or until > settings.AUTOMATION_ANALYTICS_LAST_GATHER:
|
||||
# `AUTOMATION_ANALYTICS_LAST_GATHER` is set whether collection succeeds or fails;
|
||||
@@ -368,8 +368,20 @@ def ship(path):
|
||||
logger.error('AUTOMATION_ANALYTICS_URL is not set')
|
||||
return False
|
||||
|
||||
rh_user = getattr(settings, 'REDHAT_USERNAME', None)
|
||||
rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
|
||||
rh_id = getattr(settings, 'REDHAT_USERNAME', None)
|
||||
rh_secret = getattr(settings, 'REDHAT_PASSWORD', None)
|
||||
|
||||
if not (rh_id and rh_secret):
|
||||
rh_id = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
|
||||
rh_secret = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
|
||||
|
||||
if not rh_id:
|
||||
logger.error('Neither REDHAT_USERNAME nor SUBSCRIPTIONS_CLIENT_ID are set')
|
||||
return False
|
||||
|
||||
if not rh_secret:
|
||||
logger.error('Neither REDHAT_PASSWORD nor SUBSCRIPTIONS_CLIENT_SECRET are set')
|
||||
return False
|
||||
|
||||
with open(path, 'rb') as f:
|
||||
files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
|
||||
@@ -377,25 +389,13 @@ def ship(path):
|
||||
s.headers = get_awx_http_client_headers()
|
||||
s.headers.pop('Content-Type')
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
if rh_user and rh_password:
|
||||
try:
|
||||
client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_ENDPOINT, ['api.console'])
|
||||
response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
|
||||
except requests.RequestException:
|
||||
logger.error("Automation Analytics API request failed, trying base auth method")
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||
elif not rh_user or not rh_password:
|
||||
logger.info('REDHAT_USERNAME and REDHAT_PASSWORD are not set, using SUBSCRIPTIONS_USERNAME and SUBSCRIPTIONS_PASSWORD')
|
||||
rh_user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
|
||||
rh_password = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
|
||||
if rh_user and rh_password:
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
|
||||
elif not rh_user:
|
||||
logger.error('REDHAT_USERNAME and SUBSCRIPTIONS_USERNAME are not set')
|
||||
return False
|
||||
elif not rh_password:
|
||||
logger.error('REDHAT_PASSWORD and SUBSCRIPTIONS_USERNAME are not set')
|
||||
return False
|
||||
try:
|
||||
client = OIDCClient(rh_id, rh_secret)
|
||||
response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
|
||||
except requests.RequestException:
|
||||
logger.error("Automation Analytics API request failed, trying base auth method")
|
||||
response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_id, rh_secret), headers=s.headers, timeout=(31, 31))
|
||||
|
||||
# Accept 2XX status_codes
|
||||
if response.status_code >= 300:
|
||||
logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
|
||||
|
||||
@@ -128,6 +128,7 @@ def metrics():
|
||||
registry=REGISTRY,
|
||||
)
|
||||
|
||||
LICENSE_EXPIRY = Gauge('awx_license_expiry', 'Time before license expires', registry=REGISTRY)
|
||||
LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
|
||||
LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)
|
||||
|
||||
@@ -148,6 +149,7 @@ def metrics():
|
||||
}
|
||||
)
|
||||
|
||||
LICENSE_EXPIRY.set(str(license_info.get('time_remaining', 0)))
|
||||
LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
|
||||
LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
|
||||
from prometheus_client.registry import CollectorRegistry
|
||||
from django.conf import settings
|
||||
from django.http import HttpRequest
|
||||
import redis.exceptions
|
||||
from rest_framework.request import Request
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
@@ -290,8 +291,12 @@ class Metrics(MetricsNamespace):
|
||||
def send_metrics(self):
|
||||
# more than one thread could be calling this at the same time, so should
|
||||
# acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
try:
|
||||
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
return
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
logger.warning(f'Connection error in send_metrics: {exc}')
|
||||
return
|
||||
try:
|
||||
current_time = time.time()
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
import os
|
||||
|
||||
from dispatcherd.config import setup as dispatcher_setup
|
||||
|
||||
from django.apps import AppConfig
|
||||
from django.db import connection
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from awx.main.utils.common import bypass_in_test, load_all_entry_points_for
|
||||
from awx.main.utils.migration import is_database_synchronized
|
||||
@@ -76,9 +79,28 @@ class MainConfig(AppConfig):
|
||||
cls = entry_point.load()
|
||||
InventorySourceOptions.injectors[entry_point_name] = cls
|
||||
|
||||
def configure_dispatcherd(self):
|
||||
"""This implements the default configuration for dispatcherd
|
||||
|
||||
If running the tasking service like awx-manage run_dispatcher,
|
||||
some additional config will be applied on top of this.
|
||||
This configuration provides the minimum such that code can submit
|
||||
tasks to pg_notify to run those tasks.
|
||||
"""
|
||||
from awx.main.dispatch.config import get_dispatcherd_config
|
||||
|
||||
if connection.vendor != 'postgresql':
|
||||
config_dict = get_dispatcherd_config(mock_publish=True)
|
||||
else:
|
||||
config_dict = get_dispatcherd_config()
|
||||
|
||||
dispatcher_setup(config_dict)
|
||||
|
||||
def ready(self):
|
||||
super().ready()
|
||||
|
||||
self.configure_dispatcherd()
|
||||
|
||||
"""
|
||||
Credential loading triggers database operations. There are cases we want to call
|
||||
awx-manage collectstatic without a database. All management commands invoke the ready() code
|
||||
|
||||
152 awx/main/conf.py
@@ -12,6 +12,7 @@ from rest_framework import serializers
|
||||
from awx.conf import fields, register, register_validate
|
||||
from awx.main.models import ExecutionEnvironment
|
||||
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
|
||||
from awx.main.tasks.policy import OPA_AUTH_TYPES
|
||||
|
||||
logger = logging.getLogger('awx.main.conf')
|
||||
|
||||
@@ -90,7 +91,6 @@ register(
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -105,6 +105,7 @@ register(
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -124,8 +125,8 @@ register(
|
||||
allow_blank=True,
|
||||
encrypted=False,
|
||||
read_only=False,
|
||||
label=_('Red Hat customer username'),
|
||||
help_text=_('This username is used to send data to Automation Analytics'),
|
||||
label=_('Red Hat Client ID for Analytics'),
|
||||
help_text=_('Client ID used to send data to Automation Analytics'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -137,34 +138,34 @@ register(
|
||||
allow_blank=True,
|
||||
encrypted=True,
|
||||
read_only=False,
|
||||
label=_('Red Hat customer password'),
|
||||
help_text=_('This password is used to send data to Automation Analytics'),
|
||||
label=_('Red Hat Client Secret for Analytics'),
|
||||
help_text=_('Client secret used to send data to Automation Analytics'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'SUBSCRIPTIONS_USERNAME',
|
||||
'SUBSCRIPTIONS_CLIENT_ID',
|
||||
field_class=fields.CharField,
|
||||
default='',
|
||||
allow_blank=True,
|
||||
encrypted=False,
|
||||
read_only=False,
|
||||
label=_('Red Hat or Satellite username'),
|
||||
help_text=_('This username is used to retrieve subscription and content information'), # noqa
|
||||
label=_('Red Hat Client ID for Subscriptions'),
|
||||
help_text=_('Client ID used to retrieve subscription and content information'), # noqa
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
|
||||
register(
|
||||
'SUBSCRIPTIONS_PASSWORD',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET',
|
||||
field_class=fields.CharField,
|
||||
default='',
|
||||
allow_blank=True,
|
||||
encrypted=True,
|
||||
read_only=False,
|
||||
label=_('Red Hat or Satellite password'),
|
||||
help_text=_('This password is used to retrieve subscription and content information'), # noqa
|
||||
label=_('Red Hat Client Secret for Subscriptions'),
|
||||
help_text=_('Client secret used to retrieve subscription and content information'), # noqa
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
)
|
||||
@@ -237,7 +238,6 @@ register(
|
||||
help_text=_('List of modules allowed to be used by ad-hoc jobs.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
required=False,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -248,7 +248,6 @@ register(
|
||||
('never', _('Never')),
|
||||
('template', _('Only On Job Template Definitions')),
|
||||
],
|
||||
required=True,
|
||||
label=_('When can extra variables contain Jinja templates?'),
|
||||
help_text=_(
|
||||
'Ansible allows variable substitution via the Jinja2 templating '
|
||||
@@ -273,7 +272,6 @@ register(
|
||||
register(
|
||||
'AWX_ISOLATION_SHOW_PATHS',
|
||||
field_class=fields.StringListIsolatedPathField,
|
||||
required=False,
|
||||
label=_('Paths to expose to isolated jobs'),
|
||||
help_text=_(
|
||||
'List of paths that would otherwise be hidden to expose to isolated jobs. Enter one path per line. '
|
||||
@@ -439,7 +437,6 @@ register(
|
||||
register(
|
||||
'AWX_ANSIBLE_CALLBACK_PLUGINS',
|
||||
field_class=fields.StringListField,
|
||||
required=False,
|
||||
label=_('Ansible Callback Plugins'),
|
||||
help_text=_('List of paths to search for extra callback plugins to be used when running jobs. Enter one path per line.'),
|
||||
category=_('Jobs'),
|
||||
@@ -553,7 +550,6 @@ register(
|
||||
help_text=_('Port on Logging Aggregator to send logs to (if required and not provided in Logging Aggregator).'),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
required=False,
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_TYPE',
|
||||
@@ -575,7 +571,6 @@ register(
|
||||
help_text=_('Username for external log aggregator (if required; HTTP/s only).'),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
required=False,
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_PASSWORD',
|
||||
@@ -587,7 +582,6 @@ register(
|
||||
help_text=_('Password or authentication token for external log aggregator (if required; HTTP/s only).'),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
required=False,
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_LOGGERS',
|
||||
@@ -774,7 +768,6 @@ register(
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
hidden=True,
|
||||
)
|
||||
register(
|
||||
@@ -980,3 +973,124 @@ def csrf_trusted_origins_validate(serializer, attrs):
|
||||
|
||||
|
||||
register_validate('system', csrf_trusted_origins_validate)
|
||||
|
||||
|
||||
register(
|
||||
'OPA_HOST',
|
||||
field_class=fields.CharField,
|
||||
label=_('OPA server hostname'),
|
||||
default='',
|
||||
help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
allow_blank=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_PORT',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('OPA server port'),
|
||||
default=8181,
|
||||
help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_SSL',
|
||||
field_class=fields.BooleanField,
|
||||
label=_('Use SSL for OPA connection'),
|
||||
default=False,
|
||||
help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_TYPE',
|
||||
field_class=fields.ChoiceField,
|
||||
label=_('OPA authentication type'),
|
||||
choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
|
||||
default=OPA_AUTH_TYPES.NONE,
|
||||
help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_TOKEN',
|
||||
field_class=fields.CharField,
|
||||
label=_('OPA authentication token'),
|
||||
default='',
|
||||
help_text=_(
|
||||
'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
|
||||
),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
allow_blank=True,
|
||||
encrypted=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_CLIENT_CERT',
|
||||
field_class=fields.CharField,
|
||||
label=_('OPA client certificate content'),
|
||||
default='',
|
||||
help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
allow_blank=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_CLIENT_KEY',
|
||||
field_class=fields.CharField,
|
||||
label=_('OPA client key content'),
|
||||
default='',
|
||||
help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
allow_blank=True,
|
||||
encrypted=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_CA_CERT',
|
||||
field_class=fields.CharField,
|
||||
label=_('OPA CA certificate content'),
|
||||
default='',
|
||||
help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
allow_blank=True,
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_AUTH_CUSTOM_HEADERS',
|
||||
field_class=fields.DictField,
|
||||
label=_('OPA custom authentication headers'),
|
||||
default={},
|
||||
help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_REQUEST_TIMEOUT',
|
||||
field_class=fields.FloatField,
|
||||
label=_('OPA request timeout'),
|
||||
default=1.5,
|
||||
help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
register(
|
||||
'OPA_REQUEST_RETRIES',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('OPA request retry count'),
|
||||
default=2,
|
||||
help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
|
||||
category=('PolicyAsCode'),
|
||||
category_slug='policyascode',
|
||||
)
|
||||
|
||||
@@ -77,6 +77,8 @@ LOGGER_BLOCKLIST = (
|
||||
'awx.main.utils.log',
|
||||
# loggers that may be called getting logging settings
|
||||
'awx.conf',
|
||||
# dispatcherd should only use 1 database connection
|
||||
'dispatcherd',
|
||||
)
|
||||
|
||||
# Reported version for node seen in receptor mesh but for which capacity check
|
||||
|
||||
53 awx/main/dispatch/config.py Normal file
@@ -0,0 +1,53 @@
from django.conf import settings

from ansible_base.lib.utils.db import get_pg_notify_params
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.pool import get_auto_max_workers


def get_dispatcherd_config(for_service: bool = False, mock_publish: bool = False) -> dict:
    """Return a dictionary config for dispatcherd

    Parameters:
        for_service: if True, include dynamic options needed for running the dispatcher service
            this will require database access, you should delay evaluation until after app setup
    """
    config = {
        "version": 2,
        "service": {
            "pool_kwargs": {
                "min_workers": settings.JOB_EVENT_WORKERS,
                "max_workers": get_auto_max_workers(),
            },
            "main_kwargs": {"node_id": settings.CLUSTER_HOST_ID},
            "process_manager_cls": "ForkServerManager",
            "process_manager_kwargs": {"preload_modules": ['awx.main.dispatch.hazmat']},
        },
        "brokers": {
            "socket": {"socket_path": settings.DISPATCHERD_DEBUGGING_SOCKFILE},
        },
        "publish": {"default_control_broker": "socket"},
        "worker": {"worker_cls": "awx.main.dispatch.worker.dispatcherd.AWXTaskWorker"},
    }

    if mock_publish:
        config["brokers"]["noop"] = {}
        config["publish"]["default_broker"] = "noop"
    else:
        config["brokers"]["pg_notify"] = {
            "config": get_pg_notify_params(),
            "sync_connection_factory": "ansible_base.lib.utils.db.psycopg_connection_from_django",
            "default_publish_channel": settings.CLUSTER_HOST_ID,  # used for debugging commands
        }
        config["publish"]["default_broker"] = "pg_notify"

    if for_service:
        config["producers"] = {
            "ScheduledProducer": {"task_schedule": settings.DISPATCHER_SCHEDULE},
            "OnStartProducer": {"task_list": {"awx.main.tasks.system.dispatch_startup": {}}},
            "ControlProducer": {},
        }

        config["brokers"]["pg_notify"]["channels"] = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]

    return config
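For orientation, a minimal sketch of how this new module is consumed elsewhere in this change set (configure_dispatcherd() in apps.py and the run_dispatcher command): build the dict and hand it to dispatcherd's setup() before anything publishes a task. Only the calls shown in the diff are used; the mock_publish=True invocation below just illustrates the non-PostgreSQL path.

# Sketch only: mirrors how configure_dispatcherd() and run_dispatcher use this module.
from dispatcherd.config import setup as dispatcher_setup

from awx.main.dispatch.config import get_dispatcherd_config

# mock_publish=True swaps the pg_notify broker for a no-op broker (no PostgreSQL needed)
dispatcher_setup(get_dispatcherd_config(mock_publish=True))

# the dispatcher service itself would instead do:
#     dispatcher_setup(get_dispatcherd_config(for_service=True))
#     run_service()
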
36 awx/main/dispatch/hazmat.py Normal file
@@ -0,0 +1,36 @@
import django

# dispatcherd publisher logic is likely to be used, but needs manual preload
from dispatcherd.brokers import pg_notify  # noqa

# Cache may not be initialized until we are in the worker, so preload here
from channels_redis import core  # noqa

from awx import prepare_env

from dispatcherd.utils import resolve_callable


prepare_env()

django.setup()  # noqa


from django.conf import settings


# Preload all periodic tasks so their imports will be in shared memory
for name, options in settings.CELERYBEAT_SCHEDULE.items():
    resolve_callable(options['task'])


# Preload in-line import from tasks
from awx.main.scheduler.kubernetes import PodManager  # noqa


from django.core.cache import cache as django_cache
from django.db import connection


connection.close()
django_cache.close()
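The hazmat module above exists so the forkserver parent imports Django, the periodic-task callables, and the cache client once, then closes its own database and cache handles so forked workers never inherit live sockets. As a rough standard-library analogue of that preload idea (placeholder modules and worker function, not AWX code):

# Rough stdlib analogue of "preload_modules": the forkserver imports heavy modules
# up front and every forked worker inherits them already loaded. Unix-only start
# method; "json"/"decimal" and work() are placeholders, not AWX internals.
import multiprocessing as mp


def work(n: int) -> int:
    return n * n  # stand-in for a real task


if __name__ == "__main__":
    ctx = mp.get_context("forkserver")
    ctx.set_forkserver_preload(["json", "decimal"])  # imported once in the forkserver
    with ctx.Pool(processes=2) as pool:
        print(pool.map(work, range(5)))
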
@@ -88,8 +88,10 @@ class Scheduler:
|
||||
# internally times are all referenced relative to startup time, add grace period
|
||||
self.global_start = time.time() + 2.0
|
||||
|
||||
def get_and_mark_pending(self):
|
||||
relative_time = time.time() - self.global_start
|
||||
def get_and_mark_pending(self, reftime=None):
|
||||
if reftime is None:
|
||||
reftime = time.time() # mostly for tests
|
||||
relative_time = reftime - self.global_start
|
||||
to_run = []
|
||||
for job in self.jobs:
|
||||
if job.due_to_run(relative_time):
|
||||
@@ -98,8 +100,10 @@ class Scheduler:
|
||||
job.mark_run(relative_time)
|
||||
return to_run
|
||||
|
||||
def time_until_next_run(self):
|
||||
relative_time = time.time() - self.global_start
|
||||
def time_until_next_run(self, reftime=None):
|
||||
if reftime is None:
|
||||
reftime = time.time() # mostly for tests
|
||||
relative_time = reftime - self.global_start
|
||||
next_job = min(self.jobs, key=lambda j: j.next_run)
|
||||
delta = next_job.next_run - relative_time
|
||||
if delta <= 0.1:
|
||||
@@ -115,10 +119,11 @@ class Scheduler:
|
||||
def debug(self, *args, **kwargs):
|
||||
data = dict()
|
||||
data['title'] = 'Scheduler status'
|
||||
reftime = time.time()
|
||||
|
||||
now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
now = datetime.fromtimestamp(reftime).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
|
||||
relative_time = time.time() - self.global_start
|
||||
relative_time = reftime - self.global_start
|
||||
data['started_time'] = start_time
|
||||
data['current_time'] = now
|
||||
data['current_time_relative'] = round(relative_time, 3)
|
||||
|
||||
@@ -7,6 +7,7 @@ import time
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from uuid import uuid4
|
||||
import json
|
||||
|
||||
import collections
|
||||
from multiprocessing import Process
|
||||
@@ -25,7 +26,10 @@ from ansible_base.lib.logging.runtime import log_excess_runtime
|
||||
|
||||
from awx.main.models import UnifiedJob
|
||||
from awx.main.dispatch import reaper
|
||||
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
|
||||
from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity
|
||||
|
||||
# ansible-runner
|
||||
from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
|
||||
|
||||
if 'run_callback_receiver' in sys.argv:
|
||||
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
|
||||
@@ -33,6 +37,9 @@ else:
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
|
||||
RETIRED_SENTINEL_TASK = "[retired]"
|
||||
|
||||
|
||||
class NoOpResultQueue(object):
|
||||
def put(self, item):
|
||||
pass
|
||||
@@ -77,11 +84,17 @@ class PoolWorker(object):
|
||||
self.queue = MPQueue(queue_size)
|
||||
self.process = Process(target=target, args=(self.queue, self.finished) + args)
|
||||
self.process.daemon = True
|
||||
self.creation_time = time.monotonic()
|
||||
self.retiring = False
|
||||
|
||||
def start(self):
|
||||
self.process.start()
|
||||
|
||||
def put(self, body):
|
||||
if self.retiring:
|
||||
uuid = body.get('uuid', 'N/A') if isinstance(body, dict) else 'N/A'
|
||||
logger.info(f"Worker pid:{self.pid} is retiring. Refusing new task {uuid}.")
|
||||
raise QueueFull("Worker is retiring and not accepting new tasks") # AutoscalePool.write handles QueueFull
|
||||
uuid = '?'
|
||||
if isinstance(body, dict):
|
||||
if not body.get('uuid'):
|
||||
@@ -100,6 +113,11 @@ class PoolWorker(object):
|
||||
"""
|
||||
self.queue.put('QUIT')
|
||||
|
||||
@property
|
||||
def age(self):
|
||||
"""Returns the current age of the worker in seconds."""
|
||||
return time.monotonic() - self.creation_time
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self.process.pid
|
||||
@@ -146,6 +164,8 @@ class PoolWorker(object):
|
||||
# the purpose of self.managed_tasks is to just track internal
|
||||
# state of which events are *currently* being processed.
|
||||
logger.warning('Event UUID {} appears to be have been duplicated.'.format(uuid))
|
||||
if self.retiring:
|
||||
self.managed_tasks[RETIRED_SENTINEL_TASK] = {'task': RETIRED_SENTINEL_TASK}
|
||||
|
||||
@property
|
||||
def current_task(self):
|
||||
@@ -261,6 +281,8 @@ class WorkerPool(object):
|
||||
'{% for w in workers %}'
|
||||
'. worker[pid:{{ w.pid }}]{% if not w.alive %} GONE exit={{ w.exitcode }}{% endif %}'
|
||||
' sent={{ w.messages_sent }}'
|
||||
' age={{ "%.0f"|format(w.age) }}s'
|
||||
' retiring={{ w.retiring }}'
|
||||
'{% if w.messages_finished %} finished={{ w.messages_finished }}{% endif %}'
|
||||
' qsize={{ w.managed_tasks|length }}'
|
||||
' rss={{ w.mb }}MB'
|
||||
@@ -307,6 +329,41 @@ class WorkerPool(object):
|
||||
logger.exception('could not kill {}'.format(worker.pid))
|
||||
|
||||
|
||||
def get_auto_max_workers():
|
||||
"""Method we normally rely on to get max_workers
|
||||
|
||||
Uses almost same logic as Instance.local_health_check
|
||||
The important thing is to be MORE than Instance.capacity
|
||||
so that the task-manager does not over-schedule this node
|
||||
|
||||
Ideally we would just use the capacity from the database plus reserve workers,
|
||||
but this poses some bootstrap problems where OCP task containers
|
||||
register themselves after startup
|
||||
"""
|
||||
# Get memory from ansible-runner
|
||||
total_memory_gb = get_mem_in_bytes()
|
||||
|
||||
# This may replace memory calculation with a user override
|
||||
corrected_memory = get_corrected_memory(total_memory_gb)
|
||||
|
||||
# Get same number as max forks based on memory, this function takes memory as bytes
|
||||
mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)
|
||||
|
||||
# Follow same process for CPU capacity constraint
|
||||
cpu_count = get_cpu_count()
|
||||
corrected_cpu = get_corrected_cpu(cpu_count)
|
||||
cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)
|
||||
|
||||
# Here is what is different from health checks,
|
||||
auto_max = max(mem_capacity, cpu_capacity)
|
||||
|
||||
# add magic number of extra workers to ensure
|
||||
# we have a few extra workers to run the heartbeat
|
||||
auto_max += 7
|
||||
|
||||
return auto_max
|
||||
|
||||
|
||||
class AutoscalePool(WorkerPool):
|
||||
"""
|
||||
An extended pool implementation that automatically scales workers up and
|
||||
@@ -317,22 +374,13 @@ class AutoscalePool(WorkerPool):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.max_workers = kwargs.pop('max_workers', None)
|
||||
self.max_worker_lifetime_seconds = kwargs.pop(
|
||||
'max_worker_lifetime_seconds', getattr(settings, 'WORKER_MAX_LIFETIME_SECONDS', 14400)
|
||||
) # Default to 4 hours
|
||||
super(AutoscalePool, self).__init__(*args, **kwargs)
|
||||
|
||||
if self.max_workers is None:
|
||||
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
|
||||
if settings_absmem is not None:
|
||||
# There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
|
||||
total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
|
||||
else:
|
||||
total_memory_gb = (psutil.virtual_memory().total >> 30) + 1 # noqa: round up
|
||||
|
||||
# Get same number as max forks based on memory, this function takes memory as bytes
|
||||
self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
|
||||
|
||||
# add magic prime number of extra workers to ensure
|
||||
# we have a few extra workers to run the heartbeat
|
||||
self.max_workers += 7
|
||||
self.max_workers = get_auto_max_workers()
|
||||
|
||||
# max workers can't be less than min_workers
|
||||
self.max_workers = max(self.min_workers, self.max_workers)
|
||||
@@ -346,6 +394,9 @@ class AutoscalePool(WorkerPool):
|
||||
self.scale_up_ct = 0
|
||||
self.worker_count_max = 0
|
||||
|
||||
# last time we wrote current tasks, to avoid too much log spam
|
||||
self.last_task_list_log = time.monotonic()
|
||||
|
||||
def produce_subsystem_metrics(self, metrics_object):
|
||||
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
|
||||
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
|
||||
@@ -385,6 +436,7 @@ class AutoscalePool(WorkerPool):
|
||||
"""
|
||||
orphaned = []
|
||||
for w in self.workers[::]:
|
||||
is_retirement_age = self.max_worker_lifetime_seconds is not None and w.age > self.max_worker_lifetime_seconds
|
||||
if not w.alive:
|
||||
# the worker process has exited
|
||||
# 1. take the task it was running and enqueue the error
|
||||
@@ -393,6 +445,10 @@ class AutoscalePool(WorkerPool):
|
||||
# send them to another worker
|
||||
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
|
||||
if w.current_task:
|
||||
if w.current_task == {'task': RETIRED_SENTINEL_TASK}:
|
||||
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
|
||||
self.workers.remove(w)
|
||||
continue
|
||||
if w.current_task != 'QUIT':
|
||||
try:
|
||||
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
|
||||
@@ -403,6 +459,7 @@ class AutoscalePool(WorkerPool):
|
||||
logger.warning(f'Worker was told to quit but has not, pid={w.pid}')
|
||||
orphaned.extend(w.orphaned_tasks)
|
||||
self.workers.remove(w)
|
||||
|
||||
elif w.idle and len(self.workers) > self.min_workers:
|
||||
# the process has an empty queue (it's idle) and we have
|
||||
# more processes in the pool than we need (> min)
|
||||
@@ -411,6 +468,22 @@ class AutoscalePool(WorkerPool):
|
||||
logger.debug('scaling down worker pid:{}'.format(w.pid))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
|
||||
elif w.idle and is_retirement_age:
|
||||
logger.debug('scaling down worker pid:{} due to worker age: {}'.format(w.pid, w.age))
|
||||
w.quit()
|
||||
self.workers.remove(w)
|
||||
|
||||
elif is_retirement_age and not w.retiring and not w.idle:
|
||||
logger.info(
|
||||
f"Worker pid:{w.pid} (age: {w.age:.0f}s) exceeded max lifetime ({self.max_worker_lifetime_seconds:.0f}s). "
|
||||
"Signaling for graceful retirement."
|
||||
)
|
||||
# Send QUIT signal; worker will finish current task then exit.
|
||||
w.quit()
|
||||
# mark as retiring to reject any future tasks that might be assigned in meantime
|
||||
w.retiring = True
|
||||
|
||||
if w.alive:
|
||||
# if we discover a task manager invocation that's been running
|
||||
# too long, reap it (because otherwise it'll just hold the postgres
|
||||
@@ -463,6 +536,14 @@ class AutoscalePool(WorkerPool):
|
||||
self.worker_count_max = new_worker_ct
|
||||
return ret
|
||||
|
||||
@staticmethod
|
||||
def fast_task_serialization(current_task):
|
||||
try:
|
||||
return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
|
||||
except Exception:
|
||||
# just make sure this does not make things worse
|
||||
return str(current_task)
|
||||
|
||||
def write(self, preferred_queue, body):
|
||||
if 'guid' in body:
|
||||
set_guid(body['guid'])
|
||||
@@ -484,6 +565,15 @@ class AutoscalePool(WorkerPool):
|
||||
if isinstance(body, dict):
|
||||
task_name = body.get('task')
|
||||
logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
|
||||
# Once every 10 seconds write out task list for debugging
|
||||
if time.monotonic() - self.last_task_list_log >= 10.0:
|
||||
task_counts = {}
|
||||
for worker in self.workers:
|
||||
task_slug = self.fast_task_serialization(worker.current_task)
|
||||
task_counts.setdefault(task_slug, 0)
|
||||
task_counts[task_slug] += 1
|
||||
logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
|
||||
self.last_task_list_log = time.monotonic()
|
||||
return super(AutoscalePool, self).write(preferred_queue, body)
|
||||
except Exception:
|
||||
for conn in connections.all():
|
||||
|
||||
@@ -4,6 +4,9 @@ import json
|
||||
import time
|
||||
from uuid import uuid4
|
||||
|
||||
from dispatcherd.publish import submit_task
|
||||
from dispatcherd.utils import resolve_callable
|
||||
|
||||
from django_guid import get_guid
|
||||
from django.conf import settings
|
||||
|
||||
@@ -93,6 +96,19 @@ class task:
|
||||
|
||||
@classmethod
|
||||
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
|
||||
try:
|
||||
from flags.state import flag_enabled
|
||||
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
# At this point we have the import string, and submit_task wants the method, so back to that
|
||||
actual_task = resolve_callable(cls.name)
|
||||
return submit_task(actual_task, args=args, kwargs=kwargs, queue=queue, uuid=uuid, **kw)
|
||||
except Exception:
|
||||
logger.exception(f"[DISPATCHER] Failed to check for alternative dispatcherd implementation for {cls.name}")
|
||||
# Continue with original implementation if anything fails
|
||||
pass
|
||||
|
||||
# Original implementation follows
|
||||
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
|
||||
if not queue:
|
||||
msg = f'{cls.name}: Queue value required and may not be None'
|
||||
|
||||
@@ -15,6 +15,7 @@ from datetime import timedelta
|
||||
|
||||
from django import db
|
||||
from django.conf import settings
|
||||
import redis.exceptions
|
||||
|
||||
from ansible_base.lib.logging.runtime import log_excess_runtime
|
||||
|
||||
@@ -130,10 +131,13 @@ class AWXConsumerBase(object):
|
||||
@log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
|
||||
def record_statistics(self):
|
||||
if time.time() - self.last_stats > 1: # buffer stat recording to once per second
|
||||
save_data = self.pool.debug()
|
||||
try:
|
||||
self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
|
||||
self.redis.set(f'awx_{self.name}_statistics', save_data)
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
logger.warning(f'Redis connection error saving {self.name} status data:\n{exc}\nmissed data:\n{save_data}')
|
||||
except Exception:
|
||||
logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
|
||||
logger.exception(f"Unknown redis error saving {self.name} status data:\nmissed data:\n{save_data}")
|
||||
self.last_stats = time.time()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
@@ -189,7 +193,10 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
current_time = time.time()
|
||||
self.pool.produce_subsystem_metrics(self.subsystem_metrics)
|
||||
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
try:
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
logger.warning(f'Redis connection error saving dispatcher metrics, error:\n{exc}')
|
||||
self.listen_cumulative_time = 0.0
|
||||
self.last_metrics_gather = current_time
|
||||
|
||||
@@ -205,7 +212,11 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
except Exception as exc:
|
||||
logger.warning(f'Failed to save dispatcher statistics {exc}')
|
||||
|
||||
for job in self.scheduler.get_and_mark_pending():
|
||||
# Everything benchmarks to the same original time, so that skews due to
|
||||
# runtime of the actions, themselves, do not mess up scheduling expectations
|
||||
reftime = time.time()
|
||||
|
||||
for job in self.scheduler.get_and_mark_pending(reftime=reftime):
|
||||
if 'control' in job.data:
|
||||
try:
|
||||
job.data['control']()
|
||||
@@ -222,12 +233,12 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
|
||||
self.listen_start = time.time()
|
||||
|
||||
return self.scheduler.time_until_next_run()
|
||||
return self.scheduler.time_until_next_run(reftime=reftime)
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
super(AWXConsumerPG, self).run(*args, **kwargs)
|
||||
|
||||
logger.info(f"Running worker {self.name} listening to queues {self.queues}")
|
||||
logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
|
||||
init = False
|
||||
|
||||
while True:
|
||||
|
||||
@@ -86,6 +86,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
return os.getpid()
|
||||
|
||||
def read(self, queue):
|
||||
has_redis_error = False
|
||||
try:
|
||||
res = self.redis.blpop(self.queue_name, timeout=1)
|
||||
if res is None:
|
||||
@@ -95,14 +96,21 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
|
||||
self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
|
||||
return json.loads(res[1])
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
# Low noise log, because very common and many workers will write this
|
||||
logger.error(f"redis connection error: {exc}")
|
||||
has_redis_error = True
|
||||
time.sleep(5)
|
||||
except redis.exceptions.RedisError:
|
||||
logger.exception("encountered an error communicating with redis")
|
||||
has_redis_error = True
|
||||
time.sleep(1)
|
||||
except (json.JSONDecodeError, KeyError):
|
||||
logger.exception("failed to decode JSON message from redis")
|
||||
finally:
|
||||
self.record_statistics()
|
||||
self.record_read_metrics()
|
||||
if not has_redis_error:
|
||||
self.record_statistics()
|
||||
self.record_read_metrics()
|
||||
|
||||
return {'event': 'FLUSH'}
|
||||
|
||||
|
||||
14 awx/main/dispatch/worker/dispatcherd.py Normal file
@@ -0,0 +1,14 @@
from dispatcherd.worker.task import TaskWorker

from django.db import connection


class AWXTaskWorker(TaskWorker):

    def on_start(self) -> None:
        """Get worker connected so that first task it gets will be worked quickly"""
        connection.ensure_connection()

    def pre_task(self, message) -> None:
        """This should remedy bad connections that can not fix themselves"""
        connection.close_if_unusable_or_obsolete()
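The two hooks above follow the usual connection-hygiene pattern for long-lived Django workers: connect eagerly at startup, then validate the connection before each task so one dropped PostgreSQL socket does not poison the worker. A hedged sketch of the same pattern in a plain loop (fetch_next_task and run_task are hypothetical placeholders):

# Same connection-hygiene pattern as AWXTaskWorker, shown outside dispatcherd.
# fetch_next_task()/run_task() are hypothetical; only the django.db calls are real.
from django.db import connection


def worker_loop(fetch_next_task, run_task):
    connection.ensure_connection()  # fail fast at startup, like on_start()
    while True:
        task = fetch_next_task()
        if task is None:
            break
        # drop stale or broken connections before touching the DB, like pre_task()
        connection.close_if_unusable_or_obsolete()
        run_task(task)
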
@@ -38,5 +38,12 @@ class PostRunError(Exception):
|
||||
super(PostRunError, self).__init__(msg)
|
||||
|
||||
|
||||
class PolicyEvaluationError(Exception):
|
||||
def __init__(self, msg, status='failed', tb=''):
|
||||
self.status = status
|
||||
self.tb = tb
|
||||
super(PolicyEvaluationError, self).__init__(msg)
|
||||
|
||||
|
||||
class ReceptorNodeNotFound(RuntimeError):
|
||||
pass
|
||||
|
||||
@@ -33,6 +33,7 @@ from awx.main.utils.safe_yaml import sanitize_jinja
|
||||
from awx.main.models.rbac import batch_role_ancestor_rebuilding
|
||||
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
|
||||
from awx.main.utils.execution_environments import get_default_execution_environment
|
||||
from awx.main.utils.inventory_vars import update_group_variables
|
||||
from awx.main.signals import disable_activity_stream
|
||||
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV
|
||||
|
||||
@@ -457,19 +458,19 @@ class Command(BaseCommand):
|
||||
"""
|
||||
Update inventory variables from "all" group.
|
||||
"""
|
||||
# TODO: We disable variable overwrite here in case user-defined inventory variables get
|
||||
# mangled. But we still need to figure out a better way of processing multiple inventory
|
||||
# update variables mixing with each other.
|
||||
# issue for this: https://github.com/ansible/awx/issues/11623
|
||||
|
||||
if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
|
||||
# NOTE: we had to add a exception case to not merge variables
|
||||
# to make constructed inventory coherent
|
||||
db_variables = self.all_group.variables
|
||||
else:
|
||||
db_variables = self.inventory.variables_dict
|
||||
db_variables.update(self.all_group.variables)
|
||||
|
||||
db_variables = update_group_variables(
|
||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
||||
newvars=self.all_group.variables,
|
||||
dbvars=self.inventory.variables_dict,
|
||||
invsrc_id=self.inventory_source.id,
|
||||
inventory_id=self.inventory.id,
|
||||
overwrite_vars=self.overwrite_vars,
|
||||
)
|
||||
if db_variables != self.inventory.variables_dict:
|
||||
self.inventory.variables = json.dumps(db_variables)
|
||||
self.inventory.save(update_fields=['variables'])
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
|
||||
import redis
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
import redis.exceptions
|
||||
|
||||
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
|
||||
|
||||
@@ -27,7 +30,10 @@ class Command(BaseCommand):
|
||||
return
|
||||
consumer = None
|
||||
|
||||
CallbackReceiverMetricsServer().start()
|
||||
try:
|
||||
CallbackReceiverMetricsServer().start()
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
raise CommandError(f'Callback receiver could not connect to redis, error: {exc}')
|
||||
|
||||
try:
|
||||
consumer = AWXConsumerRedis(
|
||||
|
||||
@@ -2,11 +2,21 @@
|
||||
# All Rights Reserved.
|
||||
import logging
|
||||
import yaml
|
||||
import os
|
||||
|
||||
import redis
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
|
||||
from flags.state import flag_enabled
|
||||
|
||||
from dispatcherd.factories import get_control_from_settings
|
||||
from dispatcherd import run_service
|
||||
from dispatcherd.config import setup as dispatcher_setup
|
||||
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.config import get_dispatcherd_config
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||
@@ -38,18 +48,44 @@ class Command(BaseCommand):
|
||||
),
|
||||
)
|
||||
|
||||
def verify_dispatcherd_socket(self):
|
||||
if not os.path.exists(settings.DISPATCHERD_DEBUGGING_SOCKFILE):
|
||||
raise CommandError('Dispatcher is not running locally')
|
||||
|
||||
def handle(self, *arg, **options):
|
||||
if options.get('status'):
|
||||
print(Control('dispatcher').status())
|
||||
return
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
ctl = get_control_from_settings()
|
||||
running_data = ctl.control_with_reply('status')
|
||||
if len(running_data) != 1:
|
||||
raise CommandError('Did not receive expected number of replies')
|
||||
print(yaml.dump(running_data[0], default_flow_style=False))
|
||||
return
|
||||
else:
|
||||
print(Control('dispatcher').status())
|
||||
return
|
||||
if options.get('schedule'):
|
||||
print(Control('dispatcher').schedule())
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
print('NOT YET IMPLEMENTED')
|
||||
return
|
||||
else:
|
||||
print(Control('dispatcher').schedule())
|
||||
return
|
||||
if options.get('running'):
|
||||
print(Control('dispatcher').running())
|
||||
return
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
ctl = get_control_from_settings()
|
||||
running_data = ctl.control_with_reply('running')
|
||||
print(yaml.dump(running_data, default_flow_style=False))
|
||||
return
|
||||
else:
|
||||
print(Control('dispatcher').running())
|
||||
return
|
||||
if options.get('reload'):
|
||||
return Control('dispatcher').control({'control': 'reload'})
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
print('NOT YET IMPLEMENTED')
|
||||
return
|
||||
else:
|
||||
return Control('dispatcher').control({'control': 'reload'})
|
||||
if options.get('cancel'):
|
||||
cancel_str = options.get('cancel')
|
||||
try:
|
||||
@@ -58,18 +94,36 @@ class Command(BaseCommand):
|
||||
cancel_data = [cancel_str]
|
||||
if not isinstance(cancel_data, list):
|
||||
cancel_data = [cancel_str]
|
||||
print(Control('dispatcher').cancel(cancel_data))
|
||||
return
|
||||
|
||||
consumer = None
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
ctl = get_control_from_settings()
|
||||
results = []
|
||||
for task_id in cancel_data:
|
||||
# For each task UUID, send an individual cancel command
|
||||
result = ctl.control_with_reply('cancel', data={'uuid': task_id})
|
||||
results.append(result)
|
||||
print(yaml.dump(results, default_flow_style=False))
|
||||
return
|
||||
else:
|
||||
print(Control('dispatcher').cancel(cancel_data))
|
||||
return
|
||||
|
||||
DispatcherMetricsServer().start()
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
dispatcher_setup(get_dispatcherd_config(for_service=True))
|
||||
run_service()
|
||||
else:
|
||||
consumer = None
|
||||
|
||||
try:
|
||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
try:
|
||||
DispatcherMetricsServer().start()
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
|
||||
|
||||
try:
|
||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
|
||||
consumer.run()
|
||||
except KeyboardInterrupt:
|
||||
logger.debug('Terminating Task Dispatcher')
|
||||
if consumer:
|
||||
consumer.stop()
|
||||
|
||||
46 awx/main/migrations/0197_add_opa_query_path.py Normal file
@@ -0,0 +1,46 @@
# Generated by Django 4.2.18 on 2025-03-17 16:10

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0196_indirect_managed_node_audit'),
    ]

    operations = [
        migrations.AddField(
            model_name='inventory',
            name='opa_query_path',
            field=models.CharField(
                blank=True,
                default=None,
                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
                max_length=128,
                null=True,
            ),
        ),
        migrations.AddField(
            model_name='jobtemplate',
            name='opa_query_path',
            field=models.CharField(
                blank=True,
                default=None,
                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
                max_length=128,
                null=True,
            ),
        ),
        migrations.AddField(
            model_name='organization',
            name='opa_query_path',
            field=models.CharField(
                blank=True,
                default=None,
                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
                max_length=128,
                null=True,
            ),
        ),
    ]
@@ -1,15 +0,0 @@
# Generated by Django 4.2.10 on 2024-09-16 10:22

from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0196_indirect_managed_node_audit'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Profile',
        ),
    ]
@@ -0,0 +1,61 @@
|
||||
# Generated by Django 4.2.18 on 2025-02-27 20:35
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [('main', '0197_add_opa_query_path')]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('vmware_esxi', 'VMware ESXi'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('vmware_esxi', 'VMware ESXi'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -1,26 +0,0 @@
|
||||
# Generated by Django 4.2.10 on 2024-09-16 15:21
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0197_delete_profile'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
# delete all sso application migrations
|
||||
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';"),
|
||||
# delete all sso application content group permissions
|
||||
migrations.RunSQL(
|
||||
"DELETE FROM auth_group_permissions "
|
||||
"WHERE permission_id IN "
|
||||
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));"
|
||||
),
|
||||
# delete all sso application content permissions
|
||||
migrations.RunSQL("DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');"),
|
||||
# delete sso application content type
|
||||
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';"),
|
||||
# drop sso application created table
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;"),
|
||||
]
|
||||
@@ -1,23 +0,0 @@
|
||||
# Generated by Django 4.2.10 on 2024-10-22 15:58
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0198_remove_sso_app_content'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(default=None, max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(default=None, max_length=32),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,32 @@
# Generated by Django 4.2.20 on 2025-04-24 09:08

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0198_alter_inventorysource_source_and_more'),
    ]

    operations = [
        migrations.CreateModel(
            name='InventoryGroupVariablesWithHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('variables', models.JSONField()),
                ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.group')),
                (
                    'inventory',
                    models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.inventory'),
                ),
            ],
        ),
        migrations.AddConstraint(
            model_name='inventorygroupvariableswithhistory',
            constraint=models.UniqueConstraint(
                fields=('inventory', 'group'), name='unique_inventory_group', violation_error_message='Inventory/Group combination must be unique.'
            ),
        ),
    ]
@@ -1,39 +0,0 @@
|
||||
# Generated by Django 4.2.10 on 2024-10-24 14:06
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0199_alter_inventorysource_source_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterUniqueTogether(
|
||||
name='oauth2application',
|
||||
unique_together=None,
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='oauth2application',
|
||||
name='organization',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='oauth2application',
|
||||
name='user',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_access_token',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_application',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='OAuth2AccessToken',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='OAuth2Application',
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0200_template_name_constraint.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
# Generated by Django 4.2.20 on 2025-04-22 15:54
|
||||
|
||||
import logging
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
from awx.main.migrations._db_constraints import _rename_duplicates
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def rename_jts(apps, schema_editor):
|
||||
cls = apps.get_model('main', 'JobTemplate')
|
||||
_rename_duplicates(cls)
|
||||
|
||||
|
||||
def rename_projects(apps, schema_editor):
|
||||
cls = apps.get_model('main', 'Project')
|
||||
_rename_duplicates(cls)
|
||||
|
||||
|
||||
def change_inventory_source_org_unique(apps, schema_editor):
|
||||
cls = apps.get_model('main', 'InventorySource')
|
||||
r = cls.objects.update(org_unique=False)
|
||||
logger.info(f'Set database constraint rule for {r} inventory source objects')
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0199_inventorygroupvariableswithhistory_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(rename_jts, migrations.RunPython.noop),
|
||||
migrations.RunPython(rename_projects, migrations.RunPython.noop),
|
||||
migrations.AddField(
|
||||
model_name='unifiedjobtemplate',
|
||||
name='org_unique',
|
||||
field=models.BooleanField(blank=True, default=True, editable=False, help_text='Used internally to selectively enforce database constraint on name'),
|
||||
),
|
||||
migrations.RunPython(change_inventory_source_org_unique, migrations.RunPython.noop),
|
||||
migrations.AddConstraint(
|
||||
model_name='unifiedjobtemplate',
|
||||
constraint=models.UniqueConstraint(
|
||||
condition=models.Q(('org_unique', True)), fields=('polymorphic_ctype', 'name', 'organization'), name='ujt_hard_name_constraint'
|
||||
),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0201_create_managed_creds.py (Normal file, 9 lines)
@@ -0,0 +1,9 @@
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0200_template_name_constraint'),
    ]

    operations = []
@@ -1,44 +0,0 @@
|
||||
# Generated by Django 4.2.16 on 2024-12-18 16:05
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0200_alter_oauth2application_unique_together_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
|
||||
migrations.AlterField(
|
||||
model_name='systemjob',
|
||||
name='job_type',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||
],
|
||||
default='',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||
],
|
||||
default='',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0202_squashed_deletions.py (Normal file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
# Generated by Django 4.2.10 on 2024-09-16 10:22
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0201_create_managed_creds'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.DeleteModel(
|
||||
name='Profile',
|
||||
),
|
||||
# Remove SSO app content
|
||||
# delete all sso application migrations
|
||||
migrations.RunSQL("DELETE FROM django_migrations WHERE app = 'sso';"),
|
||||
# delete all sso application content group permissions
|
||||
migrations.RunSQL(
|
||||
"DELETE FROM auth_group_permissions "
|
||||
"WHERE permission_id IN "
|
||||
"(SELECT id FROM auth_permission WHERE content_type_id in (SELECT id FROM django_content_type WHERE app_label = 'sso'));"
|
||||
),
|
||||
# delete all sso application content permissions
|
||||
migrations.RunSQL("DELETE FROM auth_permission " "WHERE content_type_id IN (SELECT id FROM django_content_type WHERE app_label = 'sso');"),
|
||||
# delete sso application content type
|
||||
migrations.RunSQL("DELETE FROM django_content_type WHERE app_label = 'sso';"),
|
||||
# drop sso application created table
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS sso_userenterpriseauth;"),
|
||||
# Alter inventory source source field
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(default=None, max_length=32),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(default=None, max_length=32),
|
||||
),
|
||||
# Alter OAuth2Application unique together
|
||||
migrations.AlterUniqueTogether(
|
||||
name='oauth2application',
|
||||
unique_together=None,
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='oauth2application',
|
||||
name='organization',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='oauth2application',
|
||||
name='user',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_access_token',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
name='o_auth2_application',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='OAuth2AccessToken',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='OAuth2Application',
|
||||
),
|
||||
# Delete system token cleanup jobs, because tokens were deleted
|
||||
migrations.RunPython(delete_clear_tokens_sjt, migrations.RunPython.noop),
|
||||
migrations.AlterField(
|
||||
model_name='systemjob',
|
||||
name='job_type',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||
],
|
||||
default='',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='systemjobtemplate',
|
||||
name='job_type',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
choices=[
|
||||
('cleanup_jobs', 'Remove jobs older than a certain number of days'),
|
||||
('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'),
|
||||
('cleanup_sessions', 'Removes expired browser sessions from the database'),
|
||||
],
|
||||
default='',
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -17,7 +17,13 @@ logger = logging.getLogger('awx.main.migrations._dab_rbac')
|
||||
|
||||
|
||||
def create_permissions_as_operation(apps, schema_editor):
|
||||
# NOTE: the DAB ContentType changes adjusted how they fire
|
||||
# before they would fire on every app config, like contenttypes
|
||||
create_dab_permissions(global_apps.get_app_config("main"), apps=apps)
|
||||
# This changed to only fire once and do a global creation
|
||||
# so we need to call it for specifically the dab_rbac app
|
||||
# multiple calls will not hurt anything
|
||||
create_dab_permissions(global_apps.get_app_config("dab_rbac"), apps=apps)
|
||||
|
||||
|
||||
"""
|
||||
@@ -112,7 +118,12 @@ def get_descendents(f, children_map):
|
||||
|
||||
def get_permissions_for_role(role_field, children_map, apps):
|
||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
try:
|
||||
# After migration for remote permissions
|
||||
ContentType = apps.get_model('dab_rbac', 'DABContentType')
|
||||
except LookupError:
|
||||
# If using DAB from before remote permissions are implemented
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
|
||||
perm_list = []
|
||||
for child_field in get_descendents(role_field, children_map):
|
||||
@@ -281,7 +292,13 @@ def setup_managed_role_definitions(apps, schema_editor):
|
||||
'special': '{cls.__name__} {action}',
|
||||
}
|
||||
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
try:
|
||||
# After migration for remote permissions
|
||||
ContentType = apps.get_model('dab_rbac', 'DABContentType')
|
||||
except LookupError:
|
||||
# If using DAB from before remote permissions are implemented
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
|
||||
Permission = apps.get_model('dab_rbac', 'DABPermission')
|
||||
RoleDefinition = apps.get_model('dab_rbac', 'RoleDefinition')
|
||||
Organization = apps.get_model(settings.ANSIBLE_BASE_ORGANIZATION_MODEL)
|
||||
|
||||
awx/main/migrations/_db_constraints.py (Normal file, 25 lines)
@@ -0,0 +1,25 @@
import logging

from django.db.models import Count


logger = logging.getLogger(__name__)


def _rename_duplicates(cls):
    field = cls._meta.get_field('name')
    max_len = field.max_length
    for organization_id in cls.objects.order_by().values_list('organization_id', flat=True).distinct():
        duplicate_data = cls.objects.values('name').filter(organization_id=organization_id).annotate(count=Count('name')).order_by().filter(count__gt=1)
        for data in duplicate_data:
            name = data['name']
            for idx, ujt in enumerate(cls.objects.filter(name=name, organization_id=organization_id).order_by('created')):
                if idx > 0:
                    suffix = f'_dup{idx}'
                    max_chars = max_len - len(suffix)
                    if len(ujt.name) >= max_chars:
                        ujt.name = ujt.name[:max_chars] + suffix
                    else:
                        ujt.name = ujt.name + suffix
                    logger.info(f'Renaming duplicate {cls._meta.model_name} to `{ujt.name}` because of duplicate name entry')
                    ujt.save(update_fields=['name'])
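To make the renaming rule concrete, here is a standalone sketch of the suffix logic above with a hypothetical name and length limit; the real function operates on Django model instances grouped by organization:

    def dedup_name(name, idx, max_len):
        # Standalone restatement of the suffix rule in _rename_duplicates.
        if idx == 0:
            return name  # the oldest duplicate keeps its original name
        suffix = f'_dup{idx}'
        max_chars = max_len - len(suffix)
        if len(name) >= max_chars:
            return name[:max_chars] + suffix
        return name + suffix

    assert dedup_name('Demo Job Template', 2, 512) == 'Demo Job Template_dup2'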
@@ -33,6 +33,7 @@ from awx.main.models.inventory import ( # noqa
|
||||
InventorySource,
|
||||
InventoryUpdate,
|
||||
SmartInventoryMembership,
|
||||
InventoryGroupVariablesWithHistory,
|
||||
)
|
||||
from awx.main.models.jobs import ( # noqa
|
||||
Job,
|
||||
|
||||
@@ -550,10 +550,10 @@ class CredentialType(CommonModelNameNotUnique):
|
||||
# TODO: User "side-loaded" credential custom_injectors isn't supported
|
||||
ManagedCredentialType.registry[ns] = SimpleNamespace(namespace=ns, name=plugin.name, kind='external', inputs=plugin.inputs, backend=plugin.backend)
|
||||
|
||||
def inject_credential(self, credential, env, safe_env, args, private_data_dir):
|
||||
def inject_credential(self, credential, env, safe_env, args, private_data_dir, container_root=None):
|
||||
from awx_plugins.interfaces._temporary_private_inject_api import inject_credential
|
||||
|
||||
inject_credential(self, credential, env, safe_env, args, private_data_dir)
|
||||
inject_credential(self, credential, env, safe_env, args, private_data_dir, container_root=container_root)
|
||||
|
||||
|
||||
class CredentialTypeHelper:
|
||||
|
||||
@@ -24,6 +24,7 @@ from awx.main.managers import DeferJobCreatedManager
|
||||
from awx.main.constants import MINIMAL_EVENTS
|
||||
from awx.main.models.base import CreatedModifiedModel
|
||||
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
|
||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
||||
|
||||
analytics_logger = logging.getLogger('awx.analytics.job_events')
|
||||
|
||||
@@ -565,7 +566,6 @@ class JobEvent(BasePlaybookEvent):
|
||||
summaries = dict()
|
||||
updated_hosts_list = list()
|
||||
for host in hostnames:
|
||||
updated_hosts_list.append(host.lower())
|
||||
host_id = host_map.get(host)
|
||||
if host_id not in existing_host_ids:
|
||||
host_id = None
|
||||
@@ -582,6 +582,12 @@ class JobEvent(BasePlaybookEvent):
|
||||
summary.failed = bool(summary.dark or summary.failures)
|
||||
summaries[(host_id, host)] = summary
|
||||
|
||||
# do not count dark / unreachable hosts as updated
|
||||
if not bool(summary.dark):
|
||||
updated_hosts_list.append(host.lower())
|
||||
else:
|
||||
logger.warning(f'host {host.lower()} is dark / unreachable, not marking it as updated')
|
||||
|
||||
JobHostSummary.objects.bulk_create(summaries.values())
|
||||
|
||||
# update the last_job_id and last_job_host_summary_id
|
||||
@@ -597,7 +603,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
h.last_job_host_summary_id = host_mapping[h.id]
|
||||
updated_hosts.add(h)
|
||||
|
||||
Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
|
||||
bulk_update_sorted_by_id(Host, updated_hosts, ['last_job_id', 'last_job_host_summary_id'])
|
||||
|
||||
# Create/update Host Metrics
|
||||
self._update_host_metrics(updated_hosts_list)
|
||||
|
||||
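The swap from `Host.objects.bulk_update(...)` to `bulk_update_sorted_by_id(...)` is about applying bulk updates in a deterministic primary-key order, so concurrent writers acquire row locks in the same order and are less likely to deadlock. The real helper lives in `awx.main.utils.db`; a rough sketch of the assumed shape:

    def bulk_update_sorted_by_id(model_cls, objects, fields, batch_size=100):
        # Assumed implementation shape, not the actual AWX helper:
        # sort by primary key before handing off to Django's bulk_update.
        ordered = sorted(objects, key=lambda obj: obj.pk)
        if ordered:
            model_cls.objects.bulk_update(ordered, fields, batch_size=batch_size)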
@@ -43,6 +43,7 @@ from awx.main.models.mixins import (
|
||||
TaskManagerInventoryUpdateMixin,
|
||||
RelatedJobsMixin,
|
||||
CustomVirtualEnvMixin,
|
||||
OpaQueryPathMixin,
|
||||
)
|
||||
from awx.main.models.notifications import (
|
||||
NotificationTemplate,
|
||||
@@ -68,7 +69,7 @@ class InventoryConstructedInventoryMembership(models.Model):
|
||||
)
|
||||
|
||||
|
||||
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
|
||||
class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin, OpaQueryPathMixin):
|
||||
"""
|
||||
an inventory source contains lists and hosts.
|
||||
"""
|
||||
@@ -1119,8 +1120,10 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# if this is a new object, inherit organization from its inventory
|
||||
if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
|
||||
self.organization_id = self.inventory.organization_id
|
||||
if not self.pk:
|
||||
self.org_unique = False # needed to exclude from unique (name, organization) constraint
|
||||
if self.inventory and self.inventory.organization_id and not self.organization_id:
|
||||
self.organization_id = self.inventory.organization_id
|
||||
|
||||
# If update_fields has been specified, add our field names to it,
|
||||
# if it hasn't been specified, then we're just doing a normal save.
|
||||
@@ -1401,3 +1404,38 @@ class CustomInventoryScript(CommonModelNameNotUnique):
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
class InventoryGroupVariablesWithHistory(models.Model):
|
||||
"""
|
||||
Represents the inventory variables of one inventory group.
|
||||
|
||||
The purpose of this model is to persist the update history of the group
|
||||
variables. The update history is maintained in another class
|
||||
(`InventoryGroupVariables`); this class is just a container for the
|
||||
database storage.
|
||||
"""
|
||||
|
||||
class Meta:
|
||||
constraints = [
|
||||
# Do not allow the same inventory/group combination more than once.
|
||||
models.UniqueConstraint(
|
||||
fields=["inventory", "group"],
|
||||
name="unique_inventory_group",
|
||||
violation_error_message=_("Inventory/Group combination must be unique."),
|
||||
),
|
||||
]
|
||||
|
||||
inventory = models.ForeignKey(
|
||||
'Inventory',
|
||||
related_name='inventory_group_variables',
|
||||
null=True,
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
group = models.ForeignKey( # `None` denotes the 'all'-group.
|
||||
'Group',
|
||||
related_name='inventory_group_variables',
|
||||
null=True,
|
||||
on_delete=models.CASCADE,
|
||||
)
|
||||
variables = models.JSONField() # The group variables, including their history.
|
||||
|
||||
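A usage sketch for the model above, assuming the caller already holds the variables structure (including its history) that the separate `InventoryGroupVariables` class maintains; `inventory`, `group`, and `variables_with_history` are placeholders:

    # The unique (inventory, group) constraint makes update_or_create a natural fit.
    record, created = InventoryGroupVariablesWithHistory.objects.update_or_create(
        inventory=inventory,
        group=group,  # pass group=None to address the implicit 'all' group
        defaults={'variables': variables_with_history},
    )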
@@ -51,6 +51,7 @@ from awx.main.models.mixins import (
|
||||
RelatedJobsMixin,
|
||||
WebhookMixin,
|
||||
WebhookTemplateMixin,
|
||||
OpaQueryPathMixin,
|
||||
)
|
||||
from awx.main.constants import JOB_VARIABLE_PREFIXES
|
||||
|
||||
@@ -192,7 +193,9 @@ class JobOptions(BaseModel):
|
||||
return needed
|
||||
|
||||
|
||||
class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, WebhookTemplateMixin):
|
||||
class JobTemplate(
|
||||
UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, WebhookTemplateMixin, OpaQueryPathMixin
|
||||
):
|
||||
"""
|
||||
A job template is a reusable job definition for applying a project (with
|
||||
playbook) to an inventory source with a given credential.
|
||||
@@ -355,26 +358,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
update_fields.append('organization_id')
|
||||
return super(JobTemplate, self).save(*args, **kwargs)
|
||||
|
||||
def validate_unique(self, exclude=None):
|
||||
"""Custom over-ride for JT specifically
|
||||
because organization is inferred from project after full_clean is finished
|
||||
thus the organization field is not yet set when validation happens
|
||||
"""
|
||||
errors = []
|
||||
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
|
||||
kwargs = {'name': self.name}
|
||||
if self.project:
|
||||
kwargs['organization'] = self.project.organization_id
|
||||
else:
|
||||
kwargs['organization'] = None
|
||||
qs = JobTemplate.objects.filter(**kwargs)
|
||||
if self.pk:
|
||||
qs = qs.exclude(pk=self.pk)
|
||||
if qs.exists():
|
||||
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
|
||||
if errors:
|
||||
raise ValidationError(errors)
|
||||
|
||||
def create_unified_job(self, **kwargs):
|
||||
prevent_slicing = kwargs.pop('_prevent_slicing', False)
|
||||
slice_ct = self.get_effective_slice_ct(kwargs)
|
||||
@@ -401,6 +384,26 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
|
||||
WorkflowJobNode.objects.create(**create_kwargs)
|
||||
return job
|
||||
|
||||
def validate_unique(self, exclude=None):
|
||||
"""Custom over-ride for JT specifically
|
||||
because organization is inferred from project after full_clean is finished
|
||||
thus the organization field is not yet set when validation happens
|
||||
"""
|
||||
errors = []
|
||||
for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
|
||||
kwargs = {'name': self.name}
|
||||
if self.project:
|
||||
kwargs['organization'] = self.project.organization_id
|
||||
else:
|
||||
kwargs['organization'] = None
|
||||
qs = JobTemplate.objects.filter(**kwargs)
|
||||
if self.pk:
|
||||
qs = qs.exclude(pk=self.pk)
|
||||
if qs.exists():
|
||||
errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
|
||||
if errors:
|
||||
raise ValidationError(errors)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
|
||||
@@ -42,6 +42,7 @@ __all__ = [
|
||||
'TaskManagerInventoryUpdateMixin',
|
||||
'ExecutionEnvironmentMixin',
|
||||
'CustomVirtualEnvMixin',
|
||||
'OpaQueryPathMixin',
|
||||
]
|
||||
|
||||
|
||||
@@ -85,7 +86,7 @@ class ResourceMixin(models.Model):
|
||||
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
|
||||
|
||||
if content_types is None:
|
||||
ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
|
||||
ct_kwarg = dict(content_type=ContentType.objects.get_for_model(cls))
|
||||
else:
|
||||
ct_kwarg = dict(content_type_id__in=content_types)
|
||||
|
||||
@@ -692,3 +693,16 @@ class WebhookMixin(models.Model):
|
||||
logger.debug("Webhook status update sent.")
|
||||
else:
|
||||
logger.error("Posting webhook status failed, code: {}\n" "{}\nPayload sent: {}".format(response.status_code, response.text, json.dumps(data)))
|
||||
|
||||
|
||||
class OpaQueryPathMixin(models.Model):
|
||||
class Meta:
|
||||
abstract = True
|
||||
|
||||
opa_query_path = models.CharField(
|
||||
max_length=128,
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
help_text=_("The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule."),
|
||||
)
|
||||
|
||||
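As an illustration of the `package/rule` format, a hypothetical value and the OPA Data API URL it would conventionally map to (the evaluation code itself is not part of this hunk):

    opa_query_path = 'aap_policy_examples/allow'  # hypothetical package/rule value
    opa_url = f'https://opa.example.com/v1/data/{opa_query_path}'
    # POSTing the job's input document to opa_url would return the rule's decision.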
@@ -22,12 +22,12 @@ from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
)
|
||||
from awx.main.models.unified_jobs import UnifiedJob
|
||||
from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin
|
||||
from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, OpaQueryPathMixin
|
||||
|
||||
__all__ = ['Organization', 'Team', 'UserSessionMembership']
|
||||
|
||||
|
||||
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin):
|
||||
class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, OpaQueryPathMixin):
|
||||
"""
|
||||
An organization is the basic unit of multi-tenancy divisions
|
||||
"""
|
||||
|
||||
@@ -27,6 +27,7 @@ from django.conf import settings
|
||||
|
||||
# Ansible_base app
|
||||
from ansible_base.rbac.models import RoleDefinition, RoleUserAssignment, RoleTeamAssignment
|
||||
from ansible_base.rbac import permission_registry
|
||||
from ansible_base.lib.utils.models import get_type_for_model
|
||||
|
||||
# AWX
|
||||
@@ -561,7 +562,7 @@ def get_role_definition(role):
|
||||
model_print = type(obj).__name__
|
||||
perm_list = get_role_codenames(role)
|
||||
defaults = {
|
||||
'content_type_id': role.content_type_id,
|
||||
'content_type': permission_registry.content_type_model.objects.get_by_natural_key(role.content_type.app_label, role.content_type.model),
|
||||
'description': f'Has {action_name.title()} permission to {model_print} for backwards API compatibility',
|
||||
}
|
||||
# use Controller-specific role definitions for Team/Organization and member/admin
|
||||
|
||||
@@ -18,11 +18,13 @@ from collections import OrderedDict
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import models, connection, transaction
|
||||
from django.db.models.constraints import UniqueConstraint
|
||||
from django.core.exceptions import NON_FIELD_ERRORS
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.timezone import now
|
||||
from django.utils.encoding import smart_str
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from flags.state import flag_enabled
|
||||
|
||||
# REST Framework
|
||||
from rest_framework.exceptions import ParseError
|
||||
@@ -32,6 +34,7 @@ from polymorphic.models import PolymorphicModel
|
||||
|
||||
from ansible_base.lib.utils.models import prevent_search, get_type_for_model
|
||||
from ansible_base.rbac import permission_registry
|
||||
from ansible_base.rbac.models import RoleEvaluation
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
|
||||
@@ -111,7 +114,10 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
ordering = ('name',)
|
||||
# unique_together here is intentionally commented out. Please make sure sub-classes of this model
|
||||
# contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
|
||||
# unique_together = [('polymorphic_ctype', 'name', 'organization')]
|
||||
# Unique name constraint - note that inventory source model is excluded from this constraint entirely
|
||||
constraints = [
|
||||
UniqueConstraint(fields=['polymorphic_ctype', 'name', 'organization'], condition=models.Q(org_unique=True), name='ujt_hard_name_constraint')
|
||||
]
|
||||
|
||||
old_pk = models.PositiveIntegerField(
|
||||
null=True,
|
||||
@@ -180,6 +186,9 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
)
|
||||
labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels')
|
||||
instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership')
|
||||
org_unique = models.BooleanField(
|
||||
blank=True, default=True, editable=False, help_text=_('Used internally to selectively enforce database constraint on name')
|
||||
)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
real_instance = self.get_real_instance()
|
||||
@@ -210,20 +219,21 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
|
||||
# do not use this if in a subclass
|
||||
if cls != UnifiedJobTemplate:
|
||||
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
|
||||
from ansible_base.rbac.models import RoleEvaluation
|
||||
|
||||
action = to_permissions[role_field]
|
||||
|
||||
# Special condition for super auditor
|
||||
role_subclasses = cls._submodels_with_roles()
|
||||
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
|
||||
all_codenames = {f'{action}_{cls._meta.model_name}' for cls in role_subclasses}
|
||||
if not (all_codenames - accessor.singleton_permissions()):
|
||||
role_cts = ContentType.objects.get_for_models(*role_subclasses).values()
|
||||
qs = cls.objects.filter(polymorphic_ctype__in=role_cts)
|
||||
return qs.values_list('id', flat=True)
|
||||
|
||||
dab_role_cts = permission_registry.content_type_model.objects.get_for_models(*role_subclasses).values()
|
||||
|
||||
return (
|
||||
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in role_cts])
|
||||
RoleEvaluation.objects.filter(role__in=accessor.has_roles.all(), codename__in=all_codenames, content_type_id__in=[ct.id for ct in dab_role_cts])
|
||||
.values_list('object_id')
|
||||
.distinct()
|
||||
)
|
||||
@@ -1362,7 +1372,30 @@ class UnifiedJob(
|
||||
traceback=self.result_traceback,
|
||||
)
|
||||
|
||||
def pre_start(self, **kwargs):
|
||||
def get_start_kwargs(self):
|
||||
needed = self.get_passwords_needed_to_start()
|
||||
|
||||
decrypted_start_args = decrypt_field(self, 'start_args')
|
||||
|
||||
if not decrypted_start_args or decrypted_start_args == '{}':
|
||||
return None
|
||||
|
||||
try:
|
||||
start_args = json.loads(decrypted_start_args)
|
||||
except Exception:
|
||||
logger.exception(f'Unexpected malformed start_args on unified_job={self.id}')
|
||||
return None
|
||||
|
||||
opts = dict([(field, start_args.get(field, '')) for field in needed])
|
||||
|
||||
if not all(opts.values()):
|
||||
missing_fields = ', '.join([k for k, v in opts.items() if not v])
|
||||
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
|
||||
self.save(update_fields=['job_explanation'])
|
||||
|
||||
return opts
|
||||
|
||||
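A worked illustration of the contract of `get_start_kwargs()` above, using made-up values in place of the decrypted `start_args` and the passwords a job might need:

    import json

    needed = ['ssh_password', 'become_password']            # get_passwords_needed_to_start()
    start_args = json.loads('{"ssh_password": "secret"}')   # decrypted start_args (hypothetical)
    opts = {field: start_args.get(field, '') for field in needed}
    missing = ', '.join(k for k, v in opts.items() if not v)
    assert missing == 'become_password'  # reported to the user via job_explanation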
def pre_start(self):
|
||||
if not self.can_start:
|
||||
self.job_explanation = u'%s is not in a startable state: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting')))
|
||||
self.save(update_fields=['job_explanation'])
|
||||
@@ -1383,26 +1416,11 @@ class UnifiedJob(
|
||||
self.save(update_fields=['job_explanation'])
|
||||
return (False, None)
|
||||
|
||||
needed = self.get_passwords_needed_to_start()
|
||||
try:
|
||||
start_args = json.loads(decrypt_field(self, 'start_args'))
|
||||
except Exception:
|
||||
start_args = None
|
||||
opts = self.get_start_kwargs()
|
||||
|
||||
if start_args in (None, ''):
|
||||
start_args = kwargs
|
||||
|
||||
opts = dict([(field, start_args.get(field, '')) for field in needed])
|
||||
|
||||
if not all(opts.values()):
|
||||
missing_fields = ', '.join([k for k, v in opts.items() if not v])
|
||||
self.job_explanation = u'Missing needed fields: %s.' % missing_fields
|
||||
self.save(update_fields=['job_explanation'])
|
||||
if opts and (not all(opts.values())):
|
||||
return (False, None)
|
||||
|
||||
if 'extra_vars' in kwargs:
|
||||
self.handle_extra_data(kwargs['extra_vars'])
|
||||
|
||||
# remove any job_explanations that may have been set while job was in pending
|
||||
if self.job_explanation != "":
|
||||
self.job_explanation = ""
|
||||
@@ -1463,21 +1481,44 @@ class UnifiedJob(
|
||||
def cancel_dispatcher_process(self):
|
||||
"""Returns True if dispatcher running this job acknowledged request and sent SIGTERM"""
|
||||
if not self.celery_task_id:
|
||||
return
|
||||
return False
|
||||
|
||||
canceled = []
|
||||
# Special case for task manager (used during workflow job cancellation)
|
||||
if not connection.get_autocommit():
|
||||
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
|
||||
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
try:
|
||||
from dispatcherd.factories import get_control_from_settings
|
||||
|
||||
ctl = get_control_from_settings()
|
||||
ctl.control('cancel', data={'uuid': self.celery_task_id})
|
||||
except Exception:
|
||||
logger.exception("Error sending cancel command to new dispatcher")
|
||||
else:
|
||||
try:
|
||||
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
||||
except Exception:
|
||||
logger.exception("Error sending cancel command to legacy dispatcher")
|
||||
return True # task manager itself needs to act under assumption that cancel was received
|
||||
|
||||
# Standard case with reply
|
||||
try:
|
||||
# Use control and reply mechanism to cancel and obtain confirmation
|
||||
timeout = 5
|
||||
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
from dispatcherd.factories import get_control_from_settings
|
||||
|
||||
ctl = get_control_from_settings()
|
||||
results = ctl.control_with_reply('cancel', data={'uuid': self.celery_task_id}, expected_replies=1, timeout=timeout)
|
||||
# Check if cancel was successful by checking if we got any results
|
||||
return bool(results and len(results) > 0)
|
||||
else:
|
||||
# Original implementation
|
||||
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
|
||||
except socket.timeout:
|
||||
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
|
||||
except Exception:
|
||||
logger.exception("error encountered when checking task status")
|
||||
|
||||
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
|
||||
|
||||
def cancel(self, job_explanation=None, is_chain=False):
|
||||
|
||||
@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
):
|
||||
super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
|
||||
self.grafana_key = grafana_key
|
||||
self.dashboardId = int(dashboardId) if dashboardId is not None else None
|
||||
self.panelId = int(panelId) if panelId is not None else None
|
||||
self.dashboardId = int(dashboardId) if dashboardId is not None and panelId != "" else None
|
||||
self.panelId = int(panelId) if panelId is not None and panelId != "" else None
|
||||
self.annotation_tags = annotation_tags if annotation_tags is not None else []
|
||||
self.grafana_no_verify_ssl = grafana_no_verify_ssl
|
||||
self.isRegion = isRegion
|
||||
@@ -97,6 +97,7 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
r = requests.post(
|
||||
"{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl)
|
||||
)
|
||||
|
||||
if r.status_code >= 400:
|
||||
logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
|
||||
if not self.fail_silently:
|
||||
|
||||
@@ -174,6 +174,9 @@ class PodManager(object):
|
||||
)
|
||||
pod_spec['spec']['containers'][0]['name'] = self.pod_name
|
||||
|
||||
# Prevent mounting of service account token in job pods in order to prevent job pods from accessing the k8s API via in cluster service account auth
|
||||
pod_spec['spec']['automountServiceAccountToken'] = False
|
||||
|
||||
return pod_spec
|
||||
|
||||
|
||||
|
||||
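A minimal illustration of what the added line does to the generated pod spec; the surrounding spec here is a placeholder, the real one comes from AWX settings and the container group:

    pod_spec = {'spec': {'containers': [{'name': 'automation-job'}]}}
    pod_spec['spec']['automountServiceAccountToken'] = False
    # Kubernetes then skips mounting the default service account token into the
    # job pod, so playbooks cannot use in-cluster auth against the cluster API.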
@@ -10,6 +10,8 @@ import time
|
||||
import sys
|
||||
import signal
|
||||
|
||||
import redis
|
||||
|
||||
# Django
|
||||
from django.db import transaction
|
||||
from django.utils.translation import gettext_lazy as _, gettext_noop
|
||||
@@ -17,6 +19,9 @@ from django.utils.timezone import now as tz_now
|
||||
from django.conf import settings
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
# django-flags
|
||||
from flags.state import flag_enabled
|
||||
|
||||
from ansible_base.lib.utils.models import get_type_for_model
|
||||
|
||||
# django-ansible-base
|
||||
@@ -46,6 +51,7 @@ from awx.main.signals import disable_activity_stream
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.scheduler.dependency_graph import DependencyGraph
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
||||
import awx.main.analytics.subsystem_metrics as s_metrics
|
||||
from awx.main.utils import decrypt_field
|
||||
|
||||
@@ -120,6 +126,8 @@ class TaskBase:
|
||||
self.subsystem_metrics.pipe_execute()
|
||||
else:
|
||||
logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
|
||||
except redis.exceptions.ConnectionError as exc:
|
||||
logger.warning(f"Redis connection error saving metrics for {self.prefix}, error: {exc}")
|
||||
except Exception:
|
||||
logger.exception(f"Error saving metrics for {self.prefix}")
|
||||
|
||||
@@ -427,6 +435,7 @@ class TaskManager(TaskBase):
|
||||
# 5 minutes to start pending jobs. If this limit is reached, pending jobs
|
||||
# will no longer be started and will be started on the next task manager cycle.
|
||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||
self.control_nodes_to_notify: set[str] = set()
|
||||
super().__init__(prefix="task_manager")
|
||||
|
||||
def after_lock_init(self):
|
||||
@@ -515,16 +524,19 @@ class TaskManager(TaskBase):
|
||||
task.save()
|
||||
task.log_lifecycle("waiting")
|
||||
|
||||
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
||||
# postgres will treat this as part of the transaction, which is what we want
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
)
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
self.control_nodes_to_notify.add(task.get_queue_name())
|
||||
else:
|
||||
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
||||
# postgres will treat this as part of the transaction, which is what we want
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
)
|
||||
|
||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
|
||||
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||
@@ -717,3 +729,8 @@ class TaskManager(TaskBase):
|
||||
|
||||
for workflow_approval in self.get_expired_workflow_approvals():
|
||||
self.timeout_approval_node(workflow_approval)
|
||||
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
for controller_node in self.control_nodes_to_notify:
|
||||
logger.info(f'Notifying node {controller_node} of new waiting jobs.')
|
||||
dispatch_waiting_jobs.apply_async(queue=controller_node)
|
||||
|
||||
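The flag-enabled branch above batches notifications: instead of one `apply_async` NOTIFY per started task, the task manager records each task's queue and wakes every affected control node once per scheduling cycle. A compressed sketch of that flow, with `started_tasks` standing in for the tasks handled during the cycle:

    control_nodes_to_notify = set()
    for task in started_tasks:                       # placeholder for the tasks handled above
        control_nodes_to_notify.add(task.get_queue_name())

    for controller_node in control_nodes_to_notify:  # end of the schedule() cycle
        dispatch_waiting_jobs.apply_async(queue=controller_node)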
@@ -7,7 +7,7 @@ from django.conf import settings
|
||||
# AWX
|
||||
from awx import MODE
|
||||
from awx.main.scheduler import TaskManager, DependencyManager, WorkflowManager
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
logger = logging.getLogger('awx.main.scheduler')
|
||||
@@ -20,16 +20,16 @@ def run_manager(manager, prefix):
|
||||
manager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def task_manager():
|
||||
run_manager(TaskManager, "task")
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def dependency_manager():
|
||||
run_manager(DependencyManager, "dependency")
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def workflow_manager():
|
||||
run_manager(WorkflowManager, "workflow")
|
||||
|
||||
@@ -1 +1 @@
from . import host_metrics, jobs, receptor, system # noqa
from . import callback, facts, helpers, host_indirect, host_metrics, jobs, receptor, system # noqa

@@ -6,16 +6,15 @@ import logging
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db.models.query import QuerySet
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now
|
||||
from django.db import OperationalError
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.lib.logging.runtime import log_excess_runtime
|
||||
|
||||
# AWX
|
||||
from awx.main.models.inventory import Host
|
||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
||||
from awx.main.models import Host
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.facts')
|
||||
@@ -23,63 +22,51 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
|
||||
def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_data=None):
|
||||
log_data = log_data or {}
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['written_ct'] = 0
|
||||
try:
|
||||
os.makedirs(destination, mode=0o700)
|
||||
except FileExistsError:
|
||||
pass
|
||||
hosts_cached = []
|
||||
|
||||
# Create the fact_cache directory inside artifacts_dir
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
os.makedirs(fact_cache_dir, mode=0o700, exist_ok=True)
|
||||
|
||||
if timeout is None:
|
||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||
|
||||
if isinstance(hosts, QuerySet):
|
||||
hosts = hosts.iterator()
|
||||
last_write_time = None
|
||||
|
||||
last_filepath_written = None
|
||||
for host in hosts:
|
||||
if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
||||
hosts_cached.append(host.name)
|
||||
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
||||
continue # facts are expired - do not write them
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
||||
continue
|
||||
|
||||
try:
|
||||
with codecs.open(filepath, 'w', encoding='utf-8') as f:
|
||||
os.chmod(f.name, 0o600)
|
||||
json.dump(host.ansible_facts, f)
|
||||
log_data['written_ct'] += 1
|
||||
last_filepath_written = filepath
|
||||
last_write_time = os.path.getmtime(filepath)
|
||||
except IOError:
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
||||
continue
|
||||
# make note of the time we wrote the last file so we can check if any file changed later
|
||||
if last_filepath_written:
|
||||
return os.path.getmtime(last_filepath_written)
|
||||
return None
|
||||
|
||||
|
||||
def raw_update_hosts(host_list):
|
||||
Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def update_hosts(host_list, max_tries=5):
|
||||
if not host_list:
|
||||
return
|
||||
for i in range(max_tries):
|
||||
try:
|
||||
raw_update_hosts(host_list)
|
||||
except OperationalError as exc:
|
||||
# Deadlocks can happen if this runs at the same time as another large query
|
||||
# inventory updates and updating last_job_host_summary are candidates for conflict
|
||||
# but these would resolve easily on a retry
|
||||
if i + 1 < max_tries:
|
||||
logger.info(f'OperationalError (suspected deadlock) saving host facts retry {i}, message: {exc}')
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
break
|
||||
# Write summary file directly to the artifacts_dir
|
||||
if inventory_id is not None:
|
||||
summary_file = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
||||
summary_data = {
|
||||
'last_write_time': last_write_time,
|
||||
'hosts_cached': hosts_cached,
|
||||
'written_ct': log_data['written_ct'],
|
||||
}
|
||||
with open(summary_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(summary_data, f, indent=2)
|
||||
|
||||
|
||||
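For reference, a hypothetical `host_cache_summary.json` as `start_fact_cache()` writes it into the artifacts directory; the keys come from the code above, the values are invented:

    summary_data = {
        'last_write_time': 1747051200.5,  # mtime of the last fact file written, or None
        'hosts_cached': ['web01.example.com', 'db01.example.com'],
        'written_ct': 2,
    }
    # finish_fact_cache() later reloads this file and uses hosts_cached to decide
    # which Host rows to re-check against the fact_cache directory.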
@log_excess_runtime(
|
||||
@@ -88,35 +75,54 @@ def update_hosts(host_list, max_tries=5):
|
||||
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||
add_log_data=True,
|
||||
)
|
||||
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
|
||||
def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=None):
|
||||
log_data = log_data or {}
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['updated_ct'] = 0
|
||||
log_data['unmodified_ct'] = 0
|
||||
log_data['cleared_ct'] = 0
|
||||
# The summary file is directly inside the artifacts dir
|
||||
summary_path = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
||||
if not os.path.exists(summary_path):
|
||||
logger.error(f'Missing summary file at {summary_path}')
|
||||
return
|
||||
|
||||
if isinstance(hosts, QuerySet):
|
||||
hosts = hosts.iterator()
|
||||
try:
|
||||
with open(summary_path, 'r', encoding='utf-8') as f:
|
||||
summary = json.load(f)
|
||||
facts_write_time = os.path.getmtime(summary_path) # After successful read
|
||||
except (json.JSONDecodeError, OSError) as e:
|
||||
logger.error(f'Error reading summary file at {summary_path}: {e}')
|
||||
return
|
||||
|
||||
host_names = summary.get('hosts_cached', [])
|
||||
hosts_cached = Host.objects.filter(name__in=host_names).order_by('id').iterator()
|
||||
# Path where individual fact files were written
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
hosts_to_update = []
|
||||
for host in hosts:
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
|
||||
for host in hosts_cached:
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
||||
logger.error(f'Invalid path for facts file: {filepath}')
|
||||
continue
|
||||
|
||||
if os.path.exists(filepath):
|
||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||
modified = os.path.getmtime(filepath)
|
||||
if (not facts_write_time) or modified > facts_write_time:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
try:
|
||||
if not facts_write_time or modified >= facts_write_time:
|
||||
try:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
ansible_facts = json.load(f)
|
||||
except ValueError:
|
||||
continue
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
if ansible_facts != host.ansible_facts:
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
logger.info(
|
||||
f'New fact for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}',
|
||||
extra=dict(
|
||||
inventory_id=host.inventory.id,
|
||||
host_name=host.name,
|
||||
@@ -126,16 +132,21 @@ def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=Non
|
||||
),
|
||||
)
|
||||
log_data['updated_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
# if the file goes missing, ansible removed it (likely via clear_facts)
|
||||
# if the file goes missing, but the host has not started facts, then we should not clear the facts
|
||||
host.ansible_facts = {}
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||
logger.info(f'Facts cleared for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}')
|
||||
log_data['cleared_ct'] += 1
|
||||
if len(hosts_to_update) > 100:
|
||||
update_hosts(hosts_to_update)
|
||||
|
||||
if len(hosts_to_update) >= 100:
|
||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
hosts_to_update = []
|
||||
update_hosts(hosts_to_update)
|
||||
|
||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
@@ -12,7 +12,7 @@ from django.db import transaction
|
||||
# Django flags
|
||||
from flags.state import flag_enabled
|
||||
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.models.indirect_managed_node_audit import IndirectManagedNodeAudit
|
||||
from awx.main.models.event_query import EventQuery
|
||||
@@ -45,26 +45,46 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
|
||||
facts_missing_logged = False
|
||||
unhashable_facts_logged = False
|
||||
|
||||
job_event_queries_fqcn = {}
|
||||
for query_k, query_v in job_event_queries.items():
|
||||
if len(parts := query_k.split('.')) != 3:
|
||||
logger.info(f"Skiping malformed query '{query_k}'. Expected to be of the form 'a.b.c'")
|
||||
continue
|
||||
if parts[2] != '*':
|
||||
continue
|
||||
job_event_queries_fqcn['.'.join(parts[0:2])] = query_v
|
||||
|
||||
for event in job.job_events.filter(event_data__isnull=False).iterator():
|
||||
if 'res' not in event.event_data:
|
||||
continue
|
||||
|
||||
if 'resolved_action' not in event.event_data or event.event_data['resolved_action'] not in job_event_queries.keys():
|
||||
if not (resolved_action := event.event_data.get('resolved_action', None)):
|
||||
continue
|
||||
|
||||
resolved_action = event.event_data['resolved_action']
|
||||
if len(resolved_action_parts := resolved_action.split('.')) != 3:
|
||||
logger.debug(f"Malformed invocation module name '{resolved_action}'. Expected to be of the form 'a.b.c'")
|
||||
continue
|
||||
|
||||
# We expect a dict with a 'query' key for the resolved_action
|
||||
if 'query' not in job_event_queries[resolved_action]:
|
||||
resolved_action_fqcn = '.'.join(resolved_action_parts[0:2])
|
||||
|
||||
# Match module invocation to collection queries
|
||||
# First match against fully qualified query names i.e. a.b.c
|
||||
# Then try and match against wildcard queries i.e. a.b.*
|
||||
if not (jq_str_for_event := job_event_queries.get(resolved_action, job_event_queries_fqcn.get(resolved_action_fqcn, {})).get('query')):
|
||||
continue
|
||||
|
||||
# Recall from cache, or process the jq expression, and loop over the jq results
|
||||
jq_str_for_event = job_event_queries[resolved_action]['query']
|
||||
|
||||
if jq_str_for_event not in compiled_jq_expressions:
|
||||
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
|
||||
compiled_jq = compiled_jq_expressions[resolved_action]
|
||||
for data in compiled_jq.input(event.event_data['res']).all():
|
||||
|
||||
try:
|
||||
data_source = compiled_jq.input(event.event_data['res']).all()
|
||||
except Exception as e:
|
||||
logger.warning(f'error for module {resolved_action} and data {event.event_data["res"]}: {e}')
|
||||
continue
|
||||
|
||||
for data in data_source:
|
||||
# From this jq result (specific to a single Ansible module), get index information about this host record
|
||||
if not data.get('canonical_facts'):
|
||||
if not facts_missing_logged:
|
||||
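The wildcard handling above (exact 'a.b.c' keys first, then 'a.b.*' entries collapsed into an FQCN map) can be hard to follow in diff form. Below is a minimal sketch of the same lookup using the same jq bindings this module imports; the query map, module name, and event payload are made up for illustration.

import jq

job_event_queries = {'demo.collection.*': {'query': '.module_result.instance_id'}}
# Collapse wildcard entries ('a.b.*') into a map keyed by collection FQCN ('a.b')
queries_fqcn = {k.rsplit('.', 1)[0]: v for k, v in job_event_queries.items() if k.endswith('.*')}

resolved_action = 'demo.collection.some_module'
fqcn = '.'.join(resolved_action.split('.')[0:2])
# An exact match wins; a wildcard match is the fallback
jq_str = job_event_queries.get(resolved_action, queries_fqcn.get(fqcn, {})).get('query')
if jq_str:
    for item in jq.compile(jq_str).input({'module_result': {'instance_id': 'i-123'}}).all():
        print(item)  # -> 'i-123'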
@@ -139,7 +159,7 @@ def cleanup_old_indirect_host_entries() -> None:
IndirectManagedNodeAudit.objects.filter(created__lt=limit).delete()


@task(queue=get_task_queuename)
@task_awx(queue=get_task_queuename)
def save_indirect_host_entries(job_id: int, wait_for_events: bool = True) -> None:
try:
job = Job.objects.get(id=job_id)
@@ -181,7 +201,7 @@ def save_indirect_host_entries(job_id: int, wait_for_events: bool = True) -> Non
logger.exception(f'Error processing indirect host data for job_id={job_id}')


@task(queue=get_task_queuename)
@task_awx(queue=get_task_queuename)
def cleanup_and_save_indirect_host_entries_fallback() -> None:
if not flag_enabled("FEATURE_INDIRECT_NODE_COUNTING_ENABLED"):
return

@@ -7,17 +7,18 @@ from django.db.models import Count, F
from django.db.models.functions import TruncMonth
from django.utils.timezone import now
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task
from awx.main.dispatch.publish import task as task_awx
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.tasks.helpers import is_run_threshold_reached
from awx.conf.license import get_license
from ansible_base.lib.utils.db import advisory_lock
from awx.main.utils.db import bulk_update_sorted_by_id


logger = logging.getLogger('awx.main.tasks.host_metrics')


@task(queue=get_task_queuename)
@task_awx(queue=get_task_queuename)
def cleanup_host_metrics():
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
@@ -28,7 +29,7 @@ def cleanup_host_metrics():
logger.info("Finished cleanup_host_metrics")


@task(queue=get_task_queuename)
@task_awx(queue=get_task_queuename)
def host_metric_summary_monthly():
"""Run cleanup host metrics summary monthly task each week"""
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
@@ -146,8 +147,9 @@ class HostMetricSummaryMonthlyTask:
month = month + relativedelta(months=1)

# Create/Update stats
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create)

bulk_update_sorted_by_id(HostMetricSummaryMonthly, self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'])

# Set timestamp of last run
settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()

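The switch from a plain bulk_update to bulk_update_sorted_by_id mirrors the same change in the facts-clearing code earlier in this diff. The real helper lives in awx.main.utils.db and is not shown here; the sketch below is only a rough guess at its shape, illustrating why sorting by primary key before a bulk update helps.

def bulk_update_sorted_by_id(model_cls, rows, fields, batch_size=1000):
    """Illustrative approximation only; see awx.main.utils.db for the actual implementation."""
    # Updating rows in ascending pk order gives every caller the same lock ordering,
    # which avoids the deadlocks that unordered bulk_update calls can trigger.
    rows = sorted(rows, key=lambda obj: obj.pk)
    model_cls.objects.bulk_update(rows, fields, batch_size=batch_size)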
@@ -17,11 +17,11 @@ import urllib.parse as urlparse
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import transaction
|
||||
|
||||
# Shared code for the AWX platform
|
||||
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
|
||||
|
||||
|
||||
# Runner
|
||||
import ansible_runner
|
||||
|
||||
@@ -29,9 +29,12 @@ import ansible_runner
|
||||
import git
|
||||
from gitdb.exc import BadName as BadGitName
|
||||
|
||||
# Dispatcherd
|
||||
from dispatcherd.publish import task
|
||||
from dispatcherd.utils import serialize_task
|
||||
|
||||
# AWX
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.constants import (
|
||||
PRIVILEGE_ESCALATION_METHODS,
|
||||
@@ -39,13 +42,13 @@ from awx.main.constants import (
|
||||
JOB_FOLDER_PREFIX,
|
||||
MAX_ISOLATED_PATH_COLON_DELIMITER,
|
||||
CONTAINER_VOLUMES_MOUNT_TYPES,
|
||||
ACTIVE_STATES,
|
||||
HOST_FACTS_FIELDS,
|
||||
)
|
||||
from awx.main.models import (
|
||||
Instance,
|
||||
Inventory,
|
||||
InventorySource,
|
||||
UnifiedJob,
|
||||
Job,
|
||||
AdHocCommand,
|
||||
ProjectUpdate,
|
||||
@@ -65,11 +68,12 @@ from awx.main.tasks.callback import (
|
||||
RunnerCallbackForProjectUpdate,
|
||||
RunnerCallbackForSystemJob,
|
||||
)
|
||||
from awx.main.tasks.policy import evaluate_policy
|
||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||
from awx.main.tasks.receptor import AWXReceptorJob
|
||||
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
||||
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook
|
||||
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.exceptions import AwxTaskError, PolicyEvaluationError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||
from awx.main.utils.common import (
|
||||
@@ -111,6 +115,15 @@ def with_path_cleanup(f):
|
||||
return _wrapped
|
||||
|
||||
|
||||
@task(on_duplicate='queue_one', bind=True, queue=get_task_queuename)
|
||||
def dispatch_waiting_jobs(binder):
|
||||
for uj in UnifiedJob.objects.filter(status='waiting', controller_node=settings.CLUSTER_HOST_ID).only('id', 'status', 'polymorphic_ctype', 'celery_task_id'):
|
||||
kwargs = uj.get_start_kwargs()
|
||||
if not kwargs:
|
||||
kwargs = {}
|
||||
binder.control('run', data={'task': serialize_task(uj._get_task_class()), 'args': [uj.id], 'kwargs': kwargs, 'uuid': uj.celery_task_id})
|
||||
|
||||
|
||||
class BaseTask(object):
|
||||
model = None
|
||||
event_model = None
|
||||
@@ -118,6 +131,7 @@ class BaseTask(object):
|
||||
callback_class = RunnerCallback
|
||||
|
||||
def __init__(self):
|
||||
self.instance = None
|
||||
self.cleanup_paths = []
|
||||
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
|
||||
self.runner_callback = self.callback_class(model=self.model)
|
||||
@@ -305,6 +319,8 @@ class BaseTask(object):
|
||||
# Add ANSIBLE_* settings to the subprocess environment.
|
||||
for attr in dir(settings):
|
||||
if attr == attr.upper() and attr.startswith('ANSIBLE_') and not attr.startswith('ANSIBLE_BASE_'):
|
||||
if attr == 'ANSIBLE_STANDARD_SETTINGS_FILES':
|
||||
continue # special case intended only for dynaconf use
|
||||
env[attr] = str(getattr(settings, attr))
|
||||
# Also set environment variables configured in AWX_TASK_ENV setting.
|
||||
for key, value in settings.AWX_TASK_ENV.items():
|
||||
@@ -452,27 +468,48 @@ class BaseTask(object):
|
||||
def should_use_fact_cache(self):
|
||||
return False
|
||||
|
||||
def transition_status(self, pk: int) -> bool:
"""Atomically transition status to running, if False returned, another process got it"""
with transaction.atomic():
# Explanation of parts for the fetch:
# .values - avoid loading a full object, this is known to lead to deadlocks due to signals
# the signals load other related rows which another process may be locking, and happens in practice
# of=('self',) - keeps FK tables out of the lock list, another way deadlocks can happen
# .get - just load the single job
instance_data = UnifiedJob.objects.select_for_update(of=('self',)).values('status', 'cancel_flag').get(pk=pk)

# If status is not waiting (obtained under lock) then this process does not have clearance to run
if instance_data['status'] == 'waiting':
if instance_data['cancel_flag']:
updated_status = 'canceled'
else:
updated_status = 'running'
# Explanation of the update:
# .filter - again, do not load the full object
# .update - a bulk update on just that one row, avoid loading unintended data
UnifiedJob.objects.filter(pk=pk).update(status=updated_status, start_args='')
elif instance_data['status'] == 'running':
logger.info(f'Job {pk} is being run by another process, exiting')
return False
return True
|
||||
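The comments in transition_status() pack several ORM details into one statement. Here is a minimal sketch of the same lock-then-update pattern on a generic model, using only standard Django APIs; the names are illustrative, not AWX code.

from django.db import transaction

def claim_row(model_cls, pk):
    with transaction.atomic():
        # Lock just this row (of=('self',)) and fetch plain values so no signals fire.
        row = model_cls.objects.select_for_update(of=('self',)).values('status').get(pk=pk)
        if row['status'] != 'waiting':
            return False
        # Bulk-style update of the single row; the full model instance is never loaded.
        model_cls.objects.filter(pk=pk).update(status='running')
        return True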
@with_path_cleanup
|
||||
@with_signal_handling
|
||||
def run(self, pk, **kwargs):
|
||||
"""
|
||||
Run the job/task and capture its output.
|
||||
"""
|
||||
self.instance = self.model.objects.get(pk=pk)
|
||||
if self.instance.status != 'canceled' and self.instance.cancel_flag:
|
||||
self.instance = self.update_model(self.instance.pk, start_args='', status='canceled')
|
||||
if self.instance.status not in ACTIVE_STATES:
|
||||
# Prevent starting the job if it has been reaped or handled by another process.
|
||||
raise RuntimeError(f'Not starting {self.instance.status} task pk={pk} because {self.instance.status} is not a valid active state')
|
||||
if not self.instance: # Used to skip fetch for local runs
|
||||
if not self.transition_status(pk):
|
||||
logger.info(f'Job {pk} is being run by another process, exiting')
|
||||
return
|
||||
|
||||
if self.instance.execution_environment_id is None:
|
||||
from awx.main.signals import disable_activity_stream
|
||||
# Load the instance
|
||||
self.instance = self.update_model(pk)
|
||||
if self.instance.status != 'running':
|
||||
logger.error(f'Not starting {self.instance.status} task pk={pk} because its status "{self.instance.status}" is not expected')
|
||||
return
|
||||
|
||||
with disable_activity_stream():
|
||||
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
|
||||
|
||||
# self.instance because of the update_model pattern and when it's used in callback handlers
|
||||
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
|
||||
self.instance.websocket_emit_status("running")
|
||||
status, rc = 'error', None
|
||||
self.runner_callback.event_ct = 0
|
||||
@@ -485,12 +522,20 @@ class BaseTask(object):
|
||||
private_data_dir = None
|
||||
|
||||
try:
|
||||
if self.instance.execution_environment_id is None:
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
|
||||
|
||||
self.instance.send_notification_templates("running")
|
||||
private_data_dir = self.build_private_data_dir(self.instance)
|
||||
self.pre_run_hook(self.instance, private_data_dir)
|
||||
evaluate_policy(self.instance)
|
||||
self.build_project_dir(self.instance, private_data_dir)
|
||||
self.instance.log_lifecycle("preparing_playbook")
|
||||
if self.instance.cancel_flag or signal_callback():
|
||||
logger.debug(f'detected pre-run cancel flag for {self.instance.log_format}')
|
||||
self.instance = self.update_model(self.instance.pk, status='canceled')
|
||||
|
||||
if self.instance.status != 'running':
|
||||
@@ -522,9 +567,13 @@ class BaseTask(object):
|
||||
|
||||
credentials = self.build_credentials_list(self.instance)
|
||||
|
||||
container_root = None
|
||||
if settings.IS_K8S and isinstance(self.instance, ProjectUpdate):
|
||||
container_root = private_data_dir
|
||||
|
||||
for credential in credentials:
|
||||
if credential:
|
||||
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
|
||||
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir, container_root=container_root)
|
||||
|
||||
self.runner_callback.safe_env.update(self.safe_cred_env)
|
||||
|
||||
@@ -609,12 +658,11 @@ class BaseTask(object):
|
||||
elif status == 'canceled':
|
||||
self.instance = self.update_model(pk)
|
||||
cancel_flag_value = getattr(self.instance, 'cancel_flag', False)
|
||||
if (cancel_flag_value is False) and signal_callback():
|
||||
if cancel_flag_value is False:
|
||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="Task was canceled due to receiving a shutdown signal.")
|
||||
status = 'failed'
|
||||
elif cancel_flag_value is False:
|
||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
|
||||
status = 'failed'
|
||||
except PolicyEvaluationError as exc:
|
||||
self.runner_callback.delay_update(job_explanation=str(exc), result_traceback=str(exc))
|
||||
except ReceptorNodeNotFound as exc:
|
||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||
except Exception:
|
||||
@@ -640,6 +688,9 @@ class BaseTask(object):
|
||||
|
||||
# Field host_status_counts is used as a metric to check if event processing is finished
|
||||
# we send notifications if it is, if not, callback receiver will send them
|
||||
if not self.instance:
|
||||
logger.error(f'Unified job pk={pk} appears to be deleted while running')
|
||||
return
|
||||
if (self.instance.host_status_counts is not None) or (not self.runner_callback.wrapup_event_dispatched):
|
||||
events_processed_hook(self.instance)
|
||||
|
||||
@@ -736,6 +787,7 @@ class SourceControlMixin(BaseTask):
|
||||
try:
|
||||
# the job private_data_dir is passed so sync can download roles and collections there
|
||||
sync_task = RunProjectUpdate(job_private_data_dir=private_data_dir)
|
||||
sync_task.instance = local_project_sync # avoids "waiting" status check, performance
|
||||
sync_task.run(local_project_sync.id)
|
||||
local_project_sync.refresh_from_db()
|
||||
self.instance = self.update_model(self.instance.pk, scm_revision=local_project_sync.scm_revision)
|
||||
@@ -799,7 +851,7 @@ class SourceControlMixin(BaseTask):
|
||||
self.release_lock(project)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
class RunJob(SourceControlMixin, BaseTask):
|
||||
"""
|
||||
Run a job using ansible-playbook.
|
||||
@@ -1087,8 +1139,8 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
# where ansible expects to find it
|
||||
if self.should_use_fact_cache():
|
||||
job.log_lifecycle("start_job_fact_cache")
|
||||
self.facts_write_time = start_fact_cache(
|
||||
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
|
||||
self.hosts_with_facts_cached = start_fact_cache(
|
||||
job.get_hosts_for_fact_cache(), artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)), inventory_id=job.inventory_id
|
||||
)
|
||||
|
||||
def build_project_dir(self, job, private_data_dir):
|
||||
@@ -1098,7 +1150,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
super(RunJob, self).post_run_hook(job, status)
|
||||
job.refresh_from_db(fields=['job_env'])
|
||||
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
|
||||
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
|
||||
if not private_data_dir:
|
||||
# If there's no private data dir, that means we didn't get into the
|
||||
# actual `run()` call; this _usually_ means something failed in
|
||||
# the pre_run_hook method
|
||||
@@ -1106,9 +1158,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
|
||||
job.log_lifecycle("finish_job_fact_cache")
|
||||
finish_fact_cache(
|
||||
job.get_hosts_for_fact_cache(),
|
||||
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
|
||||
facts_write_time=self.facts_write_time,
|
||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)),
|
||||
job_id=job.id,
|
||||
inventory_id=job.inventory_id,
|
||||
)
|
||||
@@ -1124,7 +1174,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
update_inventory_computed_fields.delay(inventory.id)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
class RunProjectUpdate(BaseTask):
|
||||
model = ProjectUpdate
|
||||
event_model = ProjectUpdateEvent
|
||||
@@ -1463,7 +1513,7 @@ class RunProjectUpdate(BaseTask):
|
||||
return []
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
model = InventoryUpdate
|
||||
event_model = InventoryUpdateEvent
|
||||
@@ -1574,7 +1624,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
# Include any facts from input inventories so they can be used in filters
|
||||
start_fact_cache(
|
||||
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
|
||||
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
|
||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(inventory_update.id)),
|
||||
inventory_id=input_inventory.id,
|
||||
)
|
||||
|
||||
@@ -1726,7 +1776,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
class RunAdHocCommand(BaseTask):
|
||||
"""
|
||||
Run an ad hoc command using ansible.
|
||||
@@ -1879,7 +1929,7 @@ class RunAdHocCommand(BaseTask):
|
||||
return d
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
class RunSystemJob(BaseTask):
|
||||
model = SystemJob
|
||||
event_model = SystemJobEvent
|
||||
|
||||
458
awx/main/tasks/policy.py
Normal file
@@ -0,0 +1,458 @@
|
||||
import json
|
||||
import tempfile
|
||||
import contextlib
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from opa_client import OpaClient
|
||||
from opa_client.base import BaseClient
|
||||
from requests import HTTPError
|
||||
from rest_framework import serializers
|
||||
from rest_framework import fields
|
||||
|
||||
from awx.main import models
|
||||
from awx.main.exceptions import PolicyEvaluationError
|
||||
|
||||
|
||||
# Monkey patching opa_client.base.BaseClient to fix retries and timeout settings
|
||||
_original_opa_base_client_init = BaseClient.__init__
|
||||
|
||||
|
||||
def _opa_base_client_init_fix(
|
||||
self,
|
||||
host: str = "localhost",
|
||||
port: int = 8181,
|
||||
version: str = "v1",
|
||||
ssl: bool = False,
|
||||
cert: Optional[Union[str, tuple]] = None,
|
||||
headers: Optional[dict] = None,
|
||||
retries: int = 2,
|
||||
timeout: float = 1.5,
|
||||
):
|
||||
_original_opa_base_client_init(self, host, port, version, ssl, cert, headers)
|
||||
self.retries = retries
|
||||
self.timeout = timeout
|
||||
|
||||
|
||||
BaseClient.__init__ = _opa_base_client_init_fix
|
||||
|
||||
|
||||
class _TeamSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Team
|
||||
fields = ('id', 'name')
|
||||
|
||||
|
||||
class _UserSerializer(serializers.ModelSerializer):
|
||||
teams = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = models.User
|
||||
fields = ('id', 'username', 'is_superuser', 'teams')
|
||||
|
||||
def get_teams(self, user: models.User):
|
||||
teams = models.Team.access_qs(user, 'member')
|
||||
return _TeamSerializer(many=True).to_representation(teams)
|
||||
|
||||
|
||||
class _ExecutionEnvironmentSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.ExecutionEnvironment
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'image',
|
||||
'pull',
|
||||
)
|
||||
|
||||
|
||||
class _InstanceGroupSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.InstanceGroup
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'capacity',
|
||||
'jobs_running',
|
||||
'jobs_total',
|
||||
'max_concurrent_jobs',
|
||||
'max_forks',
|
||||
)
|
||||
|
||||
|
||||
class _InventorySourceSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.InventorySource
|
||||
fields = ('id', 'name', 'source', 'status')
|
||||
|
||||
|
||||
class _InventorySerializer(serializers.ModelSerializer):
|
||||
inventory_sources = _InventorySourceSerializer(many=True)
|
||||
|
||||
class Meta:
|
||||
model = models.Inventory
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'description',
|
||||
'kind',
|
||||
'total_hosts',
|
||||
'total_groups',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources',
|
||||
'has_active_failures',
|
||||
'hosts_with_active_failures',
|
||||
'inventory_sources',
|
||||
)
|
||||
|
||||
|
||||
class _JobTemplateSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.JobTemplate
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'job_type',
|
||||
)
|
||||
|
||||
|
||||
class _WorkflowJobTemplateSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.WorkflowJobTemplate
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'job_type',
|
||||
)
|
||||
|
||||
|
||||
class _WorkflowJobSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.WorkflowJob
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
)
|
||||
|
||||
|
||||
class _OrganizationSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Organization
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
)
|
||||
|
||||
|
||||
class _ProjectSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Project
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'status',
|
||||
'scm_type',
|
||||
'scm_url',
|
||||
'scm_branch',
|
||||
'scm_refspec',
|
||||
'scm_clean',
|
||||
'scm_track_submodules',
|
||||
'scm_delete_on_update',
|
||||
)
|
||||
|
||||
|
||||
class _CredentialSerializer(serializers.ModelSerializer):
|
||||
organization = _OrganizationSerializer()
|
||||
|
||||
class Meta:
|
||||
model = models.Credential
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'description',
|
||||
'organization',
|
||||
'credential_type',
|
||||
'managed',
|
||||
'kind',
|
||||
'cloud',
|
||||
'kubernetes',
|
||||
)
|
||||
|
||||
|
||||
class _LabelSerializer(serializers.ModelSerializer):
|
||||
organization = _OrganizationSerializer()
|
||||
|
||||
class Meta:
|
||||
model = models.Label
|
||||
fields = ('id', 'name', 'organization')
|
||||
|
||||
|
||||
class JobSerializer(serializers.ModelSerializer):
|
||||
created_by = _UserSerializer()
|
||||
credentials = _CredentialSerializer(many=True)
|
||||
execution_environment = _ExecutionEnvironmentSerializer()
|
||||
instance_group = _InstanceGroupSerializer()
|
||||
inventory = _InventorySerializer()
|
||||
job_template = _JobTemplateSerializer()
|
||||
labels = _LabelSerializer(many=True)
|
||||
organization = _OrganizationSerializer()
|
||||
project = _ProjectSerializer()
|
||||
extra_vars = fields.SerializerMethodField()
|
||||
hosts_count = fields.SerializerMethodField()
|
||||
workflow_job = fields.SerializerMethodField()
|
||||
workflow_job_template = fields.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = models.Job
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'created',
|
||||
'created_by',
|
||||
'credentials',
|
||||
'execution_environment',
|
||||
'extra_vars',
|
||||
'forks',
|
||||
'hosts_count',
|
||||
'instance_group',
|
||||
'inventory',
|
||||
'job_template',
|
||||
'job_type',
|
||||
'job_type_name',
|
||||
'labels',
|
||||
'launch_type',
|
||||
'limit',
|
||||
'launched_by',
|
||||
'organization',
|
||||
'playbook',
|
||||
'project',
|
||||
'scm_branch',
|
||||
'scm_revision',
|
||||
'workflow_job',
|
||||
'workflow_job_template',
|
||||
)
|
||||
|
||||
def get_extra_vars(self, obj: models.Job):
|
||||
return json.loads(obj.display_extra_vars())
|
||||
|
||||
def get_hosts_count(self, obj: models.Job):
|
||||
return obj.hosts.count()
|
||||
|
||||
def get_workflow_job(self, obj: models.Job):
|
||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
||||
if workflow_job is None:
|
||||
return None
|
||||
return _WorkflowJobSerializer().to_representation(workflow_job)
|
||||
|
||||
def get_workflow_job_template(self, obj: models.Job):
|
||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
||||
if workflow_job is None:
|
||||
return None
|
||||
|
||||
workflow_job_template: models.WorkflowJobTemplate = workflow_job.workflow_job_template
|
||||
if workflow_job_template is None:
|
||||
return None
|
||||
|
||||
return _WorkflowJobTemplateSerializer().to_representation(workflow_job_template)
|
||||
|
||||
|
||||
class OPAResultSerializer(serializers.Serializer):
|
||||
allowed = fields.BooleanField(required=True)
|
||||
violations = fields.ListField(child=fields.CharField())
|
||||
|
||||
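For context, the document this serializer validates is the body OPA returns under its 'result' key, as consumed by evaluate_policy() below. A made-up example of that shape:

example_result = {'allowed': False, 'violations': ['job template is not approved for production']}

serializer = OPAResultSerializer(data=example_result)
assert serializer.is_valid()
assert serializer.validated_data['violations'] == ['job template is not approved for production']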
|
||||
class OPA_AUTH_TYPES:
|
||||
NONE = 'None'
|
||||
TOKEN = 'Token'
|
||||
CERTIFICATE = 'Certificate'
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def opa_cert_file():
|
||||
"""
|
||||
Context manager that creates temporary certificate files for OPA authentication.
|
||||
|
||||
For mTLS (mutual TLS), we need:
|
||||
- Client certificate and key for client authentication
|
||||
- CA certificate (optional) for server verification
|
||||
|
||||
Returns:
|
||||
tuple: (client_cert_path, verify_path)
|
||||
- client_cert_path: Path to client cert file or None if not using client cert
|
||||
- verify_path: Path to CA cert file, True to use system CA store, or False for no verification
|
||||
"""
|
||||
client_cert_temp = None
|
||||
ca_temp = None
|
||||
|
||||
try:
|
||||
# Case 1: Full mTLS with client cert and optional CA cert
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
||||
# Create client certificate file (required for mTLS)
|
||||
client_cert_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_CERT)
|
||||
client_cert_temp.write("\n")
|
||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_KEY)
|
||||
client_cert_temp.write("\n")
|
||||
client_cert_temp.flush()
|
||||
|
||||
# If CA cert is provided, use it for server verification
|
||||
# Otherwise, use system CA store (True)
|
||||
if settings.OPA_AUTH_CA_CERT:
|
||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
||||
ca_temp.write("\n")
|
||||
ca_temp.flush()
|
||||
verify_path = ca_temp.name
|
||||
else:
|
||||
verify_path = True # Use system CA store
|
||||
|
||||
yield (client_cert_temp.name, verify_path)
|
||||
|
||||
# Case 2: TLS with only server verification (no client cert)
|
||||
elif settings.OPA_SSL:
|
||||
# If CA cert is provided, use it for server verification
|
||||
# Otherwise, use system CA store (True)
|
||||
if settings.OPA_AUTH_CA_CERT:
|
||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
||||
ca_temp.write("\n")
|
||||
ca_temp.flush()
|
||||
verify_path = ca_temp.name
|
||||
else:
|
||||
verify_path = True # Use system CA store
|
||||
|
||||
yield (None, verify_path)
|
||||
|
||||
# Case 3: No TLS
|
||||
else:
|
||||
yield (None, False)
|
||||
|
||||
finally:
|
||||
# Clean up temporary files
|
||||
if client_cert_temp:
|
||||
client_cert_temp.close()
|
||||
if ca_temp:
|
||||
ca_temp.close()
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def opa_client(headers=None):
|
||||
with opa_cert_file() as cert_files:
|
||||
cert, verify = cert_files
|
||||
|
||||
with OpaClient(
|
||||
host=settings.OPA_HOST,
|
||||
port=settings.OPA_PORT,
|
||||
headers=headers,
|
||||
ssl=settings.OPA_SSL,
|
||||
cert=cert,
|
||||
timeout=settings.OPA_REQUEST_TIMEOUT,
|
||||
retries=settings.OPA_REQUEST_RETRIES,
|
||||
) as client:
|
||||
# Workaround for https://github.com/Turall/OPA-python-client/issues/32
|
||||
# by directly setting cert and verify on requests.session
|
||||
client._session.cert = cert
|
||||
client._session.verify = verify
|
||||
|
||||
yield client
|
||||
|
||||
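A short usage sketch of the opa_client() context manager above; the header and package path are hypothetical, and the call shape matches the query_rule() invocation in evaluate_policy() below.

with opa_client(headers={'X-Example-Header': 'value'}) as client:
    response = client.query_rule(
        input_data={'id': 1, 'name': 'Demo Job'},
        package_path='aap_policy_examples/allowed_false',  # hypothetical policy package
    )
    # Expected payload shape: {'result': {'allowed': bool, 'violations': [...]}}
    print(response.get('result'))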
|
||||
def evaluate_policy(instance):
|
||||
# Policy evaluation for Policy as Code feature
|
||||
if not settings.OPA_HOST:
|
||||
return
|
||||
|
||||
if not isinstance(instance, models.Job):
|
||||
return
|
||||
|
||||
instance.log_lifecycle("evaluate_policy")
|
||||
|
||||
input_data = JobSerializer(instance=instance).data
|
||||
|
||||
headers = settings.OPA_AUTH_CUSTOM_HEADERS
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.TOKEN:
|
||||
headers.update({'Authorization': 'Bearer {}'.format(settings.OPA_AUTH_TOKEN)})
|
||||
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE and not settings.OPA_SSL:
|
||||
raise PolicyEvaluationError(_('OPA_AUTH_TYPE=Certificate requires OPA_SSL to be enabled.'))
|
||||
|
||||
cert_settings_missing = []
|
||||
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
||||
if not settings.OPA_AUTH_CLIENT_CERT:
|
||||
cert_settings_missing += ['OPA_AUTH_CLIENT_CERT']
|
||||
if not settings.OPA_AUTH_CLIENT_KEY:
|
||||
cert_settings_missing += ['OPA_AUTH_CLIENT_KEY']
|
||||
if not settings.OPA_AUTH_CA_CERT:
|
||||
cert_settings_missing += ['OPA_AUTH_CA_CERT']
|
||||
|
||||
if cert_settings_missing:
|
||||
raise PolicyEvaluationError(_('The following certificate settings are missing for OPA_AUTH_TYPE=Certificate: {}').format(cert_settings_missing))
|
||||
|
||||
query_paths = [
|
||||
('Organization', instance.organization.opa_query_path),
|
||||
('Inventory', instance.inventory.opa_query_path),
|
||||
('Job template', instance.job_template.opa_query_path),
|
||||
]
|
||||
violations = dict()
|
||||
errors = dict()
|
||||
|
||||
try:
|
||||
with opa_client(headers=headers) as client:
|
||||
for path_type, query_path in query_paths:
|
||||
response = dict()
|
||||
try:
|
||||
if not query_path:
|
||||
continue
|
||||
|
||||
response = client.query_rule(input_data=input_data, package_path=query_path)
|
||||
|
||||
except HTTPError as e:
|
||||
message = _('Call to OPA failed. Exception: {}').format(e)
|
||||
try:
|
||||
error_data = e.response.json()
|
||||
except ValueError:
|
||||
errors[path_type] = message
|
||||
continue
|
||||
|
||||
error_code = error_data.get("code")
|
||||
error_message = error_data.get("message")
|
||||
if error_code or error_message:
|
||||
message = _('Call to OPA failed. Code: {}, Message: {}').format(error_code, error_message)
|
||||
errors[path_type] = message
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
errors[path_type] = _('Call to OPA failed. Exception: {}').format(e)
|
||||
continue
|
||||
|
||||
result = response.get('result')
|
||||
if result is None:
|
||||
errors[path_type] = _('Call to OPA did not return a "result" property. The path refers to an undefined document.')
|
||||
continue
|
||||
|
||||
result_serializer = OPAResultSerializer(data=result)
|
||||
if not result_serializer.is_valid():
|
||||
errors[path_type] = _('OPA policy returned invalid result.')
|
||||
continue
|
||||
|
||||
result_data = result_serializer.validated_data
|
||||
if not result_data.get("allowed") and (result_violations := result_data.get("violations")):
|
||||
violations[path_type] = result_violations
|
||||
|
||||
format_results = dict()
|
||||
if any(errors[e] for e in errors):
|
||||
format_results["Errors"] = errors
|
||||
|
||||
if any(violations[v] for v in violations):
|
||||
format_results["Violations"] = violations
|
||||
|
||||
if violations or errors:
|
||||
raise PolicyEvaluationError(pformat(format_results, width=80))
|
||||
|
||||
except Exception as e:
|
||||
raise PolicyEvaluationError(_('This job cannot be executed due to a policy violation or error. See the following details:\n{}').format(e))
|
||||
@@ -32,7 +32,7 @@ from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
|
||||
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
||||
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
|
||||
# Receptorctl
|
||||
from receptorctl.socket_interface import ReceptorControl
|
||||
@@ -852,7 +852,7 @@ def reload_receptor():
|
||||
raise RuntimeError("Receptor reload failed")
|
||||
|
||||
|
||||
@task()
|
||||
@task_awx()
|
||||
def write_receptor_config():
|
||||
"""
|
||||
This task runs async on each control node, K8S only.
|
||||
@@ -875,7 +875,7 @@ def write_receptor_config():
|
||||
reload_receptor()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def remove_deprovisioned_node(hostname):
|
||||
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
|
||||
@@ -14,16 +14,21 @@ class SignalExit(Exception):
|
||||
|
||||
|
||||
class SignalState:
|
||||
# SIGTERM: Sent by supervisord to process group on shutdown
|
||||
# SIGUSR1: The dispatcherd cancel signal
|
||||
signals = (signal.SIGTERM, signal.SIGINT, signal.SIGUSR1)
|
||||
|
||||
def reset(self):
|
||||
self.sigterm_flag = False
|
||||
self.sigint_flag = False
|
||||
for for_signal in self.signals:
|
||||
self.signal_flags[for_signal] = False
|
||||
self.original_methods[for_signal] = None
|
||||
|
||||
self.is_active = False # for nested context managers
|
||||
self.original_sigterm = None
|
||||
self.original_sigint = None
|
||||
self.raise_exception = False
|
||||
|
||||
def __init__(self):
|
||||
self.signal_flags = {}
|
||||
self.original_methods = {}
|
||||
self.reset()
|
||||
|
||||
def raise_if_needed(self):
|
||||
@@ -31,31 +36,28 @@ class SignalState:
|
||||
self.raise_exception = False # so it is not raised a second time in error handling
|
||||
raise SignalExit()
|
||||
|
||||
def set_sigterm_flag(self, *args):
|
||||
self.sigterm_flag = True
|
||||
self.raise_if_needed()
|
||||
|
||||
def set_sigint_flag(self, *args):
|
||||
self.sigint_flag = True
|
||||
def set_signal_flag(self, *args, for_signal=None):
|
||||
self.signal_flags[for_signal] = True
|
||||
logger.info(f'Processed signal {for_signal}, set exit flag')
|
||||
self.raise_if_needed()
|
||||
|
||||
def connect_signals(self):
|
||||
self.original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
self.original_sigint = signal.getsignal(signal.SIGINT)
|
||||
signal.signal(signal.SIGTERM, self.set_sigterm_flag)
|
||||
signal.signal(signal.SIGINT, self.set_sigint_flag)
|
||||
for for_signal in self.signals:
|
||||
self.original_methods[for_signal] = signal.getsignal(for_signal)
|
||||
signal.signal(for_signal, lambda *args, for_signal=for_signal: self.set_signal_flag(*args, for_signal=for_signal))
|
||||
self.is_active = True
|
||||
|
||||
def restore_signals(self):
|
||||
signal.signal(signal.SIGTERM, self.original_sigterm)
|
||||
signal.signal(signal.SIGINT, self.original_sigint)
|
||||
# if we got a signal while context manager was active, call parent methods.
|
||||
if self.sigterm_flag:
|
||||
if callable(self.original_sigterm):
|
||||
self.original_sigterm()
|
||||
if self.sigint_flag:
|
||||
if callable(self.original_sigint):
|
||||
self.original_sigint()
|
||||
for for_signal in self.signals:
|
||||
original_method = self.original_methods[for_signal]
|
||||
signal.signal(for_signal, original_method)
|
||||
# if we got a signal while context manager was active, call parent methods.
|
||||
if self.signal_flags[for_signal]:
|
||||
if callable(original_method):
|
||||
try:
|
||||
original_method()
|
||||
except Exception as exc:
|
||||
logger.info(f'Error processing original {for_signal} signal, error: {str(exc)}')
|
||||
self.reset()
|
||||
|
||||
|
||||
@@ -63,7 +65,7 @@ signal_state = SignalState()
|
||||
|
||||
|
||||
def signal_callback():
|
||||
return bool(signal_state.sigterm_flag or signal_state.sigint_flag)
|
||||
return any(signal_state.signal_flags[for_signal] for for_signal in signal_state.signals)
|
||||
|
||||
|
||||
def with_signal_handling(f):
|
||||
|
||||
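A minimal sketch of how a task body can cooperate with the generalized flags above: decorate it with with_signal_handling and poll signal_callback() to exit cleanly once any of SIGTERM, SIGINT, or SIGUSR1 arrives. The work loop here is only a placeholder.

import time

@with_signal_handling
def long_running_step():
    while not signal_callback():
        time.sleep(1)  # stand-in for one unit of real work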
@@ -1,78 +1,77 @@
|
||||
# Python
|
||||
from collections import namedtuple
|
||||
import functools
|
||||
import importlib
|
||||
import itertools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import psycopg
|
||||
from io import StringIO
|
||||
from contextlib import redirect_stdout
|
||||
import shutil
|
||||
import time
|
||||
from distutils.version import LooseVersion as Version
|
||||
from collections import namedtuple
|
||||
from contextlib import redirect_stdout
|
||||
from datetime import datetime
|
||||
from distutils.version import LooseVersion as Version
|
||||
from io import StringIO
|
||||
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.db import connection, transaction, DatabaseError, IntegrityError
|
||||
from django.db.models.fields.related import ForeignKey
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.encoding import smart_str
|
||||
from django.contrib.auth.models import User
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.translation import gettext_noop
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db.models.query import QuerySet
|
||||
# Runner
|
||||
import ansible_runner.cleanup
|
||||
import psycopg
|
||||
from ansible_base.lib.utils.db import advisory_lock
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.resource_registry.tasks.sync import SyncExecutor
|
||||
|
||||
# Django-CRUM
|
||||
from crum import impersonate
|
||||
|
||||
# Django flags
|
||||
from flags.state import flag_enabled
|
||||
|
||||
# Runner
|
||||
import ansible_runner.cleanup
|
||||
|
||||
# dateutil
|
||||
from dateutil.parser import parse as parse_date
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.resource_registry.tasks.sync import SyncExecutor
|
||||
from ansible_base.lib.utils.db import advisory_lock
|
||||
# Django
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.cache import cache
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db import DatabaseError, IntegrityError, connection, transaction
|
||||
from django.db.models.fields.related import ForeignKey
|
||||
from django.db.models.query import QuerySet
|
||||
from django.utils.encoding import smart_str
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.utils.translation import gettext_noop
|
||||
|
||||
# Django flags
|
||||
from flags.state import flag_enabled
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
|
||||
# AWX
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.conf import settings_registry
|
||||
from awx.main import analytics
|
||||
from awx.main.access import access_registry
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
|
||||
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.dispatch import get_task_queuename, reaper
|
||||
from awx.main.dispatch.publish import task as task_awx
|
||||
from awx.main.models import (
|
||||
Schedule,
|
||||
TowerScheduleState,
|
||||
Instance,
|
||||
InstanceGroup,
|
||||
UnifiedJob,
|
||||
Notification,
|
||||
Inventory,
|
||||
SmartInventoryMembership,
|
||||
Job,
|
||||
Notification,
|
||||
Schedule,
|
||||
SmartInventoryMembership,
|
||||
TowerScheduleState,
|
||||
UnifiedJob,
|
||||
convert_jsonfields,
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES, ERROR_STATES
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename, reaper
|
||||
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
||||
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.main.tasks.host_indirect import save_indirect_host_entries
|
||||
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main import analytics
|
||||
from awx.conf import settings_registry
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
|
||||
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
from awx.main.tasks.receptor import administrative_workunit_reaper, get_receptor_ctl, worker_cleanup, worker_info, write_receptor_config
|
||||
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from dispatcherd.publish import task
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.system')
|
||||
|
||||
@@ -83,7 +82,12 @@ Try upgrading OpenSSH or providing your private key in an different format. \
|
||||
'''
|
||||
|
||||
|
||||
def dispatch_startup():
|
||||
def _run_dispatch_startup_common():
|
||||
"""
|
||||
Execute the common startup initialization steps.
|
||||
This includes updating schedules, syncing instance membership, and starting
|
||||
local reaping and resetting metrics.
|
||||
"""
|
||||
startup_logger = logging.getLogger('awx.main.tasks')
|
||||
|
||||
# TODO: Enable this on VM installs
|
||||
@@ -93,14 +97,14 @@ def dispatch_startup():
|
||||
try:
|
||||
convert_jsonfields()
|
||||
except Exception:
|
||||
logger.exception("Failed json field conversion, skipping.")
|
||||
logger.exception("Failed JSON field conversion, skipping.")
|
||||
|
||||
startup_logger.debug("Syncing Schedules")
|
||||
startup_logger.debug("Syncing schedules")
|
||||
for sch in Schedule.objects.all():
|
||||
try:
|
||||
sch.update_computed_fields()
|
||||
except Exception:
|
||||
logger.exception("Failed to rebuild schedule {}.".format(sch))
|
||||
logger.exception("Failed to rebuild schedule %s.", sch)
|
||||
|
||||
#
|
||||
# When the dispatcher starts, if the instance cannot be found in the database,
|
||||
@@ -120,25 +124,67 @@ def dispatch_startup():
|
||||
apply_cluster_membership_policies()
|
||||
cluster_node_heartbeat()
|
||||
reaper.startup_reaping()
|
||||
reaper.reap_waiting(grace_period=0)
|
||||
m = DispatcherMetrics()
|
||||
m.reset_values()
|
||||
|
||||
|
||||
def _legacy_dispatch_startup():
|
||||
"""
|
||||
Legacy branch for startup: simply performs reaping of waiting jobs with a zero grace period.
|
||||
"""
|
||||
logger.debug("Legacy dispatcher: calling reaper.reap_waiting with grace_period=0")
|
||||
reaper.reap_waiting(grace_period=0)
|
||||
|
||||
|
||||
def _dispatcherd_dispatch_startup():
|
||||
"""
|
||||
New dispatcherd branch for startup: uses the control API to re-submit waiting jobs.
|
||||
"""
|
||||
logger.debug("Dispatcherd enabled: dispatching waiting jobs via control channel")
|
||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
||||
|
||||
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
|
||||
|
||||
|
||||
def dispatch_startup():
|
||||
"""
|
||||
System initialization at startup.
|
||||
First, execute the common logic.
|
||||
Then, if FEATURE_DISPATCHERD_ENABLED is enabled, re-submit waiting jobs via the control API;
|
||||
otherwise, fall back to legacy reaping of waiting jobs.
|
||||
"""
|
||||
_run_dispatch_startup_common()
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
_dispatcherd_dispatch_startup()
|
||||
else:
|
||||
_legacy_dispatch_startup()
|
||||
|
||||
|
||||
def inform_cluster_of_shutdown():
|
||||
"""
|
||||
Clean system shutdown that marks the current instance offline.
|
||||
In legacy mode, it also reaps waiting jobs.
|
||||
In dispatcherd mode, it relies on dispatcherd's built-in cleanup.
|
||||
"""
|
||||
try:
|
||||
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
||||
this_inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
|
||||
inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
|
||||
inst.mark_offline(update_last_seen=True, errors=_('Instance received normal shutdown signal'))
|
||||
except Instance.DoesNotExist:
|
||||
logger.exception("Cluster host not found: %s", settings.CLUSTER_HOST_ID)
|
||||
return
|
||||
|
||||
if flag_enabled('FEATURE_DISPATCHERD_ENABLED'):
|
||||
logger.debug("Dispatcherd mode: no extra reaping required for instance %s", inst.hostname)
|
||||
else:
|
||||
try:
|
||||
reaper.reap_waiting(this_inst, grace_period=0)
|
||||
logger.debug("Legacy mode: reaping waiting jobs for instance %s", inst.hostname)
|
||||
reaper.reap_waiting(inst, grace_period=0)
|
||||
except Exception:
|
||||
logger.exception('failed to reap waiting jobs for {}'.format(this_inst.hostname))
|
||||
logger.warning('Normal shutdown signal for instance {}, removed self from capacity pool.'.format(this_inst.hostname))
|
||||
except Exception:
|
||||
logger.exception('Encountered problem with normal shutdown signal.')
|
||||
logger.exception("Failed to reap waiting jobs for %s", inst.hostname)
|
||||
logger.warning("Normal shutdown processed for instance %s; instance removed from capacity pool.", inst.hostname)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def migrate_jsonfield(table, pkfield, columns):
|
||||
batchsize = 10000
|
||||
with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
|
||||
@@ -184,7 +230,7 @@ def migrate_jsonfield(table, pkfield, columns):
|
||||
logger.warning(f"Migration of {table} to jsonb is finished.")
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def apply_cluster_membership_policies():
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
@@ -296,7 +342,7 @@ def apply_cluster_membership_policies():
|
||||
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
|
||||
|
||||
|
||||
@task(queue='tower_settings_change')
|
||||
@task_awx(queue='tower_settings_change')
|
||||
def clear_setting_cache(setting_keys):
|
||||
# log that cache is being cleared
|
||||
logger.info(f"clear_setting_cache of keys {setting_keys}")
|
||||
@@ -309,7 +355,7 @@ def clear_setting_cache(setting_keys):
|
||||
cache.delete_many(cache_keys)
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
@task_awx(queue='tower_broadcast_all')
|
||||
def delete_project_files(project_path):
|
||||
# TODO: possibly implement some retry logic
|
||||
lock_file = project_path + '.lock'
|
||||
@@ -327,7 +373,7 @@ def delete_project_files(project_path):
|
||||
logger.exception('Could not remove lock file {}'.format(lock_file))
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
@task_awx(queue='tower_broadcast_all')
|
||||
def profile_sql(threshold=1, minutes=1):
|
||||
if threshold <= 0:
|
||||
cache.delete('awx-profile-sql-threshold')
|
||||
@@ -337,7 +383,7 @@ def profile_sql(threshold=1, minutes=1):
|
||||
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def send_notifications(notification_list, job_id=None):
|
||||
if not isinstance(notification_list, list):
|
||||
raise TypeError("notification_list should be of type list")
|
||||
@@ -382,13 +428,13 @@ def events_processed_hook(unified_job):
|
||||
save_indirect_host_entries.delay(unified_job.id)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def gather_analytics():
|
||||
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
analytics.gather()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def purge_old_stdout_files():
|
||||
nowtime = time.time()
|
||||
for f in os.listdir(settings.JOBOUTPUT_ROOT):
|
||||
@@ -450,18 +496,18 @@ class CleanupImagesAndFiles:
|
||||
cls.run_remote(this_inst, **kwargs)
|
||||
|
||||
|
||||
@task(queue='tower_broadcast_all')
|
||||
@task_awx(queue='tower_broadcast_all')
|
||||
def handle_removed_image(remove_images=None):
|
||||
"""Special broadcast invocation of this method to handle case of deleted EE"""
|
||||
CleanupImagesAndFiles.run(remove_images=remove_images, file_pattern='')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def cleanup_images_and_files():
|
||||
CleanupImagesAndFiles.run(image_prune=True)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def cluster_node_health_check(node):
|
||||
"""
|
||||
Used for the health check endpoint, refreshes the status of the instance, but must be run on the target node
|
||||
@@ -480,7 +526,7 @@ def cluster_node_health_check(node):
|
||||
this_inst.local_health_check()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def execution_node_health_check(node):
|
||||
if node == '':
|
||||
logger.warning('Remote health check incorrectly called with blank string')
|
||||
@@ -548,8 +594,16 @@ def inspect_established_receptor_connections(mesh_status):
|
||||
def inspect_execution_and_hop_nodes(instance_list):
|
||||
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
|
||||
node_lookup = {inst.hostname: inst for inst in instance_list}
|
||||
ctl = get_receptor_ctl()
|
||||
mesh_status = ctl.simple_command('status')
|
||||
try:
|
||||
ctl = get_receptor_ctl()
|
||||
except FileNotFoundError:
|
||||
logger.error('Receptor daemon not running, skipping execution node check')
|
||||
return
|
||||
try:
|
||||
mesh_status = ctl.simple_command('status')
|
||||
except ValueError as exc:
|
||||
logger.error(f'Error running receptorctl status command, error: {str(exc)}')
|
||||
return
|
||||
|
||||
inspect_established_receptor_connections(mesh_status)
|
||||
|
||||
@@ -597,8 +651,109 @@ def inspect_execution_and_hop_nodes(instance_list):
|
||||
execution_node_health_check.apply_async([hostname])
|
||||
|
||||
|
||||
@task(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
@task_awx(queue=get_task_queuename, bind_kwargs=['dispatch_time', 'worker_tasks'])
|
||||
def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
"""
|
||||
Original implementation for AWX dispatcher.
|
||||
Uses worker_tasks from bind_kwargs to track running tasks.
|
||||
"""
|
||||
# Run common instance management logic
|
||||
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
|
||||
if this_inst is None:
|
||||
return # Early return case from instance management
|
||||
|
||||
# Check versions
|
||||
_heartbeat_check_versions(this_inst, instance_list)
|
||||
|
||||
# Handle lost instances
|
||||
_heartbeat_handle_lost_instances(lost_instances, this_inst)
|
||||
|
||||
# Run local reaper - original implementation using worker_tasks
|
||||
if worker_tasks is not None:
|
||||
active_task_ids = []
|
||||
for task_list in worker_tasks.values():
|
||||
active_task_ids.extend(task_list)
|
||||
|
||||
# Convert dispatch_time to datetime
|
||||
ref_time = datetime.fromisoformat(dispatch_time) if dispatch_time else now()
|
||||
|
||||
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
||||
|
||||
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
|
||||
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename, bind=True)
|
||||
def adispatch_cluster_node_heartbeat(binder):
|
||||
"""
|
||||
Dispatcherd implementation.
|
||||
Uses Control API to get running tasks.
|
||||
"""
|
||||
# Run common instance management logic
|
||||
this_inst, instance_list, lost_instances = _heartbeat_instance_management()
|
||||
if this_inst is None:
|
||||
return # Early return case from instance management
|
||||
|
||||
# Check versions
|
||||
_heartbeat_check_versions(this_inst, instance_list)
|
||||
|
||||
# Handle lost instances
|
||||
_heartbeat_handle_lost_instances(lost_instances, this_inst)
|
||||
|
||||
# Get running tasks using dispatcherd API
|
||||
active_task_ids = _get_active_task_ids_from_dispatcherd(binder)
|
||||
if active_task_ids is None:
|
||||
logger.warning("No active task IDs retrieved from dispatcherd, skipping reaper")
|
||||
return # Failed to get task IDs, don't attempt reaping
|
||||
|
||||
# Run local reaper using tasks from dispatcherd
|
||||
ref_time = now() # No dispatch_time in dispatcherd version
|
||||
logger.debug(f"Running reaper with {len(active_task_ids)} excluded UUIDs")
|
||||
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=ref_time)
|
||||
# If waiting jobs are hanging out, resubmit them
|
||||
if UnifiedJob.objects.filter(controller_node=settings.CLUSTER_HOST_ID, status='waiting').exists():
|
||||
from awx.main.tasks.jobs import dispatch_waiting_jobs
|
||||
|
||||
dispatch_waiting_jobs.apply_async(queue=get_task_queuename())
|
||||
|
||||
|
||||
def _get_active_task_ids_from_dispatcherd(binder):
|
||||
"""
|
||||
Retrieve active task IDs from the dispatcherd control API.
|
||||
|
||||
Returns:
|
||||
list: List of active task UUIDs
|
||||
None: If there was an error retrieving the data
|
||||
"""
|
||||
active_task_ids = []
|
||||
try:
|
||||
|
||||
logger.debug("Querying dispatcherd API for running tasks")
|
||||
data = binder.control('running')
|
||||
|
||||
# Extract UUIDs from the running data
|
||||
# Process running data: first item is a dict with node_id and task entries
|
||||
data.pop('node_id', None)
|
||||
|
||||
# Extract task UUIDs from data structure
|
||||
for task_key, task_value in data.items():
|
||||
if isinstance(task_value, dict) and 'uuid' in task_value:
|
||||
active_task_ids.append(task_value['uuid'])
|
||||
logger.debug(f"Found active task with UUID: {task_value['uuid']}")
|
||||
elif isinstance(task_key, str):
|
||||
# Handle case where UUID might be the key
|
||||
active_task_ids.append(task_key)
|
||||
logger.debug(f"Found active task with key: {task_key}")
|
||||
|
||||
logger.debug(f"Retrieved {len(active_task_ids)} active task IDs from dispatcherd")
|
||||
return active_task_ids
|
||||
except Exception:
|
||||
logger.exception("Failed to get running tasks from dispatcherd")
|
||||
return None
|
||||
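The parsing above assumes a reply from binder.control('running') roughly shaped like the hypothetical payload below, where every key other than node_id is a dispatcherd task entry carrying a uuid; the exact keys come from dispatcherd's control API.

running_reply = {
    'node_id': 'awx-1',
    'task-1': {'uuid': 'aaaa-bbbb-cccc', 'task': 'awx.main.tasks.jobs.RunJob'},
}
running_reply.pop('node_id', None)
active_uuids = [v['uuid'] for v in running_reply.values() if isinstance(v, dict) and 'uuid' in v]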
|
||||
|
||||
def _heartbeat_instance_management():
|
||||
"""Common logic for heartbeat instance management."""
|
||||
logger.debug("Cluster node heartbeat task.")
|
||||
nowtime = now()
|
||||
instance_list = list(Instance.objects.filter(node_state__in=(Instance.States.READY, Instance.States.UNAVAILABLE, Instance.States.INSTALLED)))
|
||||
@@ -625,7 +780,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
this_inst.local_health_check()
|
||||
if startup_event and this_inst.capacity != 0:
|
||||
logger.warning(f'Rejoining the cluster as instance {this_inst.hostname}. Prior last_seen {last_last_seen}')
|
||||
return
|
||||
return None, None, None # Early return case
|
||||
elif not last_last_seen:
|
||||
logger.warning(f'Instance does not have recorded last_seen, updating to {nowtime}')
|
||||
elif (nowtime - last_last_seen) > timedelta(seconds=settings.CLUSTER_NODE_HEARTBEAT_PERIOD + 2):
|
||||
@@ -637,8 +792,14 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
logger.warning(f'Recreated instance record {this_inst.hostname} after unexpected removal')
|
||||
this_inst.local_health_check()
|
||||
else:
|
||||
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
|
||||
# IFF any node has a greater version than we do, then we'll shutdown services
|
||||
logger.error("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
|
||||
return None, None, None
|
||||
|
||||
return this_inst, instance_list, lost_instances
|
||||
|
||||
|
||||
def _heartbeat_check_versions(this_inst, instance_list):
|
||||
"""Check versions across instances and determine if shutdown is needed."""
|
||||
for other_inst in instance_list:
|
||||
if other_inst.node_type in ('execution', 'hop'):
|
||||
continue
|
||||
@@ -655,6 +816,9 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
stop_local_services(communicate=False)
|
||||
raise RuntimeError("Shutting down.")
|
||||
|
||||
|
||||
def _heartbeat_handle_lost_instances(lost_instances, this_inst):
|
||||
"""Handle lost instances by reaping their jobs and marking them offline."""
|
||||
for other_inst in lost_instances:
|
||||
try:
|
||||
explanation = "Job reaped due to instance shutdown"
|
||||
@@ -685,17 +849,8 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
else:
|
||||
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
|
||||
|
||||
# Run local reaper
|
||||
if worker_tasks is not None:
|
||||
active_task_ids = []
|
||||
for task_list in worker_tasks.values():
|
||||
active_task_ids.extend(task_list)
|
||||
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
|
||||
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def awx_receptor_workunit_reaper():
|
||||
"""
|
||||
When an AWX job is launched via receptor, files such as status, stdin, and stdout are created
|
||||
@@ -718,8 +873,16 @@ def awx_receptor_workunit_reaper():
|
||||
if not settings.RECEPTOR_RELEASE_WORK:
|
||||
return
|
||||
logger.debug("Checking for unreleased receptor work units")
|
||||
receptor_ctl = get_receptor_ctl()
|
||||
receptor_work_list = receptor_ctl.simple_command("work list")
|
||||
try:
|
||||
receptor_ctl = get_receptor_ctl()
|
||||
except FileNotFoundError:
|
||||
logger.info('Receptorctl sockfile not found for workunit reaper, doing nothing')
|
||||
return
|
||||
try:
|
||||
receptor_work_list = receptor_ctl.simple_command("work list")
|
||||
except ValueError as exc:
|
||||
logger.info(f'Error getting work list for workunit reaper, error: {str(exc)}')
|
||||
return
|
||||
|
||||
unit_ids = [id for id in receptor_work_list]
|
||||
jobs_with_unreleased_receptor_units = UnifiedJob.objects.filter(work_unit_id__in=unit_ids).exclude(status__in=ACTIVE_STATES)
|
||||
@@ -733,7 +896,7 @@ def awx_receptor_workunit_reaper():
|
||||
administrative_workunit_reaper(receptor_work_list)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def awx_k8s_reaper():
|
||||
if not settings.RECEPTOR_RELEASE_WORK:
|
||||
return
|
||||
@@ -756,7 +919,7 @@ def awx_k8s_reaper():
|
||||
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def awx_periodic_scheduler():
|
||||
lock_session_timeout_milliseconds = settings.TASK_MANAGER_LOCK_TIMEOUT * 1000
|
||||
with advisory_lock('awx_periodic_scheduler_lock', lock_session_timeout_milliseconds=lock_session_timeout_milliseconds, wait=False) as acquired:
|
||||
@@ -815,7 +978,7 @@ def awx_periodic_scheduler():
|
||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def handle_failure_notifications(task_ids):
|
||||
"""A task-ified version of the method that sends notifications."""
|
||||
found_task_ids = set()
|
||||
@@ -830,7 +993,7 @@ def handle_failure_notifications(task_ids):
|
||||
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def update_inventory_computed_fields(inventory_id):
|
||||
"""
|
||||
Signal handler and wrapper around inventory.update_computed_fields to
|
||||
@@ -880,7 +1043,7 @@ def update_smart_memberships_for_inventory(smart_inventory):
|
||||
return False
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def update_host_smart_inventory_memberships():
|
||||
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
|
||||
changed_inventories = set([])
|
||||
@@ -896,7 +1059,7 @@ def update_host_smart_inventory_memberships():
|
||||
smart_inventory.update_computed_fields()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def delete_inventory(inventory_id, user_id, retries=5):
|
||||
# Delete inventory as user
|
||||
if user_id is None:
|
||||
@@ -958,7 +1121,7 @@ def _reconstruct_relationships(copy_mapping):
|
||||
new_obj.save()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, permission_check_func=None):
|
||||
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
|
||||
|
||||
@@ -1013,7 +1176,7 @@ def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, p
|
||||
update_inventory_computed_fields.delay(new_obj.id)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@task_awx(queue=get_task_queuename)
|
||||
def periodic_resource_sync():
|
||||
if not getattr(settings, 'RESOURCE_SERVER', None):
|
||||
logger.debug("Skipping periodic resource_sync, RESOURCE_SERVER not configured")
|
||||
|
||||
@@ -8,5 +8,12 @@
|
||||
"CONTROLLER_PASSWORD": "fooo",
|
||||
"CONTROLLER_USERNAME": "fooo",
|
||||
"CONTROLLER_OAUTH_TOKEN": "",
|
||||
"CONTROLLER_VERIFY_SSL": "False"
|
||||
"CONTROLLER_VERIFY_SSL": "False",
|
||||
"AAP_HOSTNAME": "https://foo.invalid",
|
||||
"AAP_PASSWORD": "fooo",
|
||||
"AAP_USERNAME": "fooo",
|
||||
"AAP_VALIDATE_CERTS": "False",
|
||||
"CONTROLLER_REQUEST_TIMEOUT": "fooo",
|
||||
"AAP_REQUEST_TIMEOUT": "fooo",
|
||||
"AAP_TOKEN": ""
|
||||
}
|
||||
|
||||
9
awx/main/tests/data/projects/debug/sleep.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
connection: local
|
||||
vars:
|
||||
sleep_interval: 5
|
||||
tasks:
|
||||
- name: sleep for a specified interval
|
||||
command: sleep '{{ sleep_interval }}'
|
||||
7
awx/main/tests/data/projects/facts/clear.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- meta: clear_facts
|
||||
17
awx/main/tests/data/projects/facts/gather.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
vars:
|
||||
extra_value: ""
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: set a custom fact
|
||||
set_fact:
|
||||
foo: "bar{{ extra_value }}"
|
||||
bar:
|
||||
a:
|
||||
b:
|
||||
- "c"
|
||||
- "d"
|
||||
cacheable: true
|
||||
9
awx/main/tests/data/projects/facts/no_op.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
connection: local
|
||||
vars:
|
||||
msg: 'hello'
|
||||
tasks:
|
||||
- debug: var=msg
|
||||
@@ -0,0 +1,3 @@
|
||||
[all:vars]
|
||||
a=value_a
|
||||
b=value_b
|
||||
57
awx/main/tests/data/sleep_task.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import time
|
||||
import logging
|
||||
|
||||
from dispatcherd.publish import task
|
||||
|
||||
from django.db import connection
|
||||
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task as old_task
|
||||
|
||||
from ansible_base.lib.utils.db import advisory_lock
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@old_task(queue=get_task_queuename)
|
||||
def sleep_task(seconds=10, log=False):
|
||||
if log:
|
||||
logger.info('starting sleep_task')
|
||||
time.sleep(seconds)
|
||||
if log:
|
||||
logger.info('finished sleep_task')
|
||||
|
||||
|
||||
@task()
|
||||
def sleep_break_connection(seconds=0.2):
|
||||
"""
|
||||
Interact with the database in an intentionally breaking way.
|
||||
After this finishes, queries made by this connection are expected to error
|
||||
with "the connection is closed"
|
||||
This is obviously a problem for any task that comes afterwards.
|
||||
So this is used to break things so that the fixes may be demonstrated.
|
||||
"""
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SET idle_session_timeout = '{seconds / 2}s';")
|
||||
|
||||
logger.info(f'sleeping for {seconds}s > {seconds / 2}s session timeout')
|
||||
time.sleep(seconds)
|
||||
|
||||
for i in range(1, 3):
|
||||
logger.info(f'\nRunning query number {i}')
|
||||
try:
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute("SELECT 1;")
|
||||
logger.info(' query worked, not expected')
|
||||
except Exception as exc:
|
||||
logger.info(f' query errored as expected\ntype: {type(exc)}\nstr: {str(exc)}')
|
||||
|
||||
logger.info(f'Connection present: {bool(connection.connection)}, reports closed: {getattr(connection.connection, "closed", "not_found")}')
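# Illustrative only: a task that wants to recover from a connection broken like
# this could drop it and let Django reconnect on the next query. A minimal
# sketch using Django's public API (not AWX-specific code):
def _example_recover_and_query():
    from django.db import close_old_connections, connection as db_connection

    # Discards connections Django considers unusable or past their max age, so the
    # next cursor() call opens a fresh connection instead of reusing the dead one.
    close_old_connections()
    with db_connection.cursor() as cursor:
        cursor.execute("SELECT 1;")
        return cursor.fetchone()[0]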
|
||||
|
||||
|
||||
@task()
|
||||
def advisory_lock_exception():
|
||||
time.sleep(0.2) # so it can fill up all the workers... hacky for now
|
||||
with advisory_lock('advisory_lock_exception', lock_session_timeout_milliseconds=20):
|
||||
raise RuntimeError('this is an intentional error')
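# Illustrative only: the non-exception counterpart of the helper above, using the
# same non-blocking advisory_lock pattern that awx_periodic_scheduler uses elsewhere
# in this diff (wait=False hands back whether the lock was actually acquired).
def _example_exclusive_section():
    with advisory_lock('advisory_lock_exception', lock_session_timeout_milliseconds=20, wait=False) as acquired:
        if not acquired:
            return 'lock busy, skipped'
        return 'did exclusive work'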
|
||||
@@ -87,8 +87,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
True,
|
||||
('redhat_user', 'redhat_pass'),
|
||||
@@ -98,8 +98,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': None,
|
||||
'REDHAT_PASSWORD': None,
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -109,8 +109,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -120,8 +120,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
False,
|
||||
None, # No request should be made
|
||||
@@ -131,8 +131,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
False,
|
||||
None, # Invalid, no request should be made
|
||||
@@ -150,3 +150,24 @@ def test_ship_credential(setting_map, expected_result, expected_auth, temp_analy
|
||||
assert mock_analytic_post.call_args[1]['auth'] == expected_auth
|
||||
else:
|
||||
mock_analytic_post.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_gather_cleanup_on_auth_failure(mock_valid_license, temp_analytic_tar):
|
||||
settings.INSIGHTS_TRACKING_STATE = True
|
||||
settings.AUTOMATION_ANALYTICS_URL = 'https://example.com/api'
|
||||
settings.REDHAT_USERNAME = 'test_user'
|
||||
settings.REDHAT_PASSWORD = 'test_password'
|
||||
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix='.tar.gz') as temp_file:
|
||||
temp_file_path = temp_file.name
|
||||
|
||||
try:
|
||||
with mock.patch('awx.main.analytics.core.ship', return_value=False):
|
||||
with mock.patch('awx.main.analytics.core.package', return_value=temp_file_path):
|
||||
gather(module=importlib.import_module(__name__), collection_type='scheduled')
|
||||
|
||||
assert not os.path.exists(temp_file_path), "Temp file was not cleaned up after ship failure"
|
||||
finally:
|
||||
if os.path.exists(temp_file_path):
|
||||
os.remove(temp_file_path)
|
||||
|
||||
@@ -30,6 +30,7 @@ EXPECTED_VALUES = {
|
||||
'awx_license_instance_free': 0,
|
||||
'awx_pending_jobs_total': 0,
|
||||
'awx_database_connections_total': 1,
|
||||
'awx_license_expiry': 0,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -97,8 +97,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -109,8 +109,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('subs_user', 'subs_pass'),
|
||||
None,
|
||||
@@ -121,8 +121,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_USER,
|
||||
@@ -133,8 +133,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -145,8 +145,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_PASSWORD,
|
||||
@@ -155,26 +155,36 @@ class TestAnalyticsGenericView:
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test__send_to_analytics_credentials(self, settings_map, expected_auth, expected_error_keyword):
|
||||
"""
|
||||
Test _send_to_analytics with various combinations of credentials.
|
||||
"""
|
||||
with override_settings(**settings_map):
|
||||
request = RequestFactory().post('/some/path')
|
||||
view = AnalyticsGenericView()
|
||||
|
||||
if expected_auth:
|
||||
with mock.patch('requests.request') as mock_request:
|
||||
mock_request.return_value = mock.Mock(status_code=200)
|
||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client:
|
||||
# Configure the mock OIDCClient instance and its make_request method
|
||||
mock_client_instance = mock.Mock()
|
||||
mock_oidc_client.return_value = mock_client_instance
|
||||
mock_client_instance.make_request.return_value = mock.Mock(status_code=200)
|
||||
|
||||
analytic_url = view._get_analytics_url(request.path)
|
||||
response = view._send_to_analytics(request, 'POST')
|
||||
|
||||
# Assertions
|
||||
mock_request.assert_called_once_with(
|
||||
# Assert OIDCClient instantiation
|
||||
expected_client_id, expected_client_secret = expected_auth
|
||||
mock_oidc_client.assert_called_once_with(expected_client_id, expected_client_secret)
|
||||
|
||||
# Assert make_request call
|
||||
mock_client_instance.make_request.assert_called_once_with(
|
||||
'POST',
|
||||
analytic_url,
|
||||
auth=expected_auth,
|
||||
verify=mock.ANY,
|
||||
headers=mock.ANY,
|
||||
json=mock.ANY,
|
||||
verify=mock.ANY,
|
||||
params=mock.ANY,
|
||||
json=mock.ANY,
|
||||
timeout=mock.ANY,
|
||||
)
|
||||
assert response.status_code == 200
|
||||
@@ -186,3 +196,64 @@ class TestAnalyticsGenericView:
|
||||
# mock_error_response.assert_called_once_with(expected_error_keyword, remote=False)
|
||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||
assert response.data['error']['keyword'] == expected_error_keyword
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"settings_map, expected_auth",
|
||||
[
|
||||
# Test case 1: Username and password should be used for basic auth
|
||||
(
|
||||
{
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
),
|
||||
# Test case 2: Client ID and secret should be used for basic auth
|
||||
(
|
||||
{
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
None,
|
||||
),
|
||||
],
|
||||
)
|
||||
def test__send_to_analytics_fallback_to_basic_auth(self, settings_map, expected_auth):
|
||||
"""
|
||||
Test _send_to_analytics with basic auth fallback.
|
||||
"""
|
||||
with override_settings(**settings_map):
|
||||
request = RequestFactory().post('/some/path')
|
||||
view = AnalyticsGenericView()
|
||||
|
||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client, mock.patch(
|
||||
'awx.api.views.analytics.AnalyticsGenericView._base_auth_request'
|
||||
) as mock_base_auth_request:
|
||||
# Configure the mock OIDCClient instance and its make_request method
|
||||
mock_client_instance = mock.Mock()
|
||||
mock_oidc_client.return_value = mock_client_instance
|
||||
mock_client_instance.make_request.side_effect = requests.RequestException("Incorrect credentials")
|
||||
|
||||
analytic_url = view._get_analytics_url(request.path)
|
||||
view._send_to_analytics(request, 'POST')
|
||||
|
||||
if expected_auth:
|
||||
# assert mock_base_auth_request called with expected_auth
|
||||
mock_base_auth_request.assert_called_once_with(
|
||||
request,
|
||||
'POST',
|
||||
analytic_url,
|
||||
expected_auth[0],
|
||||
expected_auth[1],
|
||||
mock.ANY,
|
||||
)
|
||||
else:
|
||||
# assert mock_base_auth_request not called
|
||||
mock_base_auth_request.assert_not_called()
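# Illustrative only: a rough outline of the fallback flow the two tests above
# exercise, with the mocked names (OIDCClient, _base_auth_request) standing in
# for the view internals. This is a sketch of the behavior under test, not the
# actual implementation of _send_to_analytics.
def _example_send_with_fallback(view, request, method, url, client_creds, basic_auth):
    import requests
    from awx.api.views.analytics import OIDCClient  # same name the tests patch above

    try:
        client = OIDCClient(client_creds[0], client_creds[1])
        return client.make_request(method, url, timeout=30)  # remaining kwargs elided
    except requests.RequestException:
        if basic_auth:  # (username, password) configured -> fall back to basic auth
            return view._base_auth_request(request, method, url, basic_auth[0], basic_auth[1], {})
        return None  # the real view presumably returns an error response instead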
|
||||
|
||||
@@ -1,64 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import Organization
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestImmutableSharedFields:
|
||||
@pytest.fixture(autouse=True)
|
||||
def configure_settings(self, settings):
|
||||
settings.ALLOW_LOCAL_RESOURCE_MANAGEMENT = False
|
||||
|
||||
def test_create_raises_permission_denied(self, admin_user, post):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
resp = post(
|
||||
url=reverse('api:team_list'),
|
||||
data={'name': 'teamA', 'organization': orgA.id},
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert "Creation of this resource is not allowed" in resp.data['detail']
|
||||
|
||||
def test_perform_delete_raises_permission_denied(self, admin_user, delete):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
team = orgA.teams.create(name='teamA')
|
||||
resp = delete(
|
||||
url=reverse('api:team_detail', kwargs={'pk': team.id}),
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert "Deletion of this resource is not allowed" in resp.data['detail']
|
||||
|
||||
def test_perform_update(self, admin_user, patch):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
# allow patching non-shared fields
|
||||
patch(
|
||||
url=reverse('api:organization_detail', kwargs={'pk': orgA.id}),
|
||||
data={"max_hosts": 76},
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
# prevent patching shared fields
|
||||
resp = patch(url=reverse('api:organization_detail', kwargs={'pk': orgA.id}), data={"name": "orgB"}, user=admin_user, expect=403)
|
||||
assert "Cannot change shared field" in resp.data['name']
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'role',
|
||||
['admin_role', 'member_role'],
|
||||
)
|
||||
@pytest.mark.parametrize('resource', ['organization', 'team'])
|
||||
def test_prevent_assigning_member_to_organization_or_team(self, admin_user, post, resource, role):
|
||||
orgA = Organization.objects.create(name='orgA')
|
||||
if resource == 'organization':
|
||||
role = getattr(orgA, role)
|
||||
elif resource == 'team':
|
||||
teamA = orgA.teams.create(name='teamA')
|
||||
role = getattr(teamA, role)
|
||||
resp = post(
|
||||
url=reverse('api:user_roles_list', kwargs={'pk': admin_user.id}),
|
||||
data={'id': role.id},
|
||||
user=admin_user,
|
||||
expect=403,
|
||||
)
|
||||
assert f"Cannot directly modify user membership to {resource}." in resp.data['msg']
|
||||
@@ -8,6 +8,7 @@ from django.core.exceptions import ValidationError
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
from awx.main.models import InventorySource, Inventory, ActivityStream
|
||||
from awx.main.utils.inventory_vars import update_group_variables
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -690,3 +691,241 @@ class TestConstructedInventory:
|
||||
assert inv_r.data['url'] != const_r.data['url']
|
||||
assert inv_r.data['related']['constructed_url'] == url_const
|
||||
assert const_r.data['related']['constructed_url'] == url_const
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventoryAllVariables:
|
||||
|
||||
@staticmethod
|
||||
def simulate_update_from_source(inv_src, variables_dict, overwrite_vars=True):
|
||||
"""
|
||||
Update `inventory` with variables `variables_dict` from source
|
||||
`inv_src`.
|
||||
"""
|
||||
# Perform an update from source the same way it is done in
|
||||
# `inventory_import.Command._update_inventory`.
|
||||
new_vars = update_group_variables(
|
||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
||||
newvars=variables_dict,
|
||||
dbvars=inv_src.inventory.variables_dict,
|
||||
invsrc_id=inv_src.id,
|
||||
inventory_id=inv_src.inventory.id,
|
||||
overwrite_vars=overwrite_vars,
|
||||
)
|
||||
inv_src.inventory.variables = json.dumps(new_vars)
|
||||
inv_src.inventory.save(update_fields=["variables"])
|
||||
return new_vars
|
||||
|
||||
def update_and_verify(self, inv_src, new_vars, expect=None, overwrite_vars=True, teststep=None):
|
||||
"""
|
||||
Helper: Update from source and verify the new inventory variables.
|
||||
|
||||
:param inv_src: An inventory source object with its inventory property
|
||||
set to the inventory fixture of the caller.
|
||||
:param dict new_vars: The variables of the inventory source `inv_src`.
|
||||
:param dict expect: (optional) The expected variables state of the
|
||||
inventory after the update. If not set or None, expect `new_vars`.
|
||||
:param bool overwrite_vars: The status of the inventory source option
|
||||
'overwrite variables'. Default is `True`.
|
||||
:raise AssertionError: If the inventory does not contain the expected
|
||||
variables after the update.
|
||||
"""
|
||||
self.simulate_update_from_source(inv_src, new_vars, overwrite_vars=overwrite_vars)
|
||||
if teststep is not None:
|
||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars), f"Test step {teststep}"
|
||||
else:
|
||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars)
|
||||
|
||||
def test_set_variables_through_inventory_details_update(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Set an inventory variable by changing the inventory details, simulating
|
||||
a user edit.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x"}
|
||||
|
||||
def test_variables_set_by_user_persist_update_from_src(self, inventory, inventory_source, patch, admin_user):
|
||||
"""
|
||||
Verify the special behavior that a variable which originates from a user
|
||||
edit (instead of a source update), is not removed from the inventory
|
||||
when a source update with overwrite_vars=True does not contain that
|
||||
variable. This behavior is considered special because a variable which
|
||||
originates from a source would actually be deleted.
|
||||
|
||||
In addition, verify that an existing variable which was set by a user
|
||||
edit can be overwritten by a source update.
|
||||
"""
|
||||
# Set two variables via user edit.
|
||||
patch(
|
||||
url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}),
|
||||
data={'variables': '{"a": "a_from_user", "b": "b_from_user"}'},
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {'a': 'a_from_user', 'b': 'b_from_user'}
|
||||
# Update from a source which contains only one of the two variables from
|
||||
# the previous update.
|
||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_user'}
|
||||
|
||||
def test_variables_set_through_src_get_removed_on_update_from_same_src(self, inventory, inventory_source, patch, admin_user):
|
||||
"""
|
||||
Verify that a variable which originates from a source update, is removed
|
||||
from the inventory when a source update with overwrite_vars=True does
|
||||
not contain that variable.
|
||||
|
||||
In addition, verify that an existing variable which was set by a user
|
||||
edit can be overwritten by a source update.
|
||||
"""
|
||||
# Set two variables via update from source.
|
||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source', 'b': 'b_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_source'}
|
||||
# Update from the same source which now contains only one of the two
|
||||
# variables from the previous update.
|
||||
self.simulate_update_from_source(inventory_source, {'b': 'b_from_source'})
|
||||
# Verify the variable has been deleted from the inventory.
|
||||
assert inventory.variables_dict == {'b': 'b_from_source'}
|
||||
|
||||
def test_overwrite_variables_through_inventory_details_update(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Set and update the inventory variables multiple times by changing the
|
||||
inventory details via api, simulating user edits.
|
||||
|
||||
Any variables update by means of an inventory details update shall
|
||||
overwrite all existing inventory variables.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x"}
|
||||
# a: x2
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x2'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x2"}
|
||||
# b: y
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"b": "y"}
|
||||
|
||||
def test_inventory_group_variables_internal_data(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Basic verification of how variable updates are stored internally.
|
||||
|
||||
.. Warning::
|
||||
|
||||
This test verifies a specific implementation of the inventory
|
||||
variables update business logic. It may deliver false negatives if
|
||||
the implementation changes.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'a': [[-1, 'x']]}
|
||||
# b: y
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'b': [[-1, 'y']]}
|
||||
|
||||
def test_update_then_user_change(self, inventory, patch, admin_user, inventory_source):
|
||||
"""
|
||||
1. Update inventory vars by means of an inventory source update.
|
||||
2. Update inventory vars by editing the inventory details (aka a 'user
|
||||
update'), thereby changing variables values and deleting variables
|
||||
from the inventory.
|
||||
|
||||
.. Warning::
|
||||
|
||||
This test partly relies on a specific implementation of the
|
||||
inventory variables update business logic. It may deliver false
|
||||
negatives if the implementation changes.
|
||||
"""
|
||||
assert inventory_source.inventory_id == inventory.pk # sanity
|
||||
# ---- Test step 1: Set variables by updating from an inventory source.
|
||||
self.simulate_update_from_source(inventory_source, {'foo': 'foo_from_source', 'bar': 'bar_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'foo': 'foo_from_source', 'bar': 'bar_from_source'}
|
||||
# Verify internal storage of variables data. Note that this is
|
||||
# implementation specific
|
||||
assert inventory.inventory_group_variables.count() == 1
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'foo': [[inventory_source.id, 'foo_from_source']], 'bar': [[inventory_source.id, 'bar_from_source']]}
|
||||
# ---- Test step 2: Change the variables by editing the inventory details.
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'foo: foo_from_user'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
# Verify that variable `foo` contains the new value, and that variable
|
||||
# `bar` has been deleted from the inventory.
|
||||
assert inventory.variables_dict == {"foo": "foo_from_user"}
|
||||
# Verify internal storage of variables data. Note that this is
|
||||
# implementation specific
|
||||
assert inventory.inventory_group_variables.count() == 1
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'foo': [[-1, 'foo_from_user']]}
|
||||
|
||||
def test_monotonic_deletions(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Verify the variables history logic for monotonic deletions.
|
||||
|
||||
Monotonic in this context means that the variables are deleted in the
|
||||
reverse order of their creation.
|
||||
|
||||
1. Set inventory variable x: 0, expect INV={x: 0}
|
||||
|
||||
(The following steps use overwrite_variables=False)
|
||||
|
||||
2. Update from source A={x: 1}, expect INV={x: 1}
|
||||
3. Update from source B={x: 2}, expect INV={x: 2}
|
||||
4. Update from source B={}, expect INV={x: 1}
|
||||
5. Update from source A={}, expect INV={x: 0}
|
||||
"""
|
||||
inv_src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
|
||||
inv_src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
|
||||
# Test step 1:
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'x: 0'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"x": 0}
|
||||
# Test step 2: Source A overwrites value of var x
|
||||
self.update_and_verify(inv_src_a, {"x": 1}, teststep=2)
|
||||
# Test step 3: Source B overwrites value of var x
|
||||
self.update_and_verify(inv_src_b, {"x": 2}, teststep=3)
|
||||
# Test step 4: Value of var x from source A reappears
|
||||
self.update_and_verify(inv_src_b, {}, expect={"x": 1}, teststep=4)
|
||||
# Test step 5: Value of var x from initial user edit reappears
|
||||
self.update_and_verify(inv_src_a, {}, expect={"x": 0}, teststep=5)
|
||||
|
||||
def test_interleaved_deletions(self, inventory, patch, admin_user, inventory_source):
|
||||
"""
|
||||
Verify the variables history logic for interleaved deletions.
|
||||
|
||||
Interleaved in this context means that the variables are deleted in a
|
||||
different order than the sequence of their creation.
|
||||
|
||||
1. Set inventory variable x: 0, expect INV={x: 0}
|
||||
2. Update from source A={x: 1}, expect INV={x: 1}
|
||||
3. Update from source B={x: 2}, expect INV={x: 2}
|
||||
4. Update from source C={x: 3}, expect INV={x: 3}
|
||||
5. Update from source B={}, expect INV={x: 3}
|
||||
6. Update from source C={}, expect INV={x: 1}
|
||||
"""
|
||||
inv_src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
|
||||
inv_src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
|
||||
inv_src_c = InventorySource.objects.create(name="inv-src-C", inventory=inventory, source="ec2")
|
||||
# Test step 1. Set inventory variable x: 0
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'x: 0'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"x": 0}
|
||||
# Test step 2: Source A overwrites value of var x
|
||||
self.update_and_verify(inv_src_a, {"x": 1}, teststep=2)
|
||||
# Test step 3: Source B overwrites value of var x
|
||||
self.update_and_verify(inv_src_b, {"x": 2}, teststep=3)
|
||||
# Test step 4: Source C overwrites value of var x
|
||||
self.update_and_verify(inv_src_c, {"x": 3}, teststep=4)
|
||||
# Test step 5: Value of var x from source C remains unchanged
|
||||
self.update_and_verify(inv_src_b, {}, expect={"x": 3}, teststep=5)
|
||||
# Test step 6: Value of var x from source A reappears, because the
|
||||
# latest update from source B did not contain var x.
|
||||
self.update_and_verify(inv_src_c, {}, expect={"x": 1}, teststep=6)
|
||||
|
||||
@@ -210,6 +210,39 @@ def test_disallowed_http_update_methods(put, patch, post, inventory, project, ad
|
||||
patch(url=reverse('api:job_detail', kwargs={'pk': job.pk}), data={}, user=admin_user, expect=405)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"job_type",
|
||||
[
|
||||
'run',
|
||||
'check',
|
||||
],
|
||||
)
|
||||
def test_job_relaunch_with_job_type(post, inventory, project, machine_credential, admin_user, job_type):
|
||||
# Create a job template
|
||||
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
|
||||
|
||||
# Set initial job type
|
||||
init_job_type = 'check' if job_type == 'run' else 'run'
|
||||
|
||||
# Create a job instance
|
||||
job = jt.create_unified_job(_eager_fields={'job_type': init_job_type})
|
||||
|
||||
# Perform the POST request
|
||||
url = reverse('api:job_relaunch', kwargs={'pk': job.pk})
|
||||
r = post(url=url, data={'job_type': job_type}, user=admin_user, expect=201)
|
||||
|
||||
# Assert that the response status code is 201 (Created)
|
||||
assert r.status_code == 201
|
||||
|
||||
# Retrieve the newly created job from the response
|
||||
new_job_id = r.data.get('id')
|
||||
new_job = Job.objects.get(id=new_job_id)
|
||||
|
||||
# Assert that the new job has the correct job type
|
||||
assert new_job.job_type == job_type
|
||||
|
||||
|
||||
class TestControllerNode:
|
||||
@pytest.fixture
|
||||
def project_update(self, project):
|
||||
|
||||
@@ -56,6 +56,175 @@ def test_user_create(post, admin):
|
||||
assert not response.data['is_system_auditor']
|
||||
|
||||
|
||||
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
|
||||
@override_settings(
|
||||
LOCAL_PASSWORD_MIN_LENGTH=1,
|
||||
LOCAL_PASSWORD_MIN_DIGITS=0,
|
||||
LOCAL_PASSWORD_MIN_UPPER=0,
|
||||
LOCAL_PASSWORD_MIN_SPECIAL=0,
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test_user_create_with_django_password_validation_basic(post, admin):
|
||||
"""Test if the Django password validators are applied correctly."""
|
||||
with override_settings(
|
||||
AUTH_PASSWORD_VALIDATORS=[
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
|
||||
'OPTIONS': {
|
||||
'min_length': 3,
|
||||
},
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
|
||||
},
|
||||
],
|
||||
):
|
||||
# This user should fail the UserAttrSimilarity, MinLength and CommonPassword validators.
|
||||
user_attrs = (
|
||||
{
|
||||
"password": "Password", # NOSONAR
|
||||
"username": "Password",
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
print(f"Create user with invalid password {user_attrs=}")
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 400
|
||||
# This user should pass all Django validators.
|
||||
user_attrs = {
|
||||
"password": "r$TyKiOCb#ED", # NOSONAR
|
||||
"username": "TestUser",
|
||||
"is_superuser": False,
|
||||
}
|
||||
print(f"Create user with valid password {user_attrs=}")
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 201
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"user_attrs,validators,expected_status_code",
|
||||
[
|
||||
# Test password similarity with username.
|
||||
(
|
||||
{"password": "TestUser1", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "abc", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test password min length criterion.
|
||||
(
|
||||
{"password": "TooShort", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "LongEnough", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test password is too common criterion.
|
||||
(
|
||||
{"password": "Password", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "aEArV$5Vkdw", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test if password is only numeric.
|
||||
(
|
||||
{"password": "1234567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "abc4567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
],
|
||||
)
|
||||
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
|
||||
@override_settings(
|
||||
LOCAL_PASSWORD_MIN_LENGTH=1,
|
||||
LOCAL_PASSWORD_MIN_DIGITS=0,
|
||||
LOCAL_PASSWORD_MIN_UPPER=0,
|
||||
LOCAL_PASSWORD_MIN_SPECIAL=0,
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test_user_create_with_django_password_validation_ext(post, delete, admin, user_attrs, validators, expected_status_code):
|
||||
"""Test the functionality of the single Django password validators."""
|
||||
#
|
||||
default_parameters = {
|
||||
# Default values for input parameters which are None.
|
||||
"user_attrs": {
|
||||
"password": "r$TyKiOCb#ED", # NOSONAR
|
||||
"username": "DefaultUser",
|
||||
"is_superuser": False,
|
||||
},
|
||||
"validators": [
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
|
||||
'OPTIONS': {
|
||||
'min_length': 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
|
||||
},
|
||||
],
|
||||
}
|
||||
user_attrs = user_attrs if user_attrs is not None else default_parameters["user_attrs"]
|
||||
validators = validators if validators is not None else default_parameters["validators"]
|
||||
with override_settings(AUTH_PASSWORD_VALIDATORS=validators):
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == expected_status_code
|
||||
# Delete user if it was created successfully.
|
||||
if response.status_code == 201:
|
||||
response = delete(reverse('api:user_detail', kwargs={'pk': response.data['id']}), admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 204
|
||||
else:
|
||||
# Catch the unexpected behavior that sometimes the user is written
|
||||
# into the database before the validation fails. This actually can
|
||||
# happen if UserSerializer.validate instantiates User(**attrs)!
|
||||
username = user_attrs['username']
|
||||
assert not User.objects.filter(username=username)
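# Illustrative only: the override_settings(AUTH_PASSWORD_VALIDATORS=...) blocks in
# the tests above take effect through Django's password_validation module. A small
# standalone example of that API, independent of the AWX serializer:
def _example_password_is_acceptable(password, user=None):
    from django.contrib.auth.password_validation import validate_password
    from django.core.exceptions import ValidationError

    try:
        # Runs every configured validator and raises ValidationError with one
        # message per validator that rejected the password.
        validate_password(password, user=user)
        return True
    except ValidationError:
        return False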
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_fail_double_create_user(post, admin):
|
||||
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
@@ -82,6 +251,10 @@ def test_updating_own_password_refreshes_session(patch, admin):
|
||||
Updating your own password should refresh the session id.
|
||||
'''
|
||||
with mock.patch('awx.api.serializers.update_session_auth_hash') as update_session_auth_hash:
|
||||
# Attention: If the Django password validator `CommonPasswordValidator`
|
||||
# is active, this test case will fail because this validator raises on
|
||||
# password 'newpassword'. Consider changing the hard-coded password to
|
||||
# something uncommon.
|
||||
patch(reverse('api:user_detail', kwargs={'pk': admin.pk}), {'password': 'newpassword'}, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert update_session_auth_hash.called
|
||||
|
||||
|
||||
@@ -34,40 +34,18 @@ def test_wrapup_does_send_notifications(mocker):
|
||||
mock.assert_called_once_with('succeeded')
|
||||
|
||||
|
||||
class FakeRedis:
|
||||
def keys(self, *args, **kwargs):
|
||||
return []
|
||||
|
||||
def set(self):
|
||||
pass
|
||||
|
||||
def get(self):
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def from_url(cls, *args, **kwargs):
|
||||
return cls()
|
||||
|
||||
def pipeline(self):
|
||||
return self
|
||||
|
||||
|
||||
class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
@pytest.fixture(autouse=True)
|
||||
def turn_off_websockets(self):
|
||||
def turn_off_websockets_and_redis(self, fake_redis):
|
||||
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
|
||||
yield
|
||||
|
||||
def get_worker(self):
|
||||
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
||||
return CallbackBrokerWorker()
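# Illustrative only: turn_off_websockets_and_redis above requests a fake_redis
# fixture that is not shown in this hunk. A plausible minimal version, modeled on
# the FakeRedis class and the redis.Redis patch being removed here (the fixture
# name and its exact shape are assumptions):
@pytest.fixture
def fake_redis():
    class _FakeRedis:
        def keys(self, *args, **kwargs):
            return []

        def get(self, *args, **kwargs):
            return None

        def set(self, *args, **kwargs):
            return None

        def pipeline(self):
            return self

        @classmethod
        def from_url(cls, *args, **kwargs):
            return cls()

    # Patch the client class so CallbackBrokerWorker() never talks to a real server.
    with mock.patch('redis.Redis', new=_FakeRedis):
        yield _FakeRedis()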
|
||||
|
||||
def event_create_kwargs(self):
|
||||
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
|
||||
return dict(inventory_update=inventory_update, created=inventory_update.created)
|
||||
|
||||
def test_flush_with_valid_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events}
|
||||
worker.flush()
|
||||
@@ -75,7 +53,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
|
||||
|
||||
def test_flush_with_invalid_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [
|
||||
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
|
||||
@@ -90,7 +68,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff == {InventoryUpdateEvent: [events[1]]}
|
||||
|
||||
def test_duplicate_key_not_saved_twice(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
worker.flush()
|
||||
@@ -104,7 +82,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff.get(InventoryUpdateEvent, []) == []
|
||||
|
||||
def test_give_up_on_bad_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
|
||||
@@ -117,7 +95,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
|
||||
|
||||
def test_flush_with_empty_buffer(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
worker.buff = {InventoryUpdateEvent: []}
|
||||
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
|
||||
worker.flush()
|
||||
@@ -127,7 +105,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
# In postgres, text fields reject NUL character, 0x00
|
||||
# tests use sqlite3 which will not raise an error
|
||||
# but we can still test that it is sanitized before saving
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
|
||||
assert "\x00" in events[0].stdout # sanity
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.