Mirror of https://github.com/ansible/awx.git (synced 2026-02-04 19:18:13 -03:30)
Compare commits
226 Commits
First listed commit: 2529fdcfd7; last listed commit: 4822dd79fc.
.github/actions/awx_devel_image/action.yml (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
name: Setup images for AWX
description: Builds new awx_devel image
inputs:
  github-token:
    description: GitHub Token for registry access
    required: true
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

    - name: Log in to registry
      shell: bash
      run: |
        echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

    - name: Pre-pull latest devel image to warm cache
      shell: bash
      run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}

    - name: Build image for current source checkout
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        make docker-compose-build
.github/actions/run_awx_devel/action.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: Run AWX docker-compose
description: Runs AWX with `make docker-compose`
inputs:
  github-token:
    description: GitHub Token to pass to awx_devel_image
    required: true
  build-ui:
    description: Should the UI be built?
    required: false
    default: false
    type: boolean
outputs:
  ip:
    description: The IP of the tools_awx_1 container
    value: ${{ steps.data.outputs.ip }}
  admin-token:
    description: OAuth token for admin user
    value: ${{ steps.data.outputs.admin_token }}
runs:
  using: composite
  steps:
    - name: Build awx_devel image for running checks
      uses: ./.github/actions/awx_devel_image
      with:
        github-token: ${{ inputs.github-token }}

    - name: Upgrade ansible-core
      shell: bash
      run: python3 -m pip install --upgrade ansible-core

    - name: Install system deps
      shell: bash
      run: sudo apt-get install -y gettext

    - name: Start AWX
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        COMPOSE_UP_OPTS="-d" \
        make docker-compose

    - name: Update default AWX password
      shell: bash
      run: |
        SECONDS=0
        while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]; do
          if [[ $SECONDS -gt 600 ]]; then
            echo "Timing out, AWX never came up"
            exit 1
          fi
          echo "Waiting for AWX..."
          sleep 5
        done
        echo "AWX is up, updating the password..."
        docker exec -i tools_awx_1 sh <<-EOSH
          awx-manage update_password --username=admin --password=password
        EOSH

    - name: Build UI
      # This must be a string comparison in composite actions:
      # https://github.com/actions/runner/issues/2238
      if: ${{ inputs.build-ui == 'true' }}
      shell: bash
      run: |
        docker exec -i tools_awx_1 sh <<-EOSH
          make ui-devel
        EOSH

    - name: Get instance data
      id: data
      shell: bash
      run: |
        AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
        ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
        echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
        echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
.github/actions/upload_awx_devel_logs/action.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
inputs:
  log-filename:
    description: "*Unique* name of the log file"
    required: true
runs:
  using: composite
  steps:
    - name: Get AWX logs
      shell: bash
      run: |
        docker logs tools_awx_1 > ${{ inputs.log-filename }}

    - name: Upload AWX logs as artifact
      uses: actions/upload-artifact@v3
      with:
        name: docker-compose-logs
        path: ${{ inputs.log-filename }}
.github/dependabot.yml (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "docs/docsite/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2
    labels:
      - "docs"
      - "dependencies"
.github/triage_replies.md (vendored, 4 changed lines)
@@ -7,8 +7,8 @@

## PRs/Issues

### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
### Visit the Forum or Matrix
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?

### Denied Submission
.github/workflows/ci.yml (vendored, 169 changed lines)
@@ -11,6 +11,7 @@ jobs:
  common-tests:
    name: ${{ matrix.tests.name }}
    runs-on: ubuntu-latest
    timeout-minutes: 60
    permissions:
      packages: write
      contents: read
@@ -20,6 +21,8 @@
        tests:
          - name: api-test
            command: /start_tests.sh
          - name: api-migrations
            command: /start_tests.sh test_migrations
          - name: api-lint
            command: /var/lib/awx/venv/awx/bin/tox -e linters
          - name: api-swagger
@@ -35,29 +38,42 @@
          - name: ui-test-general
            command: make ui-test-general
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Build awx_devel image for running checks
        uses: ./.github/actions/awx_devel_image
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run check ${{ matrix.tests.name }}
        run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
        run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner

  dev-env:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - uses: ./.github/actions/run_awx_devel
        id: awx
        with:
          build-ui: false
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run smoke test
        run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
        run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v

  awx-operator:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Checkout awx
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          path: awx

      - name: Checkout awx-operator
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          repository: ansible/awx-operator
          path: awx-operator
@@ -67,7 +83,7 @@
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

@@ -99,10 +115,11 @@
  collection-sanity:
    name: awx_collection sanity
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
      - name: Upgrade ansible-core
@@ -114,3 +131,139 @@
          # needed due to cgroupsv2. This is fixed, but a stable release
          # with the fix has not been made yet.
          ANSIBLE_TEST_PREFER_PODMAN: 1

  collection-integration:
    name: awx_collection integration
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        target-regex:
          - name: a-h
            regex: ^[a-h]
          - name: i-p
            regex: ^[i-p]
          - name: r-z0-9
            regex: ^[r-z0-9]
    steps:
      - uses: actions/checkout@v3

      - uses: ./.github/actions/run_awx_devel
        id: awx
        with:
          build-ui: false
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Install dependencies for running tests
        run: |
          python3 -m pip install -e ./awxkit/
          python3 -m pip install -r awx_collection/requirements.txt

      - name: Run integration tests
        run: |
          echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
          echo '[general]' > ~/.tower_cli.cfg
          echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
          echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
          echo 'verify_ssl = false' >> ~/.tower_cli.cfg
          TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
          make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
        env:
          ANSIBLE_TEST_PREFER_PODMAN: 1

      # Upload coverage report as artifact
      - uses: actions/upload-artifact@v3
        if: always()
        with:
          name: coverage-${{ matrix.target-regex.name }}
          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/

      - uses: ./.github/actions/upload_awx_devel_logs
        if: always()
        with:
          log-filename: collection-integration-${{ matrix.target-regex.name }}.log

  collection-integration-coverage-combine:
    name: combine awx_collection integration coverage
    runs-on: ubuntu-latest
    timeout-minutes: 10
    needs:
      - collection-integration
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Upgrade ansible-core
        run: python3 -m pip install --upgrade ansible-core

      - name: Download coverage artifacts
        uses: actions/download-artifact@v3
        with:
          path: coverage

      - name: Combine coverage
        run: |
          make COLLECTION_VERSION=100.100.100-git install_collection
          mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
          cd coverage
          for i in coverage-*; do
            cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
          done
          cd ~/.ansible/collections/ansible_collections/awx/awx
          ansible-test coverage combine --requirements
          ansible-test coverage html
          echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          ansible-test coverage report >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
          echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
          echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY

      # This is a huge hack, there's no official action for removing artifacts currently.
      # Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
      # steps, so we have to use github-script to get them.
      #
      # The advantage of doing this, though, is that we save on artifact storage space.

      - name: Get secret artifact runtime URL
        uses: actions/github-script@v6
        id: get-runtime-url
        with:
          result-encoding: string
          script: |
            const { ACTIONS_RUNTIME_URL } = process.env;
            return ACTIONS_RUNTIME_URL;

      - name: Get secret artifact runtime token
        uses: actions/github-script@v6
        id: get-runtime-token
        with:
          result-encoding: string
          script: |
            const { ACTIONS_RUNTIME_TOKEN } = process.env;
            return ACTIONS_RUNTIME_TOKEN;

      - name: Remove intermediary artifacts
        env:
          ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
          ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
        run: |
          echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
          artifacts=$(
            curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
              ${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
              | jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
          )

          for artifact in $artifacts; do
            curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
          done

      - name: Upload coverage report as artifact
        uses: actions/upload-artifact@v3
        with:
          name: awx-collection-integration-coverage-html
          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
.github/workflows/devel_images.yml (vendored, 5 changed lines)
@@ -12,11 +12,12 @@ jobs:
  push:
    if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
    runs-on: ubuntu-latest
    timeout-minutes: 60
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
@@ -28,7 +29,7 @@
          OWNER: '${{ github.repository_owner }}'

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.github/workflows/docs.yml (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
---
name: Docsite CI
on:
  pull_request:
jobs:
  docsite-build:
    name: docsite test build
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v3

      - name: install tox
        run: pip install tox

      - name: Assure docs can be built
        run: tox -e docs
54
.github/workflows/e2e_test.yml
vendored
54
.github/workflows/e2e_test.yml
vendored
@@ -19,41 +19,20 @@ jobs:
|
||||
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Install system deps
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Log in to registry
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build UI
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
|
||||
|
||||
- name: Start AWX
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
|
||||
build-ui: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Pull awx_cypress_base image
|
||||
run: |
|
||||
docker pull quay.io/awx/awx_cypress_base:latest
|
||||
|
||||
- name: Checkout test project
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/tower-qa
|
||||
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
||||
@@ -65,18 +44,6 @@ jobs:
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
docker build -t awx-pf-tests .
|
||||
|
||||
- name: Update default AWX password
|
||||
run: |
|
||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
||||
do
|
||||
echo "Waiting for AWX..."
|
||||
sleep 5;
|
||||
done
|
||||
echo "AWX is up, updating the password..."
|
||||
docker exec -i tools_awx_1 sh <<-EOSH
|
||||
awx-manage update_password --username=admin --password=password
|
||||
EOSH
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
||||
@@ -86,7 +53,7 @@ jobs:
|
||||
export COMMIT_INFO_SHA=$GITHUB_SHA
|
||||
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
||||
AWX_IP=${{ steps.awx.outputs.ip }}
|
||||
printenv > .env
|
||||
echo "Executing tests:"
|
||||
docker run \
|
||||
@@ -102,8 +69,7 @@ jobs:
|
||||
-w /e2e \
|
||||
awx-pf-tests run --project .
|
||||
|
||||
- name: Save AWX logs
|
||||
uses: actions/upload-artifact@v2
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
name: AWX-logs-${{ matrix.job }}
|
||||
path: make-docker-compose-output.log
|
||||
log-filename: e2e-${{ matrix.job }}.log
|
||||
|
||||
@@ -9,6 +9,7 @@ on:
jobs:
  push:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    permissions:
      packages: write
      contents: read
4
.github/workflows/label_issue.yml
vendored
4
.github/workflows/label_issue.yml
vendored
@@ -13,6 +13,7 @@ permissions:
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue
|
||||
|
||||
steps:
|
||||
@@ -26,9 +27,10 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
4
.github/workflows/label_pr.yml
vendored
4
.github/workflows/label_pr.yml
vendored
@@ -14,6 +14,7 @@ permissions:
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR
|
||||
|
||||
steps:
|
||||
@@ -25,9 +26,10 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
.github/workflows/pr_body_check.yml (vendored, 2 changed lines)
@@ -7,8 +7,10 @@ on:
    types: [opened, edited, reopened, synchronize]
jobs:
  pr-check:
    if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
    name: Scan PR description for semantic versioning keywords
    runs-on: ubuntu-latest
    timeout-minutes: 20
    permissions:
      packages: write
      contents: read
.github/workflows/pr_body_check_jira.yml (vendored, deleted, 35 lines)
@@ -1,35 +0,0 @@
---
name: Check body for reference to jira
on:
  pull_request:
    branches:
      - release_**
jobs:
  pr-check:
    if: github.repository_owner == 'ansible' && github.repository != 'awx'
    name: Scan PR description for JIRA links
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check for JIRA lines
        env:
          PR_BODY: ${{ github.event.pull_request.body }}
        run: |
          echo "$PR_BODY" | grep "JIRA: None" > no_jira
          echo "$PR_BODY" | grep "JIRA: https://.*[0-9]+"> jira
          exit 0
        # We exit 0 and set the shell to prevent the returns from the greps from failing this step
        # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
        shell: bash {0}

      - name: Check for exactly one item
        run: |
          if [ $(cat no_jira jira | wc -l) != 1 ] ; then
            echo "The PR body must contain exactly one of [ 'JIRA: None' or 'JIRA: <one or more links>' ]"
            echo "We counted $(cat no_jira jira | wc -l)"
            exit 255;
          else
            exit 0;
          fi
15
.github/workflows/promote.yml
vendored
15
.github/workflows/promote.yml
vendored
@@ -13,17 +13,18 @@ permissions:
|
||||
|
||||
jobs:
|
||||
promote:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -40,9 +41,13 @@ jobs:
|
||||
if: ${{ github.repository_owner != 'ansible' }}
|
||||
|
||||
- name: Build collection and publish to galaxy
|
||||
env:
|
||||
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
|
||||
COLLECTION_VERSION: ${{ github.event.release.tag_name }}
|
||||
COLLECTION_TEMPLATE_VERSION: true
|
||||
run: |
|
||||
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
|
||||
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
make build_collection
|
||||
if [ "$(curl -L --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
echo "Galaxy release already done"; \
|
||||
else \
|
||||
ansible-galaxy collection publish \
|
||||
|
||||
9
.github/workflows/stage.yml
vendored
9
.github/workflows/stage.yml
vendored
@@ -23,6 +23,7 @@ jobs:
|
||||
stage:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
permissions:
|
||||
packages: write
|
||||
contents: write
|
||||
@@ -44,7 +45,7 @@ jobs:
|
||||
exit 0
|
||||
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: awx
|
||||
|
||||
@@ -52,18 +53,18 @@ jobs:
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Checkout awx-logos
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ansible/awx-logos
|
||||
path: awx-logos
|
||||
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/awx-operator
|
||||
path: awx-operator
|
||||
|
||||
.github/workflows/update_dependabot_prs.yml (vendored, 1 changed line)
@@ -9,6 +9,7 @@ jobs:
    name: Update Dependabot Prs
    if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
    runs-on: ubuntu-latest
    timeout-minutes: 20

    steps:
      - name: Checkout branch
.github/workflows/upload_schema.yml (vendored, 5 changed lines)
@@ -13,17 +13,18 @@ on:
jobs:
  push:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.gitignore (vendored, 4 added lines)
@@ -165,3 +165,7 @@ use_dev_supervisor.txt

awx/ui_next/src
awx/ui_next/build

# Docs build stuff
docs/docsite/build/
_readthedocs/
.gitleaks.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
    "docs/docsite/rst/administration/oauth2_token_auth.rst",
]
.pip-tools.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
[tool.pip-tools]
resolver = "backtracking"
allow-unsafe = true
strip-extras = true
quiet = true
.readthedocs.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: >-
      3.11
  commands:
    - pip install --user tox
    - python3 -m tox -e docs --notest -v
    - python3 -m tox -e docs --skip-pkg-install -q
    - mkdir -p _readthedocs/html/
    - mv docs/docsite/build/html/* _readthedocs/html/
@@ -10,6 +10,7 @@ ignore: |
  tools/docker-compose/_sources
  # django template files
  awx/api/templates/instance_install_bundle/**
  .readthedocs.yaml

extends: default
@@ -22,7 +22,7 @@ recursive-exclude awx/settings local_settings.py*
include tools/scripts/request_tower_configuration.sh
include tools/scripts/request_tower_configuration.ps1
include tools/scripts/automation-controller-service
include tools/scripts/failure-event-handler
include tools/scripts/rsyslog-4xx-recovery
include tools/scripts/awx-python
include awx/playbooks/library/mkfifo.py
include tools/sosreport/*
31
Makefile
31
Makefile
@@ -6,6 +6,7 @@ DOCKER_COMPOSE ?= docker-compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
KIND_BIN ?= $(shell which kind)
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
@@ -42,6 +43,8 @@ PROMETHEUS ?= false
|
||||
GRAFANA ?= false
|
||||
# If set to true docker-compose will also start a hashicorp vault instance
|
||||
VAULT ?= false
|
||||
# If set to true docker-compose will also start a hashicorp vault instance with TLS enabled
|
||||
VAULT_TLS ?= false
|
||||
# If set to true docker-compose will also start a tacacs+ instance
|
||||
TACACS ?= false
|
||||
|
||||
@@ -60,7 +63,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
|
||||
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
|
||||
# These should be upgraded in the AWX and Ansible venv before attempting
|
||||
# to install the actual requirements
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
|
||||
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==8.0.4 wheel==0.38.4
|
||||
|
||||
NAME ?= awx
|
||||
|
||||
@@ -78,7 +81,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
||||
.git/hooks/pre-commit
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -323,21 +326,16 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
## Login to Github container image registry, pull image, then build image.
|
||||
github_ci_setup:
|
||||
# GITHUB_ACTOR is automatic github actions env var
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
$(MAKE) docker-compose-build
|
||||
test_migrations:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||
|
||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
||||
github_ci_runner: github_ci_setup docker-runner
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -383,7 +381,7 @@ test_collection_sanity:
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -532,13 +530,15 @@ docker-compose-sources: .git/hooks/pre-commit
|
||||
-e enable_prometheus=$(PROMETHEUS) \
|
||||
-e enable_grafana=$(GRAFANA) \
|
||||
-e enable_vault=$(VAULT) \
|
||||
-e vault_tls=$(VAULT_TLS) \
|
||||
-e enable_tacacs=$(TACACS) \
|
||||
$(EXTRA_SOURCES_ANSIBLE_OPTS)
|
||||
|
||||
docker-compose: awx/projects docker-compose-sources
|
||||
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
|
||||
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
|
||||
-e enable_vault=$(VAULT);
|
||||
-e enable_vault=$(VAULT) \
|
||||
-e vault_tls=$(VAULT_TLS);
|
||||
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
|
||||
|
||||
docker-compose-credential-plugins: awx/projects docker-compose-sources
|
||||
@@ -664,6 +664,9 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
|
||||
kind-dev-load: awx-kube-dev-build
|
||||
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
[](https://libera.chat)
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
Code of Conduct
---------------

We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)

Get Involved
------------

We welcome your feedback and ideas. Here's how to reach us with feedback and questions:

- Join the `#ansible-awx` channel on irc.libera.chat
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
- Join the [Ansible Community Forum](https://forum.ansible.com)
@@ -52,39 +52,14 @@ try:
|
||||
except ImportError: # pragma: no cover
|
||||
MODE = 'production'
|
||||
|
||||
import hashlib
|
||||
|
||||
try:
|
||||
import django # noqa: F401
|
||||
|
||||
HAS_DJANGO = True
|
||||
except ImportError:
|
||||
HAS_DJANGO = False
|
||||
pass
|
||||
else:
|
||||
from django.db.backends.base import schema
|
||||
from django.db.models import indexes
|
||||
from django.db.backends.utils import names_digest
|
||||
from django.db import connection
|
||||
|
||||
if HAS_DJANGO is True:
|
||||
# See upgrade blocker note in requirements/README.md
|
||||
try:
|
||||
names_digest('foo', 'bar', 'baz', length=8)
|
||||
except ValueError:
|
||||
|
||||
def names_digest(*args, length):
|
||||
"""
|
||||
Generate a 32-bit digest of a set of arguments that can be used to shorten
|
||||
identifying names. Support for use in FIPS environments.
|
||||
"""
|
||||
h = hashlib.md5(usedforsecurity=False)
|
||||
for arg in args:
|
||||
h.update(arg.encode())
|
||||
return h.hexdigest()[:length]
|
||||
|
||||
schema.names_digest = names_digest
|
||||
indexes.names_digest = names_digest
|
||||
|
||||
|
||||
def find_commands(management_dir):
|
||||
# Modified version of function from django/core/management/__init__.py.
|
||||
|
||||
@@ -1,450 +0,0 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# Python
|
||||
import re
|
||||
import json
|
||||
from functools import reduce
|
||||
|
||||
# Django
|
||||
from django.core.exceptions import FieldError, ValidationError, FieldDoesNotExist
|
||||
from django.db import models
|
||||
from django.db.models import Q, CharField, IntegerField, BooleanField, TextField, JSONField
|
||||
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField, ForeignKey
|
||||
from django.db.models.functions import Cast
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.contrib.contenttypes.fields import GenericForeignKey
|
||||
from django.utils.encoding import force_str
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# Django REST Framework
|
||||
from rest_framework.exceptions import ParseError, PermissionDenied
|
||||
from rest_framework.filters import BaseFilterBackend
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import get_type_for_model, to_python_boolean
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
|
||||
|
||||
class TypeFilterBackend(BaseFilterBackend):
|
||||
"""
|
||||
Filter on type field now returned with all objects.
|
||||
"""
|
||||
|
||||
def filter_queryset(self, request, queryset, view):
|
||||
try:
|
||||
types = None
|
||||
for key, value in request.query_params.items():
|
||||
if key == 'type':
|
||||
if ',' in value:
|
||||
types = value.split(',')
|
||||
else:
|
||||
types = (value,)
|
||||
if types:
|
||||
types_map = {}
|
||||
for ct in ContentType.objects.filter(Q(app_label='main') | Q(app_label='auth', model='user')):
|
||||
ct_model = ct.model_class()
|
||||
if not ct_model:
|
||||
continue
|
||||
ct_type = get_type_for_model(ct_model)
|
||||
types_map[ct_type] = ct.pk
|
||||
model = queryset.model
|
||||
model_type = get_type_for_model(model)
|
||||
if 'polymorphic_ctype' in get_all_field_names(model):
|
||||
types_pks = set([v for k, v in types_map.items() if k in types])
|
||||
queryset = queryset.filter(polymorphic_ctype_id__in=types_pks)
|
||||
elif model_type in types:
|
||||
queryset = queryset
|
||||
else:
|
||||
queryset = queryset.none()
|
||||
return queryset
|
||||
except FieldError as e:
|
||||
# Return a 400 for invalid field names.
|
||||
raise ParseError(*e.args)
|
||||
|
||||
|
||||
def get_fields_from_path(model, path):
|
||||
"""
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the fields in the line, and also the revised lookup path
|
||||
ex., given
|
||||
model=Organization
|
||||
path='project__timeout'
|
||||
returns tuple of fields traversed as well and a corrected path,
|
||||
for special cases we do substitutions
|
||||
([<IntegerField for timeout>], 'project__timeout')
|
||||
"""
|
||||
# Store of all the fields used to detect repeats
|
||||
field_list = []
|
||||
new_parts = []
|
||||
for name in path.split('__'):
|
||||
if model is None:
|
||||
raise ParseError(_('No related model for field {}.').format(name))
|
||||
# HACK: Make project and inventory source filtering by old field names work for backwards compatibility.
|
||||
if model._meta.object_name in ('Project', 'InventorySource'):
|
||||
name = {'current_update': 'current_job', 'last_update': 'last_job', 'last_update_failed': 'last_job_failed', 'last_updated': 'last_job_run'}.get(
|
||||
name, name
|
||||
)
|
||||
|
||||
if name == 'type' and 'polymorphic_ctype' in get_all_field_names(model):
|
||||
name = 'polymorphic_ctype'
|
||||
new_parts.append('polymorphic_ctype__model')
|
||||
else:
|
||||
new_parts.append(name)
|
||||
|
||||
if name in getattr(model, 'PASSWORD_FIELDS', ()):
|
||||
raise PermissionDenied(_('Filtering on password fields is not allowed.'))
|
||||
elif name == 'pk':
|
||||
field = model._meta.pk
|
||||
else:
|
||||
name_alt = name.replace("_", "")
|
||||
if name_alt in model._meta.fields_map.keys():
|
||||
field = model._meta.fields_map[name_alt]
|
||||
new_parts.pop()
|
||||
new_parts.append(name_alt)
|
||||
else:
|
||||
field = model._meta.get_field(name)
|
||||
if isinstance(field, ForeignObjectRel) and getattr(field.field, '__prevent_search__', False):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
elif getattr(field, '__prevent_search__', False):
|
||||
raise PermissionDenied(_('Filtering on %s is not allowed.' % name))
|
||||
if field in field_list:
|
||||
# Field traversed twice, could create infinite JOINs, DoSing Tower
|
||||
raise ParseError(_('Loops not allowed in filters, detected on field {}.').format(field.name))
|
||||
field_list.append(field)
|
||||
model = getattr(field, 'related_model', None)
|
||||
|
||||
return field_list, '__'.join(new_parts)
|
||||
|
||||
|
||||
def get_field_from_path(model, path):
|
||||
"""
|
||||
Given a Django ORM lookup path (possibly over multiple models)
|
||||
Returns the last field in the line, and the revised lookup path
|
||||
ex.
|
||||
(<IntegerField for timeout>, 'project__timeout')
|
||||
"""
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
return (field_list[-1], new_path)
|
||||
|
||||
|
||||
class FieldLookupBackend(BaseFilterBackend):
|
||||
"""
|
||||
Filter using field lookups provided via query string parameters.
|
||||
"""
|
||||
|
||||
RESERVED_NAMES = ('page', 'page_size', 'format', 'order', 'order_by', 'search', 'type', 'host_filter', 'count_disabled', 'no_truncate', 'limit')
|
||||
|
||||
SUPPORTED_LOOKUPS = (
|
||||
'exact',
|
||||
'iexact',
|
||||
'contains',
|
||||
'icontains',
|
||||
'startswith',
|
||||
'istartswith',
|
||||
'endswith',
|
||||
'iendswith',
|
||||
'regex',
|
||||
'iregex',
|
||||
'gt',
|
||||
'gte',
|
||||
'lt',
|
||||
'lte',
|
||||
'in',
|
||||
'isnull',
|
||||
'search',
|
||||
)
|
||||
|
||||
# A list of fields that we know can be filtered on without the possibility
|
||||
# of introducing duplicates
|
||||
NO_DUPLICATES_ALLOW_LIST = (CharField, IntegerField, BooleanField, TextField)
|
||||
|
||||
def get_fields_from_lookup(self, model, lookup):
|
||||
if '__' in lookup and lookup.rsplit('__', 1)[-1] in self.SUPPORTED_LOOKUPS:
|
||||
path, suffix = lookup.rsplit('__', 1)
|
||||
else:
|
||||
path = lookup
|
||||
suffix = 'exact'
|
||||
|
||||
if not path:
|
||||
raise ParseError(_('Query string field name not provided.'))
|
||||
|
||||
# FIXME: Could build up a list of models used across relationships, use
|
||||
# those lookups combined with request.user.get_queryset(Model) to make
|
||||
# sure user cannot query using objects he could not view.
|
||||
field_list, new_path = get_fields_from_path(model, path)
|
||||
|
||||
new_lookup = new_path
|
||||
new_lookup = '__'.join([new_path, suffix])
|
||||
return field_list, new_lookup
|
||||
|
||||
def get_field_from_lookup(self, model, lookup):
|
||||
'''Method to match return type of single field, if needed.'''
|
||||
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
|
||||
return (field_list[-1], new_lookup)
|
||||
|
||||
def to_python_related(self, value):
|
||||
value = force_str(value)
|
||||
if value.lower() in ('none', 'null'):
|
||||
return None
|
||||
else:
|
||||
return int(value)
|
||||
|
||||
def value_to_python_for_field(self, field, value):
|
||||
if isinstance(field, models.BooleanField):
|
||||
return to_python_boolean(value)
|
||||
elif isinstance(field, (ForeignObjectRel, ManyToManyField, GenericForeignKey, ForeignKey)):
|
||||
try:
|
||||
return self.to_python_related(value)
|
||||
except ValueError:
|
||||
raise ParseError(_('Invalid {field_name} id: {field_id}').format(field_name=getattr(field, 'name', 'related field'), field_id=value))
|
||||
else:
|
||||
return field.to_python(value)
|
||||
|
||||
def value_to_python(self, model, lookup, value):
|
||||
try:
|
||||
lookup.encode("ascii")
|
||||
except UnicodeEncodeError:
|
||||
raise ValueError("%r is not an allowed field name. Must be ascii encodable." % lookup)
|
||||
|
||||
field_list, new_lookup = self.get_fields_from_lookup(model, lookup)
|
||||
field = field_list[-1]
|
||||
|
||||
needs_distinct = not all(isinstance(f, self.NO_DUPLICATES_ALLOW_LIST) for f in field_list)
|
||||
|
||||
# Type names are stored without underscores internally, but are presented and
|
||||
# and serialized over the API containing underscores so we remove `_`
|
||||
# for polymorphic_ctype__model lookups.
|
||||
if new_lookup.startswith('polymorphic_ctype__model'):
|
||||
value = value.replace('_', '')
|
||||
elif new_lookup.endswith('__isnull'):
|
||||
value = to_python_boolean(value)
|
||||
elif new_lookup.endswith('__in'):
|
||||
items = []
|
||||
if not value:
|
||||
raise ValueError('cannot provide empty value for __in')
|
||||
for item in value.split(','):
|
||||
items.append(self.value_to_python_for_field(field, item))
|
||||
value = items
|
||||
elif new_lookup.endswith('__regex') or new_lookup.endswith('__iregex'):
|
||||
try:
|
||||
re.compile(value)
|
||||
except re.error as e:
|
||||
raise ValueError(e.args[0])
|
||||
elif new_lookup.endswith('__iexact'):
|
||||
if not isinstance(field, (CharField, TextField)):
|
||||
raise ValueError(f'{field.name} is not a text field and cannot be filtered by case-insensitive search')
|
||||
elif new_lookup.endswith('__search'):
|
||||
related_model = getattr(field, 'related_model', None)
|
||||
if not related_model:
|
||||
raise ValueError('%s is not searchable' % new_lookup[:-8])
|
||||
new_lookups = []
|
||||
for rm_field in related_model._meta.fields:
|
||||
if rm_field.name in ('username', 'first_name', 'last_name', 'email', 'name', 'description', 'playbook'):
|
||||
new_lookups.append('{}__{}__icontains'.format(new_lookup[:-8], rm_field.name))
|
||||
return value, new_lookups, needs_distinct
|
||||
else:
|
||||
if isinstance(field, JSONField):
|
||||
new_lookup = new_lookup.replace(field.name, f'{field.name}_as_txt')
|
||||
value = self.value_to_python_for_field(field, value)
|
||||
return value, new_lookup, needs_distinct
|
||||
|
||||
def filter_queryset(self, request, queryset, view):
|
||||
try:
|
||||
# Apply filters specified via query_params. Each entry in the lists
|
||||
# below is (negate, field, value).
|
||||
and_filters = []
|
||||
or_filters = []
|
||||
chain_filters = []
|
||||
role_filters = []
|
||||
search_filters = {}
|
||||
needs_distinct = False
|
||||
# Can only have two values: 'AND', 'OR'
|
||||
# If 'AND' is used, an item must satisfy all conditions to show up in the results.
|
||||
# If 'OR' is used, an item just needs to satisfy one condition to appear in results.
|
||||
search_filter_relation = 'OR'
|
||||
for key, values in request.query_params.lists():
|
||||
if key in self.RESERVED_NAMES:
|
||||
continue
|
||||
|
||||
# HACK: make `created` available via API for the Django User ORM model
|
||||
# so it keep compatibility with other objects which exposes the `created` attr.
|
||||
if queryset.model._meta.object_name == 'User' and key.startswith('created'):
|
||||
key = key.replace('created', 'date_joined')
|
||||
|
||||
# HACK: Make job event filtering by host name mostly work even
|
||||
# when not capturing job event hosts M2M.
|
||||
if queryset.model._meta.object_name == 'JobEvent' and key.startswith('hosts__name'):
|
||||
key = key.replace('hosts__name', 'or__host__name')
|
||||
or_filters.append((False, 'host__name__isnull', True))
|
||||
|
||||
# Custom __int filter suffix (internal use only).
|
||||
q_int = False
|
||||
if key.endswith('__int'):
|
||||
key = key[:-5]
|
||||
q_int = True
|
||||
|
||||
# RBAC filtering
|
||||
if key == 'role_level':
|
||||
role_filters.append(values[0])
|
||||
continue
|
||||
|
||||
# Search across related objects.
|
||||
if key.endswith('__search'):
|
||||
if values and ',' in values[0]:
|
||||
search_filter_relation = 'AND'
|
||||
values = reduce(lambda list1, list2: list1 + list2, [i.split(',') for i in values])
|
||||
for value in values:
|
||||
search_value, new_keys, _ = self.value_to_python(queryset.model, key, force_str(value))
|
||||
assert isinstance(new_keys, list)
|
||||
search_filters[search_value] = new_keys
|
||||
# by definition, search *only* joins across relations,
|
||||
# so it _always_ needs a .distinct()
|
||||
needs_distinct = True
|
||||
continue
|
||||
|
||||
# Custom chain__ and or__ filters, mutually exclusive (both can
|
||||
# precede not__).
|
||||
q_chain = False
|
||||
q_or = False
|
||||
if key.startswith('chain__'):
|
||||
key = key[7:]
|
||||
q_chain = True
|
||||
elif key.startswith('or__'):
|
||||
key = key[4:]
|
||||
q_or = True
|
||||
|
||||
# Custom not__ filter prefix.
|
||||
q_not = False
|
||||
if key.startswith('not__'):
|
||||
key = key[5:]
|
||||
q_not = True
|
||||
|
||||
# Convert value(s) to python and add to the appropriate list.
|
||||
for value in values:
|
||||
if q_int:
|
||||
value = int(value)
|
||||
value, new_key, distinct = self.value_to_python(queryset.model, key, value)
|
||||
if distinct:
|
||||
needs_distinct = True
|
||||
if '_as_txt' in new_key:
|
||||
fname = next(item for item in new_key.split('__') if item.endswith('_as_txt'))
|
||||
queryset = queryset.annotate(**{fname: Cast(fname[:-7], output_field=TextField())})
|
||||
if q_chain:
|
||||
chain_filters.append((q_not, new_key, value))
|
||||
elif q_or:
|
||||
or_filters.append((q_not, new_key, value))
|
||||
else:
|
||||
and_filters.append((q_not, new_key, value))
|
||||
|
||||
# Now build Q objects for database query filter.
|
||||
if and_filters or or_filters or chain_filters or role_filters or search_filters:
|
||||
args = []
|
||||
for n, k, v in and_filters:
|
||||
if n:
|
||||
args.append(~Q(**{k: v}))
|
||||
else:
|
||||
args.append(Q(**{k: v}))
|
||||
for role_name in role_filters:
|
||||
if not hasattr(queryset.model, 'accessible_pk_qs'):
|
||||
raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.'))
|
||||
args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
|
||||
if or_filters:
|
||||
q = Q()
|
||||
for n, k, v in or_filters:
|
||||
if n:
|
||||
q |= ~Q(**{k: v})
|
||||
else:
|
||||
q |= Q(**{k: v})
|
||||
args.append(q)
|
||||
if search_filters and search_filter_relation == 'OR':
|
||||
q = Q()
|
||||
for term, constrains in search_filters.items():
|
||||
for constrain in constrains:
|
||||
q |= Q(**{constrain: term})
|
||||
args.append(q)
|
||||
elif search_filters and search_filter_relation == 'AND':
|
||||
for term, constrains in search_filters.items():
|
||||
q_chain = Q()
|
||||
for constrain in constrains:
|
||||
q_chain |= Q(**{constrain: term})
|
||||
queryset = queryset.filter(q_chain)
|
||||
for n, k, v in chain_filters:
|
||||
if n:
|
||||
q = ~Q(**{k: v})
|
||||
else:
|
||||
q = Q(**{k: v})
|
||||
queryset = queryset.filter(q)
|
||||
queryset = queryset.filter(*args)
|
||||
if needs_distinct:
|
||||
queryset = queryset.distinct()
|
||||
return queryset
|
||||
except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
|
||||
raise ParseError(e.args[0])
|
||||
except ValidationError as e:
|
||||
raise ParseError(json.dumps(e.messages, ensure_ascii=False))
|
||||
|
||||
|
||||
class OrderByBackend(BaseFilterBackend):
|
||||
"""
|
||||
Filter to apply ordering based on query string parameters.
|
||||
"""
|
||||
|
||||
def filter_queryset(self, request, queryset, view):
|
||||
try:
|
||||
order_by = None
|
||||
for key, value in request.query_params.items():
|
||||
if key in ('order', 'order_by'):
|
||||
order_by = value
|
||||
if ',' in value:
|
||||
order_by = value.split(',')
|
||||
else:
|
||||
order_by = (value,)
|
||||
default_order_by = self.get_default_ordering(view)
|
||||
# glue the order by and default order by together so that the default is the backup option
|
||||
order_by = list(order_by or []) + list(default_order_by or [])
|
||||
if order_by:
|
||||
order_by = self._validate_ordering_fields(queryset.model, order_by)
|
||||
# Special handling of the type field for ordering. In this
|
||||
# case, we're not sorting exactly on the type field, but
|
||||
# given the limited number of views with multiple types,
|
||||
# sorting on polymorphic_ctype.model is effectively the same.
|
||||
new_order_by = []
|
||||
if 'polymorphic_ctype' in get_all_field_names(queryset.model):
|
||||
for field in order_by:
|
||||
if field == 'type':
|
||||
new_order_by.append('polymorphic_ctype__model')
|
||||
elif field == '-type':
|
||||
new_order_by.append('-polymorphic_ctype__model')
|
||||
else:
|
||||
new_order_by.append(field)
|
||||
else:
|
||||
for field in order_by:
|
||||
if field not in ('type', '-type'):
|
||||
new_order_by.append(field)
|
||||
queryset = queryset.order_by(*new_order_by)
|
||||
return queryset
|
||||
except FieldError as e:
|
||||
# Return a 400 for invalid field names.
|
||||
raise ParseError(*e.args)
|
||||
|
||||
def get_default_ordering(self, view):
|
||||
ordering = getattr(view, 'ordering', None)
|
||||
if isinstance(ordering, str):
|
||||
return (ordering,)
|
||||
return ordering
|
||||
|
||||
def _validate_ordering_fields(self, model, order_by):
|
||||
for field_name in order_by:
|
||||
# strip off the negation prefix `-` if it exists
|
||||
prefix = ''
|
||||
path = field_name
|
||||
if field_name[0] == '-':
|
||||
prefix = field_name[0]
|
||||
path = field_name[1:]
|
||||
try:
|
||||
field, new_path = get_field_from_path(model, path)
|
||||
new_path = '{}{}'.format(prefix, new_path)
|
||||
except (FieldError, FieldDoesNotExist) as e:
|
||||
raise ParseError(e.args[0])
|
||||
yield new_path
|
||||
@@ -30,12 +30,13 @@ from rest_framework.permissions import IsAuthenticated
|
||||
from rest_framework.renderers import StaticHTMLRenderer
|
||||
from rest_framework.negotiation import DefaultContentNegotiation
|
||||
|
||||
from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend
|
||||
from ansible_base.utils.models import get_all_field_names
|
||||
|
||||
# AWX
|
||||
from awx.api.filters import FieldLookupBackend
|
||||
from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
|
||||
from awx.main.access import optimize_queryset
|
||||
from awx.main.utils import camelcase_to_underscore, get_search_fields, getattrd, get_object_or_400, decrypt_field, get_awx_version
|
||||
from awx.main.utils.db import get_all_field_names
|
||||
from awx.main.utils.licensing import server_product_name
|
||||
from awx.main.views import ApiErrorView
|
||||
from awx.api.serializers import ResourceAccessListElementSerializer, CopySerializer
|
||||
|
||||
@@ -43,6 +43,8 @@ from rest_framework.utils.serializer_helpers import ReturnList
|
||||
# Django-Polymorphic
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
from ansible_base.utils.models import get_type_for_model
|
||||
|
||||
# AWX
|
||||
from awx.main.access import get_user_capabilities
|
||||
from awx.main.constants import ACTIVE_STATES, CENSOR_VALUE
|
||||
@@ -99,10 +101,9 @@ from awx.main.models import (
|
||||
CLOUD_INVENTORY_SOURCES,
|
||||
)
|
||||
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
|
||||
from awx.main.models.rbac import get_roles_on_resource, role_summary_fields_generator
|
||||
from awx.main.models.rbac import role_summary_fields_generator, RoleAncestorEntry
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.utils import (
|
||||
get_type_for_model,
|
||||
get_model_for_type,
|
||||
camelcase_to_underscore,
|
||||
getattrd,
|
||||
@@ -2201,6 +2202,99 @@ class BulkHostCreateSerializer(serializers.Serializer):
|
||||
return return_data
|
||||
|
||||
|
||||
class BulkHostDeleteSerializer(serializers.Serializer):
|
||||
hosts = serializers.ListField(
|
||||
allow_empty=False,
|
||||
max_length=100000,
|
||||
write_only=True,
|
||||
help_text=_('List of host IDs to be deleted, e.g. [105, 130, 131, 200]'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = ('hosts',)
|
||||
|
||||
def validate(self, attrs):
|
||||
request = self.context.get('request', None)
|
||||
max_hosts = settings.BULK_HOST_MAX_DELETE
|
||||
# Validating the number of hosts to be deleted
|
||||
if len(attrs['hosts']) > max_hosts:
|
||||
raise serializers.ValidationError(
|
||||
{
|
||||
"ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE',
|
||||
"BULK_HOST_MAX_DELETE": max_hosts,
|
||||
"Hosts_count": len(attrs['hosts']),
|
||||
}
|
||||
)
|
||||
|
||||
# Getting list of all host objects, filtered by the list of the hosts to delete
|
||||
attrs['host_qs'] = Host.objects.get_queryset().filter(pk__in=attrs['hosts']).only('id', 'inventory_id', 'name')
|
||||
|
||||
# Convert the queryset data into a dict to reduce the number of queries when
# manipulating the data
|
||||
attrs['hosts_data'] = attrs['host_qs'].values()
|
||||
|
||||
if len(attrs['host_qs']) == 0:
|
||||
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in attrs['hosts']}
|
||||
raise serializers.ValidationError({'hosts': error_hosts})
|
||||
|
||||
if len(attrs['host_qs']) < len(attrs['hosts']):
|
||||
hosts_exists = [host['id'] for host in attrs['hosts_data']]
|
||||
failed_hosts = list(set(attrs['hosts']).difference(hosts_exists))
|
||||
error_hosts = {host: "Hosts do not exist or you lack permission to delete it" for host in failed_hosts}
|
||||
raise serializers.ValidationError({'hosts': error_hosts})
|
||||
|
||||
# Getting all inventories that the hosts can be in
|
||||
inv_list = list(set([host['inventory_id'] for host in attrs['hosts_data']]))
|
||||
|
||||
# Check that the user has permission on all inventories
|
||||
errors = dict()
|
||||
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
|
||||
if request and not request.user.is_superuser:
|
||||
if request.user not in inv.admin_role:
|
||||
errors[inv.name] = "Lack permissions to delete hosts from this inventory."
|
||||
if errors != {}:
|
||||
raise PermissionDenied({"inventories": errors})
|
||||
|
||||
# Check the inventory type only if the user has permission on it.
|
||||
errors = dict()
|
||||
for inv in Inventory.objects.get_queryset().filter(pk__in=inv_list):
|
||||
if inv.kind != '':
|
||||
errors[inv.name] = "Hosts can only be deleted from manual inventories."
|
||||
if errors != {}:
|
||||
raise serializers.ValidationError({"inventories": errors})
|
||||
attrs['inventories'] = inv_list
|
||||
return attrs
|
||||
|
||||
def delete(self, validated_data):
|
||||
result = {"hosts": dict()}
|
||||
changes = {'deleted_hosts': dict()}
|
||||
for inventory in validated_data['inventories']:
|
||||
changes['deleted_hosts'][inventory] = list()
|
||||
|
||||
for host in validated_data['hosts_data']:
|
||||
result["hosts"][host["id"]] = f"The host {host['name']} was deleted"
|
||||
changes['deleted_hosts'][host["inventory_id"]].append({"host_id": host["id"], "host_name": host["name"]})
|
||||
|
||||
try:
|
||||
validated_data['host_qs'].delete()
|
||||
except Exception as e:
|
||||
raise serializers.ValidationError({"detail": _(f"cannot delete hosts, host deletion error {e}")})
|
||||
|
||||
request = self.context.get('request', None)
|
||||
|
||||
for inventory in validated_data['inventories']:
|
||||
activity_entry = ActivityStream.objects.create(
|
||||
operation='update',
|
||||
object1='inventory',
|
||||
changes=json.dumps(changes['deleted_hosts'][inventory]),
|
||||
actor=request.user,
|
||||
)
|
||||
activity_entry.inventory.add(inventory)
|
||||
|
||||
return result
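# Illustrative sketch (made-up IDs and names) of the per-inventory grouping performed
# by delete() above; one ActivityStream entry is created per affected inventory:
example_hosts_data = [
    {"id": 7, "name": "web01", "inventory_id": 3},
    {"id": 9, "name": "web02", "inventory_id": 3},
]
example_changes = {"deleted_hosts": {}}
for host in example_hosts_data:
    example_changes["deleted_hosts"].setdefault(host["inventory_id"], []).append(
        {"host_id": host["id"], "host_name": host["name"]}
    )
# example_changes["deleted_hosts"] == {3: [{"host_id": 7, "host_name": "web01"},
#                                          {"host_id": 9, "host_name": "web02"}]}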
|
||||
|
||||
|
||||
class GroupTreeSerializer(GroupSerializer):
|
||||
children = serializers.SerializerMethodField()
|
||||
|
||||
@@ -2664,6 +2758,17 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
if 'summary_fields' not in ret:
|
||||
ret['summary_fields'] = {}
|
||||
|
||||
team_content_type = ContentType.objects.get_for_model(Team)
|
||||
content_type = ContentType.objects.get_for_model(obj)
|
||||
|
||||
def get_roles_on_resource(parent_role):
|
||||
"Returns a string list of the roles a parent_role has for current obj."
|
||||
return list(
|
||||
RoleAncestorEntry.objects.filter(ancestor=parent_role, content_type_id=content_type.id, object_id=obj.id)
|
||||
.values_list('role_field', flat=True)
|
||||
.distinct()
|
||||
)
|
||||
|
||||
def format_role_perm(role):
|
||||
role_dict = {'id': role.id, 'name': role.name, 'description': role.description}
|
||||
try:
|
||||
@@ -2679,7 +2784,7 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
else:
|
||||
# Singleton roles should not be managed from this view, as per copy/edit rework spec
|
||||
role_dict['user_capabilities'] = {'unattach': False}
|
||||
return {'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
|
||||
return {'role': role_dict, 'descendant_roles': get_roles_on_resource(role)}
|
||||
|
||||
def format_team_role_perm(naive_team_role, permissive_role_ids):
|
||||
ret = []
|
||||
@@ -2705,12 +2810,9 @@ class ResourceAccessListElementSerializer(UserSerializer):
|
||||
else:
|
||||
# Singleton roles should not be managed from this view, as per copy/edit rework spec
|
||||
role_dict['user_capabilities'] = {'unattach': False}
|
||||
ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
|
||||
ret.append({'role': role_dict, 'descendant_roles': get_roles_on_resource(team_role)})
|
||||
return ret
|
||||
|
||||
team_content_type = ContentType.objects.get_for_model(Team)
|
||||
content_type = ContentType.objects.get_for_model(obj)
|
||||
|
||||
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
|
||||
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
|
||||
|
||||
@@ -3233,7 +3335,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
||||
if get_field_from_model_or_attrs('host_config_key') and not inventory:
|
||||
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
|
||||
|
||||
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
|
||||
prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
|
||||
if project is None:
|
||||
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
||||
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
||||
@@ -5356,10 +5458,16 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
||||
class InstanceLinkSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = InstanceLink
|
||||
fields = ('source', 'target', 'link_state')
|
||||
fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
|
||||
|
||||
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
||||
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
||||
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||
target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||
|
||||
def get_related(self, obj):
|
||||
res = super(InstanceLinkSerializer, self).get_related(obj)
|
||||
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
|
||||
res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
|
||||
return res
|
||||
|
||||
|
||||
class InstanceNodeSerializer(BaseSerializer):
|
||||
@@ -5376,6 +5484,7 @@ class InstanceSerializer(BaseSerializer):
|
||||
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
|
||||
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
|
||||
health_check_pending = serializers.SerializerMethodField()
|
||||
peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
|
||||
|
||||
class Meta:
|
||||
model = Instance
|
||||
@@ -5412,6 +5521,8 @@ class InstanceSerializer(BaseSerializer):
|
||||
'node_state',
|
||||
'ip_address',
|
||||
'listener_port',
|
||||
'peers',
|
||||
'peers_from_control_nodes',
|
||||
)
|
||||
extra_kwargs = {
|
||||
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
|
||||
@@ -5464,22 +5575,57 @@ class InstanceSerializer(BaseSerializer):
|
||||
def get_health_check_pending(self, obj):
|
||||
return obj.health_check_pending
|
||||
|
||||
def validate(self, data):
|
||||
if self.instance:
|
||||
if self.instance.node_type == Instance.Types.HOP:
|
||||
raise serializers.ValidationError("Hop node instances may not be changed.")
|
||||
else:
|
||||
if not settings.IS_K8S:
|
||||
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
|
||||
return data
|
||||
def validate(self, attrs):
|
||||
def get_field_from_model_or_attrs(fd):
|
||||
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||
|
||||
def check_peers_changed():
|
||||
'''
|
||||
return True if
|
||||
- 'peers' in attrs
|
||||
- the instance's existing peers do not match the peers given in attrs
|
||||
'''
|
||||
return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
|
||||
|
||||
if not self.instance and not settings.IS_K8S:
|
||||
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
|
||||
|
||||
node_type = get_field_from_model_or_attrs("node_type")
|
||||
peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
|
||||
listener_port = get_field_from_model_or_attrs("listener_port")
|
||||
peers = attrs.get('peers', [])
|
||||
|
||||
if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||
raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
|
||||
|
||||
if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
|
||||
if check_peers_changed():
|
||||
raise serializers.ValidationError(
|
||||
_("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
|
||||
)
|
||||
|
||||
if not listener_port and peers_from_control_nodes:
|
||||
raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
|
||||
|
||||
if not listener_port and self.instance and self.instance.peers_from.exists():
|
||||
raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
|
||||
|
||||
for peer in peers:
|
||||
if peer.listener_port is None:
|
||||
raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
|
||||
|
||||
if not settings.IS_K8S:
|
||||
if check_peers_changed():
|
||||
raise serializers.ValidationError(_("Cannot change peers."))
|
||||
|
||||
return super().validate(attrs)
|
||||
|
||||
def validate_node_type(self, value):
|
||||
if not self.instance:
|
||||
if value not in (Instance.Types.EXECUTION,):
|
||||
raise serializers.ValidationError("Can only create execution nodes.")
|
||||
else:
|
||||
if self.instance.node_type != value:
|
||||
raise serializers.ValidationError("Cannot change node type.")
|
||||
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
|
||||
raise serializers.ValidationError(_("Can only create execution or hop nodes."))
|
||||
|
||||
if self.instance and self.instance.node_type != value:
|
||||
raise serializers.ValidationError(_("Cannot change node type."))
|
||||
|
||||
return value
|
||||
|
||||
@@ -5487,30 +5633,41 @@ class InstanceSerializer(BaseSerializer):
|
||||
if self.instance:
|
||||
if value != self.instance.node_state:
|
||||
if not settings.IS_K8S:
|
||||
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
|
||||
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
|
||||
if value != Instance.States.DEPROVISIONING:
|
||||
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
|
||||
if self.instance.node_type not in (Instance.Types.EXECUTION,):
|
||||
raise serializers.ValidationError("Can only deprovision execution nodes.")
|
||||
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
|
||||
if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||
raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
|
||||
else:
|
||||
if value and value != Instance.States.INSTALLED:
|
||||
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
|
||||
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_hostname(self, value):
|
||||
"""
|
||||
- Hostname cannot be "localhost" - but can be something like localhost.domain
|
||||
- Cannot change the hostname of an already instantiated & initialized Instance object
|
||||
Cannot change the hostname
|
||||
"""
|
||||
if self.instance and self.instance.hostname != value:
|
||||
raise serializers.ValidationError("Cannot change hostname.")
|
||||
raise serializers.ValidationError(_("Cannot change hostname."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_listener_port(self, value):
|
||||
if self.instance and self.instance.listener_port != value:
|
||||
raise serializers.ValidationError("Cannot change listener port.")
|
||||
"""
|
||||
Cannot change listener port, unless going from none to integer, and vice versa
|
||||
"""
|
||||
if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
|
||||
raise serializers.ValidationError(_("Cannot change listener port."))
|
||||
|
||||
return value
|
||||
|
||||
def validate_peers_from_control_nodes(self, value):
|
||||
"""
|
||||
Can only enable for K8S based deployments
|
||||
"""
|
||||
if value and not settings.IS_K8S:
|
||||
raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
|
||||
|
||||
return value
|
||||
|
||||
@@ -5518,7 +5675,19 @@ class InstanceSerializer(BaseSerializer):
|
||||
class InstanceHealthCheckSerializer(BaseSerializer):
|
||||
class Meta:
|
||||
model = Instance
|
||||
read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
|
||||
read_only_fields = (
|
||||
'uuid',
|
||||
'hostname',
|
||||
'ip_address',
|
||||
'version',
|
||||
'last_health_check',
|
||||
'errors',
|
||||
'cpu',
|
||||
'memory',
|
||||
'cpu_capacity',
|
||||
'mem_capacity',
|
||||
'capacity',
|
||||
)
|
||||
fields = read_only_fields
|
||||
|
||||
|
||||
|
||||
22
awx/api/templates/api/bulk_host_delete_view.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# Bulk Host Delete

This endpoint allows the client to delete multiple hosts from inventories.
They may do this by providing a list of host IDs to be deleted.

Example:

    {
        "hosts": [1, 2, 3, 4, 5]
    }

Return data:

    {
        "hosts": {
            "1": "The host a1 was deleted",
            "2": "The host a2 was deleted",
            "3": "The host a3 was deleted",
            "4": "The host a4 was deleted",
            "5": "The host a5 was deleted"
        }
    }
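A minimal client-side sketch of calling this endpoint (the base URL and token are
illustrative assumptions; the `/api/v2/bulk/host_delete/` path comes from the URL
patterns added in this change):

    import requests

    resp = requests.post(
        "https://awx.example.org/api/v2/bulk/host_delete/",
        headers={"Authorization": "Bearer <token>"},
        json={"hosts": [1, 2, 3, 4, 5]},
    )
    resp.raise_for_status()
    print(resp.json()["hosts"])  # e.g. {"1": "The host a1 was deleted", ...}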
|
||||
@@ -3,21 +3,35 @@ receptor_group: awx
|
||||
receptor_verify: true
|
||||
receptor_tls: true
|
||||
receptor_mintls13: false
|
||||
{% if instance.node_type == "execution" %}
|
||||
receptor_work_commands:
|
||||
ansible-runner:
|
||||
command: ansible-runner
|
||||
params: worker
|
||||
allowruntimeparams: true
|
||||
verifysignature: true
|
||||
additional_python_packages:
|
||||
- ansible-runner
|
||||
{% endif %}
|
||||
custom_worksign_public_keyfile: receptor/work_public_key.pem
|
||||
custom_tls_certfile: receptor/tls/receptor.crt
|
||||
custom_tls_keyfile: receptor/tls/receptor.key
|
||||
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
||||
receptor_protocol: 'tcp'
|
||||
{% if instance.listener_port %}
|
||||
receptor_listener: true
|
||||
receptor_port: {{ instance.listener_port }}
|
||||
receptor_dependencies:
|
||||
- python39-pip
|
||||
{% else %}
|
||||
receptor_listener: false
|
||||
{% endif %}
|
||||
{% if peers %}
|
||||
receptor_peers:
|
||||
{% for peer in peers %}
|
||||
- host: {{ peer.host }}
|
||||
port: {{ peer.port }}
|
||||
protocol: tcp
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% verbatim %}
|
||||
podman_user: "{{ receptor_user }}"
|
||||
podman_group: "{{ receptor_group }}"
|
||||
|
||||
@@ -1,20 +1,16 @@
|
||||
{% verbatim %}
|
||||
---
|
||||
- hosts: all
|
||||
become: yes
|
||||
tasks:
|
||||
- name: Create the receptor user
|
||||
user:
|
||||
{% verbatim %}
|
||||
name: "{{ receptor_user }}"
|
||||
{% endverbatim %}
|
||||
shell: /bin/bash
|
||||
- name: Enable Copr repo for Receptor
|
||||
command: dnf copr enable ansible-awx/receptor -y
|
||||
{% if instance.node_type == "execution" %}
|
||||
- import_role:
|
||||
name: ansible.receptor.podman
|
||||
{% endif %}
|
||||
- import_role:
|
||||
name: ansible.receptor.setup
|
||||
- name: Install ansible-runner
|
||||
pip:
|
||||
name: ansible-runner
|
||||
executable: pip3.9
|
||||
{% endverbatim %}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
---
|
||||
collections:
|
||||
- name: ansible.receptor
|
||||
version: 1.1.0
|
||||
version: 2.0.2
|
||||
|
||||
@@ -36,6 +36,7 @@ from awx.api.views import (
|
||||
from awx.api.views.bulk import (
|
||||
BulkView,
|
||||
BulkHostCreateView,
|
||||
BulkHostDeleteView,
|
||||
BulkJobLaunchView,
|
||||
)
|
||||
|
||||
@@ -152,6 +153,7 @@ v2_urls = [
|
||||
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
|
||||
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
|
||||
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
|
||||
re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
|
||||
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
|
||||
]
|
||||
|
||||
|
||||
@@ -128,6 +128,10 @@ logger = logging.getLogger('awx.api.views')
|
||||
|
||||
|
||||
def unpartitioned_event_horizon(cls):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
|
||||
if not cursor.fetchone():
|
||||
return 0
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
|
||||
@@ -341,17 +345,18 @@ class InstanceDetail(RetrieveUpdateAPIView):
|
||||
|
||||
def update_raw_data(self, data):
|
||||
# these fields are only valid on creation of an instance, so they are unwanted on the detail view
|
||||
data.pop('listener_port', None)
|
||||
data.pop('node_type', None)
|
||||
data.pop('hostname', None)
|
||||
data.pop('ip_address', None)
|
||||
return super(InstanceDetail, self).update_raw_data(data)
|
||||
|
||||
def update(self, request, *args, **kwargs):
|
||||
r = super(InstanceDetail, self).update(request, *args, **kwargs)
|
||||
if status.is_success(r.status_code):
|
||||
obj = self.get_object()
|
||||
obj.set_capacity_value()
|
||||
obj.save(update_fields=['capacity'])
|
||||
capacity_changed = obj.set_capacity_value()
|
||||
if capacity_changed:
|
||||
obj.save(update_fields=['capacity'])
|
||||
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
|
||||
return r
|
||||
|
||||
@@ -737,8 +742,8 @@ class TeamActivityStreamList(SubListAPIView):
|
||||
qs = self.request.user.get_queryset(self.model)
|
||||
return qs.filter(
|
||||
Q(team=parent)
|
||||
| Q(project__in=models.Project.accessible_objects(parent, 'read_role'))
|
||||
| Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
|
||||
| Q(project__in=models.Project.accessible_objects(parent.member_role, 'read_role'))
|
||||
| Q(credential__in=models.Credential.accessible_objects(parent.member_role, 'read_role'))
|
||||
)
|
||||
|
||||
|
||||
@@ -1392,7 +1397,7 @@ class OrganizationCredentialList(SubListCreateAPIView):
|
||||
self.check_parent_access(organization)
|
||||
|
||||
user_visible = models.Credential.accessible_objects(self.request.user, 'read_role').all()
|
||||
org_set = models.Credential.accessible_objects(organization.admin_role, 'read_role').all()
|
||||
org_set = models.Credential.objects.filter(organization=organization)
|
||||
|
||||
if self.request.user.is_superuser or self.request.user.is_system_auditor:
|
||||
return org_set
|
||||
|
||||
@@ -34,6 +34,7 @@ class BulkView(APIView):
|
||||
'''List top level resources'''
|
||||
data = OrderedDict()
|
||||
data['host_create'] = reverse('api:bulk_host_create', request=request)
|
||||
data['host_delete'] = reverse('api:bulk_host_delete', request=request)
|
||||
data['job_launch'] = reverse('api:bulk_job_launch', request=request)
|
||||
return Response(data)
|
||||
|
||||
@@ -72,3 +73,20 @@ class BulkHostCreateView(GenericAPIView):
|
||||
result = serializer.create(serializer.validated_data)
|
||||
return Response(result, status=status.HTTP_201_CREATED)
|
||||
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
|
||||
class BulkHostDeleteView(GenericAPIView):
|
||||
permission_classes = [IsAuthenticated]
|
||||
model = Host
|
||||
serializer_class = serializers.BulkHostDeleteSerializer
|
||||
allowed_methods = ['GET', 'POST', 'OPTIONS']
|
||||
|
||||
def get(self, request):
|
||||
return Response({"detail": "Bulk delete hosts with this endpoint"}, status=status.HTTP_200_OK)
|
||||
|
||||
def post(self, request):
|
||||
serializer = serializers.BulkHostDeleteSerializer(data=request.data, context={'request': request})
|
||||
if serializer.is_valid():
|
||||
result = serializer.delete(serializer.validated_data)
|
||||
return Response(result, status=status.HTTP_201_CREATED)
|
||||
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
||||
|
||||
@@ -6,6 +6,8 @@ import io
|
||||
import ipaddress
|
||||
import os
|
||||
import tarfile
|
||||
import time
|
||||
import re
|
||||
|
||||
import asn1
|
||||
from awx.api import serializers
|
||||
@@ -40,6 +42,8 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
|
||||
# │ │ └── receptor.key
|
||||
# │ └── work-public-key.pem
|
||||
# └── requirements.yml
|
||||
|
||||
|
||||
class InstanceInstallBundle(GenericAPIView):
|
||||
name = _('Install Bundle')
|
||||
model = models.Instance
|
||||
@@ -49,9 +53,9 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
def get(self, request, *args, **kwargs):
|
||||
instance_obj = self.get_object()
|
||||
|
||||
if instance_obj.node_type not in ('execution',):
|
||||
if instance_obj.node_type not in ('execution', 'hop'):
|
||||
return Response(
|
||||
data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
|
||||
data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
|
||||
@@ -66,37 +70,37 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
# generate and write the receptor key to receptor/tls/receptor.key in the tar file
|
||||
key, cert = generate_receptor_tls(instance_obj)
|
||||
|
||||
def tar_addfile(tarinfo, filecontent):
|
||||
tarinfo.mtime = time.time()
|
||||
tarinfo.size = len(filecontent)
|
||||
tar.addfile(tarinfo, io.BytesIO(filecontent))
|
||||
|
||||
key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
|
||||
key_tarinfo.size = len(key)
|
||||
tar.addfile(key_tarinfo, io.BytesIO(key))
|
||||
tar_addfile(key_tarinfo, key)
|
||||
|
||||
cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
|
||||
cert_tarinfo.size = len(cert)
|
||||
tar.addfile(cert_tarinfo, io.BytesIO(cert))
|
||||
tar_addfile(cert_tarinfo, cert)
|
||||
|
||||
# generate and write install_receptor.yml to the tar file
|
||||
playbook = generate_playbook().encode('utf-8')
|
||||
playbook = generate_playbook(instance_obj).encode('utf-8')
|
||||
playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
|
||||
playbook_tarinfo.size = len(playbook)
|
||||
tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
|
||||
tar_addfile(playbook_tarinfo, playbook)
|
||||
|
||||
# generate and write inventory.yml to the tar file
|
||||
inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
|
||||
inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
|
||||
inventory_yml_tarinfo.size = len(inventory_yml)
|
||||
tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
|
||||
tar_addfile(inventory_yml_tarinfo, inventory_yml)
|
||||
|
||||
# generate and write group_vars/all.yml to the tar file
|
||||
group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
|
||||
group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
|
||||
group_vars_tarinfo.size = len(group_vars)
|
||||
tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
|
||||
tar_addfile(group_vars_tarinfo, group_vars)
|
||||
|
||||
# generate and write requirements.yml to the tar file
|
||||
requirements_yml = generate_requirements_yml().encode('utf-8')
|
||||
requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
|
||||
requirements_yml_tarinfo.size = len(requirements_yml)
|
||||
tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
|
||||
tar_addfile(requirements_yml_tarinfo, requirements_yml)
|
||||
|
||||
# respond with the tarfile
|
||||
f.seek(0)
|
||||
@@ -105,8 +109,10 @@ class InstanceInstallBundle(GenericAPIView):
|
||||
return response
|
||||
|
||||
|
||||
def generate_playbook():
|
||||
return render_to_string("instance_install_bundle/install_receptor.yml")
|
||||
def generate_playbook(instance_obj):
|
||||
playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
|
||||
# collapse consecutive newlines into a single newline
|
||||
return re.sub(r'\n+', '\n', playbook_yaml)
|
||||
|
||||
|
||||
def generate_requirements_yml():
|
||||
@@ -118,7 +124,12 @@ def generate_inventory_yml(instance_obj):
|
||||
|
||||
|
||||
def generate_group_vars_all_yml(instance_obj):
|
||||
return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
|
||||
peers = []
|
||||
for instance in instance_obj.peers.all():
|
||||
peers.append(dict(host=instance.hostname, port=instance.listener_port))
|
||||
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
|
||||
# collapse consecutive newlines into a single newline
|
||||
return re.sub(r'\n+', '\n', all_yaml)
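# Quick, self-contained check of the newline collapsing used by both renderers above,
# plus the shape of the peers context (the sample string and peer entry are illustrative):
import re
assert re.sub(r'\n+', '\n', 'a\n\n\nb') == 'a\nb'
example_peers = [dict(host='hop1.example.org', port=27199)]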
|
||||
|
||||
|
||||
def generate_receptor_tls(instance_obj):
|
||||
|
||||
@@ -7,8 +7,10 @@ import json
|
||||
# Django
|
||||
from django.db import models
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import CreatedModifiedModel, prevent_search
|
||||
from awx.main.models.base import CreatedModifiedModel
|
||||
from awx.main.utils import encrypt_field
|
||||
from awx.conf import settings_registry
|
||||
|
||||
|
||||
@@ -418,6 +418,10 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
# If the last line did not return, that means we hit a database error
|
||||
# in that case, we should not have a local cache value
|
||||
# thus, return empty as a signal to use the default
|
||||
return empty
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
|
||||
@@ -13,6 +13,7 @@ from unittest import mock
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db.utils import Error as DBError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import pytest
|
||||
|
||||
@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR=[])
|
||||
def test_getattr_with_database_error(settings):
|
||||
"""
|
||||
If a setting is defined via the registry and has a null-ish default which is not None
|
||||
then referencing that setting during a database outage should give that default
|
||||
This is a regression test for a bug where it would return None.
|
||||
"""
|
||||
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
|
||||
mock_ensure.side_effect = DBError('for test')
|
||||
assert settings.AWX_VAR == []
|
||||
|
||||
@@ -20,11 +20,12 @@ from rest_framework.exceptions import ParseError, PermissionDenied
|
||||
# Django OAuth Toolkit
|
||||
from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken
|
||||
|
||||
from ansible_base.utils.validation import to_python_boolean
|
||||
|
||||
# AWX
|
||||
from awx.main.utils import (
|
||||
get_object_or_400,
|
||||
get_pk_from_dict,
|
||||
to_python_boolean,
|
||||
get_licenser,
|
||||
)
|
||||
from awx.main.models import (
|
||||
@@ -79,7 +80,6 @@ __all__ = [
|
||||
'get_user_queryset',
|
||||
'check_user_access',
|
||||
'check_user_access_with_errors',
|
||||
'user_accessible_objects',
|
||||
'consumer_access',
|
||||
]
|
||||
|
||||
@@ -136,10 +136,6 @@ def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
|
||||
|
||||
def user_accessible_objects(user, role_name):
|
||||
return ResourceMixin._accessible_objects(User, user, role_name)
|
||||
|
||||
|
||||
def get_user_queryset(user, model_class):
|
||||
"""
|
||||
Return a queryset for the given model_class containing only the instances
|
||||
|
||||
@@ -694,16 +694,18 @@ register(
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
|
||||
field_class=fields.IntegerField,
|
||||
default=1,
|
||||
default=131072,
|
||||
min_value=1,
|
||||
label=_('Maximum disk persistence for external log aggregation (in GB)'),
|
||||
label=_('Maximum number of messages that can be stored in the log action queue'),
|
||||
help_text=_(
|
||||
'Amount of data to store (in gigabytes) during an outage of '
|
||||
'the external log aggregator (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. '
|
||||
'Notably, this is used for the rsyslogd main queue (for input messages).'
|
||||
'Defines how large the rsyslog action queue can grow in number of messages '
|
||||
'stored. This can have an impact on memory utilization. When the queue '
|
||||
'reaches 75% of this number, the queue will start writing to disk '
|
||||
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
|
||||
'DEBUG messages will start to be discarded (queue.discardMark with '
|
||||
'queue.discardSeverity=5).'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
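# Worked example of the thresholds described in the help text above, using the default
# queue size (exact rsyslog rounding may differ slightly):
queue_size = 131072
high_watermark = int(queue_size * 0.75)  # 98304 -> queue starts writing to disk
discard_mark = int(queue_size * 0.90)    # 117964 -> NOTICE/INFO/DEBUG start being discarded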
|
||||
@@ -718,8 +720,7 @@ register(
|
||||
'Amount of data to store (in gigabytes) if an rsyslog action takes time '
|
||||
'to process an incoming message (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
|
||||
'Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified '
|
||||
'by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
@@ -826,6 +827,16 @@ register(
|
||||
category_slug='bulk',
|
||||
)
|
||||
|
||||
register(
|
||||
'BULK_HOST_MAX_DELETE',
|
||||
field_class=fields.IntegerField,
|
||||
default=250,
|
||||
label=_('Max number of hosts to allow to be deleted in a single bulk action'),
|
||||
help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
)
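# Illustrative effect of this setting on the bulk host delete serializer above: with the
# default of 250, submitting 300 host IDs fails validation with a payload shaped like this
# (counts are an example only):
example_error = {
    "ERROR": 'Number of hosts exceeds system setting BULK_HOST_MAX_DELETE',
    "BULK_HOST_MAX_DELETE": 250,
    "Hosts_count": 300,
}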
|
||||
|
||||
register(
|
||||
'UI_NEXT',
|
||||
field_class=fields.BooleanField,
|
||||
|
||||
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import requests
|
||||
import base64
|
||||
import binascii
|
||||
|
||||
|
||||
conjur_inputs = {
|
||||
@@ -50,6 +52,13 @@ conjur_inputs = {
|
||||
}
|
||||
|
||||
|
||||
def _is_base64(s: str) -> bool:
|
||||
try:
|
||||
return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
|
||||
except binascii.Error:
|
||||
return False
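# Quick sanity checks for the helper above (sample strings only):
assert _is_base64('dGVzdA==') is True     # round-trips through b64decode/b64encode
assert _is_base64('not-base64') is False  # b64decode raises binascii.Error internally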
|
||||
|
||||
|
||||
def conjur_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
api_key = kwargs['api_key']
|
||||
@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
|
||||
token = resp.content.decode('utf-8')
|
||||
|
||||
lookup_kwargs = {
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token)},
|
||||
'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
|
||||
'allow_redirects': False,
|
||||
}
|
||||
|
||||
|
||||
@@ -2,25 +2,29 @@ from .plugin import CredentialPlugin
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from thycotic.secrets.vault import SecretsVault
|
||||
|
||||
from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
|
||||
from base64 import b64decode
|
||||
|
||||
dsv_inputs = {
|
||||
'fields': [
|
||||
{
|
||||
'id': 'tenant',
|
||||
'label': _('Tenant'),
|
||||
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
|
||||
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'tld',
|
||||
'label': _('Top-level Domain (TLD)'),
|
||||
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
|
||||
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
|
||||
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
|
||||
'choices': ['ca', 'com', 'com.au', 'eu'],
|
||||
'default': 'com',
|
||||
},
|
||||
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
|
||||
{
|
||||
'id': 'client_id',
|
||||
'label': _('Client ID'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'client_secret',
|
||||
'label': _('Client Secret'),
|
||||
@@ -41,8 +45,16 @@ dsv_inputs = {
|
||||
'help_text': _('The field to extract from the secret'),
|
||||
'type': 'string',
|
||||
},
|
||||
{
|
||||
'id': 'secret_decoding',
|
||||
'label': _('Should the secret be base64 decoded?'),
|
||||
'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
|
||||
'choices': ['No Decoding', 'Decode Base64'],
|
||||
'type': 'string',
|
||||
'default': 'No Decoding',
|
||||
},
|
||||
],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
|
||||
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
|
||||
}
|
||||
|
||||
if settings.DEBUG:
|
||||
@@ -51,12 +63,32 @@ if settings.DEBUG:
|
||||
'id': 'url_template',
|
||||
'label': _('URL template'),
|
||||
'type': 'string',
|
||||
'default': 'https://{}.secretsvaultcloud.{}/v1',
|
||||
'default': 'https://{}.secretsvaultcloud.{}',
|
||||
}
|
||||
)
|
||||
|
||||
dsv_plugin = CredentialPlugin(
|
||||
'Thycotic DevOps Secrets Vault',
|
||||
dsv_inputs,
|
||||
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
|
||||
)
|
||||
|
||||
def dsv_backend(**kwargs):
|
||||
tenant_name = kwargs['tenant']
|
||||
tenant_tld = kwargs.get('tld', 'com')
|
||||
tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
|
||||
client_id = kwargs['client_id']
|
||||
client_secret = kwargs['client_secret']
|
||||
secret_path = kwargs['path']
|
||||
secret_field = kwargs['secret_field']
|
||||
# provide a default value to remain backward compatible with secrets that have not specified this option
|
||||
secret_decoding = kwargs.get('secret_decoding', 'No Decoding')
|
||||
|
||||
tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))
|
||||
|
||||
authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
|
||||
dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)
|
||||
|
||||
# files are typically uploaded to DSV base64-encoded, so only decode the secret when asked to
|
||||
if secret_decoding == 'Decode Base64':
|
||||
return b64decode(dsv_secret['data'][secret_field]).decode()
|
||||
|
||||
return dsv_secret['data'][secret_field]
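# The 'Decode Base64' branch above is a plain base64 decode, e.g. (sample value only):
assert b64decode('LS0tLS1CRUdJTg==').decode() == '-----BEGIN'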
|
||||
|
||||
|
||||
dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
|
||||
|
||||
@@ -41,6 +41,34 @@ base_inputs = {
|
||||
'secret': True,
|
||||
'help_text': _('The Secret ID for AppRole Authentication'),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_public',
|
||||
'label': _('Client Certificate'),
|
||||
'type': 'string',
|
||||
'multiline': True,
|
||||
'help_text': _(
|
||||
'The PEM-encoded client certificate used for TLS client authentication.'
|
||||
' This should include the certificate and any intermediate certificates.'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_private',
|
||||
'label': _('Client Certificate Key'),
|
||||
'type': 'string',
|
||||
'multiline': True,
|
||||
'secret': True,
|
||||
'help_text': _('The certificate private key used for TLS client authentication.'),
|
||||
},
|
||||
{
|
||||
'id': 'client_cert_role',
|
||||
'label': _('TLS Authentication Role'),
|
||||
'type': 'string',
|
||||
'multiline': False,
|
||||
'help_text': _(
|
||||
'The role configured in Hashicorp Vault for TLS client authentication.'
|
||||
' If not provided, Hashicorp Vault may assign roles based on the certificate used.'
|
||||
),
|
||||
},
|
||||
{
|
||||
'id': 'namespace',
|
||||
'label': _('Namespace name (Vault Enterprise only)'),
|
||||
@@ -164,8 +192,10 @@ def handle_auth(**kwargs):
|
||||
token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
|
||||
elif kwargs.get('kubernetes_role'):
|
||||
token = method_auth(**kwargs, auth_param=kubernetes_auth(**kwargs))
|
||||
elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
|
||||
token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs))
|
||||
else:
|
||||
raise Exception('Either token or AppRole/Kubernetes authentication parameters must be set')
|
||||
raise Exception('Either a token or AppRole, Kubernetes, or TLS authentication parameters must be set')
|
||||
|
||||
return token
|
||||
|
||||
@@ -181,6 +211,10 @@ def kubernetes_auth(**kwargs):
|
||||
return {'role': kwargs['kubernetes_role'], 'jwt': jwt}
|
||||
|
||||
|
||||
def client_cert_auth(**kwargs):
|
||||
return {'name': kwargs.get('client_cert_role')}
|
||||
|
||||
|
||||
def method_auth(**kwargs):
|
||||
# get auth method specific params
|
||||
request_kwargs = {'json': kwargs['auth_param'], 'timeout': 30}
|
||||
@@ -193,13 +227,22 @@ def method_auth(**kwargs):
|
||||
cacert = kwargs.get('cacert', None)
|
||||
|
||||
sess = requests.Session()
|
||||
|
||||
# Namespace support
|
||||
if kwargs.get('namespace'):
|
||||
sess.headers['X-Vault-Namespace'] = kwargs['namespace']
|
||||
request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
|
||||
with CertFiles(cacert) as cert:
|
||||
request_kwargs['verify'] = cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
# TLS client certificate support
|
||||
if kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
|
||||
# Add client cert to requests Session before making call
|
||||
with CertFiles(kwargs['client_cert_public'], key=kwargs['client_cert_private']) as client_cert:
|
||||
sess.cert = client_cert
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
else:
|
||||
# Make call without client certificate
|
||||
resp = sess.post(request_url, **request_kwargs)
|
||||
resp.raise_for_status()
|
||||
token = resp.json()['auth']['client_token']
|
||||
return token
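# Minimal standalone sketch of presenting a TLS client certificate with requests; the
# file paths, URL and role name are assumptions for illustration only:
import requests
sess = requests.Session()
sess.cert = ('/tmp/vault_client.crt', '/tmp/vault_client.key')  # (certfile, keyfile)
resp = sess.post('https://vault.example.org:8200/v1/auth/cert/login', json={'name': 'my-cert-role'}, timeout=30)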
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
from .plugin import CredentialPlugin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
|
||||
try:
|
||||
from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
|
||||
except ImportError:
|
||||
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
|
||||
|
||||
tss_inputs = {
|
||||
'fields': [
|
||||
@@ -51,7 +54,9 @@ tss_inputs = {
|
||||
|
||||
def tss_backend(**kwargs):
|
||||
if kwargs.get("domain"):
|
||||
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
|
||||
authorizer = DomainPasswordGrantAuthorizer(
|
||||
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
|
||||
)
|
||||
else:
|
||||
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
|
||||
secret_server = SecretServer(kwargs['server_url'], authorizer)
|
||||
|
||||
@@ -37,8 +37,11 @@ class Control(object):
|
||||
def running(self, *args, **kwargs):
|
||||
return self.control_with_reply('running', *args, **kwargs)
|
||||
|
||||
def cancel(self, task_ids, *args, **kwargs):
|
||||
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
|
||||
def cancel(self, task_ids, with_reply=True):
|
||||
if with_reply:
|
||||
return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
|
||||
else:
|
||||
self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})
|
||||
|
||||
def schedule(self, *args, **kwargs):
|
||||
return self.control_with_reply('schedule', *args, **kwargs)
|
||||
|
||||
@@ -89,8 +89,9 @@ class AWXConsumerBase(object):
|
||||
if task_ids and not msg:
|
||||
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
|
||||
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(reply_queue, json.dumps(msg))
|
||||
if reply_queue is not None:
|
||||
with pg_bus_conn() as conn:
|
||||
conn.notify(reply_queue, json.dumps(msg))
|
||||
elif control == 'reload':
|
||||
for worker in self.pool.workers:
|
||||
worker.quit()
|
||||
@@ -161,7 +162,7 @@ class AWXConsumerRedis(AWXConsumerBase):
|
||||
class AWXConsumerPG(AWXConsumerBase):
|
||||
def __init__(self, *args, schedule=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
|
||||
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
|
||||
# if no successful loops have run since startup, then we should fail right away
|
||||
self.pg_is_down = True # set so that we fail if we get database errors on startup
|
||||
init_time = time.time()
|
||||
|
||||
@@ -24,6 +24,9 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batches of X events. Defaults to 500.'
|
||||
)
|
||||
|
||||
def init_logging(self):
|
||||
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
|
||||
@@ -48,7 +51,7 @@ class Command(BaseCommand):
|
||||
else:
|
||||
pks_to_delete.add(asobj.pk)
|
||||
# Cleanup objects in batches instead of deleting each one individually.
|
||||
if len(pks_to_delete) >= 500:
|
||||
if len(pks_to_delete) >= self.batch_size:
|
||||
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
|
||||
n_deleted_items += len(pks_to_delete)
|
||||
pks_to_delete.clear()
|
||||
@@ -63,4 +66,5 @@ class Command(BaseCommand):
|
||||
self.days = int(options.get('days', 30))
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 500))
|
||||
self.cleanup_activitystream()
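# With the default batch size, e.g. 1234 matching entries are removed in three DELETE
# queries: two full batches of 500 plus a final flush of 234 (numbers are an example only):
full_batches, remainder = divmod(1234, 500)  # -> (2, 234)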
|
||||
|
||||
@@ -1,22 +1,22 @@
|
||||
from awx.main.models import HostMetric
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from awx.main.tasks.host_metrics import HostMetricTask
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Run soft-deleting of HostMetrics
|
||||
This command provides cleanup task for HostMetric model.
|
||||
There are two modes, which run in following order:
|
||||
- soft cleanup
|
||||
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
|
||||
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
|
||||
- - updates columns delete, deleted_counter and last_deleted
|
||||
- hard cleanup
|
||||
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
|
||||
This operation happens after the soft deletion has finished.
|
||||
"""
|
||||
|
||||
help = 'Run soft-deleting of HostMetrics'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
|
||||
help = 'Run soft and hard-deletion of HostMetrics'
|
||||
|
||||
def handle(self, *args, **options):
|
||||
months_ago = options.get('months-ago') or None
|
||||
|
||||
if not months_ago:
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
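# Sketch of how the month thresholds translate into cutoff dates; using relativedelta here
# is an assumption for illustration, the task itself may compute the cutoffs differently:
from django.utils.timezone import now
from dateutil.relativedelta import relativedelta
soft_cutoff = now() - relativedelta(months=12)  # metrics not automated since this date are soft-deleted
hard_cutoff = now() - relativedelta(months=36)  # metrics not automated since this date are erased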
|
||||
|
||||
@@ -9,6 +9,7 @@ import re
|
||||
|
||||
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction, connection
|
||||
from django.db.models import Min, Max
|
||||
@@ -150,6 +151,9 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batches of X jobs. Defaults to 100000.'
|
||||
)
|
||||
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
|
||||
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
|
||||
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
|
||||
@@ -195,18 +199,58 @@ class Command(BaseCommand):
|
||||
delete_meta.delete_jobs()
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def _cascade_delete_job_events(self, model, pk_list):
|
||||
def has_unpartitioned_table(self, model):
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _delete_unpartitioned_table(self, model):
|
||||
"If the unpartitioned table is no longer necessary, it will drop the table"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
if not self.has_unpartitioned_table(model):
|
||||
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
|
||||
return
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
|
||||
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
|
||||
row = cursor.fetchone()
|
||||
last_created = row[0]
|
||||
|
||||
if last_created:
|
||||
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
|
||||
else:
|
||||
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
|
||||
|
||||
if (last_created is None) or (last_created < self.cutoff):
|
||||
self.logger.warning(
|
||||
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
|
||||
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
|
||||
)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
|
||||
|
||||
def _delete_unpartitioned_events(self, model, pk_list):
|
||||
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
rel_name = model().event_parent_key
|
||||
|
||||
# Bail if the unpartitioned table does not exist anymore
|
||||
if not self.has_unpartitioned_table(model):
|
||||
return
|
||||
|
||||
# Table still exists, delete individual unpartitioned events
|
||||
if pk_list:
|
||||
with connection.cursor() as cursor:
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
|
||||
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
|
||||
pk_list_csv = ','.join(map(str, pk_list))
|
||||
rel_name = model().event_parent_key
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")
|
||||
|
||||
def cleanup_jobs(self):
|
||||
batch_size = 100000
|
||||
|
||||
# Hack to avoid doing N+1 queries as each item in the Job query set does
|
||||
# an individual query to get the underlying UnifiedJob.
|
||||
Job.polymorphic_super_sub_accessors_replaced = True
|
||||
@@ -221,13 +265,14 @@ class Command(BaseCommand):
|
||||
deleted = 0
|
||||
info = qs.aggregate(min=Min('id'), max=Max('id'))
|
||||
if info['min'] is not None:
|
||||
for start in range(info['min'], info['max'] + 1, batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
|
||||
for start in range(info['min'], info['max'] + 1, self.batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
|
||||
pk_list = qs_batch.values_list('id', flat=True)
|
||||
|
||||
_, results = qs_batch.delete()
|
||||
deleted += results['main.Job']
|
||||
self._cascade_delete_job_events(Job, pk_list)
|
||||
# Avoid dropping the job event table in case we have interacted with it already
|
||||
self._delete_unpartitioned_events(Job, pk_list)
|
||||
|
||||
return skipped, deleted
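# Example of the id-range batching above: with min=1, max=250000 and the default batch
# size, the loop visits start values 1, 100001 and 200001 (values are illustrative):
example_starts = list(range(1, 250000 + 1, 100000))  # -> [1, 100001, 200001]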
|
||||
|
||||
@@ -250,7 +295,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(AdHocCommand, pk_list)
|
||||
self._delete_unpartitioned_events(AdHocCommand, pk_list)
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -278,7 +323,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(ProjectUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(ProjectUpdate, pk_list)
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -306,7 +351,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(InventoryUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(InventoryUpdate, pk_list)
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -330,7 +375,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(SystemJob, pk_list)
|
||||
self._delete_unpartitioned_events(SystemJob, pk_list)
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -375,12 +420,12 @@ class Command(BaseCommand):
|
||||
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
self.days = int(options.get('days', 90))
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 100000))
|
||||
try:
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
except OverflowError:
|
||||
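The except OverflowError branch above exists because datetime.timedelta rejects day counts beyond its supported range; a quick, self-contained demonstration of the failure mode it guards against:

import datetime

try:
    datetime.timedelta(days=10**12)  # far beyond timedelta.max.days (999,999,999)
except OverflowError as exc:
    print(f"--days value out of range: {exc}")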
@@ -402,19 +447,29 @@ class Command(BaseCommand):
|
||||
del s.receivers[:]
|
||||
s.sender_receivers_cache.clear()
|
||||
|
||||
for m in model_names:
|
||||
if m not in models_to_cleanup:
|
||||
continue
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables
|
||||
if not self.dry_run:
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
|
||||
unified_job_class = apps.get_model('main', unified_job_class_name)
|
||||
try:
|
||||
unified_job_class().event_class
|
||||
except (NotImplementedError, AttributeError):
|
||||
continue # no need to run this for models without events
|
||||
self._delete_unpartitioned_table(unified_job_class)
|
||||
|
||||
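The name mangling above converts a cleanup key such as 'management_jobs' into a unified job class name before calling apps.get_model. A small standalone check of that transformation (the helper function is illustrative; the command does this inline):

def cleanup_key_to_model_name(key):
    """Mirror of the inline expression: strip the plural 's', title-case, fix up SystemJob."""
    return key[:-1].title().replace('Management', 'System').replace('_', '')


assert cleanup_key_to_model_name('jobs') == 'Job'
assert cleanup_key_to_model_name('ad_hoc_commands') == 'AdHocCommand'
assert cleanup_key_to_model_name('project_updates') == 'ProjectUpdate'
assert cleanup_key_to_model_name('management_jobs') == 'SystemJob'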
@@ -25,17 +25,20 @@ class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
|
||||
parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
|
||||
parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
|
||||
parser.add_argument('--uuid', type=str, help="Instance UUID")
|
||||
|
||||
def _register_hostname(self, hostname, node_type, uuid):
|
||||
def _register_hostname(self, hostname, node_type, uuid, listener_port):
|
||||
if not hostname:
|
||||
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
raise CommandError('Registering with values from settings only intended for use in K8s installs')
|
||||
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
|
||||
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID)
|
||||
(changed, instance) = Instance.objects.register(
|
||||
ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
|
||||
)
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME,
|
||||
@@ -48,7 +51,7 @@ class Command(BaseCommand):
|
||||
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
|
||||
).register()
|
||||
else:
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid)
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
|
||||
if changed:
|
||||
print("Successfully registered instance {}".format(hostname))
|
||||
else:
|
||||
@@ -58,6 +61,6 @@ class Command(BaseCommand):
|
||||
@transaction.atomic
|
||||
def handle(self, **options):
|
||||
self.changed = False
|
||||
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
|
||||
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
|
||||
if self.changed:
|
||||
print("(changed: True)")
|
||||
|
||||
@@ -115,21 +115,25 @@ class InstanceManager(models.Manager):
|
||||
return node[0]
|
||||
raise RuntimeError("No instance found with the current cluster host id")
|
||||
|
||||
def register(self, node_uuid=None, hostname=None, ip_address=None, node_type='hybrid', defaults=None):
|
||||
def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
|
||||
if not hostname:
|
||||
hostname = settings.CLUSTER_HOST_ID
|
||||
|
||||
if not ip_address:
|
||||
ip_address = ""
|
||||
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
# detect any instances with the same IP address.
|
||||
# if one exists, set it to None
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = None
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
# if one exists, set it to ""
|
||||
if ip_address:
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = ""
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
|
||||
# Return existing instance that matches hostname or UUID (default to UUID)
|
||||
if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
|
||||
@@ -157,6 +161,9 @@ class InstanceManager(models.Manager):
|
||||
if instance.node_type != node_type:
|
||||
instance.node_type = node_type
|
||||
update_fields.append('node_type')
|
||||
if instance.listener_port != listener_port:
|
||||
instance.listener_port = listener_port
|
||||
update_fields.append('listener_port')
|
||||
if update_fields:
|
||||
instance.save(update_fields=update_fields)
|
||||
return (True, instance)
|
||||
@@ -167,12 +174,11 @@ class InstanceManager(models.Manager):
|
||||
create_defaults = {
|
||||
'node_state': Instance.States.INSTALLED,
|
||||
'capacity': 0,
|
||||
'listener_port': 27199,
|
||||
}
|
||||
if defaults is not None:
|
||||
create_defaults.update(defaults)
|
||||
uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
|
||||
if node_type == 'execution' and 'version' not in create_defaults:
|
||||
create_defaults['version'] = RECEPTOR_PENDING
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
|
||||
return (True, instance)
|
||||
|
||||
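As a usage sketch of the extended register() signature shown above (hostnames and the port value below are placeholders, not values from this changeset):

from awx.main.models import Instance

# Execution or hybrid node that Receptor should listen on; 27199 is only an example port.
changed, instance = Instance.objects.register(hostname='exec1.example.org', node_type='execution', listener_port=27199)

# Hop or control nodes may register without a listener port now that the parameter defaults to None.
changed, instance = Instance.objects.register(hostname='hop1.example.org', node_type='hop', listener_port=None)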
@@ -9,6 +9,7 @@ from django.conf import settings
|
||||
# AWX
|
||||
import awx.main.fields
|
||||
from awx.main.models import Host
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def replaces():
|
||||
@@ -131,9 +132,11 @@ class Migration(migrations.Migration):
|
||||
help_text='If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
|
||||
),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
sql="CREATE INDEX host_ansible_facts_default_gin ON {} USING gin(ansible_facts jsonb_path_ops);".format(Host._meta.db_table),
|
||||
reverse_sql='DROP INDEX host_ansible_facts_default_gin;',
|
||||
sqlite_sql=dbawaremigrations.RunSQL.noop,
|
||||
sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
|
||||
),
|
||||
# SCM file-based inventories
|
||||
migrations.AddField(
|
||||
|
||||
@@ -3,24 +3,27 @@ from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
tables_to_drop = [
|
||||
'celery_taskmeta',
|
||||
'celery_tasksetmeta',
|
||||
'djcelery_crontabschedule',
|
||||
'djcelery_intervalschedule',
|
||||
'djcelery_periodictask',
|
||||
'djcelery_periodictasks',
|
||||
'djcelery_taskstate',
|
||||
'djcelery_workerstate',
|
||||
'djkombu_message',
|
||||
'djkombu_queue',
|
||||
]
|
||||
postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
|
||||
sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0049_v330_validate_instance_capacity_adjustment'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunSQL([("DROP TABLE IF EXISTS {} CASCADE;".format(table))])
|
||||
for table in (
|
||||
'celery_taskmeta',
|
||||
'celery_tasksetmeta',
|
||||
'djcelery_crontabschedule',
|
||||
'djcelery_intervalschedule',
|
||||
'djcelery_periodictask',
|
||||
'djcelery_periodictasks',
|
||||
'djcelery_taskstate',
|
||||
'djcelery_workerstate',
|
||||
'djkombu_message',
|
||||
'djkombu_queue',
|
||||
)
|
||||
]
|
||||
operations = [dbawaremigrations.RunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]
|
||||
|
||||
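To make the paired-statement construction above concrete, here is a self-contained sketch of the same zip pattern with a stand-in operation class (FakeRunSQL is a placeholder used only for illustration, not the db-aware helper from this changeset):

class FakeRunSQL:
    """Stand-in that just records the SQL it was given."""

    def __init__(self, sql, sqlite_sql=None):
        self.sql = sql
        self.sqlite_sql = sqlite_sql


tables = ['celery_taskmeta', 'djkombu_queue']
postgres_sql = ([f"DROP TABLE IF EXISTS {t} CASCADE;"] for t in tables)
sqlite_sql = ([f"DROP TABLE IF EXISTS {t};"] for t in tables)

# zip() walks both generators in lockstep, producing one operation per table.
operations = [FakeRunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]
assert operations[0].sql == ["DROP TABLE IF EXISTS celery_taskmeta CASCADE;"]
assert operations[1].sqlite_sql == ["DROP TABLE IF EXISTS djkombu_queue;"]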
@@ -2,6 +2,8 @@
|
||||
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
|
||||
# see: https://github.com/ansible/awx/issues/6010
|
||||
@@ -24,6 +26,11 @@ def migrate_event_data(apps, schema_editor):
|
||||
cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')
|
||||
|
||||
|
||||
def migrate_event_data_sqlite(apps, schema_editor):
|
||||
# TODO: cmeyers fill this in
|
||||
return
|
||||
|
||||
|
||||
class FakeAlterField(migrations.AlterField):
|
||||
def database_forwards(self, *args):
|
||||
# this is intentionally left blank, because we're
|
||||
@@ -37,7 +44,7 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_event_data),
|
||||
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
|
||||
FakeAlterField(
|
||||
model_name='adhoccommandevent',
|
||||
name='id',
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
|
||||
# see: https://github.com/ansible/awx/issues/9039
|
||||
@@ -59,6 +61,10 @@ def migrate_event_data(apps, schema_editor):
|
||||
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')
|
||||
|
||||
|
||||
def migrate_event_data_sqlite(apps, schema_editor):
|
||||
return None
|
||||
|
||||
|
||||
class FakeAddField(migrations.AddField):
|
||||
def database_forwards(self, *args):
|
||||
# this is intentionally left blank, because we're
|
||||
@@ -72,7 +78,7 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_event_data),
|
||||
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
|
||||
FakeAddField(
|
||||
model_name='jobevent',
|
||||
name='job_created',
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
import awx.main.models.notifications
|
||||
from django.db import migrations, models
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
@@ -104,11 +106,12 @@ class Migration(migrations.Migration):
|
||||
name='deleted_actor',
|
||||
field=models.JSONField(null=True),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_activitystream RENAME setting TO setting_old;
|
||||
ALTER TABLE main_activitystream ALTER COLUMN setting_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_activitystream RENAME setting TO setting_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
@@ -121,11 +124,12 @@ class Migration(migrations.Migration):
|
||||
name='setting',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_job ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='job',
|
||||
@@ -138,11 +142,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_joblaunchconfig ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='joblaunchconfig',
|
||||
@@ -155,11 +160,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_joblaunchconfig ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='joblaunchconfig',
|
||||
@@ -172,11 +178,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_notification RENAME body TO body_old;
|
||||
ALTER TABLE main_notification ALTER COLUMN body_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_notification RENAME body TO body_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='notification',
|
||||
@@ -189,11 +196,12 @@ class Migration(migrations.Migration):
|
||||
name='body',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old;
|
||||
ALTER TABLE main_unifiedjob ALTER COLUMN job_env_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='unifiedjob',
|
||||
@@ -206,11 +214,12 @@ class Migration(migrations.Migration):
|
||||
name='job_env',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_workflowjob ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjob',
|
||||
@@ -223,11 +232,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_workflowjob ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjob',
|
||||
@@ -240,11 +250,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_workflowjobnode ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjobnode',
|
||||
@@ -257,11 +268,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_workflowjobnode ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjobnode',
|
||||
|
||||
@@ -3,6 +3,8 @@ from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def delete_taggit_contenttypes(apps, schema_editor):
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
@@ -20,8 +22,8 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;"),
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;"),
|
||||
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_tag;"),
|
||||
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_taggeditem;"),
|
||||
migrations.RunPython(delete_taggit_contenttypes),
|
||||
migrations.RunPython(delete_taggit_migration_records),
|
||||
]
|
||||
|
||||
awx/main/migrations/0187_hop_nodes.py (new file, 75 lines)
@@ -0,0 +1,75 @@
# Generated by Django 4.2.3 on 2023-08-04 20:50

import django.core.validators
from django.db import migrations, models
from django.conf import settings


def automatically_peer_from_control_plane(apps, schema_editor):
    if settings.IS_K8S:
        Instance = apps.get_model('main', 'Instance')
        Instance.objects.filter(node_type='execution').update(peers_from_control_nodes=True)
        Instance.objects.filter(node_type='control').update(listener_port=None)


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0186_drop_django_taggit'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='instancelink',
            options={'ordering': ('id',)},
        ),
        migrations.AddField(
            model_name='instance',
            name='peers_from_control_nodes',
            field=models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
        ),
        migrations.AlterField(
            model_name='instance',
            name='ip_address',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
        migrations.AlterField(
            model_name='instance',
            name='listener_port',
            field=models.PositiveIntegerField(
                blank=True,
                default=None,
                help_text='Port that Receptor will listen for incoming connections on.',
                null=True,
                validators=[django.core.validators.MinValueValidator(1024), django.core.validators.MaxValueValidator(65535)],
            ),
        ),
        migrations.AlterField(
            model_name='instance',
            name='peers',
            field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.instance'),
        ),
        migrations.AlterField(
            model_name='instancelink',
            name='link_state',
            field=models.CharField(
                choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
                default='adding',
                help_text='Indicates the current life cycle stage of this peer link.',
                max_length=16,
            ),
        ),
        migrations.AddConstraint(
            model_name='instance',
            constraint=models.UniqueConstraint(
                condition=models.Q(('ip_address', ''), _negated=True),
                fields=('ip_address',),
                name='unique_ip_address_not_empty',
                violation_error_message='Field ip_address must be unique.',
            ),
        ),
        migrations.AddConstraint(
            model_name='instancelink',
            constraint=models.CheckConstraint(check=models.Q(('source', models.F('target')), _negated=True), name='source_and_target_can_not_be_equal'),
        ),
        migrations.RunPython(automatically_peer_from_control_plane),
    ]
awx/main/migrations/_sqlite_helper.py (new file, 61 lines)
@@ -0,0 +1,61 @@
from django.db import migrations


class RunSQL(migrations.operations.special.RunSQL):
    """
    Bit of a hack here. Django actually wants this decision made in the router
    and we can pass **hints.
    """

    def __init__(self, *args, **kwargs):
        if 'sqlite_sql' not in kwargs:
            raise ValueError("sqlite_sql parameter required")
        sqlite_sql = kwargs.pop('sqlite_sql')

        self.sqlite_sql = sqlite_sql
        self.sqlite_reverse_sql = kwargs.pop('sqlite_reverse_sql', None)
        super().__init__(*args, **kwargs)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.sql = self.sqlite_sql or migrations.RunSQL.noop
        super().database_forwards(app_label, schema_editor, from_state, to_state)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.reverse_sql = self.sqlite_reverse_sql or migrations.RunSQL.noop
        super().database_backwards(app_label, schema_editor, from_state, to_state)


class RunPython(migrations.operations.special.RunPython):
    """
    Bit of a hack here. Django actually wants this decision made in the router
    and we can pass **hints.
    """

    def __init__(self, *args, **kwargs):
        if 'sqlite_code' not in kwargs:
            raise ValueError("sqlite_code parameter required")
        sqlite_code = kwargs.pop('sqlite_code')

        self.sqlite_code = sqlite_code
        self.sqlite_reverse_code = kwargs.pop('sqlite_reverse_code', None)
        super().__init__(*args, **kwargs)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.code = self.sqlite_code or migrations.RunPython.noop
        super().database_forwards(app_label, schema_editor, from_state, to_state)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.reverse_code = self.sqlite_reverse_code or migrations.RunPython.noop
        super().database_backwards(app_label, schema_editor, from_state, to_state)


class _sqlitemigrations:
    RunPython = RunPython
    RunSQL = RunSQL


dbawaremigrations = _sqlitemigrations()
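A hypothetical migration using this helper might look like the following; the table name and dependency are placeholders, but the call shape matches the migrations shown earlier in this changeset:

from django.db import migrations

from ._sqlite_helper import dbawaremigrations


class Migration(migrations.Migration):
    dependencies = [('main', '0186_drop_django_taggit')]

    operations = [
        # Postgres gets the CASCADE form; a SQLite test database falls back to a plain DROP.
        dbawaremigrations.RunSQL(
            "DROP TABLE IF EXISTS example_table CASCADE;",
            sqlite_sql="DROP TABLE IF EXISTS example_table;",
        ),
    ]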
@@ -6,8 +6,10 @@ from django.conf import settings # noqa
|
||||
from django.db import connection
|
||||
from django.db.models.signals import pre_delete # noqa
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import BaseModel, PrimordialModel, prevent_search, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES # noqa
|
||||
from awx.main.models.base import BaseModel, PrimordialModel, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES # noqa
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutMaxBytesExceeded # noqa
|
||||
from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership # noqa
|
||||
from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env # noqa
|
||||
@@ -57,7 +59,6 @@ from awx.main.models.ha import ( # noqa
|
||||
from awx.main.models.rbac import ( # noqa
|
||||
Role,
|
||||
batch_role_ancestor_rebuilding,
|
||||
get_roles_on_resource,
|
||||
role_summary_fields_generator,
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
@@ -91,13 +92,12 @@ from oauth2_provider.models import Grant, RefreshToken # noqa -- needed django-
|
||||
|
||||
# Add custom methods to User model for permissions checks.
|
||||
from django.contrib.auth.models import User # noqa
|
||||
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors, user_accessible_objects # noqa
|
||||
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors # noqa
|
||||
|
||||
|
||||
User.add_to_class('get_queryset', get_user_queryset)
|
||||
User.add_to_class('can_access', check_user_access)
|
||||
User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||
|
||||
|
||||
def convert_jsonfields():
|
||||
|
||||
@@ -12,9 +12,11 @@ from django.utils.text import Truncator
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.core.exceptions import ValidationError
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import prevent_search, AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
|
||||
from awx.main.models.base import AD_HOC_JOB_TYPE_CHOICES, VERBOSITY_CHOICES, VarsDictProperty
|
||||
from awx.main.models.events import AdHocCommandEvent, UnpartitionedAdHocCommandEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob
|
||||
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
|
||||
|
||||
@@ -15,7 +15,6 @@ from awx.main.utils import encrypt_field, parse_yaml_or_json
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
|
||||
__all__ = [
|
||||
'prevent_search',
|
||||
'VarsDictProperty',
|
||||
'BaseModel',
|
||||
'CreatedModifiedModel',
|
||||
@@ -384,23 +383,6 @@ class NotificationFieldsModel(BaseModel):
|
||||
notification_templates_started = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_started')
|
||||
|
||||
|
||||
def prevent_search(relation):
|
||||
"""
|
||||
Used to mark a model field or relation as "restricted from filtering"
|
||||
e.g.,
|
||||
|
||||
class AuthToken(BaseModel):
|
||||
user = prevent_search(models.ForeignKey(...))
|
||||
sensitive_data = prevent_search(models.CharField(...))
|
||||
|
||||
The flag set by this function is used by
|
||||
`awx.api.filters.FieldLookupBackend` to block fields and relations that
|
||||
should not be searchable/filterable via search query params
|
||||
"""
|
||||
setattr(relation, '__prevent_search__', True)
|
||||
return relation
|
||||
|
||||
|
||||
def accepts_json(relation):
|
||||
"""
|
||||
Used to mark a model field as allowing JSON e.g,. JobTemplate.extra_vars
|
||||
|
||||
@@ -17,6 +17,7 @@ from jinja2 import sandbox
|
||||
from django.db import models
|
||||
from django.utils.translation import gettext_lazy as _, gettext_noop
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.conf import settings
|
||||
from django.utils.encoding import force_str
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.timezone import now
|
||||
@@ -30,7 +31,7 @@ from awx.main.fields import (
|
||||
CredentialTypeInjectorField,
|
||||
DynamicCredentialInputField,
|
||||
)
|
||||
from awx.main.utils import decrypt_field, classproperty
|
||||
from awx.main.utils import decrypt_field, classproperty, set_environ
|
||||
from awx.main.utils.safe_yaml import safe_dump
|
||||
from awx.main.utils.execution_environments import to_container_path
|
||||
from awx.main.validators import validate_ssh_private_key
|
||||
@@ -1252,7 +1253,9 @@ class CredentialInputSource(PrimordialModel):
|
||||
backend_kwargs[field_name] = value
|
||||
|
||||
backend_kwargs.update(self.metadata)
|
||||
return backend(**backend_kwargs)
|
||||
|
||||
with set_environ(**settings.AWX_TASK_ENV):
|
||||
return backend(**backend_kwargs)
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
view_name = 'api:credential_input_source_detail'
|
||||
|
||||
@@ -12,18 +12,21 @@ from django.dispatch import receiver
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.conf import settings
|
||||
from django.utils.timezone import now, timedelta
|
||||
from django.db.models import Sum
|
||||
from django.db.models import Sum, Q
|
||||
|
||||
import redis
|
||||
from solo.models import SingletonModel
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx import __version__ as awx_application_version
|
||||
from awx.main.utils import is_testing
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.fields import ImplicitRoleField
|
||||
from awx.main.managers import InstanceManager, UUID_DEFAULT
|
||||
from awx.main.constants import JOB_FOLDER_PREFIX
|
||||
from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
|
||||
from awx.main.models.base import BaseModel, HasEditsMixin
|
||||
from awx.main.models.rbac import (
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
@@ -70,16 +73,33 @@ class InstanceLink(BaseModel):
|
||||
REMOVING = 'removing', _('Removing')
|
||||
|
||||
link_state = models.CharField(
|
||||
choices=States.choices, default=States.ESTABLISHED, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
|
||||
choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
|
||||
)
|
||||
|
||||
class Meta:
|
||||
unique_together = ('source', 'target')
|
||||
ordering = ("id",)
|
||||
constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')]
|
||||
|
||||
|
||||
class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
"""A model representing an AWX instance running against this database."""
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ("hostname",)
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
fields=["ip_address"],
|
||||
condition=~Q(ip_address=""), # don't apply to constraint to empty entries
|
||||
name="unique_ip_address_not_empty",
|
||||
violation_error_message=_("Field ip_address must be unique."),
|
||||
)
|
||||
]
|
||||
|
||||
def __str__(self):
|
||||
return self.hostname
|
||||
|
||||
objects = InstanceManager()
|
||||
|
||||
# Fields set in instance registration
|
||||
@@ -87,10 +107,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
hostname = models.CharField(max_length=250, unique=True)
|
||||
ip_address = models.CharField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
default="",
|
||||
max_length=50,
|
||||
unique=True,
|
||||
)
|
||||
# Auto-fields, implementation is different from BaseModel
|
||||
created = models.DateTimeField(auto_now_add=True)
|
||||
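The combination above (ip_address defaulting to '' plus a conditional UniqueConstraint in Meta) allows any number of instances with an empty address while still enforcing uniqueness for real ones. A minimal standalone model sketch of the same idea (model and app names are illustrative, not from this changeset):

from django.db import models
from django.db.models import Q


class Node(models.Model):
    # '' means "no address recorded"; the partial constraint skips those rows.
    ip_address = models.CharField(blank=True, default="", max_length=50)

    class Meta:
        app_label = 'example'
        constraints = [
            models.UniqueConstraint(
                fields=["ip_address"],
                condition=~Q(ip_address=""),
                name="example_unique_ip_address_not_empty",
            )
        ]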
@@ -169,16 +187,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
)
|
||||
listener_port = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
default=27199,
|
||||
validators=[MinValueValidator(1), MaxValueValidator(65535)],
|
||||
null=True,
|
||||
default=None,
|
||||
validators=[MinValueValidator(1024), MaxValueValidator(65535)],
|
||||
help_text=_("Port that Receptor will listen for incoming connections on."),
|
||||
)
|
||||
|
||||
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
|
||||
|
||||
class Meta:
|
||||
app_label = 'main'
|
||||
ordering = ("hostname",)
|
||||
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
|
||||
peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it."))
|
||||
|
||||
POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
|
||||
|
||||
@@ -275,10 +291,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
if update_last_seen:
|
||||
update_fields += ['last_seen']
|
||||
if perform_save:
|
||||
self.save(update_fields=update_fields)
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.save(update_fields=update_fields)
|
||||
return update_fields
|
||||
|
||||
def set_capacity_value(self):
|
||||
old_val = self.capacity
|
||||
"""Sets capacity according to capacity adjustment rule (no save)"""
|
||||
if self.enabled and self.node_type != 'hop':
|
||||
lower_cap = min(self.mem_capacity, self.cpu_capacity)
|
||||
@@ -286,6 +306,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.capacity = lower_cap + (higher_cap - lower_cap) * self.capacity_adjustment
|
||||
else:
|
||||
self.capacity = 0
|
||||
return int(self.capacity) != int(old_val) # return True if value changed
|
||||
|
||||
def refresh_capacity_fields(self):
|
||||
"""Update derived capacity fields from cpu and memory (no save)"""
|
||||
@@ -293,8 +314,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.cpu_capacity = 0
|
||||
self.mem_capacity = 0 # formula has a non-zero offset, so we make sure it is 0 for hop nodes
|
||||
else:
|
||||
self.cpu_capacity = get_cpu_effective_capacity(self.cpu)
|
||||
self.mem_capacity = get_mem_effective_capacity(self.memory)
|
||||
self.cpu_capacity = get_cpu_effective_capacity(self.cpu, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||
self.mem_capacity = get_mem_effective_capacity(self.memory, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||
self.set_capacity_value()
|
||||
|
||||
def save_health_data(self, version=None, cpu=0, memory=0, uuid=None, update_last_seen=False, errors=''):
|
||||
@@ -317,12 +338,17 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.version = version
|
||||
update_fields.append('version')
|
||||
|
||||
new_cpu = get_corrected_cpu(cpu)
|
||||
if self.node_type == Instance.Types.EXECUTION:
|
||||
new_cpu = cpu
|
||||
new_memory = memory
|
||||
else:
|
||||
new_cpu = get_corrected_cpu(cpu)
|
||||
new_memory = get_corrected_memory(memory)
|
||||
|
||||
if new_cpu != self.cpu:
|
||||
self.cpu = new_cpu
|
||||
update_fields.append('cpu')
|
||||
|
||||
new_memory = get_corrected_memory(memory)
|
||||
if new_memory != self.memory:
|
||||
self.memory = new_memory
|
||||
update_fields.append('memory')
|
||||
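A condensed restatement of the branching above, purely for readability (the helper names come from the diff; this is not the actual method body):

def effective_resources(instance, cpu, memory):
    # Execution nodes report their measured resources unmodified;
    # other node types get the corrected values used for capacity math.
    if instance.node_type == Instance.Types.EXECUTION:
        return cpu, memory
    return get_corrected_cpu(cpu), get_corrected_memory(memory)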
@@ -464,21 +490,50 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs
|
||||
instance.set_default_policy_fields()
|
||||
|
||||
|
||||
def schedule_write_receptor_config(broadcast=True):
|
||||
from awx.main.tasks.receptor import write_receptor_config # prevents circular import
|
||||
|
||||
# broadcast to all control instances to update their receptor configs
|
||||
if broadcast:
|
||||
connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
|
||||
else:
|
||||
if not is_testing():
|
||||
write_receptor_config() # just run locally
|
||||
|
||||
|
||||
@receiver(post_save, sender=Instance)
|
||||
def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
|
||||
if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION,):
|
||||
'''
|
||||
Here we link control nodes to hop or execution nodes based on the
|
||||
peers_from_control_nodes field.
|
||||
write_receptor_config should be called on each control node when:
|
||||
1. new node is created with peers_from_control_nodes enabled
|
||||
2. a node changes its value of peers_from_control_nodes
|
||||
3. a new control node comes online and has instances to peer to
|
||||
'''
|
||||
if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
|
||||
inst = Instance.objects.filter(peers_from_control_nodes=True)
|
||||
if set(instance.peers.all()) != set(inst):
|
||||
instance.peers.set(inst)
|
||||
schedule_write_receptor_config(broadcast=False)
|
||||
|
||||
if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]:
|
||||
if instance.node_state == Instance.States.DEPROVISIONING:
|
||||
from awx.main.tasks.receptor import remove_deprovisioned_node # prevents circular import
|
||||
|
||||
# wait for jobs on the node to complete, then delete the
|
||||
# node and kick off write_receptor_config
|
||||
connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
|
||||
|
||||
if instance.node_state == Instance.States.INSTALLED:
|
||||
from awx.main.tasks.receptor import write_receptor_config # prevents circular import
|
||||
|
||||
# broadcast to all control instances to update their receptor configs
|
||||
connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
|
||||
else:
|
||||
control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
|
||||
if instance.peers_from_control_nodes:
|
||||
if (control_instances & set(instance.peers_from.all())) != set(control_instances):
|
||||
instance.peers_from.add(*control_instances)
|
||||
schedule_write_receptor_config() # keep method separate to make pytest mocking easier
|
||||
else:
|
||||
if set(control_instances) & set(instance.peers_from.all()):
|
||||
instance.peers_from.remove(*control_instances)
|
||||
schedule_write_receptor_config()
|
||||
|
||||
if created or instance.has_policy_changes():
|
||||
schedule_policy_task()
|
||||
@@ -493,6 +548,8 @@ def on_instance_group_deleted(sender, instance, using, **kwargs):
|
||||
@receiver(post_delete, sender=Instance)
|
||||
def on_instance_deleted(sender, instance, using, **kwargs):
|
||||
schedule_policy_task()
|
||||
if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes:
|
||||
schedule_write_receptor_config()
|
||||
|
||||
|
||||
class UnifiedJobTemplateInstanceGroupMembership(models.Model):
|
||||
|
||||
@@ -10,7 +10,6 @@ import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import dateutil.relativedelta
|
||||
import yaml
|
||||
|
||||
# Django
|
||||
@@ -26,6 +25,8 @@ from django.db.models import Q
|
||||
# REST Framework
|
||||
from rest_framework.exceptions import ParseError
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import CLOUD_PROVIDERS
|
||||
@@ -36,7 +37,7 @@ from awx.main.fields import (
|
||||
OrderedManyToManyField,
|
||||
)
|
||||
from awx.main.managers import HostManager, HostMetricActiveManager
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, prevent_search, accepts_json
|
||||
from awx.main.models.base import BaseModel, CommonModelNameNotUnique, VarsDictProperty, CLOUD_INVENTORY_SOURCES, accepts_json
|
||||
from awx.main.models.events import InventoryUpdateEvent, UnpartitionedInventoryUpdateEvent
|
||||
from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate
|
||||
from awx.main.models.mixins import (
|
||||
@@ -890,23 +891,6 @@ class HostMetric(models.Model):
|
||||
self.deleted = False
|
||||
self.save(update_fields=['deleted'])
|
||||
|
||||
@classmethod
|
||||
def cleanup_task(cls, months_ago):
|
||||
try:
|
||||
months_ago = int(months_ago)
|
||||
if months_ago <= 0:
|
||||
raise ValueError()
|
||||
|
||||
last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
|
||||
|
||||
logger.info(f'cleanup_host_metrics: soft-deleting records last automated before {last_automation_before}')
|
||||
HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
except (TypeError, ValueError):
|
||||
logger.error(f"cleanup_host_metrics: months_ago({months_ago}) has to be a positive integer value")
|
||||
|
||||
|
||||
class HostMetricSummaryMonthly(models.Model):
|
||||
"""
|
||||
|
||||
@@ -20,13 +20,14 @@ from django.core.exceptions import FieldDoesNotExist
|
||||
# REST Framework
|
||||
from rest_framework.exceptions import ParseError
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.constants import HOST_FACTS_FIELDS
|
||||
from awx.main.models.base import (
|
||||
BaseModel,
|
||||
CreatedModifiedModel,
|
||||
prevent_search,
|
||||
accepts_json,
|
||||
JOB_TYPE_CHOICES,
|
||||
NEW_JOB_TYPE_CHOICES,
|
||||
|
||||
@@ -9,7 +9,6 @@ import requests
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.conf import settings
|
||||
from django.contrib.auth.models import User # noqa
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
from django.core.exceptions import ValidationError
|
||||
from django.db import models
|
||||
@@ -17,9 +16,10 @@ from django.db.models.query import QuerySet
|
||||
from django.utils.crypto import get_random_string
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import prevent_search
|
||||
from awx.main.models.rbac import Role, RoleAncestorEntry, get_roles_on_resource
|
||||
from awx.main.models.rbac import Role, RoleAncestorEntry
|
||||
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic
|
||||
from awx.main.utils.execution_environments import get_default_execution_environment
|
||||
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
|
||||
@@ -54,10 +54,7 @@ class ResourceMixin(models.Model):
|
||||
Use instead of `MyModel.objects` when you want to only consider
|
||||
resources that a user has specific permissions for. For example:
|
||||
MyModel.accessible_objects(user, 'read_role').filter(name__istartswith='bar');
|
||||
NOTE: This should only be used for list type things. If you have a
|
||||
specific resource you want to check permissions on, it is more
|
||||
performant to resolve the resource in question then call
|
||||
`myresource.get_permissions(user)`.
|
||||
NOTE: This should only be used for list type things.
|
||||
"""
|
||||
return ResourceMixin._accessible_objects(cls, accessor, role_field)
|
||||
|
||||
@@ -67,13 +64,12 @@ class ResourceMixin(models.Model):
|
||||
|
||||
@staticmethod
|
||||
def _accessible_pk_qs(cls, accessor, role_field, content_types=None):
|
||||
if type(accessor) == User:
|
||||
if accessor._meta.model_name == 'user':
|
||||
ancestor_roles = accessor.roles.all()
|
||||
elif type(accessor) == Role:
|
||||
ancestor_roles = [accessor]
|
||||
else:
|
||||
accessor_type = ContentType.objects.get_for_model(accessor)
|
||||
ancestor_roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
|
||||
raise RuntimeError(f'Role filters only valid for users and ancestor role, received {accessor}')
|
||||
|
||||
if content_types is None:
|
||||
ct_kwarg = dict(content_type_id=ContentType.objects.get_for_model(cls).id)
|
||||
@@ -86,15 +82,6 @@ class ResourceMixin(models.Model):
|
||||
def _accessible_objects(cls, accessor, role_field):
|
||||
return cls.objects.filter(pk__in=ResourceMixin._accessible_pk_qs(cls, accessor, role_field))
|
||||
|
||||
def get_permissions(self, accessor):
|
||||
"""
|
||||
Returns a string list of the roles a accessor has for a given resource.
|
||||
An accessor can be either a User, Role, or an arbitrary resource that
|
||||
contains one or more Roles associated with it.
|
||||
"""
|
||||
|
||||
return get_roles_on_resource(self, accessor)
|
||||
|
||||
|
||||
class SurveyJobTemplateMixin(models.Model):
|
||||
class Meta:
|
||||
|
||||
@@ -15,9 +15,11 @@ from django.utils.encoding import smart_str, force_str
|
||||
from jinja2 import sandbox, ChainableUndefined
|
||||
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel, prevent_search
|
||||
from awx.main.models.base import CommonModelNameNotUnique, CreatedModifiedModel
|
||||
from awx.main.utils import encrypt_field, decrypt_field, set_environ
|
||||
from awx.main.notifications.email_backend import CustomEmailBackend
|
||||
from awx.main.notifications.slack_backend import SlackBackend
|
||||
|
||||
@@ -15,12 +15,10 @@ from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from django.contrib.auth.models import User # noqa
|
||||
|
||||
__all__ = [
|
||||
'Role',
|
||||
'batch_role_ancestor_rebuilding',
|
||||
'get_roles_on_resource',
|
||||
'ROLE_SINGLETON_SYSTEM_ADMINISTRATOR',
|
||||
'ROLE_SINGLETON_SYSTEM_AUDITOR',
|
||||
'role_summary_fields_generator',
|
||||
@@ -170,16 +168,10 @@ class Role(models.Model):
|
||||
return reverse('api:role_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
def __contains__(self, accessor):
|
||||
if type(accessor) == User:
|
||||
if accessor._meta.model_name == 'user':
|
||||
return self.ancestors.filter(members=accessor).exists()
|
||||
elif accessor.__class__.__name__ == 'Team':
|
||||
return self.ancestors.filter(pk=accessor.member_role.id).exists()
|
||||
elif type(accessor) == Role:
|
||||
return self.ancestors.filter(pk=accessor.pk).exists()
|
||||
else:
|
||||
accessor_type = ContentType.objects.get_for_model(accessor)
|
||||
roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
|
||||
return self.ancestors.filter(pk__in=roles).exists()
|
||||
raise RuntimeError(f'Role evaluations only valid for users, received {accessor}')
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
@@ -460,31 +452,6 @@ class RoleAncestorEntry(models.Model):
|
||||
object_id = models.PositiveIntegerField(null=False)
|
||||
|
||||
|
||||
def get_roles_on_resource(resource, accessor):
|
||||
"""
|
||||
Returns a string list of the roles a accessor has for a given resource.
|
||||
An accessor can be either a User, Role, or an arbitrary resource that
|
||||
contains one or more Roles associated with it.
|
||||
"""
|
||||
|
||||
if type(accessor) == User:
|
||||
roles = accessor.roles.all()
|
||||
elif type(accessor) == Role:
|
||||
roles = [accessor]
|
||||
else:
|
||||
accessor_type = ContentType.objects.get_for_model(accessor)
|
||||
roles = Role.objects.filter(content_type__pk=accessor_type.id, object_id=accessor.id)
|
||||
|
||||
return [
|
||||
role_field
|
||||
for role_field in RoleAncestorEntry.objects.filter(
|
||||
ancestor__in=roles, content_type_id=ContentType.objects.get_for_model(resource).id, object_id=resource.id
|
||||
)
|
||||
.values_list('role_field', flat=True)
|
||||
.distinct()
|
||||
]
|
||||
|
||||
|
||||
def role_summary_fields_generator(content_object, role_field):
|
||||
global role_descriptions
|
||||
global role_names
|
||||
|
||||
@@ -30,8 +30,10 @@ from rest_framework.exceptions import ParseError
|
||||
# Django-Polymorphic
|
||||
from polymorphic.models import PolymorphicModel
|
||||
|
||||
from ansible_base.utils.models import prevent_search, get_type_for_model
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel, prevent_search
|
||||
from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.control import Control as ControlDispatcher
|
||||
from awx.main.registrar import activity_stream_registrar
|
||||
@@ -42,7 +44,6 @@ from awx.main.utils.common import (
|
||||
_inventory_updates,
|
||||
copy_model_by_class,
|
||||
copy_m2m_relationships,
|
||||
get_type_for_model,
|
||||
parse_yaml_or_json,
|
||||
getattr_dne,
|
||||
ScheduleDependencyManager,
|
||||
@@ -1439,6 +1440,11 @@ class UnifiedJob(
|
||||
if not self.celery_task_id:
|
||||
return
|
||||
canceled = []
|
||||
if not connection.get_autocommit():
|
||||
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
|
||||
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
||||
return True # task manager itself needs to act under assumption that cancel was received
|
||||
|
||||
try:
|
||||
# Use control and reply mechanism to cancel and obtain confirmation
|
||||
timeout = 5
|
||||
|
||||
@@ -23,9 +23,11 @@ from crum import get_current_user
|
||||
from jinja2 import sandbox
|
||||
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError
|
||||
|
||||
from ansible_base.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, UnifiedJob
|
||||
from awx.main.models import accepts_json, UnifiedJobTemplate, UnifiedJob
|
||||
from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin
|
||||
from awx.main.models.base import CreatedModifiedModel, VarsDictProperty
|
||||
from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
|
||||
|
||||
@@ -39,11 +39,15 @@ class TwilioBackend(AWXBaseEmailBackend, CustomNotificationBase):
|
||||
logger.error(smart_str(_("Exception connecting to Twilio: {}").format(e)))
|
||||
|
||||
for m in messages:
|
||||
try:
|
||||
connection.messages.create(to=m.to, from_=m.from_email, body=m.subject)
|
||||
sent_messages += 1
|
||||
except Exception as e:
|
||||
logger.error(smart_str(_("Exception sending messages: {}").format(e)))
|
||||
if not self.fail_silently:
|
||||
raise
|
||||
failed = False
|
||||
for dest in m.to:
|
||||
try:
|
||||
logger.debug(smart_str(_("FROM: {} / TO: {}").format(m.from_email, dest)))
|
||||
connection.messages.create(to=dest, from_=m.from_email, body=m.subject)
|
||||
sent_messages += 1
|
||||
except Exception as e:
|
||||
logger.error(smart_str(_("Exception sending messages: {}").format(e)))
|
||||
failed = True
|
||||
if not self.fail_silently and failed:
|
||||
raise
|
||||
return sent_messages
|
||||
|
||||
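The reworked loop above sends one Twilio message per destination number and only surfaces a failure after every recipient has been attempted. A stripped-down sketch of that error-handling shape (function and exception are illustrative, not the backend's API):

def send_to_all(destinations, send_one, fail_silently=False):
    """Try every destination; surface a failure only after all attempts are made."""
    sent = 0
    failed = False
    for dest in destinations:
        try:
            send_one(dest)
            sent += 1
        except Exception:
            failed = True  # remember the failure but keep trying the rest
    if failed and not fail_silently:
        raise RuntimeError("one or more notifications failed to send")
    return sent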
@@ -17,6 +17,8 @@ from django.utils.timezone import now as tz_now
|
||||
from django.conf import settings
|
||||
from django.contrib.contenttypes.models import ContentType
|
||||
|
||||
from ansible_base.utils.models import get_type_for_model
|
||||
|
||||
# AWX
|
||||
from awx.main.dispatch.reaper import reap_job
|
||||
from awx.main.models import (
|
||||
@@ -34,7 +36,6 @@ from awx.main.models import (
|
||||
from awx.main.scheduler.dag_workflow import WorkflowDAG
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.utils import (
|
||||
get_type_for_model,
|
||||
ScheduleTaskManager,
|
||||
ScheduleWorkflowManager,
|
||||
)
|
||||
@@ -124,6 +125,13 @@ class TaskBase:
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def get_local_metrics(self):
|
||||
data = {}
|
||||
for k, metric in self.subsystem_metrics.METRICS.items():
|
||||
if k.startswith(self.prefix) and metric.metric_has_changed:
|
||||
data[k[len(self.prefix) + 1 :]] = metric.current_value
|
||||
return data
|
||||
|
||||
def schedule(self):
|
||||
# Always be able to restore the original signal handler if we finish
|
||||
original_sigusr1 = signal.getsignal(signal.SIGUSR1)
|
||||
@@ -146,10 +154,14 @@ class TaskBase:
|
||||
signal.signal(signal.SIGUSR1, original_sigusr1)
|
||||
commit_start = time.time()
|
||||
|
||||
logger.debug(f"Commiting {self.prefix} Scheduler changes")
|
||||
|
||||
if self.prefix == "task_manager":
|
||||
self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
|
||||
local_metrics = self.get_local_metrics()
|
||||
self.record_aggregate_metrics()
|
||||
logger.debug(f"Finishing {self.prefix} Scheduler")
|
||||
|
||||
logger.debug(f"Finished {self.prefix} Scheduler, timing data:\n{local_metrics}")
|
||||
|
||||
|
||||
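get_local_metrics() above strips the manager prefix from each changed metric key; the slicing is easier to see in isolation (the metric names below are made up for the example, and the real method additionally filters on metric_has_changed):

prefix = "task_manager"
metrics = {
    "task_manager_commit_seconds": 0.42,
    "task_manager_spawn_tasks_seconds": 1.30,
    "dependency_manager_generate_seconds": 0.10,  # different prefix, ignored
}

# k[len(prefix) + 1:] drops the "task_manager_" prefix from matching keys.
local = {k[len(prefix) + 1:]: v for k, v in metrics.items() if k.startswith(prefix)}
assert local == {"commit_seconds": 0.42, "spawn_tasks_seconds": 1.30}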
class WorkflowManager(TaskBase):
|
||||
@@ -259,6 +271,9 @@ class WorkflowManager(TaskBase):
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
job.websocket_emit_status('failed')
|
||||
# NOTE: sending notification templates here is slightly worse performance
|
||||
# this is not yet optimized in the same way as for the TaskManager
|
||||
job.send_notification_templates('failed')
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
@@ -419,6 +434,25 @@ class TaskManager(TaskBase):
self.tm_models = TaskManagerModels()
self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig

def process_job_dep_failures(self, task):
"""If job depends on a job that has failed, mark as failed and handle misc stuff."""
for dep in task.dependent_jobs.all():
# if we detect a failed or error dependency, go ahead and fail this task.
if dep.status in ("error", "failed"):
task.status = 'failed'
logger.warning(f'Previous task failed task: {task.id} dep: {dep.id} task manager')
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
get_type_for_model(type(dep)),
dep.name,
dep.id,
)
task.save(update_fields=['status', 'job_explanation'])
task.websocket_emit_status('failed')
self.pre_start_failed.append(task.id)
return True

return False

def job_blocked_by(self, task):
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
# in the old task manager this was handled as a method on each task object outside of the graph and
@@ -430,20 +464,6 @@ class TaskManager(TaskBase):
for dep in task.dependent_jobs.all():
if dep.status in ACTIVE_STATES:
return dep
# if we detect a failed or error dependency, go ahead and fail this
# task. The errback on the dependency takes some time to trigger,
# and we don't want the task to enter running state if its
# dependency has failed or errored.
elif dep.status in ("error", "failed"):
task.status = 'failed'
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
get_type_for_model(type(dep)),
dep.name,
dep.id,
)
task.save(update_fields=['status', 'job_explanation'])
task.websocket_emit_status('failed')
return dep

return None

@@ -463,7 +483,6 @@ class TaskManager(TaskBase):
|
||||
if self.start_task_limit == 0:
|
||||
# schedule another run immediately after this task manager
|
||||
ScheduleTaskManager().schedule()
|
||||
from awx.main.tasks.system import handle_work_error, handle_work_success
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
@@ -474,7 +493,7 @@ class TaskManager(TaskBase):
|
||||
task.job_explanation += ' '
|
||||
task.job_explanation += 'Task failed pre-start check.'
|
||||
task.save()
|
||||
# TODO: run error handler to fail sub-tasks and send notifications
|
||||
self.pre_start_failed.append(task.id)
|
||||
else:
|
||||
if type(task) is WorkflowJob:
|
||||
task.status = 'running'
|
||||
@@ -496,19 +515,16 @@ class TaskManager(TaskBase):
|
||||
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
||||
# postgres will treat this as part of the transaction, which is what we want
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
|
||||
errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}],
|
||||
)
|
||||
|
||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message
|
||||
# for jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
|
||||
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||
if task.status != 'waiting':
|
||||
task.websocket_emit_status(task.status) # adds to on_commit
|
||||
|
||||
@@ -529,6 +545,11 @@ class TaskManager(TaskBase):
|
||||
if self.timed_out():
|
||||
logger.warning("Task manager has reached time out while processing pending jobs, exiting loop early")
|
||||
break
|
||||
|
||||
has_failed = self.process_job_dep_failures(task)
|
||||
if has_failed:
|
||||
continue
|
||||
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
|
||||
@@ -642,6 +663,11 @@ class TaskManager(TaskBase):
|
||||
reap_job(j, 'failed')
|
||||
|
||||
def process_tasks(self):
|
||||
# maintain a list of jobs that went to an early failure state,
|
||||
# meaning the dispatcher never got these jobs,
|
||||
# that means we have to handle notifications for those
|
||||
self.pre_start_failed = []
|
||||
|
||||
running_tasks = [t for t in self.all_tasks if t.status in ['waiting', 'running']]
|
||||
self.process_running_tasks(running_tasks)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_running_processed", len(running_tasks))
|
||||
@@ -651,6 +677,11 @@ class TaskManager(TaskBase):
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(pending_tasks))
|
||||
|
||||
if self.pre_start_failed:
|
||||
from awx.main.tasks.system import handle_failure_notifications
|
||||
|
||||
handle_failure_notifications.delay(self.pre_start_failed)
|
||||
|
||||
def timeout_approval_node(self, task):
|
||||
if self.timed_out():
|
||||
logger.warning("Task manager has reached time out while processing approval nodes, exiting loop early")
|
||||
|
||||
@@ -29,7 +29,7 @@ class RunnerCallback:
|
||||
self.safe_env = {}
|
||||
self.event_ct = 0
|
||||
self.model = model
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
|
||||
self.wrapup_event_dispatched = False
|
||||
self.artifacts_processed = False
|
||||
self.extra_update_fields = {}
|
||||
@@ -208,9 +208,10 @@ class RunnerCallback:
|
||||
# We opened a connection just for that save, close it here now
|
||||
connections.close_all()
|
||||
elif status_data['status'] == 'error':
|
||||
result_traceback = status_data.get('result_traceback', None)
|
||||
if result_traceback:
|
||||
self.delay_update(result_traceback=result_traceback)
|
||||
for field_name in ('result_traceback', 'job_explanation'):
|
||||
field_value = status_data.get(field_name, None)
|
||||
if field_value:
|
||||
self.delay_update(**{field_name: field_value})
|
||||
|
||||
def artifacts_handler(self, artifact_dir):
|
||||
self.artifacts_processed = True
|
||||
|
||||
10
awx/main/tasks/helpers.py
Normal file
@@ -0,0 +1,10 @@
from django.utils.timezone import now
from rest_framework.fields import DateTimeField


def is_run_threshold_reached(setting, threshold_seconds):
last_time = DateTimeField().to_internal_value(setting) if setting else None
if not last_time:
return True
else:
return (now() - last_time).total_seconds() > threshold_seconds
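A hedged usage sketch for the new helper, mirroring how cleanup_host_metrics() and gather_analytics() use it later in this diff. SOME_TASK_LAST_TS and SOME_TASK_INTERVAL are made-up setting names for illustration only; updating the last-run timestamp via settings assignment follows what the diff itself does with CLEANUP_HOST_METRICS_LAST_TS.

from django.conf import settings
from django.utils.timezone import now

from awx.main.tasks.helpers import is_run_threshold_reached


def do_the_work():
    pass  # placeholder body for the sketch


def maybe_run_periodic_task():
    last_ts = getattr(settings, 'SOME_TASK_LAST_TS', None)      # ISO timestamp string or None
    interval_days = getattr(settings, 'SOME_TASK_INTERVAL', 7)  # days between runs
    if is_run_threshold_reached(last_ts, interval_days * 86400):
        do_the_work()
        settings.SOME_TASK_LAST_TS = now()  # record the run, as the diff does for host metrics
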
@@ -3,33 +3,90 @@ from dateutil.relativedelta import relativedelta
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.db.models import Count
|
||||
from django.db.models import Count, F
|
||||
from django.db.models.functions import TruncMonth
|
||||
from django.utils.timezone import now
|
||||
from rest_framework.fields import DateTimeField
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.conf.license import get_license
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.host_metric_summary_monthly')
|
||||
logger = logging.getLogger('awx.main.tasks.host_metrics')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
|
||||
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
|
||||
HostMetricTask().cleanup(
|
||||
soft_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12),
|
||||
hard_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36),
|
||||
)
|
||||
logger.info("Finished cleanup_host_metrics")
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def host_metric_summary_monthly():
|
||||
"""Run cleanup host metrics summary monthly task each week"""
|
||||
if _is_run_threshold_reached(
|
||||
getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400
|
||||
):
|
||||
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
|
||||
logger.info(f"Executing host_metric_summary_monthly, last ran at {getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', '---')}")
|
||||
HostMetricSummaryMonthlyTask().execute()
|
||||
logger.info("Finished host_metric_summary_monthly")
|
||||
|
||||
|
||||
def _is_run_threshold_reached(setting, threshold_seconds):
|
||||
last_time = DateTimeField().to_internal_value(setting) if setting else DateTimeField().to_internal_value('1970-01-01')
|
||||
class HostMetricTask:
|
||||
"""
|
||||
This class provides cleanup task for HostMetric model.
|
||||
There are two modes:
|
||||
- soft cleanup (updates columns delete, deleted_counter and last_deleted)
|
||||
- hard cleanup (deletes from the db)
|
||||
"""
|
||||
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
def cleanup(self, soft_threshold=None, hard_threshold=None):
|
||||
"""
|
||||
Main entrypoint, runs either soft cleanup, hard cleanup or both
|
||||
|
||||
:param soft_threshold: (int)
|
||||
:param hard_threshold: (int)
|
||||
"""
|
||||
if hard_threshold is not None:
|
||||
self.hard_cleanup(hard_threshold)
|
||||
if soft_threshold is not None:
|
||||
self.soft_cleanup(soft_threshold)
|
||||
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
|
||||
@staticmethod
|
||||
def soft_cleanup(threshold=None):
|
||||
if threshold is None:
|
||||
threshold = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
|
||||
try:
|
||||
threshold = int(threshold)
|
||||
except (ValueError, TypeError) as e:
|
||||
raise type(e)("soft_threshold has to be convertible to number") from e
|
||||
|
||||
last_automation_before = now() - relativedelta(months=threshold)
|
||||
rows = HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
logger.info(f'cleanup_host_metrics: soft-deleted records last automated before {last_automation_before}, affected rows: {rows}')
|
||||
|
||||
@staticmethod
|
||||
def hard_cleanup(threshold=None):
|
||||
if threshold is None:
|
||||
threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
|
||||
|
||||
try:
|
||||
threshold = int(threshold)
|
||||
except (ValueError, TypeError) as e:
|
||||
raise type(e)("hard_threshold has to be convertible to number") from e
|
||||
|
||||
last_deleted_before = now() - relativedelta(months=threshold)
|
||||
queryset = HostMetric.objects.filter(deleted=True, last_deleted__lt=last_deleted_before)
|
||||
rows = queryset.delete()
|
||||
logger.info(f'cleanup_host_metrics: hard-deleted records which were soft deleted before {last_deleted_before}, affected rows: {rows[0]}')
|
||||
|
||||
|
||||
class HostMetricSummaryMonthlyTask:
|
||||
|
||||
@@ -74,6 +74,8 @@ from awx.main.utils.common import (
|
||||
extract_ansible_vars,
|
||||
get_awx_version,
|
||||
create_partition,
|
||||
ScheduleWorkflowManager,
|
||||
ScheduleTaskManager,
|
||||
)
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils.handlers import SpecialInventoryHandler
|
||||
@@ -112,7 +114,7 @@ class BaseTask(object):
|
||||
|
||||
def __init__(self):
|
||||
self.cleanup_paths = []
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
|
||||
self.runner_callback = self.callback_class(model=self.model)
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
@@ -450,6 +452,12 @@ class BaseTask(object):
|
||||
instance.ansible_version = ansible_version_info
|
||||
instance.save(update_fields=['ansible_version'])
|
||||
|
||||
# Run task manager appropriately for speculative dependencies
|
||||
if instance.unifiedjob_blocked_jobs.exists():
|
||||
ScheduleTaskManager().schedule()
|
||||
if instance.spawned_by_workflow:
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
def should_use_fact_cache(self):
|
||||
return False
|
||||
|
||||
@@ -1873,6 +1881,8 @@ class RunSystemJob(BaseTask):
|
||||
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
|
||||
if 'days' in json_vars:
|
||||
args.extend(['--days', str(json_vars.get('days', 60))])
|
||||
if 'batch_size' in json_vars:
|
||||
args.extend(['--batch-size', str(json_vars['batch_size'])])
|
||||
if 'dry_run' in json_vars and json_vars['dry_run']:
|
||||
args.extend(['--dry-run'])
|
||||
if system_job.job_type == 'cleanup_jobs':
|
||||
|
||||
@@ -30,6 +30,7 @@ from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
||||
from awx.main.models import Instance, InstanceLink, UnifiedJob
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
|
||||
# Receptorctl
|
||||
from receptorctl.socket_interface import ReceptorControl
|
||||
@@ -431,16 +432,16 @@ class AWXReceptorJob:
|
||||
# massive, only ask for last 1000 bytes
|
||||
startpos = max(stdout_size - 1000, 0)
|
||||
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
|
||||
resultsock.setblocking(False) # this makes resultfile reads non blocking
|
||||
lines = resultfile.readlines()
|
||||
receptor_output = b"".join(lines).decode()
|
||||
if receptor_output:
|
||||
self.task.runner_callback.delay_update(result_traceback=receptor_output)
|
||||
self.task.runner_callback.delay_update(result_traceback=f'Worker output:\n{receptor_output}')
|
||||
elif detail:
|
||||
self.task.runner_callback.delay_update(result_traceback=detail)
|
||||
self.task.runner_callback.delay_update(result_traceback=f'Receptor detail:\n{detail}')
|
||||
else:
|
||||
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
||||
except Exception:
|
||||
logger.exception(f'Work results error from job id={self.task.instance.id} work_unit={self.task.instance.work_unit_id}')
|
||||
raise RuntimeError(detail)
|
||||
|
||||
return res
|
||||
@@ -675,26 +676,41 @@ RECEPTOR_CONFIG_STARTER = (
|
||||
)
|
||||
|
||||
|
||||
@task()
|
||||
def write_receptor_config():
|
||||
lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
|
||||
with lock:
|
||||
receptor_config = list(RECEPTOR_CONFIG_STARTER)
|
||||
def should_update_config(instances):
|
||||
'''
|
||||
checks that the list of instances matches the list of
|
||||
tcp-peers in the config
|
||||
'''
|
||||
current_config = read_receptor_config() # this gets receptor conf lock
|
||||
current_peers = []
|
||||
for config_entry in current_config:
|
||||
for key, value in config_entry.items():
|
||||
if key.endswith('-peer'):
|
||||
current_peers.append(value['address'])
|
||||
intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances]
|
||||
logger.debug(f"Peers current {current_peers} intended {intended_peers}")
|
||||
if set(current_peers) == set(intended_peers):
|
||||
return False # config file is already update to date
|
||||
|
||||
this_inst = Instance.objects.me()
|
||||
instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)
|
||||
existing_peers = {link.target_id for link in InstanceLink.objects.filter(source=this_inst)}
|
||||
new_links = []
|
||||
for instance in instances:
|
||||
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
|
||||
receptor_config.append(peer)
|
||||
if instance.id not in existing_peers:
|
||||
new_links.append(InstanceLink(source=this_inst, target=instance, link_state=InstanceLink.States.ADDING))
|
||||
return True
|
||||
|
||||
InstanceLink.objects.bulk_create(new_links)
|
||||
|
||||
with open(__RECEPTOR_CONF, 'w') as file:
|
||||
yaml.dump(receptor_config, file, default_flow_style=False)
|
||||
def generate_config_data():
|
||||
# returns two values
|
||||
# receptor config - based on current database peers
|
||||
# should_update - If True, receptor_config differs from the receptor conf file on disk
|
||||
instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True)
|
||||
|
||||
receptor_config = list(RECEPTOR_CONFIG_STARTER)
|
||||
for instance in instances:
|
||||
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
|
||||
receptor_config.append(peer)
|
||||
should_update = should_update_config(instances)
|
||||
return receptor_config, should_update
|
||||
|
||||
|
||||
def reload_receptor():
logger.warning("Receptor config changed, reloading receptor")

# This needs to be outside of the lock because this function itself will acquire the lock.
receptor_ctl = get_receptor_ctl()
@@ -710,8 +726,29 @@ def write_receptor_config():
else:
raise RuntimeError("Receptor reload failed")

links = InstanceLink.objects.filter(source=this_inst, target__in=instances, link_state=InstanceLink.States.ADDING)
links.update(link_state=InstanceLink.States.ESTABLISHED)

@task()
def write_receptor_config():
"""
This task runs async on each control node, K8S only.
It is triggered whenever remote is added or removed, or if peers_from_control_nodes
is flipped.
It is possible for write_receptor_config to be called multiple times.
For example, if new instances are added in quick succession.
To prevent that case, each control node first grabs a DB advisory lock, specific
to just that control node (i.e. multiple control nodes can run this function
at the same time, since it only writes the local receptor config file)
"""
with advisory_lock(f"{settings.CLUSTER_HOST_ID}_write_receptor_config", wait=True):
# Config file needs to be updated
receptor_config, should_update = generate_config_data()
if should_update:
lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
with lock:
with open(__RECEPTOR_CONF, 'w') as file:
yaml.dump(receptor_config, file, default_flow_style=False)

reload_receptor()


@task(queue=get_task_queuename)
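A sketch of the serialize-then-write pattern used by write_receptor_config() above: a per-node database advisory lock de-duplicates concurrent invocations, and the file lock plus rewrite only happen when the generated config differs from what is on disk. It assumes FileLock comes from the filelock package; the paths and the callables passed in are illustrative, not AWX API.

import yaml
from filelock import FileLock

from awx.main.utils.pglock import advisory_lock

CONF = '/etc/receptor/receptor.conf'      # illustrative path
CONF_LOCKFILE = CONF + '.lock'


def rewrite_config_if_needed(node_id, generate_config_data, reload_receptor):
    # one writer per control node; other callers for the same node wait here
    with advisory_lock(f"{node_id}_write_receptor_config", wait=True):
        config, should_update = generate_config_data()
        if not should_update:
            return False                  # on-disk peers already match the database
        with FileLock(CONF_LOCKFILE):
            with open(CONF, 'w') as f:
                yaml.dump(config, f, default_flow_style=False)
        reload_receptor()
        return True
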
@@ -731,6 +768,3 @@ def remove_deprovisioned_node(hostname):
|
||||
|
||||
# This will as a side effect also delete the InstanceLinks that are tied to it.
|
||||
Instance.objects.filter(hostname=hostname).delete()
|
||||
|
||||
# Update the receptor configs for all of the control-plane.
|
||||
write_receptor_config.apply_async(queue='tower_broadcast_all')
|
||||
|
||||
@@ -16,7 +16,9 @@ class SignalExit(Exception):
class SignalState:
def reset(self):
self.sigterm_flag = False
self.is_active = False
self.sigint_flag = False

self.is_active = False # for nested context managers
self.original_sigterm = None
self.original_sigint = None
self.raise_exception = False
@@ -24,23 +26,36 @@ class SignalState:
def __init__(self):
self.reset()

def set_flag(self, *args):
"""Method to pass into the python signal.signal method to receive signals"""
self.sigterm_flag = True
def raise_if_needed(self):
if self.raise_exception:
self.raise_exception = False # so it is not raised a second time in error handling
raise SignalExit()

def set_sigterm_flag(self, *args):
self.sigterm_flag = True
self.raise_if_needed()

def set_sigint_flag(self, *args):
self.sigint_flag = True
self.raise_if_needed()

def connect_signals(self):
self.original_sigterm = signal.getsignal(signal.SIGTERM)
self.original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGTERM, self.set_flag)
signal.signal(signal.SIGINT, self.set_flag)
signal.signal(signal.SIGTERM, self.set_sigterm_flag)
signal.signal(signal.SIGINT, self.set_sigint_flag)
self.is_active = True

def restore_signals(self):
signal.signal(signal.SIGTERM, self.original_sigterm)
signal.signal(signal.SIGINT, self.original_sigint)
# if we got a signal while context manager was active, call parent methods.
if self.sigterm_flag:
if callable(self.original_sigterm):
self.original_sigterm()
if self.sigint_flag:
if callable(self.original_sigint):
self.original_sigint()
self.reset()


@@ -48,7 +63,7 @@ signal_state = SignalState()


def signal_callback():
return signal_state.sigterm_flag
return bool(signal_state.sigterm_flag or signal_state.sigint_flag)


def with_signal_handling(f):

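A hedged usage sketch for the reworked signal helpers: a long-running loop that polls signal_callback() and exits once SIGTERM or SIGINT has been seen. It assumes with_signal_handling passes the wrapped function's arguments through (its body is truncated above).

import time

from awx.main.tasks.signals import signal_callback, with_signal_handling


@with_signal_handling
def drain_queue(get_next_item, process_item):
    # keep working until a SIGTERM or SIGINT flips the SignalState flags
    while not signal_callback():
        item = get_next_item()
        if item is None:
            time.sleep(1)
            continue
        process_item(item)
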
@@ -48,22 +48,16 @@ from awx.main.models import (
|
||||
Inventory,
|
||||
SmartInventoryMembership,
|
||||
Job,
|
||||
HostMetric,
|
||||
convert_jsonfields,
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename, reaper
|
||||
from awx.main.utils.common import (
|
||||
get_type_for_model,
|
||||
ignore_inventory_computed_fields,
|
||||
ignore_inventory_group_removal,
|
||||
ScheduleWorkflowManager,
|
||||
ScheduleTaskManager,
|
||||
)
|
||||
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
||||
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main import analytics
|
||||
@@ -368,9 +362,7 @@ def send_notifications(notification_list, job_id=None):
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
|
||||
if is_run_threshold_reached(Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first(), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
analytics.gather()
|
||||
|
||||
|
||||
@@ -427,29 +419,6 @@ def cleanup_images_and_files():
|
||||
_cleanup_images_and_files()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
"""Run cleanup host metrics ~each month"""
|
||||
# TODO: move whole method to host_metrics in follow-up PR
|
||||
from awx.conf.models import Setting
|
||||
|
||||
if is_run_threshold_reached(
|
||||
Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first(), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
|
||||
):
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
logger.info("Executing cleanup_host_metrics")
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
logger.info("Finished cleanup_host_metrics")
|
||||
|
||||
|
||||
def is_run_threshold_reached(setting, threshold_seconds):
|
||||
from rest_framework.fields import DateTimeField
|
||||
|
||||
last_time = DateTimeField().to_internal_value(setting.value) if setting and setting.value else DateTimeField().to_internal_value('1970-01-01')
|
||||
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cluster_node_health_check(node):
|
||||
"""
|
||||
@@ -491,7 +460,6 @@ def execution_node_health_check(node):
|
||||
data = worker_info(node)
|
||||
|
||||
prior_capacity = instance.capacity
|
||||
|
||||
instance.save_health_data(
|
||||
version='ansible-runner-' + data.get('runner_version', '???'),
|
||||
cpu=data.get('cpu_count', 0),
|
||||
@@ -512,13 +480,37 @@ def execution_node_health_check(node):
|
||||
return data
|
||||
|
||||
|
||||
def inspect_execution_nodes(instance_list):
|
||||
with advisory_lock('inspect_execution_nodes_lock', wait=False):
|
||||
node_lookup = {inst.hostname: inst for inst in instance_list}
|
||||
def inspect_established_receptor_connections(mesh_status):
|
||||
'''
|
||||
Flips link state from ADDING to ESTABLISHED
|
||||
If the InstanceLink source and target match the entries
|
||||
in Known Connection Costs, flip to Established.
|
||||
'''
|
||||
from awx.main.models import InstanceLink
|
||||
|
||||
all_links = InstanceLink.objects.filter(link_state=InstanceLink.States.ADDING)
|
||||
if not all_links.exists():
|
||||
return
|
||||
active_receptor_conns = mesh_status['KnownConnectionCosts']
|
||||
update_links = []
|
||||
for link in all_links:
|
||||
if link.link_state != InstanceLink.States.REMOVING:
|
||||
if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}):
|
||||
if link.link_state is not InstanceLink.States.ESTABLISHED:
|
||||
link.link_state = InstanceLink.States.ESTABLISHED
|
||||
update_links.append(link)
|
||||
|
||||
InstanceLink.objects.bulk_update(update_links, ['link_state'])
|
||||
|
||||
|
||||
def inspect_execution_and_hop_nodes(instance_list):
|
||||
with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
|
||||
node_lookup = {inst.hostname: inst for inst in instance_list}
|
||||
ctl = get_receptor_ctl()
|
||||
mesh_status = ctl.simple_command('status')
|
||||
|
||||
inspect_established_receptor_connections(mesh_status)
|
||||
|
||||
nowtime = now()
|
||||
workers = mesh_status['Advertisements']
|
||||
|
||||
@@ -576,7 +568,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
this_inst = inst
|
||||
break
|
||||
|
||||
inspect_execution_nodes(instance_list)
|
||||
inspect_execution_and_hop_nodes(instance_list)
|
||||
|
||||
for inst in list(instance_list):
|
||||
if inst == this_inst:
|
||||
@@ -765,66 +757,21 @@ def awx_periodic_scheduler():
|
||||
new_unified_job.save(update_fields=['status', 'job_explanation'])
|
||||
new_unified_job.websocket_emit_status("failed")
|
||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||
state.save()
|
||||
|
||||
|
||||
def schedule_manager_success_or_error(instance):
|
||||
if instance.unifiedjob_blocked_jobs.exists():
|
||||
ScheduleTaskManager().schedule()
|
||||
if instance.spawned_by_workflow:
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
except ObjectDoesNotExist:
|
||||
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
|
||||
return
|
||||
if not instance:
|
||||
return
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_error(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
except ObjectDoesNotExist:
|
||||
logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id']))
|
||||
return
|
||||
if not instance:
|
||||
return
|
||||
|
||||
subtasks = instance.get_jobs_fail_chain() # reverse of dependent_jobs mostly
|
||||
logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}')
|
||||
|
||||
deps_of_deps = {}
|
||||
|
||||
for subtask in subtasks:
|
||||
if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'):
|
||||
# If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity
|
||||
blame_job = deps_of_deps.get(subtask.id, instance)
|
||||
subtask.status = 'failed'
|
||||
subtask.failed = True
|
||||
if not subtask.job_explanation:
|
||||
subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(blame_job)),
|
||||
blame_job.name,
|
||||
blame_job.id,
|
||||
)
|
||||
subtask.save()
|
||||
subtask.websocket_emit_status("failed")
|
||||
|
||||
for sub_subtask in subtask.get_jobs_fail_chain():
|
||||
deps_of_deps[sub_subtask.id] = subtask
|
||||
|
||||
# We only send 1 job complete message since all the job completion message
|
||||
# handling does is trigger the scheduler. If we extend the functionality of
|
||||
# what the job complete message handler does then we may want to send a
|
||||
# completion event for each job here.
|
||||
schedule_manager_success_or_error(instance)
|
||||
def handle_failure_notifications(task_ids):
"""A task-ified version of the method that sends notifications."""
found_task_ids = set()
for instance in UnifiedJob.objects.filter(id__in=task_ids):
found_task_ids.add(instance.id)
try:
instance.send_notification_templates('failed')
except Exception:
logger.exception(f'Error preparing notifications for task {instance.id}')
deleted_tasks = set(task_ids) - found_task_ids
if deleted_tasks:
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')


@task(queue=get_task_queuename)

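A sketch of the producer side of handle_failure_notifications, mirroring the process_tasks() change earlier in this diff: ids of jobs that failed pre-start checks are collected while processing pending tasks, then dispatched once via .delay(). The pending_tasks iterable and failed_pre_start predicate are illustrative stand-ins.

def fail_early_and_notify(pending_tasks, failed_pre_start):
    pre_start_failed = []
    for task in pending_tasks:
        if failed_pre_start(task):            # pre-start check failed for this job
            task.status = 'failed'
            task.save(update_fields=['status'])
            pre_start_failed.append(task.id)

    if pre_start_failed:
        # deferred import, mirroring the diff
        from awx.main.tasks.system import handle_failure_notifications

        handle_failure_notifications.delay(pre_start_failed)
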
@@ -84,5 +84,6 @@ def test_custom_hostname_regex(post, admin_user):
"hostname": value[0],
"node_type": "execution",
"node_state": "installed",
"peers": [],
}
post(url=url, user=admin_user, data=data, expect=value[1])

342
awx/main/tests/functional/api/test_instance_peers.py
Normal file
@@ -0,0 +1,342 @@
|
||||
import pytest
|
||||
import yaml
|
||||
import itertools
|
||||
from unittest import mock
|
||||
|
||||
from django.db.utils import IntegrityError
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models import Instance
|
||||
from awx.api.views.instance_install_bundle import generate_group_vars_all_yml
|
||||
|
||||
|
||||
def has_peer(group_vars, peer):
|
||||
peers = group_vars.get('receptor_peers', [])
|
||||
for p in peers:
|
||||
if f"{p['host']}:{p['port']}" == peer:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestPeers:
|
||||
@pytest.fixture(autouse=True)
|
||||
def configure_settings(self, settings):
|
||||
settings.IS_K8S = True
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_prevent_peering_to_self(self, node_type):
|
||||
"""
|
||||
cannot peer to self
|
||||
"""
|
||||
control_instance = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
with pytest.raises(IntegrityError):
|
||||
control_instance.peers.add(control_instance)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution'])
|
||||
def test_creating_node(self, node_type, admin_user, post):
|
||||
"""
|
||||
can only add hop and execution nodes via API
|
||||
"""
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "node_type": node_type},
|
||||
user=admin_user,
|
||||
expect=400 if node_type in ['control', 'hybrid'] else 201,
|
||||
)
|
||||
|
||||
def test_changing_node_type(self, admin_user, patch):
|
||||
"""
|
||||
cannot change node type
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='abc', node_type="hop")
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_type": "execution"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['hop', 'execution'])
|
||||
def test_listener_port_null(self, node_type, admin_user, post):
|
||||
"""
|
||||
listener_port can be None
|
||||
"""
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "node_type": node_type, "listener_port": None},
|
||||
user=admin_user,
|
||||
expect=201,
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)])
|
||||
def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user):
|
||||
"""
|
||||
only hop and execution nodes can have peers_from_control_nodes set to True
|
||||
"""
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789},
|
||||
user=admin_user,
|
||||
expect=201 if allowed else 400,
|
||||
)
|
||||
|
||||
def test_listener_port_is_required(self, admin_user, post):
|
||||
"""
|
||||
if adding instance to peers list, that instance must have listener_port set
|
||||
"""
|
||||
Instance.objects.create(hostname='abc', node_type="hop", listener_port=None)
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post):
|
||||
"""
|
||||
if peers_from_control_nodes is True, listener_port must an integer
|
||||
Assert that all other combinations are allowed
|
||||
"""
|
||||
for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])):
|
||||
node_type, peers_from, listener_port = item
|
||||
# only disallowed case is when peers_from is True and listener port is None
|
||||
disallowed = peers_from and not listener_port
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port},
|
||||
user=admin_user,
|
||||
expect=400 if disallowed else 201,
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
|
||||
"""
|
||||
for control nodes, peers field should not be
|
||||
modified directly via patch.
|
||||
"""
|
||||
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
|
||||
assert [hop1] == list(control.peers.all()) # only hop1 should be peered
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": ["hop2"]},
|
||||
user=admin_user,
|
||||
expect=400, # cannot add peers directly
|
||||
)
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": ["hop1"]},
|
||||
user=admin_user,
|
||||
expect=200, # patching with current peers list should be okay
|
||||
)
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": []},
|
||||
user=admin_user,
|
||||
expect=400, # cannot remove peers directly
|
||||
)
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={},
|
||||
user=admin_user,
|
||||
expect=200, # patching without data should be fine too
|
||||
)
|
||||
# patch hop2
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||
data={"peers_from_control_nodes": True},
|
||||
user=admin_user,
|
||||
expect=200, # patching without data should be fine too
|
||||
)
|
||||
assert {hop1, hop2} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node
|
||||
|
||||
def test_disallow_changing_hostname(self, admin_user, patch):
|
||||
"""
|
||||
cannot change hostname
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"hostname": "hop2"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_disallow_changing_node_state(self, admin_user, patch):
|
||||
"""
|
||||
only allow setting to deprovisioning
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop', node_state='installed')
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_state": "deprovisioning"},
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_state": "ready"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_control_node_automatically_peers(self, node_type):
|
||||
"""
|
||||
a new control node should automatically
|
||||
peer to hop
|
||||
|
||||
peer to hop should be removed if hop is deleted
|
||||
"""
|
||||
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
|
||||
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
assert hop in control.peers.all()
|
||||
hop.delete()
|
||||
assert not control.peers.exists()
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_control_node_retains_other_peers(self, node_type):
|
||||
"""
|
||||
if a new node comes online, other peer relationships should
|
||||
remain intact
|
||||
"""
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
hop1.peers.add(hop2)
|
||||
|
||||
# a control node is added
|
||||
Instance.objects.create(hostname='control', node_type=node_type, listener_port=None)
|
||||
|
||||
assert hop1.peers.exists()
|
||||
|
||||
def test_group_vars(self, get, admin_user):
|
||||
"""
|
||||
control > hop1 > hop2 < execution
|
||||
"""
|
||||
control = Instance.objects.create(hostname='control', node_type='control', listener_port=None)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789)
|
||||
|
||||
execution.peers.add(hop2)
|
||||
hop1.peers.add(hop2)
|
||||
|
||||
control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
|
||||
hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
|
||||
hop2_vars = yaml.safe_load(generate_group_vars_all_yml(hop2))
|
||||
execution_vars = yaml.safe_load(generate_group_vars_all_yml(execution))
|
||||
|
||||
# control group vars assertions
|
||||
assert has_peer(control_vars, 'hop1:6789')
|
||||
assert not has_peer(control_vars, 'hop2:6789')
|
||||
assert not has_peer(control_vars, 'execution:6789')
|
||||
assert not control_vars.get('receptor_listener', False)
|
||||
|
||||
# hop1 group vars assertions
|
||||
assert has_peer(hop1_vars, 'hop2:6789')
|
||||
assert not has_peer(hop1_vars, 'execution:6789')
|
||||
assert hop1_vars.get('receptor_listener', False)
|
||||
|
||||
# hop2 group vars assertions
|
||||
assert not has_peer(hop2_vars, 'hop1:6789')
|
||||
assert not has_peer(hop2_vars, 'execution:6789')
|
||||
assert hop2_vars.get('receptor_listener', False)
|
||||
assert hop2_vars.get('receptor_peers', []) == []
|
||||
|
||||
# execution group vars assertions
|
||||
assert has_peer(execution_vars, 'hop2:6789')
|
||||
assert not has_peer(execution_vars, 'hop1:6789')
|
||||
assert execution_vars.get('receptor_listener', False)
|
||||
|
||||
def test_write_receptor_config_called(self):
|
||||
"""
|
||||
Assert that write_receptor_config is called
|
||||
when certain instances are created, or if
|
||||
peers_from_control_nodes changes.
|
||||
In general, write_receptor_config should only
|
||||
be called when necessary, as it will reload
|
||||
receptor backend connections which is not trivial.
|
||||
"""
|
||||
with mock.patch('awx.main.models.ha.schedule_write_receptor_config') as write_method:
|
||||
# new control instance but nothing to peer to (no)
|
||||
control = Instance.objects.create(hostname='control1', node_type='control')
|
||||
write_method.assert_not_called()
|
||||
|
||||
# new hop node with peers_from_control_nodes False (no)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
hop1.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
# new hop node with peers_from_control_nodes True (yes)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
# new control instance but with something to peer to (yes)
|
||||
Instance.objects.create(hostname='control2', node_type='control')
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
# new hop node with peers_from_control_nodes False and peered to another hop node (no)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
hop2.peers.add(hop1)
|
||||
hop2.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
# changing peers_from_control_nodes to False (yes)
|
||||
hop1.peers_from_control_nodes = False
|
||||
hop1.save()
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
# deleting hop node that has peers_from_control_nodes to False (no)
|
||||
hop1.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
# deleting control nodes (no)
|
||||
control.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
def test_write_receptor_config_data(self):
|
||||
"""
|
||||
Assert the correct peers are included in data that will
|
||||
be written to receptor.conf
|
||||
"""
|
||||
from awx.main.tasks.receptor import RECEPTOR_CONFIG_STARTER
|
||||
|
||||
with mock.patch('awx.main.tasks.receptor.read_receptor_config', return_value=list(RECEPTOR_CONFIG_STARTER)):
|
||||
from awx.main.tasks.receptor import generate_config_data
|
||||
|
||||
_, should_update = generate_config_data()
|
||||
assert not should_update
|
||||
|
||||
# not peered, so config file should not be updated
|
||||
for i in range(3):
|
||||
Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False)
|
||||
|
||||
_, should_update = generate_config_data()
|
||||
assert not should_update
|
||||
|
||||
# peered, so config file should be updated
|
||||
expected_peers = []
|
||||
for i in range(3):
|
||||
expected_peers.append(f"hop-{i}:6789")
|
||||
Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
|
||||
for i in range(3):
|
||||
expected_peers.append(f"exYes-{i}:6789")
|
||||
Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True)
|
||||
|
||||
new_config, should_update = generate_config_data()
|
||||
assert should_update
|
||||
|
||||
peers = []
|
||||
for entry in new_config:
|
||||
for key, value in entry.items():
|
||||
if key == "tcp-peer":
|
||||
peers.append(value['address'])
|
||||
|
||||
assert set(expected_peers) == set(peers)
|
||||
@@ -2,12 +2,12 @@ from unittest import mock
|
||||
import pytest
|
||||
import json
|
||||
|
||||
from ansible_base.utils.models import get_type_for_model
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
from awx.main.models.jobs import JobTemplate, Job
|
||||
from awx.main.models.activity_stream import ActivityStream
|
||||
from awx.main.access import JobTemplateAccess
|
||||
from awx.main.utils.common import get_type_for_model
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.tasks.host_metrics import HostMetricTask
|
||||
from awx.main.models.inventory import HostMetric
|
||||
from awx.main.tests.factories.fixtures import mk_host_metric
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from django.conf import settings
|
||||
from django.utils import timezone
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_no_host_metrics():
|
||||
"""No-crash test"""
|
||||
assert HostMetric.objects.count() == 0
|
||||
HostMetricTask().cleanup(soft_threshold=0, hard_threshold=0)
|
||||
HostMetricTask().cleanup(soft_threshold=24, hard_threshold=42)
|
||||
assert HostMetric.objects.count() == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_delete_exception():
|
||||
"""Crash test"""
|
||||
with pytest.raises(ValueError):
|
||||
HostMetricTask().soft_cleanup("")
|
||||
with pytest.raises(TypeError):
|
||||
HostMetricTask().hard_cleanup(set())
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, 20])
|
||||
def test_soft_delete(threshold):
|
||||
"""Metrics with last_automation < threshold are updated to deleted=True"""
|
||||
mk_host_metric('host_1', first_automation=ago(months=1), last_automation=ago(months=1), deleted=False)
|
||||
mk_host_metric('host_2', first_automation=ago(months=1), last_automation=ago(months=1), deleted=True)
|
||||
mk_host_metric('host_3', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=False)
|
||||
mk_host_metric('host_4', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=True)
|
||||
mk_host_metric('host_5', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=False)
|
||||
mk_host_metric('host_6', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=True)
|
||||
mk_host_metric('host_7', first_automation=ago(months=1), last_automation=ago(months=42), deleted=False)
|
||||
mk_host_metric('host_8', first_automation=ago(months=1), last_automation=ago(months=42), deleted=True)
|
||||
|
||||
assert HostMetric.objects.count() == 8
|
||||
assert HostMetric.active_objects.count() == 4
|
||||
|
||||
for i in range(2):
|
||||
HostMetricTask().cleanup(soft_threshold=threshold)
|
||||
assert HostMetric.objects.count() == 8
|
||||
|
||||
hostnames = set(HostMetric.objects.filter(deleted=False).order_by('hostname').values_list('hostname', flat=True))
|
||||
assert hostnames == {'host_1', 'host_3'}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD, 20])
|
||||
def test_hard_delete(threshold):
|
||||
"""Metrics with last_deleted < threshold and deleted=True are deleted from the db"""
|
||||
mk_host_metric('host_1', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=False)
|
||||
mk_host_metric('host_2', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=True)
|
||||
mk_host_metric('host_3', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=False)
|
||||
mk_host_metric('host_4', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=True)
|
||||
mk_host_metric('host_5', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=False)
|
||||
mk_host_metric('host_6', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=True)
|
||||
mk_host_metric('host_7', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=False)
|
||||
mk_host_metric('host_8', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=True)
|
||||
|
||||
assert HostMetric.objects.count() == 8
|
||||
assert HostMetric.active_objects.count() == 4
|
||||
|
||||
for i in range(2):
|
||||
HostMetricTask().cleanup(hard_threshold=threshold)
|
||||
assert HostMetric.objects.count() == 6
|
||||
|
||||
hostnames = set(HostMetric.objects.order_by('hostname').values_list('hostname', flat=True))
|
||||
assert hostnames == {'host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_7'}
|
||||
|
||||
|
||||
def ago(months=0, hours=0):
|
||||
return timezone.now() - relativedelta(months=months, hours=hours)
|
||||
@@ -309,3 +309,139 @@ def test_bulk_job_set_all_prompt(job_template, organization, inventory, project,
|
||||
assert node[0].limit == 'kansas'
|
||||
assert node[0].skip_tags == 'foobar'
|
||||
assert node[0].job_tags == 'untagged'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('num_hosts, num_queries', [(1, 70), (10, 150), (25, 250)])
|
||||
def test_bulk_host_delete_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
|
||||
'''
|
||||
If I am a...
|
||||
org admin
|
||||
inventory admin at org level
|
||||
admin of a particular inventory
|
||||
superuser
|
||||
|
||||
Bulk Host delete should take under a certain number of queries
|
||||
'''
|
||||
users_list = setup_admin_users_list(organization, inventory, user)
|
||||
for u in users_list:
|
||||
hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
|
||||
with django_assert_max_num_queries(num_queries):
|
||||
bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, u, expect=201).data
|
||||
assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {u}"
|
||||
hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
|
||||
bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
|
||||
assert len(bulk_host_delete_response['hosts'].keys()) == len(hosts), f"unexpected number of hosts deleted for user {u}"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_host_delete_rbac(organization, inventory, post, get, user):
|
||||
'''
|
||||
If I am a...
|
||||
org admin
|
||||
inventory admin at org level
|
||||
admin of a particular invenotry
|
||||
... I can bulk delete hosts
|
||||
|
||||
Everyone else cannot
|
||||
'''
|
||||
admin_users_list = setup_admin_users_list(organization, inventory, user)
|
||||
users_list = setup_none_admin_uses_list(organization, inventory, user)
|
||||
|
||||
for indx, u in enumerate(admin_users_list):
|
||||
bulk_host_create_response = post(
|
||||
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar-{indx}'}]}, u, expect=201
|
||||
).data
|
||||
assert len(bulk_host_create_response['hosts']) == 1, f"unexpected number of hosts created for user {u}"
|
||||
assert Host.objects.filter(inventory__id=inventory.id)[0].name == f'foobar-{indx}'
|
||||
hosts_ids_created = get_inventory_hosts(get, inventory.id, u)
|
||||
bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, u, expect=201).data
|
||||
assert len(bulk_host_delete_response['hosts'].keys()) == 1, f"unexpected number of hosts deleted by user {u}"
|
||||
|
||||
for indx, create_u in enumerate(admin_users_list):
|
||||
bulk_host_create_response = post(
|
||||
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar2-{indx}'}]}, create_u, expect=201
|
||||
).data
|
||||
print(bulk_host_create_response)
|
||||
assert bulk_host_create_response['hosts'][0]['name'] == f'foobar2-{indx}'
|
||||
hosts_ids_created = get_inventory_hosts(get, inventory.id, create_u)
|
||||
print(f"Try to delete {hosts_ids_created}")
|
||||
for delete_u in users_list:
|
||||
bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, delete_u, expect=403).data
|
||||
assert "Lack permissions to delete hosts from this inventory." in bulk_host_delete_response['inventories'].values()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_host_delete_from_multiple_inv(organization, inventory, post, get, user):
|
||||
'''
|
||||
If I am inventory admin at org level
|
||||
|
||||
Bulk Host delete should be enabled only on my inventory
|
||||
'''
|
||||
num_hosts = 10
|
||||
inventory.organization = organization
|
||||
|
||||
# Create second inventory
|
||||
inv2 = organization.inventories.create(name="second-test-inv")
|
||||
inv2.organization = organization
|
||||
admin2_user = user('inventory2_admin', False)
|
||||
inv2.admin_role.members.add(admin2_user)
|
||||
|
||||
admin_user = user('inventory_admin', False)
|
||||
inventory.admin_role.members.add(admin_user)
|
||||
|
||||
organization.member_role.members.add(admin_user)
|
||||
organization.member_role.members.add(admin2_user)
|
||||
|
||||
hosts = [{'name': str(uuid4())} for i in range(num_hosts)]
|
||||
hosts2 = [{'name': str(uuid4())} for i in range(num_hosts)]
|
||||
|
||||
# create hosts in each of the inventories
|
||||
bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, admin_user, expect=201).data
|
||||
assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin_user}"
|
||||
|
||||
bulk_host_create_response2 = post(reverse('api:bulk_host_create'), {'inventory': inv2.id, 'hosts': hosts2}, admin2_user, expect=201).data
|
||||
assert len(bulk_host_create_response2['hosts']) == len(hosts), f"unexpected number of hosts created for user {admin2_user}"
|
||||
|
||||
# get all hosts ids - from both inventories
|
||||
hosts_ids_created = get_inventory_hosts(get, inventory.id, admin_user)
|
||||
hosts_ids_created += get_inventory_hosts(get, inv2.id, admin2_user)
|
||||
|
||||
expected_error = "Lack permissions to delete hosts from this inventory."
|
||||
# try to delete ALL hosts with admin user of inventory 1.
|
||||
for inv_name, invadmin in zip([inv2.name, inventory.name], [admin_user, admin2_user]):
|
||||
bulk_host_delete_response = post(reverse('api:bulk_host_delete'), {'hosts': hosts_ids_created}, invadmin, expect=403).data
|
||||
result_message = bulk_host_delete_response['inventories'][inv_name]
|
||||
assert result_message == expected_error, f"deleted hosts without permission by user {invadmin}"
|
||||
|
||||
|
||||
def setup_admin_users_list(organization, inventory, user):
|
||||
inventory.organization = organization
|
||||
inventory_admin = user('inventory_admin', False)
|
||||
org_admin = user('org_admin', False)
|
||||
org_inv_admin = user('org_admin', False)
|
||||
superuser = user('admin', True)
|
||||
for u in [org_admin, org_inv_admin, inventory_admin]:
|
||||
organization.member_role.members.add(u)
|
||||
organization.admin_role.members.add(org_admin)
|
||||
organization.inventory_admin_role.members.add(org_inv_admin)
|
||||
inventory.admin_role.members.add(inventory_admin)
|
||||
return [inventory_admin, org_inv_admin, superuser, org_admin]
|
||||
|
||||
|
||||
def setup_none_admin_uses_list(organization, inventory, user):
|
||||
inventory.organization = organization
|
||||
auditor = user('auditor', False)
|
||||
member = user('member', False)
|
||||
use_inv_member = user('member', False)
|
||||
for u in [auditor, member, use_inv_member]:
|
||||
organization.member_role.members.add(u)
|
||||
inventory.use_role.members.add(use_inv_member)
|
||||
organization.auditor_role.members.add(auditor)
|
||||
return [auditor, member, use_inv_member]
|
||||
|
||||
|
||||
def get_inventory_hosts(get, inv_id, use_user):
|
||||
data = get(reverse('api:inventory_hosts_list', kwargs={'pk': inv_id}), use_user, expect=200).data
|
||||
results = [host['id'] for host in data['results']]
|
||||
return results
|
||||
|
||||
@@ -40,6 +40,26 @@ def test_hashivault_kubernetes_auth():
    assert res == expected_res


def test_hashivault_client_cert_auth_explicit_role():
    kwargs = {
        'client_cert_role': 'test-cert-1',
    }
    expected_res = {
        'name': 'test-cert-1',
    }
    res = hashivault.client_cert_auth(**kwargs)
    assert res == expected_res


def test_hashivault_client_cert_auth_no_role():
    kwargs = {}
    expected_res = {
        'name': None,
    }
    res = hashivault.client_cert_auth(**kwargs)
    assert res == expected_res


def test_hashivault_handle_auth_token():
    kwargs = {
        'token': 'the_token',
@@ -73,6 +93,43 @@ def test_hashivault_handle_auth_kubernetes():
    assert token == 'the_token'


def test_hashivault_handle_auth_client_cert():
    kwargs = {
        'client_cert_public': "foo",
        'client_cert_private': "bar",
        'client_cert_role': 'test-cert-1',
    }
    auth_params = {
        'name': 'test-cert-1',
    }
    with mock.patch.object(hashivault, 'method_auth') as method_mock:
        method_mock.return_value = 'the_token'
        token = hashivault.handle_auth(**kwargs)
        method_mock.assert_called_with(**kwargs, auth_param=auth_params)
    assert token == 'the_token'


def test_hashivault_handle_auth_not_enough_args():
    with pytest.raises(Exception):
        hashivault.handle_auth()


class TestDelineaImports:
    """
    These modules have a try-except for ImportError which will allow using the older library,
    but we do not want the awx_devel image to have the older library,
    so these tests are designed to fail if they wind up using the fallback import.
    """

    def test_dsv_import(self):
        from awx.main.credential_plugins.dsv import SecretsVault  # noqa

        # assert this module as opposed to older thycotic.secrets.vault
        assert SecretsVault.__module__ == 'delinea.secrets.vault'

    def test_tss_import(self):
        from awx.main.credential_plugins.tss import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret  # noqa

        for cls in (DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret):
            # assert this module as opposed to older thycotic.secrets.server
            assert cls.__module__ == 'delinea.secrets.server'
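For reference, the fallback the TestDelineaImports docstring describes is the usual try/except ImportError pattern; a minimal sketch of what the credential plugin modules presumably do (the exact wrapper code in dsv.py and tss.py may differ):

# Sketch of the import fallback the tests above guard against (illustrative only).
try:
    # preferred: the renamed "delinea" SDK
    from delinea.secrets.vault import SecretsVault
except ImportError:
    # legacy fallback: the older "thycotic" SDK, which awx_devel should no longer need
    from thycotic.secrets.vault import SecretsVault  # noqa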
@@ -37,9 +37,9 @@ def test_orphan_unified_job_creation(instance, inventory):


@pytest.mark.django_db
@mock.patch('awx.main.tasks.system.inspect_execution_nodes', lambda *args, **kwargs: None)
@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu: 8)
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem: 62)
@mock.patch('awx.main.tasks.system.inspect_execution_and_hop_nodes', lambda *args, **kwargs: None)
@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu, is_control_node: 8)
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 62)
def test_job_capacity_and_with_inactive_node():
    i = Instance.objects.create(hostname='test-1')
    i.save_health_data('18.0.1', 2, 8000)
awx/main/tests/functional/test_migrations.py (Normal file, 44 lines)
@@ -0,0 +1,44 @@
import pytest

from django_test_migrations.plan import all_migrations, nodes_to_tuples


"""
Most tests that live in here can probably be deleted at some point. They are mainly
for a developer. When the AWX versions that users upgrade from fall out of support,
the migration tests here can be deleted. That is also a good time to squash. Squashing
will likely mess with the tests that live here.

The smoke test should be kept in here. The smoke test ensures that our migrations
continue to work when sqlite is the backing database (vs. the default DB of postgres).
"""


@pytest.mark.django_db
class TestMigrationSmoke:
    def test_happy_path(self, migrator):
        """
        This smoke test runs all the migrations.

        Example of how to use django-test-migrations to invoke particular migration(s)
        while weaving in object creation and assertions.

        Note that this is more than just an example. It is a smoke test because it runs ALL
        the migrations. Our "normal" unit tests subvert running the migrations because it is slow.
        """
        migration_nodes = all_migrations('default')
        migration_tuples = nodes_to_tuples(migration_nodes)
        final_migration = migration_tuples[-1]

        migrator.apply_initial_migration(('main', None))
        # I just picked a newish migration at the time of writing this.
        # If someone from the future finds themselves here because they are squashing migrations,
        # it is fine to change the 0180_... below to some other newish migration.
        intermediate_state = migrator.apply_tested_migration(('main', '0180_add_hostmetric_fields'))

        Instance = intermediate_state.apps.get_model('main', 'Instance')
        # Create any old object in the database
        Instance.objects.create(hostname='foobar', node_type='control')

        final_state = migrator.apply_tested_migration(final_migration)
        Instance = final_state.apps.get_model('main', 'Instance')
        assert Instance.objects.filter(hostname='foobar').count() == 1
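The migrator fixture shown above can also drive narrower checks against a single data migration; a hypothetical sketch following the same pattern (the '0181_some_later_migration' name is a placeholder, not a real AWX migration):

    # Hypothetical companion test to test_happy_path; the target migration name is made up.
    def test_instance_survives_follow_up_migration(self, migrator):
        old_state = migrator.apply_initial_migration(('main', '0180_add_hostmetric_fields'))
        Instance = old_state.apps.get_model('main', 'Instance')
        Instance.objects.create(hostname='pre-upgrade-node', node_type='control')

        # apply only up to the (placeholder) migration under test, then re-read the model
        new_state = migrator.apply_tested_migration(('main', '0181_some_later_migration'))
        Instance = new_state.apps.get_model('main', 'Instance')
        assert Instance.objects.filter(hostname='pre-upgrade-node').exists()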
@@ -208,6 +208,6 @@ def test_auto_parenting():

@pytest.mark.django_db
def test_update_parents_keeps_teams(team, project):
    project.update_role.parents.add(team.member_role)
    assert team.member_role in project.update_role  # test prep sanity check
    assert list(Project.accessible_objects(team.member_role, 'update_role')) == [project]  # test prep sanity check
    update_role_parentage_for_instance(project)
    assert team.member_role in project.update_role  # actual assertion
    assert list(Project.accessible_objects(team.member_role, 'update_role')) == [project]  # actual assertion
@@ -92,7 +92,7 @@ def test_team_accessible_by(team, user, project):
    u = user('team_member', False)

    team.member_role.children.add(project.use_role)
    assert team in project.read_role
    assert list(Project.accessible_objects(team.member_role, 'read_role')) == [project]
    assert u not in project.read_role

    team.member_role.members.add(u)
@@ -104,7 +104,7 @@ def test_team_accessible_objects(team, user, project):
    u = user('team_member', False)

    team.member_role.children.add(project.use_role)
    assert len(Project.accessible_objects(team, 'read_role')) == 1
    assert len(Project.accessible_objects(team.member_role, 'read_role')) == 1
    assert not Project.accessible_objects(u, 'read_role')

    team.member_role.members.add(u)
@@ -122,25 +122,6 @@ def test_team_org_resource_role(ext_auth, organization, rando, org_admin, team):
    ] == [True for i in range(2)]


@pytest.mark.django_db
def test_user_accessible_objects(user, organization):
    """
    We cannot directly use accessible_objects for the User model because
    both editing and read permissions are subject to complex business logic
    """
    admin = user('admin', False)
    u = user('john', False)
    access = UserAccess(admin)
    assert access.get_queryset().count() == 1  # can only see himself

    organization.member_role.members.add(u)
    organization.member_role.members.add(admin)
    assert access.get_queryset().count() == 2

    organization.member_role.members.remove(u)
    assert access.get_queryset().count() == 1


@pytest.mark.django_db
def test_org_admin_create_sys_auditor(org_admin):
    access = UserAccess(org_admin)
@@ -5,8 +5,8 @@ import tempfile
import shutil

from awx.main.tasks.jobs import RunJob
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
from awx.main.models import Instance, Job


@pytest.fixture
@@ -73,17 +73,3 @@ def test_does_not_run_reaped_job(mocker, mock_me):
    job.refresh_from_db()
    assert job.status == 'failed'
    mock_run.assert_not_called()


@pytest.mark.django_db
def test_handle_work_error_nested(project, inventory_source):
    pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
    iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
    job = Job.objects.create(status='pending')
    iu.dependent_jobs.add(pu)
    job.dependent_jobs.add(pu, iu)
    handle_work_error({'type': 'project_update', 'id': pu.id})
    iu.refresh_from_db()
    job.refresh_from_db()
    assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
    assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'
@@ -3,15 +3,13 @@
import pytest

# Django
from django.core.exceptions import FieldDoesNotExist
from rest_framework.exceptions import PermissionDenied

from rest_framework.exceptions import PermissionDenied, ParseError
from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend

from awx.api.filters import FieldLookupBackend, OrderByBackend, get_field_from_path
from awx.main.models import (
    AdHocCommand,
    ActivityStream,
    Credential,
    Job,
    JobTemplate,
    SystemJob,
@@ -20,88 +18,11 @@ from awx.main.models import (
    WorkflowJob,
    WorkflowJobTemplate,
    WorkflowJobOptions,
    InventorySource,
    JobEvent,
)
from awx.main.models.oauth import OAuth2Application
from awx.main.models.jobs import JobOptions


def test_related():
    field_lookup = FieldLookupBackend()
    lookup = '__'.join(['inventory', 'organization', 'pk'])
    field, new_lookup = field_lookup.get_field_from_lookup(InventorySource, lookup)
    print(field)
    print(new_lookup)


def test_invalid_filter_key():
    field_lookup = FieldLookupBackend()
    # FieldDoesNotExist is caught and converted to ParseError by filter_queryset
    with pytest.raises(FieldDoesNotExist) as excinfo:
        field_lookup.value_to_python(JobEvent, 'event_data.task_action', 'foo')
    assert 'has no field named' in str(excinfo)


def test_invalid_field_hop():
    with pytest.raises(ParseError) as excinfo:
        get_field_from_path(Credential, 'organization__description__user')
    assert 'No related model for' in str(excinfo)


def test_invalid_order_by_key():
    field_order_by = OrderByBackend()
    with pytest.raises(ParseError) as excinfo:
        [f for f in field_order_by._validate_ordering_fields(JobEvent, ('event_data.task_action',))]
    assert 'has no field named' in str(excinfo)


@pytest.mark.parametrize(u"empty_value", [u'', ''])
def test_empty_in(empty_value):
    field_lookup = FieldLookupBackend()
    with pytest.raises(ValueError) as excinfo:
        field_lookup.value_to_python(JobTemplate, 'project__name__in', empty_value)
    assert 'empty value for __in' in str(excinfo.value)


@pytest.mark.parametrize(u"valid_value", [u'foo', u'foo,'])
def test_valid_in(valid_value):
    field_lookup = FieldLookupBackend()
    value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__in', valid_value)
    assert 'foo' in value


def test_invalid_field():
    invalid_field = u"ヽヾ"
    field_lookup = FieldLookupBackend()
    with pytest.raises(ValueError) as excinfo:
        field_lookup.value_to_python(WorkflowJobTemplate, invalid_field, 'foo')
    assert 'is not an allowed field name. Must be ascii encodable.' in str(excinfo.value)


def test_valid_iexact():
    field_lookup = FieldLookupBackend()
    value, new_lookup, _ = field_lookup.value_to_python(JobTemplate, 'project__name__iexact', 'foo')
    assert 'foo' in value


def test_invalid_iexact():
    field_lookup = FieldLookupBackend()
    with pytest.raises(ValueError) as excinfo:
        field_lookup.value_to_python(Job, 'id__iexact', '1')
    assert 'is not a text field and cannot be filtered by case-insensitive search' in str(excinfo.value)


@pytest.mark.parametrize('lookup_suffix', ['', 'contains', 'startswith', 'in'])
@pytest.mark.parametrize('password_field', Credential.PASSWORD_FIELDS)
def test_filter_on_password_field(password_field, lookup_suffix):
    field_lookup = FieldLookupBackend()
    lookup = '__'.join(filter(None, [password_field, lookup_suffix]))
    with pytest.raises(PermissionDenied) as excinfo:
        field, new_lookup = field_lookup.get_field_from_lookup(Credential, lookup)
    assert 'not allowed' in str(excinfo.value)


@pytest.mark.parametrize(
    'model, query',
    [
@@ -128,10 +49,3 @@ def test_filter_sensitive_fields_and_relations(model, query):
    with pytest.raises(PermissionDenied) as excinfo:
        field, new_lookup = field_lookup.get_field_from_lookup(model, query)
    assert 'not allowed' in str(excinfo.value)


def test_looping_filters_prohibited():
    field_lookup = FieldLookupBackend()
    with pytest.raises(ParseError) as loop_exc:
        field_lookup.get_field_from_lookup(Job, 'job_events__job__job_events')
    assert 'job_events' in str(loop_exc.value)
@@ -47,7 +47,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")',  # noqa
                'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")',  # noqa
            ]
        ),
    ),
@@ -61,7 +61,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")',
                'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")',  # noqa
                'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")',  # noqa
            ]
        ),
    ),
@@ -75,7 +75,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")',
                'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")',  # noqa
                'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")',  # noqa
            ]
        ),
    ),
@@ -89,7 +89,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -103,7 +103,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -117,7 +117,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -131,7 +131,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -145,7 +145,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -159,7 +159,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
                'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")',  # noqa
            ]
        ),
    ),
@@ -173,7 +173,7 @@ data_loggly = {
        '\n'.join(
            [
                'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
                'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")',  # noqa
                'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")',  # noqa
            ]
        ),
    ),
@@ -36,7 +36,9 @@ def test_SYSTEM_TASK_ABS_MEM_conversion(value, converted_value, mem_capacity):
        mock_settings.IS_K8S = True
        assert convert_mem_str_to_bytes(value) == converted_value
        assert get_corrected_memory(-1) == converted_value
        assert get_mem_effective_capacity(-1) == mem_capacity
        assert get_mem_effective_capacity(1, is_control_node=True) == mem_capacity
        # SYSTEM_TASK_ABS_MEM should not affect memory and capacity for execution nodes
        assert get_mem_effective_capacity(2147483648, is_control_node=False) == 20


@pytest.mark.parametrize(
@@ -58,4 +60,6 @@ def test_SYSTEM_TASK_ABS_CPU_conversion(value, converted_value, cpu_capacity):
        mock_settings.SYSTEM_TASK_FORKS_CPU = 4
        assert convert_cpu_str_to_decimal_cpu(value) == converted_value
        assert get_corrected_cpu(-1) == converted_value
        assert get_cpu_effective_capacity(-1) == cpu_capacity
        assert get_cpu_effective_capacity(-1, is_control_node=True) == cpu_capacity
        # SYSTEM_TASK_ABS_CPU should not affect cpu count and capacity for execution nodes
        assert get_cpu_effective_capacity(2.0, is_control_node=False) == 8
@@ -1,8 +1,43 @@
import signal
import functools

from awx.main.tasks.signals import signal_state, signal_callback, with_signal_handling


def pytest_sigint():
    pytest_sigint.called_count += 1


def pytest_sigterm():
    pytest_sigterm.called_count += 1


def tmp_signals_for_test(func):
    """
    When our internal signal handler runs, it calls the original signal
    handler once its own work is finished.
    This would crash the test runners normally, because those methods will
    shut down the process.
    So this is a decorator to safely replace existing signal handlers
    with new signal handlers that do nothing so that tests do not crash.
    """

    @functools.wraps(func)
    def wrapper():
        original_sigterm = signal.getsignal(signal.SIGTERM)
        original_sigint = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGTERM, pytest_sigterm)
        signal.signal(signal.SIGINT, pytest_sigint)
        pytest_sigterm.called_count = 0
        pytest_sigint.called_count = 0
        func()
        signal.signal(signal.SIGTERM, original_sigterm)
        signal.signal(signal.SIGINT, original_sigint)

    return wrapper


@tmp_signals_for_test
def test_outer_inner_signal_handling():
    """
    Even if the flag is set in the outer context, its value should persist in the inner context
@@ -15,17 +50,22 @@ def test_outer_inner_signal_handling():
    @with_signal_handling
    def f1():
        assert signal_callback() is False
        signal_state.set_flag()
        signal_state.set_sigterm_flag()
        assert signal_callback()
        f2()

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 0
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm
    assert pytest_sigterm.called_count == 1
    assert pytest_sigint.called_count == 0


@tmp_signals_for_test
def test_inner_outer_signal_handling():
    """
    Even if the flag is set in the inner context, its value should persist in the outer context
@@ -34,7 +74,7 @@ def test_inner_outer_signal_handling():
    @with_signal_handling
    def f2():
        assert signal_callback() is False
        signal_state.set_flag()
        signal_state.set_sigint_flag()
        assert signal_callback()

    @with_signal_handling
@@ -45,6 +85,10 @@ def test_inner_outer_signal_handling():

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 0
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 1
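The signal_state / signal_callback / with_signal_handling helpers exercised above are intended to let long-running task code notice SIGTERM or SIGINT cooperatively. A rough usage sketch, not taken from this diff (the task function and its work loop are invented for illustration):

# Hypothetical task code built on the helpers tested above.
from awx.main.tasks.signals import signal_callback, with_signal_handling


@with_signal_handling
def process_items(items):
    for item in items:
        if signal_callback():
            # a SIGTERM/SIGINT arrived; stop cleanly instead of being killed mid-item
            return
        handle(item)  # placeholder for the real unit of work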
@@ -143,13 +143,6 @@ def test_send_notifications_job_id(mocker):
        assert UnifiedJob.objects.get.called_with(id=1)


def test_work_success_callback_missing_job():
    task_data = {'type': 'project_update', 'id': 9999}
    with mock.patch('django.db.models.query.QuerySet.get') as get_mock:
        get_mock.side_effect = ProjectUpdate.DoesNotExist()
        assert system.handle_work_success(task_data) is None


@mock.patch('awx.main.models.UnifiedJob.objects.get')
@mock.patch('awx.main.models.Notification.objects.filter')
def test_send_notifications_list(mock_notifications_filter, mock_job_get, mocker):
Some files were not shown because too many files have changed in this diff.