mirror of
https://github.com/ansible/awx.git
synced 2026-02-11 14:44:44 -03:30
Compare commits
181 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9745058546 | ||
|
|
c97a48b165 | ||
|
|
259bca0113 | ||
|
|
92c2b4e983 | ||
|
|
127a0cff23 | ||
|
|
a0ef25006a | ||
|
|
50c98a52f7 | ||
|
|
4008d72af6 | ||
|
|
e72e9f94b9 | ||
|
|
9d60b0b9c6 | ||
|
|
05b58c4df6 | ||
|
|
b1b960fd17 | ||
|
|
3c8f71e559 | ||
|
|
f5922f76fa | ||
|
|
05582702c6 | ||
|
|
1d340c5b4e | ||
|
|
15925f1416 | ||
|
|
6e06a20cca | ||
|
|
bb3acbb8ad | ||
|
|
a88e47930c | ||
|
|
a0d4515ba4 | ||
|
|
770cc10a78 | ||
|
|
159dd62d84 | ||
|
|
640e5db9c6 | ||
|
|
9ed527eb26 | ||
|
|
29ad6e1eaa | ||
|
|
3e607f8964 | ||
|
|
c9d1a4d063 | ||
|
|
a290b082db | ||
|
|
6d3c22e801 | ||
|
|
1f91773a3c | ||
|
|
7b846e1e49 | ||
|
|
f7a2de8a07 | ||
|
|
194c214f03 | ||
|
|
77e30dd4b2 | ||
|
|
9d7421b9bc | ||
|
|
3b8e662916 | ||
|
|
aa3228eec9 | ||
|
|
7b0598c7d8 | ||
|
|
49832d6379 | ||
|
|
8feeb5f1fa | ||
|
|
56230ba5d1 | ||
|
|
480aaeace5 | ||
|
|
3eaea396be | ||
|
|
deef8669c9 | ||
|
|
63223a2cc7 | ||
|
|
a28bc2eb3f | ||
|
|
09168e5832 | ||
|
|
6df1de4262 | ||
|
|
e072bb7668 | ||
|
|
ec579fd637 | ||
|
|
b95d521162 | ||
|
|
d03a6a809d | ||
|
|
4466976e10 | ||
|
|
5733f78fd8 | ||
|
|
20fc7c702a | ||
|
|
6ce5799689 | ||
|
|
dc81aa46d0 | ||
|
|
ab3ceaecad | ||
|
|
1bb4240a6b | ||
|
|
5e105c2cbd | ||
|
|
cdb4f0b7fd | ||
|
|
cf1e448577 | ||
|
|
224e9e0324 | ||
|
|
660dab439b | ||
|
|
5ce2055431 | ||
|
|
951bd1cc87 | ||
|
|
c9190ebd8f | ||
|
|
eb33973fa3 | ||
|
|
40be2e7b6e | ||
|
|
485813211a | ||
|
|
0a87bf1b5e | ||
|
|
fa0e0b2576 | ||
|
|
1d3b2f57ce | ||
|
|
0577e1ee79 | ||
|
|
470ecc4a4f | ||
|
|
965127637b | ||
|
|
eba130cf41 | ||
|
|
441336301e | ||
|
|
2a0be898e6 | ||
|
|
c47acc5988 | ||
|
|
70ba32b5b2 | ||
|
|
81e06dace2 | ||
|
|
3e8202590c | ||
|
|
ad96a72ebe | ||
|
|
eb0058268b | ||
|
|
2bf6512a8e | ||
|
|
855f61a04e | ||
|
|
532e71ff45 | ||
|
|
b9ea114cac | ||
|
|
e41ad82687 | ||
|
|
3bd25c682e | ||
|
|
7169c75b1a | ||
|
|
fdb359a67b | ||
|
|
ed2a59c1a3 | ||
|
|
906f8a1dce | ||
|
|
6833976c54 | ||
|
|
d15405eafe | ||
|
|
6c3bbfc3be | ||
|
|
2e3e6cbde5 | ||
|
|
54894c14dc | ||
|
|
2a51f23b7d | ||
|
|
80df31fc4e | ||
|
|
8f8462b38e | ||
|
|
0c41abea0e | ||
|
|
3eda1ede8d | ||
|
|
40fca6db57 | ||
|
|
148111a072 | ||
|
|
9cad45feac | ||
|
|
6834568c5d | ||
|
|
f7fdb7fe8d | ||
|
|
d8abd4912b | ||
|
|
4fbdc412ad | ||
|
|
db1af57daa | ||
|
|
ffa59864ee | ||
|
|
b209bc67b4 | ||
|
|
1faea020af | ||
|
|
b55a099620 | ||
|
|
f6dd3cb988 | ||
|
|
c448b87c85 | ||
|
|
4dd823121a | ||
|
|
ec4f10d868 | ||
|
|
2a1dffd363 | ||
|
|
8c7ab8fcf2 | ||
|
|
3de8455960 | ||
|
|
d832e75e99 | ||
|
|
a89e266feb | ||
|
|
8e1516eeb7 | ||
|
|
c7f2fdbe57 | ||
|
|
c75757bf22 | ||
|
|
b8ec7c4072 | ||
|
|
bb1c155bc9 | ||
|
|
4822dd79fc | ||
|
|
4cd90163fc | ||
|
|
8dc6ceffee | ||
|
|
2c7184f9d2 | ||
|
|
5cf93febaa | ||
|
|
284bd8377a | ||
|
|
14992cee17 | ||
|
|
6db663eacb | ||
|
|
87bb70bcc0 | ||
|
|
c2d02841e8 | ||
|
|
e5a6007bf1 | ||
|
|
6f9ea1892b | ||
|
|
abc56305cc | ||
|
|
9bb6786a58 | ||
|
|
aec9a9ca56 | ||
|
|
7e4cf859f5 | ||
|
|
90c3d8a275 | ||
|
|
6d1c8de4ed | ||
|
|
601b62deef | ||
|
|
131dd088cd | ||
|
|
445d892050 | ||
|
|
35a576f2dd | ||
|
|
7838641215 | ||
|
|
ab5cc2e69c | ||
|
|
5a63533967 | ||
|
|
b549ae1efa | ||
|
|
bd0089fd35 | ||
|
|
40d18e95c2 | ||
|
|
191a0f7f2a | ||
|
|
852bb0717c | ||
|
|
98bfe3f43f | ||
|
|
53a7b7818e | ||
|
|
e7c7454a3a | ||
|
|
63e82aa4a3 | ||
|
|
fc1b74aa68 | ||
|
|
ea455df9f4 | ||
|
|
8e2a5ed8ae | ||
|
|
1d7e54bd39 | ||
|
|
83df056f71 | ||
|
|
48edb15a03 | ||
|
|
8ddc19a927 | ||
|
|
b021ad7b28 | ||
|
|
b8ba2feecd | ||
|
|
8cfb704f86 | ||
|
|
efcac860de | ||
|
|
6c5590e0e6 | ||
|
|
0edcd688a2 | ||
|
|
b8c48f7d50 | ||
|
|
07e30a3d5f |
28
.github/actions/awx_devel_image/action.yml
vendored
Normal file
28
.github/actions/awx_devel_image/action.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
name: Setup images for AWX
|
||||||
|
description: Builds new awx_devel image
|
||||||
|
inputs:
|
||||||
|
github-token:
|
||||||
|
description: GitHub Token for registry access
|
||||||
|
required: true
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Get python version from Makefile
|
||||||
|
shell: bash
|
||||||
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Log in to registry
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||||
|
|
||||||
|
- name: Pre-pull latest devel image to warm cache
|
||||||
|
shell: bash
|
||||||
|
run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||||
|
|
||||||
|
- name: Build image for current source checkout
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
|
||||||
|
COMPOSE_TAG=${{ github.base_ref }} \
|
||||||
|
make docker-compose-build
|
||||||
73
.github/actions/run_awx_devel/action.yml
vendored
Normal file
73
.github/actions/run_awx_devel/action.yml
vendored
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
name: Run AWX docker-compose
|
||||||
|
description: Runs AWX with `make docker-compose`
|
||||||
|
inputs:
|
||||||
|
github-token:
|
||||||
|
description: GitHub Token to pass to awx_devel_image
|
||||||
|
required: true
|
||||||
|
build-ui:
|
||||||
|
description: Should the UI be built?
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
outputs:
|
||||||
|
ip:
|
||||||
|
description: The IP of the tools_awx_1 container
|
||||||
|
value: ${{ steps.data.outputs.ip }}
|
||||||
|
admin-token:
|
||||||
|
description: OAuth token for admin user
|
||||||
|
value: ${{ steps.data.outputs.admin_token }}
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Build awx_devel image for running checks
|
||||||
|
uses: ./.github/actions/awx_devel_image
|
||||||
|
with:
|
||||||
|
github-token: ${{ inputs.github-token }}
|
||||||
|
|
||||||
|
- name: Upgrade ansible-core
|
||||||
|
shell: bash
|
||||||
|
run: python3 -m pip install --upgrade ansible-core
|
||||||
|
|
||||||
|
- name: Install system deps
|
||||||
|
shell: bash
|
||||||
|
run: sudo apt-get install -y gettext
|
||||||
|
|
||||||
|
- name: Start AWX
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
|
||||||
|
COMPOSE_TAG=${{ github.base_ref }} \
|
||||||
|
COMPOSE_UP_OPTS="-d" \
|
||||||
|
make docker-compose
|
||||||
|
|
||||||
|
- name: Update default AWX password
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
||||||
|
do
|
||||||
|
echo "Waiting for AWX..."
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
echo "AWX is up, updating the password..."
|
||||||
|
docker exec -i tools_awx_1 sh <<-EOSH
|
||||||
|
awx-manage update_password --username=admin --password=password
|
||||||
|
EOSH
|
||||||
|
|
||||||
|
- name: Build UI
|
||||||
|
# This must be a string comparison in composite actions:
|
||||||
|
# https://github.com/actions/runner/issues/2238
|
||||||
|
if: ${{ inputs.build-ui == 'true' }}
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
docker exec -i tools_awx_1 sh <<-EOSH
|
||||||
|
make ui-devel
|
||||||
|
EOSH
|
||||||
|
|
||||||
|
- name: Get instance data
|
||||||
|
id: data
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
||||||
|
ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
|
||||||
|
echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
|
||||||
|
echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
|
||||||
19
.github/actions/upload_awx_devel_logs/action.yml
vendored
Normal file
19
.github/actions/upload_awx_devel_logs/action.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
name: Upload logs
|
||||||
|
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
|
||||||
|
inputs:
|
||||||
|
log-filename:
|
||||||
|
description: "*Unique* name of the log file"
|
||||||
|
required: true
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Get AWX logs
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
docker logs tools_awx_1 > ${{ inputs.log-filename }}
|
||||||
|
|
||||||
|
- name: Upload AWX logs as artifact
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: docker-compose-logs
|
||||||
|
path: ${{ inputs.log-filename }}
|
||||||
4
.github/pr_labeler.yml
vendored
4
.github/pr_labeler.yml
vendored
@@ -15,5 +15,5 @@
|
|||||||
|
|
||||||
"dependencies":
|
"dependencies":
|
||||||
- any: ["awx/ui/package.json"]
|
- any: ["awx/ui/package.json"]
|
||||||
- any: ["awx/requirements/*.txt"]
|
- any: ["requirements/*.txt"]
|
||||||
- any: ["awx/requirements/requirements.in"]
|
- any: ["requirements/requirements.in"]
|
||||||
|
|||||||
161
.github/workflows/ci.yml
vendored
161
.github/workflows/ci.yml
vendored
@@ -35,29 +35,40 @@ jobs:
|
|||||||
- name: ui-test-general
|
- name: ui-test-general
|
||||||
command: make ui-test-general
|
command: make ui-test-general
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Build awx_devel image for running checks
|
||||||
|
uses: ./.github/actions/awx_devel_image
|
||||||
|
with:
|
||||||
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Run check ${{ matrix.tests.name }}
|
- name: Run check ${{ matrix.tests.name }}
|
||||||
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
|
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner
|
||||||
|
|
||||||
dev-env:
|
dev-env:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- uses: ./.github/actions/run_awx_devel
|
||||||
|
id: awx
|
||||||
|
with:
|
||||||
|
build-ui: false
|
||||||
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Run smoke test
|
- name: Run smoke test
|
||||||
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
|
||||||
|
|
||||||
awx-operator:
|
awx-operator:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout awx
|
- name: Checkout awx
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
path: awx
|
path: awx
|
||||||
|
|
||||||
- name: Checkout awx-operator
|
- name: Checkout awx-operator
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: ansible/awx-operator
|
repository: ansible/awx-operator
|
||||||
path: awx-operator
|
path: awx-operator
|
||||||
@@ -67,7 +78,7 @@ jobs:
|
|||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
- name: Install python ${{ env.py_version }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
python-version: ${{ env.py_version }}
|
||||||
|
|
||||||
@@ -102,7 +113,7 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
|
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
|
||||||
- name: Upgrade ansible-core
|
- name: Upgrade ansible-core
|
||||||
@@ -114,3 +125,137 @@ jobs:
|
|||||||
# needed due to cgroupsv2. This is fixed, but a stable release
|
# needed due to cgroupsv2. This is fixed, but a stable release
|
||||||
# with the fix has not been made yet.
|
# with the fix has not been made yet.
|
||||||
ANSIBLE_TEST_PREFER_PODMAN: 1
|
ANSIBLE_TEST_PREFER_PODMAN: 1
|
||||||
|
|
||||||
|
collection-integration:
|
||||||
|
name: awx_collection integration
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
target-regex:
|
||||||
|
- name: a-h
|
||||||
|
regex: ^[a-h]
|
||||||
|
- name: i-p
|
||||||
|
regex: ^[i-p]
|
||||||
|
- name: r-z0-9
|
||||||
|
regex: ^[r-z0-9]
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- uses: ./.github/actions/run_awx_devel
|
||||||
|
id: awx
|
||||||
|
with:
|
||||||
|
build-ui: false
|
||||||
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Install dependencies for running tests
|
||||||
|
run: |
|
||||||
|
python3 -m pip install -e ./awxkit/
|
||||||
|
python3 -m pip install -r awx_collection/requirements.txt
|
||||||
|
|
||||||
|
- name: Run integration tests
|
||||||
|
run: |
|
||||||
|
echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
|
||||||
|
echo '[general]' > ~/.tower_cli.cfg
|
||||||
|
echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
|
||||||
|
echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
|
||||||
|
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
|
||||||
|
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
|
||||||
|
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
|
||||||
|
env:
|
||||||
|
ANSIBLE_TEST_PREFER_PODMAN: 1
|
||||||
|
|
||||||
|
# Upload coverage report as artifact
|
||||||
|
- uses: actions/upload-artifact@v3
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
name: coverage-${{ matrix.target-regex.name }}
|
||||||
|
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||||
|
|
||||||
|
- uses: ./.github/actions/upload_awx_devel_logs
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
|
||||||
|
|
||||||
|
collection-integration-coverage-combine:
|
||||||
|
name: combine awx_collection integration coverage
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- collection-integration
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Upgrade ansible-core
|
||||||
|
run: python3 -m pip install --upgrade ansible-core
|
||||||
|
|
||||||
|
- name: Download coverage artifacts
|
||||||
|
uses: actions/download-artifact@v3
|
||||||
|
with:
|
||||||
|
path: coverage
|
||||||
|
|
||||||
|
- name: Combine coverage
|
||||||
|
run: |
|
||||||
|
make COLLECTION_VERSION=100.100.100-git install_collection
|
||||||
|
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
|
||||||
|
cd coverage
|
||||||
|
for i in coverage-*; do
|
||||||
|
cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
|
||||||
|
done
|
||||||
|
cd ~/.ansible/collections/ansible_collections/awx/awx
|
||||||
|
ansible-test coverage combine --requirements
|
||||||
|
ansible-test coverage html
|
||||||
|
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
# This is a huge hack, there's no official action for removing artifacts currently.
|
||||||
|
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
|
||||||
|
# steps, so we have to use github-script to get them.
|
||||||
|
#
|
||||||
|
# The advantage of doing this, though, is that we save on artifact storage space.
|
||||||
|
|
||||||
|
- name: Get secret artifact runtime URL
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
id: get-runtime-url
|
||||||
|
with:
|
||||||
|
result-encoding: string
|
||||||
|
script: |
|
||||||
|
const { ACTIONS_RUNTIME_URL } = process.env;
|
||||||
|
return ACTIONS_RUNTIME_URL;
|
||||||
|
|
||||||
|
- name: Get secret artifact runtime token
|
||||||
|
uses: actions/github-script@v6
|
||||||
|
id: get-runtime-token
|
||||||
|
with:
|
||||||
|
result-encoding: string
|
||||||
|
script: |
|
||||||
|
const { ACTIONS_RUNTIME_TOKEN } = process.env;
|
||||||
|
return ACTIONS_RUNTIME_TOKEN;
|
||||||
|
|
||||||
|
- name: Remove intermediary artifacts
|
||||||
|
env:
|
||||||
|
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
|
||||||
|
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
|
||||||
|
run: |
|
||||||
|
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
|
||||||
|
artifacts=$(
|
||||||
|
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
|
||||||
|
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
|
||||||
|
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
|
||||||
|
)
|
||||||
|
|
||||||
|
for artifact in $artifacts; do
|
||||||
|
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: Upload coverage report as artifact
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
name: awx-collection-integration-coverage-html
|
||||||
|
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
|
||||||
|
|||||||
11
.github/workflows/devel_images.yml
vendored
11
.github/workflows/devel_images.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Get python version from Makefile
|
- name: Get python version from Makefile
|
||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
@@ -28,7 +28,7 @@ jobs:
|
|||||||
OWNER: '${{ github.repository_owner }}'
|
OWNER: '${{ github.repository_owner }}'
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
- name: Install python ${{ env.py_version }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
python-version: ${{ env.py_version }}
|
||||||
|
|
||||||
@@ -48,8 +48,11 @@ jobs:
|
|||||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
|
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
|
||||||
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
|
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
|
||||||
|
|
||||||
- name: Push image
|
- name: Push development images
|
||||||
run: |
|
run: |
|
||||||
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
|
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
|
||||||
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
|
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
|
||||||
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
|
|
||||||
|
- name: Push AWX k8s image, only for upstream and feature branches
|
||||||
|
run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
|
||||||
|
if: endsWith(github.repository, '/awx')
|
||||||
|
|||||||
16
.github/workflows/docs.yml
vendored
Normal file
16
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
name: Docsite CI
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
jobs:
|
||||||
|
docsite-build:
|
||||||
|
name: docsite test build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: install tox
|
||||||
|
run: pip install tox
|
||||||
|
|
||||||
|
- name: Assure docs can be built
|
||||||
|
run: tox -e docs
|
||||||
54
.github/workflows/e2e_test.yml
vendored
54
.github/workflows/e2e_test.yml
vendored
@@ -19,41 +19,20 @@ jobs:
|
|||||||
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Get python version from Makefile
|
- uses: ./.github/actions/run_awx_devel
|
||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
id: awx
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
|
||||||
uses: actions/setup-python@v2
|
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
build-ui: true
|
||||||
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- name: Install system deps
|
|
||||||
run: sudo apt-get install -y gettext
|
|
||||||
|
|
||||||
- name: Log in to registry
|
|
||||||
run: |
|
|
||||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
|
||||||
|
|
||||||
- name: Pre-pull image to warm build cache
|
|
||||||
run: |
|
|
||||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
|
||||||
|
|
||||||
- name: Build UI
|
|
||||||
run: |
|
|
||||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
|
|
||||||
|
|
||||||
- name: Start AWX
|
|
||||||
run: |
|
|
||||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
|
|
||||||
|
|
||||||
- name: Pull awx_cypress_base image
|
- name: Pull awx_cypress_base image
|
||||||
run: |
|
run: |
|
||||||
docker pull quay.io/awx/awx_cypress_base:latest
|
docker pull quay.io/awx/awx_cypress_base:latest
|
||||||
|
|
||||||
- name: Checkout test project
|
- name: Checkout test project
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: ${{ github.repository_owner }}/tower-qa
|
repository: ${{ github.repository_owner }}/tower-qa
|
||||||
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
||||||
@@ -65,18 +44,6 @@ jobs:
|
|||||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||||
docker build -t awx-pf-tests .
|
docker build -t awx-pf-tests .
|
||||||
|
|
||||||
- name: Update default AWX password
|
|
||||||
run: |
|
|
||||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
|
||||||
do
|
|
||||||
echo "Waiting for AWX..."
|
|
||||||
sleep 5;
|
|
||||||
done
|
|
||||||
echo "AWX is up, updating the password..."
|
|
||||||
docker exec -i tools_awx_1 sh <<-EOSH
|
|
||||||
awx-manage update_password --username=admin --password=password
|
|
||||||
EOSH
|
|
||||||
|
|
||||||
- name: Run E2E tests
|
- name: Run E2E tests
|
||||||
env:
|
env:
|
||||||
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
||||||
@@ -86,7 +53,7 @@ jobs:
|
|||||||
export COMMIT_INFO_SHA=$GITHUB_SHA
|
export COMMIT_INFO_SHA=$GITHUB_SHA
|
||||||
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
||||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||||
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
AWX_IP=${{ steps.awx.outputs.ip }}
|
||||||
printenv > .env
|
printenv > .env
|
||||||
echo "Executing tests:"
|
echo "Executing tests:"
|
||||||
docker run \
|
docker run \
|
||||||
@@ -102,8 +69,7 @@ jobs:
|
|||||||
-w /e2e \
|
-w /e2e \
|
||||||
awx-pf-tests run --project .
|
awx-pf-tests run --project .
|
||||||
|
|
||||||
- name: Save AWX logs
|
- uses: ./.github/actions/upload_awx_devel_logs
|
||||||
uses: actions/upload-artifact@v2
|
if: always()
|
||||||
with:
|
with:
|
||||||
name: AWX-logs-${{ matrix.job }}
|
log-filename: e2e-${{ matrix.job }}.log
|
||||||
path: make-docker-compose-output.log
|
|
||||||
|
|||||||
2
.github/workflows/label_issue.yml
vendored
2
.github/workflows/label_issue.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Label Issue - Community
|
name: Label Issue - Community
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- uses: actions/setup-python@v4
|
- uses: actions/setup-python@v4
|
||||||
- name: Install python requests
|
- name: Install python requests
|
||||||
run: pip install requests
|
run: pip install requests
|
||||||
|
|||||||
2
.github/workflows/label_pr.yml
vendored
2
.github/workflows/label_pr.yml
vendored
@@ -27,7 +27,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Label PR - Community
|
name: Label PR - Community
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
- uses: actions/setup-python@v4
|
- uses: actions/setup-python@v4
|
||||||
- name: Install python requests
|
- name: Install python requests
|
||||||
run: pip install requests
|
run: pip install requests
|
||||||
|
|||||||
1
.github/workflows/pr_body_check.yml
vendored
1
.github/workflows/pr_body_check.yml
vendored
@@ -7,6 +7,7 @@ on:
|
|||||||
types: [opened, edited, reopened, synchronize]
|
types: [opened, edited, reopened, synchronize]
|
||||||
jobs:
|
jobs:
|
||||||
pr-check:
|
pr-check:
|
||||||
|
if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
|
||||||
name: Scan PR description for semantic versioning keywords
|
name: Scan PR description for semantic versioning keywords
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
|
|||||||
10
.github/workflows/promote.yml
vendored
10
.github/workflows/promote.yml
vendored
@@ -17,13 +17,13 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout awx
|
- name: Checkout awx
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Get python version from Makefile
|
- name: Get python version from Makefile
|
||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
- name: Install python ${{ env.py_version }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
python-version: ${{ env.py_version }}
|
||||||
|
|
||||||
@@ -40,8 +40,12 @@ jobs:
|
|||||||
if: ${{ github.repository_owner != 'ansible' }}
|
if: ${{ github.repository_owner != 'ansible' }}
|
||||||
|
|
||||||
- name: Build collection and publish to galaxy
|
- name: Build collection and publish to galaxy
|
||||||
|
env:
|
||||||
|
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
|
||||||
|
COLLECTION_VERSION: ${{ github.event.release.tag_name }}
|
||||||
|
COLLECTION_TEMPLATE_VERSION: true
|
||||||
run: |
|
run: |
|
||||||
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
|
make build_collection
|
||||||
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||||
echo "Galaxy release already done"; \
|
echo "Galaxy release already done"; \
|
||||||
else \
|
else \
|
||||||
|
|||||||
8
.github/workflows/stage.yml
vendored
8
.github/workflows/stage.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
|||||||
exit 0
|
exit 0
|
||||||
|
|
||||||
- name: Checkout awx
|
- name: Checkout awx
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
path: awx
|
path: awx
|
||||||
|
|
||||||
@@ -52,18 +52,18 @@ jobs:
|
|||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
- name: Install python ${{ env.py_version }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
python-version: ${{ env.py_version }}
|
||||||
|
|
||||||
- name: Checkout awx-logos
|
- name: Checkout awx-logos
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: ansible/awx-logos
|
repository: ansible/awx-logos
|
||||||
path: awx-logos
|
path: awx-logos
|
||||||
|
|
||||||
- name: Checkout awx-operator
|
- name: Checkout awx-operator
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: ${{ github.repository_owner }}/awx-operator
|
repository: ${{ github.repository_owner }}/awx-operator
|
||||||
path: awx-operator
|
path: awx-operator
|
||||||
|
|||||||
4
.github/workflows/upload_schema.yml
vendored
4
.github/workflows/upload_schema.yml
vendored
@@ -17,13 +17,13 @@ jobs:
|
|||||||
packages: write
|
packages: write
|
||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Get python version from Makefile
|
- name: Get python version from Makefile
|
||||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Install python ${{ env.py_version }}
|
- name: Install python ${{ env.py_version }}
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ env.py_version }}
|
python-version: ${{ env.py_version }}
|
||||||
|
|
||||||
|
|||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -165,3 +165,7 @@ use_dev_supervisor.txt
|
|||||||
|
|
||||||
awx/ui_next/src
|
awx/ui_next/src
|
||||||
awx/ui_next/build
|
awx/ui_next/build
|
||||||
|
|
||||||
|
# Docs build stuff
|
||||||
|
docs/docsite/build/
|
||||||
|
_readthedocs/
|
||||||
|
|||||||
5
.gitleaks.toml
Normal file
5
.gitleaks.toml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[allowlist]
|
||||||
|
description = "Documentation contains example secrets and passwords"
|
||||||
|
paths = [
|
||||||
|
"docs/docsite/rst/administration/oauth2_token_auth.rst",
|
||||||
|
]
|
||||||
15
.readthedocs.yaml
Normal file
15
.readthedocs.yaml
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Read the Docs configuration file
|
||||||
|
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||||
|
|
||||||
|
version: 2
|
||||||
|
|
||||||
|
build:
|
||||||
|
os: ubuntu-22.04
|
||||||
|
tools:
|
||||||
|
python: >-
|
||||||
|
3.11
|
||||||
|
commands:
|
||||||
|
- pip install --user tox
|
||||||
|
- python3 -m tox -e docs
|
||||||
|
- mkdir -p _readthedocs/html/
|
||||||
|
- mv docs/docsite/build/html/* _readthedocs/html/
|
||||||
@@ -10,6 +10,7 @@ ignore: |
|
|||||||
tools/docker-compose/_sources
|
tools/docker-compose/_sources
|
||||||
# django template files
|
# django template files
|
||||||
awx/api/templates/instance_install_bundle/**
|
awx/api/templates/instance_install_bundle/**
|
||||||
|
.readthedocs.yaml
|
||||||
|
|
||||||
extends: default
|
extends: default
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,6 @@
|
|||||||
|
|
||||||
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
|
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
|
||||||
|
|
||||||
Users who wish to upgrade modern AWX installations should follow the instructions at:
|
As of version 18.0, `awx-operator` is the preferred install/upgrade method. Users who wish to upgrade modern AWX installations should follow the instructions at:
|
||||||
|
|
||||||
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions
|
https://github.com/ansible/awx-operator/blob/devel/docs/upgrade/upgrading.md
|
||||||
|
|||||||
20
Makefile
20
Makefile
@@ -1,10 +1,12 @@
|
|||||||
-include awx/ui_next/Makefile
|
-include awx/ui_next/Makefile
|
||||||
|
|
||||||
PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
|
PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
|
||||||
|
SHELL := bash
|
||||||
DOCKER_COMPOSE ?= docker-compose
|
DOCKER_COMPOSE ?= docker-compose
|
||||||
OFFICIAL ?= no
|
OFFICIAL ?= no
|
||||||
NODE ?= node
|
NODE ?= node
|
||||||
NPM_BIN ?= npm
|
NPM_BIN ?= npm
|
||||||
|
KIND_BIN ?= $(shell which kind)
|
||||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||||
MANAGEMENT_COMMAND ?= awx-manage
|
MANAGEMENT_COMMAND ?= awx-manage
|
||||||
@@ -77,7 +79,7 @@ I18N_FLAG_FILE = .i18n_built
|
|||||||
sdist \
|
sdist \
|
||||||
ui-release ui-devel \
|
ui-release ui-devel \
|
||||||
VERSION PYTHON_VERSION docker-compose-sources \
|
VERSION PYTHON_VERSION docker-compose-sources \
|
||||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
.git/hooks/pre-commit
|
||||||
|
|
||||||
clean-tmp:
|
clean-tmp:
|
||||||
rm -rf tmp/
|
rm -rf tmp/
|
||||||
@@ -322,21 +324,10 @@ test:
|
|||||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||||
|
|
||||||
## Login to Github container image registry, pull image, then build image.
|
|
||||||
github_ci_setup:
|
|
||||||
# GITHUB_ACTOR is automatic github actions env var
|
|
||||||
# CI_GITHUB_TOKEN is defined in .github files
|
|
||||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
|
||||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
|
||||||
$(MAKE) docker-compose-build
|
|
||||||
|
|
||||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||||
docker-runner:
|
docker-runner:
|
||||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||||
|
|
||||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
|
||||||
github_ci_runner: github_ci_setup docker-runner
|
|
||||||
|
|
||||||
test_collection:
|
test_collection:
|
||||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||||
if [ "$(VENV_BASE)" ]; then \
|
if [ "$(VENV_BASE)" ]; then \
|
||||||
@@ -382,7 +373,7 @@ test_collection_sanity:
|
|||||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||||
|
|
||||||
test_collection_integration: install_collection
|
test_collection_integration: install_collection
|
||||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
|
||||||
|
|
||||||
test_unit:
|
test_unit:
|
||||||
@if [ "$(VENV_BASE)" ]; then \
|
@if [ "$(VENV_BASE)" ]; then \
|
||||||
@@ -663,6 +654,9 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
|||||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||||
|
|
||||||
|
|
||||||
|
kind-dev-load: awx-kube-dev-build
|
||||||
|
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||||
|
|
||||||
# Translation TASKS
|
# Translation TASKS
|
||||||
# --------------------------------------
|
# --------------------------------------
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
||||||
[](https://libera.chat)
|
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)
|
||||||
|
|
||||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||||
|
|
||||||
@@ -37,5 +37,6 @@ Get Involved
|
|||||||
|
|
||||||
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
|
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
|
||||||
|
|
||||||
- Join the `#ansible-awx` channel on irc.libera.chat
|
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
|
||||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
- Join the [Ansible Community Forum](https://forum.ansible.com)
|
||||||
|
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||||
|
|||||||
@@ -52,39 +52,14 @@ try:
|
|||||||
except ImportError: # pragma: no cover
|
except ImportError: # pragma: no cover
|
||||||
MODE = 'production'
|
MODE = 'production'
|
||||||
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import django # noqa: F401
|
import django # noqa: F401
|
||||||
|
|
||||||
HAS_DJANGO = True
|
|
||||||
except ImportError:
|
except ImportError:
|
||||||
HAS_DJANGO = False
|
pass
|
||||||
else:
|
else:
|
||||||
from django.db.backends.base import schema
|
|
||||||
from django.db.models import indexes
|
|
||||||
from django.db.backends.utils import names_digest
|
|
||||||
from django.db import connection
|
from django.db import connection
|
||||||
|
|
||||||
if HAS_DJANGO is True:
|
|
||||||
# See upgrade blocker note in requirements/README.md
|
|
||||||
try:
|
|
||||||
names_digest('foo', 'bar', 'baz', length=8)
|
|
||||||
except ValueError:
|
|
||||||
|
|
||||||
def names_digest(*args, length):
|
|
||||||
"""
|
|
||||||
Generate a 32-bit digest of a set of arguments that can be used to shorten
|
|
||||||
identifying names. Support for use in FIPS environments.
|
|
||||||
"""
|
|
||||||
h = hashlib.md5(usedforsecurity=False)
|
|
||||||
for arg in args:
|
|
||||||
h.update(arg.encode())
|
|
||||||
return h.hexdigest()[:length]
|
|
||||||
|
|
||||||
schema.names_digest = names_digest
|
|
||||||
indexes.names_digest = names_digest
|
|
||||||
|
|
||||||
|
|
||||||
def find_commands(management_dir):
|
def find_commands(management_dir):
|
||||||
# Modified version of function from django/core/management/__init__.py.
|
# Modified version of function from django/core/management/__init__.py.
|
||||||
|
|||||||
@@ -232,7 +232,8 @@ class APIView(views.APIView):
|
|||||||
|
|
||||||
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
|
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
|
||||||
time_started = getattr(self, 'time_started', None)
|
time_started = getattr(self, 'time_started', None)
|
||||||
response['X-API-Product-Version'] = get_awx_version()
|
if request.user.is_authenticated:
|
||||||
|
response['X-API-Product-Version'] = get_awx_version()
|
||||||
response['X-API-Product-Name'] = server_product_name()
|
response['X-API-Product-Name'] = server_product_name()
|
||||||
|
|
||||||
response['X-API-Node'] = settings.CLUSTER_HOST_ID
|
response['X-API-Node'] = settings.CLUSTER_HOST_ID
|
||||||
|
|||||||
@@ -3233,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
|
|||||||
if get_field_from_model_or_attrs('host_config_key') and not inventory:
|
if get_field_from_model_or_attrs('host_config_key') and not inventory:
|
||||||
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
|
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
|
||||||
|
|
||||||
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
|
prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
|
||||||
if project is None:
|
if project is None:
|
||||||
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
|
||||||
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
|
||||||
@@ -5356,10 +5356,16 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
|
|||||||
class InstanceLinkSerializer(BaseSerializer):
|
class InstanceLinkSerializer(BaseSerializer):
|
||||||
class Meta:
|
class Meta:
|
||||||
model = InstanceLink
|
model = InstanceLink
|
||||||
fields = ('source', 'target', 'link_state')
|
fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
|
||||||
|
|
||||||
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||||
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
|
target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
|
||||||
|
|
||||||
|
def get_related(self, obj):
|
||||||
|
res = super(InstanceLinkSerializer, self).get_related(obj)
|
||||||
|
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
|
||||||
|
res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
class InstanceNodeSerializer(BaseSerializer):
|
class InstanceNodeSerializer(BaseSerializer):
|
||||||
@@ -5376,6 +5382,7 @@ class InstanceSerializer(BaseSerializer):
|
|||||||
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
|
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
|
||||||
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
|
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
|
||||||
health_check_pending = serializers.SerializerMethodField()
|
health_check_pending = serializers.SerializerMethodField()
|
||||||
|
peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
model = Instance
|
model = Instance
|
||||||
@@ -5412,6 +5419,8 @@ class InstanceSerializer(BaseSerializer):
|
|||||||
'node_state',
|
'node_state',
|
||||||
'ip_address',
|
'ip_address',
|
||||||
'listener_port',
|
'listener_port',
|
||||||
|
'peers',
|
||||||
|
'peers_from_control_nodes',
|
||||||
)
|
)
|
||||||
extra_kwargs = {
|
extra_kwargs = {
|
||||||
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
|
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
|
||||||
@@ -5464,22 +5473,57 @@ class InstanceSerializer(BaseSerializer):
|
|||||||
def get_health_check_pending(self, obj):
|
def get_health_check_pending(self, obj):
|
||||||
return obj.health_check_pending
|
return obj.health_check_pending
|
||||||
|
|
||||||
def validate(self, data):
|
def validate(self, attrs):
|
||||||
if self.instance:
|
def get_field_from_model_or_attrs(fd):
|
||||||
if self.instance.node_type == Instance.Types.HOP:
|
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
|
||||||
raise serializers.ValidationError("Hop node instances may not be changed.")
|
|
||||||
else:
|
def check_peers_changed():
|
||||||
if not settings.IS_K8S:
|
'''
|
||||||
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
|
return True if
|
||||||
return data
|
- 'peers' in attrs
|
||||||
|
- instance peers matches peers in attrs
|
||||||
|
'''
|
||||||
|
return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
|
||||||
|
|
||||||
|
if not self.instance and not settings.IS_K8S:
|
||||||
|
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
|
||||||
|
|
||||||
|
node_type = get_field_from_model_or_attrs("node_type")
|
||||||
|
peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
|
||||||
|
listener_port = get_field_from_model_or_attrs("listener_port")
|
||||||
|
peers = attrs.get('peers', [])
|
||||||
|
|
||||||
|
if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||||
|
raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
|
||||||
|
|
||||||
|
if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
|
||||||
|
if check_peers_changed():
|
||||||
|
raise serializers.ValidationError(
|
||||||
|
_("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
|
||||||
|
)
|
||||||
|
|
||||||
|
if not listener_port and peers_from_control_nodes:
|
||||||
|
raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
|
||||||
|
|
||||||
|
if not listener_port and self.instance and self.instance.peers_from.exists():
|
||||||
|
raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
|
||||||
|
|
||||||
|
for peer in peers:
|
||||||
|
if peer.listener_port is None:
|
||||||
|
raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
|
||||||
|
|
||||||
|
if not settings.IS_K8S:
|
||||||
|
if check_peers_changed():
|
||||||
|
raise serializers.ValidationError(_("Cannot change peers."))
|
||||||
|
|
||||||
|
return super().validate(attrs)
|
||||||
|
|
||||||
def validate_node_type(self, value):
|
def validate_node_type(self, value):
|
||||||
if not self.instance:
|
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
|
||||||
if value not in (Instance.Types.EXECUTION,):
|
raise serializers.ValidationError(_("Can only create execution or hop nodes."))
|
||||||
raise serializers.ValidationError("Can only create execution nodes.")
|
|
||||||
else:
|
if self.instance and self.instance.node_type != value:
|
||||||
if self.instance.node_type != value:
|
raise serializers.ValidationError(_("Cannot change node type."))
|
||||||
raise serializers.ValidationError("Cannot change node type.")
|
|
||||||
|
|
||||||
return value
|
return value
|
||||||
|
|
||||||
@@ -5487,30 +5531,41 @@ class InstanceSerializer(BaseSerializer):
|
|||||||
if self.instance:
|
if self.instance:
|
||||||
if value != self.instance.node_state:
|
if value != self.instance.node_state:
|
||||||
if not settings.IS_K8S:
|
if not settings.IS_K8S:
|
||||||
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
|
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
|
||||||
if value != Instance.States.DEPROVISIONING:
|
if value != Instance.States.DEPROVISIONING:
|
||||||
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
|
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
|
||||||
if self.instance.node_type not in (Instance.Types.EXECUTION,):
|
if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
|
||||||
raise serializers.ValidationError("Can only deprovision execution nodes.")
|
raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
|
||||||
else:
|
else:
|
||||||
if value and value != Instance.States.INSTALLED:
|
if value and value != Instance.States.INSTALLED:
|
||||||
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
|
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
|
||||||
|
|
||||||
return value
|
return value
|
||||||
|
|
||||||
def validate_hostname(self, value):
|
def validate_hostname(self, value):
|
||||||
"""
|
"""
|
||||||
- Hostname cannot be "localhost" - but can be something like localhost.domain
|
Cannot change the hostname
|
||||||
- Cannot change the hostname of an-already instantiated & initialized Instance object
|
|
||||||
"""
|
"""
|
||||||
if self.instance and self.instance.hostname != value:
|
if self.instance and self.instance.hostname != value:
|
||||||
raise serializers.ValidationError("Cannot change hostname.")
|
raise serializers.ValidationError(_("Cannot change hostname."))
|
||||||
|
|
||||||
return value
|
return value
|
||||||
|
|
||||||
def validate_listener_port(self, value):
|
def validate_listener_port(self, value):
|
||||||
if self.instance and self.instance.listener_port != value:
|
"""
|
||||||
raise serializers.ValidationError("Cannot change listener port.")
|
Cannot change listener port, unless going from none to integer, and vice versa
|
||||||
|
"""
|
||||||
|
if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
|
||||||
|
raise serializers.ValidationError(_("Cannot change listener port."))
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
def validate_peers_from_control_nodes(self, value):
|
||||||
|
"""
|
||||||
|
Can only enable for K8S based deployments
|
||||||
|
"""
|
||||||
|
if value and not settings.IS_K8S:
|
||||||
|
raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
|
||||||
|
|
||||||
return value
|
return value
|
||||||
|
|
||||||
@@ -5518,7 +5573,19 @@ class InstanceSerializer(BaseSerializer):
|
|||||||
class InstanceHealthCheckSerializer(BaseSerializer):
|
class InstanceHealthCheckSerializer(BaseSerializer):
|
||||||
class Meta:
|
class Meta:
|
||||||
model = Instance
|
model = Instance
|
||||||
read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
|
read_only_fields = (
|
||||||
|
'uuid',
|
||||||
|
'hostname',
|
||||||
|
'ip_address',
|
||||||
|
'version',
|
||||||
|
'last_health_check',
|
||||||
|
'errors',
|
||||||
|
'cpu',
|
||||||
|
'memory',
|
||||||
|
'cpu_capacity',
|
||||||
|
'mem_capacity',
|
||||||
|
'capacity',
|
||||||
|
)
|
||||||
fields = read_only_fields
|
fields = read_only_fields
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -3,21 +3,35 @@ receptor_group: awx
|
|||||||
receptor_verify: true
|
receptor_verify: true
|
||||||
receptor_tls: true
|
receptor_tls: true
|
||||||
receptor_mintls13: false
|
receptor_mintls13: false
|
||||||
|
{% if instance.node_type == "execution" %}
|
||||||
receptor_work_commands:
|
receptor_work_commands:
|
||||||
ansible-runner:
|
ansible-runner:
|
||||||
command: ansible-runner
|
command: ansible-runner
|
||||||
params: worker
|
params: worker
|
||||||
allowruntimeparams: true
|
allowruntimeparams: true
|
||||||
verifysignature: true
|
verifysignature: true
|
||||||
|
additional_python_packages:
|
||||||
|
- ansible-runner
|
||||||
|
{% endif %}
|
||||||
custom_worksign_public_keyfile: receptor/work_public_key.pem
|
custom_worksign_public_keyfile: receptor/work_public_key.pem
|
||||||
custom_tls_certfile: receptor/tls/receptor.crt
|
custom_tls_certfile: receptor/tls/receptor.crt
|
||||||
custom_tls_keyfile: receptor/tls/receptor.key
|
custom_tls_keyfile: receptor/tls/receptor.key
|
||||||
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
||||||
receptor_protocol: 'tcp'
|
receptor_protocol: 'tcp'
|
||||||
|
{% if instance.listener_port %}
|
||||||
receptor_listener: true
|
receptor_listener: true
|
||||||
receptor_port: {{ instance.listener_port }}
|
receptor_port: {{ instance.listener_port }}
|
||||||
receptor_dependencies:
|
{% else %}
|
||||||
- python39-pip
|
receptor_listener: false
|
||||||
|
{% endif %}
|
||||||
|
{% if peers %}
|
||||||
|
receptor_peers:
|
||||||
|
{% for peer in peers %}
|
||||||
|
- host: {{ peer.host }}
|
||||||
|
port: {{ peer.port }}
|
||||||
|
protocol: tcp
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
{% verbatim %}
|
{% verbatim %}
|
||||||
podman_user: "{{ receptor_user }}"
|
podman_user: "{{ receptor_user }}"
|
||||||
podman_group: "{{ receptor_group }}"
|
podman_group: "{{ receptor_group }}"
|
||||||
|
|||||||
@@ -1,20 +1,16 @@
|
|||||||
{% verbatim %}
|
|
||||||
---
|
---
|
||||||
- hosts: all
|
- hosts: all
|
||||||
become: yes
|
become: yes
|
||||||
tasks:
|
tasks:
|
||||||
- name: Create the receptor user
|
- name: Create the receptor user
|
||||||
user:
|
user:
|
||||||
|
{% verbatim %}
|
||||||
name: "{{ receptor_user }}"
|
name: "{{ receptor_user }}"
|
||||||
|
{% endverbatim %}
|
||||||
shell: /bin/bash
|
shell: /bin/bash
|
||||||
- name: Enable Copr repo for Receptor
|
{% if instance.node_type == "execution" %}
|
||||||
command: dnf copr enable ansible-awx/receptor -y
|
|
||||||
- import_role:
|
- import_role:
|
||||||
name: ansible.receptor.podman
|
name: ansible.receptor.podman
|
||||||
|
{% endif %}
|
||||||
- import_role:
|
- import_role:
|
||||||
name: ansible.receptor.setup
|
name: ansible.receptor.setup
|
||||||
- name: Install ansible-runner
|
|
||||||
pip:
|
|
||||||
name: ansible-runner
|
|
||||||
executable: pip3.9
|
|
||||||
{% endverbatim %}
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
---
|
---
|
||||||
collections:
|
collections:
|
||||||
- name: ansible.receptor
|
- name: ansible.receptor
|
||||||
version: 1.1.0
|
version: 2.0.0
|
||||||
@@ -30,7 +30,7 @@ from awx.api.views import (
 OAuth2TokenList,
 ApplicationOAuth2TokenList,
 OAuth2ApplicationDetail,
-# HostMetricSummaryMonthlyList, # It will be enabled in future version of the AWX
+HostMetricSummaryMonthlyList,
 )

 from awx.api.views.bulk import (
@@ -123,8 +123,7 @@ v2_urls = [
 re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
 re_path(r'^hosts/', include(host_urls)),
 re_path(r'^host_metrics/', include(host_metric_urls)),
-# It will be enabled in future version of the AWX
-# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
+re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
 re_path(r'^groups/', include(group_urls)),
 re_path(r'^inventory_sources/', include(inventory_source_urls)),
 re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -341,17 +341,18 @@ class InstanceDetail(RetrieveUpdateAPIView):

 def update_raw_data(self, data):
 # these fields are only valid on creation of an instance, so they unwanted on detail view
-data.pop('listener_port', None)
 data.pop('node_type', None)
 data.pop('hostname', None)
+data.pop('ip_address', None)
 return super(InstanceDetail, self).update_raw_data(data)

 def update(self, request, *args, **kwargs):
 r = super(InstanceDetail, self).update(request, *args, **kwargs)
 if status.is_success(r.status_code):
 obj = self.get_object()
-obj.set_capacity_value()
-obj.save(update_fields=['capacity'])
+capacity_changed = obj.set_capacity_value()
+if capacity_changed:
+obj.save(update_fields=['capacity'])
 r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
 return r

@@ -1564,16 +1565,15 @@ class HostMetricDetail(RetrieveDestroyAPIView):
 return Response(status=status.HTTP_204_NO_CONTENT)


-# It will be enabled in future version of the AWX
-# class HostMetricSummaryMonthlyList(ListAPIView):
-# name = _("Host Metrics Summary Monthly")
-# model = models.HostMetricSummaryMonthly
-# serializer_class = serializers.HostMetricSummaryMonthlySerializer
-# permission_classes = (IsSystemAdminOrAuditor,)
-# search_fields = ('date',)
-#
-# def get_queryset(self):
-# return self.model.objects.all()
+class HostMetricSummaryMonthlyList(ListAPIView):
+name = _("Host Metrics Summary Monthly")
+model = models.HostMetricSummaryMonthly
+serializer_class = serializers.HostMetricSummaryMonthlySerializer
+permission_classes = (IsSystemAdminOrAuditor,)
+search_fields = ('date',)
+
+def get_queryset(self):
+return self.model.objects.all()


 class HostList(HostRelatedSearchMixin, ListCreateAPIView):
@@ -6,6 +6,8 @@ import io
 import ipaddress
 import os
 import tarfile
+import time
+import re

 import asn1
 from awx.api import serializers
@@ -40,6 +42,8 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
 # │ │ └── receptor.key
 # │ └── work-public-key.pem
 # └── requirements.yml
+
+
 class InstanceInstallBundle(GenericAPIView):
 name = _('Install Bundle')
 model = models.Instance
@@ -49,9 +53,9 @@ class InstanceInstallBundle(GenericAPIView):
 def get(self, request, *args, **kwargs):
 instance_obj = self.get_object()

-if instance_obj.node_type not in ('execution',):
+if instance_obj.node_type not in ('execution', 'hop'):
 return Response(
-data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
+data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
 status=status.HTTP_400_BAD_REQUEST,
 )

@@ -66,37 +70,37 @@ class InstanceInstallBundle(GenericAPIView):
 # generate and write the receptor key to receptor/tls/receptor.key in the tar file
 key, cert = generate_receptor_tls(instance_obj)

+def tar_addfile(tarinfo, filecontent):
+tarinfo.mtime = time.time()
+tarinfo.size = len(filecontent)
+tar.addfile(tarinfo, io.BytesIO(filecontent))
+
 key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
-key_tarinfo.size = len(key)
-tar.addfile(key_tarinfo, io.BytesIO(key))
+tar_addfile(key_tarinfo, key)

 cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
 cert_tarinfo.size = len(cert)
-tar.addfile(cert_tarinfo, io.BytesIO(cert))
+tar_addfile(cert_tarinfo, cert)

 # generate and write install_receptor.yml to the tar file
-playbook = generate_playbook().encode('utf-8')
+playbook = generate_playbook(instance_obj).encode('utf-8')
 playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
-playbook_tarinfo.size = len(playbook)
-tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
+tar_addfile(playbook_tarinfo, playbook)

 # generate and write inventory.yml to the tar file
 inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
 inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
-inventory_yml_tarinfo.size = len(inventory_yml)
-tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
+tar_addfile(inventory_yml_tarinfo, inventory_yml)

 # generate and write group_vars/all.yml to the tar file
 group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
 group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
-group_vars_tarinfo.size = len(group_vars)
-tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
+tar_addfile(group_vars_tarinfo, group_vars)

 # generate and write requirements.yml to the tar file
 requirements_yml = generate_requirements_yml().encode('utf-8')
 requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
-requirements_yml_tarinfo.size = len(requirements_yml)
-tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
+tar_addfile(requirements_yml_tarinfo, requirements_yml)

 # respond with the tarfile
 f.seek(0)
@@ -105,8 +109,10 @@ class InstanceInstallBundle(GenericAPIView):
 return response


-def generate_playbook():
-return render_to_string("instance_install_bundle/install_receptor.yml")
+def generate_playbook(instance_obj):
+playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
+# convert consecutive newlines with a single newline
+return re.sub(r'\n+', '\n', playbook_yaml)


 def generate_requirements_yml():
@@ -118,7 +124,12 @@ def generate_inventory_yml(instance_obj):


 def generate_group_vars_all_yml(instance_obj):
-return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
+peers = []
+for instance in instance_obj.peers.all():
+peers.append(dict(host=instance.hostname, port=instance.listener_port))
+all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
+# convert consecutive newlines with a single newline
+return re.sub(r'\n+', '\n', all_yaml)


 def generate_receptor_tls(instance_obj):
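The `re.sub(r'\n+', '\n', ...)` calls in the new `generate_playbook` and `generate_group_vars_all_yml` helpers exist because Jinja block tags that render to nothing leave empty lines behind in the install bundle files. A minimal standalone sketch of the effect (not AWX code, sample strings are illustrative):

```python
import re

# A rendered template whose {% if %}/{% for %} blocks produced no output
# typically contains runs of blank lines.
rendered = "---\nreceptor_listener: false\n\n\n\npodman_user: awx\n"

# Collapse consecutive newlines into one, as the new helpers do.
cleaned = re.sub(r'\n+', '\n', rendered)
print(cleaned)
# ---
# receptor_listener: false
# podman_user: awx
```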
@@ -107,8 +107,7 @@ class ApiVersionRootView(APIView):
 data['groups'] = reverse('api:group_list', request=request)
 data['hosts'] = reverse('api:host_list', request=request)
 data['host_metrics'] = reverse('api:host_metric_list', request=request)
-# It will be enabled in future version of the AWX
-# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
+data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
 data['job_templates'] = reverse('api:job_template_list', request=request)
 data['jobs'] = reverse('api:job_list', request=request)
 data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)
@@ -418,6 +418,10 @@ class SettingsWrapper(UserSettingsHolder):
 """Get value while accepting the in-memory cache if key is available"""
 with _ctit_db_wrapper(trans_safe=True):
 return self._get_local(name)
+# If the last line did not return, that means we hit a database error
+# in that case, we should not have a local cache value
+# thus, return empty as a signal to use the default
+return empty

 def __getattr__(self, name):
 value = empty
@@ -13,6 +13,7 @@ from unittest import mock
 from django.conf import LazySettings
 from django.core.cache.backends.locmem import LocMemCache
 from django.core.exceptions import ImproperlyConfigured
+from django.db.utils import Error as DBError
 from django.utils.translation import gettext_lazy as _
 import pytest

@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
 with mock.patch.object(settings, '_get_local') as mock_get:
 assert settings.AWX_VAR == 'DEFAULT'
 mock_get.assert_not_called()
+
+
+@pytest.mark.defined_in_file(AWX_VAR=[])
+def test_getattr_with_database_error(settings):
+"""
+If a setting is defined via the registry and has a null-ish default which is not None
+then referencing that setting during a database outage should give that default
+this is regression testing for a bug where it would return None
+"""
+settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
+settings._awx_conf_memoizedcache.clear()
+
+with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
+mock_ensure.side_effect = DBError('for test')
+assert settings.AWX_VAR == []
@@ -366,9 +366,9 @@ class BaseAccess(object):
 report_violation = lambda message: None
 else:
 report_violation = lambda message: logger.warning(message)
-if validation_info.get('trial', False) is True or validation_info['instance_count'] == 10: # basic 10 license
+if validation_info.get('trial', False) is True:

-def report_violation(message):
+def report_violation(message): # noqa
 raise PermissionDenied(message)

 if check_expiration and validation_info.get('time_remaining', None) is None:
@@ -613,3 +613,20 @@ def host_metric_table(since, full_path, until, **kwargs):
 since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
 )
 return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
+
+
+@register('host_metric_summary_monthly_table', '1.0', format='csv', description=_('HostMetricSummaryMonthly export, full sync'), expensive=trivial_slicing)
+def host_metric_summary_monthly_table(since, full_path, **kwargs):
+query = '''
+COPY (SELECT main_hostmetricsummarymonthly.id,
+main_hostmetricsummarymonthly.date,
+main_hostmetricsummarymonthly.license_capacity,
+main_hostmetricsummarymonthly.license_consumed,
+main_hostmetricsummarymonthly.hosts_added,
+main_hostmetricsummarymonthly.hosts_deleted,
+main_hostmetricsummarymonthly.indirectly_managed_hosts
+FROM main_hostmetricsummarymonthly
+ORDER BY main_hostmetricsummarymonthly.id ASC) TO STDOUT WITH CSV HEADER
+'''
+
+return _copy_table(table='host_metric_summary_monthly', query=query, path=full_path)
awx/main/cache.py (new file, 87 lines)
@@ -0,0 +1,87 @@
+import functools
+
+from django.conf import settings
+from django.core.cache.backends.base import DEFAULT_TIMEOUT
+from django.core.cache.backends.redis import RedisCache
+
+from redis.exceptions import ConnectionError, ResponseError, TimeoutError
+import socket
+
+# This list comes from what django-redis ignores and the behavior we are trying
+# to retain while dropping the dependency on django-redis.
+IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
+
+CONNECTION_INTERRUPTED_SENTINEL = object()
+
+
+def optionally_ignore_exceptions(func=None, return_value=None):
+if func is None:
+return functools.partial(optionally_ignore_exceptions, return_value=return_value)
+
+@functools.wraps(func)
+def wrapper(*args, **kwargs):
+try:
+return func(*args, **kwargs)
+except IGNORED_EXCEPTIONS as e:
+if settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
+return return_value
+raise e.__cause__ or e
+
+return wrapper
+
+
+class AWXRedisCache(RedisCache):
+"""
+We just want to wrap the upstream RedisCache class so that we can ignore
+the exceptions that it raises when the cache is unavailable.
+"""
+
+@optionally_ignore_exceptions
+def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
+return super().add(key, value, timeout, version)
+
+@optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
+def _get(self, key, default=None, version=None):
+return super().get(key, default, version)
+
+def get(self, key, default=None, version=None):
+value = self._get(key, default, version)
+if value is CONNECTION_INTERRUPTED_SENTINEL:
+return default
+return value
+
+@optionally_ignore_exceptions
+def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
+return super().set(key, value, timeout, version)
+
+@optionally_ignore_exceptions
+def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
+return super().touch(key, timeout, version)
+
+@optionally_ignore_exceptions
+def delete(self, key, version=None):
+return super().delete(key, version)
+
+@optionally_ignore_exceptions
+def get_many(self, keys, version=None):
+return super().get_many(keys, version)
+
+@optionally_ignore_exceptions
+def has_key(self, key, version=None):
+return super().has_key(key, version)
+
+@optionally_ignore_exceptions
+def incr(self, key, delta=1, version=None):
+return super().incr(key, delta, version)
+
+@optionally_ignore_exceptions
+def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
+return super().set_many(data, timeout, version)
+
+@optionally_ignore_exceptions
+def delete_many(self, keys, version=None):
+return super().delete_many(keys, version)
+
+@optionally_ignore_exceptions
+def clear(self):
+return super().clear()
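The new AWXRedisCache only changes behavior when Redis is unreachable: every call is wrapped by `optionally_ignore_exceptions`, and `get()` maps the sentinel back to the caller's default. A hedged sketch of how such a backend would typically be wired into Django settings — the exact CACHES block AWX uses is not part of this diff, so the location value below is a placeholder:

```python
# Hypothetical settings sketch; only the backend dotted path and the
# DJANGO_REDIS_IGNORE_EXCEPTIONS flag appear in the diff itself.
CACHES = {
    'default': {
        'BACKEND': 'awx.main.cache.AWXRedisCache',
        'LOCATION': 'redis://localhost:6379/1',  # placeholder location
    }
}

# With this flag enabled, cache.get('key', 'fallback') returns 'fallback'
# instead of raising when Redis is down.
DJANGO_REDIS_IGNORE_EXCEPTIONS = True
```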
@@ -862,6 +862,15 @@ register(
 category_slug='system',
 )
+
+register(
+'HOST_METRIC_SUMMARY_TASK_LAST_TS',
+field_class=fields.DateTimeField,
+label=_('Last computing date of HostMetricSummaryMonthly'),
+allow_null=True,
+category=_('System'),
+category_slug='system',
+)

 register(
 'AWX_CLEANUP_PATHS',
 field_class=fields.BooleanField,
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote

 from django.utils.translation import gettext_lazy as _
 import requests
+import base64
+import binascii


 conjur_inputs = {
@@ -50,6 +52,13 @@ conjur_inputs = {
 }


+def _is_base64(s: str) -> bool:
+try:
+return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
+except binascii.Error:
+return False
+
+
 def conjur_backend(**kwargs):
 url = kwargs['url']
 api_key = kwargs['api_key']
@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
 token = resp.content.decode('utf-8')

 lookup_kwargs = {
-'headers': {'Authorization': 'Token token="{}"'.format(token)},
+'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
 'allow_redirects': False,
 }
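The new `_is_base64` helper is a round-trip check: a token counts as already base64-encoded only if decoding and re-encoding reproduces it exactly, and the Authorization header is built from the encoded form either way. A small standalone illustration, with a made-up token value:

```python
import base64
import binascii


def _is_base64(s: str) -> bool:
    try:
        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
    except binascii.Error:
        return False


raw_token = '{"protected":"...","payload":"...","signature":"..."}'  # placeholder Conjur token body
encoded = base64.b64encode(raw_token.encode('utf-8')).decode('utf-8')

assert _is_base64(encoded)       # already encoded, used as-is
assert not _is_base64(raw_token)  # raw JSON gets encoded before it goes in the header

token = raw_token
header_value = 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))
```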
@@ -1,7 +1,10 @@
 from .plugin import CredentialPlugin
 from django.utils.translation import gettext_lazy as _

-from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
+try:
+from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
+except ImportError:
+from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret

 tss_inputs = {
 'fields': [
@@ -51,7 +54,9 @@ tss_inputs = {

 def tss_backend(**kwargs):
 if kwargs.get("domain"):
-authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
+authorizer = DomainPasswordGrantAuthorizer(
+base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
+)
 else:
 authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
 secret_server = SecretServer(kwargs['server_url'], authorizer)
@@ -40,8 +40,12 @@ def get_task_queuename():


 class PubSub(object):
-def __init__(self, conn):
+def __init__(self, conn, select_timeout=None):
 self.conn = conn
+if select_timeout is None:
+self.select_timeout = 5
+else:
+self.select_timeout = select_timeout

 def listen(self, channel):
 with self.conn.cursor() as cur:
@@ -55,16 +59,33 @@ class PubSub(object):
 with self.conn.cursor() as cur:
 cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))

-def events(self, select_timeout=5, yield_timeouts=False):
+@staticmethod
+def current_notifies(conn):
+"""
+Altered version of .notifies method from psycopg library
+This removes the outer while True loop so that we only process
+queued notifications
+"""
+with conn.lock:
+try:
+ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
+except psycopg.errors._NO_TRACEBACK as ex:
+raise ex.with_traceback(None)
+enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
+for pgn in ns:
+n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
+yield n
+
+def events(self, yield_timeouts=False):
 if not self.conn.autocommit:
 raise RuntimeError('Listening for events can only be done in autocommit mode')

 while True:
-if select.select([self.conn], [], [], select_timeout) == NOT_READY:
+if select.select([self.conn], [], [], self.select_timeout) == NOT_READY:
 if yield_timeouts:
 yield None
 else:
-notification_generator = self.conn.notifies()
+notification_generator = self.current_notifies(self.conn)
 for notification in notification_generator:
 yield notification

@@ -73,7 +94,7 @@ class PubSub(object):


 @contextmanager
-def pg_bus_conn(new_connection=False):
+def pg_bus_conn(new_connection=False, select_timeout=None):
 '''
 Any listeners probably want to establish a new database connection,
 separate from the Django connection used for queries, because that will prevent
@@ -98,7 +119,7 @@ def pg_bus_conn(new_connection=False):
 raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
 conn = pg_connection.connection

-pubsub = PubSub(conn)
+pubsub = PubSub(conn, select_timeout=select_timeout)
 yield pubsub
 if new_connection:
 conn.close()
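With this change the read timeout lives on the PubSub object rather than being an argument to `events()`, so a long-running listener can adjust it between iterations. A sketch of the calling pattern, with an illustrative channel name:

```python
from awx.main.dispatch import pg_bus_conn

# Hypothetical listener loop showing the new select_timeout keyword.
with pg_bus_conn(new_connection=True, select_timeout=5) as conn:
    conn.listen('example_channel')  # channel name is made up for this sketch
    for event in conn.events(yield_timeouts=True):
        if event is None:
            # select timed out; a caller may change conn.select_timeout here,
            # which is what the dispatcher main loop now does each pass.
            continue
        print(event.payload)
```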
@@ -40,6 +40,9 @@ class Control(object):
 def cancel(self, task_ids, *args, **kwargs):
 return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)

+def schedule(self, *args, **kwargs):
+return self.control_with_reply('schedule', *args, **kwargs)
+
 @classmethod
 def generate_reply_queue_name(cls):
 return f"reply_to_{str(uuid.uuid4()).replace('-','_')}"
@@ -52,14 +55,14 @@ class Control(object):
 if not connection.get_autocommit():
 raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')

-with pg_bus_conn() as conn:
+with pg_bus_conn(select_timeout=timeout) as conn:
 conn.listen(reply_queue)
 send_data = {'control': command, 'reply_to': reply_queue}
 if extra_data:
 send_data.update(extra_data)
 conn.notify(self.queuename, json.dumps(send_data))

-for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
+for reply in conn.events(yield_timeouts=True):
 if reply is None:
 logger.error(f'{self.service} did not reply within {timeout}s')
 raise RuntimeError(f"{self.service} did not reply within {timeout}s")
@@ -1,57 +1,142 @@
 import logging
-import os
 import time
-from multiprocessing import Process
+import yaml
+from datetime import datetime

-from django.conf import settings
-from django.db import connections
-from schedule import Scheduler
-from django_guid import set_guid
-from django_guid.utils import generate_guid
-
-from awx.main.dispatch.worker import TaskWorker
-from awx.main.utils.db import set_connection_name

 logger = logging.getLogger('awx.main.dispatch.periodic')


-class Scheduler(Scheduler):
-def run_continuously(self):
-idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)
-
-def run():
-ppid = os.getppid()
-logger.warning('periodic beat started')
-
-set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
-
-while True:
-if os.getppid() != ppid:
-# if the parent PID changes, this process has been orphaned
-# via e.g., segfault or sigkill, we should exit too
-pid = os.getpid()
-logger.warning(f'periodic beat exiting gracefully pid:{pid}')
-raise SystemExit()
-try:
-for conn in connections.all():
-# If the database connection has a hiccup, re-establish a new
-# connection
-conn.close_if_unusable_or_obsolete()
-set_guid(generate_guid())
-self.run_pending()
-except Exception:
-logger.exception('encountered an error while scheduling periodic tasks')
-time.sleep(idle_seconds)
-
-process = Process(target=run)
-process.daemon = True
-process.start()
-
-
-def run_continuously():
-scheduler = Scheduler()
-for task in settings.CELERYBEAT_SCHEDULE.values():
-apply_async = TaskWorker.resolve_callable(task['task']).apply_async
-total_seconds = task['schedule'].total_seconds()
-scheduler.every(total_seconds).seconds.do(apply_async)
-scheduler.run_continuously()
+class ScheduledTask:
+"""
+Class representing schedules, very loosely modeled after python schedule library Job
+the idea of this class is to:
+- only deal in relative times (time since the scheduler global start)
+- only deal in integer math for target runtimes, but float for current relative time
+
+Missed schedule policy:
+Invariant target times are maintained, meaning that if interval=10s offset=0
+and it runs at t=7s, then it calls for next run in 3s.
+However, if a complete interval has passed, that is counted as a missed run,
+and missed runs are abandoned (no catch-up runs).
+"""
+
+def __init__(self, name: str, data: dict):
+# parameters need for schedule computation
+self.interval = int(data['schedule'].total_seconds())
+self.offset = 0 # offset relative to start time this schedule begins
+self.index = 0 # number of periods of the schedule that has passed
+
+# parameters that do not affect scheduling logic
+self.last_run = None # time of last run, only used for debug
+self.completed_runs = 0 # number of times schedule is known to run
+self.name = name
+self.data = data # used by caller to know what to run
+
+@property
+def next_run(self):
+"Time until the next run with t=0 being the global_start of the scheduler class"
+return (self.index + 1) * self.interval + self.offset
+
+def due_to_run(self, relative_time):
+return bool(self.next_run <= relative_time)
+
+def expected_runs(self, relative_time):
+return int((relative_time - self.offset) / self.interval)
+
+def mark_run(self, relative_time):
+self.last_run = relative_time
+self.completed_runs += 1
+new_index = self.expected_runs(relative_time)
+if new_index > self.index + 1:
+logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
+self.index = new_index
+
+def missed_runs(self, relative_time):
+"Number of times job was supposed to ran but failed to, only used for debug"
+missed_ct = self.expected_runs(relative_time) - self.completed_runs
+# if this is currently due to run do not count that as a missed run
+if missed_ct and self.due_to_run(relative_time):
+missed_ct -= 1
+return missed_ct
+
+
+class Scheduler:
+def __init__(self, schedule):
+"""
+Expects schedule in the form of a dictionary like
+{
+'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'}
+}
+Only the schedule nearest-second value is used for scheduling,
+the rest of the data is for use by the caller to know what to run.
+"""
+self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
+min_interval = min(job.interval for job in self.jobs)
+num_jobs = len(self.jobs)
+
+# this is intentionally oppioniated against spammy schedules
+# a core goal is to spread out the scheduled tasks (for worker management)
+# and high-frequency schedules just do not work with that
+if num_jobs > min_interval:
+raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')
+
+# even space out jobs over the base interval
+for i, job in enumerate(self.jobs):
+job.offset = (i * min_interval) // num_jobs
+
+# internally times are all referenced relative to startup time, add grace period
+self.global_start = time.time() + 2.0
+
+def get_and_mark_pending(self):
+relative_time = time.time() - self.global_start
+to_run = []
+for job in self.jobs:
+if job.due_to_run(relative_time):
+to_run.append(job)
+logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
+job.mark_run(relative_time)
+return to_run
+
+def time_until_next_run(self):
+relative_time = time.time() - self.global_start
+next_job = min(self.jobs, key=lambda j: j.next_run)
+delta = next_job.next_run - relative_time
+if delta <= 0.1:
+# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
+logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
+return 0.1
+elif delta > 20.0:
+logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
+return 20.0
+logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
+return delta
+
+def debug(self, *args, **kwargs):
+data = dict()
+data['title'] = 'Scheduler status'
+
+now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
+start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
+relative_time = time.time() - self.global_start
+data['started_time'] = start_time
+data['current_time'] = now
+data['current_time_relative'] = round(relative_time, 3)
+data['total_schedules'] = len(self.jobs)
+
+data['schedule_list'] = dict(
+[
+(
+job.name,
+dict(
+last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
+next_run_in_seconds=round(job.next_run - relative_time, 3),
+offset_in_seconds=job.offset,
+completed_runs=job.completed_runs,
+missed_runs=job.missed_runs(relative_time),
+),
+)
+for job in sorted(self.jobs, key=lambda job: job.interval)
+]
+)
+return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
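The new Scheduler is driven entirely by a plain dictionary of schedules, as its docstring describes. A small standalone sketch of the intended call pattern — the job names and intervals here are illustrative only:

```python
from datetime import timedelta

from awx.main.dispatch.periodic import Scheduler

# Two made-up jobs; only the 'schedule' key matters to the Scheduler itself,
# the rest of each dict is opaque data for the caller.
scheduler = Scheduler({
    'cleanup': {'schedule': timedelta(seconds=60)},
    'gather_metrics': {'schedule': timedelta(seconds=20)},
})

# In a loop: run whatever is due, then wait for the suggested time
# (the class clamps the suggestion to roughly the 0.1s..20s range).
for job in scheduler.get_and_mark_pending():
    print(f'would run {job.name} now, data={job.data}')
timeout = scheduler.time_until_next_run()
```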
@@ -417,16 +417,16 @@ class AutoscalePool(WorkerPool):
 # the task manager to never do more work
 current_task = w.current_task
 if current_task and isinstance(current_task, dict):
-endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
+endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
 current_task_name = current_task.get('task', '')
-if any(current_task_name.endswith(e) for e in endings):
+if current_task_name.endswith(endings):
 if 'started' not in current_task:
 w.managed_tasks[current_task['uuid']]['started'] = time.time()
 age = time.time() - current_task['started']
 w.managed_tasks[current_task['uuid']]['age'] = age
 if age > self.task_manager_timeout:
-logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
-os.kill(w.pid, signal.SIGTERM)
+logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}')
+os.kill(w.pid, signal.SIGUSR1)

 for m in orphaned:
 # if all the workers are dead, spawn at least one
@@ -73,15 +73,15 @@ class task:
 return cls.apply_async(args, kwargs)

 @classmethod
-def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
+def get_async_body(cls, args=None, kwargs=None, uuid=None, **kw):
+"""
+Get the python dict to become JSON data in the pg_notify message
+This same message gets passed over the dispatcher IPC queue to workers
+If a task is submitted to a multiprocessing pool, skipping pg_notify, this might be used directly
+"""
 task_id = uuid or str(uuid4())
 args = args or []
 kwargs = kwargs or {}
-queue = queue or getattr(cls.queue, 'im_func', cls.queue)
-if not queue:
-msg = f'{cls.name}: Queue value required and may not be None'
-logger.error(msg)
-raise ValueError(msg)
 obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
 guid = get_guid()
 if guid:
@@ -89,6 +89,16 @@ class task:
 if bind_kwargs:
 obj['bind_kwargs'] = bind_kwargs
 obj.update(**kw)
+return obj
+
+@classmethod
+def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
+queue = queue or getattr(cls.queue, 'im_func', cls.queue)
+if not queue:
+msg = f'{cls.name}: Queue value required and may not be None'
+logger.error(msg)
+raise ValueError(msg)
+obj = cls.get_async_body(args=args, kwargs=kwargs, uuid=uuid, **kw)
 if callable(queue):
 queue = queue()
 if not is_testing():
@@ -116,4 +126,5 @@ class task:
 setattr(fn, 'name', cls.name)
 setattr(fn, 'apply_async', cls.apply_async)
 setattr(fn, 'delay', cls.delay)
+setattr(fn, 'get_async_body', cls.get_async_body)
 return fn
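Splitting `get_async_body` out of `apply_async` means the pg_notify message body can be built without resolving a queue, which is how the dispatcher now feeds scheduled tasks straight into its worker pool. A sketch of the resulting message shape, with made-up values:

```python
# The dict mirrors the keys set in get_async_body; the task name and uuid
# below are illustrative, not taken from AWX.
body = {
    'uuid': '6c1b2f1e-0000-0000-0000-0000000000e2',  # uuid4 unless one is passed in
    'args': [],
    'kwargs': {},
    'task': 'awx.main.tasks.system.example_task',
    'time_pub': 1700000000.0,
}
# apply_async() builds this same dict via cls.get_async_body() and publishes it
# with pg_notify, while the dispatcher's scheduler hands it to dispatch_task() directly.
```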
@@ -11,11 +11,13 @@ import psycopg
 import time
 from uuid import UUID
 from queue import Empty as QueueEmpty
+from datetime import timedelta

 from django import db
 from django.conf import settings

 from awx.main.dispatch.pool import WorkerPool
+from awx.main.dispatch.periodic import Scheduler
 from awx.main.dispatch import pg_bus_conn
 from awx.main.utils.common import log_excess_runtime
 from awx.main.utils.db import set_connection_name
@@ -64,10 +66,12 @@ class AWXConsumerBase(object):
 def control(self, body):
 logger.warning(f'Received control signal:\n{body}')
 control = body.get('control')
-if control in ('status', 'running', 'cancel'):
+if control in ('status', 'schedule', 'running', 'cancel'):
 reply_queue = body['reply_to']
 if control == 'status':
 msg = '\n'.join([self.listening_on, self.pool.debug()])
+if control == 'schedule':
+msg = self.scheduler.debug()
 elif control == 'running':
 msg = []
 for worker in self.pool.workers:
@@ -93,16 +97,11 @@ class AWXConsumerBase(object):
 else:
 logger.error('unrecognized control message: {}'.format(control))

-def process_task(self, body):
+def dispatch_task(self, body):
+"""This will place the given body into a worker queue to run method decorated as a task"""
 if isinstance(body, dict):
 body['time_ack'] = time.time()

-if 'control' in body:
-try:
-return self.control(body)
-except Exception:
-logger.exception(f"Exception handling control message: {body}")
-return
 if len(self.pool):
 if "uuid" in body and body['uuid']:
 try:
@@ -116,15 +115,24 @@ class AWXConsumerBase(object):
 self.pool.write(queue, body)
 self.total_messages += 1

+def process_task(self, body):
+"""Routes the task details in body as either a control task or a task-task"""
+if 'control' in body:
+try:
+return self.control(body)
+except Exception:
+logger.exception(f"Exception handling control message: {body}")
+return
+self.dispatch_task(body)
+
 @log_excess_runtime(logger)
 def record_statistics(self):
 if time.time() - self.last_stats > 1: # buffer stat recording to once per second
 try:
 self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
-self.last_stats = time.time()
 except Exception:
 logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
 self.last_stats = time.time()

 def run(self, *args, **kwargs):
 signal.signal(signal.SIGINT, self.stop)
@@ -151,9 +159,9 @@ class AWXConsumerRedis(AWXConsumerBase):


 class AWXConsumerPG(AWXConsumerBase):
-def __init__(self, *args, **kwargs):
+def __init__(self, *args, schedule=None, **kwargs):
 super().__init__(*args, **kwargs)
-self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
+self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
 # if no successful loops have ran since startup, then we should fail right away
 self.pg_is_down = True # set so that we fail if we get database errors on startup
 init_time = time.time()
@@ -162,24 +170,53 @@ class AWXConsumerPG(AWXConsumerBase):
 self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
 self.last_metrics_gather = init_time
 self.listen_cumulative_time = 0.0
+if schedule:
+schedule = schedule.copy()
+else:
+schedule = {}
+# add control tasks to be ran at regular schedules
+# NOTE: if we run out of database connections, it is important to still run cleanup
+# so that we scale down workers and free up connections
+schedule['pool_cleanup'] = {'control': self.pool.cleanup, 'schedule': timedelta(seconds=60)}
+# record subsystem metrics for the dispatcher
+schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
+self.scheduler = Scheduler(schedule)
+
+def record_metrics(self):
+current_time = time.time()
+self.pool.produce_subsystem_metrics(self.subsystem_metrics)
+self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
+self.subsystem_metrics.pipe_execute()
+self.listen_cumulative_time = 0.0
+self.last_metrics_gather = current_time

 def run_periodic_tasks(self):
-self.record_statistics() # maintains time buffer in method
-
-current_time = time.time()
-if current_time - self.last_cleanup > 60: # same as cluster_node_heartbeat
-# NOTE: if we run out of database connections, it is important to still run cleanup
-# so that we scale down workers and free up connections
-self.pool.cleanup()
-self.last_cleanup = current_time
-
-# record subsystem metrics for the dispatcher
-if current_time - self.last_metrics_gather > 20:
-self.pool.produce_subsystem_metrics(self.subsystem_metrics)
-self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
-self.subsystem_metrics.pipe_execute()
-self.listen_cumulative_time = 0.0
-self.last_metrics_gather = current_time
+"""
+Run general periodic logic, and return maximum time in seconds before
+the next requested run
+This may be called more often than that when events are consumed
+so this should be very efficient in that
+"""
+try:
+self.record_statistics() # maintains time buffer in method
+except Exception as exc:
+logger.warning(f'Failed to save dispatcher statistics {exc}')
+
+for job in self.scheduler.get_and_mark_pending():
+if 'control' in job.data:
+try:
+job.data['control']()
+except Exception:
+logger.exception(f'Error running control task {job.data}')
+elif 'task' in job.data:
+body = self.worker.resolve_callable(job.data['task']).get_async_body()
+# bypasses pg_notify for scheduled tasks
+self.dispatch_task(body)
+
+self.pg_is_down = False
+self.listen_start = time.time()
+
+return self.scheduler.time_until_next_run()

 def run(self, *args, **kwargs):
 super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -195,14 +232,15 @@ class AWXConsumerPG(AWXConsumerBase):
 if init is False:
 self.worker.on_start()
 init = True
-self.listen_start = time.time()
+# run_periodic_tasks run scheduled actions and gives time until next scheduled action
+# this is saved to the conn (PubSub) object in order to modify read timeout in-loop
+conn.select_timeout = self.run_periodic_tasks()
+# this is the main operational loop for awx-manage run_dispatcher
 for e in conn.events(yield_timeouts=True):
-self.listen_cumulative_time += time.time() - self.listen_start
+self.listen_cumulative_time += time.time() - self.listen_start # for metrics
 if e is not None:
 self.process_task(json.loads(e.payload))
-self.run_periodic_tasks()
-self.pg_is_down = False
-self.listen_start = time.time()
+conn.select_timeout = self.run_periodic_tasks()
 if self.should_stop:
 return
 except psycopg.InterfaceError:
@@ -250,8 +288,8 @@ class BaseWorker(object):
 break
 except QueueEmpty:
 continue
-except Exception as e:
-logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
+except Exception:
+logger.exception("Exception on worker {}, reconnecting: ".format(idx))
 continue
 try:
 for conn in db.connections.all():
@@ -1,22 +1,22 @@
-from awx.main.models import HostMetric
 from django.core.management.base import BaseCommand
 from django.conf import settings
+from awx.main.tasks.host_metrics import HostMetricTask


 class Command(BaseCommand):
 """
-Run soft-deleting of HostMetrics
+This command provides cleanup task for HostMetric model.
+There are two modes, which run in following order:
+- soft cleanup
+- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
+This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
+- - updates columns delete, deleted_counter and last_deleted
+- hard cleanup
+- - Permanently erase from the database all host metrics last automated 36 months ago or before.
+This operation happens after the soft deletion has finished.
 """

-help = 'Run soft-deleting of HostMetrics'
+help = 'Run soft and hard-deletion of HostMetrics'

-def add_arguments(self, parser):
-parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
-
 def handle(self, *args, **options):
-months_ago = options.get('months-ago') or None
-
-if not months_ago:
-months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
-
-HostMetric.cleanup_task(months_ago)
+HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
|
@@ -195,14 +195,35 @@ class Command(BaseCommand):
         delete_meta.delete_jobs()
         return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)

-    def _cascade_delete_job_events(self, model, pk_list):
+    def _handle_unpartitioned_events(self, model, pk_list):
+        """
+        If unpartitioned job events remain, it will cascade those from jobs in pk_list
+        if the unpartitioned table is no longer necessary, it will drop the table
+        """
+        tblname = unified_job_class_to_event_table_name(model)
+        rel_name = model().event_parent_key
+        with connection.cursor() as cursor:
+            cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
+            row = cursor.fetchone()
+            if row is None:
+                self.logger.debug(f'Unpartitioned table for {rel_name} does not exist, you are fully migrated')
+                return
         if pk_list:
             with connection.cursor() as cursor:
-                tblname = unified_job_class_to_event_table_name(model)
                 pk_list_csv = ','.join(map(str, pk_list))
-                rel_name = model().event_parent_key
                 cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
+        with connection.cursor() as cursor:
+            # same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
+            cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}"')
+            row = cursor.fetchone()
+            last_created = row[0]
+            if last_created:
+                self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
+            else:
+                self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
+            if (last_created is None) or (last_created < self.cutoff):
+                self.logger.warning(f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}')
+                cursor.execute(f'DROP TABLE _unpartitioned_{tblname}')

     def cleanup_jobs(self):
         batch_size = 100000
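A compressed sketch of the same drop-if-stale flow, written against a bare PostgreSQL DB-API cursor. The table naming follows the hunk above; logging and the per-model plumbing of the management command are left out.

# Sketch only; assumes a psycopg2-style cursor on the AWX database.
from datetime import datetime


def drop_unpartitioned_table_if_stale(cursor, tblname: str, cutoff: datetime) -> bool:
    """Return True if _unpartitioned_<tblname> was dropped."""
    unpartitioned = f"_unpartitioned_{tblname}"

    # Does the leftover pre-partitioning table still exist?
    cursor.execute("SELECT 1 FROM pg_tables WHERE tablename = %s;", (unpartitioned,))
    if cursor.fetchone() is None:
        return False  # fully migrated, nothing to do

    # Newest event still stored in the old table.
    cursor.execute(f'SELECT MAX("created") FROM "{unpartitioned}"')
    last_created = cursor.fetchone()[0]

    # Only drop when every remaining row is older than the retention cutoff.
    if last_created is None or last_created < cutoff:
        cursor.execute(f'DROP TABLE "{unpartitioned}"')
        return True
    return False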
@@ -227,7 +248,7 @@ class Command(BaseCommand):

             _, results = qs_batch.delete()
             deleted += results['main.Job']
-        self._cascade_delete_job_events(Job, pk_list)
+        self._handle_unpartitioned_events(Job, pk_list)

         return skipped, deleted

@@ -250,7 +271,7 @@ class Command(BaseCommand):
                 deleted += 1

         if not self.dry_run:
-            self._cascade_delete_job_events(AdHocCommand, pk_list)
+            self._handle_unpartitioned_events(AdHocCommand, pk_list)

         skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
         return skipped, deleted
@@ -278,7 +299,7 @@ class Command(BaseCommand):
                 deleted += 1

         if not self.dry_run:
-            self._cascade_delete_job_events(ProjectUpdate, pk_list)
+            self._handle_unpartitioned_events(ProjectUpdate, pk_list)

         skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
         return skipped, deleted
@@ -306,7 +327,7 @@ class Command(BaseCommand):
                 deleted += 1

         if not self.dry_run:
-            self._cascade_delete_job_events(InventoryUpdate, pk_list)
+            self._handle_unpartitioned_events(InventoryUpdate, pk_list)

         skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
         return skipped, deleted
@@ -330,7 +351,7 @@ class Command(BaseCommand):
                 deleted += 1

         if not self.dry_run:
-            self._cascade_delete_job_events(SystemJob, pk_list)
+            self._handle_unpartitioned_events(SystemJob, pk_list)

         skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
         return skipped, deleted
@@ -0,0 +1,9 @@
from django.core.management.base import BaseCommand
from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask


class Command(BaseCommand):
    help = 'Computing of HostMetricSummaryMonthly'

    def handle(self, *args, **options):
        HostMetricSummaryMonthlyTask().execute()
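The new command's file name is not visible in this excerpt; assuming it is registered as host_metric_summary_monthly, it can be driven from Python like any other Django management command.

# Hedged usage example; 'host_metric_summary_monthly' is the assumed command
# name, and this requires a configured AWX/Django environment.
from django.core.management import call_command

call_command('host_metric_summary_monthly')  # ends up running HostMetricSummaryMonthlyTask().execute()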
@@ -25,17 +25,20 @@ class Command(BaseCommand):

     def add_arguments(self, parser):
         parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
+        parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
         parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
         parser.add_argument('--uuid', type=str, help="Instance UUID")

-    def _register_hostname(self, hostname, node_type, uuid):
+    def _register_hostname(self, hostname, node_type, uuid, listener_port):
         if not hostname:
             if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                 raise CommandError('Registering with values from settings only intended for use in K8s installs')

             from awx.main.management.commands.register_queue import RegisterQueue

-            (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID)
+            (changed, instance) = Instance.objects.register(
+                ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
+            )
             RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
             RegisterQueue(
                 settings.DEFAULT_EXECUTION_QUEUE_NAME,
@@ -48,7 +51,7 @@ class Command(BaseCommand):
                 max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
             ).register()
         else:
-            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid)
+            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
         if changed:
             print("Successfully registered instance {}".format(hostname))
         else:
@@ -58,6 +61,6 @@ class Command(BaseCommand):
     @transaction.atomic
     def handle(self, **options):
         self.changed = False
-        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
+        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
         if self.changed:
             print("(changed: True)")
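The command file is likewise unnamed in this excerpt (in AWX this is the instance provisioning command, commonly invoked as provision_instance). A hedged example of passing the new Receptor listener port through registration:

# Assumed command name and placeholder values; requires a configured AWX environment.
from django.core.management import call_command

call_command(
    'provision_instance',                  # assumed command name
    hostname='exec-node-01.example.org',   # placeholder hostname
    node_type='execution',
    listener_port=27199,                   # stored via Instance.objects.register(..., listener_port=...)
)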
@@ -3,15 +3,13 @@
 import logging
 import yaml

-from django.core.cache import cache as django_cache
+from django.conf import settings
 from django.core.management.base import BaseCommand
-from django.db import connection as django_connection

 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
-from awx.main.dispatch import periodic

 logger = logging.getLogger('awx.main.dispatch')

@@ -21,6 +19,7 @@ class Command(BaseCommand):

     def add_arguments(self, parser):
         parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers')
+        parser.add_argument('--schedule', dest='schedule', action='store_true', help='print the current status of schedules being ran by dispatcher')
         parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked managed by this dispatcher')
         parser.add_argument(
             '--reload',
@@ -42,6 +41,9 @@ class Command(BaseCommand):
         if options.get('status'):
             print(Control('dispatcher').status())
             return
+        if options.get('schedule'):
+            print(Control('dispatcher').schedule())
+            return
         if options.get('running'):
             print(Control('dispatcher').running())
             return
@@ -58,21 +60,11 @@ class Command(BaseCommand):
             print(Control('dispatcher').cancel(cancel_data))
             return

-        # It's important to close these because we're _about_ to fork, and we
-        # don't want the forked processes to inherit the open sockets
-        # for the DB and cache connections (that way lies race conditions)
-        django_connection.close()
-        django_cache.close()
-
-        # spawn a daemon thread to periodically enqueues scheduled tasks
-        # (like the node heartbeat)
-        periodic.run_continuously()
-
         consumer = None

         try:
             queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
-            consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
+            consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
             consumer.run()
         except KeyboardInterrupt:
             logger.debug('Terminating Task Dispatcher')
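With periodic.run_continuously() gone, the dispatcher now receives the schedule directly and the new --schedule flag reports it. The mapping handed in as settings.CELERYBEAT_SCHEDULE is a plain dict of task entries; the entries below are only an illustrative guess at its shape, not the real AWX settings.

# Illustrative shape only; the real entries live in awx/settings.
from datetime import timedelta

CELERYBEAT_SCHEDULE = {
    'task_manager': {
        'task': 'awx.main.scheduler.tasks.task_manager',          # assumed dotted path
        'schedule': timedelta(seconds=20),
    },
    'cluster_heartbeat': {
        'task': 'awx.main.tasks.system.cluster_node_heartbeat',   # assumed dotted path
        'schedule': timedelta(seconds=60),
    },
}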
@@ -115,21 +115,25 @@ class InstanceManager(models.Manager):
             return node[0]
         raise RuntimeError("No instance found with the current cluster host id")

-    def register(self, node_uuid=None, hostname=None, ip_address=None, node_type='hybrid', defaults=None):
+    def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
         if not hostname:
             hostname = settings.CLUSTER_HOST_ID

+        if not ip_address:
+            ip_address = ""
+
         with advisory_lock('instance_registration_%s' % hostname):
             if settings.AWX_AUTO_DEPROVISION_INSTANCES:
                 # detect any instances with the same IP address.
-                # if one exists, set it to None
-                inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
-                if inst_conflicting_ip.exists():
-                    for other_inst in inst_conflicting_ip:
-                        other_hostname = other_inst.hostname
-                        other_inst.ip_address = None
-                        other_inst.save(update_fields=['ip_address'])
-                        logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
+                # if one exists, set it to ""
+                if ip_address:
+                    inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
+                    if inst_conflicting_ip.exists():
+                        for other_inst in inst_conflicting_ip:
+                            other_hostname = other_inst.hostname
+                            other_inst.ip_address = ""
+                            other_inst.save(update_fields=['ip_address'])
+                            logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))

             # Return existing instance that matches hostname or UUID (default to UUID)
             if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
@@ -157,6 +161,9 @@ class InstanceManager(models.Manager):
             if instance.node_type != node_type:
                 instance.node_type = node_type
                 update_fields.append('node_type')
+            if instance.listener_port != listener_port:
+                instance.listener_port = listener_port
+                update_fields.append('listener_port')
             if update_fields:
                 instance.save(update_fields=update_fields)
             return (True, instance)
@@ -167,12 +174,11 @@ class InstanceManager(models.Manager):
             create_defaults = {
                 'node_state': Instance.States.INSTALLED,
                 'capacity': 0,
-                'listener_port': 27199,
             }
             if defaults is not None:
                 create_defaults.update(defaults)
             uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
             if node_type == 'execution' and 'version' not in create_defaults:
                 create_defaults['version'] = RECEPTOR_PENDING
-            instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
+            instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
             return (True, instance)
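The register() changes switch the "unset" value for ip_address from None to the empty string and skip conflict detection entirely when no address is supplied. A standalone sketch of that conflict handling, with plain dicts standing in for Instance rows:

# Standalone illustration of the conflict handling added to register(); not AWX code.
from typing import Dict, List


def resolve_ip_conflicts(instances: List[Dict[str, str]], hostname: str, ip_address: str) -> List[str]:
    """Blank out ip_address on other hosts that already claim it; return their hostnames."""
    if not ip_address:               # "" never conflicts, mirroring the `if ip_address:` guard
        return []
    bumped = []
    for inst in instances:
        if inst['hostname'] != hostname and inst['ip_address'] == ip_address:
            inst['ip_address'] = ""  # unset with the empty string, matching the new default
            bumped.append(inst['hostname'])
    return bumped


cluster = [
    {'hostname': 'node-a', 'ip_address': '10.0.0.5'},
    {'hostname': 'node-b', 'ip_address': ''},
]
print(resolve_ip_conflicts(cluster, 'node-b', '10.0.0.5'))  # ['node-a']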
||||||
|
|||||||
@@ -9,13 +9,11 @@ from django.db import migrations, models
|
|||||||
import django.utils.timezone
|
import django.utils.timezone
|
||||||
import django.db.models.deletion
|
import django.db.models.deletion
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
import taggit.managers
|
|
||||||
import awx.main.fields
|
import awx.main.fields
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
dependencies = [
|
dependencies = [
|
||||||
('taggit', '0002_auto_20150616_2121'),
|
|
||||||
('contenttypes', '0002_remove_content_type_name'),
|
('contenttypes', '0002_remove_content_type_name'),
|
||||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||||
]
|
]
|
||||||
@@ -184,12 +182,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
'ordering': ('kind', 'name'),
|
'ordering': ('kind', 'name'),
|
||||||
@@ -529,12 +521,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
|
('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
@@ -589,12 +575,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
),
|
),
|
||||||
migrations.CreateModel(
|
migrations.CreateModel(
|
||||||
@@ -644,12 +624,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
'ordering': ['-next_run'],
|
'ordering': ['-next_run'],
|
||||||
@@ -687,12 +661,6 @@ class Migration(migrations.Migration):
|
|||||||
),
|
),
|
||||||
),
|
),
|
||||||
('organization', models.ForeignKey(related_name='teams', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
|
('organization', models.ForeignKey(related_name='teams', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
('users', models.ManyToManyField(related_name='teams', to=settings.AUTH_USER_MODEL, blank=True)),
|
('users', models.ManyToManyField(related_name='teams', to=settings.AUTH_USER_MODEL, blank=True)),
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
@@ -1267,13 +1235,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='unifiedjobtemplate',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='unifiedjob',
|
model_name='unifiedjob',
|
||||||
name='created_by',
|
name='created_by',
|
||||||
@@ -1319,13 +1280,6 @@ class Migration(migrations.Migration):
|
|||||||
name='schedule',
|
name='schedule',
|
||||||
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to='main.Schedule', null=True),
|
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to='main.Schedule', null=True),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='unifiedjob',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='unifiedjob',
|
model_name='unifiedjob',
|
||||||
name='unified_job_template',
|
name='unified_job_template',
|
||||||
@@ -1370,13 +1324,6 @@ class Migration(migrations.Migration):
|
|||||||
help_text='Organization containing this inventory.',
|
help_text='Organization containing this inventory.',
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='inventory',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='host',
|
model_name='host',
|
||||||
name='inventory',
|
name='inventory',
|
||||||
@@ -1407,13 +1354,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='host',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='group',
|
model_name='group',
|
||||||
name='hosts',
|
name='hosts',
|
||||||
@@ -1441,13 +1381,6 @@ class Migration(migrations.Migration):
|
|||||||
name='parents',
|
name='parents',
|
||||||
field=models.ManyToManyField(related_name='children', to='main.Group', blank=True),
|
field=models.ManyToManyField(related_name='children', to='main.Group', blank=True),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='group',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='custominventoryscript',
|
model_name='custominventoryscript',
|
||||||
name='organization',
|
name='organization',
|
||||||
@@ -1459,13 +1392,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
|
||||||
model_name='custominventoryscript',
|
|
||||||
name='tags',
|
|
||||||
field=taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
model_name='credential',
|
model_name='credential',
|
||||||
name='team',
|
name='team',
|
||||||
|
|||||||
@@ -12,8 +12,6 @@ import django.db.models.deletion
|
|||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
|
|
||||||
import taggit.managers
|
|
||||||
|
|
||||||
|
|
||||||
def create_system_job_templates(apps, schema_editor):
|
def create_system_job_templates(apps, schema_editor):
|
||||||
"""
|
"""
|
||||||
@@ -125,7 +123,6 @@ class Migration(migrations.Migration):
|
|||||||
]
|
]
|
||||||
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
('taggit', '0002_auto_20150616_2121'),
|
|
||||||
('contenttypes', '0002_remove_content_type_name'),
|
('contenttypes', '0002_remove_content_type_name'),
|
||||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||||
('main', '0001_initial'),
|
('main', '0001_initial'),
|
||||||
@@ -256,12 +253,6 @@ class Migration(migrations.Migration):
|
|||||||
'organization',
|
'organization',
|
||||||
models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True),
|
models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
),
|
),
|
||||||
migrations.AddField(
|
migrations.AddField(
|
||||||
@@ -721,12 +712,6 @@ class Migration(migrations.Migration):
|
|||||||
help_text='Organization this label belongs to.',
|
help_text='Organization this label belongs to.',
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
'ordering': ('organization', 'name'),
|
'ordering': ('organization', 'name'),
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ from __future__ import unicode_literals
|
|||||||
# Django
|
# Django
|
||||||
from django.db import connection, migrations, models, OperationalError, ProgrammingError
|
from django.db import connection, migrations, models, OperationalError, ProgrammingError
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
import taggit.managers
|
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
import awx.main.fields
|
import awx.main.fields
|
||||||
@@ -317,10 +316,6 @@ class Migration(migrations.Migration):
|
|||||||
model_name='permission',
|
model_name='permission',
|
||||||
name='project',
|
name='project',
|
||||||
),
|
),
|
||||||
migrations.RemoveField(
|
|
||||||
model_name='permission',
|
|
||||||
name='tags',
|
|
||||||
),
|
|
||||||
migrations.RemoveField(
|
migrations.RemoveField(
|
||||||
model_name='permission',
|
model_name='permission',
|
||||||
name='team',
|
name='team',
|
||||||
@@ -510,12 +505,6 @@ class Migration(migrations.Migration):
|
|||||||
null=True,
|
null=True,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
'ordering': ('kind', 'name'),
|
'ordering': ('kind', 'name'),
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
|
|||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.db import migrations, models
|
from django.db import migrations, models
|
||||||
import django.db.models.deletion
|
import django.db.models.deletion
|
||||||
import taggit.managers
|
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
import awx.main.fields
|
import awx.main.fields
|
||||||
@@ -20,7 +19,6 @@ def setup_tower_managed_defaults(apps, schema_editor):
|
|||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
dependencies = [
|
dependencies = [
|
||||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||||
('taggit', '0002_auto_20150616_2121'),
|
|
||||||
('main', '0066_v350_inventorysource_custom_virtualenv'),
|
('main', '0066_v350_inventorysource_custom_virtualenv'),
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -60,12 +58,6 @@ class Migration(migrations.Migration):
|
|||||||
'source_credential',
|
'source_credential',
|
||||||
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential'),
|
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential'),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
(
|
(
|
||||||
'target_credential',
|
'target_credential',
|
||||||
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential'),
|
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential'),
|
||||||
|
|||||||
@@ -4,12 +4,10 @@ from django.conf import settings
|
|||||||
from django.db import migrations, models
|
from django.db import migrations, models
|
||||||
import django.db.models.deletion
|
import django.db.models.deletion
|
||||||
import django.db.models.expressions
|
import django.db.models.expressions
|
||||||
import taggit.managers
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(migrations.Migration):
|
class Migration(migrations.Migration):
|
||||||
dependencies = [
|
dependencies = [
|
||||||
('taggit', '0003_taggeditem_add_unique_index'),
|
|
||||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||||
('main', '0123_drop_hg_support'),
|
('main', '0123_drop_hg_support'),
|
||||||
]
|
]
|
||||||
@@ -69,12 +67,6 @@ class Migration(migrations.Migration):
|
|||||||
to='main.Organization',
|
to='main.Organization',
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'tags',
|
|
||||||
taggit.managers.TaggableManager(
|
|
||||||
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
|
|
||||||
),
|
|
||||||
),
|
|
||||||
],
|
],
|
||||||
options={
|
options={
|
||||||
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),
|
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Generated by Django 4.2 on 2023-06-09 19:51
|
# Generated by Django 4.2.3 on 2023-08-02 13:18
|
||||||
|
|
||||||
import awx.main.models.notifications
|
import awx.main.models.notifications
|
||||||
from django.db import migrations, models
|
from django.db import migrations, models
|
||||||
@@ -11,16 +11,6 @@ class Migration(migrations.Migration):
|
|||||||
]
|
]
|
||||||
|
|
||||||
operations = [
|
operations = [
|
||||||
migrations.AlterField(
|
|
||||||
model_name='activitystream',
|
|
||||||
name='deleted_actor',
|
|
||||||
field=models.JSONField(null=True),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='activitystream',
|
|
||||||
name='setting',
|
|
||||||
field=models.JSONField(blank=True, default=dict),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
migrations.AlterField(
|
||||||
model_name='instancegroup',
|
model_name='instancegroup',
|
||||||
name='policy_instance_list',
|
name='policy_instance_list',
|
||||||
@@ -28,31 +18,11 @@ class Migration(migrations.Migration):
|
|||||||
blank=True, default=list, help_text='List of exact-match Instances that will always be automatically assigned to this group'
|
blank=True, default=list, help_text='List of exact-match Instances that will always be automatically assigned to this group'
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
migrations.AlterField(
|
|
||||||
model_name='job',
|
|
||||||
name='survey_passwords',
|
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='joblaunchconfig',
|
|
||||||
name='char_prompts',
|
|
||||||
field=models.JSONField(blank=True, default=dict),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='joblaunchconfig',
|
|
||||||
name='survey_passwords',
|
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
migrations.AlterField(
|
||||||
model_name='jobtemplate',
|
model_name='jobtemplate',
|
||||||
name='survey_spec',
|
name='survey_spec',
|
||||||
field=models.JSONField(blank=True, default=dict),
|
field=models.JSONField(blank=True, default=dict),
|
||||||
),
|
),
|
||||||
migrations.AlterField(
|
|
||||||
model_name='notification',
|
|
||||||
name='body',
|
|
||||||
field=models.JSONField(blank=True, default=dict),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
migrations.AlterField(
|
||||||
model_name='notificationtemplate',
|
model_name='notificationtemplate',
|
||||||
name='messages',
|
name='messages',
|
||||||
@@ -94,31 +64,6 @@ class Migration(migrations.Migration):
|
|||||||
name='survey_passwords',
|
name='survey_passwords',
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
),
|
),
|
||||||
migrations.AlterField(
|
|
||||||
model_name='unifiedjob',
|
|
||||||
name='job_env',
|
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='workflowjob',
|
|
||||||
name='char_prompts',
|
|
||||||
field=models.JSONField(blank=True, default=dict),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='workflowjob',
|
|
||||||
name='survey_passwords',
|
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='workflowjobnode',
|
|
||||||
name='char_prompts',
|
|
||||||
field=models.JSONField(blank=True, default=dict),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
|
||||||
model_name='workflowjobnode',
|
|
||||||
name='survey_passwords',
|
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
|
||||||
),
|
|
||||||
migrations.AlterField(
|
migrations.AlterField(
|
||||||
model_name='workflowjobtemplate',
|
model_name='workflowjobtemplate',
|
||||||
name='char_prompts',
|
name='char_prompts',
|
||||||
@@ -139,4 +84,194 @@ class Migration(migrations.Migration):
|
|||||||
name='survey_passwords',
|
name='survey_passwords',
|
||||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
),
|
),
|
||||||
|
# These are potentially a problem. Move the existing fields
|
||||||
|
# aside while pretending like they've been deleted, then add
|
||||||
|
# in fresh empty fields. Make the old fields nullable where
|
||||||
|
# needed while we are at it, so that new rows don't hit
|
||||||
|
# IntegrityError. We'll do the data migration out-of-band
|
||||||
|
# using a task.
|
||||||
|
migrations.RunSQL( # Already nullable
|
||||||
|
"ALTER TABLE main_activitystream RENAME deleted_actor TO deleted_actor_old;",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='deleted_actor',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='deleted_actor',
|
||||||
|
field=models.JSONField(null=True),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_activitystream RENAME setting TO setting_old;
|
||||||
|
ALTER TABLE main_activitystream ALTER COLUMN setting_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='setting',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='activitystream',
|
||||||
|
name='setting',
|
||||||
|
field=models.JSONField(blank=True, default=dict),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old;
|
||||||
|
ALTER TABLE main_job ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='job',
|
||||||
|
name='survey_passwords',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='job',
|
||||||
|
name='survey_passwords',
|
||||||
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old;
|
||||||
|
ALTER TABLE main_joblaunchconfig ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='joblaunchconfig',
|
||||||
|
name='char_prompts',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='joblaunchconfig',
|
||||||
|
name='char_prompts',
|
||||||
|
field=models.JSONField(blank=True, default=dict),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;
|
||||||
|
ALTER TABLE main_joblaunchconfig ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='joblaunchconfig',
|
||||||
|
name='survey_passwords',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='joblaunchconfig',
|
||||||
|
name='survey_passwords',
|
||||||
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_notification RENAME body TO body_old;
|
||||||
|
ALTER TABLE main_notification ALTER COLUMN body_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='notification',
|
||||||
|
name='body',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='notification',
|
||||||
|
name='body',
|
||||||
|
field=models.JSONField(blank=True, default=dict),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old;
|
||||||
|
ALTER TABLE main_unifiedjob ALTER COLUMN job_env_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='unifiedjob',
|
||||||
|
name='job_env',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='unifiedjob',
|
||||||
|
name='job_env',
|
||||||
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old;
|
||||||
|
ALTER TABLE main_workflowjob ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='workflowjob',
|
||||||
|
name='char_prompts',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='workflowjob',
|
||||||
|
name='char_prompts',
|
||||||
|
field=models.JSONField(blank=True, default=dict),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old;
|
||||||
|
ALTER TABLE main_workflowjob ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='workflowjob',
|
||||||
|
name='survey_passwords',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='workflowjob',
|
||||||
|
name='survey_passwords',
|
||||||
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old;
|
||||||
|
ALTER TABLE main_workflowjobnode ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='workflowjobnode',
|
||||||
|
name='char_prompts',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='workflowjobnode',
|
||||||
|
name='char_prompts',
|
||||||
|
field=models.JSONField(blank=True, default=dict),
|
||||||
|
),
|
||||||
|
migrations.RunSQL(
|
||||||
|
"""
|
||||||
|
ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old;
|
||||||
|
ALTER TABLE main_workflowjobnode ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||||
|
""",
|
||||||
|
state_operations=[
|
||||||
|
migrations.RemoveField(
|
||||||
|
model_name='workflowjobnode',
|
||||||
|
name='survey_passwords',
|
||||||
|
),
|
||||||
|
],
|
||||||
|
),
|
||||||
|
migrations.AddField(
|
||||||
|
model_name='workflowjobnode',
|
||||||
|
name='survey_passwords',
|
||||||
|
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||||
|
),
|
||||||
]
|
]
|
||||||
|
|||||||
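The long 0185 migration above repeats a single pattern for every converted JSONField: rename the existing column aside in SQL while telling Django's migration state that the field was removed, add a fresh empty field under the original name, and copy the data later out of band (see the convert_jsonfields()/migrate_jsonfield hook further down). A generic, hedged template of that pattern, with placeholder model and field names:

# Generic template of the "move the column aside" pattern used repeatedly in
# the migration above. 'mymodel' / 'payload' are placeholders, not AWX names.
from django.db import migrations, models

rename_and_replace = [
    migrations.RunSQL(
        # Keep the old data under <column>_old and let new rows skip it.
        """
        ALTER TABLE main_mymodel RENAME payload TO payload_old;
        ALTER TABLE main_mymodel ALTER COLUMN payload_old DROP NOT NULL;
        """,
        state_operations=[
            # Django's model state drops the field even though the data survives.
            migrations.RemoveField(model_name='mymodel', name='payload'),
        ],
    ),
    # A brand-new, empty JSONField takes the old name; a background task can
    # then copy payload_old into payload without blocking the migration.
    migrations.AddField(
        model_name='mymodel',
        name='payload',
        field=models.JSONField(blank=True, default=dict),
    ),
]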
27 awx/main/migrations/0186_drop_django_taggit.py Normal file
@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def delete_taggit_contenttypes(apps, schema_editor):
    ContentType = apps.get_model('contenttypes', 'ContentType')
    ContentType.objects.filter(app_label='taggit').delete()


def delete_taggit_migration_records(apps, schema_editor):
    recorder = migrations.recorder.MigrationRecorder(connection=schema_editor.connection)
    recorder.migration_qs.filter(app='taggit').delete()


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0185_move_JSONBlob_to_JSONField'),
    ]

    operations = [
        migrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;"),
        migrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;"),
        migrations.RunPython(delete_taggit_contenttypes),
        migrations.RunPython(delete_taggit_migration_records),
    ]
75 awx/main/migrations/0187_hop_nodes.py Normal file
@@ -0,0 +1,75 @@
# Generated by Django 4.2.3 on 2023-08-04 20:50

import django.core.validators
from django.db import migrations, models
from django.conf import settings


def automatically_peer_from_control_plane(apps, schema_editor):
    if settings.IS_K8S:
        Instance = apps.get_model('main', 'Instance')
        Instance.objects.filter(node_type='execution').update(peers_from_control_nodes=True)
        Instance.objects.filter(node_type='control').update(listener_port=None)


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0186_drop_django_taggit'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='instancelink',
            options={'ordering': ('id',)},
        ),
        migrations.AddField(
            model_name='instance',
            name='peers_from_control_nodes',
            field=models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
        ),
        migrations.AlterField(
            model_name='instance',
            name='ip_address',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
        migrations.AlterField(
            model_name='instance',
            name='listener_port',
            field=models.PositiveIntegerField(
                blank=True,
                default=None,
                help_text='Port that Receptor will listen for incoming connections on.',
                null=True,
                validators=[django.core.validators.MinValueValidator(1024), django.core.validators.MaxValueValidator(65535)],
            ),
        ),
        migrations.AlterField(
            model_name='instance',
            name='peers',
            field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.instance'),
        ),
        migrations.AlterField(
            model_name='instancelink',
            name='link_state',
            field=models.CharField(
                choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
                default='adding',
                help_text='Indicates the current life cycle stage of this peer link.',
                max_length=16,
            ),
        ),
        migrations.AddConstraint(
            model_name='instance',
            constraint=models.UniqueConstraint(
                condition=models.Q(('ip_address', ''), _negated=True),
                fields=('ip_address',),
                name='unique_ip_address_not_empty',
                violation_error_message='Field ip_address must be unique.',
            ),
        ),
        migrations.AddConstraint(
            model_name='instancelink',
            constraint=models.CheckConstraint(check=models.Q(('source', models.F('target')), _negated=True), name='source_and_target_can_not_be_equal'),
        ),
        migrations.RunPython(automatically_peer_from_control_plane),
    ]
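The conditional unique constraint added by 0187_hop_nodes enforces uniqueness of ip_address only when it is non-empty, so any number of instances may keep the new '' default. A minimal model-level sketch of the same constraint (an illustrative model inside an installed app, not the real awx.main.models.Instance):

# Illustrative model only; requires a configured Django project to import.
from django.db import models


class ExampleInstance(models.Model):
    hostname = models.CharField(max_length=250, unique=True)
    ip_address = models.CharField(blank=True, default='', max_length=50)

    class Meta:
        app_label = 'example'   # placeholder app label
        constraints = [
            models.UniqueConstraint(
                fields=['ip_address'],
                condition=~models.Q(ip_address=''),  # skip the empty-string sentinel
                name='example_unique_ip_address_not_empty',
            ),
        ]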
@@ -3,6 +3,7 @@
|
|||||||
|
|
||||||
# Django
|
# Django
|
||||||
from django.conf import settings # noqa
|
from django.conf import settings # noqa
|
||||||
|
from django.db import connection
|
||||||
from django.db.models.signals import pre_delete # noqa
|
from django.db.models.signals import pre_delete # noqa
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
@@ -99,6 +100,58 @@ User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
|||||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||||
|
|
||||||
|
|
||||||
|
def convert_jsonfields():
|
||||||
|
if connection.vendor != 'postgresql':
|
||||||
|
return
|
||||||
|
|
||||||
|
# fmt: off
|
||||||
|
fields = [
|
||||||
|
('main_activitystream', 'id', (
|
||||||
|
'deleted_actor',
|
||||||
|
'setting',
|
||||||
|
)),
|
||||||
|
('main_job', 'unifiedjob_ptr_id', (
|
||||||
|
'survey_passwords',
|
||||||
|
)),
|
||||||
|
('main_joblaunchconfig', 'id', (
|
||||||
|
'char_prompts',
|
||||||
|
'survey_passwords',
|
||||||
|
)),
|
||||||
|
('main_notification', 'id', (
|
||||||
|
'body',
|
||||||
|
)),
|
||||||
|
('main_unifiedjob', 'id', (
|
||||||
|
'job_env',
|
||||||
|
)),
|
||||||
|
('main_workflowjob', 'unifiedjob_ptr_id', (
|
||||||
|
'char_prompts',
|
||||||
|
'survey_passwords',
|
||||||
|
)),
|
||||||
|
('main_workflowjobnode', 'id', (
|
||||||
|
'char_prompts',
|
||||||
|
'survey_passwords',
|
||||||
|
)),
|
||||||
|
]
|
||||||
|
# fmt: on
|
||||||
|
|
||||||
|
with connection.cursor() as cursor:
|
||||||
|
for table, pkfield, columns in fields:
|
||||||
|
# Do the renamed old columns still exist? If so, run the task.
|
||||||
|
old_columns = ','.join(f"'{column}_old'" for column in columns)
|
||||||
|
cursor.execute(
|
||||||
|
f"""
|
||||||
|
select count(1) from information_schema.columns
|
||||||
|
where
|
||||||
|
table_name = %s and column_name in ({old_columns});
|
||||||
|
""",
|
||||||
|
(table,),
|
||||||
|
)
|
||||||
|
if cursor.fetchone()[0]:
|
||||||
|
from awx.main.tasks.system import migrate_jsonfield
|
||||||
|
|
||||||
|
migrate_jsonfield.apply_async([table, pkfield, columns])
|
||||||
|
|
||||||
|
|
||||||
def cleanup_created_modified_by(sender, **kwargs):
|
def cleanup_created_modified_by(sender, **kwargs):
|
||||||
# work around a bug in django-polymorphic that doesn't properly
|
# work around a bug in django-polymorphic that doesn't properly
|
||||||
# handle cascades for reverse foreign keys on the polymorphic base model
|
# handle cascades for reverse foreign keys on the polymorphic base model
|
||||||
|
|||||||
@@ -7,9 +7,6 @@ from django.core.exceptions import ValidationError, ObjectDoesNotExist
|
|||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
|
|
||||||
# Django-Taggit
|
|
||||||
from taggit.managers import TaggableManager
|
|
||||||
|
|
||||||
# Django-CRUM
|
# Django-CRUM
|
||||||
from crum import get_current_user
|
from crum import get_current_user
|
||||||
|
|
||||||
@@ -301,8 +298,6 @@ class PrimordialModel(HasEditsMixin, CreatedModifiedModel):
|
|||||||
on_delete=models.SET_NULL,
|
on_delete=models.SET_NULL,
|
||||||
)
|
)
|
||||||
|
|
||||||
tags = TaggableManager(blank=True)
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
r = super(PrimordialModel, self).__init__(*args, **kwargs)
|
r = super(PrimordialModel, self).__init__(*args, **kwargs)
|
||||||
if self.pk:
|
if self.pk:
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ from jinja2 import sandbox
|
|||||||
from django.db import models
|
from django.db import models
|
||||||
from django.utils.translation import gettext_lazy as _, gettext_noop
|
from django.utils.translation import gettext_lazy as _, gettext_noop
|
||||||
from django.core.exceptions import ValidationError
|
from django.core.exceptions import ValidationError
|
||||||
|
from django.conf import settings
|
||||||
from django.utils.encoding import force_str
|
from django.utils.encoding import force_str
|
||||||
from django.utils.functional import cached_property
|
from django.utils.functional import cached_property
|
||||||
from django.utils.timezone import now
|
from django.utils.timezone import now
|
||||||
@@ -30,7 +31,7 @@ from awx.main.fields import (
|
|||||||
CredentialTypeInjectorField,
|
CredentialTypeInjectorField,
|
||||||
DynamicCredentialInputField,
|
DynamicCredentialInputField,
|
||||||
)
|
)
|
||||||
from awx.main.utils import decrypt_field, classproperty
|
from awx.main.utils import decrypt_field, classproperty, set_environ
|
||||||
from awx.main.utils.safe_yaml import safe_dump
|
from awx.main.utils.safe_yaml import safe_dump
|
||||||
from awx.main.utils.execution_environments import to_container_path
|
from awx.main.utils.execution_environments import to_container_path
|
||||||
from awx.main.validators import validate_ssh_private_key
|
from awx.main.validators import validate_ssh_private_key
|
||||||
@@ -1252,7 +1253,9 @@ class CredentialInputSource(PrimordialModel):
|
|||||||
backend_kwargs[field_name] = value
|
backend_kwargs[field_name] = value
|
||||||
|
|
||||||
backend_kwargs.update(self.metadata)
|
backend_kwargs.update(self.metadata)
|
||||||
return backend(**backend_kwargs)
|
|
||||||
|
with set_environ(**settings.AWX_TASK_ENV):
|
||||||
|
return backend(**backend_kwargs)
|
||||||
|
|
||||||
def get_absolute_url(self, request=None):
|
def get_absolute_url(self, request=None):
|
||||||
view_name = 'api:credential_input_source_detail'
|
view_name = 'api:credential_input_source_detail'
|
||||||
|
|||||||
@@ -12,13 +12,14 @@ from django.dispatch import receiver
|
|||||||
from django.utils.translation import gettext_lazy as _
|
from django.utils.translation import gettext_lazy as _
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.utils.timezone import now, timedelta
|
from django.utils.timezone import now, timedelta
|
||||||
from django.db.models import Sum
|
from django.db.models import Sum, Q
|
||||||
|
|
||||||
import redis
|
import redis
|
||||||
from solo.models import SingletonModel
|
from solo.models import SingletonModel
|
||||||
|
|
||||||
# AWX
|
# AWX
|
||||||
from awx import __version__ as awx_application_version
|
from awx import __version__ as awx_application_version
|
||||||
|
from awx.main.utils import is_testing
|
||||||
from awx.api.versioning import reverse
|
from awx.api.versioning import reverse
|
||||||
from awx.main.fields import ImplicitRoleField
|
from awx.main.fields import ImplicitRoleField
|
||||||
from awx.main.managers import InstanceManager, UUID_DEFAULT
|
from awx.main.managers import InstanceManager, UUID_DEFAULT
|
||||||
@@ -70,16 +71,33 @@ class InstanceLink(BaseModel):
|
|||||||
REMOVING = 'removing', _('Removing')
|
REMOVING = 'removing', _('Removing')
|
||||||
|
|
||||||
link_state = models.CharField(
|
link_state = models.CharField(
|
||||||
choices=States.choices, default=States.ESTABLISHED, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
|
choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
|
||||||
)
|
)
|
||||||
|
|
||||||
class Meta:
|
class Meta:
|
||||||
unique_together = ('source', 'target')
|
unique_together = ('source', 'target')
|
||||||
|
ordering = ("id",)
|
||||||
|
constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')]
|
||||||
|
|
||||||
|
|
||||||
class Instance(HasPolicyEditsMixin, BaseModel):
|
class Instance(HasPolicyEditsMixin, BaseModel):
|
||||||
"""A model representing an AWX instance running against this database."""
|
"""A model representing an AWX instance running against this database."""
|
||||||
|
|
||||||
|
class Meta:
|
||||||
|
app_label = 'main'
|
||||||
|
ordering = ("hostname",)
|
||||||
|
constraints = [
|
||||||
|
models.UniqueConstraint(
|
||||||
|
fields=["ip_address"],
|
||||||
|
condition=~Q(ip_address=""), # don't apply to constraint to empty entries
|
||||||
|
name="unique_ip_address_not_empty",
|
||||||
|
violation_error_message=_("Field ip_address must be unique."),
|
||||||
|
)
|
||||||
|
]
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.hostname
|
||||||
|
|
||||||
objects = InstanceManager()
|
objects = InstanceManager()
|
||||||
|
|
||||||
# Fields set in instance registration
|
# Fields set in instance registration
|
||||||
@@ -87,10 +105,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
hostname = models.CharField(max_length=250, unique=True)
|
hostname = models.CharField(max_length=250, unique=True)
|
||||||
ip_address = models.CharField(
|
ip_address = models.CharField(
|
||||||
blank=True,
|
blank=True,
|
||||||
null=True,
|
default="",
|
||||||
default=None,
|
|
||||||
max_length=50,
|
max_length=50,
|
||||||
unique=True,
|
|
||||||
)
|
)
|
||||||
# Auto-fields, implementation is different from BaseModel
|
# Auto-fields, implementation is different from BaseModel
|
||||||
created = models.DateTimeField(auto_now_add=True)
|
created = models.DateTimeField(auto_now_add=True)
|
||||||
@@ -169,16 +185,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
)
|
)
|
||||||
listener_port = models.PositiveIntegerField(
|
listener_port = models.PositiveIntegerField(
|
||||||
blank=True,
|
blank=True,
|
||||||
default=27199,
|
null=True,
|
||||||
validators=[MinValueValidator(1), MaxValueValidator(65535)],
|
default=None,
|
||||||
|
validators=[MinValueValidator(1024), MaxValueValidator(65535)],
|
||||||
help_text=_("Port that Receptor will listen for incoming connections on."),
|
help_text=_("Port that Receptor will listen for incoming connections on."),
|
||||||
)
|
)
|
||||||
|
|
||||||
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
|
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
|
||||||
|
peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it."))
|
||||||
class Meta:
|
|
||||||
app_label = 'main'
|
|
||||||
ordering = ("hostname",)
|
|
||||||
|
|
||||||
POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
|
POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
|
||||||
|
|
||||||
@@ -275,10 +289,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
if update_last_seen:
|
if update_last_seen:
|
||||||
update_fields += ['last_seen']
|
update_fields += ['last_seen']
|
||||||
if perform_save:
|
if perform_save:
|
||||||
self.save(update_fields=update_fields)
|
from awx.main.signals import disable_activity_stream
|
||||||
|
|
||||||
|
with disable_activity_stream():
|
||||||
|
self.save(update_fields=update_fields)
|
||||||
return update_fields
|
return update_fields
|
||||||
|
|
||||||
def set_capacity_value(self):
|
def set_capacity_value(self):
|
||||||
|
old_val = self.capacity
|
||||||
"""Sets capacity according to capacity adjustment rule (no save)"""
|
"""Sets capacity according to capacity adjustment rule (no save)"""
|
||||||
if self.enabled and self.node_type != 'hop':
|
if self.enabled and self.node_type != 'hop':
|
||||||
lower_cap = min(self.mem_capacity, self.cpu_capacity)
|
lower_cap = min(self.mem_capacity, self.cpu_capacity)
|
||||||
@@ -286,6 +304,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
self.capacity = lower_cap + (higher_cap - lower_cap) * self.capacity_adjustment
|
self.capacity = lower_cap + (higher_cap - lower_cap) * self.capacity_adjustment
|
||||||
else:
|
else:
|
||||||
self.capacity = 0
|
self.capacity = 0
|
||||||
|
return int(self.capacity) != int(old_val) # return True if value changed
|
||||||
|
|
||||||
def refresh_capacity_fields(self):
|
def refresh_capacity_fields(self):
|
||||||
"""Update derived capacity fields from cpu and memory (no save)"""
|
"""Update derived capacity fields from cpu and memory (no save)"""
|
||||||
@@ -293,8 +312,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
self.cpu_capacity = 0
|
self.cpu_capacity = 0
|
||||||
self.mem_capacity = 0 # formula has a non-zero offset, so we make sure it is 0 for hop nodes
|
self.mem_capacity = 0 # formula has a non-zero offset, so we make sure it is 0 for hop nodes
|
||||||
else:
|
else:
|
||||||
self.cpu_capacity = get_cpu_effective_capacity(self.cpu)
|
self.cpu_capacity = get_cpu_effective_capacity(self.cpu, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||||
self.mem_capacity = get_mem_effective_capacity(self.memory)
|
self.mem_capacity = get_mem_effective_capacity(self.memory, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||||
self.set_capacity_value()
|
self.set_capacity_value()
|
||||||
|
|
||||||
def save_health_data(self, version=None, cpu=0, memory=0, uuid=None, update_last_seen=False, errors=''):
|
def save_health_data(self, version=None, cpu=0, memory=0, uuid=None, update_last_seen=False, errors=''):
|
||||||
@@ -317,12 +336,17 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
|||||||
self.version = version
|
self.version = version
|
||||||
update_fields.append('version')
|
update_fields.append('version')
|
||||||
|
|
||||||
new_cpu = get_corrected_cpu(cpu)
|
if self.node_type == Instance.Types.EXECUTION:
|
||||||
|
new_cpu = cpu
|
||||||
|
new_memory = memory
|
||||||
|
else:
|
||||||
|
new_cpu = get_corrected_cpu(cpu)
|
||||||
|
new_memory = get_corrected_memory(memory)
|
||||||
|
|
||||||
if new_cpu != self.cpu:
|
if new_cpu != self.cpu:
|
||||||
self.cpu = new_cpu
|
self.cpu = new_cpu
|
||||||
update_fields.append('cpu')
|
update_fields.append('cpu')
|
||||||
|
|
||||||
new_memory = get_corrected_memory(memory)
|
|
||||||
if new_memory != self.memory:
|
if new_memory != self.memory:
|
||||||
self.memory = new_memory
|
self.memory = new_memory
|
||||||
update_fields.append('memory')
|
update_fields.append('memory')
|
||||||
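Note on the hunks above: capacity is an interpolation between the memory-derived and CPU-derived limits, weighted by capacity_adjustment. A minimal standalone sketch of that arithmetic (plain Python with illustrative numbers, not AWX's actual capacity helpers):

    def interpolated_capacity(cpu_capacity, mem_capacity, adjustment):
        # adjustment=0.0 picks the smaller estimate, 1.0 picks the larger
        lower_cap = min(mem_capacity, cpu_capacity)
        higher_cap = max(mem_capacity, cpu_capacity)
        return int(lower_cap + (higher_cap - lower_cap) * adjustment)

    print(interpolated_capacity(36, 20, 0.0))  # 20 (conservative)
    print(interpolated_capacity(36, 20, 0.5))  # 28
    print(interpolated_capacity(36, 20, 1.0))  # 36 (aggressive)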
@@ -464,21 +488,50 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs
     instance.set_default_policy_fields()


+def schedule_write_receptor_config(broadcast=True):
+    from awx.main.tasks.receptor import write_receptor_config  # prevents circular import
+
+    # broadcast to all control instances to update their receptor configs
+    if broadcast:
+        connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
+    else:
+        if not is_testing():
+            write_receptor_config()  # just run locally
+
+
 @receiver(post_save, sender=Instance)
 def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
-    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION,):
+    '''
+    Here we link control nodes to hop or execution nodes based on the
+    peers_from_control_nodes field.
+    write_receptor_config should be called on each control node when:
+    1. new node is created with peers_from_control_nodes enabled
+    2. a node changes its value of peers_from_control_nodes
+    3. a new control node comes online and has instances to peer to
+    '''
+    if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
+        inst = Instance.objects.filter(peers_from_control_nodes=True)
+        if set(instance.peers.all()) != set(inst):
+            instance.peers.set(inst)
+            schedule_write_receptor_config(broadcast=False)
+
+    if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]:
         if instance.node_state == Instance.States.DEPROVISIONING:
             from awx.main.tasks.receptor import remove_deprovisioned_node  # prevents circular import

             # wait for jobs on the node to complete, then delete the
             # node and kick off write_receptor_config
             connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
-        if instance.node_state == Instance.States.INSTALLED:
-            from awx.main.tasks.receptor import write_receptor_config  # prevents circular import
-
-            # broadcast to all control instances to update their receptor configs
-            connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
+        else:
+            control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
+            if instance.peers_from_control_nodes:
+                if (control_instances & set(instance.peers_from.all())) != set(control_instances):
+                    instance.peers_from.add(*control_instances)
+                    schedule_write_receptor_config()  # keep method separate to make pytest mocking easier
+            else:
+                if set(control_instances) & set(instance.peers_from.all()):
+                    instance.peers_from.remove(*control_instances)
+                    schedule_write_receptor_config()

     if created or instance.has_policy_changes():
         schedule_policy_task()
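The signal handler above reduces the peering decision to set arithmetic: link every control node to a hop/execution node when peers_from_control_nodes is on, and unlink them all when it is off. A simplified, Django-free sketch of that decision (plain sets standing in for the ORM querysets):

    def peer_changes(control_nodes, current_peers_from, peers_from_control_nodes):
        # returns (to_add, to_remove) for one hop/execution node
        if peers_from_control_nodes:
            return control_nodes - current_peers_from, set()
        return set(), control_nodes & current_peers_from

    print(peer_changes({'ctl-1', 'ctl-2'}, {'ctl-1'}, True))   # ({'ctl-2'}, set())
    print(peer_changes({'ctl-1', 'ctl-2'}, {'ctl-1'}, False))  # (set(), {'ctl-1'})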
@@ -493,6 +546,8 @@ def on_instance_group_deleted(sender, instance, using, **kwargs)
 @receiver(post_delete, sender=Instance)
 def on_instance_deleted(sender, instance, using, **kwargs):
     schedule_policy_task()
+    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes:
+        schedule_write_receptor_config()


 class UnifiedJobTemplateInstanceGroupMembership(models.Model):

@@ -10,7 +10,6 @@ import copy
 import os.path
 from urllib.parse import urljoin

-import dateutil.relativedelta
 import yaml

 # Django
@@ -890,27 +889,10 @@ class HostMetric(models.Model):
         self.deleted = False
         self.save(update_fields=['deleted'])

-    @classmethod
-    def cleanup_task(cls, months_ago):
-        try:
-            months_ago = int(months_ago)
-            if months_ago <= 0:
-                raise ValueError()
-
-            last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
-
-            logger.info(f'Cleanup [HostMetric]: soft-deleting records last automated before {last_automation_before}')
-            HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
-                deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
-            )
-            settings.CLEANUP_HOST_METRICS_LAST_TS = now()
-        except (TypeError, ValueError):
-            logger.error(f"Cleanup [HostMetric]: months_ago({months_ago}) has to be a positive integer value")
-
-
 class HostMetricSummaryMonthly(models.Model):
     """
-    HostMetric summaries computed by scheduled task <TODO> monthly
+    HostMetric summaries computed by scheduled task 'awx.main.tasks.system.host_metric_summary_monthly' monthly
     """

     date = models.DateField(unique=True)
@@ -3,8 +3,6 @@

 from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed

-from taggit.managers import TaggableManager
-

 class ActivityStreamRegistrar(object):
     def __init__(self):
@@ -21,8 +19,6 @@ class ActivityStreamRegistrar(object):
             pre_delete.connect(activity_stream_delete, sender=model, dispatch_uid=str(self.__class__) + str(model) + "_delete")

             for m2mfield in model._meta.many_to_many:
-                if isinstance(m2mfield, TaggableManager):
-                    continue  # Special case for taggit app
                 try:
                     m2m_attr = getattr(model, m2mfield.name)
                     m2m_changed.connect(
@@ -25,7 +25,6 @@ from awx.main.models import (
     InventoryUpdate,
     Job,
     Project,
-    ProjectUpdate,
     UnifiedJob,
     WorkflowApproval,
     WorkflowJob,
@@ -102,27 +101,40 @@ class TaskBase:

     def record_aggregate_metrics(self, *args):
         if not is_testing():
-            # increment task_manager_schedule_calls regardless if the other
-            # metrics are recorded
-            s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
-            # Only record metrics if the last time recording was more
-            # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
-            # Prevents a short-duration task manager that runs directly after a
-            # long task manager to override useful metrics.
-            current_time = time.time()
-            time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
-            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
-                logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
-                self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
-                self.subsystem_metrics.pipe_execute()
-            else:
-                logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+            try:
+                # increment task_manager_schedule_calls regardless if the other
+                # metrics are recorded
+                s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
+                # Only record metrics if the last time recording was more
+                # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
+                # Prevents a short-duration task manager that runs directly after a
+                # long task manager to override useful metrics.
+                current_time = time.time()
+                time_last_recorded = current_time - self.subsystem_metrics.decode(f"{self.prefix}_recorded_timestamp")
+                if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
+                    logger.debug(f"recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+                    self.subsystem_metrics.set(f"{self.prefix}_recorded_timestamp", current_time)
+                    self.subsystem_metrics.pipe_execute()
+                else:
+                    logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
+            except Exception:
+                logger.exception(f"Error saving metrics for {self.prefix}")

     def record_aggregate_metrics_and_exit(self, *args):
         self.record_aggregate_metrics()
         sys.exit(1)

+    def get_local_metrics(self):
+        data = {}
+        for k, metric in self.subsystem_metrics.METRICS.items():
+            if k.startswith(self.prefix) and metric.metric_has_changed:
+                data[k[len(self.prefix) + 1 :]] = metric.current_value
+        return data
+
     def schedule(self):
+        # Always be able to restore the original signal handler if we finish
+        original_sigusr1 = signal.getsignal(signal.SIGUSR1)
+
         # Lock
         with task_manager_bulk_reschedule():
             with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
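The comments above explain that aggregate metrics are only flushed when enough time has passed since the last flush. A standalone sketch of that guard (plain Python; the interval constant is an illustrative stand-in for the real setting):

    import time

    RECORD_INTERVAL = 15.0  # stand-in for SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL

    def should_record(last_recorded_ts, now_ts=None):
        # flush only when the configured interval has elapsed since the last flush
        if now_ts is None:
            now_ts = time.time()
        return (now_ts - last_recorded_ts) > RECORD_INTERVAL

    print(should_record(time.time() - 5))   # False: flushed too recently
    print(should_record(time.time() - 60))  # True: interval exceeded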
@@ -131,15 +143,24 @@ class TaskBase:
                     logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
                     return
                 logger.debug(f"Starting {self.prefix} Scheduler")
-                # if sigterm due to timeout, still record metrics
-                signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
-                self._schedule()
+                # if sigusr1 due to timeout, still record metrics
+                signal.signal(signal.SIGUSR1, self.record_aggregate_metrics_and_exit)
+                try:
+                    self._schedule()
+                finally:
+                    # Reset the signal handler back to the default just in case anything
+                    # else uses the same signal for other purposes
+                    signal.signal(signal.SIGUSR1, original_sigusr1)
                 commit_start = time.time()

+                logger.debug(f"Commiting {self.prefix} Scheduler changes")
+
                 if self.prefix == "task_manager":
                     self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
+                local_metrics = self.get_local_metrics()
                 self.record_aggregate_metrics()
-                logger.debug(f"Finishing {self.prefix} Scheduler")
+                logger.debug(f"Finished {self.prefix} Scheduler, timing data:\n{local_metrics}")


 class WorkflowManager(TaskBase):
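The try/finally added above guarantees the previous SIGUSR1 handler is restored even if the scheduler raises. A generic sketch of that save-and-restore pattern (standard library only, POSIX):

    import signal

    def run_with_temporary_handler(handler, func):
        # install handler for SIGUSR1 while func runs, then put back whatever was there before
        original = signal.getsignal(signal.SIGUSR1)
        signal.signal(signal.SIGUSR1, handler)
        try:
            return func()
        finally:
            signal.signal(signal.SIGUSR1, original)  # restored even if func() raised

    run_with_temporary_handler(lambda signum, frame: None, lambda: print('scheduling'))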
@@ -154,7 +175,6 @@ class WorkflowManager(TaskBase):
                 logger.warning("Workflow manager has reached time out while processing running workflows, exiting loop early")
                 ScheduleWorkflowManager().schedule()
                 # Do not process any more workflow jobs. Stop here.
-                # Maybe we should schedule another WorkflowManager run
                 break
             dag = WorkflowDAG(workflow_job)
             status_changed = False
@@ -169,8 +189,8 @@ class WorkflowManager(TaskBase):
                 workflow_job.save(update_fields=['status', 'start_args'])
                 status_changed = True
             else:
-                workflow_nodes = dag.mark_dnr_nodes()
-                WorkflowJobNode.objects.bulk_update(workflow_nodes, ['do_not_run'])
+                dnr_nodes = dag.mark_dnr_nodes()
+                WorkflowJobNode.objects.bulk_update(dnr_nodes, ['do_not_run'])
                 # If workflow is now done, we do special things to mark it as done.
                 is_done = dag.is_workflow_done()
                 if is_done:
@@ -250,6 +270,7 @@ class WorkflowManager(TaskBase):
                     job.status = 'failed'
                     job.save(update_fields=['status', 'job_explanation'])
                     job.websocket_emit_status('failed')
+                    ScheduleWorkflowManager().schedule()

         # TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
         # emit_websocket_notification('/socket.io/jobs', '', dict(id=))
@@ -270,184 +291,115 @@ class WorkflowManager(TaskBase):
 class DependencyManager(TaskBase):
     def __init__(self):
         super().__init__(prefix="dependency_manager")
+        self.all_projects = {}
+        self.all_inventory_sources = {}

-    def create_project_update(self, task, project_id=None):
-        if project_id is None:
-            project_id = task.project_id
-        project_task = Project.objects.get(id=project_id).create_project_update(_eager_fields=dict(launch_type='dependency'))
-
-        # Project created 1 seconds behind
-        project_task.created = task.created - timedelta(seconds=1)
-        project_task.status = 'pending'
-        project_task.save()
-        logger.debug('Spawned {} as dependency of {}'.format(project_task.log_format, task.log_format))
-        return project_task
-
-    def create_inventory_update(self, task, inventory_source_task):
-        inventory_task = InventorySource.objects.get(id=inventory_source_task.id).create_inventory_update(_eager_fields=dict(launch_type='dependency'))
-
-        inventory_task.created = task.created - timedelta(seconds=2)
-        inventory_task.status = 'pending'
-        inventory_task.save()
-        logger.debug('Spawned {} as dependency of {}'.format(inventory_task.log_format, task.log_format))
-
-        return inventory_task
-
-    def add_dependencies(self, task, dependencies):
-        with disable_activity_stream():
-            task.dependent_jobs.add(*dependencies)
-
-    def get_inventory_source_tasks(self):
+    def cache_projects_and_sources(self, task_list):
+        project_ids = set()
         inventory_ids = set()
-        for task in self.all_tasks:
+        for task in task_list:
             if isinstance(task, Job):
-                inventory_ids.add(task.inventory_id)
-        self.all_inventory_sources = [invsrc for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True)]
+                if task.project_id:
+                    project_ids.add(task.project_id)
+                if task.inventory_id:
+                    inventory_ids.add(task.inventory_id)
+            elif isinstance(task, InventoryUpdate):
+                if task.inventory_source and task.inventory_source.source_project_id:
+                    project_ids.add(task.inventory_source.source_project_id)

-    def get_latest_inventory_update(self, inventory_source):
-        latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created")
-        if not latest_inventory_update.exists():
-            return None
-        return latest_inventory_update.first()
+        for proj in Project.objects.filter(id__in=project_ids, scm_update_on_launch=True):
+            self.all_projects[proj.id] = proj

-    def should_update_inventory_source(self, job, latest_inventory_update):
-        now = tz_now()
+        for invsrc in InventorySource.objects.filter(inventory_id__in=inventory_ids, update_on_launch=True):
+            self.all_inventory_sources.setdefault(invsrc.inventory_id, [])
+            self.all_inventory_sources[invsrc.inventory_id].append(invsrc)

-        if latest_inventory_update is None:
+    @staticmethod
+    def should_update_again(update, cache_timeout):
+        '''
+        If it has never updated, we need to update
+        If there is already an update in progress then we do not need to a new create one
+        If the last update failed, we always need to try and update again
+        If current time is more than cache_timeout after last update, then we need a new one
+        '''
+        if (update is None) or (update.status in ['failed', 'canceled', 'error']):
             return True
-        '''
-        If there's already a inventory update utilizing this job that's about to run
-        then we don't need to create one
-        '''
-        if latest_inventory_update.status in ['waiting', 'pending', 'running']:
+        if update.status in ['waiting', 'pending', 'running']:
             return False

-        timeout_seconds = timedelta(seconds=latest_inventory_update.inventory_source.update_cache_timeout)
-        if (latest_inventory_update.finished + timeout_seconds) < now:
-            return True
-        if latest_inventory_update.inventory_source.update_on_launch is True and latest_inventory_update.status in ['failed', 'canceled', 'error']:
-            return True
-        return False
+        return bool(((update.finished + timedelta(seconds=cache_timeout))) < tz_now())

-    def get_latest_project_update(self, project_id):
-        latest_project_update = ProjectUpdate.objects.filter(project=project_id, job_type='check').order_by("-created")
-        if not latest_project_update.exists():
-            return None
-        return latest_project_update.first()
-
-    def should_update_related_project(self, job, latest_project_update):
-        now = tz_now()
-
-        if latest_project_update is None:
-            return True
-
-        if latest_project_update.status in ['failed', 'canceled']:
-            return True
-
-        '''
-        If there's already a project update utilizing this job that's about to run
-        then we don't need to create one
-        '''
-        if latest_project_update.status in ['waiting', 'pending', 'running']:
-            return False
-
-        '''
-        If the latest project update has a created time == job_created_time-1
-        then consider the project update found. This is so we don't enter an infinite loop
-        of updating the project when cache timeout is 0.
-        '''
-        if (
-            latest_project_update.project.scm_update_cache_timeout == 0
-            and latest_project_update.launch_type == 'dependency'
-            and latest_project_update.created == job.created - timedelta(seconds=1)
-        ):
-            return False
-        '''
-        Normal Cache Timeout Logic
-        '''
-        timeout_seconds = timedelta(seconds=latest_project_update.project.scm_update_cache_timeout)
-        if (latest_project_update.finished + timeout_seconds) < now:
-            return True
-        return False
+    def get_or_create_project_update(self, project_id):
+        project = self.all_projects.get(project_id, None)
+        if project is not None:
+            latest_project_update = project.project_updates.filter(job_type='check').order_by("-created").first()
+            if self.should_update_again(latest_project_update, project.scm_update_cache_timeout):
+                project_task = project.create_project_update(_eager_fields=dict(launch_type='dependency'))
+                project_task.signal_start()
+                return [project_task]
+            else:
+                return [latest_project_update]
+        return []

     def gen_dep_for_job(self, task):
-        created_dependencies = []
-        dependencies = []
-        # TODO: Can remove task.project None check after scan-job-default-playbook is removed
-        if task.project is not None and task.project.scm_update_on_launch is True:
-            latest_project_update = self.get_latest_project_update(task.project_id)
-            if self.should_update_related_project(task, latest_project_update):
-                latest_project_update = self.create_project_update(task)
-                created_dependencies.append(latest_project_update)
-            dependencies.append(latest_project_update)
+        dependencies = self.get_or_create_project_update(task.project_id)

-        # Inventory created 2 seconds behind job
         try:
             start_args = json.loads(decrypt_field(task, field_name="start_args"))
         except ValueError:
             start_args = dict()
-        # generator for inventory sources related to this task
-        task_inv_sources = (invsrc for invsrc in self.all_inventory_sources if invsrc.inventory_id == task.inventory_id)
-        for inventory_source in task_inv_sources:
+        # generator for update-on-launch inventory sources related to this task
+        for inventory_source in self.all_inventory_sources.get(task.inventory_id, []):
             if "inventory_sources_already_updated" in start_args and inventory_source.id in start_args['inventory_sources_already_updated']:
                 continue
-            if not inventory_source.update_on_launch:
-                continue
-            latest_inventory_update = self.get_latest_inventory_update(inventory_source)
-            if self.should_update_inventory_source(task, latest_inventory_update):
-                inventory_task = self.create_inventory_update(task, inventory_source)
-                created_dependencies.append(inventory_task)
+            latest_inventory_update = inventory_source.inventory_updates.order_by("-created").first()
+            if self.should_update_again(latest_inventory_update, inventory_source.update_cache_timeout):
+                inventory_task = inventory_source.create_inventory_update(_eager_fields=dict(launch_type='dependency'))
+                inventory_task.signal_start()
                 dependencies.append(inventory_task)
             else:
                 dependencies.append(latest_inventory_update)

-        if dependencies:
-            self.add_dependencies(task, dependencies)
-
-        return created_dependencies
+        return dependencies

     def gen_dep_for_inventory_update(self, inventory_task):
-        created_dependencies = []
         if inventory_task.source == "scm":
             invsrc = inventory_task.inventory_source
-            if not invsrc.source_project.scm_update_on_launch:
-                return created_dependencies
-
-            latest_src_project_update = self.get_latest_project_update(invsrc.source_project_id)
-            if self.should_update_related_project(inventory_task, latest_src_project_update):
-                latest_src_project_update = self.create_project_update(inventory_task, project_id=invsrc.source_project_id)
-                created_dependencies.append(latest_src_project_update)
-            self.add_dependencies(inventory_task, [latest_src_project_update])
-            latest_src_project_update.scm_inventory_updates.add(inventory_task)
-        return created_dependencies
+            if invsrc:
+                return self.get_or_create_project_update(invsrc.source_project_id)
+        return []

     @timeit
     def generate_dependencies(self, undeped_tasks):
-        created_dependencies = []
+        dependencies = []
+        self.cache_projects_and_sources(undeped_tasks)
         for task in undeped_tasks:
             task.log_lifecycle("acknowledged")
             if type(task) is Job:
-                created_dependencies += self.gen_dep_for_job(task)
+                job_deps = self.gen_dep_for_job(task)
             elif type(task) is InventoryUpdate:
-                created_dependencies += self.gen_dep_for_inventory_update(task)
+                job_deps = self.gen_dep_for_inventory_update(task)
             else:
                 continue
+            if job_deps:
+                dependencies += job_deps
+                with disable_activity_stream():
+                    task.dependent_jobs.add(*dependencies)
+                logger.debug(f'Linked {[dep.log_format for dep in dependencies]} as dependencies of {task.log_format}')

         UnifiedJob.objects.filter(pk__in=[task.pk for task in undeped_tasks]).update(dependencies_processed=True)

-        return created_dependencies
-
-    def process_tasks(self):
-        deps = self.generate_dependencies(self.all_tasks)
-        self.generate_dependencies(deps)
-        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(deps))
+        return dependencies

     @timeit
     def _schedule(self):
         self.get_tasks(dict(status__in=["pending"], dependencies_processed=False))

         if len(self.all_tasks) > 0:
-            self.get_inventory_source_tasks()
-            self.process_tasks()
+            deps = self.generate_dependencies(self.all_tasks)
+            undeped_deps = [dep for dep in deps if dep.dependencies_processed is False]
+            self.generate_dependencies(undeped_deps)
+            self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(undeped_deps))
             ScheduleTaskManager().schedule()
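should_update_again above is the whole caching policy for dependency updates: always update after a failure or if nothing ran yet, never duplicate an in-flight update, otherwise respect the cache timeout. A self-contained sketch of the same decision with a hypothetical stand-in record instead of the Django model:

    from dataclasses import dataclass
    from datetime import datetime, timedelta

    @dataclass
    class FakeUpdate:  # stand-in for a ProjectUpdate/InventoryUpdate row
        status: str
        finished: datetime

    def should_update_again(update, cache_timeout_seconds):
        if update is None or update.status in ('failed', 'canceled', 'error'):
            return True   # never ran, or the last run failed
        if update.status in ('waiting', 'pending', 'running'):
            return False  # an update is already in flight
        return update.finished + timedelta(seconds=cache_timeout_seconds) < datetime.now()

    recent = FakeUpdate('successful', datetime.now() - timedelta(seconds=30))
    print(should_update_again(recent, 300))  # False: cache still fresh
    print(should_update_again(recent, 10))   # True: cache timeout exceeded
    print(should_update_again(None, 300))    # True: no previous update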
@@ -1 +1 @@
-from . import jobs, receptor, system  # noqa
+from . import host_metrics, jobs, receptor, system  # noqa

@@ -29,8 +29,9 @@ class RunnerCallback:
         self.safe_env = {}
         self.event_ct = 0
         self.model = model
-        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
+        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
         self.wrapup_event_dispatched = False
+        self.artifacts_processed = False
         self.extra_update_fields = {}

     def update_model(self, pk, _attempt=0, **updates):
@@ -207,9 +208,13 @@ class RunnerCallback:
             # We opened a connection just for that save, close it here now
             connections.close_all()
         elif status_data['status'] == 'error':
-            result_traceback = status_data.get('result_traceback', None)
-            if result_traceback:
-                self.delay_update(result_traceback=result_traceback)
+            for field_name in ('result_traceback', 'job_explanation'):
+                field_value = status_data.get(field_name, None)
+                if field_value:
+                    self.delay_update(**{field_name: field_value})
+
+    def artifacts_handler(self, artifact_dir):
+        self.artifacts_processed = True


 class RunnerCallbackForProjectUpdate(RunnerCallback):
@@ -9,6 +9,7 @@ from django.conf import settings
 from django.db.models.query import QuerySet
 from django.utils.encoding import smart_str
 from django.utils.timezone import now
+from django.db import OperationalError

 # AWX
 from awx.main.utils.common import log_excess_runtime
@@ -57,6 +58,28 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=No
     return None


+def raw_update_hosts(host_list):
+    Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
+
+
+def update_hosts(host_list, max_tries=5):
+    if not host_list:
+        return
+    for i in range(max_tries):
+        try:
+            raw_update_hosts(host_list)
+        except OperationalError as exc:
+            # Deadlocks can happen if this runs at the same time as another large query
+            # inventory updates and updating last_job_host_summary are candidates for conflict
+            # but these would resolve easily on a retry
+            if i + 1 < max_tries:
+                logger.info(f'OperationalError (suspected deadlock) saving host facts retry {i}, message: {exc}')
+                continue
+            else:
+                raise
+        break
+
+
 @log_excess_runtime(
     logger,
     debug_cutoff=0.01,
@@ -111,7 +134,6 @@ def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=Non
             system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
             log_data['cleared_ct'] += 1
         if len(hosts_to_update) > 100:
-            Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
+            update_hosts(hosts_to_update)
             hosts_to_update = []
-    if hosts_to_update:
-        Host.objects.bulk_update(hosts_to_update, ['ansible_facts', 'ansible_facts_modified'])
+    update_hosts(hosts_to_update)
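The comments in update_hosts above explain why the bulk update is retried: a deadlock with another large query usually clears on a second attempt. A generic, standalone sketch of that bounded-retry shape (plain Python, hypothetical exception type in place of OperationalError):

    class TransientDBError(Exception):
        pass  # stand-in for a deadlock-style OperationalError

    def retry_transient(func, max_tries=5):
        for attempt in range(max_tries):
            try:
                return func()
            except TransientDBError:
                if attempt + 1 < max_tries:
                    continue  # the conflicting transaction has likely finished; try again
                raise         # out of attempts, surface the error

    calls = {'n': 0}
    def flaky():
        calls['n'] += 1
        if calls['n'] < 3:
            raise TransientDBError()
        return 'saved'

    print(retry_transient(flaky))  # 'saved' on the third attempt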
awx/main/tasks/helpers.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+from django.utils.timezone import now
+from rest_framework.fields import DateTimeField
+
+
+def is_run_threshold_reached(setting, threshold_seconds):
+    last_time = DateTimeField().to_internal_value(setting) if setting else None
+    if not last_time:
+        return True
+    else:
+        return (now() - last_time).total_seconds() > threshold_seconds
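is_run_threshold_reached simply compares a stored last-run timestamp against an interval in seconds. A dependency-free sketch of the same check (datetime only; the values are illustrative):

    from datetime import datetime, timedelta, timezone

    def threshold_reached(last_run, threshold_seconds):
        if last_run is None:
            return True  # never ran before
        return (datetime.now(timezone.utc) - last_run).total_seconds() > threshold_seconds

    last = datetime.now(timezone.utc) - timedelta(days=8)
    print(threshold_reached(last, 7 * 86400))  # True: more than a week ago
    print(threshold_reached(None, 7 * 86400))  # True: never ran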
awx/main/tasks/host_metrics.py (new file, 262 lines)
@@ -0,0 +1,262 @@
+import datetime
+from dateutil.relativedelta import relativedelta
+import logging
+
+from django.conf import settings
+from django.db.models import Count, F
+from django.db.models.functions import TruncMonth
+from django.utils.timezone import now
+from awx.main.dispatch import get_task_queuename
+from awx.main.dispatch.publish import task
+from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
+from awx.main.tasks.helpers import is_run_threshold_reached
+from awx.conf.license import get_license
+
+logger = logging.getLogger('awx.main.tasks.host_metrics')
+
+
+@task(queue=get_task_queuename)
+def cleanup_host_metrics():
+    if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
+        logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
+        HostMetricTask().cleanup(
+            soft_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12),
+            hard_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36),
+        )
+        logger.info("Finished cleanup_host_metrics")
+
+
+@task(queue=get_task_queuename)
+def host_metric_summary_monthly():
+    """Run cleanup host metrics summary monthly task each week"""
+    if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
+        logger.info(f"Executing host_metric_summary_monthly, last ran at {getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', '---')}")
+        HostMetricSummaryMonthlyTask().execute()
+        logger.info("Finished host_metric_summary_monthly")
+
+
+class HostMetricTask:
+    """
+    This class provides cleanup task for HostMetric model.
+    There are two modes:
+    - soft cleanup (updates columns delete, deleted_counter and last_deleted)
+    - hard cleanup (deletes from the db)
+    """
+
+    def cleanup(self, soft_threshold=None, hard_threshold=None):
+        """
+        Main entrypoint, runs either soft cleanup, hard cleanup or both
+
+        :param soft_threshold: (int)
+        :param hard_threshold: (int)
+        """
+        if hard_threshold is not None:
+            self.hard_cleanup(hard_threshold)
+        if soft_threshold is not None:
+            self.soft_cleanup(soft_threshold)
+
+        settings.CLEANUP_HOST_METRICS_LAST_TS = now()
+
+    @staticmethod
+    def soft_cleanup(threshold=None):
+        if threshold is None:
+            threshold = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
+
+        try:
+            threshold = int(threshold)
+        except (ValueError, TypeError) as e:
+            raise type(e)("soft_threshold has to be convertible to number") from e
+
+        last_automation_before = now() - relativedelta(months=threshold)
+        rows = HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
+            deleted=True, deleted_counter=F('deleted_counter') + 1, last_deleted=now()
+        )
+        logger.info(f'cleanup_host_metrics: soft-deleted records last automated before {last_automation_before}, affected rows: {rows}')
+
+    @staticmethod
+    def hard_cleanup(threshold=None):
+        if threshold is None:
+            threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
+
+        try:
+            threshold = int(threshold)
+        except (ValueError, TypeError) as e:
+            raise type(e)("hard_threshold has to be convertible to number") from e
+
+        last_deleted_before = now() - relativedelta(months=threshold)
+        queryset = HostMetric.objects.filter(deleted=True, last_deleted__lt=last_deleted_before)
+        rows = queryset.delete()
+        logger.info(f'cleanup_host_metrics: hard-deleted records which were soft deleted before {last_deleted_before}, affected rows: {rows[0]}')
+
+
+class HostMetricSummaryMonthlyTask:
+    """
+    This task computes last [threshold] months of HostMetricSummaryMonthly table
+    [threshold] is setting CLEANUP_HOST_METRICS_HARD_THRESHOLD
+    Each record in the table represents changes in HostMetric table in one month
+    It always overrides all the months newer than <threshold>, never updates older months
+    Algorithm:
+    - hosts_added are HostMetric records with first_automation in given month
+    - hosts_deleted are HostMetric records with deleted=True and last_deleted in given month
+    - - HostMetrics soft-deleted before <threshold> also increases hosts_deleted in their last_deleted month
+    - license_consumed is license_consumed(previous month) + hosts_added - hosts_deleted
+    - - license_consumed for HostMetricSummaryMonthly.date < [threshold] is computed also from
+        all HostMetrics.first_automation < [threshold]
+    - license_capacity is set only for current month, and it's never updated (value taken from current subscription)
+    """
+
+    def __init__(self):
+        self.host_metrics = {}
+        self.processed_month = self._get_first_month()
+        self.existing_summaries = None
+        self.existing_summaries_idx = 0
+        self.existing_summaries_cnt = 0
+        self.records_to_create = []
+        self.records_to_update = []
+
+    def execute(self):
+        self._load_existing_summaries()
+        self._load_hosts_added()
+        self._load_hosts_deleted()
+
+        # Get first month after last hard delete
+        month = self._get_first_month()
+        license_consumed = self._get_license_consumed_before(month)
+
+        # Fill record for each month
+        while month <= datetime.date.today().replace(day=1):
+            summary = self._find_or_create_summary(month)
+            # Update summary and update license_consumed by hosts added/removed this month
+            self._update_summary(summary, month, license_consumed)
+            license_consumed = summary.license_consumed
+
+            month = month + relativedelta(months=1)
+
+        # Create/Update stats
+        HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
+        HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)
+
+        # Set timestamp of last run
+        settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()
+
+    def _get_license_consumed_before(self, month):
+        license_consumed = 0
+        for metric_month, metric in self.host_metrics.items():
+            if metric_month < month:
+                hosts_added = metric.get('hosts_added', 0)
+                hosts_deleted = metric.get('hosts_deleted', 0)
+                license_consumed = license_consumed + hosts_added - hosts_deleted
+            else:
+                break
+        return license_consumed
+
+    def _load_existing_summaries(self):
+        """Find all summaries newer than host metrics delete threshold"""
+        self.existing_summaries = HostMetricSummaryMonthly.objects.filter(date__gte=self._get_first_month()).order_by('date')
+        self.existing_summaries_idx = 0
+        self.existing_summaries_cnt = len(self.existing_summaries)
+
+    def _load_hosts_added(self):
+        """Aggregates hosts added each month, by the 'first_automation' timestamp"""
+        #
+        # -- SQL translation (for better code readability)
+        # SELECT date_trunc('month', first_automation) as month,
+        #        count(first_automation) AS hosts_added
+        # FROM main_hostmetric
+        # GROUP BY month
+        # ORDER by month;
+        result = (
+            HostMetric.objects.annotate(month=TruncMonth('first_automation'))
+            .values('month')
+            .annotate(hosts_added=Count('first_automation'))
+            .values('month', 'hosts_added')
+            .order_by('month')
+        )
+
+        for host_metric in list(result):
+            month = host_metric['month']
+            if month:
+                beginning_of_month = datetime.date(month.year, month.month, 1)
+                if self.host_metrics.get(beginning_of_month) is None:
+                    self.host_metrics[beginning_of_month] = {}
+                self.host_metrics[beginning_of_month]['hosts_added'] = host_metric['hosts_added']
+
+    def _load_hosts_deleted(self):
+        """
+        Aggregates hosts deleted each month, by the 'last_deleted' timestamp.
+        Host metrics have to be deleted NOW to be counted as deleted before
+        (by intention - statistics can change retrospectively by re-automation of previously deleted host)
+        """
+        #
+        # -- SQL translation (for better code readability)
+        # SELECT date_trunc('month', last_deleted) as month,
+        #        count(last_deleted) AS hosts_deleted
+        # FROM main_hostmetric
+        # WHERE deleted = True
+        # GROUP BY 1  # equal to "GROUP BY month"
+        # ORDER by month;
+        result = (
+            HostMetric.objects.annotate(month=TruncMonth('last_deleted'))
+            .values('month')
+            .annotate(hosts_deleted=Count('last_deleted'))
+            .values('month', 'hosts_deleted')
+            .filter(deleted=True)
+            .order_by('month')
+        )
+        for host_metric in list(result):
+            month = host_metric['month']
+            if month:
+                beginning_of_month = datetime.date(month.year, month.month, 1)
+                if self.host_metrics.get(beginning_of_month) is None:
+                    self.host_metrics[beginning_of_month] = {}
+                self.host_metrics[beginning_of_month]['hosts_deleted'] = host_metric['hosts_deleted']
+
+    def _find_or_create_summary(self, month):
+        summary = self._find_summary(month)
+
+        if not summary:
+            summary = HostMetricSummaryMonthly(date=month)
+            self.records_to_create.append(summary)
+        else:
+            self.records_to_update.append(summary)
+        return summary
+
+    def _find_summary(self, month):
+        """
+        Existing summaries are ordered by month ASC.
+        This method is called with month in ascending order too => only 1 traversing is enough
+        """
+        summary = None
+        while not summary and self.existing_summaries_idx < self.existing_summaries_cnt:
+            tmp = self.existing_summaries[self.existing_summaries_idx]
+            if tmp.date < month:
+                self.existing_summaries_idx += 1
+            elif tmp.date == month:
+                summary = tmp
+            elif tmp.date > month:
+                break
+        return summary
+
+    def _update_summary(self, summary, month, license_consumed):
+        """Updates the metric with hosts added and deleted and set license info for current month"""
+        # Get month counts from host metrics, zero if not found
+        hosts_added, hosts_deleted = 0, 0
+        if metric := self.host_metrics.get(month, None):
+            hosts_added = metric.get('hosts_added', 0)
+            hosts_deleted = metric.get('hosts_deleted', 0)
+
+        summary.license_consumed = license_consumed + hosts_added - hosts_deleted
+        summary.hosts_added = hosts_added
+        summary.hosts_deleted = hosts_deleted
+
+        # Set subscription count for current month
+        if month == datetime.date.today().replace(day=1):
+            license_info = get_license()
+            summary.license_capacity = license_info.get('instance_count', 0)
+        return summary
+
+    @staticmethod
+    def _get_first_month():
+        """Returns first month after host metrics hard delete threshold"""
+        threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
+        return datetime.date.today().replace(day=1) - relativedelta(months=int(threshold) - 1)
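The docstring above defines license_consumed as a running total: the previous month's value plus hosts added minus hosts deleted. A tiny standalone illustration of that recurrence with made-up monthly counts:

    # month -> (hosts_added, hosts_deleted); purely illustrative numbers
    monthly_changes = {
        '2023-01': (40, 0),
        '2023-02': (10, 5),
        '2023-03': (0, 12),
    }

    license_consumed = 0
    for month, (added, deleted) in monthly_changes.items():
        license_consumed = license_consumed + added - deleted
        print(month, license_consumed)
    # 2023-01 40
    # 2023-02 45
    # 2023-03 33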
@@ -112,7 +112,7 @@ class BaseTask(object):

     def __init__(self):
         self.cleanup_paths = []
-        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE / 5)
+        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
         self.runner_callback = self.callback_class(model=self.model)

     def update_model(self, pk, _attempt=0, **updates):
@@ -1094,7 +1094,7 @@ class RunJob(SourceControlMixin, BaseTask):
         # actual `run()` call; this _usually_ means something failed in
         # the pre_run_hook method
             return
-        if self.should_use_fact_cache():
+        if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
             job.log_lifecycle("finish_job_fact_cache")
             finish_fact_cache(
                 job.get_hosts_for_fact_cache(),
@@ -30,6 +30,7 @@ from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
|||||||
from awx.main.models import Instance, InstanceLink, UnifiedJob
|
from awx.main.models import Instance, InstanceLink, UnifiedJob
|
||||||
from awx.main.dispatch import get_task_queuename
|
from awx.main.dispatch import get_task_queuename
|
||||||
from awx.main.dispatch.publish import task
|
from awx.main.dispatch.publish import task
|
||||||
|
from awx.main.utils.pglock import advisory_lock
|
||||||
|
|
||||||
# Receptorctl
|
# Receptorctl
|
||||||
from receptorctl.socket_interface import ReceptorControl
|
from receptorctl.socket_interface import ReceptorControl
|
||||||
@@ -431,16 +432,16 @@ class AWXReceptorJob:
|
|||||||
# massive, only ask for last 1000 bytes
|
# massive, only ask for last 1000 bytes
|
||||||
startpos = max(stdout_size - 1000, 0)
|
startpos = max(stdout_size - 1000, 0)
|
||||||
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
|
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
|
||||||
resultsock.setblocking(False) # this makes resultfile reads non blocking
|
|
||||||
lines = resultfile.readlines()
|
lines = resultfile.readlines()
|
||||||
receptor_output = b"".join(lines).decode()
|
receptor_output = b"".join(lines).decode()
|
||||||
if receptor_output:
|
if receptor_output:
|
||||||
self.task.runner_callback.delay_update(result_traceback=receptor_output)
|
self.task.runner_callback.delay_update(result_traceback=f'Worker output:\n{receptor_output}')
|
||||||
elif detail:
|
elif detail:
|
||||||
self.task.runner_callback.delay_update(result_traceback=detail)
|
self.task.runner_callback.delay_update(result_traceback=f'Receptor detail:\n{detail}')
|
||||||
else:
|
else:
|
||||||
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
||||||
except Exception:
|
except Exception:
|
||||||
|
logger.exception(f'Work results error from job id={self.task.instance.id} work_unit={self.task.instance.work_unit_id}')
|
||||||
raise RuntimeError(detail)
|
raise RuntimeError(detail)
|
||||||
|
|
||||||
return res
|
return res
|
||||||
@@ -464,6 +465,7 @@ class AWXReceptorJob:
|
|||||||
event_handler=self.task.runner_callback.event_handler,
|
event_handler=self.task.runner_callback.event_handler,
|
||||||
finished_callback=self.task.runner_callback.finished_callback,
|
finished_callback=self.task.runner_callback.finished_callback,
|
||||||
status_handler=self.task.runner_callback.status_handler,
|
status_handler=self.task.runner_callback.status_handler,
|
||||||
|
artifacts_handler=self.task.runner_callback.artifacts_handler,
|
||||||
**self.runner_params,
|
**self.runner_params,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -674,26 +676,41 @@ RECEPTOR_CONFIG_STARTER = (
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@task()
|
def should_update_config(instances):
|
||||||
def write_receptor_config():
|
'''
|
||||||
lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
|
checks that the list of instances matches the list of
|
||||||
with lock:
|
tcp-peers in the config
|
||||||
receptor_config = list(RECEPTOR_CONFIG_STARTER)
|
'''
|
||||||
|
current_config = read_receptor_config() # this gets receptor conf lock
|
||||||
|
current_peers = []
|
||||||
|
for config_entry in current_config:
|
||||||
|
for key, value in config_entry.items():
|
||||||
|
if key.endswith('-peer'):
|
||||||
|
current_peers.append(value['address'])
|
||||||
|
intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances]
|
||||||
|
logger.debug(f"Peers current {current_peers} intended {intended_peers}")
|
||||||
|
if set(current_peers) == set(intended_peers):
|
||||||
|
return False # config file is already update to date
|
||||||
|
|
||||||
this_inst = Instance.objects.me()
|
return True
|
||||||
instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)
|
|
||||||
existing_peers = {link.target_id for link in InstanceLink.objects.filter(source=this_inst)}
|
|
||||||
new_links = []
|
|
||||||
for instance in instances:
|
|
||||||
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
|
|
||||||
receptor_config.append(peer)
|
|
||||||
if instance.id not in existing_peers:
|
|
||||||
new_links.append(InstanceLink(source=this_inst, target=instance, link_state=InstanceLink.States.ADDING))
|
|
||||||
|
|
||||||
InstanceLink.objects.bulk_create(new_links)
|
|
||||||
|
|
||||||
with open(__RECEPTOR_CONF, 'w') as file:
|
def generate_config_data():
|
||||||
yaml.dump(receptor_config, file, default_flow_style=False)
|
# returns two values
|
||||||
|
# receptor config - based on current database peers
|
||||||
|
# should_update - If True, receptor_config differs from the receptor conf file on disk
|
||||||
|
instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True)
|
||||||
|
|
||||||
|
receptor_config = list(RECEPTOR_CONFIG_STARTER)
|
||||||
|
for instance in instances:
|
||||||
|
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
|
||||||
|
receptor_config.append(peer)
|
||||||
|
should_update = should_update_config(instances)
|
||||||
|
return receptor_config, should_update
|
||||||
|
|
||||||
|
|
||||||
|
def reload_receptor():
|
||||||
|
logger.warning("Receptor config changed, reloading receptor")
|
||||||
|
|
||||||
# This needs to be outside of the lock because this function itself will acquire the lock.
|
# This needs to be outside of the lock because this function itself will acquire the lock.
|
||||||
receptor_ctl = get_receptor_ctl()
|
receptor_ctl = get_receptor_ctl()
|
||||||
@@ -709,8 +726,29 @@ def write_receptor_config():
     else:
         raise RuntimeError("Receptor reload failed")
 
-    links = InstanceLink.objects.filter(source=this_inst, target__in=instances, link_state=InstanceLink.States.ADDING)
-    links.update(link_state=InstanceLink.States.ESTABLISHED)
+
+@task()
+def write_receptor_config():
+    """
+    This task runs async on each control node, K8S only.
+    It is triggered whenever remote is added or removed, or if peers_from_control_nodes
+    is flipped.
+    It is possible for write_receptor_config to be called multiple times.
+    For example, if new instances are added in quick succession.
+    To prevent that case, each control node first grabs a DB advisory lock, specific
+    to just that control node (i.e. multiple control nodes can run this function
+    at the same time, since it only writes the local receptor config file)
+    """
+    with advisory_lock(f"{settings.CLUSTER_HOST_ID}_write_receptor_config", wait=True):
+        # Config file needs to be updated
+        receptor_config, should_update = generate_config_data()
+        if should_update:
+            lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
+            with lock:
+                with open(__RECEPTOR_CONF, 'w') as file:
+                    yaml.dump(receptor_config, file, default_flow_style=False)
+
+            reload_receptor()
 
 
 @task(queue=get_task_queuename)
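A minimal sketch of the locking pattern the new task relies on, using the same advisory_lock
helper this module imports; the body is elided and the lock name is the one from the docstring
above.

    # Hedged sketch: one lock name per control node, so repeated calls on the same node
    # serialize, while other control nodes keep writing their own receptor.conf freely.
    from django.conf import settings
    from awx.main.utils.pglock import advisory_lock

    def write_receptor_config_serialized():
        with advisory_lock(f"{settings.CLUSTER_HOST_ID}_write_receptor_config", wait=True):
            pass  # generate_config_data() and the FileLock-protected write go here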
@@ -730,6 +768,3 @@ def remove_deprovisioned_node(hostname):
 
     # This will as a side effect also delete the InstanceLinks that are tied to it.
     Instance.objects.filter(hostname=hostname).delete()
-
-    # Update the receptor configs for all of the control-plane.
-    write_receptor_config.apply_async(queue='tower_broadcast_all')
@@ -2,6 +2,7 @@
 from collections import namedtuple
 import functools
 import importlib
+import itertools
 import json
 import logging
 import os
@@ -14,7 +15,7 @@ from datetime import datetime
 
 # Django
 from django.conf import settings
-from django.db import transaction, DatabaseError, IntegrityError
+from django.db import connection, transaction, DatabaseError, IntegrityError
 from django.db.models.fields.related import ForeignKey
 from django.utils.timezone import now, timedelta
 from django.utils.encoding import smart_str
@@ -47,7 +48,7 @@ from awx.main.models import (
     Inventory,
     SmartInventoryMembership,
     Job,
-    HostMetric,
+    convert_jsonfields,
 )
 from awx.main.constants import ACTIVE_STATES
 from awx.main.dispatch.publish import task
@@ -62,6 +63,7 @@ from awx.main.utils.common import (
 
 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
+from awx.main.tasks.helpers import is_run_threshold_reached
 from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
 from awx.main.consumers import emit_channel_notification
 from awx.main import analytics
@@ -86,6 +88,11 @@ def dispatch_startup():
     if settings.IS_K8S:
         write_receptor_config()
 
+    try:
+        convert_jsonfields()
+    except Exception:
+        logger.exception("Failed json field conversion, skipping.")
+
     startup_logger.debug("Syncing Schedules")
     for sch in Schedule.objects.all():
         try:
@@ -129,6 +136,52 @@ def inform_cluster_of_shutdown():
         logger.exception('Encountered problem with normal shutdown signal.')
 
 
+@task(queue=get_task_queuename)
+def migrate_jsonfield(table, pkfield, columns):
+    batchsize = 10000
+    with advisory_lock(f'json_migration_{table}', wait=False) as acquired:
+        if not acquired:
+            return
+
+        from django.db.migrations.executor import MigrationExecutor
+
+        # If Django is currently running migrations, wait until it is done.
+        while True:
+            executor = MigrationExecutor(connection)
+            if not executor.migration_plan(executor.loader.graph.leaf_nodes()):
+                break
+            time.sleep(120)
+
+        logger.warning(f"Migrating json fields for {table}: {', '.join(columns)}")
+
+        with connection.cursor() as cursor:
+            for i in itertools.count(0, batchsize):
+                # Are there even any rows in the table beyond this point?
+                cursor.execute(f"select count(1) from {table} where {pkfield} >= %s limit 1;", (i,))
+                if not cursor.fetchone()[0]:
+                    break
+
+                column_expr = ', '.join(f"{colname} = {colname}_old::jsonb" for colname in columns)
+                # If any of the old columns have non-null values, the data needs to be cast and copied over.
+                empty_expr = ' or '.join(f"{colname}_old is not null" for colname in columns)
+                cursor.execute(  # Only clobber the new fields if there is non-null data in the old ones.
+                    f"""
+                    update {table}
+                    set {column_expr}
+                    where {pkfield} >= %s and {pkfield} < %s
+                    and {empty_expr};
+                    """,
+                    (i, i + batchsize),
+                )
+                rows = cursor.rowcount
+                logger.debug(f"Batch {i} to {i + batchsize} copied on {table}, {rows} rows affected.")
+
+            column_expr = ', '.join(f"DROP COLUMN {column}_old" for column in columns)
+            cursor.execute(f"ALTER TABLE {table} {column_expr};")
+
+        logger.warning(f"Migration of {table} to jsonb is finished.")
+
+
 @task(queue=get_task_queuename)
 def apply_cluster_membership_policies():
     from awx.main.signals import disable_activity_stream
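As an illustration of the batched statement built above, with hypothetical arguments
table='main_jobevent', pkfield='id', columns=['event_data'], the f-string expands roughly to
the following; the table and column names here are examples, not taken from this diff.

    table, pkfield, columns = 'main_jobevent', 'id', ['event_data']
    column_expr = ', '.join(f"{c} = {c}_old::jsonb" for c in columns)
    empty_expr = ' or '.join(f"{c}_old is not null" for c in columns)
    print(f"update {table} set {column_expr} where {pkfield} >= 0 and {pkfield} < 10000 and {empty_expr};")
    # -> update main_jobevent set event_data = event_data_old::jsonb
    #    where id >= 0 and id < 10000 and event_data_old is not null;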
@@ -315,14 +368,7 @@ def send_notifications(notification_list, job_id=None):
 
 @task(queue=get_task_queuename)
 def gather_analytics():
-    from awx.conf.models import Setting
-    from rest_framework.fields import DateTimeField
-
-    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
-    last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
-    gather_time = now()
-
-    if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
+    if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
         analytics.gather()
 
 
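The helper replaces the inline Setting lookup removed above. A hedged sketch of the check it is
expected to perform; the real awx.main.tasks.helpers implementation may differ in detail.

    # Sketch only, mirroring the logic of the removed inline code.
    from django.utils.timezone import now
    from rest_framework.fields import DateTimeField

    def threshold_reached(last_run, threshold_seconds):
        if not last_run:
            return True
        return (now() - DateTimeField().to_internal_value(last_run)).total_seconds() > threshold_seconds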
@@ -379,20 +425,6 @@ def cleanup_images_and_files():
     _cleanup_images_and_files()
 
 
-@task(queue=get_task_queuename)
-def cleanup_host_metrics():
-    from awx.conf.models import Setting
-    from rest_framework.fields import DateTimeField
-
-    last_cleanup = Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first()
-    last_time = DateTimeField().to_internal_value(last_cleanup.value) if last_cleanup and last_cleanup.value else None
-
-    cleanup_interval_secs = getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
-    if not last_time or ((now() - last_time).total_seconds() > cleanup_interval_secs):
-        months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
-        HostMetric.cleanup_task(months_ago)
-
-
 @task(queue=get_task_queuename)
 def cluster_node_health_check(node):
     """
@@ -434,7 +466,6 @@ def execution_node_health_check(node):
     data = worker_info(node)
 
     prior_capacity = instance.capacity
-
     instance.save_health_data(
         version='ansible-runner-' + data.get('runner_version', '???'),
         cpu=data.get('cpu_count', 0),
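The interval-driven cleanup removed here is exercised later in this diff through the
HostMetricTask entry point; a short usage sketch follows, with example threshold values (the
real ones come from the CLEANUP_HOST_METRICS_* settings).

    from awx.main.tasks.host_metrics import HostMetricTask

    HostMetricTask().cleanup(soft_threshold=12, hard_threshold=36)  # example thresholds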
@@ -455,13 +486,37 @@ def execution_node_health_check(node):
     return data
 
 
-def inspect_execution_nodes(instance_list):
-    with advisory_lock('inspect_execution_nodes_lock', wait=False):
-        node_lookup = {inst.hostname: inst for inst in instance_list}
+def inspect_established_receptor_connections(mesh_status):
+    '''
+    Flips link state from ADDING to ESTABLISHED
+    If the InstanceLink source and target match the entries
+    in Known Connection Costs, flip to Established.
+    '''
+    from awx.main.models import InstanceLink
+
+    all_links = InstanceLink.objects.filter(link_state=InstanceLink.States.ADDING)
+    if not all_links.exists():
+        return
+    active_receptor_conns = mesh_status['KnownConnectionCosts']
+    update_links = []
+    for link in all_links:
+        if link.link_state != InstanceLink.States.REMOVING:
+            if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}):
+                if link.link_state is not InstanceLink.States.ESTABLISHED:
+                    link.link_state = InstanceLink.States.ESTABLISHED
+                    update_links.append(link)
+
+    InstanceLink.objects.bulk_update(update_links, ['link_state'])
+
+
+def inspect_execution_and_hop_nodes(instance_list):
+    with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
+        node_lookup = {inst.hostname: inst for inst in instance_list}
         ctl = get_receptor_ctl()
         mesh_status = ctl.simple_command('status')
 
+        inspect_established_receptor_connections(mesh_status)
+
         nowtime = now()
         workers = mesh_status['Advertisements']
 
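A hedged example of the receptorctl status fields used by the two functions above; a real mesh
reports additional keys, and the cost values depend on the topology.

    mesh_status = {
        'Advertisements': [],  # worker advertisements, inspected further below
        'KnownConnectionCosts': {
            'awx-control-1': {'hop1': 1},
            'hop1': {'awx-control-1': 1, 'exec1': 1},
        },
    }
    established = 'hop1' in mesh_status['KnownConnectionCosts'].get('awx-control-1', {})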
@@ -519,7 +574,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
             this_inst = inst
             break
 
-    inspect_execution_nodes(instance_list)
+    inspect_execution_and_hop_nodes(instance_list)
 
     for inst in list(instance_list):
         if inst == this_inst:
@@ -708,7 +763,6 @@ def awx_periodic_scheduler():
                 new_unified_job.save(update_fields=['status', 'job_explanation'])
                 new_unified_job.websocket_emit_status("failed")
             emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
-        state.save()
 
 
 def schedule_manager_success_or_error(instance):
@@ -839,10 +893,7 @@ def delete_inventory(inventory_id, user_id, retries=5):
     user = None
     with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
         try:
-            i = Inventory.objects.get(id=inventory_id)
-            for host in i.hosts.iterator():
-                host.job_events_as_primary_host.update(host=None)
-            i.delete()
+            Inventory.objects.get(id=inventory_id).delete()
             emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
             logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
         except Inventory.DoesNotExist:
@@ -1,6 +1,9 @@
 import json
 
 from django.contrib.auth.models import User
+from django.core.exceptions import ValidationError
+
+from unittest import mock
 
 from awx.main.models import (
     Organization,
@@ -20,6 +23,7 @@ from awx.main.models import (
     WorkflowJobNode,
     WorkflowJobTemplateNode,
 )
+from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
 
 # mk methods should create only a single object of a single type.
 # they should also have the option of being persisted or not.
@@ -248,3 +252,42 @@ def mk_workflow_job_node(unified_job_template=None, success_nodes=None, failure_
     if persisted:
         workflow_node.save()
     return workflow_node
+
+
+def mk_host_metric(hostname, first_automation, last_automation=None, last_deleted=None, deleted=False, persisted=True):
+    ok, idx = False, 1
+    while not ok:
+        try:
+            with mock.patch("django.utils.timezone.now") as mock_now:
+                mock_now.return_value = first_automation
+                metric = HostMetric(
+                    hostname=hostname or f"host-{first_automation}-{idx}",
+                    first_automation=first_automation,
+                    last_automation=last_automation or first_automation,
+                    last_deleted=last_deleted,
+                    deleted=deleted,
+                )
+                metric.validate_unique()
+                if persisted:
+                    metric.save()
+                ok = True
+        except ValidationError as e:
+            # Repeat create for auto-generated hostname
+            if not hostname and e.message_dict.get('hostname', None):
+                idx += 1
+            else:
+                raise e
+
+
+def mk_host_metric_summary(date, license_consumed=0, license_capacity=0, hosts_added=0, hosts_deleted=0, indirectly_managed_hosts=0, persisted=True):
+    summary = HostMetricSummaryMonthly(
+        date=date,
+        license_consumed=license_consumed,
+        license_capacity=license_capacity,
+        hosts_added=hosts_added,
+        hosts_deleted=hosts_deleted,
+        indirectly_managed_hosts=indirectly_managed_hosts,
+    )
+    if persisted:
+        summary.save()
+    return summary
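A short usage sketch of the new factories; passing hostname=None exercises the auto-generated
"host-<first_automation>-<idx>" name and the ValidationError retry loop above. This assumes a
configured Django test database, as in the tests that follow.

    from django.utils import timezone

    mk_host_metric(None, first_automation=timezone.now(), persisted=False)
    summary = mk_host_metric_summary(date=timezone.now().date(), license_consumed=5, persisted=False)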
@@ -84,5 +84,6 @@ def test_custom_hostname_regex(post, admin_user):
             "hostname": value[0],
             "node_type": "execution",
             "node_state": "installed",
+            "peers": [],
         }
         post(url=url, user=admin_user, data=data, expect=value[1])
awx/main/tests/functional/api/test_instance_peers.py (new file, 342 lines)
@@ -0,0 +1,342 @@
import pytest
import yaml
import itertools
from unittest import mock

from django.db.utils import IntegrityError

from awx.api.versioning import reverse
from awx.main.models import Instance
from awx.api.views.instance_install_bundle import generate_group_vars_all_yml


def has_peer(group_vars, peer):
    peers = group_vars.get('receptor_peers', [])
    for p in peers:
        if f"{p['host']}:{p['port']}" == peer:
            return True
    return False


@pytest.mark.django_db
class TestPeers:
    @pytest.fixture(autouse=True)
    def configure_settings(self, settings):
        settings.IS_K8S = True

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_prevent_peering_to_self(self, node_type):
        """
        cannot peer to self
        """
        control_instance = Instance.objects.create(hostname='abc', node_type=node_type)
        with pytest.raises(IntegrityError):
            control_instance.peers.add(control_instance)

    @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution'])
    def test_creating_node(self, node_type, admin_user, post):
        """
        can only add hop and execution nodes via API
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "node_type": node_type},
            user=admin_user,
            expect=400 if node_type in ['control', 'hybrid'] else 201,
        )

    def test_changing_node_type(self, admin_user, patch):
        """
        cannot change node type
        """
        hop = Instance.objects.create(hostname='abc', node_type="hop")
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_type": "execution"},
            user=admin_user,
            expect=400,
        )

    @pytest.mark.parametrize('node_type', ['hop', 'execution'])
    def test_listener_port_null(self, node_type, admin_user, post):
        """
        listener_port can be None
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "node_type": node_type, "listener_port": None},
            user=admin_user,
            expect=201,
        )

    @pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)])
    def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user):
        """
        only hop and execution nodes can have peers_from_control_nodes set to True
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789},
            user=admin_user,
            expect=201 if allowed else 400,
        )

    def test_listener_port_is_required(self, admin_user, post):
        """
        if adding instance to peers list, that instance must have listener_port set
        """
        Instance.objects.create(hostname='abc', node_type="hop", listener_port=None)
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]},
            user=admin_user,
            expect=400,
        )

    def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post):
        """
        if peers_from_control_nodes is True, listener_port must an integer
        Assert that all other combinations are allowed
        """
        for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])):
            node_type, peers_from, listener_port = item
            # only disallowed case is when peers_from is True and listener port is None
            disallowed = peers_from and not listener_port
            post(
                url=reverse('api:instance_list'),
                data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port},
                user=admin_user,
                expect=400 if disallowed else 201,
            )

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
        """
        for control nodes, peers field should not be
        modified directly via patch.
        """
        control = Instance.objects.create(hostname='abc', node_type=node_type)
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
        assert [hop1] == list(control.peers.all())  # only hop1 should be peered
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": ["hop2"]},
            user=admin_user,
            expect=400,  # cannot add peers directly
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": ["hop1"]},
            user=admin_user,
            expect=200,  # patching with current peers list should be okay
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": []},
            user=admin_user,
            expect=400,  # cannot remove peers directly
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={},
            user=admin_user,
            expect=200,  # patching without data should be fine too
        )
        # patch hop2
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
            data={"peers_from_control_nodes": True},
            user=admin_user,
            expect=200,  # patching without data should be fine too
        )
        assert {hop1, hop2} == set(control.peers.all())  # hop1 and hop2 should now be peered from control node

    def test_disallow_changing_hostname(self, admin_user, patch):
        """
        cannot change hostname
        """
        hop = Instance.objects.create(hostname='hop', node_type='hop')
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"hostname": "hop2"},
            user=admin_user,
            expect=400,
        )

    def test_disallow_changing_node_state(self, admin_user, patch):
        """
        only allow setting to deprovisioning
        """
        hop = Instance.objects.create(hostname='hop', node_type='hop', node_state='installed')
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_state": "deprovisioning"},
            user=admin_user,
            expect=200,
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_state": "ready"},
            user=admin_user,
            expect=400,
        )

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_control_node_automatically_peers(self, node_type):
        """
        a new control node should automatically
        peer to hop

        peer to hop should be removed if hop is deleted
        """

        hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
        control = Instance.objects.create(hostname='abc', node_type=node_type)
        assert hop in control.peers.all()
        hop.delete()
        assert not control.peers.exists()

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_control_node_retains_other_peers(self, node_type):
        """
        if a new node comes online, other peer relationships should
        remain intact
        """
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
        hop1.peers.add(hop2)

        # a control node is added
        Instance.objects.create(hostname='control', node_type=node_type, listener_port=None)

        assert hop1.peers.exists()

    def test_group_vars(self, get, admin_user):
        """
        control > hop1 > hop2 < execution
        """
        control = Instance.objects.create(hostname='control', node_type='control', listener_port=None)
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
        execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789)

        execution.peers.add(hop2)
        hop1.peers.add(hop2)

        control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
        hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
        hop2_vars = yaml.safe_load(generate_group_vars_all_yml(hop2))
        execution_vars = yaml.safe_load(generate_group_vars_all_yml(execution))

        # control group vars assertions
        assert has_peer(control_vars, 'hop1:6789')
        assert not has_peer(control_vars, 'hop2:6789')
        assert not has_peer(control_vars, 'execution:6789')
        assert not control_vars.get('receptor_listener', False)

        # hop1 group vars assertions
        assert has_peer(hop1_vars, 'hop2:6789')
        assert not has_peer(hop1_vars, 'execution:6789')
        assert hop1_vars.get('receptor_listener', False)

        # hop2 group vars assertions
        assert not has_peer(hop2_vars, 'hop1:6789')
        assert not has_peer(hop2_vars, 'execution:6789')
        assert hop2_vars.get('receptor_listener', False)
        assert hop2_vars.get('receptor_peers', []) == []

        # execution group vars assertions
        assert has_peer(execution_vars, 'hop2:6789')
        assert not has_peer(execution_vars, 'hop1:6789')
        assert execution_vars.get('receptor_listener', False)

    def test_write_receptor_config_called(self):
        """
        Assert that write_receptor_config is called
        when certain instances are created, or if
        peers_from_control_nodes changes.
        In general, write_receptor_config should only
        be called when necessary, as it will reload
        receptor backend connections which is not trivial.
        """
        with mock.patch('awx.main.models.ha.schedule_write_receptor_config') as write_method:
            # new control instance but nothing to peer to (no)
            control = Instance.objects.create(hostname='control1', node_type='control')
            write_method.assert_not_called()

            # new hop node with peers_from_control_nodes False (no)
            hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
            hop1.delete()
            write_method.assert_not_called()

            # new hop node with peers_from_control_nodes True (yes)
            hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
            write_method.assert_called()
            write_method.reset_mock()

            # new control instance but with something to peer to (yes)
            Instance.objects.create(hostname='control2', node_type='control')
            write_method.assert_called()
            write_method.reset_mock()

            # new hop node with peers_from_control_nodes False and peered to another hop node (no)
            hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
            hop2.peers.add(hop1)
            hop2.delete()
            write_method.assert_not_called()

            # changing peers_from_control_nodes to False (yes)
            hop1.peers_from_control_nodes = False
            hop1.save()
            write_method.assert_called()
            write_method.reset_mock()

            # deleting hop node that has peers_from_control_nodes to False (no)
            hop1.delete()
            write_method.assert_not_called()

            # deleting control nodes (no)
            control.delete()
            write_method.assert_not_called()

    def test_write_receptor_config_data(self):
        """
        Assert the correct peers are included in data that will
        be written to receptor.conf
        """
        from awx.main.tasks.receptor import RECEPTOR_CONFIG_STARTER

        with mock.patch('awx.main.tasks.receptor.read_receptor_config', return_value=list(RECEPTOR_CONFIG_STARTER)):
            from awx.main.tasks.receptor import generate_config_data

            _, should_update = generate_config_data()
            assert not should_update

            # not peered, so config file should not be updated
            for i in range(3):
                Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False)

            _, should_update = generate_config_data()
            assert not should_update

            # peered, so config file should be updated
            expected_peers = []
            for i in range(3):
                expected_peers.append(f"hop-{i}:6789")
                Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True)

            for i in range(3):
                expected_peers.append(f"exYes-{i}:6789")
                Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True)

            new_config, should_update = generate_config_data()
            assert should_update

            peers = []
            for entry in new_config:
                for key, value in entry.items():
                    if key == "tcp-peer":
                        peers.append(value['address'])

            assert set(expected_peers) == set(peers)
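For orientation, the group_vars shape that has_peer() inspects looks roughly like this; the
real generate_group_vars_all_yml() output carries additional keys.

    group_vars = {
        'receptor_listener': True,
        'receptor_peers': [{'host': 'hop2', 'port': 6789}],
    }
    assert has_peer(group_vars, 'hop2:6789')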
@@ -0,0 +1,78 @@
import pytest

from awx.main.tasks.host_metrics import HostMetricTask
from awx.main.models.inventory import HostMetric
from awx.main.tests.factories.fixtures import mk_host_metric
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.utils import timezone


@pytest.mark.django_db
def test_no_host_metrics():
    """No-crash test"""
    assert HostMetric.objects.count() == 0
    HostMetricTask().cleanup(soft_threshold=0, hard_threshold=0)
    HostMetricTask().cleanup(soft_threshold=24, hard_threshold=42)
    assert HostMetric.objects.count() == 0


@pytest.mark.django_db
def test_delete_exception():
    """Crash test"""
    with pytest.raises(ValueError):
        HostMetricTask().soft_cleanup("")
    with pytest.raises(TypeError):
        HostMetricTask().hard_cleanup(set())


@pytest.mark.django_db
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, 20])
def test_soft_delete(threshold):
    """Metrics with last_automation < threshold are updated to deleted=True"""
    mk_host_metric('host_1', first_automation=ago(months=1), last_automation=ago(months=1), deleted=False)
    mk_host_metric('host_2', first_automation=ago(months=1), last_automation=ago(months=1), deleted=True)
    mk_host_metric('host_3', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=False)
    mk_host_metric('host_4', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=True)
    mk_host_metric('host_5', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=False)
    mk_host_metric('host_6', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=True)
    mk_host_metric('host_7', first_automation=ago(months=1), last_automation=ago(months=42), deleted=False)
    mk_host_metric('host_8', first_automation=ago(months=1), last_automation=ago(months=42), deleted=True)

    assert HostMetric.objects.count() == 8
    assert HostMetric.active_objects.count() == 4

    for i in range(2):
        HostMetricTask().cleanup(soft_threshold=threshold)
        assert HostMetric.objects.count() == 8

    hostnames = set(HostMetric.objects.filter(deleted=False).order_by('hostname').values_list('hostname', flat=True))
    assert hostnames == {'host_1', 'host_3'}


@pytest.mark.django_db
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD, 20])
def test_hard_delete(threshold):
    """Metrics with last_deleted < threshold and deleted=True are deleted from the db"""
    mk_host_metric('host_1', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=False)
    mk_host_metric('host_2', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=True)
    mk_host_metric('host_3', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=False)
    mk_host_metric('host_4', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=True)
    mk_host_metric('host_5', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=False)
    mk_host_metric('host_6', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=True)
    mk_host_metric('host_7', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=False)
    mk_host_metric('host_8', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=True)

    assert HostMetric.objects.count() == 8
    assert HostMetric.active_objects.count() == 4

    for i in range(2):
        HostMetricTask().cleanup(hard_threshold=threshold)
        assert HostMetric.objects.count() == 6

    hostnames = set(HostMetric.objects.order_by('hostname').values_list('hostname', flat=True))
    assert hostnames == {'host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_7'}


def ago(months=0, hours=0):
    return timezone.now() - relativedelta(months=months, hours=hours)
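The ago() helper at the bottom of this file drives the boundary cases above; a small hedged
illustration of which side of an N-month threshold each variant lands on.

    from dateutil.relativedelta import relativedelta
    from django.utils import timezone

    n = 12  # example threshold in months
    just_outside = timezone.now() - relativedelta(months=n, hours=1)   # ago(months=n, hours=1)
    just_inside = timezone.now() - relativedelta(months=n, hours=-1)   # ago(months=n, hours=-1)
    assert just_outside < just_inside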
@@ -0,0 +1,382 @@
import pytest
import datetime
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.utils import timezone


from awx.main.management.commands.host_metric_summary_monthly import Command
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
from awx.main.tests.factories.fixtures import mk_host_metric, mk_host_metric_summary


@pytest.fixture
def threshold():
    return int(getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36))


@pytest.mark.django_db
@pytest.mark.parametrize("metrics_cnt", [0, 1, 2, 3])
@pytest.mark.parametrize("mode", ["old_data", "actual_data", "all_data"])
def test_summaries_counts(threshold, metrics_cnt, mode):
    assert HostMetricSummaryMonthly.objects.count() == 0

    for idx in range(metrics_cnt):
        if mode == "old_data" or mode == "all_data":
            mk_host_metric(None, months_ago(threshold + idx, "dt"))
        elif mode == "actual_data" or mode == "all_data":
            mk_host_metric(None, (months_ago(threshold - idx, "dt")))

    Command().handle()

    # Number of records is equal to host metrics' hard cleanup months
    assert HostMetricSummaryMonthly.objects.count() == threshold

    # Records start with date in the month following to the threshold month
    date = months_ago(threshold - 1)
    for metric in list(HostMetricSummaryMonthly.objects.order_by('date').all()):
        assert metric.date == date
        date += relativedelta(months=1)

    # Older record are untouched
    mk_host_metric_summary(date=months_ago(threshold + 10))
    Command().handle()

    assert HostMetricSummaryMonthly.objects.count() == threshold + 1


@pytest.mark.django_db
@pytest.mark.parametrize("mode", ["old_data", "actual_data", "all_data"])
def test_summary_values(threshold, mode):
    tester = {"old_data": MetricsTesterOldData(threshold), "actual_data": MetricsTesterActualData(threshold), "all_data": MetricsTesterCombinedData(threshold)}[
        mode
    ]

    for iteration in ["create_metrics", "add_old_summaries", "change_metrics", "delete_metrics", "add_metrics"]:
        getattr(tester, iteration)()  # call method by string

        # Operation is idempotent, repeat twice
        for _ in range(2):
            Command().handle()
            # call assert method by string
            getattr(tester, f"assert_{iteration}")()


class MetricsTester:
    def __init__(self, threshold, ignore_asserts=False):
        self.threshold = threshold
        self.expected_summaries = {}
        self.ignore_asserts = ignore_asserts

    def add_old_summaries(self):
        """These records don't correspond with Host metrics"""
        mk_host_metric_summary(self.below(4), license_consumed=100, hosts_added=10, hosts_deleted=5)
        mk_host_metric_summary(self.below(3), license_consumed=105, hosts_added=20, hosts_deleted=10)
        mk_host_metric_summary(self.below(2), license_consumed=115, hosts_added=60, hosts_deleted=75)

    def assert_add_old_summaries(self):
        """Old summary records should be untouched"""
        self.expected_summaries[self.below(4)] = {"date": self.below(4), "license_consumed": 100, "hosts_added": 10, "hosts_deleted": 5}
        self.expected_summaries[self.below(3)] = {"date": self.below(3), "license_consumed": 105, "hosts_added": 20, "hosts_deleted": 10}
        self.expected_summaries[self.below(2)] = {"date": self.below(2), "license_consumed": 115, "hosts_added": 60, "hosts_deleted": 75}

        self.assert_host_metric_summaries()

    def assert_host_metric_summaries(self):
        """Ignore asserts when old/actual test object is used only as a helper for Combined test"""
        if self.ignore_asserts:
            return True

        for summary in list(HostMetricSummaryMonthly.objects.order_by('date').all()):
            assert self.expected_summaries.get(summary.date, None) is not None

            assert self.expected_summaries[summary.date] == {
                "date": summary.date,
                "license_consumed": summary.license_consumed,
                "hosts_added": summary.hosts_added,
                "hosts_deleted": summary.hosts_deleted,
            }

    def below(self, months, fmt="date"):
        """months below threshold, returns first date of that month"""
        date = months_ago(self.threshold + months)
        if fmt == "dt":
            return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
        else:
            return date

    def above(self, months, fmt="date"):
        """months above threshold, returns first date of that month"""
        date = months_ago(self.threshold - months)
        if fmt == "dt":
            return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
        else:
            return date


class MetricsTesterOldData(MetricsTester):
    def create_metrics(self):
        """Creates 7 host metrics older than delete threshold"""
        mk_host_metric("host_1", first_automation=self.below(3, "dt"))
        mk_host_metric("host_2", first_automation=self.below(2, "dt"))
        mk_host_metric("host_3", first_automation=self.below(2, "dt"), last_deleted=self.above(2, "dt"), deleted=False)
        mk_host_metric("host_4", first_automation=self.below(2, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
        mk_host_metric("host_5", first_automation=self.below(2, "dt"), last_deleted=self.below(2, "dt"), deleted=True)
        mk_host_metric("host_6", first_automation=self.below(1, "dt"), last_deleted=self.below(1, "dt"), deleted=False)
        mk_host_metric("host_7", first_automation=self.below(1, "dt"))

    def assert_create_metrics(self):
        """
        Month 1 is computed from older host metrics,
        Month 2 has deletion (host_4)
        Other months are unchanged (same as month 2)
        """
        self.expected_summaries = {
            self.above(1): {"date": self.above(1), "license_consumed": 6, "hosts_added": 0, "hosts_deleted": 0},
            self.above(2): {"date": self.above(2), "license_consumed": 5, "hosts_added": 0, "hosts_deleted": 1},
        }
        # no change in months 3+
        idx = 3
        month = self.above(idx)
        while month <= beginning_of_the_month():
            self.expected_summaries[self.above(idx)] = {"date": self.above(idx), "license_consumed": 5, "hosts_added": 0, "hosts_deleted": 0}
            month += relativedelta(months=1)
            idx += 1

        self.assert_host_metric_summaries()

    def add_old_summaries(self):
        super().add_old_summaries()

    def assert_add_old_summaries(self):
        super().assert_add_old_summaries()

    @staticmethod
    def change_metrics():
        """Hosts 1,2 soft deleted, host_4 automated again (undeleted)"""
        HostMetric.objects.filter(hostname='host_1').update(last_deleted=beginning_of_the_month("dt"), deleted=True)
        HostMetric.objects.filter(hostname='host_2').update(last_deleted=timezone.now(), deleted=True)
        HostMetric.objects.filter(hostname='host_4').update(deleted=False)

    def assert_change_metrics(self):
        """
        Summaries since month 2 were changed (host_4 restored == automated again)
        Current month has 2 deletions (host_1, host_2)
        """
        self.expected_summaries[self.above(2)] |= {'hosts_deleted': 0}
        for idx in range(2, self.threshold):
            self.expected_summaries[self.above(idx)] |= {'license_consumed': 6}
        self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 4, 'hosts_deleted': 2}

        self.assert_host_metric_summaries()

    @staticmethod
    def delete_metrics():
        """Deletes metric deleted before the threshold"""
        HostMetric.objects.filter(hostname='host_5').delete()

    def assert_delete_metrics(self):
        """No change"""
        self.assert_host_metric_summaries()

    @staticmethod
    def add_metrics():
        """Adds new metrics"""
        mk_host_metric("host_24", first_automation=beginning_of_the_month("dt"))
        mk_host_metric("host_25", first_automation=beginning_of_the_month("dt"))  # timezone.now())

    def assert_add_metrics(self):
        """Summary in current month is updated"""
        self.expected_summaries[beginning_of_the_month()]['license_consumed'] = 6
        self.expected_summaries[beginning_of_the_month()]['hosts_added'] = 2

        self.assert_host_metric_summaries()


class MetricsTesterActualData(MetricsTester):
    def create_metrics(self):
        """Creates 16 host metrics newer than delete threshold"""
        mk_host_metric("host_8", first_automation=self.above(1, "dt"))
        mk_host_metric("host_9", first_automation=self.above(1, "dt"), last_deleted=self.above(1, "dt"))
        mk_host_metric("host_10", first_automation=self.above(1, "dt"), last_deleted=self.above(1, "dt"), deleted=True)
        mk_host_metric("host_11", first_automation=self.above(1, "dt"), last_deleted=self.above(2, "dt"))
        mk_host_metric("host_12", first_automation=self.above(1, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
        mk_host_metric("host_13", first_automation=self.above(2, "dt"))
        mk_host_metric("host_14", first_automation=self.above(2, "dt"), last_deleted=self.above(2, "dt"))
        mk_host_metric("host_15", first_automation=self.above(2, "dt"), last_deleted=self.above(2, "dt"), deleted=True)
        mk_host_metric("host_16", first_automation=self.above(2, "dt"), last_deleted=self.above(3, "dt"))
        mk_host_metric("host_17", first_automation=self.above(2, "dt"), last_deleted=self.above(3, "dt"), deleted=True)
        mk_host_metric("host_18", first_automation=self.above(4, "dt"))
        # next one shouldn't happen in real (deleted=True, last_deleted = NULL)
        mk_host_metric("host_19", first_automation=self.above(4, "dt"), deleted=True)
        mk_host_metric("host_20", first_automation=self.above(4, "dt"), last_deleted=self.above(4, "dt"))
        mk_host_metric("host_21", first_automation=self.above(4, "dt"), last_deleted=self.above(4, "dt"), deleted=True)
        mk_host_metric("host_22", first_automation=self.above(4, "dt"), last_deleted=self.above(5, "dt"))
        mk_host_metric("host_23", first_automation=self.above(4, "dt"), last_deleted=self.above(5, "dt"), deleted=True)

    def assert_create_metrics(self):
        self.expected_summaries = {
            self.above(1): {"date": self.above(1), "license_consumed": 4, "hosts_added": 5, "hosts_deleted": 1},
            self.above(2): {"date": self.above(2), "license_consumed": 7, "hosts_added": 5, "hosts_deleted": 2},
            self.above(3): {"date": self.above(3), "license_consumed": 6, "hosts_added": 0, "hosts_deleted": 1},
            self.above(4): {"date": self.above(4), "license_consumed": 11, "hosts_added": 6, "hosts_deleted": 1},
            self.above(5): {"date": self.above(5), "license_consumed": 10, "hosts_added": 0, "hosts_deleted": 1},
        }
        # no change in months 6+
        idx = 6
        month = self.above(idx)
        while month <= beginning_of_the_month():
            self.expected_summaries[self.above(idx)] = {"date": self.above(idx), "license_consumed": 10, "hosts_added": 0, "hosts_deleted": 0}
            month += relativedelta(months=1)
            idx += 1

        self.assert_host_metric_summaries()

    def add_old_summaries(self):
        super().add_old_summaries()

    def assert_add_old_summaries(self):
        super().assert_add_old_summaries()

    @staticmethod
    def change_metrics():
        """
        - Hosts 12, 19, 21 were automated again (undeleted)
        - Host 16 was soft deleted
        - Host 17 was undeleted and soft deleted again
        """
        HostMetric.objects.filter(hostname='host_12').update(deleted=False)
        HostMetric.objects.filter(hostname='host_16').update(last_deleted=timezone.now(), deleted=True)
        HostMetric.objects.filter(hostname='host_17').update(last_deleted=beginning_of_the_month("dt"), deleted=True)
        HostMetric.objects.filter(hostname='host_19').update(deleted=False)
        HostMetric.objects.filter(hostname='host_21').update(deleted=False)

    def assert_change_metrics(self):
        """
        Summaries since month 2 were changed
        Current month has 2 deletions (host_16, host_17)
        """
        self.expected_summaries[self.above(2)] |= {'license_consumed': 8, 'hosts_deleted': 1}
        self.expected_summaries[self.above(3)] |= {'license_consumed': 8, 'hosts_deleted': 0}
        self.expected_summaries[self.above(4)] |= {'license_consumed': 14, 'hosts_deleted': 0}

        # month 5 had hosts_deleted 1 => license_consumed == 14 - 1
        for idx in range(5, self.threshold):
            self.expected_summaries[self.above(idx)] |= {'license_consumed': 13}
        self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 11, 'hosts_deleted': 2}

        self.assert_host_metric_summaries()

    def delete_metrics(self):
        """Hard cleanup can't delete metrics newer than threshold. No change"""
        pass

    def assert_delete_metrics(self):
        """No change"""
        self.assert_host_metric_summaries()

    @staticmethod
    def add_metrics():
        """Adds new metrics"""
        mk_host_metric("host_26", first_automation=beginning_of_the_month("dt"))
        mk_host_metric("host_27", first_automation=timezone.now())

    def assert_add_metrics(self):
        """
        Two metrics were deleted in current month by change_metrics()
        Two metrics are added now
        => license_consumed is equal to the previous month (13 - 2 + 2)
        """
        self.expected_summaries[beginning_of_the_month()] |= {'license_consumed': 13, 'hosts_added': 2}

        self.assert_host_metric_summaries()


class MetricsTesterCombinedData(MetricsTester):
    def __init__(self, threshold):
        super().__init__(threshold)
        self.old_data = MetricsTesterOldData(threshold, ignore_asserts=True)
        self.actual_data = MetricsTesterActualData(threshold, ignore_asserts=True)

    def assert_host_metric_summaries(self):
        self._combine_expected_summaries()
        super().assert_host_metric_summaries()

    def create_metrics(self):
        self.old_data.create_metrics()
        self.actual_data.create_metrics()

    def assert_create_metrics(self):
        self.old_data.assert_create_metrics()
        self.actual_data.assert_create_metrics()

        self.assert_host_metric_summaries()

    def add_old_summaries(self):
        super().add_old_summaries()

    def assert_add_old_summaries(self):
        self.old_data.assert_add_old_summaries()
        self.actual_data.assert_add_old_summaries()

        self.assert_host_metric_summaries()

    def change_metrics(self):
        self.old_data.change_metrics()
        self.actual_data.change_metrics()

    def assert_change_metrics(self):
        self.old_data.assert_change_metrics()
        self.actual_data.assert_change_metrics()

        self.assert_host_metric_summaries()

    def delete_metrics(self):
        self.old_data.delete_metrics()
        self.actual_data.delete_metrics()

    def assert_delete_metrics(self):
        self.old_data.assert_delete_metrics()
        self.actual_data.assert_delete_metrics()

        self.assert_host_metric_summaries()

    def add_metrics(self):
        self.old_data.add_metrics()
        self.actual_data.add_metrics()

    def assert_add_metrics(self):
        self.old_data.assert_add_metrics()
        self.actual_data.assert_add_metrics()

        self.assert_host_metric_summaries()

    def _combine_expected_summaries(self):
        """
        Expected summaries are sum of expected values for tests with old and actual data
        Except data older than hard delete threshold (these summaries are untouched by task => the same in all tests)
        """
        for date, summary in self.old_data.expected_summaries.items():
            if date <= months_ago(self.threshold):
                license_consumed = summary['license_consumed']
                hosts_added = summary['hosts_added']
                hosts_deleted = summary['hosts_deleted']
            else:
                license_consumed = summary['license_consumed'] + self.actual_data.expected_summaries[date]['license_consumed']
                hosts_added = summary['hosts_added'] + self.actual_data.expected_summaries[date]['hosts_added']
                hosts_deleted = summary['hosts_deleted'] + self.actual_data.expected_summaries[date]['hosts_deleted']
            self.expected_summaries[date] = {'date': date, 'license_consumed': license_consumed, 'hosts_added': hosts_added, 'hosts_deleted': hosts_deleted}


def months_ago(num, fmt="date"):
    if num is None:
        return None
    return beginning_of_the_month(fmt) - relativedelta(months=num)


def beginning_of_the_month(fmt="date"):
    date = datetime.date.today().replace(day=1)
    if fmt == "dt":
        return timezone.make_aware(datetime.datetime.combine(date, datetime.datetime.min.time()))
    else:
        return date
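A worked example of the date helpers above, using a hypothetical "today" so the output is
concrete.

    import datetime
    from dateutil.relativedelta import relativedelta

    today = datetime.date(2023, 6, 15)  # hypothetical current date
    first_of_month = today.replace(day=1)            # beginning_of_the_month() -> 2023-06-01
    print(first_of_month - relativedelta(months=2))  # months_ago(2) -> 2023-04-01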
@@ -331,15 +331,13 @@ def test_single_job_dependencies_project_launch(controlplane_instance_group, job
     p.save(skip_update=True)
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         dm = DependencyManager()
-        with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
-            dm.schedule()
-        mock_pu.assert_called_once_with(j)
+        dm.schedule()
     pu = [x for x in p.project_updates.all()]
     assert len(pu) == 1
     TaskManager().schedule()
     TaskManager.start_task.assert_called_once_with(pu[0], controlplane_instance_group, instance)
     pu[0].status = "successful"
     pu[0].save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
         TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
@@ -359,15 +357,14 @@ def test_single_job_dependencies_inventory_update_launch(controlplane_instance_g
     i.inventory_sources.add(ii)
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         dm = DependencyManager()
-        with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
-            dm.schedule()
-        mock_iu.assert_called_once_with(j, ii)
+        dm.schedule()
+    assert ii.inventory_updates.count() == 1
     iu = [x for x in ii.inventory_updates.all()]
     assert len(iu) == 1
     TaskManager().schedule()
     TaskManager.start_task.assert_called_once_with(iu[0], controlplane_instance_group, instance)
     iu[0].status = "successful"
     iu[0].save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
         TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
@@ -382,11 +379,11 @@ def test_inventory_update_launches_project_update(controlplane_instance_group, s
     iu = ii.create_inventory_update()
     iu.status = "pending"
     iu.save()
+    assert project.project_updates.count() == 0
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         dm = DependencyManager()
-        with mock.patch.object(DependencyManager, "create_project_update", wraps=dm.create_project_update) as mock_pu:
-            dm.schedule()
-    mock_pu.assert_called_with(iu, project_id=project.id)
+        dm.schedule()
+    assert project.project_updates.count() == 1


 @pytest.mark.django_db
@@ -407,9 +404,8 @@ def test_job_dependency_with_already_updated(controlplane_instance_group, job_te
     j.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         dm = DependencyManager()
-        with mock.patch.object(DependencyManager, "create_inventory_update", wraps=dm.create_inventory_update) as mock_iu:
-            dm.schedule()
-        mock_iu.assert_not_called()
+        dm.schedule()
+    assert ii.inventory_updates.count() == 0
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
         TaskManager.start_task.assert_called_once_with(j, controlplane_instance_group, instance)
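The side of these hunks that uses mock_pu / mock_iu leans on unittest.mock's `wraps=` option: the patched method still runs for real, but every call is recorded so it can be asserted on afterwards. A tiny self-contained sketch of that pattern (illustrative only, not AWX code):

from unittest import mock

class Notifier:
    def send(self, msg):
        return f"sent: {msg}"

def process(notifier):
    return notifier.send("hello")

notifier = Notifier()
# wraps= keeps the real behavior while recording calls for assertions.
with mock.patch.object(Notifier, "send", wraps=notifier.send) as mock_send:
    assert process(notifier) == "sent: hello"
mock_send.assert_called_once_with("hello")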
@@ -442,7 +438,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
         TaskManager().schedule()
     pu = p.project_updates.first()
     iu = ii.inventory_updates.first()
-    TaskManager.start_task.assert_has_calls([mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)])
+    TaskManager.start_task.assert_has_calls(
+        [mock.call(iu, controlplane_instance_group, instance), mock.call(pu, controlplane_instance_group, instance)], any_order=True
+    )
     pu.status = "successful"
     pu.finished = pu.created + timedelta(seconds=1)
     pu.save()
@@ -451,7 +449,9 @@ def test_shared_dependencies_launch(controlplane_instance_group, job_template_fa
     iu.save()
     with mock.patch("awx.main.scheduler.TaskManager.start_task"):
         TaskManager().schedule()
-        TaskManager.start_task.assert_has_calls([mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)])
+        TaskManager.start_task.assert_has_calls(
+            [mock.call(j1, controlplane_instance_group, instance), mock.call(j2, controlplane_instance_group, instance)], any_order=True
+        )
     pu = [x for x in p.project_updates.all()]
     iu = [x for x in ii.inventory_updates.all()]
     assert len(pu) == 1
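The only functional change in these two hunks is passing `any_order=True` to `assert_has_calls`, so the assertion no longer depends on which dependency the task manager happens to start first. A small illustration with plain mocks (not AWX code):

from unittest import mock

m = mock.Mock()
m("project_update")
m("inventory_update")

# Order-sensitive form: this would fail, because the calls happened in the opposite order.
# m.assert_has_calls([mock.call("inventory_update"), mock.call("project_update")])

# Order-insensitive form: passes as long as both calls happened.
m.assert_has_calls([mock.call("inventory_update"), mock.call("project_update")], any_order=True)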
@@ -3,6 +3,7 @@ import multiprocessing
 import random
 import signal
 import time
+import yaml
 from unittest import mock

 from django.utils.timezone import now as tz_now
@@ -13,6 +14,7 @@ from awx.main.dispatch import reaper
 from awx.main.dispatch.pool import StatefulPoolWorker, WorkerPool, AutoscalePool
 from awx.main.dispatch.publish import task
 from awx.main.dispatch.worker import BaseWorker, TaskWorker
+from awx.main.dispatch.periodic import Scheduler


 '''
@@ -439,3 +441,76 @@ class TestJobReaper(object):
         assert job.started > ref_time
         assert job.status == 'running'
         assert job.job_explanation == ''
+
+
+@pytest.mark.django_db
+class TestScheduler:
+    def test_too_many_schedules_freak_out(self):
+        with pytest.raises(RuntimeError):
+            Scheduler({'job1': {'schedule': datetime.timedelta(seconds=1)}, 'job2': {'schedule': datetime.timedelta(seconds=1)}})
+
+    def test_spread_out(self):
+        scheduler = Scheduler(
+            {
+                'job1': {'schedule': datetime.timedelta(seconds=16)},
+                'job2': {'schedule': datetime.timedelta(seconds=16)},
+                'job3': {'schedule': datetime.timedelta(seconds=16)},
+                'job4': {'schedule': datetime.timedelta(seconds=16)},
+            }
+        )
+        assert [job.offset for job in scheduler.jobs] == [0, 4, 8, 12]
+
+    def test_missed_schedule(self, mocker):
+        scheduler = Scheduler({'job1': {'schedule': datetime.timedelta(seconds=10)}})
+        assert scheduler.jobs[0].missed_runs(time.time() - scheduler.global_start) == 0
+        mocker.patch('awx.main.dispatch.periodic.time.time', return_value=scheduler.global_start + 50)
+        scheduler.get_and_mark_pending()
+        assert scheduler.jobs[0].missed_runs(50) > 1
+
+    def test_advance_schedule(self, mocker):
+        scheduler = Scheduler(
+            {
+                'job1': {'schedule': datetime.timedelta(seconds=30)},
+                'joba': {'schedule': datetime.timedelta(seconds=20)},
+                'jobb': {'schedule': datetime.timedelta(seconds=20)},
+            }
+        )
+        for job in scheduler.jobs:
+            # HACK: the offsets automatically added make this a hard test to write... so remove offsets
+            job.offset = 0.0
+        mocker.patch('awx.main.dispatch.periodic.time.time', return_value=scheduler.global_start + 29)
+        to_run = scheduler.get_and_mark_pending()
+        assert set(job.name for job in to_run) == set(['joba', 'jobb'])
+        mocker.patch('awx.main.dispatch.periodic.time.time', return_value=scheduler.global_start + 39)
+        to_run = scheduler.get_and_mark_pending()
+        assert len(to_run) == 1
+        assert to_run[0].name == 'job1'
+
+    @staticmethod
+    def get_job(scheduler, name):
+        for job in scheduler.jobs:
+            if job.name == name:
+                return job
+
+    def test_scheduler_debug(self, mocker):
+        scheduler = Scheduler(
+            {
+                'joba': {'schedule': datetime.timedelta(seconds=20)},
+                'jobb': {'schedule': datetime.timedelta(seconds=50)},
+                'jobc': {'schedule': datetime.timedelta(seconds=500)},
+                'jobd': {'schedule': datetime.timedelta(seconds=20)},
+            }
+        )
+        rel_time = 119.9  # slightly under the 6th 20-second bin, to avoid offset problems
+        current_time = scheduler.global_start + rel_time
+        mocker.patch('awx.main.dispatch.periodic.time.time', return_value=current_time - 1.0e-8)
+        self.get_job(scheduler, 'jobb').mark_run(rel_time)
+        self.get_job(scheduler, 'jobd').mark_run(rel_time - 20.0)
+
+        output = scheduler.debug()
+        data = yaml.safe_load(output)
+        assert data['schedule_list']['jobc']['last_run_seconds_ago'] is None
+        assert data['schedule_list']['joba']['missed_runs'] == 4
+        assert data['schedule_list']['jobd']['missed_runs'] == 3
+        assert data['schedule_list']['jobd']['completed_runs'] == 1
+        assert data['schedule_list']['jobb']['next_run_in_seconds'] > 25.0
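The `test_spread_out` case above expects four jobs sharing a 16-second period to be offset by 0, 4, 8 and 12 seconds. A minimal sketch of how such offsets can be derived (an assumption about the intent, not the actual `awx.main.dispatch.periodic.Scheduler` implementation):

import datetime

def spread_offsets(schedules):
    # Give each job sharing the same period an evenly spaced offset,
    # so periodic work does not all fire on the same tick.
    by_period = {}
    for name, spec in schedules.items():
        by_period.setdefault(spec['schedule'], []).append(name)
    offsets = {}
    for period, names in by_period.items():
        step = period.total_seconds() / len(names)
        for i, name in enumerate(sorted(names)):
            offsets[name] = i * step
    return offsets

jobs = {f'job{i}': {'schedule': datetime.timedelta(seconds=16)} for i in range(1, 5)}
print(spread_offsets(jobs))  # {'job1': 0.0, 'job2': 4.0, 'job3': 8.0, 'job4': 12.0}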
@@ -6,6 +6,7 @@ import json
 from awx.main.models import (
     Job,
     Instance,
+    Host,
     JobHostSummary,
     InventoryUpdate,
     InventorySource,
@@ -18,6 +19,9 @@ from awx.main.models import (
     ExecutionEnvironment,
 )
 from awx.main.tasks.system import cluster_node_heartbeat
+from awx.main.tasks.facts import update_hosts
+
+from django.db import OperationalError
 from django.test.utils import override_settings


@@ -33,9 +37,9 @@ def test_orphan_unified_job_creation(instance, inventory):


 @pytest.mark.django_db
-@mock.patch('awx.main.tasks.system.inspect_execution_nodes', lambda *args, **kwargs: None)
-@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu: 8)
-@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem: 62)
+@mock.patch('awx.main.tasks.system.inspect_execution_and_hop_nodes', lambda *args, **kwargs: None)
+@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu, is_control_node: 8)
+@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 62)
 def test_job_capacity_and_with_inactive_node():
     i = Instance.objects.create(hostname='test-1')
     i.save_health_data('18.0.1', 2, 8000)
@@ -112,6 +116,51 @@ def test_job_notification_host_data(inventory, machine_credential, project, job_
     }


+@pytest.mark.django_db
+class TestAnsibleFactsSave:
+    current_call = 0
+
+    def test_update_hosts_deleted_host(self, inventory):
+        hosts = [Host.objects.create(inventory=inventory, name=f'foo{i}') for i in range(3)]
+        for host in hosts:
+            host.ansible_facts = {'foo': 'bar'}
+        last_pk = hosts[-1].pk
+        assert inventory.hosts.count() == 3
+        Host.objects.get(pk=last_pk).delete()
+        assert inventory.hosts.count() == 2
+        update_hosts(hosts)
+        assert inventory.hosts.count() == 2
+        for host in inventory.hosts.all():
+            host.refresh_from_db()
+            assert host.ansible_facts == {'foo': 'bar'}
+
+    def test_update_hosts_forever_deadlock(self, inventory, mocker):
+        hosts = [Host.objects.create(inventory=inventory, name=f'foo{i}') for i in range(3)]
+        for host in hosts:
+            host.ansible_facts = {'foo': 'bar'}
+        db_mock = mocker.patch('awx.main.tasks.facts.Host.objects.bulk_update')
+        db_mock.side_effect = OperationalError('deadlock detected')
+        with pytest.raises(OperationalError):
+            update_hosts(hosts)
+
+    def fake_bulk_update(self, host_list):
+        if self.current_call > 2:
+            return Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
+        self.current_call += 1
+        raise OperationalError('deadlock detected')
+
+    def test_update_hosts_resolved_deadlock(self, inventory, mocker):
+        hosts = [Host.objects.create(inventory=inventory, name=f'foo{i}') for i in range(3)]
+        for host in hosts:
+            host.ansible_facts = {'foo': 'bar'}
+        self.current_call = 0
+        mocker.patch('awx.main.tasks.facts.raw_update_hosts', new=self.fake_bulk_update)
+        update_hosts(hosts)
+        for host in inventory.hosts.all():
+            host.refresh_from_db()
+            assert host.ansible_facts == {'foo': 'bar'}
+
+
 @pytest.mark.django_db
 class TestLaunchConfig:
     def test_null_creation_from_prompts(self):
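The new `TestAnsibleFactsSave` cases exercise `update_hosts` against simulated PostgreSQL deadlocks: the save should be retried a few times and eventually give up by re-raising `OperationalError`. A minimal retry wrapper in the same spirit (a sketch of the pattern only; the real retry count and helper names in `awx.main.tasks.facts` may differ):

from django.db import OperationalError

def retry_on_deadlock(func, attempts=4):
    # Call func(); retry when the database reports a deadlock, then re-raise.
    for attempt in range(attempts):
        try:
            return func()
        except OperationalError as exc:
            if 'deadlock detected' not in str(exc) or attempt == attempts - 1:
                raise

# Usage sketch (names assumed for illustration):
# retry_on_deadlock(lambda: Host.objects.bulk_update(hosts, ['ansible_facts', 'ansible_facts_modified']))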
@@ -36,7 +36,9 @@ def test_SYSTEM_TASK_ABS_MEM_conversion(value, converted_value, mem_capacity):
     mock_settings.IS_K8S = True
     assert convert_mem_str_to_bytes(value) == converted_value
     assert get_corrected_memory(-1) == converted_value
-    assert get_mem_effective_capacity(-1) == mem_capacity
+    assert get_mem_effective_capacity(1, is_control_node=True) == mem_capacity
+    # SYSTEM_TASK_ABS_MEM should not effect memory and capacity for execution nodes
+    assert get_mem_effective_capacity(2147483648, is_control_node=False) == 20


 @pytest.mark.parametrize(
@@ -58,4 +60,6 @@ def test_SYSTEM_TASK_ABS_CPU_conversion(value, converted_value, cpu_capacity):
     mock_settings.SYSTEM_TASK_FORKS_CPU = 4
     assert convert_cpu_str_to_decimal_cpu(value) == converted_value
     assert get_corrected_cpu(-1) == converted_value
-    assert get_cpu_effective_capacity(-1) == cpu_capacity
+    assert get_cpu_effective_capacity(-1, is_control_node=True) == cpu_capacity
+    # SYSTEM_TASK_ABS_CPU should not effect cpu count and capacity for execution nodes
+    assert get_cpu_effective_capacity(2.0, is_control_node=False) == 8
@@ -23,7 +23,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
 from django.utils.dateparse import parse_datetime
 from django.utils.translation import gettext_lazy as _
 from django.utils.functional import cached_property
-from django.db import connection, transaction, ProgrammingError
+from django.db import connection, transaction, ProgrammingError, IntegrityError
 from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
 from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
 from django.db.models.query import QuerySet
@@ -768,14 +768,13 @@ def get_corrected_cpu(cpu_count): # formerlly get_cpu_capacity
     return cpu_count # no correction


-def get_cpu_effective_capacity(cpu_count):
+def get_cpu_effective_capacity(cpu_count, is_control_node=False):
     from django.conf import settings

-    cpu_count = get_corrected_cpu(cpu_count)
-
     settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
     env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
-
+    if is_control_node:
+        cpu_count = get_corrected_cpu(cpu_count)
     if env_forkcpu:
         forkcpu = int(env_forkcpu)
     elif settings_forkcpu:
@@ -834,6 +833,7 @@ def get_corrected_memory(memory):

     # Runner returns memory in bytes
     # so we convert memory from settings to bytes as well.
+
     if env_absmem is not None:
         return convert_mem_str_to_bytes(env_absmem)
     elif settings_absmem is not None:
@@ -842,14 +842,13 @@ def get_corrected_memory(memory):
     return memory


-def get_mem_effective_capacity(mem_bytes):
+def get_mem_effective_capacity(mem_bytes, is_control_node=False):
     from django.conf import settings

-    mem_bytes = get_corrected_memory(mem_bytes)
-
     settings_mem_mb_per_fork = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
     env_mem_mb_per_fork = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
-
+    if is_control_node:
+        mem_bytes = get_corrected_memory(mem_bytes)
     if env_mem_mb_per_fork:
         mem_mb_per_fork = int(env_mem_mb_per_fork)
     elif settings_mem_mb_per_fork:
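Both capacity helpers now apply the SYSTEM_TASK_ABS_CPU / SYSTEM_TASK_ABS_MEM corrections only when `is_control_node=True`, so overriding those settings no longer changes the reported capacity of execution nodes. The gist of the gate, reduced to a few lines (illustrative numbers and a hypothetical helper, not the full AWX formulas):

def effective_forks(detected, corrected, is_control_node=False, per_fork=1):
    # Only control nodes take the settings-based correction; execution
    # nodes report capacity from what was actually detected on the host.
    usable = corrected if is_control_node else detected
    return int(usable / per_fork)

# A control node honors the corrected value; an execution node ignores it.
assert effective_forks(detected=8, corrected=2, is_control_node=True) == 2
assert effective_forks(detected=8, corrected=2, is_control_node=False) == 8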
@@ -1165,13 +1164,24 @@ def create_partition(tblname, start=None):
     try:
         with transaction.atomic():
             with connection.cursor() as cursor:
+                cursor.execute(f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '{tblname}_{partition_label}');")
+                row = cursor.fetchone()
+                if row is not None:
+                    for val in row:  # should only have 1
+                        if val is True:
+                            logger.debug(f'Event partition table {tblname}_{partition_label} already exists')
+                            return
+
                 cursor.execute(
-                    f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '
-                    f'PARTITION OF {tblname} '
-                    f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');'
+                    f'CREATE TABLE {tblname}_{partition_label} (LIKE {tblname} INCLUDING DEFAULTS INCLUDING CONSTRAINTS); '
+                    f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
+                    f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
                 )
-    except ProgrammingError as e:
-        logger.debug(f'Caught known error due to existing partition: {e}')
+    except (ProgrammingError, IntegrityError) as e:
+        if 'already exists' in str(e):
+            logger.info(f'Caught known error due to partition creation race: {e}')
+        else:
+            raise


 def cleanup_new_process(func):
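The rewritten `create_partition` first checks `information_schema.tables`, then creates a detached table with `LIKE ... INCLUDING DEFAULTS INCLUDING CONSTRAINTS` and attaches it as a partition, and finally treats an "already exists" `ProgrammingError`/`IntegrityError` as a benign race between workers. A compact sketch of that check-then-attach flow against a DB-API cursor (assumed table names and a simplified error path, not the exact AWX code):

def ensure_partition(cursor, tblname, label, start_ts, end_ts):
    # Skip work if the partition already exists.
    cursor.execute(
        "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = %s);",
        (f"{tblname}_{label}",),
    )
    if cursor.fetchone()[0]:
        return
    # Create the table first, then attach it as a partition; attaching is
    # generally gentler on the parent table than CREATE TABLE ... PARTITION OF.
    cursor.execute(
        f"CREATE TABLE {tblname}_{label} (LIKE {tblname} INCLUDING DEFAULTS INCLUDING CONSTRAINTS); "
        f"ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{label} "
        f"FOR VALUES FROM ('{start_ts}') TO ('{end_ts}');"
    )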
@@ -283,6 +283,7 @@ class LogstashFormatter(LogstashFormatterBase):
             message.update(self.get_debug_fields(record))

         if settings.LOG_AGGREGATOR_TYPE == 'splunk':
-            # splunk messages must have a top level "event" key
-            message = {'event': message}
+            # splunk messages must have a top level "event" key when using the /services/collector/event receiver.
+            # The event receiver wont scan an event for a timestamp field therefore a time field must also be supplied containing epoch timestamp
+            message = {'time': record.created, 'event': message}
         return self.serialize(message)
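The Splunk change wraps the formatted record under a top-level `event` key and adds a `time` field carrying the record's epoch timestamp, which is the shape the HTTP Event Collector's /services/collector/event endpoint expects. A sketch of the resulting payload (field values invented for illustration):

import json
import time

record_created = time.time()  # what the logging framework attaches to each record
message = {'level': 'INFO', 'msg': 'job finished'}  # stand-in for the formatted fields

payload = {'time': record_created, 'event': message}
print(json.dumps(payload))
# e.g. {"time": 1699999999.5, "event": {"level": "INFO", "msg": "job finished"}}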
@@ -97,8 +97,6 @@ class SpecialInventoryHandler(logging.Handler):
         self.event_handler(dispatch_data)


-ColorHandler = logging.StreamHandler
-
 if settings.COLOR_LOGS is True:
     try:
         from logutils.colorize import ColorizingStreamHandler
@@ -133,3 +131,5 @@ if settings.COLOR_LOGS is True:
     except ImportError:
         # logutils is only used for colored logs in the dev environment
         pass
+else:
+    ColorHandler = logging.StreamHandler
@@ -175,7 +175,12 @@ class Licenser(object):
                 license.setdefault('pool_id', sub['pool']['id'])
                 license.setdefault('product_name', sub['pool']['productName'])
                 license.setdefault('valid_key', True)
-                license.setdefault('license_type', 'enterprise')
+                if sub['pool']['productId'].startswith('S'):
+                    license.setdefault('trial', True)
+                    license.setdefault('license_type', 'trial')
+                else:
+                    license.setdefault('trial', False)
+                    license.setdefault('license_type', 'enterprise')
                 license.setdefault('satellite', False)
                 # Use the nearest end date
                 endDate = parse_date(sub['endDate'])
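The licenser now branches on the pool's `productId`: SKUs starting with 'S' are treated as trials. Reduced to its essentials (illustrative product IDs and return values, not a real entitlement payload):

def classify_subscription(sub):
    # Mirrors the branch in the diff: 'S...' product IDs are marked as trials.
    if sub['pool']['productId'].startswith('S'):
        return {'trial': True, 'license_type': 'trial'}
    return {'trial': False, 'license_type': 'enterprise'}

assert classify_subscription({'pool': {'productId': 'SER0123'}}) == {'trial': True, 'license_type': 'trial'}
assert classify_subscription({'pool': {'productId': 'MCT3691'}}) == {'trial': False, 'license_type': 'enterprise'}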
@@ -287,7 +292,7 @@ class Licenser(object):
             license['productId'] = sub['product_id']
             license['quantity'] = int(sub['quantity'])
             license['support_level'] = sub['support_level']
-            license['usage'] = sub['usage']
+            license['usage'] = sub.get('usage')
             license['subscription_name'] = sub['name']
             license['subscriptionId'] = sub['subscription_id']
             license['accountNumber'] = sub['account_number']
@@ -255,6 +255,9 @@
       tags:
         - install_collections
        - install_roles
+      module_defaults:
+        ansible.builtin.command:
+          chdir: "{{ project_path | quote }}"

       # We combine our additional_galaxy_env into galaxy_task_env so that our values are preferred over anything a user would set
       environment: "{{ galaxy_task_env | combine(additional_galaxy_env) }}"
@@ -210,7 +210,7 @@ JOB_EVENT_WORKERS = 4

 # The number of seconds to buffer callback receiver bulk
 # writes in memory before flushing via JobEvent.objects.bulk_create()
-JOB_EVENT_BUFFER_SECONDS = 0.1
+JOB_EVENT_BUFFER_SECONDS = 1

 # The interval at which callback receiver statistics should be
 # recorded
@@ -327,7 +327,6 @@ INSTALLED_APPS = [
     'rest_framework',
     'django_extensions',
     'polymorphic',
-    'taggit',
     'social_django',
     'django_guid',
     'corsheaders',
@@ -454,7 +453,7 @@ RECEPTOR_SERVICE_ADVERTISEMENT_PERIOD = 60 # https://github.com/ansible/recepto
 EXECUTION_NODE_REMEDIATION_CHECKS = 60 * 30 # once every 30 minutes check if an execution node errors have been resolved

 # Amount of time dispatcher will try to reconnect to database for jobs and consuming new work
-DISPATCHER_DB_DOWNTOWN_TOLLERANCE = 40
+DISPATCHER_DB_DOWNTIME_TOLERANCE = 40

 BROKER_URL = 'unix:///var/run/redis/redis.sock'
 CELERYBEAT_SCHEDULE = {
@@ -471,12 +470,13 @@ CELERYBEAT_SCHEDULE = {
     'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
     'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
     'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
-    'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(days=1)},
+    'cleanup_host_metrics': {'task': 'awx.main.tasks.host_metrics.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
+    'host_metric_summary_monthly': {'task': 'awx.main.tasks.host_metrics.host_metric_summary_monthly', 'schedule': timedelta(hours=4)},
 }

 # Django Caching Configuration
 DJANGO_REDIS_IGNORE_EXCEPTIONS = True
-CACHES = {'default': {'BACKEND': 'django_redis.cache.RedisCache', 'LOCATION': 'unix:/var/run/redis/redis.sock?db=1'}}
+CACHES = {'default': {'BACKEND': 'awx.main.cache.AWXRedisCache', 'LOCATION': 'unix:///var/run/redis/redis.sock?db=1'}}

 # Social Auth configuration.
 SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
@@ -1049,9 +1049,17 @@ UI_NEXT = True
 # - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
 SUBSCRIPTION_USAGE_MODEL = ''

-# Host metrics cleanup - last time of the cleanup run (soft-deleting records)
+# Host metrics cleanup - last time of the task/command run
 CLEANUP_HOST_METRICS_LAST_TS = None
 # Host metrics cleanup - minimal interval between two cleanups in days
 CLEANUP_HOST_METRICS_INTERVAL = 30 # days
 # Host metrics cleanup - soft-delete HostMetric records with last_automation < [threshold] (in months)
-CLEANUP_HOST_METRICS_THRESHOLD = 12 # months
+CLEANUP_HOST_METRICS_SOFT_THRESHOLD = 12 # months
+# Host metrics cleanup
+# - delete HostMetric record with deleted=True and last_deleted < [threshold]
+# - also threshold for computing HostMetricSummaryMonthly (command/scheduled task)
+CLEANUP_HOST_METRICS_HARD_THRESHOLD = 36 # months
+
+# Host metric summary monthly task - last time of run
+HOST_METRIC_SUMMARY_TASK_LAST_TS = None
+HOST_METRIC_SUMMARY_TASK_INTERVAL = 7 # days
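With the renamed settings, soft deletion and hard deletion of host metrics use two different look-back windows (12 and 36 months), both measured against the start of the current month. A small sketch of how the two cutoffs relate (reusing the dateutil-based helper shape shown earlier; the exact query logic lives in the host_metrics tasks and is not reproduced here):

import datetime
from dateutil.relativedelta import relativedelta

CLEANUP_HOST_METRICS_SOFT_THRESHOLD = 12  # months; soft-delete records not automated since this cutoff
CLEANUP_HOST_METRICS_HARD_THRESHOLD = 36  # months; hard-delete soft-deleted records older than this cutoff

first_of_month = datetime.date.today().replace(day=1)
soft_cutoff = first_of_month - relativedelta(months=CLEANUP_HOST_METRICS_SOFT_THRESHOLD)
hard_cutoff = first_of_month - relativedelta(months=CLEANUP_HOST_METRICS_HARD_THRESHOLD)

# The hard cutoff always lies further in the past than the soft cutoff.
assert hard_cutoff < soft_cutoff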
@@ -28,8 +28,8 @@ SHELL_PLUS_PRINT_SQL = False

 # show colored logs in the dev environment
 # to disable this, set `COLOR_LOGS = False` in awx/settings/local_settings.py
-LOGGING['handlers']['console']['()'] = 'awx.main.utils.handlers.ColorHandler'  # noqa
 COLOR_LOGS = True
+LOGGING['handlers']['console']['()'] = 'awx.main.utils.handlers.ColorHandler'  # noqa

 ALLOWED_HOSTS = ['*']

@@ -87,7 +87,7 @@ def _update_user_orgs(backend, desired_org_state, orgs_to_create, user=None):
         is_member_expression = org_opts.get(user_type, None)
         remove_members = bool(org_opts.get('remove_{}'.format(user_type), remove))
         has_role = _update_m2m_from_expression(user, is_member_expression, remove_members)
-        desired_org_state[organization_name][role_name] = has_role
+        desired_org_state[organization_name][role_name] = desired_org_state[organization_name].get(role_name, False) or has_role


 def _update_user_teams(backend, desired_team_state, teams_to_create, user=None):
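This one-line change makes role assignment accumulative: a role already granted earlier in the pipeline (for example by the SAML attribute mapping) is no longer clobbered by a later `False` coming from the organization map. The effect, in isolation (hypothetical state dicts, not the real pipeline objects):

desired_org_state = {'o1_alias': {'admin_role': True}}

def set_role(state, org, role, has_role):
    # New behavior: once a role is True it stays True.
    state[org][role] = state[org].get(role, False) or has_role

set_role(desired_org_state, 'o1_alias', 'admin_role', False)   # admin stays granted
set_role(desired_org_state, 'o1_alias', 'member_role', True)   # member gets added
assert desired_org_state == {'o1_alias': {'admin_role': True, 'member_role': True}}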
@@ -637,3 +637,75 @@ class TestSAMLUserFlags:
         }

         assert expected == _check_flag(user, 'superuser', attributes, user_flags_settings)
+
+
+@pytest.mark.django_db
+def test__update_user_orgs_org_map_and_saml_attr():
+    """
+    This combines the action of two other tests where an org membership is defined both by
+    the ORGANIZATION_MAP and the SOCIAL_AUTH_SAML_ORGANIZATION_ATTR at the same time
+    """
+
+    # This data will make the user a member
+    class BackendClass:
+        s = {
+            'ORGANIZATION_MAP': {
+                'Default1': {
+                    'remove': True,
+                    'remove_admins': True,
+                    'users': 'foobar',
+                    'remove_users': True,
+                    'organization_alias': 'o1_alias',
+                }
+            }
+        }
+
+        def setting(self, key):
+            return self.s[key]
+
+    backend = BackendClass()
+
+    setting = {
+        'saml_attr': 'memberOf',
+        'saml_admin_attr': 'admins',
+        'saml_auditor_attr': 'auditors',
+        'remove': True,
+        'remove_admins': True,
+    }
+
+    # This data from the server will make the user an admin of the organization
+    kwargs = {
+        'username': 'foobar',
+        'uid': 'idp:cmeyers@redhat.com',
+        'request': {u'SAMLResponse': [], u'RelayState': [u'idp']},
+        'is_new': False,
+        'response': {
+            'session_index': '_0728f0e0-b766-0135-75fa-02842b07c044',
+            'idp_name': u'idp',
+            'attributes': {
+                'admins': ['Default1'],
+            },
+        },
+        'social': None,
+        'strategy': None,
+        'new_association': False,
+    }
+
+    this_user = User.objects.create(username='foobar')
+
+    with override_settings(SOCIAL_AUTH_SAML_ORGANIZATION_ATTR=setting):
+        desired_org_state = {}
+        orgs_to_create = []
+
+        # this should add user as an admin of the org
+        _update_user_orgs_by_saml_attr(backend, desired_org_state, orgs_to_create, **kwargs)
+        assert desired_org_state['o1_alias']['admin_role'] is True
+
+        assert set(orgs_to_create) == set(['o1_alias'])
+
+        # this should add user as a member of the org without reverting the admin status
+        _update_user_orgs(backend, desired_org_state, orgs_to_create, this_user)
+        assert desired_org_state['o1_alias']['member_role'] is True
+        assert desired_org_state['o1_alias']['admin_role'] is True
+
+        assert set(orgs_to_create) == set(['o1_alias'])
@@ -19,7 +19,7 @@
           <input type="text" name="username" maxlength="100"
                  autocapitalize="off"
                  autocorrect="off" class="form-control textinput textInput"
-                 id="id_username" required autofocus
+                 id="id_username" autocomplete="off" required autofocus
                  {% if form.username.value %}value="{{ form.username.value }}"{% endif %}>
           {% if form.username.errors %}
             <p class="text-error">{{ form.username.errors|striptags }}</p>
@@ -31,7 +31,8 @@
         <div class="form-group">
           <label for="id_password">Password:</label>
           <input type="password" name="password" maxlength="100" autocapitalize="off"
-                 autocorrect="off" class="form-control textinput textInput" id="id_password" required>
+                 autocorrect="off" class="form-control textinput textInput" id="id_password"
+                 autocomplete="off" required>
           {% if form.password.errors %}
             <p class="text-error">{{ form.password.errors|striptags }}</p>
           {% endif %}
awx/ui/package-lock.json (generated, 1771 changed lines): file diff suppressed because it is too large.
@@ -33,12 +33,12 @@
     "styled-components": "5.3.6"
   },
   "devDependencies": {
-    "@babel/core": "^7.16.10",
-    "@babel/eslint-parser": "^7.16.5",
-    "@babel/eslint-plugin": "^7.16.5",
-    "@babel/plugin-syntax-jsx": "7.16.7",
-    "@babel/polyfill": "^7.8.7",
-    "@babel/preset-react": "7.16.7",
+    "@babel/core": "^7.22.9",
+    "@babel/eslint-parser": "^7.22.9",
+    "@babel/eslint-plugin": "^7.22.10",
+    "@babel/plugin-syntax-jsx": "^7.22.5",
+    "@babel/polyfill": "^7.12.1",
+    "@babel/preset-react": "^7.22.5",
     "@cypress/instrument-cra": "^1.4.0",
     "@lingui/cli": "^3.7.1",
     "@lingui/loader": "3.15.0",
@@ -5,7 +5,11 @@
     <title data-cy="migration-title">{{ title }}</title>
     <meta
       http-equiv="Content-Security-Policy"
-      content="default-src 'self'; connect-src 'self' ws: wss:; style-src 'self' 'unsafe-inline'; script-src 'self' 'nonce-{{ csp_nonce }}' *.pendo.io; img-src 'self' *.pendo.io data:;"
+      content="default-src 'self';
+               connect-src 'self' ws: wss:;
+               style-src 'self' 'unsafe-inline';
+               script-src 'self' 'nonce-{{ csp_nonce }}' *.pendo.io;
+               img-src 'self' *.pendo.io data:;"
     />
     <meta charset="utf-8">
     <meta http-equiv="X-UA-Compatible" content="IE=edge" />
@@ -33,6 +33,7 @@ import Roles from './models/Roles';
 import Root from './models/Root';
 import Schedules from './models/Schedules';
 import Settings from './models/Settings';
+import SubscriptionUsage from './models/SubscriptionUsage';
 import SystemJobs from './models/SystemJobs';
 import SystemJobTemplates from './models/SystemJobTemplates';
 import Teams from './models/Teams';
@@ -82,6 +83,7 @@ const RolesAPI = new Roles();
 const RootAPI = new Root();
 const SchedulesAPI = new Schedules();
 const SettingsAPI = new Settings();
+const SubscriptionUsageAPI = new SubscriptionUsage();
 const SystemJobsAPI = new SystemJobs();
 const SystemJobTemplatesAPI = new SystemJobTemplates();
 const TeamsAPI = new Teams();
@@ -132,6 +134,7 @@ export {
   RootAPI,
   SchedulesAPI,
   SettingsAPI,
+  SubscriptionUsageAPI,
   SystemJobsAPI,
   SystemJobTemplatesAPI,
   TeamsAPI,
@@ -6,5 +6,20 @@ class ConstructedInventories extends InstanceGroupsMixin(Base) {
     super(http);
     this.baseUrl = 'api/v2/constructed_inventories/';
   }
+
+  async readConstructedInventoryOptions(id, method) {
+    const {
+      data: { actions },
+    } = await this.http.options(`${this.baseUrl}${id}/`);
+
+    if (actions[method]) {
+      return actions[method];
+    }
+
+    throw new Error(
+      `You have insufficient access to this Constructed Inventory.
+       Please contact your system administrator if there is an issue with your access.`
+    );
+  }
 }
 export default ConstructedInventories;
awx/ui/src/api/models/ConstructedInventories.test.js (new file, 51 lines)
@@ -0,0 +1,51 @@
+import ConstructedInventories from './ConstructedInventories';
+
+describe('ConstructedInventoriesAPI', () => {
+  const constructedInventoryId = 1;
+  const constructedInventoryMethod = 'PUT';
+  let ConstructedInventoriesAPI;
+  let mockHttp;
+
+  beforeEach(() => {
+    const optionsPromise = () =>
+      Promise.resolve({
+        data: {
+          actions: {
+            PUT: {},
+          },
+        },
+      });
+    mockHttp = {
+      options: jest.fn(optionsPromise),
+    };
+    ConstructedInventoriesAPI = new ConstructedInventories(mockHttp);
+  });
+
+  afterEach(() => {
+    jest.resetAllMocks();
+  });
+
+  test('readConstructedInventoryOptions calls options with the expected params', async () => {
+    await ConstructedInventoriesAPI.readConstructedInventoryOptions(
+      constructedInventoryId,
+      constructedInventoryMethod
+    );
+    expect(mockHttp.options).toHaveBeenCalledTimes(1);
+    expect(mockHttp.options).toHaveBeenCalledWith(
+      `api/v2/constructed_inventories/${constructedInventoryId}/`
+    );
+  });
+
+  test('readConstructedInventory should throw an error if action method is missing', async () => {
+    try {
+      await ConstructedInventoriesAPI.readConstructedInventoryOptions(
+        constructedInventoryId,
+        'POST'
+      );
+    } catch (error) {
+      expect(error.message).toContain(
+        'You have insufficient access to this Constructed Inventory.'
+      );
+    }
+  });
+});
awx/ui/src/api/models/SubscriptionUsage.js (new file, 16 lines)
@@ -0,0 +1,16 @@
+import Base from '../Base';
+
+class SubscriptionUsage extends Base {
+  constructor(http) {
+    super(http);
+    this.baseUrl = 'api/v2/host_metric_summary_monthly/';
+  }
+
+  readSubscriptionUsageChart(dateRange) {
+    return this.http.get(
+      `${this.baseUrl}?date__gte=${dateRange}&order_by=date&page_size=100`
+    );
+  }
+}
+
+export default SubscriptionUsage;
Some files were not shown because too many files have changed in this diff.