Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: 142 commits, 23.0.0...feature_ad
Commits (SHA1):

a689f87f1c, 7501ad6836, dd00bbba42, fe6bac6d9e, 87abbd4b10, fb04e5d9f6, 478e2cb28d, 2ac304d289, 3e5851f3af, adb1b12074,
8fae20c48a, ec364cc60e, 1cfd51764e, 0b8fedfd04, 72a8173462, 873b1fbe07, 1f36e84b45, 8c4bff2b86, 14f636af84, 0057c8daf6,
d8a28b3c06, 40c2b700fe, 71d548f9e5, dd98963f86, 4b467dfd8d, 456b56778e, 5b3cb20f92, d7086a3c88, 21e7ab078c, 946ca0b3b8,
b831dbd608, 943e455f9d, 53bc88abe2, 3b4d95633e, 93c329d9d5, f4c53aaf22, 333ef76cbd, fc0b58fd04, bef0a8b23a, a5f33456b6,
21fb395912, 44255f378d, 71a6d48612, b7e5f5d1e1, b6b167627c, 20f5b255c9, 3bcf46555d, 94703ccf84, 6cdea1909d, f133580172,
4b90a7fcd1, 95bfedad5b, 1081f2d8e9, c4ab54d7f3, bcefcd8cf8, 0bd057529d, a82c03e2e2, 447ac77535, 72d0928f1b, 6d727d4bc4,
6040e44d9d, b99ce5cd62, ba8a90c55f, 7ee2172517, 07f49f5925, 376993077a, 48f586bac4, 16dab57c63, 75a71492fd, e9bd99c1ff,
56878b4910, 19ca480078, 64eb963025, dc34d0887a, 160634fb6f, 9745058546, c97a48b165, 259bca0113, 92c2b4e983, 127a0cff23,
a0ef25006a, 50c98a52f7, 4008d72af6, e72e9f94b9, 9d60b0b9c6, 05b58c4df6, b1b960fd17, 3c8f71e559, f5922f76fa, 05582702c6,
1d340c5b4e, 15925f1416, 6e06a20cca, bb3acbb8ad, a88e47930c, a0d4515ba4, 770cc10a78, 159dd62d84, 640e5db9c6, 9ed527eb26,
29ad6e1eaa, 3e607f8964, c9d1a4d063, a290b082db, 6d3c22e801, 1f91773a3c, 7b846e1e49, f7a2de8a07, 194c214f03, 77e30dd4b2,
9d7421b9bc, 3b8e662916, aa3228eec9, 7b0598c7d8, 49832d6379, 8feeb5f1fa, 56230ba5d1, 480aaeace5, 3eaea396be, deef8669c9,
63223a2cc7, a28bc2eb3f, 09168e5832, 6df1de4262, e072bb7668, ec579fd637, b95d521162, d03a6a809d, 4466976e10, 5733f78fd8,
20fc7c702a, 6ce5799689, dc81aa46d0, ab3ceaecad, 1bb4240a6b, 5e105c2cbd, cdb4f0b7fd, cf1e448577, 224e9e0324, 660dab439b,
5ce2055431, 951bd1cc87
.github/actions/awx_devel_image/action.yml (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
name: Setup images for AWX
description: Builds new awx_devel image
inputs:
  github-token:
    description: GitHub Token for registry access
    required: true
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

    - name: Log in to registry
      shell: bash
      run: |
        echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

    - name: Pre-pull latest devel image to warm cache
      shell: bash
      run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}

    - name: Build image for current source checkout
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        make docker-compose-build
.github/actions/run_awx_devel/action.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: Run AWX docker-compose
description: Runs AWX with `make docker-compose`
inputs:
  github-token:
    description: GitHub Token to pass to awx_devel_image
    required: true
  build-ui:
    description: Should the UI be built?
    required: false
    default: false
    type: boolean
outputs:
  ip:
    description: The IP of the tools_awx_1 container
    value: ${{ steps.data.outputs.ip }}
  admin-token:
    description: OAuth token for admin user
    value: ${{ steps.data.outputs.admin_token }}
runs:
  using: composite
  steps:
    - name: Build awx_devel image for running checks
      uses: ./.github/actions/awx_devel_image
      with:
        github-token: ${{ inputs.github-token }}

    - name: Upgrade ansible-core
      shell: bash
      run: python3 -m pip install --upgrade ansible-core

    - name: Install system deps
      shell: bash
      run: sudo apt-get install -y gettext

    - name: Start AWX
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        COMPOSE_UP_OPTS="-d" \
        make docker-compose

    - name: Update default AWX password
      shell: bash
      run: |
        SECONDS=0
        while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]; do
          if [[ $SECONDS -gt 600 ]]; then
            echo "Timing out, AWX never came up"
            exit 1
          fi
          echo "Waiting for AWX..."
          sleep 5
        done
        echo "AWX is up, updating the password..."
        docker exec -i tools_awx_1 sh <<-EOSH
        awx-manage update_password --username=admin --password=password
        EOSH

    - name: Build UI
      # This must be a string comparison in composite actions:
      # https://github.com/actions/runner/issues/2238
      if: ${{ inputs.build-ui == 'true' }}
      shell: bash
      run: |
        docker exec -i tools_awx_1 sh <<-EOSH
        make ui-devel
        EOSH

    - name: Get instance data
      id: data
      shell: bash
      run: |
        AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
        ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
        echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
        echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
.github/actions/upload_awx_devel_logs/action.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
inputs:
  log-filename:
    description: "*Unique* name of the log file"
    required: true
runs:
  using: composite
  steps:
    - name: Get AWX logs
      shell: bash
      run: |
        docker logs tools_awx_1 > ${{ inputs.log-filename }}

    - name: Upload AWX logs as artifact
      uses: actions/upload-artifact@v3
      with:
        name: docker-compose-logs
        path: ${{ inputs.log-filename }}
.github/triage_replies.md (vendored, 4 changed lines)
@@ -7,8 +7,8 @@

## PRs/Issues

### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
### Visit the Forum or Matrix
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?

### Denied Submission
.github/workflows/ci.yml (vendored, 169 changed lines)
@@ -11,6 +11,7 @@ jobs:
  common-tests:
    name: ${{ matrix.tests.name }}
    runs-on: ubuntu-latest
    timeout-minutes: 60
    permissions:
      packages: write
      contents: read
@@ -20,6 +21,8 @@ jobs:
        tests:
          - name: api-test
            command: /start_tests.sh
          - name: api-migrations
            command: /start_tests.sh test_migrations
          - name: api-lint
            command: /var/lib/awx/venv/awx/bin/tox -e linters
          - name: api-swagger
@@ -35,29 +38,42 @@ jobs:
          - name: ui-test-general
            command: make ui-test-general
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Build awx_devel image for running checks
        uses: ./.github/actions/awx_devel_image
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run check ${{ matrix.tests.name }}
        run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
        run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner

  dev-env:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - uses: ./.github/actions/run_awx_devel
        id: awx
        with:
          build-ui: false
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Run smoke test
        run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
        run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v

  awx-operator:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Checkout awx
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          path: awx

      - name: Checkout awx-operator
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          repository: ansible/awx-operator
          path: awx-operator
@@ -67,7 +83,7 @@ jobs:
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v2
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

@@ -99,10 +115,11 @@ jobs:
  collection-sanity:
    name: awx_collection sanity
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
      - name: Upgrade ansible-core
@@ -114,3 +131,139 @@ jobs:
          # needed due to cgroupsv2. This is fixed, but a stable release
          # with the fix has not been made yet.
          ANSIBLE_TEST_PREFER_PODMAN: 1

  collection-integration:
    name: awx_collection integration
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        target-regex:
          - name: a-h
            regex: ^[a-h]
          - name: i-p
            regex: ^[i-p]
          - name: r-z0-9
            regex: ^[r-z0-9]
    steps:
      - uses: actions/checkout@v3

      - uses: ./.github/actions/run_awx_devel
        id: awx
        with:
          build-ui: false
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Install dependencies for running tests
        run: |
          python3 -m pip install -e ./awxkit/
          python3 -m pip install -r awx_collection/requirements.txt

      - name: Run integration tests
        run: |
          echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
          echo '[general]' > ~/.tower_cli.cfg
          echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
          echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
          echo 'verify_ssl = false' >> ~/.tower_cli.cfg
          TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
          make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
        env:
          ANSIBLE_TEST_PREFER_PODMAN: 1

      # Upload coverage report as artifact
      - uses: actions/upload-artifact@v3
        if: always()
        with:
          name: coverage-${{ matrix.target-regex.name }}
          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/

      - uses: ./.github/actions/upload_awx_devel_logs
        if: always()
        with:
          log-filename: collection-integration-${{ matrix.target-regex.name }}.log

  collection-integration-coverage-combine:
    name: combine awx_collection integration coverage
    runs-on: ubuntu-latest
    timeout-minutes: 10
    needs:
      - collection-integration
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Upgrade ansible-core
        run: python3 -m pip install --upgrade ansible-core

      - name: Download coverage artifacts
        uses: actions/download-artifact@v3
        with:
          path: coverage

      - name: Combine coverage
        run: |
          make COLLECTION_VERSION=100.100.100-git install_collection
          mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
          cd coverage
          for i in coverage-*; do
            cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
          done
          cd ~/.ansible/collections/ansible_collections/awx/awx
          ansible-test coverage combine --requirements
          ansible-test coverage html
          echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          ansible-test coverage report >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
          echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
          echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY

      # This is a huge hack, there's no official action for removing artifacts currently.
      # Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
      # steps, so we have to use github-script to get them.
      #
      # The advantage of doing this, though, is that we save on artifact storage space.

      - name: Get secret artifact runtime URL
        uses: actions/github-script@v6
        id: get-runtime-url
        with:
          result-encoding: string
          script: |
            const { ACTIONS_RUNTIME_URL } = process.env;
            return ACTIONS_RUNTIME_URL;

      - name: Get secret artifact runtime token
        uses: actions/github-script@v6
        id: get-runtime-token
        with:
          result-encoding: string
          script: |
            const { ACTIONS_RUNTIME_TOKEN } = process.env;
            return ACTIONS_RUNTIME_TOKEN;

      - name: Remove intermediary artifacts
        env:
          ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
          ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
        run: |
          echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
          artifacts=$(
            curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
              ${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
              | jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
          )

          for artifact in $artifacts; do
            curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
          done

      - name: Upload coverage report as artifact
        uses: actions/upload-artifact@v3
        with:
          name: awx-collection-integration-coverage-html
          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
5
.github/workflows/devel_images.yml
vendored
@@ -12,11 +12,12 @@ jobs:
|
||||
push:
|
||||
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
@@ -28,7 +29,7 @@ jobs:
|
||||
OWNER: '${{ github.repository_owner }}'
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
|
||||
17
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Docsite CI
|
||||
on:
|
||||
pull_request:
|
||||
jobs:
|
||||
docsite-build:
|
||||
name: docsite test build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: install tox
|
||||
run: pip install tox
|
||||
|
||||
- name: Assure docs can be built
|
||||
run: tox -e docs
|
||||
54
.github/workflows/e2e_test.yml
vendored
@@ -19,41 +19,20 @@ jobs:
|
||||
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
- uses: ./.github/actions/run_awx_devel
|
||||
id: awx
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Install system deps
|
||||
run: sudo apt-get install -y gettext
|
||||
|
||||
- name: Log in to registry
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
|
||||
- name: Pre-pull image to warm build cache
|
||||
run: |
|
||||
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
|
||||
|
||||
- name: Build UI
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
|
||||
|
||||
- name: Start AWX
|
||||
run: |
|
||||
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
|
||||
build-ui: true
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Pull awx_cypress_base image
|
||||
run: |
|
||||
docker pull quay.io/awx/awx_cypress_base:latest
|
||||
|
||||
- name: Checkout test project
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/tower-qa
|
||||
ssh-key: ${{ secrets.QA_REPO_KEY }}
|
||||
@@ -65,18 +44,6 @@ jobs:
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
docker build -t awx-pf-tests .
|
||||
|
||||
- name: Update default AWX password
|
||||
run: |
|
||||
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
|
||||
do
|
||||
echo "Waiting for AWX..."
|
||||
sleep 5;
|
||||
done
|
||||
echo "AWX is up, updating the password..."
|
||||
docker exec -i tools_awx_1 sh <<-EOSH
|
||||
awx-manage update_password --username=admin --password=password
|
||||
EOSH
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
|
||||
@@ -86,7 +53,7 @@ jobs:
|
||||
export COMMIT_INFO_SHA=$GITHUB_SHA
|
||||
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
|
||||
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
|
||||
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
|
||||
AWX_IP=${{ steps.awx.outputs.ip }}
|
||||
printenv > .env
|
||||
echo "Executing tests:"
|
||||
docker run \
|
||||
@@ -102,8 +69,7 @@ jobs:
|
||||
-w /e2e \
|
||||
awx-pf-tests run --project .
|
||||
|
||||
- name: Save AWX logs
|
||||
uses: actions/upload-artifact@v2
|
||||
- uses: ./.github/actions/upload_awx_devel_logs
|
||||
if: always()
|
||||
with:
|
||||
name: AWX-logs-${{ matrix.job }}
|
||||
path: make-docker-compose-output.log
|
||||
log-filename: e2e-${{ matrix.job }}.log
|
||||
|
||||
@@ -9,6 +9,7 @@ on:
|
||||
jobs:
|
||||
push:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
|
||||
4
.github/workflows/label_issue.yml
vendored
@@ -13,6 +13,7 @@ permissions:
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue
|
||||
|
||||
steps:
|
||||
@@ -26,9 +27,10 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label Issue - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
4
.github/workflows/label_pr.yml
vendored
@@ -14,6 +14,7 @@ permissions:
|
||||
jobs:
|
||||
triage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR
|
||||
|
||||
steps:
|
||||
@@ -25,9 +26,10 @@ jobs:
|
||||
|
||||
community:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
name: Label PR - Community
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install python requests
|
||||
run: pip install requests
|
||||
|
||||
1
.github/workflows/pr_body_check.yml
vendored
@@ -10,6 +10,7 @@ jobs:
|
||||
if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
|
||||
name: Scan PR description for semantic versioning keywords
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
|
||||
15
.github/workflows/promote.yml
vendored
@@ -13,17 +13,18 @@ permissions:
|
||||
|
||||
jobs:
|
||||
promote:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
@@ -40,9 +41,13 @@ jobs:
|
||||
if: ${{ github.repository_owner != 'ansible' }}
|
||||
|
||||
- name: Build collection and publish to galaxy
|
||||
env:
|
||||
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
|
||||
COLLECTION_VERSION: ${{ github.event.release.tag_name }}
|
||||
COLLECTION_TEMPLATE_VERSION: true
|
||||
run: |
|
||||
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
|
||||
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
make build_collection
|
||||
if [ "$(curl -L --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
|
||||
echo "Galaxy release already done"; \
|
||||
else \
|
||||
ansible-galaxy collection publish \
|
||||
|
||||
9
.github/workflows/stage.yml
vendored
@@ -23,6 +23,7 @@ jobs:
|
||||
stage:
|
||||
if: endsWith(github.repository, '/awx')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 90
|
||||
permissions:
|
||||
packages: write
|
||||
contents: write
|
||||
@@ -44,7 +45,7 @@ jobs:
|
||||
exit 0
|
||||
|
||||
- name: Checkout awx
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
path: awx
|
||||
|
||||
@@ -52,18 +53,18 @@ jobs:
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
- name: Checkout awx-logos
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ansible/awx-logos
|
||||
path: awx-logos
|
||||
|
||||
- name: Checkout awx-operator
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: ${{ github.repository_owner }}/awx-operator
|
||||
path: awx-operator
|
||||
|
||||
1
.github/workflows/update_dependabot_prs.yml
vendored
@@ -9,6 +9,7 @@ jobs:
|
||||
name: Update Dependabot Prs
|
||||
if: contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'component:ui')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
|
||||
steps:
|
||||
- name: Checkout branch
|
||||
|
||||
5
.github/workflows/upload_schema.yml
vendored
@@ -13,17 +13,18 @@ on:
|
||||
jobs:
|
||||
push:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get python version from Makefile
|
||||
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
|
||||
|
||||
- name: Install python ${{ env.py_version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.py_version }}
|
||||
|
||||
|
||||
4
.gitignore
vendored
@@ -165,3 +165,7 @@ use_dev_supervisor.txt
|
||||
|
||||
awx/ui_next/src
|
||||
awx/ui_next/build
|
||||
|
||||
# Docs build stuff
|
||||
docs/docsite/build/
|
||||
_readthedocs/
|
||||
|
||||
5
.gitleaks.toml
Normal file
@@ -0,0 +1,5 @@
|
||||
[allowlist]
|
||||
description = "Documentation contains example secrets and passwords"
|
||||
paths = [
|
||||
"docs/docsite/rst/administration/oauth2_token_auth.rst",
|
||||
]
|
||||
5
.pip-tools.toml
Normal file
@@ -0,0 +1,5 @@
|
||||
[tool.pip-tools]
|
||||
resolver = "backtracking"
|
||||
allow-unsafe = true
|
||||
strip-extras = true
|
||||
quiet = true
|
||||
16
.readthedocs.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
# Read the Docs configuration file
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
version: 2
|
||||
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: >-
|
||||
3.11
|
||||
commands:
|
||||
- pip install --user tox
|
||||
- python3 -m tox -e docs --notest -v
|
||||
- python3 -m tox -e docs --skip-pkg-install -q
|
||||
- mkdir -p _readthedocs/html/
|
||||
- mv docs/docsite/build/html/* _readthedocs/html/
|
||||
@@ -10,6 +10,7 @@ ignore: |
|
||||
tools/docker-compose/_sources
|
||||
# django template files
|
||||
awx/api/templates/instance_install_bundle/**
|
||||
.readthedocs.yaml
|
||||
|
||||
extends: default
|
||||
|
||||
|
||||
23
Makefile
@@ -6,6 +6,7 @@ DOCKER_COMPOSE ?= docker-compose
|
||||
OFFICIAL ?= no
|
||||
NODE ?= node
|
||||
NPM_BIN ?= npm
|
||||
KIND_BIN ?= $(shell which kind)
|
||||
CHROMIUM_BIN=/tmp/chrome-linux/chrome
|
||||
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
MANAGEMENT_COMMAND ?= awx-manage
|
||||
@@ -78,7 +79,7 @@ I18N_FLAG_FILE = .i18n_built
|
||||
sdist \
|
||||
ui-release ui-devel \
|
||||
VERSION PYTHON_VERSION docker-compose-sources \
|
||||
.git/hooks/pre-commit github_ci_setup github_ci_runner
|
||||
.git/hooks/pre-commit
|
||||
|
||||
clean-tmp:
|
||||
rm -rf tmp/
|
||||
@@ -323,21 +324,16 @@ test:
|
||||
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
|
||||
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
|
||||
|
||||
## Login to Github container image registry, pull image, then build image.
|
||||
github_ci_setup:
|
||||
# GITHUB_ACTOR is automatic github actions env var
|
||||
# CI_GITHUB_TOKEN is defined in .github files
|
||||
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
|
||||
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
|
||||
$(MAKE) docker-compose-build
|
||||
test_migrations:
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
. $(VENV_BASE)/awx/bin/activate; \
|
||||
fi; \
|
||||
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
|
||||
|
||||
## Runs AWX_DOCKER_CMD inside a new docker container.
|
||||
docker-runner:
|
||||
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
|
||||
|
||||
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
|
||||
github_ci_runner: github_ci_setup docker-runner
|
||||
|
||||
test_collection:
|
||||
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
|
||||
if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -383,7 +379,7 @@ test_collection_sanity:
|
||||
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
|
||||
|
||||
test_collection_integration: install_collection
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
|
||||
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
|
||||
|
||||
test_unit:
|
||||
@if [ "$(VENV_BASE)" ]; then \
|
||||
@@ -664,6 +660,9 @@ awx-kube-dev-build: Dockerfile.kube-dev
|
||||
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
|
||||
|
||||
|
||||
kind-dev-load: awx-kube-dev-build
|
||||
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
|
||||
|
||||
# Translation TASKS
|
||||
# --------------------------------------
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[](https://github.com/ansible/awx/actions/workflows/ci.yml) [](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [](https://github.com/ansible/awx/blob/devel/LICENSE.md) [](https://groups.google.com/g/awx-project)
|
||||
[](https://libera.chat)
|
||||
[](https://chat.ansible.im/#/welcome) [](https://forum.ansible.com)
|
||||
|
||||
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
|
||||
|
||||
@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
|
||||
Code of Conduct
|
||||
---------------
|
||||
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
|
||||
|
||||
Get Involved
|
||||
------------
|
||||
|
||||
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
|
||||
|
||||
- Join the `#ansible-awx` channel on irc.libera.chat
|
||||
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)
|
||||
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
|
||||
- Join the [Ansible Community Forum](https://forum.ansible.com)
|
||||
|
||||
@@ -52,39 +52,14 @@ try:
except ImportError:  # pragma: no cover
    MODE = 'production'

import hashlib

try:
    import django  # noqa: F401

    HAS_DJANGO = True
except ImportError:
    HAS_DJANGO = False
    pass
else:
    from django.db.backends.base import schema
    from django.db.models import indexes
    from django.db.backends.utils import names_digest
    from django.db import connection

if HAS_DJANGO is True:
    # See upgrade blocker note in requirements/README.md
    try:
        names_digest('foo', 'bar', 'baz', length=8)
    except ValueError:

        def names_digest(*args, length):
            """
            Generate a 32-bit digest of a set of arguments that can be used to shorten
            identifying names. Support for use in FIPS environments.
            """
            h = hashlib.md5(usedforsecurity=False)
            for arg in args:
                h.update(arg.encode())
            return h.hexdigest()[:length]

        schema.names_digest = names_digest
        indexes.names_digest = names_digest


def find_commands(management_dir):
    # Modified version of function from django/core/management/__init__.py.
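For reference, the FIPS-friendly digest shown in the hunk above can be exercised on its own. A minimal sketch using only the standard library; the example arguments are illustrative, the function body comes from the hunk:

```python
import hashlib


def names_digest(*args, length):
    """Shorten a set of identifying names into a fixed-length digest."""
    # usedforsecurity=False marks this MD5 use as non-cryptographic, which FIPS builds allow.
    h = hashlib.md5(usedforsecurity=False)
    for arg in args:
        h.update(arg.encode())
    return h.hexdigest()[:length]


# Example: derive an 8-character suffix, as the try/except probe in the hunk does.
print(names_digest('foo', 'bar', 'baz', length=8))
```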
@@ -3233,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
        if get_field_from_model_or_attrs('host_config_key') and not inventory:
            raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})

        prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
        prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
        if project is None:
            raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
        elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
@@ -1,4 +1,4 @@
---
collections:
  - name: ansible.receptor
    version: 2.0.0
    version: 2.0.2
@@ -128,6 +128,10 @@ logger = logging.getLogger('awx.api.views')
|
||||
|
||||
|
||||
def unpartitioned_event_horizon(cls):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
|
||||
if not cursor.fetchone():
|
||||
return 0
|
||||
with connection.cursor() as cursor:
|
||||
try:
|
||||
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
|
||||
|
||||
@@ -418,6 +418,10 @@ class SettingsWrapper(UserSettingsHolder):
|
||||
"""Get value while accepting the in-memory cache if key is available"""
|
||||
with _ctit_db_wrapper(trans_safe=True):
|
||||
return self._get_local(name)
|
||||
# If the last line did not return, that means we hit a database error
|
||||
# in that case, we should not have a local cache value
|
||||
# thus, return empty as a signal to use the default
|
||||
return empty
|
||||
|
||||
def __getattr__(self, name):
|
||||
value = empty
|
||||
|
||||
@@ -13,6 +13,7 @@ from unittest import mock
|
||||
from django.conf import LazySettings
|
||||
from django.core.cache.backends.locmem import LocMemCache
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.db.utils import Error as DBError
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
import pytest
|
||||
|
||||
@@ -331,3 +332,18 @@ def test_in_memory_cache_works(settings):
|
||||
with mock.patch.object(settings, '_get_local') as mock_get:
|
||||
assert settings.AWX_VAR == 'DEFAULT'
|
||||
mock_get.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.defined_in_file(AWX_VAR=[])
|
||||
def test_getattr_with_database_error(settings):
|
||||
"""
|
||||
If a setting is defined via the registry and has a null-ish default which is not None
|
||||
then referencing that setting during a database outage should give that default
|
||||
this is regression testing for a bug where it would return None
|
||||
"""
|
||||
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
|
||||
settings._awx_conf_memoizedcache.clear()
|
||||
|
||||
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
|
||||
mock_ensure.side_effect = DBError('for test')
|
||||
assert settings.AWX_VAR == []
|
||||
|
||||
@@ -79,7 +79,6 @@ __all__ = [
|
||||
'get_user_queryset',
|
||||
'check_user_access',
|
||||
'check_user_access_with_errors',
|
||||
'user_accessible_objects',
|
||||
'consumer_access',
|
||||
]
|
||||
|
||||
@@ -136,10 +135,6 @@ def register_access(model_class, access_class):
|
||||
access_registry[model_class] = access_class
|
||||
|
||||
|
||||
def user_accessible_objects(user, role_name):
|
||||
return ResourceMixin._accessible_objects(User, user, role_name)
|
||||
|
||||
|
||||
def get_user_queryset(user, model_class):
|
||||
"""
|
||||
Return a queryset for the given model_class containing only the instances
|
||||
|
||||
@@ -694,16 +694,18 @@ register(
|
||||
category_slug='logging',
|
||||
)
|
||||
register(
|
||||
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
|
||||
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
|
||||
field_class=fields.IntegerField,
|
||||
default=1,
|
||||
default=131072,
|
||||
min_value=1,
|
||||
label=_('Maximum disk persistence for external log aggregation (in GB)'),
|
||||
label=_('Maximum number of messages that can be stored in the log action queue'),
|
||||
help_text=_(
|
||||
'Amount of data to store (in gigabytes) during an outage of '
|
||||
'the external log aggregator (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. '
|
||||
'Notably, this is used for the rsyslogd main queue (for input messages).'
|
||||
'Defines how large the rsyslog action queue can grow in number of messages '
|
||||
'stored. This can have an impact on memory utilization. When the queue '
|
||||
'reaches 75% of this number, the queue will start writing to disk '
|
||||
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
|
||||
'DEBUG messages will start to be discarded (queue.discardMark with '
|
||||
'queue.discardSeverity=5).'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
@@ -718,8 +720,7 @@ register(
|
||||
'Amount of data to store (in gigabytes) if an rsyslog action takes time '
|
||||
'to process an incoming message (defaults to 1). '
|
||||
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
|
||||
'Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified '
|
||||
'by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
|
||||
),
|
||||
category=_('Logging'),
|
||||
category_slug='logging',
|
||||
|
||||
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote

from django.utils.translation import gettext_lazy as _
import requests
import base64
import binascii


conjur_inputs = {
@@ -50,6 +52,13 @@ conjur_inputs = {
}


def _is_base64(s: str) -> bool:
    try:
        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
    except binascii.Error:
        return False


def conjur_backend(**kwargs):
    url = kwargs['url']
    api_key = kwargs['api_key']
@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
    token = resp.content.decode('utf-8')

    lookup_kwargs = {
        'headers': {'Authorization': 'Token token="{}"'.format(token)},
        'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
        'allow_redirects': False,
    }
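The Conjur hunk above base64-encodes the token only when it is not already valid base64 before placing it in the Authorization header. A standalone sketch of that decision; the authorization_header helper is illustrative, while the round-trip check and header format come from the hunk:

```python
import base64
import binascii


def _is_base64(s: str) -> bool:
    # Round-trip check: re-encoding the decoded bytes only matches if s was already valid base64.
    try:
        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
    except binascii.Error:
        return False


def authorization_header(token: str) -> dict:
    # Hypothetical helper: encode plain-text tokens once, leave base64 tokens untouched.
    if not _is_base64(token):
        token = base64.b64encode(token.encode("utf-8")).decode("utf-8")
    return {'Authorization': 'Token token="{}"'.format(token)}


print(authorization_header("example-api-token"))
```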
@@ -2,25 +2,29 @@ from .plugin import CredentialPlugin

from django.conf import settings
from django.utils.translation import gettext_lazy as _
from thycotic.secrets.vault import SecretsVault

from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
from base64 import b64decode

dsv_inputs = {
    'fields': [
        {
            'id': 'tenant',
            'label': _('Tenant'),
            'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
            'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
            'type': 'string',
        },
        {
            'id': 'tld',
            'label': _('Top-level Domain (TLD)'),
            'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
            'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
            'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
            'choices': ['ca', 'com', 'com.au', 'eu'],
            'default': 'com',
        },
        {'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
        {
            'id': 'client_id',
            'label': _('Client ID'),
            'type': 'string',
        },
        {
            'id': 'client_secret',
            'label': _('Client Secret'),
@@ -41,8 +45,16 @@ dsv_inputs = {
            'help_text': _('The field to extract from the secret'),
            'type': 'string',
        },
        {
            'id': 'secret_decoding',
            'label': _('Should the secret be base64 decoded?'),
            'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
            'choices': ['No Decoding', 'Decode Base64'],
            'type': 'string',
            'default': 'No Decoding',
        },
    ],
    'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
    'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
}

if settings.DEBUG:
@@ -51,12 +63,32 @@ if settings.DEBUG:
            'id': 'url_template',
            'label': _('URL template'),
            'type': 'string',
            'default': 'https://{}.secretsvaultcloud.{}/v1',
            'default': 'https://{}.secretsvaultcloud.{}',
        }
    )

dsv_plugin = CredentialPlugin(
    'Thycotic DevOps Secrets Vault',
    dsv_inputs,
    lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']],  # fmt: skip
)


def dsv_backend(**kwargs):
    tenant_name = kwargs['tenant']
    tenant_tld = kwargs.get('tld', 'com')
    tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
    client_id = kwargs['client_id']
    client_secret = kwargs['client_secret']
    secret_path = kwargs['path']
    secret_field = kwargs['secret_field']
    # providing a default value to remain backward compatible for secrets that have not specified this option
    secret_decoding = kwargs.get('secret_decoding', 'No Decoding')

    tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))

    authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
    dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)

    # files can be uploaded base64 decoded to DSV and thus decoding it only, when asked for
    if secret_decoding == 'Decode Base64':
        return b64decode(dsv_secret['data'][secret_field]).decode()

    return dsv_secret['data'][secret_field]


dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
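The rewritten dsv_backend decodes the looked-up value only when the new secret_decoding input is set to 'Decode Base64'. A self-contained sketch of just that branch, with a stubbed secret dict standing in for the real SecretsVault lookup (the stub and field name are illustrative):

```python
from base64 import b64decode, b64encode


def extract_secret(dsv_secret: dict, secret_field: str, secret_decoding: str = 'No Decoding') -> str:
    # 'Decode Base64' is meant for file-type secrets (e.g. SSH keys) stored base64-encoded.
    if secret_decoding == 'Decode Base64':
        return b64decode(dsv_secret['data'][secret_field]).decode()
    return dsv_secret['data'][secret_field]


# Stub standing in for SecretsVault(tenant_url, authorizer).get_secret(secret_path).
fake_secret = {'data': {'private_key': b64encode(b'-----BEGIN OPENSSH PRIVATE KEY-----').decode()}}
print(extract_secret(fake_secret, 'private_key', 'Decode Base64'))
```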
@@ -54,7 +54,9 @@ tss_inputs = {

def tss_backend(**kwargs):
    if kwargs.get("domain"):
        authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
        authorizer = DomainPasswordGrantAuthorizer(
            base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
        )
    else:
        authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
    secret_server = SecretServer(kwargs['server_url'], authorizer)
@@ -37,8 +37,11 @@ class Control(object):
    def running(self, *args, **kwargs):
        return self.control_with_reply('running', *args, **kwargs)

    def cancel(self, task_ids, *args, **kwargs):
        return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
    def cancel(self, task_ids, with_reply=True):
        if with_reply:
            return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
        else:
            self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})

    def schedule(self, *args, **kwargs):
        return self.control_with_reply('schedule', *args, **kwargs)
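The new cancel signature lets callers skip the reply round-trip. A self-contained sketch of the pattern; FakeControl is an illustrative stand-in, not the real dispatcher client:

```python
class FakeControl:
    """Illustrative stand-in for the dispatcher Control client shown above."""

    def control_with_reply(self, command, **kwargs):
        # In AWX this publishes the command and blocks on a temporary reply queue.
        return {'command': command, **kwargs}

    def control(self, msg, **kwargs):
        # Fire-and-forget publish: no reply queue is created, so nothing blocks.
        print('published', msg)

    def cancel(self, task_ids, with_reply=True):
        if with_reply:
            return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
        self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})


print(FakeControl().cancel([42, 43]))         # waits for the (stubbed) reply
FakeControl().cancel([44], with_reply=False)  # returns immediately, no reply expected
```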
@@ -89,8 +89,9 @@ class AWXConsumerBase(object):
            if task_ids and not msg:
                logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')

            with pg_bus_conn() as conn:
                conn.notify(reply_queue, json.dumps(msg))
            if reply_queue is not None:
                with pg_bus_conn() as conn:
                    conn.notify(reply_queue, json.dumps(msg))
        elif control == 'reload':
            for worker in self.pool.workers:
                worker.quit()
@@ -24,6 +24,9 @@ class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
        parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
        parser.add_argument(
            '--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
        )

    def init_logging(self):
        log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
@@ -48,7 +51,7 @@ class Command(BaseCommand):
                else:
                    pks_to_delete.add(asobj.pk)
                # Cleanup objects in batches instead of deleting each one individually.
                if len(pks_to_delete) >= 500:
                if len(pks_to_delete) >= self.batch_size:
                    ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
                    n_deleted_items += len(pks_to_delete)
                    pks_to_delete.clear()
@@ -63,4 +66,5 @@ class Command(BaseCommand):
        self.days = int(options.get('days', 30))
        self.cutoff = now() - datetime.timedelta(days=self.days)
        self.dry_run = bool(options.get('dry_run', False))
        self.batch_size = int(options.get('batch_size', 500))
        self.cleanup_activitystream()
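The new --batch-size option caps how many ActivityStream rows are removed per DELETE. A self-contained sketch of the same batching pattern, with an illustrative in-memory delete callback in place of the ORM call:

```python
def delete_in_batches(candidate_pks, should_delete, delete_many, batch_size=500):
    """Accumulate primary keys and issue one bulk delete per full batch, plus a final flush."""
    pks_to_delete = set()
    n_deleted = 0
    for pk in candidate_pks:
        if should_delete(pk):
            pks_to_delete.add(pk)
        if len(pks_to_delete) >= batch_size:
            delete_many(pks_to_delete)
            n_deleted += len(pks_to_delete)
            pks_to_delete.clear()
    if pks_to_delete:  # flush the last, partially filled batch
        delete_many(pks_to_delete)
        n_deleted += len(pks_to_delete)
    return n_deleted


# Illustrative usage: "delete" even ids in batches of 3.
print(delete_in_batches(range(10), lambda pk: pk % 2 == 0, lambda pks: print('deleting', sorted(pks)), batch_size=3))
```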
@@ -1,22 +1,22 @@
|
||||
from awx.main.models import HostMetric
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from awx.main.tasks.host_metrics import HostMetricTask
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Run soft-deleting of HostMetrics
|
||||
This command provides cleanup task for HostMetric model.
|
||||
There are two modes, which run in following order:
|
||||
- soft cleanup
|
||||
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
|
||||
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
|
||||
- - updates columns delete, deleted_counter and last_deleted
|
||||
- hard cleanup
|
||||
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
|
||||
This operation happens after the soft deletion has finished.
|
||||
"""
|
||||
|
||||
help = 'Run soft-deleting of HostMetrics'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
|
||||
help = 'Run soft and hard-deletion of HostMetrics'
|
||||
|
||||
def handle(self, *args, **options):
|
||||
months_ago = options.get('months-ago') or None
|
||||
|
||||
if not months_ago:
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
|
||||
|
||||
@@ -9,6 +9,7 @@ import re
|
||||
|
||||
|
||||
# Django
|
||||
from django.apps import apps
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction, connection
|
||||
from django.db.models import Min, Max
|
||||
@@ -150,6 +151,9 @@ class Command(BaseCommand):
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
|
||||
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
|
||||
parser.add_argument(
|
||||
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
|
||||
)
|
||||
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
|
||||
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
|
||||
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
|
||||
@@ -195,18 +199,58 @@ class Command(BaseCommand):
|
||||
delete_meta.delete_jobs()
|
||||
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
|
||||
|
||||
def _cascade_delete_job_events(self, model, pk_list):
|
||||
def has_unpartitioned_table(self, model):
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _delete_unpartitioned_table(self, model):
|
||||
"If the unpartitioned table is no longer necessary, it will drop the table"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
if not self.has_unpartitioned_table(model):
|
||||
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
|
||||
return
|
||||
|
||||
with connection.cursor() as cursor:
|
||||
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
|
||||
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
|
||||
row = cursor.fetchone()
|
||||
last_created = row[0]
|
||||
|
||||
if last_created:
|
||||
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
|
||||
else:
|
||||
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
|
||||
|
||||
if (last_created is None) or (last_created < self.cutoff):
|
||||
self.logger.warning(
|
||||
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
|
||||
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
|
||||
)
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
|
||||
|
||||
def _delete_unpartitioned_events(self, model, pk_list):
|
||||
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
rel_name = model().event_parent_key
|
||||
|
||||
# Bail if the unpartitioned table does not exist anymore
|
||||
if not self.has_unpartitioned_table(model):
|
||||
return
|
||||
|
||||
# Table still exists, delete individual unpartitioned events
|
||||
if pk_list:
|
||||
with connection.cursor() as cursor:
|
||||
tblname = unified_job_class_to_event_table_name(model)
|
||||
|
||||
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
|
||||
pk_list_csv = ','.join(map(str, pk_list))
|
||||
rel_name = model().event_parent_key
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
|
||||
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")
|
||||
|
||||
def cleanup_jobs(self):
|
||||
batch_size = 100000
|
||||
|
||||
# Hack to avoid doing N+1 queries as each item in the Job query set does
|
||||
# an individual query to get the underlying UnifiedJob.
|
||||
Job.polymorphic_super_sub_accessors_replaced = True
|
||||
@@ -221,13 +265,14 @@ class Command(BaseCommand):
|
||||
deleted = 0
|
||||
info = qs.aggregate(min=Min('id'), max=Max('id'))
|
||||
if info['min'] is not None:
|
||||
for start in range(info['min'], info['max'] + 1, batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
|
||||
for start in range(info['min'], info['max'] + 1, self.batch_size):
|
||||
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
|
||||
pk_list = qs_batch.values_list('id', flat=True)
|
||||
|
||||
_, results = qs_batch.delete()
|
||||
deleted += results['main.Job']
|
||||
self._cascade_delete_job_events(Job, pk_list)
|
||||
# Avoid dropping the job event table in case we have interacted with it already
|
||||
self._delete_unpartitioned_events(Job, pk_list)
|
||||
|
||||
return skipped, deleted
|
||||
|
||||
@@ -250,7 +295,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(AdHocCommand, pk_list)
|
||||
self._delete_unpartitioned_events(AdHocCommand, pk_list)
|
||||
|
||||
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -278,7 +323,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(ProjectUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(ProjectUpdate, pk_list)
|
||||
|
||||
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -306,7 +351,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(InventoryUpdate, pk_list)
|
||||
self._delete_unpartitioned_events(InventoryUpdate, pk_list)
|
||||
|
||||
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -330,7 +375,7 @@ class Command(BaseCommand):
|
||||
deleted += 1
|
||||
|
||||
if not self.dry_run:
|
||||
self._cascade_delete_job_events(SystemJob, pk_list)
|
||||
self._delete_unpartitioned_events(SystemJob, pk_list)
|
||||
|
||||
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
@@ -375,12 +420,12 @@ class Command(BaseCommand):
|
||||
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
|
||||
return skipped, deleted
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, *args, **options):
|
||||
self.verbosity = int(options.get('verbosity', 1))
|
||||
self.init_logging()
|
||||
self.days = int(options.get('days', 90))
|
||||
self.dry_run = bool(options.get('dry_run', False))
|
||||
self.batch_size = int(options.get('batch_size', 100000))
|
||||
try:
|
||||
self.cutoff = now() - datetime.timedelta(days=self.days)
|
||||
except OverflowError:
|
||||
@@ -402,19 +447,29 @@ class Command(BaseCommand):
|
||||
del s.receivers[:]
|
||||
s.sender_receivers_cache.clear()
|
||||
|
||||
for m in model_names:
|
||||
if m not in models_to_cleanup:
|
||||
continue
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
|
||||
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
|
||||
func = getattr(self, 'cleanup_%s_partition' % m, None)
|
||||
if func:
|
||||
skipped_partition, deleted_partition = func()
|
||||
skipped += skipped_partition
|
||||
deleted += deleted_partition
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
|
||||
if self.dry_run:
|
||||
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
else:
|
||||
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
|
||||
# Deleting unpartitioned tables cannot be done in same transaction as updates to related tables
|
||||
if not self.dry_run:
|
||||
with transaction.atomic():
|
||||
for m in models_to_cleanup:
|
||||
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
|
||||
unified_job_class = apps.get_model('main', unified_job_class_name)
|
||||
try:
|
||||
unified_job_class().event_class
|
||||
except (NotImplementedError, AttributeError):
|
||||
continue # no need to run this for models without events
|
||||
self._delete_unpartitioned_table(unified_job_class)
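
For reference, the rewritten handle() above wires the new batch_size option through each cleanup method. A minimal sketch of invoking the command from Python, assuming it is registered as the cleanup_jobs management command (consistent with the --days/--batch-size/--dry-run arguments RunSystemJob passes later in this diff):

from django.core.management import call_command

# Remove jobs older than 90 days, walking the id range in chunks of 50000 rows per
# delete, and only report what would happen (dry run) instead of deleting.
call_command('cleanup_jobs', days=90, batch_size=50000, dry_run=True)
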
@@ -125,14 +125,15 @@ class InstanceManager(models.Manager):
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
# detect any instances with the same IP address.
|
||||
# if one exists, set it to None
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = None
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
# if one exists, set it to ""
|
||||
if ip_address:
|
||||
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
|
||||
if inst_conflicting_ip.exists():
|
||||
for other_inst in inst_conflicting_ip:
|
||||
other_hostname = other_inst.hostname
|
||||
other_inst.ip_address = ""
|
||||
other_inst.save(update_fields=['ip_address'])
|
||||
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
|
||||
|
||||
# Return existing instance that matches hostname or UUID (default to UUID)
|
||||
if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
|
||||
|
||||
@@ -9,6 +9,7 @@ from django.conf import settings
|
||||
# AWX
|
||||
import awx.main.fields
|
||||
from awx.main.models import Host
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def replaces():
|
||||
@@ -131,9 +132,11 @@ class Migration(migrations.Migration):
|
||||
help_text='If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
|
||||
),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
sql="CREATE INDEX host_ansible_facts_default_gin ON {} USING gin(ansible_facts jsonb_path_ops);".format(Host._meta.db_table),
|
||||
reverse_sql='DROP INDEX host_ansible_facts_default_gin;',
|
||||
sqlite_sql=dbawaremigrations.RunSQL.noop,
|
||||
sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
|
||||
),
|
||||
# SCM file-based inventories
|
||||
migrations.AddField(
|
||||
|
||||
@@ -3,24 +3,27 @@ from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
tables_to_drop = [
|
||||
'celery_taskmeta',
|
||||
'celery_tasksetmeta',
|
||||
'djcelery_crontabschedule',
|
||||
'djcelery_intervalschedule',
|
||||
'djcelery_periodictask',
|
||||
'djcelery_periodictasks',
|
||||
'djcelery_taskstate',
|
||||
'djcelery_workerstate',
|
||||
'djkombu_message',
|
||||
'djkombu_queue',
|
||||
]
|
||||
postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
|
||||
sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0049_v330_validate_instance_capacity_adjustment'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunSQL([("DROP TABLE IF EXISTS {} CASCADE;".format(table))])
|
||||
for table in (
|
||||
'celery_taskmeta',
|
||||
'celery_tasksetmeta',
|
||||
'djcelery_crontabschedule',
|
||||
'djcelery_intervalschedule',
|
||||
'djcelery_periodictask',
|
||||
'djcelery_periodictasks',
|
||||
'djcelery_taskstate',
|
||||
'djcelery_workerstate',
|
||||
'djkombu_message',
|
||||
'djkombu_queue',
|
||||
)
|
||||
]
|
||||
operations = [dbawaremigrations.RunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
|
||||
# see: https://github.com/ansible/awx/issues/6010
|
||||
@@ -24,6 +26,11 @@ def migrate_event_data(apps, schema_editor):
|
||||
cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')
|
||||
|
||||
|
||||
def migrate_event_data_sqlite(apps, schema_editor):
|
||||
# TODO: cmeyers fill this in
|
||||
return
|
||||
|
||||
|
||||
class FakeAlterField(migrations.AlterField):
|
||||
def database_forwards(self, *args):
|
||||
# this is intentionally left blank, because we're
|
||||
@@ -37,7 +44,7 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_event_data),
|
||||
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
|
||||
FakeAlterField(
|
||||
model_name='adhoccommandevent',
|
||||
name='id',
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from django.db import migrations, models, connection
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def migrate_event_data(apps, schema_editor):
|
||||
# see: https://github.com/ansible/awx/issues/9039
|
||||
@@ -59,6 +61,10 @@ def migrate_event_data(apps, schema_editor):
|
||||
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')
|
||||
|
||||
|
||||
def migrate_event_data_sqlite(apps, schema_editor):
|
||||
return None
|
||||
|
||||
|
||||
class FakeAddField(migrations.AddField):
|
||||
def database_forwards(self, *args):
|
||||
# this is intentionally left blank, because we're
|
||||
@@ -72,7 +78,7 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(migrate_event_data),
|
||||
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
|
||||
FakeAddField(
|
||||
model_name='jobevent',
|
||||
name='job_created',
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
import awx.main.models.notifications
|
||||
from django.db import migrations, models
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
@@ -104,11 +106,12 @@ class Migration(migrations.Migration):
|
||||
name='deleted_actor',
|
||||
field=models.JSONField(null=True),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_activitystream RENAME setting TO setting_old;
|
||||
ALTER TABLE main_activitystream ALTER COLUMN setting_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_activitystream RENAME setting TO setting_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='activitystream',
|
||||
@@ -121,11 +124,12 @@ class Migration(migrations.Migration):
|
||||
name='setting',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_job ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='job',
|
||||
@@ -138,11 +142,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_joblaunchconfig ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='joblaunchconfig',
|
||||
@@ -155,11 +160,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_joblaunchconfig ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='joblaunchconfig',
|
||||
@@ -172,11 +178,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_notification RENAME body TO body_old;
|
||||
ALTER TABLE main_notification ALTER COLUMN body_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_notification RENAME body TO body_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='notification',
|
||||
@@ -189,11 +196,12 @@ class Migration(migrations.Migration):
|
||||
name='body',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old;
|
||||
ALTER TABLE main_unifiedjob ALTER COLUMN job_env_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='unifiedjob',
|
||||
@@ -206,11 +214,12 @@ class Migration(migrations.Migration):
|
||||
name='job_env',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_workflowjob ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjob',
|
||||
@@ -223,11 +232,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_workflowjob ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjob',
|
||||
@@ -240,11 +250,12 @@ class Migration(migrations.Migration):
|
||||
name='survey_passwords',
|
||||
field=models.JSONField(blank=True, default=dict, editable=False),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old;
|
||||
ALTER TABLE main_workflowjobnode ALTER COLUMN char_prompts_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjobnode',
|
||||
@@ -257,11 +268,12 @@ class Migration(migrations.Migration):
|
||||
name='char_prompts',
|
||||
field=models.JSONField(blank=True, default=dict),
|
||||
),
|
||||
migrations.RunSQL(
|
||||
dbawaremigrations.RunSQL(
|
||||
"""
|
||||
ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old;
|
||||
ALTER TABLE main_workflowjobnode ALTER COLUMN survey_passwords_old DROP NOT NULL;
|
||||
""",
|
||||
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old",
|
||||
state_operations=[
|
||||
migrations.RemoveField(
|
||||
model_name='workflowjobnode',
|
||||
|
||||
@@ -3,6 +3,8 @@ from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
from ._sqlite_helper import dbawaremigrations
|
||||
|
||||
|
||||
def delete_taggit_contenttypes(apps, schema_editor):
|
||||
ContentType = apps.get_model('contenttypes', 'ContentType')
|
||||
@@ -20,8 +22,8 @@ class Migration(migrations.Migration):
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;"),
|
||||
migrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;"),
|
||||
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_tag;"),
|
||||
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_taggeditem;"),
|
||||
migrations.RunPython(delete_taggit_contenttypes),
|
||||
migrations.RunPython(delete_taggit_migration_records),
|
||||
]
|
||||
|
||||
awx/main/migrations/_sqlite_helper.py (new file, 61 lines)
@@ -0,0 +1,61 @@
from django.db import migrations


class RunSQL(migrations.operations.special.RunSQL):
    """
    Bit of a hack here. Django actually wants this decision made in the router
    and we can pass **hints.
    """

    def __init__(self, *args, **kwargs):
        if 'sqlite_sql' not in kwargs:
            raise ValueError("sqlite_sql parameter required")
        sqlite_sql = kwargs.pop('sqlite_sql')

        self.sqlite_sql = sqlite_sql
        self.sqlite_reverse_sql = kwargs.pop('sqlite_reverse_sql', None)
        super().__init__(*args, **kwargs)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.sql = self.sqlite_sql or migrations.RunSQL.noop
        super().database_forwards(app_label, schema_editor, from_state, to_state)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.reverse_sql = self.sqlite_reverse_sql or migrations.RunSQL.noop
        super().database_backwards(app_label, schema_editor, from_state, to_state)


class RunPython(migrations.operations.special.RunPython):
    """
    Bit of a hack here. Django actually wants this decision made in the router
    and we can pass **hints.
    """

    def __init__(self, *args, **kwargs):
        if 'sqlite_code' not in kwargs:
            raise ValueError("sqlite_code parameter required")
        sqlite_code = kwargs.pop('sqlite_code')

        self.sqlite_code = sqlite_code
        self.sqlite_reverse_code = kwargs.pop('sqlite_reverse_code', None)
        super().__init__(*args, **kwargs)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.code = self.sqlite_code or migrations.RunPython.noop
        super().database_forwards(app_label, schema_editor, from_state, to_state)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if not schema_editor.connection.vendor.startswith('postgres'):
            self.reverse_code = self.sqlite_reverse_code or migrations.RunPython.noop
        super().database_backwards(app_label, schema_editor, from_state, to_state)


class _sqlitemigrations:
    RunPython = RunPython
    RunSQL = RunSQL

dbawaremigrations = _sqlitemigrations()
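
The migrations changed in this branch consume the helper above through dbawaremigrations. A minimal sketch of a migration using it; the table name, dependency, and forwards function are illustrative, not taken from the source:

from django.db import migrations
from ._sqlite_helper import dbawaremigrations


def forwards(apps, schema_editor):
    pass  # a real data migration would do work here


class Migration(migrations.Migration):
    dependencies = [('main', '0001_initial')]  # illustrative dependency

    operations = [
        # Postgres runs the first statement; sqlite falls back to the simpler sqlite_sql.
        dbawaremigrations.RunSQL(
            "DROP TABLE IF EXISTS example_table CASCADE;",
            sqlite_sql="DROP TABLE IF EXISTS example_table;",
        ),
        # sqlite_code is required; noop is a valid choice when sqlite needs nothing done.
        dbawaremigrations.RunPython(forwards, sqlite_code=migrations.RunPython.noop),
    ]
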
@@ -57,7 +57,6 @@ from awx.main.models.ha import ( # noqa
|
||||
from awx.main.models.rbac import ( # noqa
|
||||
Role,
|
||||
batch_role_ancestor_rebuilding,
|
||||
get_roles_on_resource,
|
||||
role_summary_fields_generator,
|
||||
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
|
||||
ROLE_SINGLETON_SYSTEM_AUDITOR,
|
||||
@@ -91,13 +90,12 @@ from oauth2_provider.models import Grant, RefreshToken # noqa -- needed django-
|
||||
|
||||
# Add custom methods to User model for permissions checks.
|
||||
from django.contrib.auth.models import User # noqa
|
||||
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors, user_accessible_objects # noqa
|
||||
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors # noqa
|
||||
|
||||
|
||||
User.add_to_class('get_queryset', get_user_queryset)
|
||||
User.add_to_class('can_access', check_user_access)
|
||||
User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
||||
User.add_to_class('accessible_objects', user_accessible_objects)
|
||||
|
||||
|
||||
def convert_jsonfields():
|
||||
|
||||
@@ -289,7 +289,10 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
if update_last_seen:
|
||||
update_fields += ['last_seen']
|
||||
if perform_save:
|
||||
self.save(update_fields=update_fields)
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.save(update_fields=update_fields)
|
||||
return update_fields
|
||||
|
||||
def set_capacity_value(self):
|
||||
@@ -309,8 +312,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.cpu_capacity = 0
|
||||
self.mem_capacity = 0 # formula has a non-zero offset, so we make sure it is 0 for hop nodes
|
||||
else:
|
||||
self.cpu_capacity = get_cpu_effective_capacity(self.cpu)
|
||||
self.mem_capacity = get_mem_effective_capacity(self.memory)
|
||||
self.cpu_capacity = get_cpu_effective_capacity(self.cpu, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||
self.mem_capacity = get_mem_effective_capacity(self.memory, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
|
||||
self.set_capacity_value()
|
||||
|
||||
def save_health_data(self, version=None, cpu=0, memory=0, uuid=None, update_last_seen=False, errors=''):
|
||||
@@ -333,12 +336,17 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.version = version
|
||||
update_fields.append('version')
|
||||
|
||||
new_cpu = get_corrected_cpu(cpu)
|
||||
if self.node_type == Instance.Types.EXECUTION:
|
||||
new_cpu = cpu
|
||||
new_memory = memory
|
||||
else:
|
||||
new_cpu = get_corrected_cpu(cpu)
|
||||
new_memory = get_corrected_memory(memory)
|
||||
|
||||
if new_cpu != self.cpu:
|
||||
self.cpu = new_cpu
|
||||
update_fields.append('cpu')
|
||||
|
||||
new_memory = get_corrected_memory(memory)
|
||||
if new_memory != self.memory:
|
||||
self.memory = new_memory
|
||||
update_fields.append('memory')
|
||||
|
||||
@@ -10,7 +10,6 @@ import copy
|
||||
import os.path
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import dateutil.relativedelta
|
||||
import yaml
|
||||
|
||||
# Django
|
||||
@@ -890,23 +889,6 @@ class HostMetric(models.Model):
|
||||
self.deleted = False
|
||||
self.save(update_fields=['deleted'])
|
||||
|
||||
@classmethod
|
||||
def cleanup_task(cls, months_ago):
|
||||
try:
|
||||
months_ago = int(months_ago)
|
||||
if months_ago <= 0:
|
||||
raise ValueError()
|
||||
|
||||
last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
|
||||
|
||||
logger.info(f'cleanup_host_metrics: soft-deleting records last automated before {last_automation_before}')
|
||||
HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
except (TypeError, ValueError):
|
||||
logger.error(f"cleanup_host_metrics: months_ago({months_ago}) has to be a positive integer value")
|
||||
|
||||
|
||||
class HostMetricSummaryMonthly(models.Model):
|
||||
"""
|
||||
|
||||
@@ -19,7 +19,7 @@ from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# AWX
|
||||
from awx.main.models.base import prevent_search
|
||||
from awx.main.models.rbac import Role, RoleAncestorEntry, get_roles_on_resource
|
||||
from awx.main.models.rbac import Role, RoleAncestorEntry
|
||||
from awx.main.utils import parse_yaml_or_json, get_custom_venv_choices, get_licenser, polymorphic
|
||||
from awx.main.utils.execution_environments import get_default_execution_environment
|
||||
from awx.main.utils.encryption import decrypt_value, get_encryption_key, is_encrypted
|
||||
@@ -54,10 +54,7 @@ class ResourceMixin(models.Model):
|
||||
Use instead of `MyModel.objects` when you want to only consider
|
||||
resources that a user has specific permissions for. For example:
|
||||
MyModel.accessible_objects(user, 'read_role').filter(name__istartswith='bar');
|
||||
NOTE: This should only be used for list type things. If you have a
|
||||
specific resource you want to check permissions on, it is more
|
||||
performant to resolve the resource in question then call
|
||||
`myresource.get_permissions(user)`.
|
||||
NOTE: This should only be used for list type things.
|
||||
"""
|
||||
return ResourceMixin._accessible_objects(cls, accessor, role_field)
|
||||
|
||||
@@ -86,15 +83,6 @@ class ResourceMixin(models.Model):
|
||||
def _accessible_objects(cls, accessor, role_field):
|
||||
return cls.objects.filter(pk__in=ResourceMixin._accessible_pk_qs(cls, accessor, role_field))
|
||||
|
||||
def get_permissions(self, accessor):
|
||||
"""
|
||||
Returns a string list of the roles a accessor has for a given resource.
|
||||
An accessor can be either a User, Role, or an arbitrary resource that
|
||||
contains one or more Roles associated with it.
|
||||
"""
|
||||
|
||||
return get_roles_on_resource(self, accessor)
|
||||
|
||||
|
||||
class SurveyJobTemplateMixin(models.Model):
|
||||
class Meta:
|
||||
|
||||
@@ -1439,6 +1439,11 @@ class UnifiedJob(
|
||||
if not self.celery_task_id:
|
||||
return
|
||||
canceled = []
|
||||
if not connection.get_autocommit():
|
||||
# this condition is purpose-written for the task manager, when it cancels jobs in workflows
|
||||
ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id], with_reply=False)
|
||||
return True # task manager itself needs to act under assumption that cancel was received
|
||||
|
||||
try:
|
||||
# Use control and reply mechanism to cancel and obtain confirmation
|
||||
timeout = 5
|
||||
|
||||
@@ -124,6 +124,13 @@ class TaskBase:
|
||||
self.record_aggregate_metrics()
|
||||
sys.exit(1)
|
||||
|
||||
def get_local_metrics(self):
|
||||
data = {}
|
||||
for k, metric in self.subsystem_metrics.METRICS.items():
|
||||
if k.startswith(self.prefix) and metric.metric_has_changed:
|
||||
data[k[len(self.prefix) + 1 :]] = metric.current_value
|
||||
return data
|
||||
|
||||
def schedule(self):
|
||||
# Always be able to restore the original signal handler if we finish
|
||||
original_sigusr1 = signal.getsignal(signal.SIGUSR1)
|
||||
@@ -146,10 +153,14 @@ class TaskBase:
|
||||
signal.signal(signal.SIGUSR1, original_sigusr1)
|
||||
commit_start = time.time()
|
||||
|
||||
logger.debug(f"Commiting {self.prefix} Scheduler changes")
|
||||
|
||||
if self.prefix == "task_manager":
|
||||
self.subsystem_metrics.set(f"{self.prefix}_commit_seconds", time.time() - commit_start)
|
||||
local_metrics = self.get_local_metrics()
|
||||
self.record_aggregate_metrics()
|
||||
logger.debug(f"Finishing {self.prefix} Scheduler")
|
||||
|
||||
logger.debug(f"Finished {self.prefix} Scheduler, timing data:\n{local_metrics}")
|
||||
|
||||
|
||||
class WorkflowManager(TaskBase):
|
||||
@@ -259,6 +270,9 @@ class WorkflowManager(TaskBase):
|
||||
job.status = 'failed'
|
||||
job.save(update_fields=['status', 'job_explanation'])
|
||||
job.websocket_emit_status('failed')
|
||||
# NOTE: sending notification templates here is slightly worse performance
|
||||
# this is not yet optimized in the same way as for the TaskManager
|
||||
job.send_notification_templates('failed')
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
# TODO: should we emit a status on the socket here similar to tasks.py awx_periodic_scheduler() ?
|
||||
@@ -419,6 +433,25 @@ class TaskManager(TaskBase):
|
||||
self.tm_models = TaskManagerModels()
|
||||
self.controlplane_ig = self.tm_models.instance_groups.controlplane_ig
|
||||
|
||||
def process_job_dep_failures(self, task):
|
||||
"""If job depends on a job that has failed, mark as failed and handle misc stuff."""
|
||||
for dep in task.dependent_jobs.all():
|
||||
# if we detect a failed or error dependency, go ahead and fail this task.
|
||||
if dep.status in ("error", "failed"):
|
||||
task.status = 'failed'
|
||||
logger.warning(f'Previous task failed task: {task.id} dep: {dep.id} task manager')
|
||||
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(dep)),
|
||||
dep.name,
|
||||
dep.id,
|
||||
)
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
task.websocket_emit_status('failed')
|
||||
self.pre_start_failed.append(task.id)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def job_blocked_by(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
||||
@@ -430,20 +463,6 @@ class TaskManager(TaskBase):
|
||||
for dep in task.dependent_jobs.all():
|
||||
if dep.status in ACTIVE_STATES:
|
||||
return dep
|
||||
# if we detect a failed or error dependency, go ahead and fail this
|
||||
# task. The errback on the dependency takes some time to trigger,
|
||||
# and we don't want the task to enter running state if its
|
||||
# dependency has failed or errored.
|
||||
elif dep.status in ("error", "failed"):
|
||||
task.status = 'failed'
|
||||
task.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(dep)),
|
||||
dep.name,
|
||||
dep.id,
|
||||
)
|
||||
task.save(update_fields=['status', 'job_explanation'])
|
||||
task.websocket_emit_status('failed')
|
||||
return dep
|
||||
|
||||
return None
|
||||
|
||||
@@ -463,7 +482,6 @@ class TaskManager(TaskBase):
|
||||
if self.start_task_limit == 0:
|
||||
# schedule another run immediately after this task manager
|
||||
ScheduleTaskManager().schedule()
|
||||
from awx.main.tasks.system import handle_work_error, handle_work_success
|
||||
|
||||
task.status = 'waiting'
|
||||
|
||||
@@ -474,7 +492,7 @@ class TaskManager(TaskBase):
|
||||
task.job_explanation += ' '
|
||||
task.job_explanation += 'Task failed pre-start check.'
|
||||
task.save()
|
||||
# TODO: run error handler to fail sub-tasks and send notifications
|
||||
self.pre_start_failed.append(task.id)
|
||||
else:
|
||||
if type(task) is WorkflowJob:
|
||||
task.status = 'running'
|
||||
@@ -496,19 +514,16 @@ class TaskManager(TaskBase):
|
||||
# apply_async does a NOTIFY to the channel dispatcher is listening to
|
||||
# postgres will treat this as part of the transaction, which is what we want
|
||||
if task.status != 'failed' and type(task) is not WorkflowJob:
|
||||
task_actual = {'type': get_type_for_model(type(task)), 'id': task.id}
|
||||
task_cls = task._get_task_class()
|
||||
task_cls.apply_async(
|
||||
[task.pk],
|
||||
opts,
|
||||
queue=task.get_queue_name(),
|
||||
uuid=task.celery_task_id,
|
||||
callbacks=[{'task': handle_work_success.name, 'kwargs': {'task_actual': task_actual}}],
|
||||
errbacks=[{'task': handle_work_error.name, 'kwargs': {'task_actual': task_actual}}],
|
||||
)
|
||||
|
||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message
|
||||
# for jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||
# In exception cases, like a job failing pre-start checks, we send the websocket status message.
|
||||
# For jobs going into waiting, we omit this because of performance issues, as it should go to running quickly
|
||||
if task.status != 'waiting':
|
||||
task.websocket_emit_status(task.status) # adds to on_commit
|
||||
|
||||
@@ -529,6 +544,11 @@ class TaskManager(TaskBase):
|
||||
if self.timed_out():
|
||||
logger.warning("Task manager has reached time out while processing pending jobs, exiting loop early")
|
||||
break
|
||||
|
||||
has_failed = self.process_job_dep_failures(task)
|
||||
if has_failed:
|
||||
continue
|
||||
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_tasks_blocked", 1)
|
||||
@@ -642,6 +662,11 @@ class TaskManager(TaskBase):
|
||||
reap_job(j, 'failed')
|
||||
|
||||
def process_tasks(self):
|
||||
# maintain a list of jobs that went to an early failure state,
|
||||
# meaning the dispatcher never got these jobs,
|
||||
# that means we have to handle notifications for those
|
||||
self.pre_start_failed = []
|
||||
|
||||
running_tasks = [t for t in self.all_tasks if t.status in ['waiting', 'running']]
|
||||
self.process_running_tasks(running_tasks)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_running_processed", len(running_tasks))
|
||||
@@ -651,6 +676,11 @@ class TaskManager(TaskBase):
|
||||
self.process_pending_tasks(pending_tasks)
|
||||
self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(pending_tasks))
|
||||
|
||||
if self.pre_start_failed:
|
||||
from awx.main.tasks.system import handle_failure_notifications
|
||||
|
||||
handle_failure_notifications.delay(self.pre_start_failed)
|
||||
|
||||
def timeout_approval_node(self, task):
|
||||
if self.timed_out():
|
||||
logger.warning("Task manager has reached time out while processing approval nodes, exiting loop early")
|
||||
|
||||
@@ -208,9 +208,10 @@ class RunnerCallback:
|
||||
# We opened a connection just for that save, close it here now
|
||||
connections.close_all()
|
||||
elif status_data['status'] == 'error':
|
||||
result_traceback = status_data.get('result_traceback', None)
|
||||
if result_traceback:
|
||||
self.delay_update(result_traceback=result_traceback)
|
||||
for field_name in ('result_traceback', 'job_explanation'):
|
||||
field_value = status_data.get(field_name, None)
|
||||
if field_value:
|
||||
self.delay_update(**{field_name: field_value})
|
||||
|
||||
def artifacts_handler(self, artifact_dir):
|
||||
self.artifacts_processed = True
|
||||
|
||||
awx/main/tasks/helpers.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from django.utils.timezone import now
from rest_framework.fields import DateTimeField


def is_run_threshold_reached(setting, threshold_seconds):
    last_time = DateTimeField().to_internal_value(setting) if setting else None
    if not last_time:
        return True
    else:
        return (now() - last_time).total_seconds() > threshold_seconds
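
A quick illustration of how the periodic tasks below gate themselves on this helper; the timestamp and interval values are made up for the example:

from awx.main.tasks.helpers import is_run_threshold_reached

# No recorded timestamp yet is treated as "never ran", so the task is due.
assert is_run_threshold_reached(None, 7 * 86400)

# A stored ISO timestamp is parsed via DRF's DateTimeField; the call returns True only
# once more than threshold_seconds (here, 7 days) have elapsed since that time.
is_run_threshold_reached('2023-01-01T00:00:00Z', 7 * 86400)
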
@@ -3,33 +3,90 @@ from dateutil.relativedelta import relativedelta
|
||||
import logging
|
||||
|
||||
from django.conf import settings
|
||||
from django.db.models import Count
|
||||
from django.db.models import Count, F
|
||||
from django.db.models.functions import TruncMonth
|
||||
from django.utils.timezone import now
|
||||
from rest_framework.fields import DateTimeField
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.conf.license import get_license
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.host_metric_summary_monthly')
|
||||
logger = logging.getLogger('awx.main.tasks.host_metrics')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
|
||||
logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
|
||||
HostMetricTask().cleanup(
|
||||
soft_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12),
|
||||
hard_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36),
|
||||
)
|
||||
logger.info("Finished cleanup_host_metrics")
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def host_metric_summary_monthly():
|
||||
"""Run cleanup host metrics summary monthly task each week"""
|
||||
if _is_run_threshold_reached(
|
||||
getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400
|
||||
):
|
||||
if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
|
||||
logger.info(f"Executing host_metric_summary_monthly, last ran at {getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', '---')}")
|
||||
HostMetricSummaryMonthlyTask().execute()
|
||||
logger.info("Finished host_metric_summary_monthly")
|
||||
|
||||
|
||||
def _is_run_threshold_reached(setting, threshold_seconds):
|
||||
last_time = DateTimeField().to_internal_value(setting) if setting else DateTimeField().to_internal_value('1970-01-01')
|
||||
class HostMetricTask:
|
||||
"""
|
||||
This class provides cleanup task for HostMetric model.
|
||||
There are two modes:
|
||||
- soft cleanup (updates columns delete, deleted_counter and last_deleted)
|
||||
- hard cleanup (deletes from the db)
|
||||
"""
|
||||
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
def cleanup(self, soft_threshold=None, hard_threshold=None):
|
||||
"""
|
||||
Main entrypoint, runs either soft cleanup, hard cleanup or both
|
||||
|
||||
:param soft_threshold: (int)
|
||||
:param hard_threshold: (int)
|
||||
"""
|
||||
if hard_threshold is not None:
|
||||
self.hard_cleanup(hard_threshold)
|
||||
if soft_threshold is not None:
|
||||
self.soft_cleanup(soft_threshold)
|
||||
|
||||
settings.CLEANUP_HOST_METRICS_LAST_TS = now()
|
||||
|
||||
@staticmethod
|
||||
def soft_cleanup(threshold=None):
|
||||
if threshold is None:
|
||||
threshold = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
|
||||
try:
|
||||
threshold = int(threshold)
|
||||
except (ValueError, TypeError) as e:
|
||||
raise type(e)("soft_threshold has to be convertible to number") from e
|
||||
|
||||
last_automation_before = now() - relativedelta(months=threshold)
|
||||
rows = HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
|
||||
deleted=True, deleted_counter=F('deleted_counter') + 1, last_deleted=now()
|
||||
)
|
||||
logger.info(f'cleanup_host_metrics: soft-deleted records last automated before {last_automation_before}, affected rows: {rows}')
|
||||
|
||||
@staticmethod
|
||||
def hard_cleanup(threshold=None):
|
||||
if threshold is None:
|
||||
threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
|
||||
|
||||
try:
|
||||
threshold = int(threshold)
|
||||
except (ValueError, TypeError) as e:
|
||||
raise type(e)("hard_threshold has to be convertible to number") from e
|
||||
|
||||
last_deleted_before = now() - relativedelta(months=threshold)
|
||||
queryset = HostMetric.objects.filter(deleted=True, last_deleted__lt=last_deleted_before)
|
||||
rows = queryset.delete()
|
||||
logger.info(f'cleanup_host_metrics: hard-deleted records which were soft deleted before {last_deleted_before}, affected rows: {rows[0]}')
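
The functional tests added later in this diff exercise this class directly; the same call shape can be used ad hoc. A short sketch, with the thresholds spelled out rather than read from settings:

from awx.main.tasks.host_metrics import HostMetricTask

# Soft-delete metrics whose last_automation is older than 12 months, and hard-delete
# rows soft-deleted more than 36 months ago; also stamps CLEANUP_HOST_METRICS_LAST_TS.
HostMetricTask().cleanup(soft_threshold=12, hard_threshold=36)
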
class HostMetricSummaryMonthlyTask:
|
||||
|
||||
@@ -74,6 +74,8 @@ from awx.main.utils.common import (
|
||||
extract_ansible_vars,
|
||||
get_awx_version,
|
||||
create_partition,
|
||||
ScheduleWorkflowManager,
|
||||
ScheduleTaskManager,
|
||||
)
|
||||
from awx.conf.license import get_license
|
||||
from awx.main.utils.handlers import SpecialInventoryHandler
|
||||
@@ -450,6 +452,12 @@ class BaseTask(object):
|
||||
instance.ansible_version = ansible_version_info
|
||||
instance.save(update_fields=['ansible_version'])
|
||||
|
||||
# Run task manager appropriately for speculative dependencies
|
||||
if instance.unifiedjob_blocked_jobs.exists():
|
||||
ScheduleTaskManager().schedule()
|
||||
if instance.spawned_by_workflow:
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
def should_use_fact_cache(self):
|
||||
return False
|
||||
|
||||
@@ -1873,6 +1881,8 @@ class RunSystemJob(BaseTask):
|
||||
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
|
||||
if 'days' in json_vars:
|
||||
args.extend(['--days', str(json_vars.get('days', 60))])
|
||||
if 'batch_size' in json_vars:
|
||||
args.extend(['--batch-size', str(json_vars['batch_size'])])
|
||||
if 'dry_run' in json_vars and json_vars['dry_run']:
|
||||
args.extend(['--dry-run'])
|
||||
if system_job.job_type == 'cleanup_jobs':
|
||||
|
||||
@@ -432,16 +432,16 @@ class AWXReceptorJob:
|
||||
# massive, only ask for last 1000 bytes
|
||||
startpos = max(stdout_size - 1000, 0)
|
||||
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
|
||||
resultsock.setblocking(False) # this makes resultfile reads non blocking
|
||||
lines = resultfile.readlines()
|
||||
receptor_output = b"".join(lines).decode()
|
||||
if receptor_output:
|
||||
self.task.runner_callback.delay_update(result_traceback=receptor_output)
|
||||
self.task.runner_callback.delay_update(result_traceback=f'Worker output:\n{receptor_output}')
|
||||
elif detail:
|
||||
self.task.runner_callback.delay_update(result_traceback=detail)
|
||||
self.task.runner_callback.delay_update(result_traceback=f'Receptor detail:\n{detail}')
|
||||
else:
|
||||
logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
|
||||
except Exception:
|
||||
logger.exception(f'Work results error from job id={self.task.instance.id} work_unit={self.task.instance.work_unit_id}')
|
||||
raise RuntimeError(detail)
|
||||
|
||||
return res
|
||||
|
||||
@@ -16,7 +16,9 @@ class SignalExit(Exception):
|
||||
class SignalState:
|
||||
def reset(self):
|
||||
self.sigterm_flag = False
|
||||
self.is_active = False
|
||||
self.sigint_flag = False
|
||||
|
||||
self.is_active = False # for nested context managers
|
||||
self.original_sigterm = None
|
||||
self.original_sigint = None
|
||||
self.raise_exception = False
|
||||
@@ -24,23 +26,36 @@ class SignalState:
|
||||
def __init__(self):
|
||||
self.reset()
|
||||
|
||||
def set_flag(self, *args):
|
||||
"""Method to pass into the python signal.signal method to receive signals"""
|
||||
self.sigterm_flag = True
|
||||
def raise_if_needed(self):
|
||||
if self.raise_exception:
|
||||
self.raise_exception = False # so it is not raised a second time in error handling
|
||||
raise SignalExit()
|
||||
|
||||
def set_sigterm_flag(self, *args):
|
||||
self.sigterm_flag = True
|
||||
self.raise_if_needed()
|
||||
|
||||
def set_sigint_flag(self, *args):
|
||||
self.sigint_flag = True
|
||||
self.raise_if_needed()
|
||||
|
||||
def connect_signals(self):
|
||||
self.original_sigterm = signal.getsignal(signal.SIGTERM)
|
||||
self.original_sigint = signal.getsignal(signal.SIGINT)
|
||||
signal.signal(signal.SIGTERM, self.set_flag)
|
||||
signal.signal(signal.SIGINT, self.set_flag)
|
||||
signal.signal(signal.SIGTERM, self.set_sigterm_flag)
|
||||
signal.signal(signal.SIGINT, self.set_sigint_flag)
|
||||
self.is_active = True
|
||||
|
||||
def restore_signals(self):
|
||||
signal.signal(signal.SIGTERM, self.original_sigterm)
|
||||
signal.signal(signal.SIGINT, self.original_sigint)
|
||||
# if we got a signal while context manager was active, call parent methods.
|
||||
if self.sigterm_flag:
|
||||
if callable(self.original_sigterm):
|
||||
self.original_sigterm()
|
||||
if self.sigint_flag:
|
||||
if callable(self.original_sigint):
|
||||
self.original_sigint()
|
||||
self.reset()
|
||||
|
||||
|
||||
@@ -48,7 +63,7 @@ signal_state = SignalState()
|
||||
|
||||
|
||||
def signal_callback():
|
||||
return signal_state.sigterm_flag
|
||||
return bool(signal_state.sigterm_flag or signal_state.sigint_flag)
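
A sketch of the consumption pattern this enables: a long-running loop wrapped by the with_signal_handling decorator that follows can poll signal_callback() and stop cleanly on either signal. The module path and the per-item work are assumptions, not taken from this diff:

from awx.main.tasks.signals import signal_callback, with_signal_handling


@with_signal_handling
def run_batches(batches):
    for batch in batches:
        if signal_callback():  # True once SIGTERM or SIGINT has been received
            return
        handle_batch(batch)  # hypothetical per-batch work
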
def with_signal_handling(f):
|
||||
|
||||
@@ -48,22 +48,16 @@ from awx.main.models import (
|
||||
Inventory,
|
||||
SmartInventoryMembership,
|
||||
Job,
|
||||
HostMetric,
|
||||
convert_jsonfields,
|
||||
)
|
||||
from awx.main.constants import ACTIVE_STATES
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename, reaper
|
||||
from awx.main.utils.common import (
|
||||
get_type_for_model,
|
||||
ignore_inventory_computed_fields,
|
||||
ignore_inventory_group_removal,
|
||||
ScheduleWorkflowManager,
|
||||
ScheduleTaskManager,
|
||||
)
|
||||
from awx.main.utils.common import ignore_inventory_computed_fields, ignore_inventory_group_removal
|
||||
|
||||
from awx.main.utils.reload import stop_local_services
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main import analytics
|
||||
@@ -368,9 +362,7 @@ def send_notifications(notification_list, job_id=None):
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def gather_analytics():
|
||||
from awx.conf.models import Setting
|
||||
|
||||
if is_run_threshold_reached(Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first(), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
|
||||
analytics.gather()
|
||||
|
||||
|
||||
@@ -427,29 +419,6 @@ def cleanup_images_and_files():
|
||||
_cleanup_images_and_files()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_host_metrics():
|
||||
"""Run cleanup host metrics ~each month"""
|
||||
# TODO: move whole method to host_metrics in follow-up PR
|
||||
from awx.conf.models import Setting
|
||||
|
||||
if is_run_threshold_reached(
|
||||
Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first(), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
|
||||
):
|
||||
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
|
||||
logger.info("Executing cleanup_host_metrics")
|
||||
HostMetric.cleanup_task(months_ago)
|
||||
logger.info("Finished cleanup_host_metrics")
|
||||
|
||||
|
||||
def is_run_threshold_reached(setting, threshold_seconds):
|
||||
from rest_framework.fields import DateTimeField
|
||||
|
||||
last_time = DateTimeField().to_internal_value(setting.value) if setting and setting.value else DateTimeField().to_internal_value('1970-01-01')
|
||||
|
||||
return (now() - last_time).total_seconds() > threshold_seconds
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cluster_node_health_check(node):
|
||||
"""
|
||||
@@ -491,7 +460,6 @@ def execution_node_health_check(node):
|
||||
data = worker_info(node)
|
||||
|
||||
prior_capacity = instance.capacity
|
||||
|
||||
instance.save_health_data(
|
||||
version='ansible-runner-' + data.get('runner_version', '???'),
|
||||
cpu=data.get('cpu_count', 0),
|
||||
@@ -789,66 +757,21 @@ def awx_periodic_scheduler():
|
||||
new_unified_job.save(update_fields=['status', 'job_explanation'])
|
||||
new_unified_job.websocket_emit_status("failed")
|
||||
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
|
||||
state.save()
|
||||
|
||||
|
||||
def schedule_manager_success_or_error(instance):
|
||||
if instance.unifiedjob_blocked_jobs.exists():
|
||||
ScheduleTaskManager().schedule()
|
||||
if instance.spawned_by_workflow:
|
||||
ScheduleWorkflowManager().schedule()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_success(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
except ObjectDoesNotExist:
|
||||
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
|
||||
return
|
||||
if not instance:
|
||||
return
|
||||
schedule_manager_success_or_error(instance)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def handle_work_error(task_actual):
|
||||
try:
|
||||
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
|
||||
except ObjectDoesNotExist:
|
||||
logger.warning('Missing {} `{}` in error callback.'.format(task_actual['type'], task_actual['id']))
|
||||
return
|
||||
if not instance:
|
||||
return
|
||||
|
||||
subtasks = instance.get_jobs_fail_chain() # reverse of dependent_jobs mostly
|
||||
logger.debug(f'Executing error task id {task_actual["id"]}, subtasks: {[subtask.id for subtask in subtasks]}')
|
||||
|
||||
deps_of_deps = {}
|
||||
|
||||
for subtask in subtasks:
|
||||
if subtask.celery_task_id != instance.celery_task_id and not subtask.cancel_flag and not subtask.status in ('successful', 'failed'):
|
||||
# If there are multiple in the dependency chain, A->B->C, and this was called for A, blame B for clarity
|
||||
blame_job = deps_of_deps.get(subtask.id, instance)
|
||||
subtask.status = 'failed'
|
||||
subtask.failed = True
|
||||
if not subtask.job_explanation:
|
||||
subtask.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
|
||||
get_type_for_model(type(blame_job)),
|
||||
blame_job.name,
|
||||
blame_job.id,
|
||||
)
|
||||
subtask.save()
|
||||
subtask.websocket_emit_status("failed")
|
||||
|
||||
for sub_subtask in subtask.get_jobs_fail_chain():
|
||||
deps_of_deps[sub_subtask.id] = subtask
|
||||
|
||||
# We only send 1 job complete message since all the job completion message
|
||||
# handling does is trigger the scheduler. If we extend the functionality of
|
||||
# what the job complete message handler does then we may want to send a
|
||||
# completion event for each job here.
|
||||
schedule_manager_success_or_error(instance)
|
||||
def handle_failure_notifications(task_ids):
|
||||
"""A task-ified version of the method that sends notifications."""
|
||||
found_task_ids = set()
|
||||
for instance in UnifiedJob.objects.filter(id__in=task_ids):
|
||||
found_task_ids.add(instance.id)
|
||||
try:
|
||||
instance.send_notification_templates('failed')
|
||||
except Exception:
|
||||
logger.exception(f'Error preparing notifications for task {instance.id}')
|
||||
deleted_tasks = set(task_ids) - found_task_ids
|
||||
if deleted_tasks:
|
||||
logger.warning(f'Could not send notifications for {deleted_tasks} because they were not found in the database')
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.tasks.host_metrics import HostMetricTask
|
||||
from awx.main.models.inventory import HostMetric
|
||||
from awx.main.tests.factories.fixtures import mk_host_metric
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from django.conf import settings
|
||||
from django.utils import timezone
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_no_host_metrics():
|
||||
"""No-crash test"""
|
||||
assert HostMetric.objects.count() == 0
|
||||
HostMetricTask().cleanup(soft_threshold=0, hard_threshold=0)
|
||||
HostMetricTask().cleanup(soft_threshold=24, hard_threshold=42)
|
||||
assert HostMetric.objects.count() == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_delete_exception():
|
||||
"""Crash test"""
|
||||
with pytest.raises(ValueError):
|
||||
HostMetricTask().soft_cleanup("")
|
||||
with pytest.raises(TypeError):
|
||||
HostMetricTask().hard_cleanup(set())
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, 20])
|
||||
def test_soft_delete(threshold):
|
||||
"""Metrics with last_automation < threshold are updated to deleted=True"""
|
||||
mk_host_metric('host_1', first_automation=ago(months=1), last_automation=ago(months=1), deleted=False)
|
||||
mk_host_metric('host_2', first_automation=ago(months=1), last_automation=ago(months=1), deleted=True)
|
||||
mk_host_metric('host_3', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=False)
|
||||
mk_host_metric('host_4', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=True)
|
||||
mk_host_metric('host_5', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=False)
|
||||
mk_host_metric('host_6', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=True)
|
||||
mk_host_metric('host_7', first_automation=ago(months=1), last_automation=ago(months=42), deleted=False)
|
||||
mk_host_metric('host_8', first_automation=ago(months=1), last_automation=ago(months=42), deleted=True)
|
||||
|
||||
assert HostMetric.objects.count() == 8
|
||||
assert HostMetric.active_objects.count() == 4
|
||||
|
||||
for i in range(2):
|
||||
HostMetricTask().cleanup(soft_threshold=threshold)
|
||||
assert HostMetric.objects.count() == 8
|
||||
|
||||
hostnames = set(HostMetric.objects.filter(deleted=False).order_by('hostname').values_list('hostname', flat=True))
|
||||
assert hostnames == {'host_1', 'host_3'}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD, 20])
|
||||
def test_hard_delete(threshold):
|
||||
"""Metrics with last_deleted < threshold and deleted=True are deleted from the db"""
|
||||
mk_host_metric('host_1', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=False)
|
||||
mk_host_metric('host_2', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=True)
|
||||
mk_host_metric('host_3', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=False)
|
||||
mk_host_metric('host_4', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=True)
|
||||
mk_host_metric('host_5', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=False)
|
||||
mk_host_metric('host_6', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=True)
|
||||
mk_host_metric('host_7', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=False)
|
||||
mk_host_metric('host_8', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=True)
|
||||
|
||||
assert HostMetric.objects.count() == 8
|
||||
assert HostMetric.active_objects.count() == 4
|
||||
|
||||
for i in range(2):
|
||||
HostMetricTask().cleanup(hard_threshold=threshold)
|
||||
assert HostMetric.objects.count() == 6
|
||||
|
||||
hostnames = set(HostMetric.objects.order_by('hostname').values_list('hostname', flat=True))
|
||||
assert hostnames == {'host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_7'}
|
||||
|
||||
|
||||
def ago(months=0, hours=0):
|
||||
return timezone.now() - relativedelta(months=months, hours=hours)
|
||||
@@ -76,3 +76,24 @@ def test_hashivault_handle_auth_kubernetes():
|
||||
def test_hashivault_handle_auth_not_enough_args():
|
||||
with pytest.raises(Exception):
|
||||
hashivault.handle_auth()
|
||||
|
||||
|
||||
class TestDelineaImports:
|
||||
"""
|
||||
These module have a try-except for ImportError which will allow using the older library
|
||||
but we do not want the awx_devel image to have the older library,
|
||||
so these tests are designed to fail if these wind up using the fallback import
|
||||
"""
|
||||
|
||||
def test_dsv_import(self):
|
||||
from awx.main.credential_plugins.dsv import SecretsVault # noqa
|
||||
|
||||
# assert this module as opposed to older thycotic.secrets.vault
|
||||
assert SecretsVault.__module__ == 'delinea.secrets.vault'
|
||||
|
||||
def test_tss_import(self):
|
||||
from awx.main.credential_plugins.tss import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret # noqa
|
||||
|
||||
for cls in (DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret):
|
||||
# assert this module as opposed to older thycotic.secrets.server
|
||||
assert cls.__module__ == 'delinea.secrets.server'
|
||||
|
||||
@@ -38,8 +38,8 @@ def test_orphan_unified_job_creation(instance, inventory):
|
||||
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.tasks.system.inspect_execution_and_hop_nodes', lambda *args, **kwargs: None)
|
||||
@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu: 8)
|
||||
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem: 62)
|
||||
@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu, is_control_node: 8)
|
||||
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 62)
|
||||
def test_job_capacity_and_with_inactive_node():
|
||||
i = Instance.objects.create(hostname='test-1')
|
||||
i.save_health_data('18.0.1', 2, 8000)
|
||||
|
||||
awx/main/tests/functional/test_migrations.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import pytest

from django_test_migrations.plan import all_migrations, nodes_to_tuples

"""
Most tests that live in here can probably be deleted at some point. They are mainly
for a developer. When AWX versions that users upgrade from fall out of support, that
is when migration tests can be deleted. This is also a good time to squash. Squashing
will likely mess with the tests that live here.

The smoke test should be kept in here. The smoke test ensures that our migrations
continue to work when sqlite is the backing database (vs. the default DB of postgres).
"""


@pytest.mark.django_db
class TestMigrationSmoke:
    def test_happy_path(self, migrator):
        """
        This smoke test runs all the migrations.

        Example of how to use django-test-migrations to invoke particular migration(s)
        while weaving in object creation and assertions.

        Note that this is more than just an example. It is a smoke test because it runs ALL
        the migrations. Our "normal" unit tests subvert the migrations running because it is slow.
        """
        migration_nodes = all_migrations('default')
        migration_tuples = nodes_to_tuples(migration_nodes)
        final_migration = migration_tuples[-1]

        migrator.apply_initial_migration(('main', None))
        # I just picked a newish migration at the time of writing this.
        # If someone from the future finds themselves here because they are squashing migrations,
        # it is fine to change the 0180_... below to some other newish migration.
        intermediate_state = migrator.apply_tested_migration(('main', '0180_add_hostmetric_fields'))

        Instance = intermediate_state.apps.get_model('main', 'Instance')
        # Create any old object in the database
        Instance.objects.create(hostname='foobar', node_type='control')

        final_state = migrator.apply_tested_migration(final_migration)
        Instance = final_state.apps.get_model('main', 'Instance')
        assert Instance.objects.filter(hostname='foobar').count() == 1
@@ -122,25 +122,6 @@ def test_team_org_resource_role(ext_auth, organization, rando, org_admin, team):
    ] == [True for i in range(2)]


@pytest.mark.django_db
def test_user_accessible_objects(user, organization):
    """
    We cannot directly use accessible_objects for User model because
    both editing and read permissions are obligated to complex business logic
    """
    admin = user('admin', False)
    u = user('john', False)
    access = UserAccess(admin)
    assert access.get_queryset().count() == 1  # can only see himself

    organization.member_role.members.add(u)
    organization.member_role.members.add(admin)
    assert access.get_queryset().count() == 2

    organization.member_role.members.remove(u)
    assert access.get_queryset().count() == 1


@pytest.mark.django_db
def test_org_admin_create_sys_auditor(org_admin):
    access = UserAccess(org_admin)
@@ -5,8 +5,8 @@ import tempfile
import shutil

from awx.main.tasks.jobs import RunJob
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files, handle_work_error
from awx.main.models import Instance, Job, InventoryUpdate, ProjectUpdate
from awx.main.tasks.system import execution_node_health_check, _cleanup_images_and_files
from awx.main.models import Instance, Job


@pytest.fixture
@@ -73,17 +73,3 @@ def test_does_not_run_reaped_job(mocker, mock_me):
    job.refresh_from_db()
    assert job.status == 'failed'
    mock_run.assert_not_called()


@pytest.mark.django_db
def test_handle_work_error_nested(project, inventory_source):
    pu = ProjectUpdate.objects.create(status='failed', project=project, celery_task_id='1234')
    iu = InventoryUpdate.objects.create(status='pending', inventory_source=inventory_source, source='scm')
    job = Job.objects.create(status='pending')
    iu.dependent_jobs.add(pu)
    job.dependent_jobs.add(pu, iu)
    handle_work_error({'type': 'project_update', 'id': pu.id})
    iu.refresh_from_db()
    job.refresh_from_db()
    assert iu.job_explanation == f'Previous Task Failed: {{"job_type": "project_update", "job_name": "", "job_id": "{pu.id}"}}'
    assert job.job_explanation == f'Previous Task Failed: {{"job_type": "inventory_update", "job_name": "", "job_id": "{iu.id}"}}'
|
||||
@@ -47,7 +47,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
|
||||
'action(type="omhttp" server="logs-01.loggly.com" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="inputs/1fd38090-2af1-4e1e-8d80-492899da0f71/tag/http/")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -61,7 +61,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="udp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -75,7 +75,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")',
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx")', # noqa
|
||||
'action(type="omfwd" target="localhost" port="9000" protocol="tcp" action.resumeRetryCount="-1" action.resumeInterval="5" template="awx" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -89,7 +89,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -103,7 +103,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="80" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -117,7 +117,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -131,7 +131,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -145,7 +145,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -159,7 +159,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
'action(type="omhttp" server="yoursplunk.org" serverport="8088" usehttps="off" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="services/collector/event")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
@@ -173,7 +173,7 @@ data_loggly = {
|
||||
'\n'.join(
|
||||
[
|
||||
'template(name="awx" type="string" string="%rawmsg-after-pri%")\nmodule(load="omhttp")',
|
||||
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxdiskspace="1g" queue.type="LinkedList" queue.saveOnShutdown="on" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
|
||||
'action(type="omhttp" server="endpoint5.collection.us2.sumologic.com" serverport="443" usehttps="on" allowunsignedcerts="off" skipverifyhost="off" action.resumeRetryCount="-1" template="awx" action.resumeInterval="5" queue.spoolDirectory="/var/lib/awx" queue.filename="awx-external-logger-action-queue" queue.maxDiskSpace="1g" queue.maxFileSize="100m" queue.type="LinkedList" queue.saveOnShutdown="on" queue.syncqueuefiles="on" queue.checkpointInterval="1000" queue.size="131072" queue.highwaterMark="98304" queue.discardMark="117964" queue.discardSeverity="5" errorfile="/var/log/tower/rsyslog.err" restpath="receiver/v1/http/ZaVnC4dhaV0qoiETY0MrM3wwLoDgO1jFgjOxE6-39qokkj3LGtOroZ8wNaN2M6DtgYrJZsmSi4-36_Up5TbbN_8hosYonLKHSSOSKY845LuLZBCBwStrHQ==")', # noqa
|
||||
]
|
||||
),
|
||||
),
|
||||
|
||||
@@ -36,7 +36,9 @@ def test_SYSTEM_TASK_ABS_MEM_conversion(value, converted_value, mem_capacity):
        mock_settings.IS_K8S = True
        assert convert_mem_str_to_bytes(value) == converted_value
        assert get_corrected_memory(-1) == converted_value
        assert get_mem_effective_capacity(-1) == mem_capacity
        assert get_mem_effective_capacity(1, is_control_node=True) == mem_capacity
        # SYSTEM_TASK_ABS_MEM should not affect memory and capacity for execution nodes
        assert get_mem_effective_capacity(2147483648, is_control_node=False) == 20


@pytest.mark.parametrize(
@@ -58,4 +60,6 @@ def test_SYSTEM_TASK_ABS_CPU_conversion(value, converted_value, cpu_capacity):
        mock_settings.SYSTEM_TASK_FORKS_CPU = 4
        assert convert_cpu_str_to_decimal_cpu(value) == converted_value
        assert get_corrected_cpu(-1) == converted_value
        assert get_cpu_effective_capacity(-1) == cpu_capacity
        assert get_cpu_effective_capacity(-1, is_control_node=True) == cpu_capacity
        # SYSTEM_TASK_ABS_CPU should not affect cpu count and capacity for execution nodes
        assert get_cpu_effective_capacity(2.0, is_control_node=False) == 8
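These assertions encode the new contract of the is_control_node flag: the SYSTEM_TASK_ABS_CPU / SYSTEM_TASK_ABS_MEM overrides only influence control-node capacity. A minimal self-contained sketch of that gating (hypothetical helper, not the AWX implementation; the numbers mirror the CPU assertion above):

    def effective_forks(detected, override, per_unit_forks, is_control_node=False):
        # The absolute override (e.g. SYSTEM_TASK_ABS_CPU) is honored only on control nodes.
        value = override if (is_control_node and override is not None) else detected
        return int(value * per_unit_forks)

    # Execution node: override ignored, capacity comes from the detected 2 CPUs * 4 forks = 8.
    assert effective_forks(detected=2.0, override=1.0, per_unit_forks=4, is_control_node=False) == 8
    # Control node: the override replaces the detected value.
    assert effective_forks(detected=2.0, override=1.0, per_unit_forks=4, is_control_node=True) == 4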
@@ -1,8 +1,43 @@
import signal
import functools

from awx.main.tasks.signals import signal_state, signal_callback, with_signal_handling


def pytest_sigint():
    pytest_sigint.called_count += 1


def pytest_sigterm():
    pytest_sigterm.called_count += 1


def tmp_signals_for_test(func):
    """
    When we run our internal signal handlers, they call the original signal
    handlers once their own work is finished.
    That would normally crash the test runners, because those original handlers
    shut down the process.
    So this decorator safely replaces the existing signal handlers
    with handlers that do nothing, so that tests do not crash.
    """

    @functools.wraps(func)
    def wrapper():
        original_sigterm = signal.getsignal(signal.SIGTERM)
        original_sigint = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGTERM, pytest_sigterm)
        signal.signal(signal.SIGINT, pytest_sigint)
        pytest_sigterm.called_count = 0
        pytest_sigint.called_count = 0
        func()
        signal.signal(signal.SIGTERM, original_sigterm)
        signal.signal(signal.SIGINT, original_sigint)

    return wrapper


@tmp_signals_for_test
def test_outer_inner_signal_handling():
    """
    Even if the flag is set in the outer context, its value should persist in the inner context
@@ -15,17 +50,22 @@ def test_outer_inner_signal_handling():
    @with_signal_handling
    def f1():
        assert signal_callback() is False
        signal_state.set_flag()
        signal_state.set_sigterm_flag()
        assert signal_callback()
        f2()

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 0
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm
    assert pytest_sigterm.called_count == 1
    assert pytest_sigint.called_count == 0


@tmp_signals_for_test
def test_inner_outer_signal_handling():
    """
    Even if the flag is set in the inner context, its value should persist in the outer context
@@ -34,7 +74,7 @@ def test_inner_outer_signal_handling():
    @with_signal_handling
    def f2():
        assert signal_callback() is False
        signal_state.set_flag()
        signal_state.set_sigint_flag()
        assert signal_callback()

    @with_signal_handling
@@ -45,6 +85,10 @@ def test_inner_outer_signal_handling():

    original_sigterm = signal.getsignal(signal.SIGTERM)
    assert signal_callback() is False
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 0
    f1()
    assert signal_callback() is False
    assert signal.getsignal(signal.SIGTERM) is original_sigterm
    assert pytest_sigterm.called_count == 0
    assert pytest_sigint.called_count == 1
@@ -143,13 +143,6 @@ def test_send_notifications_job_id(mocker):
    assert UnifiedJob.objects.get.called_with(id=1)


def test_work_success_callback_missing_job():
    task_data = {'type': 'project_update', 'id': 9999}
    with mock.patch('django.db.models.query.QuerySet.get') as get_mock:
        get_mock.side_effect = ProjectUpdate.DoesNotExist()
        assert system.handle_work_success(task_data) is None


@mock.patch('awx.main.models.UnifiedJob.objects.get')
@mock.patch('awx.main.models.Notification.objects.filter')
def test_send_notifications_list(mock_notifications_filter, mock_job_get, mocker):
@@ -23,7 +23,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
from django.db import connection, transaction, ProgrammingError
from django.db import connection, transaction, ProgrammingError, IntegrityError
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
@@ -768,14 +768,13 @@ def get_corrected_cpu(cpu_count):  # formerly get_cpu_capacity
    return cpu_count  # no correction


def get_cpu_effective_capacity(cpu_count):
def get_cpu_effective_capacity(cpu_count, is_control_node=False):
    from django.conf import settings

    cpu_count = get_corrected_cpu(cpu_count)

    settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
    env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)

    if is_control_node:
        cpu_count = get_corrected_cpu(cpu_count)
    if env_forkcpu:
        forkcpu = int(env_forkcpu)
    elif settings_forkcpu:
@@ -834,6 +833,7 @@ def get_corrected_memory(memory):

    # Runner returns memory in bytes
    # so we convert memory from settings to bytes as well.

    if env_absmem is not None:
        return convert_mem_str_to_bytes(env_absmem)
    elif settings_absmem is not None:
@@ -842,14 +842,13 @@ def get_corrected_memory(memory):
        return memory


def get_mem_effective_capacity(mem_bytes):
def get_mem_effective_capacity(mem_bytes, is_control_node=False):
    from django.conf import settings

    mem_bytes = get_corrected_memory(mem_bytes)

    settings_mem_mb_per_fork = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
    env_mem_mb_per_fork = os.getenv('SYSTEM_TASK_FORKS_MEM', None)

    if is_control_node:
        mem_bytes = get_corrected_memory(mem_bytes)
    if env_mem_mb_per_fork:
        mem_mb_per_fork = int(env_mem_mb_per_fork)
    elif settings_mem_mb_per_fork:
@@ -1165,13 +1164,24 @@ def create_partition(tblname, start=None):
    try:
        with transaction.atomic():
            with connection.cursor() as cursor:
                cursor.execute(f"SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = '{tblname}_{partition_label}');")
                row = cursor.fetchone()
                if row is not None:
                    for val in row:  # should only have 1
                        if val is True:
                            logger.debug(f'Event partition table {tblname}_{partition_label} already exists')
                            return

                cursor.execute(
                    f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} '
                    f'PARTITION OF {tblname} '
                    f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');'
                    f'CREATE TABLE {tblname}_{partition_label} (LIKE {tblname} INCLUDING DEFAULTS INCLUDING CONSTRAINTS); '
                    f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
                    f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
                )
    except ProgrammingError as e:
        logger.debug(f'Caught known error due to existing partition: {e}')
    except (ProgrammingError, IntegrityError) as e:
        if 'already exists' in str(e):
            logger.info(f'Caught known error due to partition creation race: {e}')
        else:
            raise
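A sketch of the SQL the reworked create_partition now issues, with illustrative table and label values (the real names come from tblname and the computed partition_label); a concurrent creation attempt surfaces as a ProgrammingError/IntegrityError containing 'already exists' and is logged rather than raised:

    tblname, partition_label = 'main_jobevent', '20230901_00'  # illustrative values only
    start_timestamp, end_timestamp = '2023-09-01 00:00:00+00:00', '2023-09-01 01:00:00+00:00'
    sql = (
        f'CREATE TABLE {tblname}_{partition_label} (LIKE {tblname} INCLUDING DEFAULTS INCLUDING CONSTRAINTS); '
        f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
        f"FOR VALUES FROM ('{start_timestamp}') TO ('{end_timestamp}');"
    )
    print(sql)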
def cleanup_new_process(func):
@@ -17,11 +17,26 @@ def construct_rsyslog_conf_template(settings=settings):
    port = getattr(settings, 'LOG_AGGREGATOR_PORT', '')
    protocol = getattr(settings, 'LOG_AGGREGATOR_PROTOCOL', '')
    timeout = getattr(settings, 'LOG_AGGREGATOR_TCP_TIMEOUT', 5)
    max_disk_space_main_queue = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_GB', 1)
    action_queue_size = getattr(settings, 'LOG_AGGREGATOR_ACTION_QUEUE_SIZE', 131072)
    max_disk_space_action_queue = getattr(settings, 'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB', 1)
    spool_directory = getattr(settings, 'LOG_AGGREGATOR_MAX_DISK_USAGE_PATH', '/var/lib/awx').rstrip('/')
    error_log_file = getattr(settings, 'LOG_AGGREGATOR_RSYSLOGD_ERROR_LOG_FILE', '')

    queue_options = [
        f'queue.spoolDirectory="{spool_directory}"',
        'queue.filename="awx-external-logger-action-queue"',
        f'queue.maxDiskSpace="{max_disk_space_action_queue}g"',  # overall disk space for all queue files
        'queue.maxFileSize="100m"',  # individual file size
        'queue.type="LinkedList"',
        'queue.saveOnShutdown="on"',
        'queue.syncqueuefiles="on"',  # (f)sync when checkpoint occurs
        'queue.checkpointInterval="1000"',  # Update disk queue every 1000 messages
        f'queue.size="{action_queue_size}"',  # max number of messages in queue
        f'queue.highwaterMark="{int(action_queue_size * 0.75)}"',  # 75% of queue.size
        f'queue.discardMark="{int(action_queue_size * 0.9)}"',  # 90% of queue.size
        'queue.discardSeverity="5"',  # Only discard notice, info, debug if we must discard anything
    ]

    if not os.access(spool_directory, os.W_OK):
        spool_directory = '/var/lib/awx'
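For reference, the derived thresholds with the default LOG_AGGREGATOR_ACTION_QUEUE_SIZE of 131072 work out to the values baked into the expected rsyslog configs earlier in this diff; a quick check:

    action_queue_size = 131072
    assert int(action_queue_size * 0.75) == 98304   # queue.highwaterMark: start spilling to disk
    assert int(action_queue_size * 0.9) == 117964   # queue.discardMark: start discarding notice/info/debug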
@@ -33,7 +48,6 @@ def construct_rsyslog_conf_template(settings=settings):
        '$WorkDirectory /var/lib/awx/rsyslog',
        f'$MaxMessageSize {max_bytes}',
        '$IncludeConfig /var/lib/awx/rsyslog/conf.d/*.conf',
        f'main_queue(queue.spoolDirectory="{spool_directory}" queue.maxdiskspace="{max_disk_space_main_queue}g" queue.type="Disk" queue.filename="awx-external-logger-backlog")',  # noqa
        'module(load="imuxsock" SysSock.Use="off")',
        'input(type="imuxsock" Socket="' + settings.LOGGING['handlers']['external_logger']['address'] + '" unlink="on" RateLimit.Burst="0")',
        'template(name="awx" type="string" string="%rawmsg-after-pri%")',
@@ -79,12 +93,7 @@ def construct_rsyslog_conf_template(settings=settings):
            'action.resumeRetryCount="-1"',
            'template="awx"',
            f'action.resumeInterval="{timeout}"',
            f'queue.spoolDirectory="{spool_directory}"',
            'queue.filename="awx-external-logger-action-queue"',
            f'queue.maxdiskspace="{max_disk_space_action_queue}g"',
            'queue.type="LinkedList"',
            'queue.saveOnShutdown="on"',
        ]
        ] + queue_options
        if error_log_file:
            params.append(f'errorfile="{error_log_file}"')
        if parsed.path:
@@ -112,9 +121,18 @@ def construct_rsyslog_conf_template(settings=settings):
        params = ' '.join(params)
        parts.extend(['module(load="omhttp")', f'action({params})'])
    elif protocol and host and port:
        parts.append(
            f'action(type="omfwd" target="{host}" port="{port}" protocol="{protocol}" action.resumeRetryCount="-1" action.resumeInterval="{timeout}" template="awx")'  # noqa
        )
        params = [
            'type="omfwd"',
            f'target="{host}"',
            f'port="{port}"',
            f'protocol="{protocol}"',
            'action.resumeRetryCount="-1"',
            f'action.resumeInterval="{timeout}"',
            'template="awx"',
        ] + queue_options
        params = ' '.join(params)
        parts.append(f'action({params})')

    else:
        parts.append('action(type="omfile" file="/dev/null")')  # rsyslog needs *at least* one valid action to start
    tmpl = '\n'.join(parts)
@@ -199,6 +199,8 @@ class Licenser(object):
                    license['support_level'] = attr.get('value')
                elif attr.get('name') == 'usage':
                    license['usage'] = attr.get('value')
                elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
                    license['license_type'] = 'developer'

        if not license:
            logger.error("No valid subscriptions found in manifest")
@@ -322,7 +324,9 @@ class Licenser(object):
    def generate_license_options_from_entitlements(self, json):
        from dateutil.parser import parse

        ValidSub = collections.namedtuple('ValidSub', 'sku name support_level end_date trial quantity pool_id satellite subscription_id account_number usage')
        ValidSub = collections.namedtuple(
            'ValidSub', 'sku name support_level end_date trial developer_license quantity pool_id satellite subscription_id account_number usage'
        )
        valid_subs = []
        for sub in json:
            satellite = sub.get('satellite')
@@ -350,6 +354,7 @@ class Licenser(object):

            sku = sub['productId']
            trial = sku.startswith('S')  # i.e., SER/SVC
            developer_license = False
            support_level = ''
            usage = ''
            pool_id = sub['id']
@@ -364,9 +369,24 @@ class Licenser(object):
                        support_level = attr.get('value')
                    elif attr.get('name') == 'usage':
                        usage = attr.get('value')
                    elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
                        developer_license = True

            valid_subs.append(
                ValidSub(sku, sub['productName'], support_level, end_date, trial, quantity, pool_id, satellite, subscription_id, account_number, usage)
                ValidSub(
                    sku,
                    sub['productName'],
                    support_level,
                    end_date,
                    trial,
                    developer_license,
                    quantity,
                    pool_id,
                    satellite,
                    subscription_id,
                    account_number,
                    usage,
                )
            )

        if valid_subs:
@@ -381,6 +401,8 @@ class Licenser(object):
            if sub.trial:
                license._attrs['trial'] = True
                license._attrs['license_type'] = 'trial'
            if sub.developer_license:
                license._attrs['license_type'] = 'developer'
            license._attrs['instance_count'] = min(MAX_INSTANCES, license._attrs['instance_count'])
            human_instances = license._attrs['instance_count']
            if human_instances == MAX_INSTANCES:
@@ -3,6 +3,8 @@ import logging
import asyncio
from typing import Dict

import ipaddress

import aiohttp
from aiohttp import client_exceptions
import aioredis
@@ -71,7 +73,16 @@ class WebsocketRelayConnection:
        if not self.channel_layer:
            self.channel_layer = get_channel_layer()

        uri = f"{self.protocol}://{self.remote_host}:{self.remote_port}/websocket/relay/"
        # figure out if what we have is an ip address; IPv6 addresses must be bracketed in the uri
        uri_hostname = self.remote_host
        try:
            # Throws ValueError if self.remote_host is a hostname like example.com, not an IPv4 or IPv6 ip address
            if isinstance(ipaddress.ip_address(uri_hostname), ipaddress.IPv6Address):
                uri_hostname = f"[{uri_hostname}]"
        except ValueError:
            pass

        uri = f"{self.protocol}://{uri_hostname}:{self.remote_port}/websocket/relay/"
        timeout = aiohttp.ClientTimeout(total=10)

        secret_val = WebsocketSecretAuthHelper.construct_secret()
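A standalone sketch of the bracket-wrapping rule added above: IPv6 literals must be bracketed inside a URI, while hostnames and IPv4 addresses pass through unchanged (example values are illustrative):

    import ipaddress

    def uri_host(remote_host):
        try:
            if isinstance(ipaddress.ip_address(remote_host), ipaddress.IPv6Address):
                return f"[{remote_host}]"
        except ValueError:  # not an IP literal, e.g. "example.com"
            pass
        return remote_host

    assert uri_host("2001:db8::1") == "[2001:db8::1]"
    assert uri_host("10.0.0.5") == "10.0.0.5"
    assert uri_host("example.com") == "example.com"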
@@ -216,7 +227,8 @@ class WebSocketRelayManager(object):
                continue
            try:
                if not notif.payload or notif.channel != "web_ws_heartbeat":
                    return
                    logger.warning(f"Unexpected channel or missing payload. {notif.channel}, {notif.payload}")
                    continue

                try:
                    payload = json.loads(notif.payload)
@@ -224,13 +236,15 @@ class WebSocketRelayManager(object):
                    logmsg = "Failed to decode message from pg_notify channel `web_ws_heartbeat`"
                    if logger.isEnabledFor(logging.DEBUG):
                        logmsg = "{} {}".format(logmsg, payload)
                    logger.warning(logmsg)
                    return
                    logger.warning(logmsg)
                    continue

                # Skip if the message comes from the same host we are running on
                # In this case, we'll be sharing a redis, no need to relay.
                if payload.get("hostname") == self.local_hostname:
                    return
                hostname = payload.get("hostname")
                    logger.debug("Received a heartbeat request for {hostname}. Skipping as we use redis for local host.")
                    continue

                action = payload.get("action")

@@ -239,7 +253,7 @@ class WebSocketRelayManager(object):
                ip = payload.get("ip") or hostname  # try back to hostname if ip isn't supplied
                if ip is None:
                    logger.warning(f"Received invalid {action} ws_heartbeat, missing hostname and ip: {payload}")
                    return
                    continue
                logger.debug(f"Web host {hostname} ({ip}) {action} heartbeat received.")

                if action == "online":
@@ -336,6 +336,7 @@ INSTALLED_APPS = [
    'awx.ui',
    'awx.sso',
    'solo',
    'ansible_base',
]

INTERNAL_IPS = ('127.0.0.1',)
@@ -470,7 +471,7 @@ CELERYBEAT_SCHEDULE = {
    'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
    'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
    'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
    'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
    'cleanup_host_metrics': {'task': 'awx.main.tasks.host_metrics.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
    'host_metric_summary_monthly': {'task': 'awx.main.tasks.host_metrics.host_metric_summary_monthly', 'schedule': timedelta(hours=4)},
}

@@ -796,7 +797,7 @@ LOG_AGGREGATOR_ENABLED = False
LOG_AGGREGATOR_TCP_TIMEOUT = 5
LOG_AGGREGATOR_VERIFY_CERT = True
LOG_AGGREGATOR_LEVEL = 'INFO'
LOG_AGGREGATOR_MAX_DISK_USAGE_GB = 1  # Main queue
LOG_AGGREGATOR_ACTION_QUEUE_SIZE = 131072
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB = 1  # Action queue
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH = '/var/lib/awx'
LOG_AGGREGATOR_RSYSLOGD_DEBUG = False
@@ -1049,7 +1050,7 @@ UI_NEXT = True
# - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
SUBSCRIPTION_USAGE_MODEL = ''

# Host metrics cleanup - last time of the cleanup run (soft-deleting records)
# Host metrics cleanup - last time of the task/command run
CLEANUP_HOST_METRICS_LAST_TS = None
# Host metrics cleanup - minimal interval between two cleanups in days
CLEANUP_HOST_METRICS_INTERVAL = 30  # days
@@ -87,7 +87,7 @@ def _update_user_orgs(backend, desired_org_state, orgs_to_create, user=None):
        is_member_expression = org_opts.get(user_type, None)
        remove_members = bool(org_opts.get('remove_{}'.format(user_type), remove))
        has_role = _update_m2m_from_expression(user, is_member_expression, remove_members)
        desired_org_state[organization_name][role_name] = has_role
        desired_org_state[organization_name][role_name] = desired_org_state[organization_name].get(role_name, False) or has_role


def _update_user_teams(backend, desired_team_state, teams_to_create, user=None):
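The one-line change above makes role grants accumulate across sources instead of being overwritten; a small sketch of the merge semantics (names are illustrative):

    desired_org_state = {'o1_alias': {'admin_role': True}}   # already granted, e.g. by the SAML attribute
    has_role = False                                          # the ORGANIZATION_MAP evaluation for this role
    role_name = 'admin_role'
    desired_org_state['o1_alias'][role_name] = desired_org_state['o1_alias'].get(role_name, False) or has_role
    assert desired_org_state['o1_alias'][role_name] is True   # the earlier grant is preserved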
@@ -637,3 +637,75 @@ class TestSAMLUserFlags:
        }

        assert expected == _check_flag(user, 'superuser', attributes, user_flags_settings)


@pytest.mark.django_db
def test__update_user_orgs_org_map_and_saml_attr():
    """
    This combines the action of two other tests where an org membership is defined both by
    the ORGANIZATION_MAP and the SOCIAL_AUTH_SAML_ORGANIZATION_ATTR at the same time
    """

    # This data will make the user a member
    class BackendClass:
        s = {
            'ORGANIZATION_MAP': {
                'Default1': {
                    'remove': True,
                    'remove_admins': True,
                    'users': 'foobar',
                    'remove_users': True,
                    'organization_alias': 'o1_alias',
                }
            }
        }

        def setting(self, key):
            return self.s[key]

    backend = BackendClass()

    setting = {
        'saml_attr': 'memberOf',
        'saml_admin_attr': 'admins',
        'saml_auditor_attr': 'auditors',
        'remove': True,
        'remove_admins': True,
    }

    # This data from the server will make the user an admin of the organization
    kwargs = {
        'username': 'foobar',
        'uid': 'idp:cmeyers@redhat.com',
        'request': {u'SAMLResponse': [], u'RelayState': [u'idp']},
        'is_new': False,
        'response': {
            'session_index': '_0728f0e0-b766-0135-75fa-02842b07c044',
            'idp_name': u'idp',
            'attributes': {
                'admins': ['Default1'],
            },
        },
        'social': None,
        'strategy': None,
        'new_association': False,
    }

    this_user = User.objects.create(username='foobar')

    with override_settings(SOCIAL_AUTH_SAML_ORGANIZATION_ATTR=setting):
        desired_org_state = {}
        orgs_to_create = []

        # this should add user as an admin of the org
        _update_user_orgs_by_saml_attr(backend, desired_org_state, orgs_to_create, **kwargs)
        assert desired_org_state['o1_alias']['admin_role'] is True

        assert set(orgs_to_create) == set(['o1_alias'])

        # this should add user as a member of the org without reverting the admin status
        _update_user_orgs(backend, desired_org_state, orgs_to_create, this_user)
        assert desired_org_state['o1_alias']['member_role'] is True
        assert desired_org_state['o1_alias']['admin_role'] is True

        assert set(orgs_to_create) == set(['o1_alias'])
awx/ui/package-lock.json (generated, 1771 lines changed; diff suppressed because it is too large)
@@ -33,12 +33,12 @@
|
||||
"styled-components": "5.3.6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/core": "^7.16.10",
|
||||
"@babel/eslint-parser": "^7.16.5",
|
||||
"@babel/eslint-plugin": "^7.16.5",
|
||||
"@babel/plugin-syntax-jsx": "7.16.7",
|
||||
"@babel/polyfill": "^7.8.7",
|
||||
"@babel/preset-react": "7.16.7",
|
||||
"@babel/core": "^7.22.9",
|
||||
"@babel/eslint-parser": "^7.22.9",
|
||||
"@babel/eslint-plugin": "^7.22.10",
|
||||
"@babel/plugin-syntax-jsx": "^7.22.5",
|
||||
"@babel/polyfill": "^7.12.1",
|
||||
"@babel/preset-react": "^7.22.5",
|
||||
"@cypress/instrument-cra": "^1.4.0",
|
||||
"@lingui/cli": "^3.7.1",
|
||||
"@lingui/loader": "3.15.0",
|
||||
|
||||
@@ -33,6 +33,7 @@ import Roles from './models/Roles';
|
||||
import Root from './models/Root';
|
||||
import Schedules from './models/Schedules';
|
||||
import Settings from './models/Settings';
|
||||
import SubscriptionUsage from './models/SubscriptionUsage';
|
||||
import SystemJobs from './models/SystemJobs';
|
||||
import SystemJobTemplates from './models/SystemJobTemplates';
|
||||
import Teams from './models/Teams';
|
||||
@@ -82,6 +83,7 @@ const RolesAPI = new Roles();
|
||||
const RootAPI = new Root();
|
||||
const SchedulesAPI = new Schedules();
|
||||
const SettingsAPI = new Settings();
|
||||
const SubscriptionUsageAPI = new SubscriptionUsage();
|
||||
const SystemJobsAPI = new SystemJobs();
|
||||
const SystemJobTemplatesAPI = new SystemJobTemplates();
|
||||
const TeamsAPI = new Teams();
|
||||
@@ -132,6 +134,7 @@ export {
|
||||
RootAPI,
|
||||
SchedulesAPI,
|
||||
SettingsAPI,
|
||||
SubscriptionUsageAPI,
|
||||
SystemJobsAPI,
|
||||
SystemJobTemplatesAPI,
|
||||
TeamsAPI,
|
||||
|
||||
awx/ui/src/api/models/SubscriptionUsage.js (new file, 16 lines)
@@ -0,0 +1,16 @@
import Base from '../Base';

class SubscriptionUsage extends Base {
  constructor(http) {
    super(http);
    this.baseUrl = 'api/v2/host_metric_summary_monthly/';
  }

  readSubscriptionUsageChart(dateRange) {
    return this.http.get(
      `${this.baseUrl}?date__gte=${dateRange}&order_by=date&page_size=100`
    );
  }
}

export default SubscriptionUsage;
@@ -11,6 +11,7 @@ import {
|
||||
WorkflowJobsAPI,
|
||||
WorkflowJobTemplatesAPI,
|
||||
} from 'api';
|
||||
import useToast, { AlertVariant } from 'hooks/useToast';
|
||||
import AlertModal from '../AlertModal';
|
||||
import ErrorDetail from '../ErrorDetail';
|
||||
import LaunchPrompt from '../LaunchPrompt';
|
||||
@@ -45,8 +46,22 @@ function LaunchButton({ resource, children }) {
|
||||
const [isLaunching, setIsLaunching] = useState(false);
|
||||
const [resourceCredentials, setResourceCredentials] = useState([]);
|
||||
const [error, setError] = useState(null);
|
||||
const { addToast, Toast, toastProps } = useToast();
|
||||
|
||||
const showToast = () => {
|
||||
addToast({
|
||||
id: resource.id,
|
||||
title: t`A job has already been launched`,
|
||||
variant: AlertVariant.info,
|
||||
hasTimeout: true,
|
||||
});
|
||||
};
|
||||
|
||||
const handleLaunch = async () => {
|
||||
if (isLaunching) {
|
||||
showToast();
|
||||
return;
|
||||
}
|
||||
setIsLaunching(true);
|
||||
const readLaunch =
|
||||
resource.type === 'workflow_job_template'
|
||||
@@ -104,6 +119,11 @@ function LaunchButton({ resource, children }) {
|
||||
};
|
||||
|
||||
const launchWithParams = async (params) => {
|
||||
if (isLaunching) {
|
||||
showToast();
|
||||
return;
|
||||
}
|
||||
setIsLaunching(true);
|
||||
try {
|
||||
let jobPromise;
|
||||
|
||||
@@ -141,6 +161,10 @@ function LaunchButton({ resource, children }) {
|
||||
let readRelaunch;
|
||||
let relaunch;
|
||||
|
||||
if (isLaunching) {
|
||||
showToast();
|
||||
return;
|
||||
}
|
||||
setIsLaunching(true);
|
||||
if (resource.type === 'inventory_update') {
|
||||
// We'll need to handle the scenario where the src no longer exists
|
||||
@@ -197,6 +221,7 @@ function LaunchButton({ resource, children }) {
|
||||
handleRelaunch,
|
||||
isLaunching,
|
||||
})}
|
||||
<Toast {...toastProps} />
|
||||
{error && (
|
||||
<AlertModal
|
||||
isOpen={error}
|
||||
|
||||
@@ -75,6 +75,7 @@ function SessionProvider({ children }) {
|
||||
const [sessionCountdown, setSessionCountdown] = useState(0);
|
||||
const [authRedirectTo, setAuthRedirectTo] = useState('/');
|
||||
const [isUserBeingLoggedOut, setIsUserBeingLoggedOut] = useState(false);
|
||||
const [isRedirectLinkReceived, setIsRedirectLinkReceived] = useState(false);
|
||||
|
||||
const {
|
||||
request: fetchLoginRedirectOverride,
|
||||
@@ -99,6 +100,7 @@ function SessionProvider({ children }) {
|
||||
|
||||
const logout = useCallback(async () => {
|
||||
setIsUserBeingLoggedOut(true);
|
||||
setIsRedirectLinkReceived(false);
|
||||
if (!isSessionExpired.current) {
|
||||
setAuthRedirectTo('/logout');
|
||||
window.localStorage.setItem(SESSION_USER_ID, null);
|
||||
@@ -112,6 +114,18 @@ function SessionProvider({ children }) {
|
||||
return <Redirect to="/login" />;
|
||||
}, [setSessionTimeout, setSessionCountdown]);
|
||||
|
||||
useEffect(() => {
|
||||
const unlisten = history.listen((location, action) => {
|
||||
if (action === 'POP') {
|
||||
setIsRedirectLinkReceived(true);
|
||||
}
|
||||
});
|
||||
|
||||
return () => {
|
||||
unlisten(); // ensure that the listener is removed when the component unmounts
|
||||
};
|
||||
}, [history]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isAuthenticated(document.cookie)) {
|
||||
return () => {};
|
||||
@@ -176,6 +190,8 @@ function SessionProvider({ children }) {
|
||||
logout,
|
||||
sessionCountdown,
|
||||
setAuthRedirectTo,
|
||||
isRedirectLinkReceived,
|
||||
setIsRedirectLinkReceived,
|
||||
}),
|
||||
[
|
||||
authRedirectTo,
|
||||
@@ -186,6 +202,8 @@ function SessionProvider({ children }) {
|
||||
logout,
|
||||
sessionCountdown,
|
||||
setAuthRedirectTo,
|
||||
isRedirectLinkReceived,
|
||||
setIsRedirectLinkReceived,
|
||||
]
|
||||
);
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import Organizations from 'screens/Organization';
import Projects from 'screens/Project';
import Schedules from 'screens/Schedule';
import Settings from 'screens/Setting';
import SubscriptionUsage from 'screens/SubscriptionUsage/SubscriptionUsage';
import Teams from 'screens/Team';
import Templates from 'screens/Template';
import TopologyView from 'screens/TopologyView';
@@ -61,6 +62,11 @@ function getRouteConfig(userProfile = {}) {
        path: '/host_metrics',
        screen: HostMetrics,
      },
      {
        title: <Trans>Subscription Usage</Trans>,
        path: '/subscription_usage',
        screen: SubscriptionUsage,
      },
    ],
  },
  {
@@ -189,6 +195,7 @@ function getRouteConfig(userProfile = {}) {
    'unique_managed_hosts'
  ) {
    deleteRoute('host_metrics');
    deleteRoute('subscription_usage');
  }
  if (userProfile?.isSuperUser || userProfile?.isSystemAuditor)
    return routeConfig;
@@ -197,6 +204,7 @@ function getRouteConfig(userProfile = {}) {
  deleteRoute('management_jobs');
  deleteRoute('topology_view');
  deleteRoute('instances');
  deleteRoute('subscription_usage');
  if (userProfile?.isOrgAdmin) return routeConfig;
  if (!userProfile?.isNotificationAdmin) deleteRoute('notification_templates');


@@ -31,6 +31,7 @@ describe('getRouteConfig', () => {
    '/activity_stream',
    '/workflow_approvals',
    '/host_metrics',
    '/subscription_usage',
    '/templates',
    '/credentials',
    '/projects',
@@ -61,6 +62,7 @@ describe('getRouteConfig', () => {
    '/activity_stream',
    '/workflow_approvals',
    '/host_metrics',
    '/subscription_usage',
    '/templates',
    '/credentials',
    '/projects',
@@ -302,9 +302,9 @@ function HostsByProcessorTypeExample() {

  const hostsByProcessorLimit = `intel_hosts`;
  const hostsByProcessorSourceVars = `plugin: constructed
strict: true
groups:
  intel_hosts: "GenuineIntel" in ansible_processor`;
strict: true
groups:
  intel_hosts: "'GenuineIntel' in ansible_processor"`;

  return (
    <FormFieldGroupExpandable

@@ -45,7 +45,7 @@ describe('<ConstructedInventoryHint />', () => {
    );
    expect(navigator.clipboard.writeText).toHaveBeenCalledWith(
      expect.stringContaining(
        'intel_hosts: "GenuineIntel" in ansible_processor'
        `intel_hosts: \"'GenuineIntel' in ansible_processor\"`
      )
    );
  });
@@ -53,13 +53,9 @@ const getStdOutValue = (hostEvent) => {
  const res = hostEvent?.event_data?.res;

  let stdOut;
  if (taskAction === 'debug' && res.result && res.result.stdout) {
  if (taskAction === 'debug' && res?.result?.stdout) {
    stdOut = res.result.stdout;
  } else if (
    taskAction === 'yum' &&
    res.results &&
    Array.isArray(res.results)
  ) {
  } else if (taskAction === 'yum' && Array.isArray(res?.results)) {
    stdOut = res.results.join('\n');
  } else if (res?.stdout) {
    stdOut = Array.isArray(res.stdout) ? res.stdout.join(' ') : res.stdout;
@@ -45,7 +45,8 @@ const Login = styled(PFLogin)`

function AWXLogin({ alt, isAuthenticated }) {
  const [userId, setUserId] = useState(null);
  const { authRedirectTo, isSessionExpired } = useSession();
  const { authRedirectTo, isSessionExpired, isRedirectLinkReceived } =
    useSession();
  const isNewUser = useRef(true);
  const hasVerifiedUser = useRef(false);

@@ -179,7 +180,8 @@ function AWXLogin({ alt, isAuthenticated }) {
    return <LoadingSpinner />;
  }
  if (userId && hasVerifiedUser.current) {
    const redirect = isNewUser.current ? '/home' : authRedirectTo;
    const redirect =
      isNewUser.current && !isRedirectLinkReceived ? '/home' : authRedirectTo;

    return <Redirect to={redirect} />;
  }
@@ -29,7 +29,7 @@ SettingsAPI.readCategory.mockResolvedValue({
|
||||
LOG_AGGREGATOR_TCP_TIMEOUT: 5,
|
||||
LOG_AGGREGATOR_VERIFY_CERT: true,
|
||||
LOG_AGGREGATOR_LEVEL: 'INFO',
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_GB: 1,
|
||||
LOG_AGGREGATOR_ACTION_QUEUE_SIZE: 131072,
|
||||
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB: 1,
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH: '/var/lib/awx',
|
||||
LOG_AGGREGATOR_RSYSLOGD_DEBUG: false,
|
||||
|
||||
@@ -31,7 +31,7 @@ const mockSettings = {
|
||||
LOG_AGGREGATOR_TCP_TIMEOUT: 123,
|
||||
LOG_AGGREGATOR_VERIFY_CERT: true,
|
||||
LOG_AGGREGATOR_LEVEL: 'ERROR',
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_GB: 1,
|
||||
LOG_AGGREGATOR_ACTION_QUEUE_SIZE: 131072,
|
||||
LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB: 1,
|
||||
LOG_AGGREGATOR_MAX_DISK_USAGE_PATH: '/var/lib/awx',
|
||||
LOG_AGGREGATOR_RSYSLOGD_DEBUG: false,
|
||||
|
||||
@@ -659,21 +659,21 @@
|
||||
]
|
||||
]
|
||||
},
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": {
|
||||
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": {
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"label": "Maximum disk persistence for external log aggregation (in GB)",
|
||||
"help_text": "Amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. Notably, this is used for the rsyslogd main queue (for input messages).",
|
||||
"label": "Maximum number of messages that can be stored in the log action queue",
|
||||
"help_text": "Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5).",
|
||||
"min_value": 1,
|
||||
"category": "Logging",
|
||||
"category_slug": "logging",
|
||||
"default": 1
|
||||
"default": 131072
|
||||
},
|
||||
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": {
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"label": "Maximum disk persistence for rsyslogd action queuing (in GB)",
|
||||
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
|
||||
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
|
||||
"min_value": 1,
|
||||
"category": "Logging",
|
||||
"category_slug": "logging",
|
||||
@@ -5016,10 +5016,10 @@
|
||||
]
|
||||
]
|
||||
},
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": {
|
||||
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": {
|
||||
"type": "integer",
|
||||
"label": "Maximum disk persistence for external log aggregation (in GB)",
|
||||
"help_text": "Amount of data to store (in gigabytes) during an outage of the external log aggregator (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting for main_queue. Notably, this is used for the rsyslogd main queue (for input messages).",
|
||||
"label": "Maximum number of messages that can be stored in the log action queue",
|
||||
"help_text": "Defines how large the rsyslog action queue can grow in number of messages stored. This can have an impact on memory utilization. When the queue reaches 75% of this number, the queue will start writing to disk (queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and DEBUG messages will start to be discarded (queue.discardMark with queue.discardSeverity=5).",
|
||||
"min_value": 1,
|
||||
"category": "Logging",
|
||||
"category_slug": "logging",
|
||||
@@ -5028,7 +5028,7 @@
|
||||
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": {
|
||||
"type": "integer",
|
||||
"label": "Maximum disk persistence for rsyslogd action queuing (in GB)",
|
||||
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). Like LOG_AGGREGATOR_MAX_DISK_USAGE_GB, it stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
|
||||
"help_text": "Amount of data to store (in gigabytes) if an rsyslog action takes time to process an incoming message (defaults to 1). Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.",
|
||||
"min_value": 1,
|
||||
"category": "Logging",
|
||||
"category_slug": "logging",
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
"LOG_AGGREGATOR_TCP_TIMEOUT": 5,
|
||||
"LOG_AGGREGATOR_VERIFY_CERT": true,
|
||||
"LOG_AGGREGATOR_LEVEL": "INFO",
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": 1,
|
||||
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": 131072,
|
||||
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": 1,
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_PATH": "/var/lib/awx",
|
||||
"LOG_AGGREGATOR_RSYSLOGD_DEBUG": false,
|
||||
@@ -548,4 +548,4 @@
|
||||
"adj_list": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"LOG_AGGREGATOR_TCP_TIMEOUT": 5,
|
||||
"LOG_AGGREGATOR_VERIFY_CERT": true,
|
||||
"LOG_AGGREGATOR_LEVEL": "INFO",
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_GB": 1,
|
||||
"LOG_AGGREGATOR_ACTION_QUEUE_SIZE": 131072,
|
||||
"LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB": 1,
|
||||
"LOG_AGGREGATOR_MAX_DISK_USAGE_PATH": "/var/lib/awx",
|
||||
"LOG_AGGREGATOR_RSYSLOGD_DEBUG": false,
|
||||
|
||||
@@ -0,0 +1,319 @@
import React, { useEffect, useCallback } from 'react';
import { string, number, shape, arrayOf } from 'prop-types';
import * as d3 from 'd3';
import { t } from '@lingui/macro';
import { PageContextConsumer } from '@patternfly/react-core';
import UsageChartTooltip from './UsageChartTooltip';

function UsageChart({ id, data, height, pageContext }) {
  const { isNavOpen } = pageContext;

  // Methods
  const draw = useCallback(() => {
    const margin = { top: 15, right: 25, bottom: 105, left: 70 };

    const getWidth = () => {
      let width;
      // This is in a try/catch due to an error from jest.
      // Even though the d3.select returns a valid selector with
      // style function, it says it is null in the test
      try {
        width =
          parseInt(d3.select(`#${id}`).style('width'), 10) -
            margin.left -
            margin.right || 700;
      } catch (error) {
        width = 700;
      }
      return width;
    };

    // Clear our chart container element first
    d3.selectAll(`#${id} > *`).remove();
    const width = getWidth();

    function transition(path) {
      path.transition().duration(1000).attrTween('stroke-dasharray', tweenDash);
    }

    function tweenDash(...params) {
      const l = params[2][params[1]].getTotalLength();
      const i = d3.interpolateString(`0,${l}`, `${l},${l}`);
      return (val) => i(val);
    }

    const x = d3.scaleTime().rangeRound([0, width]);
    const y = d3.scaleLinear().range([height, 0]);

    // [consumed, capacity]
    const colors = d3.scaleOrdinal(['#06C', '#C9190B']);
    const svg = d3
      .select(`#${id}`)
      .append('svg')
      .attr('width', width + margin.left + margin.right)
      .attr('height', height + margin.top + margin.bottom)
      .attr('z', 100)
      .append('g')
      .attr('id', 'chart-container')
      .attr('transform', `translate(${margin.left}, ${margin.top})`);
    // Tooltip
    const tooltip = new UsageChartTooltip({
      svg: `#${id}`,
      colors,
      label: t`Hosts`,
    });

    const parseTime = d3.timeParse('%Y-%m-%d');

    const formattedData = data?.reduce(
      (formatted, { date, license_consumed, license_capacity }) => {
        const MONTH = parseTime(date);
        const CONSUMED = +license_consumed;
        const CAPACITY = +license_capacity;
        return formatted.concat({ MONTH, CONSUMED, CAPACITY });
      },
      []
    );

    // Scale the range of the data
    const largestY = formattedData?.reduce((a_max, b) => {
      const b_max = Math.max(b.CONSUMED > b.CAPACITY ? b.CONSUMED : b.CAPACITY);
      return a_max > b_max ? a_max : b_max;
    }, 0);
    x.domain(d3.extent(formattedData, (d) => d.MONTH));
    y.domain([
      0,
      largestY > 4 ? largestY + Math.max(largestY / 10, 1) : 5,
    ]).nice();

    const capacityLine = d3
      .line()
      .curve(d3.curveMonotoneX)
      .x((d) => x(d.MONTH))
      .y((d) => y(d.CAPACITY));

    const consumedLine = d3
      .line()
      .curve(d3.curveMonotoneX)
      .x((d) => x(d.MONTH))
      .y((d) => y(d.CONSUMED));

    // Add the Y Axis
    svg
      .append('g')
      .attr('class', 'y-axis')
      .call(
        d3
          .axisLeft(y)
          .ticks(
            largestY > 3
              ? Math.min(largestY + Math.max(largestY / 10, 1), 10)
              : 5
          )
          .tickSize(-width)
          .tickFormat(d3.format('d'))
      )
      .selectAll('line')
      .attr('stroke', '#d7d7d7');
    svg.selectAll('.y-axis .tick text').attr('x', -5).attr('font-size', '14');

    // text label for the y axis
    svg
      .append('text')
      .attr('transform', 'rotate(-90)')
      .attr('y', 0 - margin.left)
      .attr('x', 0 - height / 2)
      .attr('dy', '1em')
      .style('text-anchor', 'middle')
      .text(t`Unique Hosts`);

    // Add the X Axis
    let ticks;
    const maxTicks = Math.round(
      formattedData.length / (formattedData.length / 2)
    );
    ticks = formattedData.map((d) => d.MONTH);
    if (formattedData.length === 13) {
      ticks = formattedData
        .map((d, i) => (i % maxTicks === 0 ? d.MONTH : undefined))
        .filter((item) => item);
    }

    svg.select('.domain').attr('stroke', '#d7d7d7');

    svg
      .append('g')
      .attr('class', 'x-axis')
      .attr('transform', `translate(0, ${height})`)
      .call(
        d3
          .axisBottom(x)
          .tickValues(ticks)
          .tickSize(-height)
          .tickFormat(d3.timeFormat('%m/%y'))
      )
      .selectAll('line')
      .attr('stroke', '#d7d7d7');

    svg
      .selectAll('.x-axis .tick text')
      .attr('x', -25)
      .attr('font-size', '14')
      .attr('transform', 'rotate(-65)');

    // text label for the x axis
    svg
      .append('text')
      .attr(
        'transform',
        `translate(${width / 2} , ${height + margin.top + 50})`
      )
      .style('text-anchor', 'middle')
      .text(t`Month`);
    const vertical = svg
      .append('path')
      .attr('class', 'mouse-line')
      .style('stroke', 'black')
      .style('stroke-width', '3px')
      .style('stroke-dasharray', '3, 3')
      .style('opacity', '0');

    const handleMouseOver = (event, d) => {
      tooltip.handleMouseOver(event, d);
      // show vertical line
      vertical.transition().style('opacity', '1');
    };
    const handleMouseMove = function mouseMove(event) {
      const [pointerX] = d3.pointer(event);
      vertical.attr('d', () => `M${pointerX},${height} ${pointerX},${0}`);
    };

    const handleMouseOut = () => {
      // hide tooltip
      tooltip.handleMouseOut();
      // hide vertical line
      vertical.transition().style('opacity', 0);
    };

    const dateFormat = d3.timeFormat('%m/%y');

    // Add the consumed line path
    svg
      .append('path')
      .data([formattedData])
      .attr('class', 'line')
      .style('fill', 'none')
      .style('stroke', () => colors(1))
      .attr('stroke-width', 2)
      .attr('d', consumedLine)
      .call(transition);

    // create our consumed line circles

    svg
      .selectAll('dot')
      .data(formattedData)
      .enter()
      .append('circle')
      .attr('r', 3)
      .style('stroke', () => colors(1))
      .style('fill', () => colors(1))
      .attr('cx', (d) => x(d.MONTH))
      .attr('cy', (d) => y(d.CONSUMED))
      .attr('id', (d) => `consumed-dot-${dateFormat(d.MONTH)}`)
      .on('mouseover', (event, d) => handleMouseOver(event, d))
      .on('mousemove', handleMouseMove)
      .on('mouseout', handleMouseOut);

    // Add the capacity line path
    svg
      .append('path')
      .data([formattedData])
      .attr('class', 'line')
      .style('fill', 'none')
      .style('stroke', () => colors(0))
      .attr('stroke-width', 2)
      .attr('d', capacityLine)
      .call(transition);

    // create our capacity line circles

    svg
      .selectAll('dot')
      .data(formattedData)
      .enter()
      .append('circle')
      .attr('r', 3)
      .style('stroke', () => colors(0))
      .style('fill', () => colors(0))
      .attr('cx', (d) => x(d.MONTH))
      .attr('cy', (d) => y(d.CAPACITY))
      .attr('id', (d) => `capacity-dot-${dateFormat(d.MONTH)}`)
      .on('mouseover', handleMouseOver)
      .on('mousemove', handleMouseMove)
      .on('mouseout', handleMouseOut);

    // Create legend
    const legend_keys = [t`Subscriptions consumed`, t`Subscription capacity`];
    let totalWidth = width / 2 - 175;

    const lineLegend = svg
      .selectAll('.lineLegend')
      .data(legend_keys)
      .enter()
      .append('g')
      .attr('class', 'lineLegend')
      .each(function formatLegend() {
        const current = d3.select(this);
        current.attr('transform', `translate(${totalWidth}, ${height + 90})`);
        totalWidth += 200;
      });

    lineLegend
      .append('text')
      .text((d) => d)
      .attr('font-size', '14')
      .attr('transform', 'translate(15,9)'); // align texts with boxes

    lineLegend
      .append('rect')
      .attr('fill', (d) => colors(d))
      .attr('width', 10)
      .attr('height', 10);
  }, [data, height, id]);

  useEffect(() => {
    draw();
  }, [draw, isNavOpen]);

  useEffect(() => {
    function handleResize() {
      draw();
    }

    window.addEventListener('resize', handleResize);

    handleResize();

    return () => window.removeEventListener('resize', handleResize);
  }, [draw]);

  return <div id={id} />;
}

UsageChart.propTypes = {
  id: string.isRequired,
  data: arrayOf(shape({})).isRequired,
  height: number.isRequired,
};

const withPageContext = (Component) =>
  function contextComponent(props) {
    return (
      <PageContextConsumer>
        {(pageContext) => <Component {...props} pageContext={pageContext} />}
      </PageContextConsumer>
    );
  };

export default withPageContext(UsageChart);
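For orientation, a minimal usage sketch of the component above. The row shape follows the reduce() call in draw() (date, license_consumed, license_capacity); the sample values and the wrapping component are illustrative only, and the import path mirrors the one used by SubscriptionUsageChart later in this diff.

import React from 'react';
import UsageChart from './ChartComponents/UsageChart';

// Hypothetical sample rows; in the product these come from the subscription usage API.
const sampleData = [
  { date: '2023-01-01', license_consumed: 40, license_capacity: 100 },
  { date: '2023-02-01', license_consumed: 55, license_capacity: 100 },
  { date: '2023-03-01', license_consumed: 62, license_capacity: 100 },
];

export default function ExampleUsage() {
  // UsageChart renders the d3 chart into the <div id={id}> it returns.
  return <UsageChart id="example-usage-chart" height={400} data={sampleData} />;
}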
@@ -0,0 +1,177 @@
import * as d3 from 'd3';
import { t } from '@lingui/macro';

class UsageChartTooltip {
  constructor(opts) {
    this.label = opts.label;
    this.svg = opts.svg;
    this.colors = opts.colors;

    this.draw();
  }

  draw() {
    this.toolTipBase = d3.select(`${this.svg} > svg`).append('g');
    this.toolTipBase.attr('id', 'chart-tooltip');
    this.toolTipBase.attr('overflow', 'visible');
    this.toolTipBase.style('opacity', 0);
    this.toolTipBase.style('pointer-events', 'none');
    this.toolTipBase.attr('transform', 'translate(100, 100)');
    this.boxWidth = 200;
    this.textWidthThreshold = 20;

    this.toolTipPoint = this.toolTipBase
      .append('rect')
      .attr('transform', 'translate(10, -10) rotate(45)')
      .attr('x', 0)
      .attr('y', 0)
      .attr('height', 20)
      .attr('width', 20)
      .attr('fill', '#393f44');
    this.boundingBox = this.toolTipBase
      .append('rect')
      .attr('x', 10)
      .attr('y', -41)
      .attr('rx', 2)
      .attr('height', 82)
      .attr('width', this.boxWidth)
      .attr('fill', '#393f44');
    this.circleBlue = this.toolTipBase
      .append('circle')
      .attr('cx', 26)
      .attr('cy', 0)
      .attr('r', 7)
      .attr('stroke', 'white')
      .attr('fill', this.colors(1));
    this.circleRed = this.toolTipBase
      .append('circle')
      .attr('cx', 26)
      .attr('cy', 26)
      .attr('r', 7)
      .attr('stroke', 'white')
      .attr('fill', this.colors(0));
    this.consumedText = this.toolTipBase
      .append('text')
      .attr('x', 43)
      .attr('y', 4)
      .attr('font-size', 12)
      .attr('fill', 'white')
      .text(t`Subscriptions consumed`);
    this.capacityText = this.toolTipBase
      .append('text')
      .attr('x', 43)
      .attr('y', 28)
      .attr('font-size', 12)
      .attr('fill', 'white')
      .text(t`Subscription capacity`);
    this.icon = this.toolTipBase
      .append('text')
      .attr('fill', 'white')
      .attr('stroke', 'white')
      .attr('x', 24)
      .attr('y', 30)
      .attr('font-size', 12);
    this.consumed = this.toolTipBase
      .append('text')
      .attr('fill', 'white')
      .attr('font-size', 12)
      .attr('x', 122)
      .attr('y', 4)
      .attr('id', 'consumed-count')
      .text('0');
    this.capacity = this.toolTipBase
      .append('text')
      .attr('fill', 'white')
      .attr('font-size', 12)
      .attr('x', 122)
      .attr('y', 28)
      .attr('id', 'capacity-count')
      .text('0');
    this.date = this.toolTipBase
      .append('text')
      .attr('fill', 'white')
      .attr('stroke', 'white')
      .attr('x', 20)
      .attr('y', -21)
      .attr('font-size', 12);
  }

  handleMouseOver = (event, data) => {
    let consumed = 0;
    let capacity = 0;
    const [x, y] = d3.pointer(event);
    const tooltipPointerX = x + 75;

    const formatTooltipDate = d3.timeFormat('%m/%y');
    if (!event) {
      return;
    }

    const toolTipWidth = this.toolTipBase.node().getBoundingClientRect().width;
    const chartWidth = d3
      .select(`${this.svg}> svg`)
      .node()
      .getBoundingClientRect().width;
    const overflow = 100 - (toolTipWidth / chartWidth) * 100;
    const flipped = overflow < (tooltipPointerX / chartWidth) * 100;
    if (data) {
      consumed = data.CONSUMED || 0;
      capacity = data.CAPACITY || 0;
      this.date.text(formatTooltipDate(data.MONTH || null));
    }

    this.capacity.text(`${capacity}`);
    this.consumed.text(`${consumed}`);
    this.consumedTextWidth = this.consumed.node().getComputedTextLength();
    this.capacityTextWidth = this.capacity.node().getComputedTextLength();

    const maxTextPerc = (this.jobsWidth / this.boxWidth) * 100;
    const threshold = 40;
    const overage = maxTextPerc / threshold;
    let adjustedWidth;
    if (maxTextPerc > threshold) {
      adjustedWidth = this.boxWidth * overage;
    } else {
      adjustedWidth = this.boxWidth;
    }

    this.boundingBox.attr('width', adjustedWidth);
    this.toolTipBase.attr('transform', `translate(${tooltipPointerX}, ${y})`);
    if (flipped) {
      this.toolTipPoint.attr('transform', 'translate(-20, -10) rotate(45)');
      this.boundingBox.attr('x', -adjustedWidth - 20);
      this.circleBlue.attr('cx', -adjustedWidth);
      this.circleRed.attr('cx', -adjustedWidth);
      this.icon.attr('x', -adjustedWidth - 2);
      this.consumedText.attr('x', -adjustedWidth + 17);
      this.capacityText.attr('x', -adjustedWidth + 17);
      this.consumed.attr('x', -this.consumedTextWidth - 20 - 12);
      this.capacity.attr('x', -this.capacityTextWidth - 20 - 12);
      this.date.attr('x', -adjustedWidth - 5);
    } else {
      this.toolTipPoint.attr('transform', 'translate(10, -10) rotate(45)');
      this.boundingBox.attr('x', 10);
      this.circleBlue.attr('cx', 26);
      this.circleRed.attr('cx', 26);
      this.icon.attr('x', 24);
      this.consumedText.attr('x', 43);
      this.capacityText.attr('x', 43);
      this.consumed.attr('x', adjustedWidth - this.consumedTextWidth);
      this.capacity.attr('x', adjustedWidth - this.capacityTextWidth);
      this.date.attr('x', 20);
    }

    this.toolTipBase.style('opacity', 1);
    this.toolTipBase.interrupt();
  };

  handleMouseOut = () => {
    this.toolTipBase
      .transition()
      .delay(15)
      .style('opacity', 0)
      .style('pointer-events', 'none');
  };
}

export default UsageChartTooltip;
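The tooltip is driven entirely by UsageChart above, but in isolation it can be exercised like this. A minimal sketch under stated assumptions: a container element that already holds the rendered <svg>, and data points carrying the MONTH/CONSUMED/CAPACITY keys UsageChart produces; the container id and color values are illustrative.

import * as d3 from 'd3';
import UsageChartTooltip from './UsageChartTooltip';

// Two-color ordinal scale, mirroring the one UsageChart passes in.
const colors = d3.scaleOrdinal(['#06C', '#C9190B']);
const tooltip = new UsageChartTooltip({
  svg: '#example-usage-chart', // container that already holds the rendered <svg>
  colors,
  label: 'Hosts',
});

// Wire the tooltip to plotted points; each datum must expose MONTH, CONSUMED, CAPACITY.
d3.selectAll('#example-usage-chart circle')
  .on('mouseover', (event, d) => tooltip.handleMouseOver(event, d))
  .on('mouseout', () => tooltip.handleMouseOut());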
53
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsage.js
Normal file
@@ -0,0 +1,53 @@
import React from 'react';
import styled from 'styled-components';

import { t, Trans } from '@lingui/macro';
import { Banner, Card, PageSection } from '@patternfly/react-core';
import { InfoCircleIcon } from '@patternfly/react-icons';

import { useConfig } from 'contexts/Config';
import useBrandName from 'hooks/useBrandName';
import ScreenHeader from 'components/ScreenHeader';
import SubscriptionUsageChart from './SubscriptionUsageChart';

const MainPageSection = styled(PageSection)`
  padding-top: 24px;
  padding-bottom: 0;

  & .spacer {
    margin-bottom: var(--pf-global--spacer--lg);
  }
`;

function SubscriptionUsage() {
  const config = useConfig();
  const brandName = useBrandName();

  return (
    <>
      {config?.ui_next && (
        <Banner variant="info">
          <Trans>
            <p>
              <InfoCircleIcon /> A tech preview of the new {brandName} user
              interface can be found <a href="/ui_next/dashboard">here</a>.
            </p>
          </Trans>
        </Banner>
      )}
      <ScreenHeader
        streamType="all"
        breadcrumbConfig={{ '/subscription_usage': t`Subscription Usage` }}
      />
      <MainPageSection>
        <div className="spacer">
          <Card id="dashboard-main-container">
            <SubscriptionUsageChart />
          </Card>
        </div>
      </MainPageSection>
    </>
  );
}

export default SubscriptionUsage;
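A hypothetical wiring sketch for the screen above, not part of the diff: the route path simply mirrors the breadcrumbConfig key, and the import path and router usage are assumptions about how the screen would be mounted.

import { Route } from 'react-router-dom';
import SubscriptionUsage from 'screens/SubscriptionUsage/SubscriptionUsage';

// Sketch only: mount the screen on the path used by its breadcrumb config.
<Route path="/subscription_usage">
  <SubscriptionUsage />
</Route>;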
167
awx/ui/src/screens/SubscriptionUsage/SubscriptionUsageChart.js
Normal file
@@ -0,0 +1,167 @@
import React, { useCallback, useEffect, useState } from 'react';
import styled from 'styled-components';

import { t } from '@lingui/macro';
import {
  Card,
  CardHeader,
  CardActions,
  CardBody,
  CardTitle,
  Flex,
  FlexItem,
  PageSection,
  Select,
  SelectVariant,
  SelectOption,
  Text,
} from '@patternfly/react-core';

import useRequest from 'hooks/useRequest';
import { SubscriptionUsageAPI } from 'api';
import { useUserProfile } from 'contexts/Config';
import ContentLoading from 'components/ContentLoading';
import UsageChart from './ChartComponents/UsageChart';

const GraphCardHeader = styled(CardHeader)`
  margin-bottom: var(--pf-global--spacer--lg);
`;

const ChartCardTitle = styled(CardTitle)`
  padding-right: 24px;
  font-size: 20px;
  font-weight: var(--pf-c-title--m-xl--FontWeight);
`;

const CardText = styled(Text)`
  padding-right: 24px;
`;

const GraphCardActions = styled(CardActions)`
  margin-left: initial;
  padding-left: 0;
`;

function SubscriptionUsageChart() {
  const [isPeriodDropdownOpen, setIsPeriodDropdownOpen] = useState(false);
  const [periodSelection, setPeriodSelection] = useState('year');
  const userProfile = useUserProfile();

  const calculateDateRange = () => {
    const today = new Date();
    let date = '';
    switch (periodSelection) {
      case 'year':
        date =
          today.getMonth() < 10
            ? `${today.getFullYear() - 1}-0${today.getMonth() + 1}-01`
            : `${today.getFullYear() - 1}-${today.getMonth() + 1}-01`;
        break;
      case 'two_years':
        date =
          today.getMonth() < 10
            ? `${today.getFullYear() - 2}-0${today.getMonth() + 1}-01`
            : `${today.getFullYear() - 2}-${today.getMonth() + 1}-01`;
        break;
      case 'three_years':
        date =
          today.getMonth() < 10
            ? `${today.getFullYear() - 3}-0${today.getMonth() + 1}-01`
            : `${today.getFullYear() - 3}-${today.getMonth() + 1}-01`;
        break;
      default:
        date =
          today.getMonth() < 10
            ? `${today.getFullYear() - 1}-0${today.getMonth() + 1}-01`
            : `${today.getFullYear() - 1}-${today.getMonth() + 1}-01`;
        break;
    }
    return date;
  };

  const {
    isLoading,
    result: subscriptionUsageChartData,
    request: fetchSubscriptionUsageChart,
  } = useRequest(
    useCallback(async () => {
      const data = await SubscriptionUsageAPI.readSubscriptionUsageChart(
        calculateDateRange()
      );
      return data.data.results;
    }, [periodSelection]),
    []
  );

  useEffect(() => {
    fetchSubscriptionUsageChart();
  }, [fetchSubscriptionUsageChart, periodSelection]);

  if (isLoading) {
    return (
      <PageSection>
        <Card>
          <ContentLoading />
        </Card>
      </PageSection>
    );
  }

  return (
    <Card>
      <Flex style={{ justifyContent: 'space-between' }}>
        <FlexItem>
          <ChartCardTitle>{t`Subscription Compliance`}</ChartCardTitle>
        </FlexItem>
        <FlexItem>
          <CardText component="small">
            {t`Last recalculation date:`}{' '}
            {userProfile.systemConfig.HOST_METRIC_SUMMARY_TASK_LAST_TS.slice(
              0,
              10
            )}
          </CardText>
        </FlexItem>
      </Flex>
      <GraphCardHeader>
        <GraphCardActions>
          <Select
            variant={SelectVariant.single}
            placeholderText={t`Select period`}
            aria-label={t`Select period`}
            typeAheadAriaLabel={t`Select period`}
            className="periodSelect"
            onToggle={setIsPeriodDropdownOpen}
            onSelect={(event, selection) => {
              setIsPeriodDropdownOpen(false);
              setPeriodSelection(selection);
            }}
            selections={periodSelection}
            isOpen={isPeriodDropdownOpen}
            noResultsFoundText={t`No results found`}
            ouiaId="subscription-usage-period-select"
          >
            <SelectOption key="year" value="year">
              {t`Past year`}
            </SelectOption>
            <SelectOption key="two_years" value="two_years">
              {t`Past two years`}
            </SelectOption>
            <SelectOption key="three_years" value="three_years">
              {t`Past three years`}
            </SelectOption>
          </Select>
        </GraphCardActions>
      </GraphCardHeader>
      <CardBody>
        <UsageChart
          period={periodSelection}
          height={600}
          id="d3-usage-line-chart-root"
          data={subscriptionUsageChartData}
        />
      </CardBody>
    </Card>
  );
}
export default SubscriptionUsageChart;
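To make the date arithmetic in calculateDateRange concrete, here is a worked example of the 'year' branch. The example date is an assumption for illustration; Date.prototype.getMonth() is zero-based, so September is index 8 and the month gets zero-padded.

// Sketch of the 'year' branch with a fixed example date (15 September 2023).
const today = new Date(2023, 8, 15); // month index 8 is September
const start =
  today.getMonth() < 10
    ? `${today.getFullYear() - 1}-0${today.getMonth() + 1}-01`
    : `${today.getFullYear() - 1}-${today.getMonth() + 1}-01`;
console.log(start); // '2022-09-01', the first of the same month one year back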
@@ -2,16 +2,9 @@ export default function getDocsBaseUrl(config) {
  let version = 'latest';
  const licenseType = config?.license_info?.license_type;

  if (licenseType && licenseType !== 'open') {
    if (config?.version) {
      if (parseFloat(config?.version.split('-')[0]) >= 4.3) {
        version = parseFloat(config?.version.split('-')[0]);
      } else {
        version = config?.version.split('-')[0];
      }
    }
  } else {
    version = 'latest';
  if (licenseType && licenseType !== 'open' && config?.version) {
    version = parseFloat(config?.version.split('-')[0]).toFixed(1);
  }

  return `https://docs.ansible.com/automation-controller/${version}`;
}

@@ -6,7 +6,7 @@ describe('getDocsBaseUrl', () => {
      license_info: {
        license_type: 'open',
      },
      version: '18.0.0',
      version: '18.4.4',
    });

    expect(result).toEqual(
@@ -19,11 +19,11 @@ describe('getDocsBaseUrl', () => {
      license_info: {
        license_type: 'enterprise',
      },
      version: '4.0.0',
      version: '18.4.4',
    });

    expect(result).toEqual(
      'https://docs.ansible.com/automation-controller/4.0.0'
      'https://docs.ansible.com/automation-controller/18.4'
    );
  });

@@ -32,17 +32,17 @@ describe('getDocsBaseUrl', () => {
      license_info: {
        license_type: 'enterprise',
      },
      version: '4.0.0-beta',
      version: '7.0.0-beta',
    });

    expect(result).toEqual(
      'https://docs.ansible.com/automation-controller/4.0.0'
      'https://docs.ansible.com/automation-controller/7.0'
    );
  });

  it('should return latest version if license info missing', () => {
    const result = getDocsBaseUrl({
      version: '18.0.0',
      version: '18.4.4',
    });

    expect(result).toEqual(
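The net effect of the getDocsBaseUrl change, as exercised by the updated tests above: non-open licenses now link to a major.minor docs path (via toFixed(1)), and everything else falls back to latest. A small illustrative sketch; the import path is an assumption, the return values follow from the function body shown above.

import getDocsBaseUrl from 'util/getDocsBaseUrl';

getDocsBaseUrl({ license_info: { license_type: 'enterprise' }, version: '18.4.4' });
// returns 'https://docs.ansible.com/automation-controller/18.4'

getDocsBaseUrl({ license_info: { license_type: 'open' }, version: '18.4.4' });
// returns 'https://docs.ansible.com/automation-controller/latest'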
Some files were not shown because too many files have changed in this diff.