Mirror of https://github.com/ansible/awx.git (synced 2026-02-06 03:54:44 -03:30)
Compare commits
81 Commits

| SHA1 |
|---|
| 56230ba5d1 |
| 480aaeace5 |
| 3eaea396be |
| deef8669c9 |
| 63223a2cc7 |
| a28bc2eb3f |
| 09168e5832 |
| 6df1de4262 |
| e072bb7668 |
| ec579fd637 |
| b95d521162 |
| d03a6a809d |
| 4466976e10 |
| 5733f78fd8 |
| 20fc7c702a |
| 6ce5799689 |
| dc81aa46d0 |
| ab3ceaecad |
| 1bb4240a6b |
| 5e105c2cbd |
| cdb4f0b7fd |
| cf1e448577 |
| 224e9e0324 |
| 660dab439b |
| 5ce2055431 |
| 951bd1cc87 |
| c9190ebd8f |
| eb33973fa3 |
| 40be2e7b6e |
| 485813211a |
| 0a87bf1b5e |
| fa0e0b2576 |
| 1d3b2f57ce |
| 0577e1ee79 |
| 470ecc4a4f |
| 965127637b |
| eba130cf41 |
| 441336301e |
| 2a0be898e6 |
| c47acc5988 |
| 70ba32b5b2 |
| 81e06dace2 |
| 3e8202590c |
| ad96a72ebe |
| eb0058268b |
| 2bf6512a8e |
| 855f61a04e |
| 532e71ff45 |
| b9ea114cac |
| e41ad82687 |
| 3bd25c682e |
| 7169c75b1a |
| fdb359a67b |
| ed2a59c1a3 |
| 906f8a1dce |
| 6833976c54 |
| d15405eafe |
| 6c3bbfc3be |
| 2e3e6cbde5 |
| 54894c14dc |
| 2a51f23b7d |
| 80df31fc4e |
| 8f8462b38e |
| 0c41abea0e |
| 3eda1ede8d |
| 40fca6db57 |
| 148111a072 |
| 9cad45feac |
| 6834568c5d |
| f7fdb7fe8d |
| d8abd4912b |
| 4fbdc412ad |
| db1af57daa |
| ffa59864ee |
| b209bc67b4 |
| 1faea020af |
| b55a099620 |
| f6dd3cb988 |
| c448b87c85 |
| 4dd823121a |
| ec4f10d868 |
.github/actions/run_awx_devel/action.yml (vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
# This currently *always* uses the "warm build cache" image.
# We should do something to allow forcing a rebuild, probably by looking for
# some string in the commit message or something.

name: Run AWX (devel environment)
description: Runs AWX with `make docker-compose`

inputs:
  github-token:
    description: GitHub Token for registry access
    required: true
  build-ui:
    description: Should the UI be built?
    required: false
    default: false
    type: boolean

outputs:
  ip:
    description: The IP of the tools_awx_1 container
    value: ${{ steps.data.outputs.ip }}
  admin-token:
    description: OAuth token for admin user
    value: ${{ steps.data.outputs.admin_token }}

runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

    - name: Upgrade ansible-core
      shell: bash
      run: python3 -m pip install --upgrade ansible-core

    - name: Install system deps
      shell: bash
      run: sudo apt-get install -y gettext

    - name: Log in to registry
      shell: bash
      run: |
        echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

    - name: Pre-pull latest available devel image and build HEAD on top of it
      shell: bash
      run: |
        docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        make docker-compose-build

    - name: Start AWX
      shell: bash
      run: |
        DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
        COMPOSE_TAG=${{ github.base_ref }} \
        COMPOSE_UP_OPTS="-d" \
        make docker-compose

    - name: Update default AWX password
      shell: bash
      run: |
        while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
        do
          echo "Waiting for AWX..."
          sleep 5
        done
        echo "AWX is up, updating the password..."
        docker exec -i tools_awx_1 sh <<-EOSH
          awx-manage update_password --username=admin --password=password
        EOSH

    - name: Build UI
      # This must be a string comparison in composite actions:
      # https://github.com/actions/runner/issues/2238
      if: ${{ inputs.build-ui == 'true' }}
      shell: bash
      run: |
        docker exec -i tools_awx_1 sh <<-EOSH
          make ui-devel
        EOSH

    - name: Get instance data
      id: data
      shell: bash
      run: |
        AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
        ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
        echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
        echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
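The action's `ip` and `admin-token` outputs are what downstream test steps use to reach the devel AWX. As a rough illustration (not taken from this PR — the IP and token values below are placeholders standing in for `steps.awx.outputs.*`), a consumer could exercise the API like this:

```python
# Hypothetical consumer of the run_awx_devel outputs; values are placeholders.
import requests

AWX_IP = "172.18.0.5"     # would come from ${{ steps.awx.outputs.ip }}
ADMIN_TOKEN = "abc123"    # would come from ${{ steps.awx.outputs.admin-token }}

# The devel environment serves HTTPS on 8043 with a self-signed certificate,
# hence verify=False here (matching the `curl -k` used elsewhere in this PR).
resp = requests.get(
    f"https://{AWX_IP}:8043/api/v2/ping/",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    verify=False,
)
resp.raise_for_status()
print(resp.json()["version"])
```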
.github/actions/upload_awx_devel_logs/action.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact

inputs:
  log-filename:
    description: "*Unique* name of the log file"
    required: true

runs:
  using: composite
  steps:
    - name: Get AWX logs
      shell: bash
      run: |
        docker logs tools_awx_1 > ${{ inputs.log-filename }}

    - name: Upload AWX logs as artifact
      uses: actions/upload-artifact@v3
      with:
        name: docker-compose-logs
        path: ${{ inputs.log-filename }}
.github/workflows/ci.yml (vendored, 147 lines changed)
@@ -7,6 +7,9 @@ env:
  COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on:
  pull_request:
+    paths-ignore:
+      - 'docs/**'
+      - '.github/workflows/docs.yml'
jobs:
  common-tests:
    name: ${{ matrix.tests.name }}

@@ -35,7 +38,7 @@ jobs:
      - name: ui-test-general
        command: make ui-test-general
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      - name: Run check ${{ matrix.tests.name }}
        run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner

@@ -43,7 +46,7 @@ jobs:
  dev-env:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      - name: Run smoke test
        run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v

@@ -52,12 +55,12 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout awx
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          path: awx

      - name: Checkout awx-operator
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          repository: ansible/awx-operator
          path: awx-operator

@@ -67,7 +70,7 @@ jobs:
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

@@ -102,7 +105,7 @@ jobs:
    strategy:
      fail-fast: false
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      # The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
      - name: Upgrade ansible-core

@@ -114,3 +117,135 @@ jobs:
          # needed due to cgroupsv2. This is fixed, but a stable release
          # with the fix has not been made yet.
          ANSIBLE_TEST_PREFER_PODMAN: 1
+
+  collection-integration:
+    name: awx_collection integration
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        target-regex:
+          - name: a-h
+            regex: ^[a-h]
+          - name: i-p
+            regex: ^[i-p]
+          - name: r-z0-9
+            regex: ^[r-z0-9]
+    steps:
+      - uses: actions/checkout@v3
+
+      - uses: ./.github/actions/run_awx_devel
+        id: awx
+        with:
+          build-ui: false
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install dependencies for running tests
+        run: |
+          python3 -m pip install -e ./awxkit/
+          python3 -m pip install -r awx_collection/requirements.txt
+
+      - name: Run integration tests
+        run: |
+          echo "::remove-matcher owner=python::"  # Disable annoying annotations from setup-python
+          echo '[general]' > ~/.tower_cli.cfg
+          echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
+          echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
+          echo 'verify_ssl = false' >> ~/.tower_cli.cfg
+          TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
+          make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
+        env:
+          ANSIBLE_TEST_PREFER_PODMAN: 1
+
+      # Upload coverage report as artifact
+      - uses: actions/upload-artifact@v3
+        with:
+          name: coverage-${{ matrix.target-regex.name }}
+          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
+
+      - uses: ./.github/actions/upload_awx_devel_logs
+        with:
+          log-filename: collection-integration-${{ matrix.target-regex.name }}.log
+
+  collection-integration-coverage-combine:
+    name: combine awx_collection integration coverage
+    runs-on: ubuntu-latest
+    needs:
+      - collection-integration
+    strategy:
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Upgrade ansible-core
+        run: python3 -m pip install --upgrade ansible-core
+
+      - name: Download coverage artifacts
+        uses: actions/download-artifact@v3
+        with:
+          path: coverage
+
+      - name: Combine coverage
+        run: |
+          make COLLECTION_VERSION=100.100.100-git install_collection
+          mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
+          cd coverage
+          for i in coverage-*; do
+            cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
+          done
+          cd ~/.ansible/collections/ansible_collections/awx/awx
+          ansible-test coverage combine --requirements
+          ansible-test coverage html
+          echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
+          echo '```' >> $GITHUB_STEP_SUMMARY
+          ansible-test coverage report >> $GITHUB_STEP_SUMMARY
+          echo '```' >> $GITHUB_STEP_SUMMARY
+          echo >> $GITHUB_STEP_SUMMARY
+          echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
+          echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
+
+      # This is a huge hack; there's no official action for removing artifacts currently.
+      # Also, ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
+      # steps, so we have to use github-script to get them.
+      #
+      # The advantage of doing this, though, is that we save on artifact storage space.
+
+      - name: Get secret artifact runtime URL
+        uses: actions/github-script@v6
+        id: get-runtime-url
+        with:
+          result-encoding: string
+          script: |
+            const { ACTIONS_RUNTIME_URL } = process.env;
+            return ACTIONS_RUNTIME_URL;
+
+      - name: Get secret artifact runtime token
+        uses: actions/github-script@v6
+        id: get-runtime-token
+        with:
+          result-encoding: string
+          script: |
+            const { ACTIONS_RUNTIME_TOKEN } = process.env;
+            return ACTIONS_RUNTIME_TOKEN;
+
+      - name: Remove intermediary artifacts
+        env:
+          ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
+          ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
+        run: |
+          echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
+          artifacts=$(
+            curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
+              ${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
+              | jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
+          )
+
+          for artifact in $artifacts; do
+            curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
+          done
+
+      - name: Upload coverage report as artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: awx-collection-integration-coverage-html
+          path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage
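For reference, the curl/jq loop in "Remove intermediary artifacts" amounts to the following in Python terms — a sketch only, since `ACTIONS_RUNTIME_URL` and `ACTIONS_RUNTIME_TOKEN` are undocumented internals, exactly as the workflow comment warns:

```python
# Sketch of the artifact-removal hack: list this run's artifacts via the
# internal runtime API, then delete the intermediary coverage-* ones.
import os
import requests

runtime_url = os.environ["ACTIONS_RUNTIME_URL"]    # undocumented internal
token = os.environ["ACTIONS_RUNTIME_TOKEN"]        # undocumented internal
run_id = os.environ["GITHUB_RUN_ID"]

listing = requests.get(
    f"{runtime_url}_apis/pipelines/workflows/{run_id}/artifacts",
    params={"api-version": "6.0-preview"},
    headers={"Authorization": f"Bearer {token}"},
).json()

for artifact in listing["value"]:
    if artifact["name"].startswith("coverage-"):
        requests.delete(
            artifact["url"],
            headers={
                "Accept": "application/json;api-version=6.0-preview",
                "Authorization": f"Bearer {token}",
            },
        )
```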
.github/workflows/devel_images.yml (vendored, 4 lines changed)
@@ -16,7 +16,7 @@ jobs:
      packages: write
      contents: read
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

@@ -28,7 +28,7 @@ jobs:
          OWNER: '${{ github.repository_owner }}'

      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.github/workflows/docs.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
---
name: Docsite CI
on:
  pull_request:
    paths:
      - 'docs/**'
      - '.github/workflows/docs.yml'
jobs:
  docsite-build:
    name: docsite test build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: install tox
        run: pip install tox

      - name: Assure docs can be built
        run: tox -e docs
.github/workflows/e2e_test.yml (vendored, 54 lines changed)
@@ -19,41 +19,21 @@ jobs:
        job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]

    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

-      - name: Get python version from Makefile
-        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
-
-      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ env.py_version }}
-
-      - name: Install system deps
-        run: sudo apt-get install -y gettext
-
-      - name: Log in to registry
-        run: |
-          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
-
-      - name: Pre-pull image to warm build cache
-        run: |
-          docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
-
-      - name: Build UI
-        run: |
-          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
-
-      - name: Start AWX
-        run: |
-          DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
+      - uses: ./.github/actions/run_awx_devel
+        id: awx
+        with:
+          build-ui: true
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          log-filename: e2e-${{ matrix.job }}.log

      - name: Pull awx_cypress_base image
        run: |
          docker pull quay.io/awx/awx_cypress_base:latest

      - name: Checkout test project
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          repository: ${{ github.repository_owner }}/tower-qa
          ssh-key: ${{ secrets.QA_REPO_KEY }}

@@ -65,18 +45,6 @@ jobs:
          cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
          docker build -t awx-pf-tests .

-      - name: Update default AWX password
-        run: |
-          while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
-          do
-            echo "Waiting for AWX..."
-            sleep 5;
-          done
-          echo "AWX is up, updating the password..."
-          docker exec -i tools_awx_1 sh <<-EOSH
-            awx-manage update_password --username=admin --password=password
-          EOSH
-
      - name: Run E2E tests
        env:
          CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}

@@ -86,7 +54,7 @@ jobs:
          export COMMIT_INFO_SHA=$GITHUB_SHA
          export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
          cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
-          AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
+          AWX_IP=${{ steps.awx.outputs.ip }}
          printenv > .env
          echo "Executing tests:"
          docker run \

@@ -102,8 +70,6 @@ jobs:
          -w /e2e \
          awx-pf-tests run --project .

-      - name: Save AWX logs
-        uses: actions/upload-artifact@v2
+      - uses: ./.github/actions/upload_awx_devel_logs
        with:
-          name: AWX-logs-${{ matrix.job }}
-          path: make-docker-compose-output.log
+          log-filename: e2e-${{ matrix.job }}.log
.github/workflows/label_issue.yml (vendored, 2 lines changed)
@@ -28,7 +28,7 @@ jobs:
    runs-on: ubuntu-latest
    name: Label Issue - Community
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: Install python requests
        run: pip install requests
.github/workflows/label_pr.yml (vendored, 2 lines changed)
@@ -27,7 +27,7 @@ jobs:
    runs-on: ubuntu-latest
    name: Label PR - Community
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: Install python requests
        run: pip install requests
.github/workflows/pr_body_check.yml (vendored, 1 line changed)
@@ -7,6 +7,7 @@ on:
    types: [opened, edited, reopened, synchronize]
jobs:
  pr-check:
+    if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
    name: Scan PR description for semantic versioning keywords
    runs-on: ubuntu-latest
    permissions:
.github/workflows/pr_body_check_jira.yml (vendored, deleted, 35 lines)
@@ -1,35 +0,0 @@
---
name: Check body for reference to jira
on:
  pull_request:
    branches:
      - release_**
jobs:
  pr-check:
    if: github.repository_owner == 'ansible' && github.repository != 'awx'
    name: Scan PR description for JIRA links
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check for JIRA lines
        env:
          PR_BODY: ${{ github.event.pull_request.body }}
        run: |
          echo "$PR_BODY" | grep "JIRA: None" > no_jira
          echo "$PR_BODY" | grep "JIRA: https://.*[0-9]+" > jira
          exit 0
        # We exit 0 and set the shell to prevent the returns from the greps from failing this step
        # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
        shell: bash {0}

      - name: Check for exactly one item
        run: |
          if [ $(cat no_jira jira | wc -l) != 1 ] ; then
            echo "The PR body must contain exactly one of [ 'JIRA: None' or 'JIRA: <one or more links>' ]"
            echo "We counted $(cat no_jira jira | wc -l)"
            exit 255;
          else
            exit 0;
          fi
.github/workflows/promote.yml (vendored, 4 lines changed)
@@ -17,13 +17,13 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout awx
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.github/workflows/stage.yml (vendored, 8 lines changed)
@@ -44,7 +44,7 @@ jobs:
          exit 0

      - name: Checkout awx
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          path: awx

@@ -52,18 +52,18 @@ jobs:
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}

      - name: Checkout awx-logos
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          repository: ansible/awx-logos
          path: awx-logos

      - name: Checkout awx-operator
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          repository: ${{ github.repository_owner }}/awx-operator
          path: awx-operator
.github/workflows/upload_schema.yml (vendored, 4 lines changed)
@@ -17,13 +17,13 @@ jobs:
      packages: write
      contents: read
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
.gitignore (vendored, 4 lines changed)
@@ -165,3 +165,7 @@ use_dev_supervisor.txt

awx/ui_next/src
awx/ui_next/build
+
+# Docs build stuff
+docs/docsite/build/
+_readthedocs/
.gitleaks.toml (new file, 5 lines)
@@ -0,0 +1,5 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
    "docs/docsite/rst/administration/oauth2_token_auth.rst",
]
.readthedocs.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

version: 2

build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  commands:
    - pip install --user tox
    - python3 -m tox -e docs
    - mkdir -p _readthedocs/html/
    - mv docs/docsite/build/html/* _readthedocs/html/

@@ -10,6 +10,7 @@ ignore: |
  tools/docker-compose/_sources
  # django template files
  awx/api/templates/instance_install_bundle/**
+  .readthedocs.yaml

extends: default
Makefile (2 lines changed)
@@ -383,7 +383,7 @@ test_collection_sanity:
	cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)

test_collection_integration: install_collection
-	cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
+	cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)

test_unit:
	@if [ "$(VENV_BASE)" ]; then \
@@ -3233,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
        if get_field_from_model_or_attrs('host_config_key') and not inventory:
            raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})

-        prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
+        prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
        if project is None:
            raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
        elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
@@ -5356,10 +5356,16 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer):
    class Meta:
        model = InstanceLink
-        fields = ('source', 'target', 'link_state')
+        fields = ('id', 'url', 'related', 'source', 'target', 'link_state')

-    source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
-    target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
+    source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
+    target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
+
+    def get_related(self, obj):
+        res = super(InstanceLinkSerializer, self).get_related(obj)
+        res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
+        res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
+        return res


class InstanceNodeSerializer(BaseSerializer):
@@ -5376,6 +5382,7 @@ class InstanceSerializer(BaseSerializer):
    jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
    jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
    health_check_pending = serializers.SerializerMethodField()
+    peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())

    class Meta:
        model = Instance

@@ -5412,6 +5419,8 @@ class InstanceSerializer(BaseSerializer):
            'node_state',
            'ip_address',
            'listener_port',
+            'peers',
+            'peers_from_control_nodes',
        )
        extra_kwargs = {
            'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5464,22 +5473,57 @@ class InstanceSerializer(BaseSerializer):
    def get_health_check_pending(self, obj):
        return obj.health_check_pending

-    def validate(self, data):
-        if self.instance:
-            if self.instance.node_type == Instance.Types.HOP:
-                raise serializers.ValidationError("Hop node instances may not be changed.")
-        else:
-            if not settings.IS_K8S:
-                raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
-        return data
+    def validate(self, attrs):
+        def get_field_from_model_or_attrs(fd):
+            return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
+
+        def check_peers_changed():
+            '''
+            Return True if 'peers' is in attrs and the instance's current
+            peers differ from the peers in attrs.
+            '''
+            return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
+
+        if not self.instance and not settings.IS_K8S:
+            raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
+
+        node_type = get_field_from_model_or_attrs("node_type")
+        peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
+        listener_port = get_field_from_model_or_attrs("listener_port")
+        peers = attrs.get('peers', [])
+
+        if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
+            raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
+
+        if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
+            if check_peers_changed():
+                raise serializers.ValidationError(
+                    _("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
+                )
+
+        if not listener_port and peers_from_control_nodes:
+            raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
+
+        if not listener_port and self.instance and self.instance.peers_from.exists():
+            raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
+
+        for peer in peers:
+            if peer.listener_port is None:
+                raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
+
+        if not settings.IS_K8S:
+            if check_peers_changed():
+                raise serializers.ValidationError(_("Cannot change peers."))
+
+        return super().validate(attrs)

    def validate_node_type(self, value):
-        if not self.instance:
-            if value not in (Instance.Types.EXECUTION,):
-                raise serializers.ValidationError("Can only create execution nodes.")
-        else:
-            if self.instance.node_type != value:
-                raise serializers.ValidationError("Cannot change node type.")
+        if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
+            raise serializers.ValidationError(_("Can only create execution or hop nodes."))
+
+        if self.instance and self.instance.node_type != value:
+            raise serializers.ValidationError(_("Cannot change node type."))

        return value
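Since `source`, `target`, and `peers` are now writable SlugRelatedFields keyed on hostname, peer links can be managed by hostname through the API. A hedged sketch of what that looks like (the host, instance id, hostnames, and token below are all made up):

```python
# Hypothetical API call exercising the new peers field; values are placeholders.
import requests

resp = requests.patch(
    "https://awx.example.org/api/v2/instances/5/",
    json={"peers": ["hop1.example.org"], "listener_port": 27199},
    headers={"Authorization": "Bearer abc123"},
    verify=False,
)
# Per the validate() rules above, expect a 400 for control/hybrid targets,
# for peers that lack a listener_port, or on non-K8S installs.
print(resp.status_code, resp.json())
```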
@@ -5487,30 +5531,41 @@ class InstanceSerializer(BaseSerializer):
        if self.instance:
            if value != self.instance.node_state:
                if not settings.IS_K8S:
-                    raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
+                    raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
                if value != Instance.States.DEPROVISIONING:
-                    raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
-                if self.instance.node_type not in (Instance.Types.EXECUTION,):
-                    raise serializers.ValidationError("Can only deprovision execution nodes.")
+                    raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
+                if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
+                    raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
        else:
            if value and value != Instance.States.INSTALLED:
-                raise serializers.ValidationError("Can only create instances in the 'installed' state.")
+                raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))

        return value

    def validate_hostname(self, value):
        """
        - Hostname cannot be "localhost" - but can be something like localhost.domain
-        - Cannot change the hostname of an already-instantiated & initialized Instance object
+        Cannot change the hostname
        """
        if self.instance and self.instance.hostname != value:
-            raise serializers.ValidationError("Cannot change hostname.")
+            raise serializers.ValidationError(_("Cannot change hostname."))

        return value

    def validate_listener_port(self, value):
-        if self.instance and self.instance.listener_port != value:
-            raise serializers.ValidationError("Cannot change listener port.")
+        """
+        Cannot change the listener port, except when going from None to an integer and vice versa
+        """
+        if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
+            raise serializers.ValidationError(_("Cannot change listener port."))

        return value
+
+    def validate_peers_from_control_nodes(self, value):
+        """
+        Can only be enabled for K8S-based deployments
+        """
+        if value and not settings.IS_K8S:
+            raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
+
+        return value
@@ -5518,7 +5573,19 @@ class InstanceSerializer(BaseSerializer):
class InstanceHealthCheckSerializer(BaseSerializer):
    class Meta:
        model = Instance
-        read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
+        read_only_fields = (
+            'uuid',
+            'hostname',
+            'ip_address',
+            'version',
+            'last_health_check',
+            'errors',
+            'cpu',
+            'memory',
+            'cpu_capacity',
+            'mem_capacity',
+            'capacity',
+        )
        fields = read_only_fields
@@ -3,21 +3,35 @@ receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
{% if instance.node_type == "execution" %}
receptor_work_commands:
  ansible-runner:
    command: ansible-runner
    params: worker
    allowruntimeparams: true
    verifysignature: true
additional_python_packages:
  - ansible-runner
{% endif %}
custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
receptor_protocol: 'tcp'
{% if instance.listener_port %}
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_dependencies:
  - python39-pip
{% else %}
receptor_listener: false
{% endif %}
{% if peers %}
receptor_peers:
{% for peer in peers %}
  - host: {{ peer.host }}
    port: {{ peer.port }}
    protocol: tcp
{% endfor %}
{% endif %}
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"
@@ -1,20 +1,16 @@
{% verbatim %}
---
- hosts: all
  become: yes
  tasks:
    - name: Create the receptor user
      user:
{% verbatim %}
        name: "{{ receptor_user }}"
{% endverbatim %}
        shell: /bin/bash
    - name: Enable Copr repo for Receptor
      command: dnf copr enable ansible-awx/receptor -y
{% if instance.node_type == "execution" %}
    - import_role:
        name: ansible.receptor.podman
{% endif %}
    - import_role:
        name: ansible.receptor.setup
    - name: Install ansible-runner
      pip:
        name: ansible-runner
        executable: pip3.9
{% endverbatim %}
@@ -1,4 +1,4 @@
---
collections:
  - name: ansible.receptor
-    version: 1.1.0
+    version: 2.0.0
@@ -341,17 +341,18 @@ class InstanceDetail(RetrieveUpdateAPIView):

    def update_raw_data(self, data):
        # these fields are only valid on creation of an instance, so they are unwanted on the detail view
        data.pop('listener_port', None)
        data.pop('node_type', None)
        data.pop('hostname', None)
        data.pop('ip_address', None)
        return super(InstanceDetail, self).update_raw_data(data)

    def update(self, request, *args, **kwargs):
        r = super(InstanceDetail, self).update(request, *args, **kwargs)
        if status.is_success(r.status_code):
            obj = self.get_object()
-            obj.set_capacity_value()
-            obj.save(update_fields=['capacity'])
+            capacity_changed = obj.set_capacity_value()
+            if capacity_changed:
+                obj.save(update_fields=['capacity'])
            r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
        return r
@@ -6,6 +6,8 @@ import io
import ipaddress
import os
import tarfile
+import time
+import re

import asn1
from awx.api import serializers

@@ -40,6 +42,8 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# │   │   └── receptor.key
# │   └── work-public-key.pem
# └── requirements.yml


class InstanceInstallBundle(GenericAPIView):
    name = _('Install Bundle')
    model = models.Instance
@@ -49,9 +53,9 @@ class InstanceInstallBundle(GenericAPIView):
    def get(self, request, *args, **kwargs):
        instance_obj = self.get_object()

-        if instance_obj.node_type not in ('execution',):
+        if instance_obj.node_type not in ('execution', 'hop'):
            return Response(
-                data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
+                data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
                status=status.HTTP_400_BAD_REQUEST,
            )
@@ -66,37 +70,37 @@ class InstanceInstallBundle(GenericAPIView):
            # generate and write the receptor key to receptor/tls/receptor.key in the tar file
            key, cert = generate_receptor_tls(instance_obj)

+            def tar_addfile(tarinfo, filecontent):
+                tarinfo.mtime = time.time()
+                tarinfo.size = len(filecontent)
+                tar.addfile(tarinfo, io.BytesIO(filecontent))
+
            key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
-            key_tarinfo.size = len(key)
-            tar.addfile(key_tarinfo, io.BytesIO(key))
+            tar_addfile(key_tarinfo, key)

            cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
-            cert_tarinfo.size = len(cert)
-            tar.addfile(cert_tarinfo, io.BytesIO(cert))
+            tar_addfile(cert_tarinfo, cert)

            # generate and write install_receptor.yml to the tar file
-            playbook = generate_playbook().encode('utf-8')
+            playbook = generate_playbook(instance_obj).encode('utf-8')
            playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
-            playbook_tarinfo.size = len(playbook)
-            tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
+            tar_addfile(playbook_tarinfo, playbook)

            # generate and write inventory.yml to the tar file
            inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
            inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
-            inventory_yml_tarinfo.size = len(inventory_yml)
-            tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
+            tar_addfile(inventory_yml_tarinfo, inventory_yml)

            # generate and write group_vars/all.yml to the tar file
            group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
            group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
-            group_vars_tarinfo.size = len(group_vars)
-            tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
+            tar_addfile(group_vars_tarinfo, group_vars)

            # generate and write requirements.yml to the tar file
            requirements_yml = generate_requirements_yml().encode('utf-8')
            requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
-            requirements_yml_tarinfo.size = len(requirements_yml)
-            tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
+            tar_addfile(requirements_yml_tarinfo, requirements_yml)

            # respond with the tarfile
            f.seek(0)
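The `tar_addfile` helper introduced here just centralizes the TarInfo bookkeeping (mtime and size) before writing in-memory bytes. A self-contained sketch of the same pattern — the archive member name and contents are invented, and the gzip mode is an assumption not shown in this hunk:

```python
# Standalone sketch of the tar_addfile pattern; member name/contents invented.
import io
import tarfile
import time

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w:gz') as tar:

    def tar_addfile(tarinfo, filecontent):
        tarinfo.mtime = time.time()      # avoid the 1970-01-01 default mtime
        tarinfo.size = len(filecontent)  # required for a valid tar member
        tar.addfile(tarinfo, io.BytesIO(filecontent))

    tar_addfile(tarfile.TarInfo("bundle/group_vars/all.yml"), b"receptor_listener: true\n")

print(f"{len(buf.getvalue())} bytes of tar.gz")
```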
@@ -105,8 +109,10 @@ class InstanceInstallBundle(GenericAPIView):
        return response


-def generate_playbook():
-    return render_to_string("instance_install_bundle/install_receptor.yml")
+def generate_playbook(instance_obj):
+    playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
+    # collapse consecutive newlines into a single newline
+    return re.sub(r'\n+', '\n', playbook_yaml)


def generate_requirements_yml():

@@ -118,7 +124,12 @@ def generate_inventory_yml(instance_obj):


def generate_group_vars_all_yml(instance_obj):
-    return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
+    peers = []
+    for instance in instance_obj.peers.all():
+        peers.append(dict(host=instance.hostname, port=instance.listener_port))
+    all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
+    # collapse consecutive newlines into a single newline
+    return re.sub(r'\n+', '\n', all_yaml)


def generate_receptor_tls(instance_obj):
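Why the `re.sub` pass matters: template `{% if %}` blocks that render empty still leave their newlines behind, so the rendered YAML ends up with runs of blank lines. A minimal standard-library demonstration (the sample string is made up):

```python
import re

# Rendered template text with gaps left by conditional blocks (invented sample)
rendered = "receptor_tls: true\n\n\n\nreceptor_listener: false\n"

# The same substitution used in generate_playbook / generate_group_vars_all_yml
print(re.sub(r'\n+', '\n', rendered))
# receptor_tls: true
# receptor_listener: false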
@@ -4,6 +4,8 @@ from urllib.parse import urljoin, quote

from django.utils.translation import gettext_lazy as _
import requests
+import base64
+import binascii


conjur_inputs = {

@@ -50,6 +52,13 @@ conjur_inputs = {
}


+def _is_base64(s: str) -> bool:
+    try:
+        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
+    except binascii.Error:
+        return False
+
+
def conjur_backend(**kwargs):
    url = kwargs['url']
    api_key = kwargs['api_key']

@@ -77,7 +86,7 @@ def conjur_backend(**kwargs):
    token = resp.content.decode('utf-8')

    lookup_kwargs = {
-        'headers': {'Authorization': 'Token token="{}"'.format(token)},
+        'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
        'allow_redirects': False,
    }
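The `_is_base64` round-trip check is worth a second look: a token is treated as base64 only when decoding and re-encoding reproduces it byte-for-byte, so already-encoded Conjur tokens pass through unchanged and plain tokens get encoded exactly once. A quick standalone check (the sample strings are invented):

```python
import base64
import binascii

def _is_base64(s: str) -> bool:
    try:
        return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
    except binascii.Error:
        return False

print(_is_base64("aGVsbG8="))  # True  -> header uses the token unchanged
print(_is_base64("hello"))     # False -> token is base64-encoded once first
```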
@@ -1,22 +1,22 @@
-from awx.main.models import HostMetric
from django.core.management.base import BaseCommand
from django.conf import settings
+from awx.main.tasks.host_metrics import HostMetricTask


class Command(BaseCommand):
    """
-    Run soft-deleting of HostMetrics
+    This command provides the cleanup task for the HostMetric model.
+    There are two modes, which run in the following order:
+    - soft cleanup
+      - Performs soft-deletion of all host metrics last automated 12 months ago or before.
+        This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
+      - Updates the columns deleted, deleted_counter and last_deleted.
+    - hard cleanup
+      - Permanently erases from the database all host metrics last automated 36 months ago or before.
+        This operation happens after the soft deletion has finished.
    """

-    help = 'Run soft-deleting of HostMetrics'
-
-    def add_arguments(self, parser):
-        parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
+    help = 'Run soft and hard-deletion of HostMetrics'

    def handle(self, *args, **options):
-        months_ago = options.get('months-ago') or None
-
-        if not months_ago:
-            months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
-
-        HostMetric.cleanup_task(months_ago)
+        HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
@@ -25,17 +25,20 @@ class Command(BaseCommand):

    def add_arguments(self, parser):
        parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
+        parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
        parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
        parser.add_argument('--uuid', type=str, help="Instance UUID")

-    def _register_hostname(self, hostname, node_type, uuid):
+    def _register_hostname(self, hostname, node_type, uuid, listener_port):
        if not hostname:
            if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                raise CommandError('Registering with values from settings only intended for use in K8s installs')

            from awx.main.management.commands.register_queue import RegisterQueue

-            (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID)
+            (changed, instance) = Instance.objects.register(
+                ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
+            )
            RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
            RegisterQueue(
                settings.DEFAULT_EXECUTION_QUEUE_NAME,

@@ -48,7 +51,7 @@ class Command(BaseCommand):
                max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
            ).register()
        else:
-            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid)
+            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
        if changed:
            print("Successfully registered instance {}".format(hostname))
        else:

@@ -58,6 +61,6 @@ class Command(BaseCommand):
    @transaction.atomic
    def handle(self, **options):
        self.changed = False
-        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
+        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
        if self.changed:
            print("(changed: True)")
|
||||
@@ -115,10 +115,13 @@ class InstanceManager(models.Manager):
|
||||
return node[0]
|
||||
raise RuntimeError("No instance found with the current cluster host id")
|
||||
|
||||
def register(self, node_uuid=None, hostname=None, ip_address=None, node_type='hybrid', defaults=None):
|
||||
def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
|
||||
if not hostname:
|
||||
hostname = settings.CLUSTER_HOST_ID
|
||||
|
||||
if not ip_address:
|
||||
ip_address = ""
|
||||
|
||||
with advisory_lock('instance_registration_%s' % hostname):
|
||||
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
# detect any instances with the same IP address.
|
||||
@@ -157,6 +160,9 @@ class InstanceManager(models.Manager):
|
||||
if instance.node_type != node_type:
|
||||
instance.node_type = node_type
|
||||
update_fields.append('node_type')
|
||||
if instance.listener_port != listener_port:
|
||||
instance.listener_port = listener_port
|
||||
update_fields.append('listener_port')
|
||||
if update_fields:
|
||||
instance.save(update_fields=update_fields)
|
||||
return (True, instance)
|
||||
@@ -167,12 +173,11 @@ class InstanceManager(models.Manager):
|
||||
create_defaults = {
|
||||
'node_state': Instance.States.INSTALLED,
|
||||
'capacity': 0,
|
||||
'listener_port': 27199,
|
||||
}
|
||||
if defaults is not None:
|
||||
create_defaults.update(defaults)
|
||||
uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
|
||||
if node_type == 'execution' and 'version' not in create_defaults:
|
||||
create_defaults['version'] = RECEPTOR_PENDING
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
|
||||
return (True, instance)
|
||||
|
||||
awx/main/migrations/0187_hop_nodes.py (new file, 75 lines)
@@ -0,0 +1,75 @@
# Generated by Django 4.2.3 on 2023-08-04 20:50

import django.core.validators
from django.db import migrations, models
from django.conf import settings


def automatically_peer_from_control_plane(apps, schema_editor):
    if settings.IS_K8S:
        Instance = apps.get_model('main', 'Instance')
        Instance.objects.filter(node_type='execution').update(peers_from_control_nodes=True)
        Instance.objects.filter(node_type='control').update(listener_port=None)


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0186_drop_django_taggit'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='instancelink',
            options={'ordering': ('id',)},
        ),
        migrations.AddField(
            model_name='instance',
            name='peers_from_control_nodes',
            field=models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
        ),
        migrations.AlterField(
            model_name='instance',
            name='ip_address',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
        migrations.AlterField(
            model_name='instance',
            name='listener_port',
            field=models.PositiveIntegerField(
                blank=True,
                default=None,
                help_text='Port that Receptor will listen for incoming connections on.',
                null=True,
                validators=[django.core.validators.MinValueValidator(1024), django.core.validators.MaxValueValidator(65535)],
            ),
        ),
        migrations.AlterField(
            model_name='instance',
            name='peers',
            field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.instance'),
        ),
        migrations.AlterField(
            model_name='instancelink',
            name='link_state',
            field=models.CharField(
                choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
                default='adding',
                help_text='Indicates the current life cycle stage of this peer link.',
                max_length=16,
            ),
        ),
        migrations.AddConstraint(
            model_name='instance',
            constraint=models.UniqueConstraint(
                condition=models.Q(('ip_address', ''), _negated=True),
                fields=('ip_address',),
                name='unique_ip_address_not_empty',
                violation_error_message='Field ip_address must be unique.',
            ),
        ),
        migrations.AddConstraint(
            model_name='instancelink',
            constraint=models.CheckConstraint(check=models.Q(('source', models.F('target')), _negated=True), name='source_and_target_can_not_be_equal'),
        ),
        migrations.RunPython(automatically_peer_from_control_plane),
    ]
@@ -17,6 +17,7 @@ from jinja2 import sandbox
from django.db import models
from django.utils.translation import gettext_lazy as _, gettext_noop
+from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.timezone import now

@@ -30,7 +31,7 @@ from awx.main.fields import (
    CredentialTypeInjectorField,
    DynamicCredentialInputField,
)
-from awx.main.utils import decrypt_field, classproperty
+from awx.main.utils import decrypt_field, classproperty, set_environ
from awx.main.utils.safe_yaml import safe_dump
from awx.main.utils.execution_environments import to_container_path
from awx.main.validators import validate_ssh_private_key

@@ -1252,7 +1253,9 @@ class CredentialInputSource(PrimordialModel):
                backend_kwargs[field_name] = value

        backend_kwargs.update(self.metadata)
-        return backend(**backend_kwargs)
+
+        with set_environ(**settings.AWX_TASK_ENV):
+            return backend(**backend_kwargs)

    def get_absolute_url(self, request=None):
        view_name = 'api:credential_input_source_detail'
@@ -12,13 +12,14 @@ from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.utils.timezone import now, timedelta
-from django.db.models import Sum
+from django.db.models import Sum, Q

import redis
from solo.models import SingletonModel

# AWX
from awx import __version__ as awx_application_version
+from awx.main.utils import is_testing
from awx.api.versioning import reverse
from awx.main.fields import ImplicitRoleField
from awx.main.managers import InstanceManager, UUID_DEFAULT
@@ -70,16 +71,33 @@ class InstanceLink(BaseModel):
        REMOVING = 'removing', _('Removing')

    link_state = models.CharField(
-        choices=States.choices, default=States.ESTABLISHED, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
+        choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
    )

    class Meta:
        unique_together = ('source', 'target')
+        ordering = ("id",)
+        constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')]


class Instance(HasPolicyEditsMixin, BaseModel):
    """A model representing an AWX instance running against this database."""

+    class Meta:
+        app_label = 'main'
+        ordering = ("hostname",)
+        constraints = [
+            models.UniqueConstraint(
+                fields=["ip_address"],
+                condition=~Q(ip_address=""),  # don't apply the constraint to empty entries
+                name="unique_ip_address_not_empty",
+                violation_error_message=_("Field ip_address must be unique."),
+            )
+        ]
+
    def __str__(self):
        return self.hostname

    objects = InstanceManager()

    # Fields set in instance registration
@@ -87,10 +105,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
    hostname = models.CharField(max_length=250, unique=True)
    ip_address = models.CharField(
        blank=True,
-        null=True,
-        default=None,
+        default="",
        max_length=50,
-        unique=True,
    )
    # Auto-fields, implementation is different from BaseModel
    created = models.DateTimeField(auto_now_add=True)
@@ -169,16 +185,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
    )
    listener_port = models.PositiveIntegerField(
        blank=True,
-        default=27199,
-        validators=[MinValueValidator(1), MaxValueValidator(65535)],
        null=True,
+        default=None,
+        validators=[MinValueValidator(1024), MaxValueValidator(65535)],
        help_text=_("Port that Receptor will listen for incoming connections on."),
    )

-    peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'))
-
-    class Meta:
-        app_label = 'main'
-        ordering = ("hostname",)
+    peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
+    peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it."))

    POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
@@ -275,10 +289,14 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
if update_last_seen:
|
||||
update_fields += ['last_seen']
|
||||
if perform_save:
|
||||
self.save(update_fields=update_fields)
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
with disable_activity_stream():
|
||||
self.save(update_fields=update_fields)
|
||||
return update_fields
|
||||
|
||||
def set_capacity_value(self):
|
||||
old_val = self.capacity
|
||||
"""Sets capacity according to capacity adjustment rule (no save)"""
|
||||
if self.enabled and self.node_type != 'hop':
|
||||
lower_cap = min(self.mem_capacity, self.cpu_capacity)
|
||||
@@ -286,6 +304,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
self.capacity = lower_cap + (higher_cap - lower_cap) * self.capacity_adjustment
|
||||
else:
|
||||
self.capacity = 0
|
||||
return int(self.capacity) != int(old_val) # return True if value changed
|
||||
|
||||
def refresh_capacity_fields(self):
|
||||
"""Update derived capacity fields from cpu and memory (no save)"""
|
||||
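The adjustment rule in set_capacity_value slides the node's capacity between the more conservative (min) and the more generous (max) of the two derived capacities. A minimal worked example with assumed inputs, not values taken from this diff:

    # Sketch of the capacity adjustment rule with made-up numbers.
    mem_capacity = 62
    cpu_capacity = 8
    capacity_adjustment = 0.5  # 0 = most conservative, 1 = most generous

    lower_cap = min(mem_capacity, cpu_capacity)   # 8
    higher_cap = max(mem_capacity, cpu_capacity)  # 62
    capacity = lower_cap + (higher_cap - lower_cap) * capacity_adjustment
    print(capacity)  # 35.0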
@@ -293,8 +312,8 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             self.cpu_capacity = 0
             self.mem_capacity = 0  # formula has a non-zero offset, so we make sure it is 0 for hop nodes
         else:
-            self.cpu_capacity = get_cpu_effective_capacity(self.cpu)
-            self.mem_capacity = get_mem_effective_capacity(self.memory)
+            self.cpu_capacity = get_cpu_effective_capacity(self.cpu, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
+            self.mem_capacity = get_mem_effective_capacity(self.memory, is_control_node=bool(self.node_type in (Instance.Types.CONTROL, Instance.Types.HYBRID)))
         self.set_capacity_value()

     def save_health_data(self, version=None, cpu=0, memory=0, uuid=None, update_last_seen=False, errors=''):
@@ -317,12 +336,17 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             self.version = version
             update_fields.append('version')

-        new_cpu = get_corrected_cpu(cpu)
+        if self.node_type == Instance.Types.EXECUTION:
+            new_cpu = cpu
+            new_memory = memory
+        else:
+            new_cpu = get_corrected_cpu(cpu)
+            new_memory = get_corrected_memory(memory)
+
         if new_cpu != self.cpu:
             self.cpu = new_cpu
             update_fields.append('cpu')

-        new_memory = get_corrected_memory(memory)
         if new_memory != self.memory:
             self.memory = new_memory
             update_fields.append('memory')
@@ -464,21 +488,50 @@ def on_instance_group_saved(sender, instance, created=False, raw=False, **kwargs
         instance.set_default_policy_fields()


+def schedule_write_receptor_config(broadcast=True):
+    from awx.main.tasks.receptor import write_receptor_config  # prevents circular import
+
+    # broadcast to all control instances to update their receptor configs
+    if broadcast:
+        connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
+    else:
+        if not is_testing():
+            write_receptor_config()  # just run locally
+
+
 @receiver(post_save, sender=Instance)
 def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
-    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION,):
+    '''
+    Here we link control nodes to hop or execution nodes based on the
+    peers_from_control_nodes field.
+    write_receptor_config should be called on each control node when:
+    1. a new node is created with peers_from_control_nodes enabled
+    2. a node changes its value of peers_from_control_nodes
+    3. a new control node comes online and has instances to peer to
+    '''
+    if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
+        inst = Instance.objects.filter(peers_from_control_nodes=True)
+        if set(instance.peers.all()) != set(inst):
+            instance.peers.set(inst)
+            schedule_write_receptor_config(broadcast=False)
+
+    if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]:
         if instance.node_state == Instance.States.DEPROVISIONING:
             from awx.main.tasks.receptor import remove_deprovisioned_node  # prevents circular import

             # wait for jobs on the node to complete, then delete the
             # node and kick off write_receptor_config
             connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))

-        if instance.node_state == Instance.States.INSTALLED:
-            from awx.main.tasks.receptor import write_receptor_config  # prevents circular import
-
-            # broadcast to all control instances to update their receptor configs
-            connection.on_commit(lambda: write_receptor_config.apply_async(queue='tower_broadcast_all'))
+        else:
+            control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
+            if instance.peers_from_control_nodes:
+                if (control_instances & set(instance.peers_from.all())) != set(control_instances):
+                    instance.peers_from.add(*control_instances)
+                    schedule_write_receptor_config()  # keep method separate to make pytest mocking easier
+            else:
+                if set(control_instances) & set(instance.peers_from.all()):
+                    instance.peers_from.remove(*control_instances)
+                    schedule_write_receptor_config()

     if created or instance.has_policy_changes():
         schedule_policy_task()
@@ -493,6 +546,8 @@ def on_instance_group_deleted(sender, instance, using, **kwargs):
 @receiver(post_delete, sender=Instance)
 def on_instance_deleted(sender, instance, using, **kwargs):
     schedule_policy_task()
+    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes:
+        schedule_write_receptor_config()


 class UnifiedJobTemplateInstanceGroupMembership(models.Model):
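The handler above reconciles each hop or execution node's peers_from set against the control-plane nodes whenever peers_from_control_nodes is set or cleared. A standalone sketch of that reconciliation rule, using plain sets instead of Django querysets (names are illustrative):

    # Sketch of the reconciliation in on_instance_saved, with plain sets.
    def reconcile(control_nodes, current_peers_from, peers_from_control_nodes):
        """Return (new_peers_from, config_changed)."""
        current = set(current_peers_from)
        controls = set(control_nodes)
        if peers_from_control_nodes:
            if controls - current:  # some control node is not yet peered
                return current | controls, True
        else:
            if controls & current:  # stale control-node peers to drop
                return current - controls, True
        return current, False

    print(reconcile({'c1', 'c2'}, {'c1'}, True))   # ({'c1', 'c2'}, True)
    print(reconcile({'c1', 'c2'}, {'c1'}, False))  # (set(), True)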
@@ -10,7 +10,6 @@ import copy
 import os.path
 from urllib.parse import urljoin

-import dateutil.relativedelta
 import yaml

 # Django
@@ -890,23 +889,6 @@ class HostMetric(models.Model):
         self.deleted = False
         self.save(update_fields=['deleted'])

-    @classmethod
-    def cleanup_task(cls, months_ago):
-        try:
-            months_ago = int(months_ago)
-            if months_ago <= 0:
-                raise ValueError()
-
-            last_automation_before = now() - dateutil.relativedelta.relativedelta(months=months_ago)
-
-            logger.info(f'cleanup_host_metrics: soft-deleting records last automated before {last_automation_before}')
-            HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
-                deleted=True, deleted_counter=models.F('deleted_counter') + 1, last_deleted=now()
-            )
-            settings.CLEANUP_HOST_METRICS_LAST_TS = now()
-        except (TypeError, ValueError):
-            logger.error(f"cleanup_host_metrics: months_ago({months_ago}) has to be a positive integer value")


 class HostMetricSummaryMonthly(models.Model):
     """
@@ -208,9 +208,10 @@ class RunnerCallback:
             # We opened a connection just for that save, close it here now
             connections.close_all()
         elif status_data['status'] == 'error':
-            result_traceback = status_data.get('result_traceback', None)
-            if result_traceback:
-                self.delay_update(result_traceback=result_traceback)
+            for field_name in ('result_traceback', 'job_explanation'):
+                field_value = status_data.get(field_name, None)
+                if field_value:
+                    self.delay_update(**{field_name: field_value})

     def artifacts_handler(self, artifact_dir):
         self.artifacts_processed = True
awx/main/tasks/helpers.py (new file, +10)
@@ -0,0 +1,10 @@
from django.utils.timezone import now
from rest_framework.fields import DateTimeField


def is_run_threshold_reached(setting, threshold_seconds):
    last_time = DateTimeField().to_internal_value(setting) if setting else None
    if not last_time:
        return True
    else:
        return (now() - last_time).total_seconds() > threshold_seconds
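The helper treats a missing timestamp as "threshold reached", so a periodic task fires on its first-ever run. A usage sketch, assuming a configured Django environment:

    # Usage sketch for the new helper (values are made up).
    from datetime import timedelta
    from django.utils.timezone import now

    from awx.main.tasks.helpers import is_run_threshold_reached

    is_run_threshold_reached(None, 86400)                        # True: never ran before
    is_run_threshold_reached(now() - timedelta(days=2), 86400)   # True: older than one day
    is_run_threshold_reached(now() - timedelta(hours=1), 86400)  # False: ran recently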
@@ -3,33 +3,90 @@ from dateutil.relativedelta import relativedelta
 import logging

 from django.conf import settings
-from django.db.models import Count
+from django.db.models import Count, F
 from django.db.models.functions import TruncMonth
 from django.utils.timezone import now
-from rest_framework.fields import DateTimeField
 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.publish import task
 from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
+from awx.main.tasks.helpers import is_run_threshold_reached
 from awx.conf.license import get_license

-logger = logging.getLogger('awx.main.tasks.host_metric_summary_monthly')
+logger = logging.getLogger('awx.main.tasks.host_metrics')


+@task(queue=get_task_queuename)
+def cleanup_host_metrics():
+    if is_run_threshold_reached(getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', None), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400):
+        logger.info(f"Executing cleanup_host_metrics, last ran at {getattr(settings, 'CLEANUP_HOST_METRICS_LAST_TS', '---')}")
+        HostMetricTask().cleanup(
+            soft_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12),
+            hard_threshold=getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36),
+        )
+        logger.info("Finished cleanup_host_metrics")
+
+
 @task(queue=get_task_queuename)
 def host_metric_summary_monthly():
     """Run cleanup host metrics summary monthly task each week"""
-    if _is_run_threshold_reached(
-        getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400
-    ):
+    if is_run_threshold_reached(getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', None), getattr(settings, 'HOST_METRIC_SUMMARY_TASK_INTERVAL', 7) * 86400):
         logger.info(f"Executing host_metric_summary_monthly, last ran at {getattr(settings, 'HOST_METRIC_SUMMARY_TASK_LAST_TS', '---')}")
         HostMetricSummaryMonthlyTask().execute()
         logger.info("Finished host_metric_summary_monthly")


-def _is_run_threshold_reached(setting, threshold_seconds):
-    last_time = DateTimeField().to_internal_value(setting) if setting else DateTimeField().to_internal_value('1970-01-01')
-
-    return (now() - last_time).total_seconds() > threshold_seconds
+class HostMetricTask:
+    """
+    This class provides the cleanup task for the HostMetric model.
+    There are two modes:
+    - soft cleanup (updates the columns deleted, deleted_counter and last_deleted)
+    - hard cleanup (deletes from the db)
+    """
+
+    def cleanup(self, soft_threshold=None, hard_threshold=None):
+        """
+        Main entrypoint, runs either soft cleanup, hard cleanup or both
+
+        :param soft_threshold: (int)
+        :param hard_threshold: (int)
+        """
+        if hard_threshold is not None:
+            self.hard_cleanup(hard_threshold)
+        if soft_threshold is not None:
+            self.soft_cleanup(soft_threshold)
+
+        settings.CLEANUP_HOST_METRICS_LAST_TS = now()
+
+    @staticmethod
+    def soft_cleanup(threshold=None):
+        if threshold is None:
+            threshold = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
+
+        try:
+            threshold = int(threshold)
+        except (ValueError, TypeError) as e:
+            raise type(e)("soft_threshold has to be convertible to number") from e
+
+        last_automation_before = now() - relativedelta(months=threshold)
+        rows = HostMetric.active_objects.filter(last_automation__lt=last_automation_before).update(
+            deleted=True, deleted_counter=F('deleted_counter') + 1, last_deleted=now()
+        )
+        logger.info(f'cleanup_host_metrics: soft-deleted records last automated before {last_automation_before}, affected rows: {rows}')
+
+    @staticmethod
+    def hard_cleanup(threshold=None):
+        if threshold is None:
+            threshold = getattr(settings, 'CLEANUP_HOST_METRICS_HARD_THRESHOLD', 36)
+
+        try:
+            threshold = int(threshold)
+        except (ValueError, TypeError) as e:
+            raise type(e)("hard_threshold has to be convertible to number") from e
+
+        last_deleted_before = now() - relativedelta(months=threshold)
+        queryset = HostMetric.objects.filter(deleted=True, last_deleted__lt=last_deleted_before)
+        rows = queryset.delete()
+        logger.info(f'cleanup_host_metrics: hard-deleted records which were soft deleted before {last_deleted_before}, affected rows: {rows[0]}')


 class HostMetricSummaryMonthlyTask:
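Note the ordering inside cleanup: hard cleanup runs before soft cleanup, so rows soft-deleted in the current pass are not purged in the same pass. A usage sketch with the default thresholds (both in months):

    # Usage sketch; defaults mirror the settings fallbacks in the task above.
    from awx.main.tasks.host_metrics import HostMetricTask

    task = HostMetricTask()
    # Soft-delete metrics not automated for 12 months, then purge rows
    # that have already been soft-deleted for 36 months.
    task.cleanup(soft_threshold=12, hard_threshold=36)
    # Either mode can also run alone:
    task.soft_cleanup(12)
    task.hard_cleanup(36)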
@@ -30,6 +30,7 @@ from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
 from awx.main.models import Instance, InstanceLink, UnifiedJob
 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.publish import task
+from awx.main.utils.pglock import advisory_lock

 # Receptorctl
 from receptorctl.socket_interface import ReceptorControl
@@ -431,16 +432,16 @@ class AWXReceptorJob:
                 # massive, only ask for last 1000 bytes
                 startpos = max(stdout_size - 1000, 0)
                 resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, startpos=startpos, return_socket=True, return_sockfile=True)
                 resultsock.setblocking(False)  # this makes resultfile reads non blocking
                 lines = resultfile.readlines()
                 receptor_output = b"".join(lines).decode()
                 if receptor_output:
-                    self.task.runner_callback.delay_update(result_traceback=receptor_output)
+                    self.task.runner_callback.delay_update(result_traceback=f'Worker output:\n{receptor_output}')
                 elif detail:
-                    self.task.runner_callback.delay_update(result_traceback=detail)
+                    self.task.runner_callback.delay_update(result_traceback=f'Receptor detail:\n{detail}')
                 else:
                     logger.warning(f'No result details or output from {self.task.instance.log_format}, status:\n{state_name}')
             except Exception:
                 logger.exception(f'Work results error from job id={self.task.instance.id} work_unit={self.task.instance.work_unit_id}')
             raise RuntimeError(detail)

         return res
@@ -675,26 +676,41 @@ RECEPTOR_CONFIG_STARTER = (
 )


-@task()
-def write_receptor_config():
-    lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
-    with lock:
-        receptor_config = list(RECEPTOR_CONFIG_STARTER)
-
-        this_inst = Instance.objects.me()
-        instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)
-        existing_peers = {link.target_id for link in InstanceLink.objects.filter(source=this_inst)}
-        new_links = []
-        for instance in instances:
-            peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
-            receptor_config.append(peer)
-            if instance.id not in existing_peers:
-                new_links.append(InstanceLink(source=this_inst, target=instance, link_state=InstanceLink.States.ADDING))
-
-        InstanceLink.objects.bulk_create(new_links)
-
-        with open(__RECEPTOR_CONF, 'w') as file:
-            yaml.dump(receptor_config, file, default_flow_style=False)
+def should_update_config(instances):
+    '''
+    checks that the list of instances matches the list of
+    tcp-peers in the config
+    '''
+    current_config = read_receptor_config()  # this gets receptor conf lock
+    current_peers = []
+    for config_entry in current_config:
+        for key, value in config_entry.items():
+            if key.endswith('-peer'):
+                current_peers.append(value['address'])
+    intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances]
+    logger.debug(f"Peers current {current_peers} intended {intended_peers}")
+    if set(current_peers) == set(intended_peers):
+        return False  # config file is already up to date
+
+    return True
+
+
+def generate_config_data():
+    # returns two values
+    # receptor config - based on current database peers
+    # should_update - If True, receptor_config differs from the receptor conf file on disk
+    instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True)
+
+    receptor_config = list(RECEPTOR_CONFIG_STARTER)
+    for instance in instances:
+        peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
+        receptor_config.append(peer)
+    should_update = should_update_config(instances)
+    return receptor_config, should_update
+
+
+def reload_receptor():
+    logger.warning("Receptor config changed, reloading receptor")

+    # This needs to be outside of the lock because this function itself will acquire the lock.
     receptor_ctl = get_receptor_ctl()
@@ -710,8 +726,29 @@ def write_receptor_config():
     else:
         raise RuntimeError("Receptor reload failed")

-    links = InstanceLink.objects.filter(source=this_inst, target__in=instances, link_state=InstanceLink.States.ADDING)
-    links.update(link_state=InstanceLink.States.ESTABLISHED)
+
+@task()
+def write_receptor_config():
+    """
+    This task runs async on each control node, K8S only.
+    It is triggered whenever a remote node is added or removed, or if peers_from_control_nodes
+    is flipped.
+    It is possible for write_receptor_config to be called multiple times,
+    for example, if new instances are added in quick succession.
+    To prevent that case, each control node first grabs a DB advisory lock, specific
+    to just that control node (i.e. multiple control nodes can run this function
+    at the same time, since it only writes the local receptor config file)
+    """
+    with advisory_lock(f"{settings.CLUSTER_HOST_ID}_write_receptor_config", wait=True):
+        # Config file needs to be updated
+        receptor_config, should_update = generate_config_data()
+        if should_update:
+            lock = FileLock(__RECEPTOR_CONF_LOCKFILE)
+            with lock:
+                with open(__RECEPTOR_CONF, 'w') as file:
+                    yaml.dump(receptor_config, file, default_flow_style=False)
+
+            reload_receptor()


 @task(queue=get_task_queuename)
@@ -731,6 +768,3 @@ def remove_deprovisioned_node(hostname):

     # This will as a side effect also delete the InstanceLinks that are tied to it.
     Instance.objects.filter(hostname=hostname).delete()
-
-    # Update the receptor configs for all of the control-plane.
-    write_receptor_config.apply_async(queue='tower_broadcast_all')
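should_update_config above reduces to a set comparison between the host:port peers parsed from the on-disk YAML and the intended peers from the database. A standalone sketch of that comparison with hypothetical data (no database or file access):

    # Standalone sketch of the peer comparison in should_update_config.
    # Entries whose key ends in '-peer' (e.g. 'tcp-peer') carry an 'address'.
    current_config = [
        {'local-only': None},
        {'tcp-peer': {'address': 'hop1:27199', 'tls': 'tlsclient'}},
    ]
    intended = ['hop1:27199', 'exec1:27199']

    current_peers = [
        value['address']
        for entry in current_config
        for key, value in entry.items()
        if key.endswith('-peer')
    ]
    should_update = set(current_peers) != set(intended)
    print(should_update)  # True: exec1:27199 is missing from the file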
@@ -48,7 +48,6 @@ from awx.main.models import (
     Inventory,
     SmartInventoryMembership,
     Job,
-    HostMetric,
     convert_jsonfields,
 )
 from awx.main.constants import ACTIVE_STATES
@@ -64,6 +63,7 @@ from awx.main.utils.common import (

 from awx.main.utils.reload import stop_local_services
 from awx.main.utils.pglock import advisory_lock
+from awx.main.tasks.helpers import is_run_threshold_reached
 from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanup, administrative_workunit_reaper, write_receptor_config
 from awx.main.consumers import emit_channel_notification
 from awx.main import analytics
@@ -368,9 +368,7 @@ def send_notifications(notification_list, job_id=None):

 @task(queue=get_task_queuename)
 def gather_analytics():
-    from awx.conf.models import Setting
-
-    if is_run_threshold_reached(Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first(), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
+    if is_run_threshold_reached(getattr(settings, 'AUTOMATION_ANALYTICS_LAST_GATHER', None), settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
         analytics.gather()


@@ -427,29 +425,6 @@ def cleanup_images_and_files():
     _cleanup_images_and_files()


-@task(queue=get_task_queuename)
-def cleanup_host_metrics():
-    """Run cleanup host metrics ~each month"""
-    # TODO: move whole method to host_metrics in follow-up PR
-    from awx.conf.models import Setting
-
-    if is_run_threshold_reached(
-        Setting.objects.filter(key='CLEANUP_HOST_METRICS_LAST_TS').first(), getattr(settings, 'CLEANUP_HOST_METRICS_INTERVAL', 30) * 86400
-    ):
-        months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_SOFT_THRESHOLD', 12)
-        logger.info("Executing cleanup_host_metrics")
-        HostMetric.cleanup_task(months_ago)
-        logger.info("Finished cleanup_host_metrics")
-
-
-def is_run_threshold_reached(setting, threshold_seconds):
-    from rest_framework.fields import DateTimeField
-
-    last_time = DateTimeField().to_internal_value(setting.value) if setting and setting.value else DateTimeField().to_internal_value('1970-01-01')
-
-    return (now() - last_time).total_seconds() > threshold_seconds
-
-
 @task(queue=get_task_queuename)
 def cluster_node_health_check(node):
     """
@@ -491,7 +466,6 @@ def execution_node_health_check(node):
     data = worker_info(node)

     prior_capacity = instance.capacity
-
     instance.save_health_data(
         version='ansible-runner-' + data.get('runner_version', '???'),
         cpu=data.get('cpu_count', 0),
@@ -512,13 +486,37 @@ def execution_node_health_check(node):
     return data


-def inspect_execution_nodes(instance_list):
-    with advisory_lock('inspect_execution_nodes_lock', wait=False):
-        node_lookup = {inst.hostname: inst for inst in instance_list}
+def inspect_established_receptor_connections(mesh_status):
+    '''
+    Flips link state from ADDING to ESTABLISHED
+    If the InstanceLink source and target match the entries
+    in Known Connection Costs, flip to Established.
+    '''
+    from awx.main.models import InstanceLink
+
+    all_links = InstanceLink.objects.filter(link_state=InstanceLink.States.ADDING)
+    if not all_links.exists():
+        return
+    active_receptor_conns = mesh_status['KnownConnectionCosts']
+    update_links = []
+    for link in all_links:
+        if link.link_state != InstanceLink.States.REMOVING:
+            if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}):
+                if link.link_state is not InstanceLink.States.ESTABLISHED:
+                    link.link_state = InstanceLink.States.ESTABLISHED
+                    update_links.append(link)
+
+    InstanceLink.objects.bulk_update(update_links, ['link_state'])
+
+
+def inspect_execution_and_hop_nodes(instance_list):
+    with advisory_lock('inspect_execution_and_hop_nodes_lock', wait=False):
+        node_lookup = {inst.hostname: inst for inst in instance_list}
         ctl = get_receptor_ctl()
         mesh_status = ctl.simple_command('status')

+        inspect_established_receptor_connections(mesh_status)
+
         nowtime = now()
         workers = mesh_status['Advertisements']

@@ -576,7 +574,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
             this_inst = inst
             break

-    inspect_execution_nodes(instance_list)
+    inspect_execution_and_hop_nodes(instance_list)

     for inst in list(instance_list):
         if inst == this_inst:
@@ -765,7 +763,6 @@ def awx_periodic_scheduler():
                 new_unified_job.save(update_fields=['status', 'job_explanation'])
                 new_unified_job.websocket_emit_status("failed")
         emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
-    state.save()


 def schedule_manager_success_or_error(instance):
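inspect_established_receptor_connections keys off KnownConnectionCosts in receptorctl's status output, a map of node to {neighbor: cost}; a link counts as established once its target appears under its source. A sketch of that check on assumed data:

    # Sketch of the ADDING -> ESTABLISHED check against receptor status
    # output. The mesh_status shape mirrors 'receptorctl status' JSON.
    mesh_status = {
        'KnownConnectionCosts': {
            'control1': {'hop1': 1},
            'hop1': {'control1': 1, 'exec1': 1},
        }
    }

    adding_links = [('control1', 'hop1'), ('control1', 'exec1')]  # (source, target)
    established = [
        (src, tgt)
        for src, tgt in adding_links
        if tgt in mesh_status['KnownConnectionCosts'].get(src, {})
    ]
    print(established)  # [('control1', 'hop1')]; control1 -> exec1 stays ADDING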
@@ -84,5 +84,6 @@ def test_custom_hostname_regex(post, admin_user):
         "hostname": value[0],
         "node_type": "execution",
         "node_state": "installed",
+        "peers": [],
     }
     post(url=url, user=admin_user, data=data, expect=value[1])
awx/main/tests/functional/api/test_instance_peers.py (new file, +342)
@@ -0,0 +1,342 @@
import pytest
import yaml
import itertools
from unittest import mock

from django.db.utils import IntegrityError

from awx.api.versioning import reverse
from awx.main.models import Instance
from awx.api.views.instance_install_bundle import generate_group_vars_all_yml


def has_peer(group_vars, peer):
    peers = group_vars.get('receptor_peers', [])
    for p in peers:
        if f"{p['host']}:{p['port']}" == peer:
            return True
    return False


@pytest.mark.django_db
class TestPeers:
    @pytest.fixture(autouse=True)
    def configure_settings(self, settings):
        settings.IS_K8S = True

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_prevent_peering_to_self(self, node_type):
        """
        cannot peer to self
        """
        control_instance = Instance.objects.create(hostname='abc', node_type=node_type)
        with pytest.raises(IntegrityError):
            control_instance.peers.add(control_instance)

    @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution'])
    def test_creating_node(self, node_type, admin_user, post):
        """
        can only add hop and execution nodes via API
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "node_type": node_type},
            user=admin_user,
            expect=400 if node_type in ['control', 'hybrid'] else 201,
        )

    def test_changing_node_type(self, admin_user, patch):
        """
        cannot change node type
        """
        hop = Instance.objects.create(hostname='abc', node_type="hop")
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_type": "execution"},
            user=admin_user,
            expect=400,
        )

    @pytest.mark.parametrize('node_type', ['hop', 'execution'])
    def test_listener_port_null(self, node_type, admin_user, post):
        """
        listener_port can be None
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "node_type": node_type, "listener_port": None},
            user=admin_user,
            expect=201,
        )

    @pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)])
    def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user):
        """
        only hop and execution nodes can have peers_from_control_nodes set to True
        """
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789},
            user=admin_user,
            expect=201 if allowed else 400,
        )

    def test_listener_port_is_required(self, admin_user, post):
        """
        if adding an instance to the peers list, that instance must have listener_port set
        """
        Instance.objects.create(hostname='abc', node_type="hop", listener_port=None)
        post(
            url=reverse('api:instance_list'),
            data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]},
            user=admin_user,
            expect=400,
        )

    def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post):
        """
        if peers_from_control_nodes is True, listener_port must be an integer
        Assert that all other combinations are allowed
        """
        for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])):
            node_type, peers_from, listener_port = item
            # only disallowed case is when peers_from is True and listener port is None
            disallowed = peers_from and not listener_port
            post(
                url=reverse('api:instance_list'),
                data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port},
                user=admin_user,
                expect=400 if disallowed else 201,
            )

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
        """
        for control nodes, the peers field should not be
        modified directly via patch.
        """
        control = Instance.objects.create(hostname='abc', node_type=node_type)
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
        assert [hop1] == list(control.peers.all())  # only hop1 should be peered
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": ["hop2"]},
            user=admin_user,
            expect=400,  # cannot add peers directly
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": ["hop1"]},
            user=admin_user,
            expect=200,  # patching with current peers list should be okay
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={"peers": []},
            user=admin_user,
            expect=400,  # cannot remove peers directly
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
            data={},
            user=admin_user,
            expect=200,  # patching without data should be fine too
        )
        # patch hop2
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
            data={"peers_from_control_nodes": True},
            user=admin_user,
            expect=200,  # flipping peers_from_control_nodes is allowed
        )
        assert {hop1, hop2} == set(control.peers.all())  # hop1 and hop2 should now be peered from control node

    def test_disallow_changing_hostname(self, admin_user, patch):
        """
        cannot change hostname
        """
        hop = Instance.objects.create(hostname='hop', node_type='hop')
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"hostname": "hop2"},
            user=admin_user,
            expect=400,
        )

    def test_disallow_changing_node_state(self, admin_user, patch):
        """
        only allow setting to deprovisioning
        """
        hop = Instance.objects.create(hostname='hop', node_type='hop', node_state='installed')
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_state": "deprovisioning"},
            user=admin_user,
            expect=200,
        )
        patch(
            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
            data={"node_state": "ready"},
            user=admin_user,
            expect=400,
        )

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_control_node_automatically_peers(self, node_type):
        """
        a new control node should automatically
        peer to hop

        peer to hop should be removed if hop is deleted
        """

        hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
        control = Instance.objects.create(hostname='abc', node_type=node_type)
        assert hop in control.peers.all()
        hop.delete()
        assert not control.peers.exists()

    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
    def test_control_node_retains_other_peers(self, node_type):
        """
        if a new node comes online, other peer relationships should
        remain intact
        """
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
        hop1.peers.add(hop2)

        # a control node is added
        Instance.objects.create(hostname='control', node_type=node_type, listener_port=None)

        assert hop1.peers.exists()

    def test_group_vars(self, get, admin_user):
        """
        control > hop1 > hop2 < execution
        """
        control = Instance.objects.create(hostname='control', node_type='control', listener_port=None)
        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
        execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789)

        execution.peers.add(hop2)
        hop1.peers.add(hop2)

        control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
        hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
        hop2_vars = yaml.safe_load(generate_group_vars_all_yml(hop2))
        execution_vars = yaml.safe_load(generate_group_vars_all_yml(execution))

        # control group vars assertions
        assert has_peer(control_vars, 'hop1:6789')
        assert not has_peer(control_vars, 'hop2:6789')
        assert not has_peer(control_vars, 'execution:6789')
        assert not control_vars.get('receptor_listener', False)

        # hop1 group vars assertions
        assert has_peer(hop1_vars, 'hop2:6789')
        assert not has_peer(hop1_vars, 'execution:6789')
        assert hop1_vars.get('receptor_listener', False)

        # hop2 group vars assertions
        assert not has_peer(hop2_vars, 'hop1:6789')
        assert not has_peer(hop2_vars, 'execution:6789')
        assert hop2_vars.get('receptor_listener', False)
        assert hop2_vars.get('receptor_peers', []) == []

        # execution group vars assertions
        assert has_peer(execution_vars, 'hop2:6789')
        assert not has_peer(execution_vars, 'hop1:6789')
        assert execution_vars.get('receptor_listener', False)

    def test_write_receptor_config_called(self):
        """
        Assert that write_receptor_config is called
        when certain instances are created, or if
        peers_from_control_nodes changes.
        In general, write_receptor_config should only
        be called when necessary, as it will reload
        receptor backend connections, which is not trivial.
        """
        with mock.patch('awx.main.models.ha.schedule_write_receptor_config') as write_method:
            # new control instance but nothing to peer to (no)
            control = Instance.objects.create(hostname='control1', node_type='control')
            write_method.assert_not_called()

            # new hop node with peers_from_control_nodes False (no)
            hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
            hop1.delete()
            write_method.assert_not_called()

            # new hop node with peers_from_control_nodes True (yes)
            hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
            write_method.assert_called()
            write_method.reset_mock()

            # new control instance but with something to peer to (yes)
            Instance.objects.create(hostname='control2', node_type='control')
            write_method.assert_called()
            write_method.reset_mock()

            # new hop node with peers_from_control_nodes False and peered to another hop node (no)
            hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
            hop2.peers.add(hop1)
            hop2.delete()
            write_method.assert_not_called()

            # changing peers_from_control_nodes to False (yes)
            hop1.peers_from_control_nodes = False
            hop1.save()
            write_method.assert_called()
            write_method.reset_mock()

            # deleting hop node that has peers_from_control_nodes set to False (no)
            hop1.delete()
            write_method.assert_not_called()

            # deleting control nodes (no)
            control.delete()
            write_method.assert_not_called()

    def test_write_receptor_config_data(self):
        """
        Assert the correct peers are included in the data that will
        be written to receptor.conf
        """
        from awx.main.tasks.receptor import RECEPTOR_CONFIG_STARTER

        with mock.patch('awx.main.tasks.receptor.read_receptor_config', return_value=list(RECEPTOR_CONFIG_STARTER)):
            from awx.main.tasks.receptor import generate_config_data

            _, should_update = generate_config_data()
            assert not should_update

            # not peered, so config file should not be updated
            for i in range(3):
                Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False)

            _, should_update = generate_config_data()
            assert not should_update

            # peered, so config file should be updated
            expected_peers = []
            for i in range(3):
                expected_peers.append(f"hop-{i}:6789")
                Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True)

            for i in range(3):
                expected_peers.append(f"exYes-{i}:6789")
                Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True)

            new_config, should_update = generate_config_data()
            assert should_update

            peers = []
            for entry in new_config:
                for key, value in entry.items():
                    if key == "tcp-peer":
                        peers.append(value['address'])

            assert set(expected_peers) == set(peers)
@@ -0,0 +1,78 @@
import pytest

from awx.main.tasks.host_metrics import HostMetricTask
from awx.main.models.inventory import HostMetric
from awx.main.tests.factories.fixtures import mk_host_metric
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.utils import timezone


@pytest.mark.django_db
def test_no_host_metrics():
    """No-crash test"""
    assert HostMetric.objects.count() == 0
    HostMetricTask().cleanup(soft_threshold=0, hard_threshold=0)
    HostMetricTask().cleanup(soft_threshold=24, hard_threshold=42)
    assert HostMetric.objects.count() == 0


@pytest.mark.django_db
def test_delete_exception():
    """Crash test"""
    with pytest.raises(ValueError):
        HostMetricTask().soft_cleanup("")
    with pytest.raises(TypeError):
        HostMetricTask().hard_cleanup(set())


@pytest.mark.django_db
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, 20])
def test_soft_delete(threshold):
    """Metrics with last_automation < threshold are updated to deleted=True"""
    mk_host_metric('host_1', first_automation=ago(months=1), last_automation=ago(months=1), deleted=False)
    mk_host_metric('host_2', first_automation=ago(months=1), last_automation=ago(months=1), deleted=True)
    mk_host_metric('host_3', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=False)
    mk_host_metric('host_4', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=-1), deleted=True)
    mk_host_metric('host_5', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=False)
    mk_host_metric('host_6', first_automation=ago(months=1), last_automation=ago(months=threshold, hours=1), deleted=True)
    mk_host_metric('host_7', first_automation=ago(months=1), last_automation=ago(months=42), deleted=False)
    mk_host_metric('host_8', first_automation=ago(months=1), last_automation=ago(months=42), deleted=True)

    assert HostMetric.objects.count() == 8
    assert HostMetric.active_objects.count() == 4

    for i in range(2):
        HostMetricTask().cleanup(soft_threshold=threshold)
        assert HostMetric.objects.count() == 8

    hostnames = set(HostMetric.objects.filter(deleted=False).order_by('hostname').values_list('hostname', flat=True))
    assert hostnames == {'host_1', 'host_3'}


@pytest.mark.django_db
@pytest.mark.parametrize('threshold', [settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD, 20])
def test_hard_delete(threshold):
    """Metrics with last_deleted < threshold and deleted=True are deleted from the db"""
    mk_host_metric('host_1', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=False)
    mk_host_metric('host_2', first_automation=ago(months=1), last_deleted=ago(months=1), deleted=True)
    mk_host_metric('host_3', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=False)
    mk_host_metric('host_4', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=-1), deleted=True)
    mk_host_metric('host_5', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=False)
    mk_host_metric('host_6', first_automation=ago(months=1), last_deleted=ago(months=threshold, hours=1), deleted=True)
    mk_host_metric('host_7', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=False)
    mk_host_metric('host_8', first_automation=ago(months=1), last_deleted=ago(months=42), deleted=True)

    assert HostMetric.objects.count() == 8
    assert HostMetric.active_objects.count() == 4

    for i in range(2):
        HostMetricTask().cleanup(hard_threshold=threshold)
        assert HostMetric.objects.count() == 6

    hostnames = set(HostMetric.objects.order_by('hostname').values_list('hostname', flat=True))
    assert hostnames == {'host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_7'}


def ago(months=0, hours=0):
    return timezone.now() - relativedelta(months=months, hours=hours)
@@ -37,9 +37,9 @@ def test_orphan_unified_job_creation(instance, inventory):


 @pytest.mark.django_db
-@mock.patch('awx.main.tasks.system.inspect_execution_nodes', lambda *args, **kwargs: None)
-@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu: 8)
-@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem: 62)
+@mock.patch('awx.main.tasks.system.inspect_execution_and_hop_nodes', lambda *args, **kwargs: None)
+@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu, is_control_node: 8)
+@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 62)
 def test_job_capacity_and_with_inactive_node():
     i = Instance.objects.create(hostname='test-1')
     i.save_health_data('18.0.1', 2, 8000)
@@ -36,7 +36,9 @@ def test_SYSTEM_TASK_ABS_MEM_conversion(value, converted_value, mem_capacity):
     mock_settings.IS_K8S = True
     assert convert_mem_str_to_bytes(value) == converted_value
     assert get_corrected_memory(-1) == converted_value
-    assert get_mem_effective_capacity(-1) == mem_capacity
+    assert get_mem_effective_capacity(1, is_control_node=True) == mem_capacity
+    # SYSTEM_TASK_ABS_MEM should not affect memory and capacity for execution nodes
+    assert get_mem_effective_capacity(2147483648, is_control_node=False) == 20


 @pytest.mark.parametrize(
@@ -58,4 +60,6 @@ def test_SYSTEM_TASK_ABS_CPU_conversion(value, converted_value, cpu_capacity):
     mock_settings.SYSTEM_TASK_FORKS_CPU = 4
     assert convert_cpu_str_to_decimal_cpu(value) == converted_value
     assert get_corrected_cpu(-1) == converted_value
-    assert get_cpu_effective_capacity(-1) == cpu_capacity
+    assert get_cpu_effective_capacity(-1, is_control_node=True) == cpu_capacity
+    # SYSTEM_TASK_ABS_CPU should not affect cpu count and capacity for execution nodes
+    assert get_cpu_effective_capacity(2.0, is_control_node=False) == 8
@@ -768,14 +768,13 @@ def get_corrected_cpu(cpu_count):  # formerly get_cpu_capacity
     return cpu_count  # no correction


-def get_cpu_effective_capacity(cpu_count):
+def get_cpu_effective_capacity(cpu_count, is_control_node=False):
     from django.conf import settings

-    cpu_count = get_corrected_cpu(cpu_count)
-
     settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
     env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)

+    if is_control_node:
+        cpu_count = get_corrected_cpu(cpu_count)
     if env_forkcpu:
         forkcpu = int(env_forkcpu)
     elif settings_forkcpu:
@@ -834,6 +833,7 @@ def get_corrected_memory(memory):

     # Runner returns memory in bytes
     # so we convert memory from settings to bytes as well.
+
     if env_absmem is not None:
         return convert_mem_str_to_bytes(env_absmem)
     elif settings_absmem is not None:
@@ -842,14 +842,13 @@ def get_corrected_memory(memory):
     return memory


-def get_mem_effective_capacity(mem_bytes):
+def get_mem_effective_capacity(mem_bytes, is_control_node=False):
     from django.conf import settings

-    mem_bytes = get_corrected_memory(mem_bytes)
-
     settings_mem_mb_per_fork = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
     env_mem_mb_per_fork = os.getenv('SYSTEM_TASK_FORKS_MEM', None)

+    if is_control_node:
+        mem_bytes = get_corrected_memory(mem_bytes)
     if env_mem_mb_per_fork:
         mem_mb_per_fork = int(env_mem_mb_per_fork)
     elif settings_mem_mb_per_fork:
@@ -470,13 +470,13 @@ CELERYBEAT_SCHEDULE = {
     'receptor_reaper': {'task': 'awx.main.tasks.system.awx_receptor_workunit_reaper', 'schedule': timedelta(seconds=60)},
     'send_subsystem_metrics': {'task': 'awx.main.analytics.analytics_tasks.send_subsystem_metrics', 'schedule': timedelta(seconds=20)},
     'cleanup_images': {'task': 'awx.main.tasks.system.cleanup_images_and_files', 'schedule': timedelta(hours=3)},
-    'cleanup_host_metrics': {'task': 'awx.main.tasks.system.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
+    'cleanup_host_metrics': {'task': 'awx.main.tasks.host_metrics.cleanup_host_metrics', 'schedule': timedelta(hours=3, minutes=30)},
+    'host_metric_summary_monthly': {'task': 'awx.main.tasks.host_metrics.host_metric_summary_monthly', 'schedule': timedelta(hours=4)},
 }

 # Django Caching Configuration
 DJANGO_REDIS_IGNORE_EXCEPTIONS = True
-CACHES = {'default': {'BACKEND': 'awx.main.cache.AWXRedisCache', 'LOCATION': 'unix:/var/run/redis/redis.sock?db=1'}}
+CACHES = {'default': {'BACKEND': 'awx.main.cache.AWXRedisCache', 'LOCATION': 'unix:///var/run/redis/redis.sock?db=1'}}

 # Social Auth configuration.
 SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
@@ -1049,7 +1049,7 @@ UI_NEXT = True
 # - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
 SUBSCRIPTION_USAGE_MODEL = ''

-# Host metrics cleanup - last time of the cleanup run (soft-deleting records)
+# Host metrics cleanup - last time of the task/command run
 CLEANUP_HOST_METRICS_LAST_TS = None
 # Host metrics cleanup - minimal interval between two cleanups in days
 CLEANUP_HOST_METRICS_INTERVAL = 30  # days
@@ -87,7 +87,7 @@ def _update_user_orgs(backend, desired_org_state, orgs_to_create, user=None):
         is_member_expression = org_opts.get(user_type, None)
         remove_members = bool(org_opts.get('remove_{}'.format(user_type), remove))
         has_role = _update_m2m_from_expression(user, is_member_expression, remove_members)
-        desired_org_state[organization_name][role_name] = has_role
+        desired_org_state[organization_name][role_name] = desired_org_state[organization_name].get(role_name, False) or has_role


 def _update_user_teams(backend, desired_team_state, teams_to_create, user=None):
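The one-line change above makes org-map role assignment additive with roles granted earlier in the pipeline (for example by the SAML attribute mapping) instead of overwriting them. A minimal before/after sketch:

    # Sketch: merging a role decision into desired_org_state without
    # clobbering a flag set by an earlier pipeline step.
    desired_org_state = {'o1_alias': {'admin_role': True}}  # set by SAML attr step
    role_name, has_role = 'admin_role', False               # org map says "not admin"

    # Old behavior: unconditional assignment loses the earlier True.
    # desired_org_state['o1_alias'][role_name] = has_role

    # New behavior: OR with whatever is already recorded.
    prev = desired_org_state['o1_alias'].get(role_name, False)
    desired_org_state['o1_alias'][role_name] = prev or has_role
    print(desired_org_state)  # {'o1_alias': {'admin_role': True}}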
@@ -637,3 +637,75 @@ class TestSAMLUserFlags:
        }

        assert expected == _check_flag(user, 'superuser', attributes, user_flags_settings)


@pytest.mark.django_db
def test__update_user_orgs_org_map_and_saml_attr():
    """
    This combines the action of two other tests, where an org membership is defined both by
    the ORGANIZATION_MAP and the SOCIAL_AUTH_SAML_ORGANIZATION_ATTR at the same time
    """

    # This data will make the user a member
    class BackendClass:
        s = {
            'ORGANIZATION_MAP': {
                'Default1': {
                    'remove': True,
                    'remove_admins': True,
                    'users': 'foobar',
                    'remove_users': True,
                    'organization_alias': 'o1_alias',
                }
            }
        }

        def setting(self, key):
            return self.s[key]

    backend = BackendClass()

    setting = {
        'saml_attr': 'memberOf',
        'saml_admin_attr': 'admins',
        'saml_auditor_attr': 'auditors',
        'remove': True,
        'remove_admins': True,
    }

    # This data from the server will make the user an admin of the organization
    kwargs = {
        'username': 'foobar',
        'uid': 'idp:cmeyers@redhat.com',
        'request': {u'SAMLResponse': [], u'RelayState': [u'idp']},
        'is_new': False,
        'response': {
            'session_index': '_0728f0e0-b766-0135-75fa-02842b07c044',
            'idp_name': u'idp',
            'attributes': {
                'admins': ['Default1'],
            },
        },
        'social': None,
        'strategy': None,
        'new_association': False,
    }

    this_user = User.objects.create(username='foobar')

    with override_settings(SOCIAL_AUTH_SAML_ORGANIZATION_ATTR=setting):
        desired_org_state = {}
        orgs_to_create = []

        # this should add the user as an admin of the org
        _update_user_orgs_by_saml_attr(backend, desired_org_state, orgs_to_create, **kwargs)
        assert desired_org_state['o1_alias']['admin_role'] is True

        assert set(orgs_to_create) == set(['o1_alias'])

        # this should add the user as a member of the org without reverting the admin status
        _update_user_orgs(backend, desired_org_state, orgs_to_create, this_user)
        assert desired_org_state['o1_alias']['member_role'] is True
        assert desired_org_state['o1_alias']['admin_role'] is True

        assert set(orgs_to_create) == set(['o1_alias'])
awx/ui/package-lock.json (generated, 1771 lines changed): file diff suppressed because it is too large.
@@ -33,12 +33,12 @@
     "styled-components": "5.3.6"
   },
   "devDependencies": {
-    "@babel/core": "^7.16.10",
-    "@babel/eslint-parser": "^7.16.5",
-    "@babel/eslint-plugin": "^7.16.5",
-    "@babel/plugin-syntax-jsx": "7.16.7",
-    "@babel/polyfill": "^7.8.7",
-    "@babel/preset-react": "7.16.7",
+    "@babel/core": "^7.22.9",
+    "@babel/eslint-parser": "^7.22.9",
+    "@babel/eslint-plugin": "^7.22.10",
+    "@babel/plugin-syntax-jsx": "^7.22.5",
+    "@babel/polyfill": "^7.12.1",
+    "@babel/preset-react": "^7.22.5",
     "@cypress/instrument-cra": "^1.4.0",
     "@lingui/cli": "^3.7.1",
     "@lingui/loader": "3.15.0",
@@ -5,7 +5,11 @@
 <title data-cy="migration-title">{{ title }}</title>
 <meta
   http-equiv="Content-Security-Policy"
-  content="default-src 'self'; connect-src 'self' ws: wss:; style-src 'self' 'unsafe-inline'; script-src 'self' 'nonce-{{ csp_nonce }}' *.pendo.io; img-src 'self' *.pendo.io data:;"
+  content="default-src 'self';
+    connect-src 'self' ws: wss:;
+    style-src 'self' 'unsafe-inline';
+    script-src 'self' 'nonce-{{ csp_nonce }}' *.pendo.io;
+    img-src 'self' *.pendo.io data:;"
 />
 <meta charset="utf-8">
 <meta http-equiv="X-UA-Compatible" content="IE=edge" />
@@ -11,6 +11,7 @@ import {
   WorkflowJobsAPI,
   WorkflowJobTemplatesAPI,
 } from 'api';
+import useToast, { AlertVariant } from 'hooks/useToast';
 import AlertModal from '../AlertModal';
 import ErrorDetail from '../ErrorDetail';
 import LaunchPrompt from '../LaunchPrompt';
@@ -45,8 +46,22 @@ function LaunchButton({ resource, children }) {
   const [isLaunching, setIsLaunching] = useState(false);
   const [resourceCredentials, setResourceCredentials] = useState([]);
   const [error, setError] = useState(null);
+  const { addToast, Toast, toastProps } = useToast();
+
+  const showToast = () => {
+    addToast({
+      id: resource.id,
+      title: t`A job has already been launched`,
+      variant: AlertVariant.info,
+      hasTimeout: true,
+    });
+  };

   const handleLaunch = async () => {
+    if (isLaunching) {
+      showToast();
+      return;
+    }
+    setIsLaunching(true);
     const readLaunch =
       resource.type === 'workflow_job_template'
@@ -104,6 +119,11 @@ function LaunchButton({ resource, children }) {
   };

   const launchWithParams = async (params) => {
+    if (isLaunching) {
+      showToast();
+      return;
+    }
+    setIsLaunching(true);
     try {
       let jobPromise;

@@ -141,6 +161,10 @@ function LaunchButton({ resource, children }) {
     let readRelaunch;
     let relaunch;

+    if (isLaunching) {
+      showToast();
+      return;
+    }
+    setIsLaunching(true);
     if (resource.type === 'inventory_update') {
       // We'll need to handle the scenario where the src no longer exists
@@ -197,6 +221,7 @@ function LaunchButton({ resource, children }) {
         handleRelaunch,
         isLaunching,
       })}
+      <Toast {...toastProps} />
       {error && (
         <AlertModal
           isOpen={error}
@@ -223,6 +223,10 @@ function Lookup(props) {
 const Item = shape({
   id: number.isRequired,
 });
+const InstanceItem = shape({
+  id: number.isRequired,
+  hostname: string.isRequired,
+});

 Lookup.propTypes = {
   id: string,
@@ -230,7 +234,13 @@ Lookup.propTypes = {
   modalDescription: oneOfType([string, node]),
   onChange: func.isRequired,
   onUpdate: func,
-  value: oneOfType([Item, arrayOf(Item), object]),
+  value: oneOfType([
+    Item,
+    arrayOf(Item),
+    object,
+    InstanceItem,
+    arrayOf(InstanceItem),
+  ]),
   multiple: bool,
   required: bool,
   onBlur: func,
awx/ui/src/components/Lookup/PeersLookup.js (new file, +212, executable)
@@ -0,0 +1,212 @@
import React, { useCallback, useEffect } from 'react';
import { arrayOf, string, func, bool, shape } from 'prop-types';
import { withRouter } from 'react-router-dom';
import { t } from '@lingui/macro';
import { FormGroup, Chip } from '@patternfly/react-core';
import { InstancesAPI } from 'api';
import { Instance } from 'types';
import { getSearchableKeys } from 'components/PaginatedTable';
import { getQSConfig, parseQueryString, mergeParams } from 'util/qs';
import useRequest from 'hooks/useRequest';
import Popover from '../Popover';
import OptionsList from '../OptionsList';
import Lookup from './Lookup';
import LookupErrorMessage from './shared/LookupErrorMessage';
import FieldWithPrompt from '../FieldWithPrompt';

const QS_CONFIG = getQSConfig('instances', {
  page: 1,
  page_size: 5,
  order_by: 'hostname',
});

function PeersLookup({
  id,
  value,
  onChange,
  tooltip,
  className,
  required,
  history,
  fieldName,
  multiple,
  validate,
  columns,
  isPromptableField,
  promptId,
  promptName,
  formLabel,
  typePeers,
  instance_details,
}) {
  const {
    result: { instances, count, relatedSearchableKeys, searchableKeys },
    request: fetchInstances,
    error,
    isLoading,
  } = useRequest(
    useCallback(async () => {
      const params = parseQueryString(QS_CONFIG, history.location.search);
      const peersFilter = {};
      if (typePeers) {
        peersFilter.not__node_type = ['control', 'hybrid'];
        if (instance_details) {
          if (instance_details.id) {
            peersFilter.not__id = instance_details.id;
            peersFilter.not__hostname = instance_details.peers;
          }
        }
      }

      const [{ data }, actionsResponse] = await Promise.all([
        InstancesAPI.read(
          mergeParams(params, {
            ...peersFilter,
          })
        ),
        InstancesAPI.readOptions(),
      ]);
      return {
        instances: data.results,
        count: data.count,
        relatedSearchableKeys: (
          actionsResponse?.data?.related_search_fields || []
        ).map((val) => val.slice(0, -8)),
        searchableKeys: getSearchableKeys(actionsResponse.data.actions?.GET),
      };
    }, [history.location, typePeers, instance_details]),
    {
      instances: [],
      count: 0,
      relatedSearchableKeys: [],
      searchableKeys: [],
    }
  );

  useEffect(() => {
    fetchInstances();
  }, [fetchInstances]);

  const renderLookup = () => (
    <>
      <Lookup
        id={fieldName}
        header={formLabel}
        value={value}
        onChange={onChange}
        onUpdate={fetchInstances}
        fieldName={fieldName}
        validate={validate}
        qsConfig={QS_CONFIG}
        multiple={multiple}
        required={required}
        isLoading={isLoading}
        label={formLabel}
        renderItemChip={({ item, removeItem, canDelete }) => (
          <Chip
            key={item.id}
            onClick={() => removeItem(item)}
            isReadOnly={!canDelete}
          >
            {item.hostname}
          </Chip>
        )}
        renderOptionsList={({ state, dispatch, canDelete }) => (
          <OptionsList
            value={state.selectedItems}
            options={instances}
            optionCount={count}
            columns={columns}
            header={formLabel}
            displayKey="hostname"
            searchColumns={[
              {
                name: t`Hostname`,
                key: 'hostname__icontains',
                isDefault: true,
              },
            ]}
            sortColumns={[
              {
                name: t`Hostname`,
                key: 'hostname',
              },
            ]}
            searchableKeys={searchableKeys}
            relatedSearchableKeys={relatedSearchableKeys}
            multiple={multiple}
            label={formLabel}
            name={fieldName}
            qsConfig={QS_CONFIG}
            readOnly={!canDelete}
            selectItem={(item) => dispatch({ type: 'SELECT_ITEM', item })}
            deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })}
          />
        )}
      />
      <LookupErrorMessage error={error} />
    </>
  );

  return isPromptableField ? (
    <FieldWithPrompt
      fieldId={id}
      label={formLabel}
      promptId={promptId}
      promptName={promptName}
      tooltip={tooltip}
    >
      {renderLookup()}
    </FieldWithPrompt>
  ) : (
    <FormGroup
      className={className}
      label={formLabel}
      labelIcon={tooltip && <Popover content={tooltip} />}
      fieldId={id}
    >
      {renderLookup()}
    </FormGroup>
  );
}

PeersLookup.propTypes = {
  id: string,
  value: arrayOf(Instance).isRequired,
  tooltip: string,
|
||||
onChange: func.isRequired,
|
||||
className: string,
|
||||
required: bool,
|
||||
validate: func,
|
||||
multiple: bool,
|
||||
fieldName: string,
|
||||
columns: arrayOf(Object),
|
||||
formLabel: string,
|
||||
instance_details: (Instance, shape({})),
|
||||
typePeers: bool,
|
||||
};
|
||||
|
||||
PeersLookup.defaultProps = {
|
||||
id: 'instances',
|
||||
tooltip: '',
|
||||
className: '',
|
||||
required: false,
|
||||
validate: () => undefined,
|
||||
fieldName: 'instances',
|
||||
columns: [
|
||||
{
|
||||
key: 'hostname',
|
||||
name: t`Hostname`,
|
||||
},
|
||||
{
|
||||
key: 'node_type',
|
||||
name: t`Node Type`,
|
||||
},
|
||||
],
|
||||
formLabel: t`Instances`,
|
||||
instance_details: {},
|
||||
multiple: true,
|
||||
typePeers: false,
|
||||
};
|
||||
|
||||
export default withRouter(PeersLookup);
|
||||
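For orientation, a minimal usage sketch of the new component (the wrapper component and Formik wiring here are assumptions for illustration; the real consumer is InstanceForm, shown later in this diff):

// Hypothetical sketch: wiring PeersLookup into a Formik-managed field.
import React from 'react';
import { Formik } from 'formik';
import { PeersLookup } from 'components/Lookup';

function PeersFieldExample() {
  return (
    <Formik initialValues={{ peers: [] }} onSubmit={() => {}}>
      {({ values, setFieldValue }) => (
        <PeersLookup
          id="peers"
          fieldName="peers"
          formLabel="Peers"
          value={values.peers}
          onChange={(selected) => setFieldValue('peers', selected)}
          multiple
          typePeers
        />
      )}
    </Formik>
  );
}

export default PeersFieldExample;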
awx/ui/src/components/Lookup/PeersLookup.test.js (new executable file, 137 lines)
@@ -0,0 +1,137 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { InstancesAPI } from 'api';
import { mountWithContexts } from '../../../testUtils/enzymeHelpers';
import PeersLookup from './PeersLookup';

jest.mock('../../api');

const mockedInstances = {
  count: 1,
  results: [
    {
      id: 2,
      name: 'Foo',
      image: 'quay.io/ansible/awx-ee',
      pull: 'missing',
    },
  ],
};

const instances = [
  {
    id: 1,
    hostname: 'awx_1',
    type: 'instance',
    url: '/api/v2/instances/1/',
    related: {
      named_url: '/api/v2/instances/awx_1/',
      jobs: '/api/v2/instances/1/jobs/',
      instance_groups: '/api/v2/instances/1/instance_groups/',
      peers: '/api/v2/instances/1/peers/',
    },
    summary_fields: {
      user_capabilities: {
        edit: false,
      },
      links: [],
    },
    uuid: '00000000-0000-0000-0000-000000000000',
    created: '2023-04-26T22:06:46.766198Z',
    modified: '2023-04-26T22:06:46.766217Z',
    last_seen: '2023-04-26T23:12:02.857732Z',
    health_check_started: null,
    health_check_pending: false,
    last_health_check: '2023-04-26T23:01:13.941693Z',
    errors: 'Instance received normal shutdown signal',
    capacity_adjustment: '1.00',
    version: '0.1.dev33237+g1fdef52',
    capacity: 0,
    consumed_capacity: 0,
    percent_capacity_remaining: 0,
    jobs_running: 0,
    jobs_total: 0,
    cpu: '8.0',
    memory: 8011055104,
    cpu_capacity: 0,
    mem_capacity: 0,
    enabled: true,
    managed_by_policy: true,
    node_type: 'hybrid',
    node_state: 'installed',
    ip_address: null,
    listener_port: 27199,
    peers: [],
    peers_from_control_nodes: false,
  },
];

describe('PeersLookup', () => {
  let wrapper;

  beforeEach(() => {
    InstancesAPI.read.mockResolvedValue({
      data: mockedInstances,
    });
  });

  afterEach(() => {
    jest.clearAllMocks();
  });

  test('should render successfully without instance_details (for new added instance)', async () => {
    InstancesAPI.readOptions.mockReturnValue({
      data: {
        actions: {
          GET: {},
          POST: {},
        },
        related_search_fields: [],
      },
    });
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik>
          <PeersLookup value={instances} onChange={() => {}} />
        </Formik>
      );
    });
    wrapper.update();
    expect(InstancesAPI.read).toHaveBeenCalledTimes(1);
    expect(wrapper.find('PeersLookup')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Instances"]').length).toBe(1);
    expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
      0
    );
  });
  test('should render successfully with instance_details for edit instance', async () => {
    InstancesAPI.readOptions.mockReturnValue({
      data: {
        actions: {
          GET: {},
          POST: {},
        },
        related_search_fields: [],
      },
    });
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik>
          <PeersLookup
            value={instances}
            instance_details={instances[0]}
            onChange={() => {}}
          />
        </Formik>
      );
    });
    wrapper.update();
    expect(InstancesAPI.read).toHaveBeenCalledTimes(1);
    expect(wrapper.find('PeersLookup')).toHaveLength(1);
    expect(wrapper.find('FormGroup[label="Instances"]').length).toBe(1);
    expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
      0
    );
  });
});
@@ -8,3 +8,4 @@ export { default as ApplicationLookup } from './ApplicationLookup';
export { default as HostFilterLookup } from './HostFilterLookup';
export { default as OrganizationLookup } from './OrganizationLookup';
export { default as ExecutionEnvironmentLookup } from './ExecutionEnvironmentLookup';
+export { default as PeersLookup } from './PeersLookup';
@@ -125,19 +125,24 @@ const Item = shape({
  name: string.isRequired,
  url: string,
});
const InstanceItem = shape({
  id: oneOfType([number, string]).isRequired,
  hostname: string.isRequired,
  url: string,
});
OptionsList.propTypes = {
  deselectItem: func.isRequired,
  displayKey: string,
  isSelectedDraggable: bool,
  multiple: bool,
  optionCount: number.isRequired,
-  options: arrayOf(Item).isRequired,
+  options: oneOfType([arrayOf(Item), arrayOf(InstanceItem)]).isRequired,
  qsConfig: QSConfig.isRequired,
  renderItemChip: func,
  searchColumns: SearchColumns,
  selectItem: func.isRequired,
  sortColumns: SortColumns,
-  value: arrayOf(Item).isRequired,
+  value: oneOfType([arrayOf(Item), arrayOf(InstanceItem)]).isRequired,
};
OptionsList.defaultProps = {
  isSelectedDraggable: false,
@@ -32,12 +32,10 @@ describe('<InstanceAdd />', () => {
    await waitForElement(wrapper, 'isLoading', (el) => el.length === 0);
    await act(async () => {
      wrapper.find('InstanceForm').prop('handleSubmit')({
        name: 'new Foo',
        node_type: 'hop',
      });
    });
    expect(InstancesAPI.create).toHaveBeenCalledWith({
      name: 'new Foo',
      node_type: 'hop',
    });
    expect(history.location.pathname).toBe('/instances/13/details');
@@ -1,6 +1,6 @@
import React, { useCallback, useEffect, useState } from 'react';

-import { useHistory, useParams } from 'react-router-dom';
+import { useHistory, useParams, Link } from 'react-router-dom';
import { t, Plural } from '@lingui/macro';
import {
  Button,
@@ -116,6 +116,7 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
      setBreadcrumb(instance);
    }
  }, [instance, setBreadcrumb]);

  const { error: healthCheckError, request: fetchHealthCheck } = useRequest(
    useCallback(async () => {
      const { status } = await InstancesAPI.healthCheck(id);
@@ -205,13 +206,42 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
          }
        />
        <Detail label={t`Node Type`} value={instance.node_type} />
        <Detail label={t`Host`} value={instance.ip_address} />
        <Detail label={t`Listener Port`} value={instance.listener_port} />
        {(isExecutionNode || isHopNode) && (
          <>
            {instance.related?.install_bundle && (
              <Detail
                label={t`Install Bundle`}
                value={
                  <Tooltip content={t`Click to download bundle`}>
                    <Button
                      component="a"
                      isSmall
                      href={`${instance.related?.install_bundle}`}
                      target="_blank"
                      variant="secondary"
                      dataCy="install-bundle-download-button"
                      rel="noopener noreferrer"
                    >
                      <DownloadIcon />
                    </Button>
                  </Tooltip>
                }
              />
            )}
            <Detail
              label={t`Peers from control nodes`}
              value={instance.peers_from_control_nodes ? t`On` : t`Off`}
            />
          </>
        )}
        {!isHopNode && (
          <>
            <Detail
              label={t`Policy Type`}
              value={instance.managed_by_policy ? t`Auto` : t`Manual`}
            />
            <Detail label={t`Host`} value={instance.ip_address} />
            <Detail label={t`Running Jobs`} value={instance.jobs_running} />
            <Detail label={t`Total Jobs`} value={instance.jobs_total} />
            {instanceGroups && (
@@ -246,26 +276,6 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
              }
              value={formatHealthCheckTimeStamp(instance.last_health_check)}
            />
-           {instance.related?.install_bundle && (
-             <Detail
-               label={t`Install Bundle`}
-               value={
-                 <Tooltip content={t`Click to download bundle`}>
-                   <Button
-                     component="a"
-                     isSmall
-                     href={`${instance.related?.install_bundle}`}
-                     target="_blank"
-                     variant="secondary"
-                     dataCy="install-bundle-download-button"
-                     rel="noopener noreferrer"
-                   >
-                     <DownloadIcon />
-                   </Button>
-                 </Tooltip>
-               }
-             />
-           )}
            <Detail
              label={t`Capacity Adjustment`}
              dataCy="capacity-adjustment"
@@ -327,9 +337,20 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
        />
      )}
    </DetailList>
-   {!isHopNode && (
-     <CardActionsRow>
-       {config?.me?.is_superuser && isK8s && isExecutionNode && (
+   <CardActionsRow>
+     {config?.me?.is_superuser && isK8s && (isExecutionNode || isHopNode) && (
+       <Button
+         ouiaId="instance-detail-edit-button"
+         aria-label={t`edit`}
+         component={Link}
+         to={`/instances/${id}/edit`}
+       >
+         {t`Edit`}
+       </Button>
+     )}
+     {config?.me?.is_superuser &&
+       isK8s &&
+       (isExecutionNode || isHopNode) && (
        <RemoveInstanceButton
          dataCy="remove-instance-button"
          itemsToRemove={[instance]}
@@ -337,32 +358,31 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
          onRemove={removeInstances}
        />
      )}
-     {isExecutionNode && (
-       <Tooltip content={t`Run a health check on the instance`}>
-         <Button
-           isDisabled={
-             !config?.me?.is_superuser || instance.health_check_pending
-           }
-           variant="primary"
-           ouiaId="health-check-button"
-           onClick={fetchHealthCheck}
-           isLoading={instance.health_check_pending}
-           spinnerAriaLabel={t`Running health check`}
-         >
-           {instance.health_check_pending
-             ? t`Running health check`
-             : t`Run health check`}
-         </Button>
-       </Tooltip>
-     )}
-     <InstanceToggle
-       css="display: inline-flex;"
-       fetchInstances={fetchDetails}
-       instance={instance}
-       dataCy="enable-instance"
-     />
-   </CardActionsRow>
- )}
+   {isExecutionNode && (
+     <Tooltip content={t`Run a health check on the instance`}>
+       <Button
+         isDisabled={
+           !config?.me?.is_superuser || instance.health_check_pending
+         }
+         variant="primary"
+         ouiaId="health-check-button"
+         onClick={fetchHealthCheck}
+         isLoading={instance.health_check_pending}
+         spinnerAriaLabel={t`Running health check`}
+       >
+         {instance.health_check_pending
+           ? t`Running health check`
+           : t`Run health check`}
+       </Button>
+     </Tooltip>
+   )}
+   <InstanceToggle
+     css="display: inline-flex;"
+     fetchInstances={fetchDetails}
+     instance={instance}
+     dataCy="enable-instance"
+   />
+ </CardActionsRow>

  {error && (
    <AlertModal
awx/ui/src/screens/Instances/InstanceEdit/InstanceEdit.js (new file, 105 lines)
@@ -0,0 +1,105 @@
import React, { useState, useCallback, useEffect } from 'react';

import { t } from '@lingui/macro';
import { useHistory, useParams, Link } from 'react-router-dom';
import { Card, PageSection } from '@patternfly/react-core';
import useRequest from 'hooks/useRequest';
import ContentError from 'components/ContentError';
import ContentLoading from 'components/ContentLoading';
import { CardBody } from 'components/Card';
import { InstancesAPI } from 'api';
import InstanceForm from '../Shared/InstanceForm';

function InstanceEdit({ setBreadcrumb }) {
  const history = useHistory();
  const { id } = useParams();
  const [formError, setFormError] = useState();

  const detailsUrl = `/instances/${id}/details`;

  const handleSubmit = async (values) => {
    try {
      await InstancesAPI.update(id, values);
      history.push(detailsUrl);
    } catch (err) {
      setFormError(err);
    }
  };

  const handleCancel = () => {
    history.push(detailsUrl);
  };

  const {
    isLoading,
    error,
    request: fetchDetail,
    result: { instance, peers },
  } = useRequest(
    useCallback(async () => {
      const [{ data: instance_detail }, { data: peers_detail }] =
        await Promise.all([
          InstancesAPI.readDetail(id),
          InstancesAPI.readPeers(id),
        ]);
      return {
        instance: instance_detail,
        peers: peers_detail.results,
      };
    }, [id]),
    {
      instance: {},
      peers: [],
    }
  );

  useEffect(() => {
    fetchDetail();
  }, [fetchDetail]);

  useEffect(() => {
    if (instance) {
      setBreadcrumb(instance);
    }
  }, [instance, setBreadcrumb]);

  if (isLoading) {
    return (
      <CardBody>
        <ContentLoading />
      </CardBody>
    );
  }

  if (error) {
    return (
      <CardBody>
        <ContentError error={error}>
          {error?.response?.status === 404 && (
            <span>
              {t`Instance not found.`}{' '}
              <Link to="/instances">{t`View all Instances.`}</Link>
            </span>
          )}
        </ContentError>
      </CardBody>
    );
  }

  return (
    <PageSection>
      <Card>
        <InstanceForm
          instance={instance}
          instance_peers={peers}
          isEdit
          submitError={formError}
          handleSubmit={handleSubmit}
          handleCancel={handleCancel}
        />
      </Card>
    </PageSection>
  );
}

export default InstanceEdit;
awx/ui/src/screens/Instances/InstanceEdit/InstanceEdit.test.js (new file, 149 lines)
@@ -0,0 +1,149 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { createMemoryHistory } from 'history';
import useDebounce from 'hooks/useDebounce';
import { InstancesAPI } from 'api';
import {
  mountWithContexts,
  waitForElement,
} from '../../../../testUtils/enzymeHelpers';

import InstanceEdit from './InstanceEdit';

jest.mock('../../../api');
jest.mock('../../../hooks/useDebounce');
jest.mock('react-router-dom', () => ({
  ...jest.requireActual('react-router-dom'),
  useParams: () => ({
    id: 42,
  }),
}));

const instanceData = {
  id: 42,
  hostname: 'awx_1',
  type: 'instance',
  url: '/api/v2/instances/1/',
  related: {
    named_url: '/api/v2/instances/awx_1/',
    jobs: '/api/v2/instances/1/jobs/',
    instance_groups: '/api/v2/instances/1/instance_groups/',
    peers: '/api/v2/instances/1/peers/',
  },
  summary_fields: {
    user_capabilities: {
      edit: false,
    },
    links: [],
  },
  uuid: '00000000-0000-0000-0000-000000000000',
  created: '2023-04-26T22:06:46.766198Z',
  modified: '2023-04-26T22:06:46.766217Z',
  last_seen: '2023-04-26T23:12:02.857732Z',
  health_check_started: null,
  health_check_pending: false,
  last_health_check: '2023-04-26T23:01:13.941693Z',
  errors: 'Instance received normal shutdown signal',
  capacity_adjustment: '1.00',
  version: '0.1.dev33237+g1fdef52',
  capacity: 0,
  consumed_capacity: 0,
  percent_capacity_remaining: 0,
  jobs_running: 0,
  jobs_total: 0,
  cpu: '8.0',
  memory: 8011055104,
  cpu_capacity: 0,
  mem_capacity: 0,
  enabled: true,
  managed_by_policy: true,
  node_type: 'hybrid',
  node_state: 'installed',
  ip_address: null,
  listener_port: 27199,
  peers: [],
  peers_from_control_nodes: false,
};

const instanceDataWithPeers = {
  results: [instanceData],
};

const updatedInstance = {
  node_type: 'hop',
  peers: ['test-peer'],
};

describe('<InstanceEdit/>', () => {
  let wrapper;
  let history;

  beforeAll(async () => {
    useDebounce.mockImplementation((fn) => fn);
    history = createMemoryHistory();
    InstancesAPI.readDetail.mockResolvedValue({ data: instanceData });
    InstancesAPI.readPeers.mockResolvedValue({ data: instanceDataWithPeers });

    await act(async () => {
      wrapper = mountWithContexts(
        <InstanceEdit
          instance={instanceData}
          peers={instanceDataWithPeers}
          isEdit
          setBreadcrumb={() => {}}
        />,
        {
          context: { router: { history } },
        }
      );
    });
    expect(InstancesAPI.readDetail).toBeCalledWith(42);
  });

  afterAll(() => {
    jest.clearAllMocks();
  });

  test('initially renders successfully', async () => {
    await waitForElement(wrapper, 'ContentLoading', (el) => el.length === 0);
    expect(wrapper.find('InstanceEdit')).toHaveLength(1);
  });

  test('handleSubmit should call the api and redirect to details page', async () => {
    await act(async () => {
      wrapper.find('InstanceForm').invoke('handleSubmit')(updatedInstance);
    });
    expect(InstancesAPI.update).toHaveBeenCalledWith(42, updatedInstance);
    expect(history.location.pathname).toEqual('/instances/42/details');
  });

  test('should navigate to instance details when cancel is clicked', async () => {
    await act(async () => {
      wrapper.find('button[aria-label="Cancel"]').simulate('click');
    });
    expect(history.location.pathname).toEqual('/instances/42/details');
  });

  test('should navigate to instance details after successful submission', async () => {
    await act(async () => {
      wrapper.find('InstanceForm').invoke('handleSubmit')(updatedInstance);
    });
    wrapper.update();
    expect(wrapper.find('submitError').length).toBe(0);
    expect(history.location.pathname).toEqual('/instances/42/details');
  });

  test('failed form submission should show an error message', async () => {
    const error = {
      response: {
        data: { detail: 'An error occurred' },
      },
    };
    InstancesAPI.update.mockImplementationOnce(() => Promise.reject(error));
    await act(async () => {
      wrapper.find('InstanceForm').invoke('handleSubmit')(updatedInstance);
    });
    wrapper.update();
    expect(wrapper.find('FormSubmitError').length).toBe(1);
  });
});
awx/ui/src/screens/Instances/InstanceEdit/index.js (new file, 1 line)
@@ -0,0 +1 @@
export { default } from './InstanceEdit';
@@ -138,7 +138,7 @@ function InstanceListItem({
        rowIndex,
        isSelected,
        onSelect,
-       disable: !isExecutionNode,
+       disable: !(isExecutionNode || isHopNode),
      }}
      dataLabel={t`Selected`}
    />
@@ -1,17 +1,24 @@
-import React, { useCallback, useEffect } from 'react';
+import React, { useCallback, useEffect, useState } from 'react';
import { t } from '@lingui/macro';
import { CardBody } from 'components/Card';
import PaginatedTable, {
  getSearchableKeys,
  HeaderCell,
  HeaderRow,
  ToolbarAddButton,
} from 'components/PaginatedTable';
-import { getQSConfig, parseQueryString } from 'util/qs';
import DisassociateButton from 'components/DisassociateButton';
import AssociateModal from 'components/AssociateModal';
import ErrorDetail from 'components/ErrorDetail';
import AlertModal from 'components/AlertModal';
import useToast, { AlertVariant } from 'hooks/useToast';
+import { getQSConfig, parseQueryString, mergeParams } from 'util/qs';
import { useLocation, useParams } from 'react-router-dom';
-import useRequest from 'hooks/useRequest';
+import useRequest, { useDismissableError } from 'hooks/useRequest';
import DataListToolbar from 'components/DataListToolbar';
import { InstancesAPI } from 'api';
import useExpanded from 'hooks/useExpanded';
import useSelected from 'hooks/useSelected';
import InstancePeerListItem from './InstancePeerListItem';

const QS_CONFIG = getQSConfig('peer', {
@@ -20,27 +27,36 @@ const QS_CONFIG = getQSConfig('peer', {
  order_by: 'hostname',
});

-function InstancePeerList() {
+function InstancePeerList({ setBreadcrumb }) {
  const location = useLocation();
  const { id } = useParams();
  const [isModalOpen, setIsModalOpen] = useState(false);
  const { addToast, Toast, toastProps } = useToast();
  const readInstancesOptions = useCallback(
    () => InstancesAPI.readOptions(id),
    [id]
  );
  const {
    isLoading,
    error: contentError,
    request: fetchPeers,
-   result: { peers, count, relatedSearchableKeys, searchableKeys },
+   result: { instance, peers, count, relatedSearchableKeys, searchableKeys },
  } = useRequest(
    useCallback(async () => {
      const params = parseQueryString(QS_CONFIG, location.search);
      const [
        { data: detail },
        {
          data: { results, count: itemNumber },
        },
        actions,
      ] = await Promise.all([
        InstancesAPI.readDetail(id),
        InstancesAPI.readPeers(id, params),
        InstancesAPI.readOptions(),
      ]);
      return {
        instance: detail,
        peers: results,
        count: itemNumber,
        relatedSearchableKeys: (actions?.data?.related_search_fields || []).map(
@@ -50,6 +66,7 @@ function InstancePeerList() {
      };
    }, [id, location]),
    {
+     instance: {},
      peers: [],
      count: 0,
      relatedSearchableKeys: [],
@@ -61,18 +78,98 @@ function InstancePeerList() {
    fetchPeers();
  }, [fetchPeers]);

  useEffect(() => {
    if (instance) {
      setBreadcrumb(instance);
    }
  }, [instance, setBreadcrumb]);

  const { expanded, isAllExpanded, handleExpand, expandAll } =
    useExpanded(peers);
  const { selected, isAllSelected, handleSelect, clearSelected, selectAll } =
    useSelected(peers);

  const fetchInstancesToAssociate = useCallback(
    (params) =>
      InstancesAPI.read(
        mergeParams(params, {
          ...{ not__id: id },
          ...{ not__node_type: ['control', 'hybrid'] },
          ...{ not__hostname: instance.peers },
        })
      ),
    [id, instance]
  );

  const {
    isLoading: isAssociateLoading,
    request: handlePeerAssociate,
    error: associateError,
  } = useRequest(
    useCallback(
      async (instancesPeerToAssociate) => {
        const selected_hostname = instancesPeerToAssociate.map(
          (obj) => obj.hostname
        );
        const new_peers = [
          ...new Set([...instance.peers, ...selected_hostname]),
        ];
        await InstancesAPI.update(instance.id, { peers: new_peers });
        fetchPeers();
        addToast({
          id: instancesPeerToAssociate,
          title: t`${selected_hostname} added as a peer. Please be sure to run the install bundle for ${instance.hostname} again in order to see changes take effect.`,
          variant: AlertVariant.success,
          hasTimeout: true,
        });
      },
      [instance, fetchPeers, addToast]
    )
  );

  const {
    isLoading: isDisassociateLoading,
    request: handlePeersDiassociate,
    error: disassociateError,
  } = useRequest(
    useCallback(async () => {
      const new_peers = [];
      const selected_hostname = selected.map((obj) => obj.hostname);
      for (let i = 0; i < instance.peers.length; i++) {
        if (!selected_hostname.includes(instance.peers[i])) {
          new_peers.push(instance.peers[i]);
        }
      }
      await InstancesAPI.update(instance.id, { peers: new_peers });
      fetchPeers();
      addToast({
        title: t`${selected_hostname} removed. Please be sure to run the install bundle for ${instance.hostname} again in order to see changes take effect.`,
        variant: AlertVariant.success,
        hasTimeout: true,
      });
    }, [instance, selected, fetchPeers, addToast])
  );

  const { error, dismissError } = useDismissableError(
    associateError || disassociateError
  );

  const isHopNode = instance.node_type === 'hop';
  const isExecutionNode = instance.node_type === 'execution';

  return (
    <CardBody>
      <PaginatedTable
        contentError={contentError}
-       hasContentLoading={isLoading}
+       hasContentLoading={
+         isLoading || isDisassociateLoading || isAssociateLoading
+       }
        items={peers}
        itemCount={count}
        pluralizedItemName={t`Peers`}
        qsConfig={QS_CONFIG}
        onRowClick={handleSelect}
        clearSelected={clearSelected}
        toolbarSearchableKeys={searchableKeys}
        toolbarRelatedSearchableKeys={relatedSearchableKeys}
        toolbarSearchColumns={[
@@ -101,13 +198,36 @@ function InstancePeerList() {
        renderToolbar={(props) => (
          <DataListToolbar
            {...props}
            isAllSelected={isAllSelected}
            onSelectAll={selectAll}
            isAllExpanded={isAllExpanded}
            onExpandAll={expandAll}
            qsConfig={QS_CONFIG}
            additionalControls={[
              (isExecutionNode || isHopNode) && (
                <ToolbarAddButton
                  ouiaId="add-instance-peers-button"
                  key="associate"
                  defaultLabel={t`Associate`}
                  onClick={() => setIsModalOpen(true)}
                />
              ),
              (isExecutionNode || isHopNode) && (
                <DisassociateButton
                  verifyCannotDisassociate={false}
                  key="disassociate"
                  onDisassociate={handlePeersDiassociate}
                  itemsToDisassociate={selected}
                  modalTitle={t`Remove instance from peers?`}
                />
              ),
            ]}
          />
        )}
        renderRow={(peer, index) => (
          <InstancePeerListItem
            isSelected={selected.some((row) => row.id === peer.id)}
            onSelect={() => handleSelect(peer)}
            isExpanded={expanded.some((row) => row.id === peer.id)}
            onExpand={() => handleExpand(peer)}
            key={peer.id}
@@ -116,6 +236,35 @@ function InstancePeerList() {
          />
        )}
      />
      {isModalOpen && (
        <AssociateModal
          header={t`Instances`}
          fetchRequest={fetchInstancesToAssociate}
          isModalOpen={isModalOpen}
          onAssociate={handlePeerAssociate}
          onClose={() => setIsModalOpen(false)}
          title={t`Select Instances`}
          optionsRequest={readInstancesOptions}
          displayKey="hostname"
          columns={[
            { key: 'hostname', name: t`Name` },
            { key: 'node_type', name: t`Node Type` },
          ]}
        />
      )}
      <Toast {...toastProps} />
      {error && (
        <AlertModal
          isOpen={error}
          onClose={dismissError}
          title={t`Error!`}
          variant="error"
        >
          {associateError && t`Failed to associate peer.`}
          {disassociateError && t`Failed to remove peers.`}
          <ErrorDetail error={error} />
        </AlertModal>
      )}
    </CardBody>
  );
}
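The associate and disassociate handlers above compute the new peer list as a deduplicated union or a filtered subset of hostnames. A standalone sketch of that peer-set math (plain helper functions; the names are assumed for illustration):

// Hypothetical sketch of the peer-set logic used by the handlers above;
// instance.peers holds hostnames (strings), selections are instance objects.
const addPeers = (currentPeers, selectedInstances) => {
  const selectedHostnames = selectedInstances.map((obj) => obj.hostname);
  // Set-union removes duplicates when a selected host is already a peer.
  return [...new Set([...currentPeers, ...selectedHostnames])];
};

const removePeers = (currentPeers, selectedInstances) => {
  const selectedHostnames = selectedInstances.map((obj) => obj.hostname);
  return currentPeers.filter((peer) => !selectedHostnames.includes(peer));
};

// Example: addPeers(['hop-1'], [{ hostname: 'exec-1' }]) -> ['hop-1', 'exec-1']
// Example: removePeers(['hop-1', 'exec-1'], [{ hostname: 'exec-1' }]) -> ['hop-1']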
@@ -10,6 +10,8 @@ import { Detail, DetailList } from 'components/DetailList';

function InstancePeerListItem({
  peerInstance,
+ isSelected,
+ onSelect,
  isExpanded,
  onExpand,
  rowIndex,
@@ -33,7 +35,14 @@ function InstancePeerListItem({
    }}
  />
)}
- <Td />
+ <Td
+   select={{
+     rowIndex,
+     isSelected,
+     onSelect,
+   }}
+   dataLabel={t`Selected`}
+ />
<Td id={labelId} dataLabel={t`Name`}>
  <Link to={`/instances/${peerInstance.id}/details`}>
    <b>{peerInstance.hostname}</b>
@@ -7,6 +7,7 @@ import PersistentFilters from 'components/PersistentFilters';
import { InstanceList } from './InstanceList';
import Instance from './Instance';
import InstanceAdd from './InstanceAdd';
+import InstanceEdit from './InstanceEdit';

function Instances() {
  const [breadcrumbConfig, setBreadcrumbConfig] = useState({
@@ -20,8 +21,11 @@ function Instances() {
    }
    setBreadcrumbConfig({
      '/instances': t`Instances`,
      '/instances/add': t`Create new Instance`,
      [`/instances/${instance.id}`]: `${instance.hostname}`,
      [`/instances/${instance.id}/details`]: t`Details`,
      [`/instances/${instance.id}/peers`]: t`Peers`,
+     [`/instances/${instance.id}/edit`]: t`Edit Instance`,
    });
  }, []);

@@ -30,7 +34,10 @@ function Instances() {
    <ScreenHeader streamType="instance" breadcrumbConfig={breadcrumbConfig} />
    <Switch>
      <Route path="/instances/add">
-       <InstanceAdd />
+       <InstanceAdd setBreadcrumb={buildBreadcrumbConfig} />
      </Route>
+     <Route path="/instances/:id/edit" key="edit">
+       <InstanceEdit setBreadcrumb={buildBreadcrumbConfig} />
+     </Route>
      <Route path="/instances/:id">
        <Instance setBreadcrumb={buildBreadcrumbConfig} />
@@ -1,6 +1,6 @@
-import React from 'react';
+import React, { useCallback } from 'react';
import { t } from '@lingui/macro';
-import { Formik } from 'formik';
+import { Formik, useField, useFormikContext } from 'formik';
import { Form, FormGroup, CardBody } from '@patternfly/react-core';
import { FormColumnLayout } from 'components/FormLayout';
import FormField, {
@@ -8,9 +8,31 @@ import FormField, {
  CheckboxField,
} from 'components/FormField';
import FormActionGroup from 'components/FormActionGroup';
+import AnsibleSelect from 'components/AnsibleSelect';
+import { PeersLookup } from 'components/Lookup';
import { required } from 'util/validators';

-function InstanceFormFields() {
+const INSTANCE_TYPES = [
+  { id: 'execution', name: t`Execution` },
+  { id: 'hop', name: t`Hop` },
+];
+
+function InstanceFormFields({ isEdit }) {
+  const [instanceTypeField, instanceTypeMeta, instanceTypeHelpers] = useField({
+    name: 'node_type',
+    validate: required(t`Set a value for this field`),
+  });
+
+  const { setFieldValue } = useFormikContext();
+
+  const [peersField, peersMeta, peersHelpers] = useField('peers');
+
+  const handlePeersUpdate = useCallback(
+    (value) => {
+      setFieldValue('peers', value);
+    },
+    [setFieldValue]
+  );
  return (
    <>
      <FormField
@@ -20,6 +42,7 @@ function InstanceFormFields() {
        type="text"
        validate={required(null)}
        isRequired
+       isDisabled={isEdit}
      />
      <FormField
        id="instance-description"
@@ -40,16 +63,47 @@ function InstanceFormFields() {
        label={t`Listener Port`}
        name="listener_port"
        type="number"
-       tooltip={t`Select the port that Receptor will listen on for incoming connections. Default is 27199.`}
-       isRequired
+       tooltip={t`Select the port that Receptor will listen on for incoming connections, e.g. 27199.`}
      />
-     <FormField
-       id="instance-type"
+     <FormGroup
+       fieldId="instance-type"
        label={t`Instance Type`}
-       name="node_type"
-       type="text"
-       tooltip={t`Sets the role that this instance will play within mesh topology. Default is "execution."`}
-       isDisabled
+       validated={
+         !instanceTypeMeta.touched || !instanceTypeMeta.error
+           ? 'default'
+           : 'error'
+       }
+       helperTextInvalid={instanceTypeMeta.error}
+       isRequired
+     >
+       <AnsibleSelect
+         {...instanceTypeField}
+         id="node_type"
+         data={INSTANCE_TYPES.map((type) => ({
+           key: type.id,
+           value: type.id,
+           label: type.name,
+         }))}
+         onChange={(event, value) => {
+           instanceTypeHelpers.setValue(value);
+         }}
+         isDisabled={isEdit}
+       />
+     </FormGroup>
+     <PeersLookup
+       helperTextInvalid={peersMeta.error}
+       isValid={!peersMeta.touched || !peersMeta.error}
+       onBlur={() => peersHelpers.setTouched()}
+       onChange={handlePeersUpdate}
+       value={peersField.value}
+       tooltip={t`Select the Peers Instances.`}
+       fieldName="peers"
+       formLabel={t`Peers`}
+       multiple
+       typePeers
+       id="peers"
+       isRequired
+     />
      <FormGroup fieldId="instance-option-checkboxes" label={t`Options`}>
        <CheckboxField
@@ -64,6 +118,12 @@ function InstanceFormFields() {
          label={t`Managed by Policy`}
          tooltip={t`Controls whether or not this instance is managed by policy. If enabled, the instance will be available for automatic assignment to and unassignment from instance groups based on policy rules.`}
        />
+       <CheckboxField
+         id="peers_from_control_nodes"
+         name="peers_from_control_nodes"
+         label={t`Peers from control nodes`}
+         tooltip={t`If enabled, control nodes will peer to this instance automatically. If disabled, instance will be connected only to associated peers.`}
+       />
      </FormGroup>
    </>
  );
@@ -71,6 +131,8 @@ function InstanceFormFields() {

function InstanceForm({
  instance = {},
+ instance_peers = [],
+ isEdit = false,
  submitError,
  handleCancel,
  handleSubmit,
@@ -79,22 +141,29 @@ function InstanceForm({
  <CardBody>
    <Formik
      initialValues={{
-       hostname: '',
-       description: '',
-       node_type: 'execution',
-       node_state: 'installed',
-       listener_port: 27199,
-       enabled: true,
-       managed_by_policy: true,
+       hostname: instance.hostname || '',
+       description: instance.description || '',
+       node_type: instance.node_type || 'execution',
+       node_state: instance.node_state || 'installed',
+       listener_port: instance.listener_port,
+       enabled: instance.enabled || true,
+       managed_by_policy: instance.managed_by_policy || true,
+       peers_from_control_nodes: instance.peers_from_control_nodes || false,
+       peers: instance_peers,
      }}
      onSubmit={(values) => {
-       handleSubmit(values);
+       handleSubmit({
+         ...values,
+         listener_port:
+           values.listener_port === '' ? null : values.listener_port,
+         peers: values.peers.map((peer) => peer.hostname || peer),
+       });
      }}
    >
      {(formik) => (
        <Form autoComplete="off" onSubmit={formik.handleSubmit}>
          <FormColumnLayout>
-           <InstanceFormFields instance={instance} />
+           <InstanceFormFields isEdit={isEdit} />
            <FormSubmitError error={submitError} />
            <FormActionGroup
              onCancel={handleCancel}
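The onSubmit wrapper above normalizes the payload before it reaches the API; a standalone sketch of that transformation (the function name is assumed for illustration):

// Hypothetical sketch of the payload normalization done in onSubmit above.
// Peers may arrive as instance objects (from the lookup) or as plain
// hostname strings (from initial values), so both shapes reduce to strings.
const normalizeInstancePayload = (values) => ({
  ...values,
  // An empty port field is sent as null rather than an empty string.
  listener_port: values.listener_port === '' ? null : values.listener_port,
  peers: values.peers.map((peer) => peer.hostname || peer),
});

// Example:
// normalizeInstancePayload({ listener_port: '', peers: [{ hostname: 'hop-1' }, 'exec-1'] })
// -> { listener_port: null, peers: ['hop-1', 'exec-1'] }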
@@ -65,7 +65,7 @@ describe('<InstanceForm />', () => {
    expect(handleCancel).toBeCalled();
  });

- test('should call handleSubmit when Cancel button is clicked', async () => {
+ test('should call handleSubmit when Save button is clicked', async () => {
    expect(handleSubmit).not.toHaveBeenCalled();
    await act(async () => {
      wrapper.find('input#hostname').simulate('change', {
@@ -74,9 +74,6 @@ describe('<InstanceForm />', () => {
      wrapper.find('input#instance-description').simulate('change', {
        target: { value: 'This is a repeat song', name: 'description' },
      });
-     wrapper.find('input#instance-port').simulate('change', {
-       target: { value: 'This is a repeat song', name: 'listener_port' },
-     });
    });
    wrapper.update();
    expect(
@@ -91,9 +88,10 @@ describe('<InstanceForm />', () => {
      enabled: true,
      managed_by_policy: true,
      hostname: 'new Foo',
-     listener_port: 'This is a repeat song',
      node_state: 'installed',
      node_type: 'execution',
+     peers_from_control_nodes: false,
+     peers: [],
    });
  });
});
@@ -33,7 +33,8 @@ function RemoveInstanceButton({ itemsToRemove, onRemove, isK8s }) {
  const [removeDetails, setRemoveDetails] = useState(null);
  const [isLoading, setIsLoading] = useState(false);

- const cannotRemove = (item) => item.node_type !== 'execution';
+ const cannotRemove = (item) =>
+   !(item.node_type === 'execution' || item.node_type === 'hop');

  const toggleModal = async (isOpen) => {
    setRemoveDetails(null);
@@ -175,7 +176,7 @@ function RemoveInstanceButton({ itemsToRemove, onRemove, isK8s }) {
      </Button>,
    ]}
  >
-   <div>{t`This action will remove the following instances:`}</div>
+   <div>{t`This action will remove the following instance and you may need to rerun the install bundle for any instance that was previously connected to:`}</div>
    {itemsToRemove.map((item) => (
      <span key={item.id} id={`item-to-be-removed-${item.id}`}>
        <strong>{item.hostname}</strong>
@@ -53,13 +53,9 @@ const getStdOutValue = (hostEvent) => {
  const res = hostEvent?.event_data?.res;

  let stdOut;
- if (taskAction === 'debug' && res.result && res.result.stdout) {
+ if (taskAction === 'debug' && res?.result?.stdout) {
    stdOut = res.result.stdout;
- } else if (
-   taskAction === 'yum' &&
-   res.results &&
-   Array.isArray(res.results)
- ) {
+ } else if (taskAction === 'yum' && Array.isArray(res?.results)) {
    stdOut = res.results.join('\n');
  } else if (res?.stdout) {
    stdOut = Array.isArray(res.stdout) ? res.stdout.join(' ') : res.stdout;
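The refactor above replaces chained truthiness guards with optional chaining; a small sketch of the equivalence (the sample data is illustrative):

// Sketch: res?.result?.stdout evaluates to undefined as soon as any link
// in the chain is null/undefined, so it also guards the case where `res`
// itself is missing (which the old `res.result && ...` form did not).
const withStdout = { result: { stdout: 'ok' } };
const withoutRes = undefined;

console.log(withStdout?.result?.stdout); // 'ok'
console.log(withoutRes?.result?.stdout); // undefined (no TypeError)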
@@ -16,6 +16,7 @@ import {
  SearchPlusIcon,
  ExpandArrowsAltIcon,
  ExpandIcon,
+ RedoAltIcon,
} from '@patternfly/react-icons';

const Header = ({
@@ -26,6 +27,7 @@ const Header = ({
  zoomOut,
  resetZoom,
  zoomFit,
+ refresh,
  showZoomControls,
}) => {
  const { light } = PageSectionVariants;
@@ -48,6 +50,18 @@ const Header = ({
        </Title>
      </div>
      <div>
+       <Tooltip content={t`Refresh`} position="top">
+         <Button
+           ouiaId="refresh-button"
+           aria-label={t`Refresh`}
+           variant="plain"
+           icon={<RedoAltIcon />}
+           onClick={refresh}
+           isDisabled={!showZoomControls}
+         >
+           <RedoAltIcon />
+         </Button>
+       </Tooltip>
        <Tooltip content={t`Zoom in`} position="top">
          <Button
            ouiaId="zoom-in-button"
@@ -245,7 +245,7 @@ function Legend() {
        y1="9"
        x2="20"
        y2="9"
-       stroke="#666"
+       stroke="#6A6E73"
        strokeWidth="4"
      />
    </svg>
@@ -260,7 +260,7 @@ function Legend() {
        y1="9"
        x2="20"
        y2="9"
-       stroke="#666"
+       stroke="#3E8635"
        strokeWidth="4"
        strokeDasharray="6"
      />
@@ -13,6 +13,7 @@ import Tooltip from './Tooltip';
import ContentLoading from './ContentLoading';
import {
  renderStateColor,
+ renderLinkStatusColor,
  renderLabelText,
  renderNodeType,
  renderNodeIcon,
@@ -177,7 +178,7 @@ function MeshGraph({
    mesh
      .append('defs')
      .selectAll('marker')
-     .data(['end', 'end-active'])
+     .data(['end', 'end-active', 'end-adding', 'end-removing'])
      .join('marker')
      .attr('id', String)
      .attr('viewBox', '0 -5 10 10')
@@ -187,8 +188,9 @@ function MeshGraph({
      .attr('orient', 'auto')
      .append('path')
      .attr('d', 'M0,-5L10,0L0,5');

-   mesh.select('#end').attr('refX', 23).attr('fill', '#ccc');
+   mesh.select('#end').attr('refX', 23).attr('fill', '#6A6E73');
+   mesh.select('#end-removing').attr('refX', 23).attr('fill', '#C9190B');
+   mesh.select('#end-adding').attr('refX', 23).attr('fill', '#3E8635');
    mesh.select('#end-active').attr('refX', 18).attr('fill', '#0066CC');

    // Add links
@@ -204,18 +206,24 @@ function MeshGraph({
      .attr('y1', (d) => d.source.y)
      .attr('x2', (d) => d.target.x)
      .attr('y2', (d) => d.target.y)
-     .attr('marker-end', 'url(#end)')
+     .attr('marker-end', (d) => {
+       if (d.link_state === 'adding') {
+         return 'url(#end-adding)';
+       }
+       if (d.link_state === 'removing') {
+         return 'url(#end-removing)';
+       }
+       return 'url(#end)';
+     })
      .attr('class', (_, i) => `link-${i}`)
      .attr('data-cy', (d) => `${d.source.hostname}-${d.target.hostname}`)
      .style('fill', 'none')
-     .style('stroke', (d) =>
-       d.link_state === 'removing' ? '#C9190B' : '#CCC'
-     )
+     .style('stroke', (d) => renderLinkStatusColor(d.link_state))
      .style('stroke-width', '2px')
      .style('stroke-dasharray', (d) => renderLinkState(d.link_state))
      .attr('pointer-events', 'none')
      .on('mouseover', function showPointer() {
-       d3.select(this).transition().style('cursor', 'pointer');
+       d3.select(this).style('cursor', 'pointer');
      });
    // add nodes
    const node = mesh
@@ -228,7 +236,7 @@ function MeshGraph({
      .append('g')
      .attr('data-cy', (d) => `node-${d.id}`)
      .on('mouseenter', function handleNodeHover(_, d) {
-       d3.select(this).transition().style('cursor', 'pointer');
+       d3.select(this).style('cursor', 'pointer');
        highlightSiblings(d);
      })
      .on('mouseleave', (_, d) => {
@@ -239,7 +247,8 @@ function MeshGraph({
      });

    // node circles
-   node
+   const nodeCircles = node.append('g');
+   nodeCircles
      .append('circle')
      .attr('r', DEFAULT_RADIUS)
      .attr('cx', (d) => d.x)
@@ -248,7 +257,8 @@ function MeshGraph({
      .attr('class', (d) => `id-${d.id}`)
      .attr('fill', DEFAULT_NODE_COLOR)
      .attr('stroke-dasharray', (d) => (d.enabled ? `1 0` : `5`))
-     .attr('stroke', DEFAULT_NODE_STROKE_COLOR);
+     .attr('stroke', (d) => renderStateColor(d.node_state));

    // node type labels
    node
      .append('text')
@@ -259,64 +269,62 @@ function MeshGraph({
      .attr('dominant-baseline', 'central')
      .attr('fill', DEFAULT_NODE_SYMBOL_TEXT_COLOR);

    const placeholder = node.append('g').attr('class', 'placeholder');

    placeholder
      .append('text')
      .text((d) => renderLabelText(d.node_state, d.hostname))
      .attr('x', (d) => d.x)
      .attr('y', (d) => d.y + 40)
      .attr('fill', 'black')
      .attr('font-size', '18px')
      .attr('text-anchor', 'middle')
      .each(function calculateLabelWidth() {
        // eslint-disable-next-line react/no-this-in-sfc
        const bbox = this.getBBox();
        // eslint-disable-next-line react/no-this-in-sfc
        d3.select(this.parentNode)
          .append('path')
          .attr('d', (d) => renderLabelIcons(d.node_state))
          .attr('transform', (d) => renderIconPosition(d.node_state, bbox))
          .style('fill', 'black');
      });

    placeholder.each(function calculateLabelWidth() {
      // eslint-disable-next-line react/no-this-in-sfc
      const bbox = this.getBBox();
      // eslint-disable-next-line react/no-this-in-sfc
      d3.select(this.parentNode)
        .append('rect')
        .attr('x', (d) => d.x - bbox.width / 2)
        .attr('y', bbox.y + 5)
        .attr('width', bbox.width)
        .attr('height', bbox.height)
        .attr('rx', 8)
        .attr('ry', 8)
        .style('fill', (d) => renderStateColor(d.node_state));
    });

-   const hostNames = node.append('g');
+   // node hostname labels
+   const hostNames = node.append('g').attr('class', 'node-state-label');
    hostNames
      .append('text')
      .attr('x', (d) => d.x)
      .attr('y', (d) => d.y + 40)
      .text((d) => renderLabelText(d.node_state, d.hostname))
      .attr('x', (d) => d.x + 6)
      .attr('y', (d) => d.y + 42)
      .attr('class', 'placeholder')
      .attr('fill', 'white')
      .attr('font-size', DEFAULT_FONT_SIZE)
      .attr('text-anchor', 'middle')
      .each(function calculateLabelWidth() {
        // eslint-disable-next-line react/no-this-in-sfc
        const bbox = this.getBBox();
        const padding = 10;
        // eslint-disable-next-line react/no-this-in-sfc
        d3.select(this.parentNode)
          .append('path')
          .attr('class', (d) => `icon-${d.node_state}`)
          .attr('d', (d) => renderLabelIcons(d.node_state))
          .attr('transform', (d) => renderIconPosition(d.node_state, bbox))
          .attr('fill', 'white');
          .append('rect')
          .attr('x', bbox.x - padding / 2)
          .attr('y', bbox.y)
          .attr('width', bbox.width + padding)
          .attr('height', bbox.height)
          .style('stroke-width', 1)
          .attr('rx', 4)
          .attr('ry', 4)
          .attr('fill', 'white')
          .style('stroke', DEFAULT_NODE_STROKE_COLOR);
      });
    svg.selectAll('g.placeholder').remove();
    svg.selectAll('text.placeholder').remove();
    hostNames
      .append('text')
      .attr('x', (d) => d.x)
      .attr('y', (d) => d.y + 38)
      .text((d) => renderLabelText(d.node_state, d.hostname))
      .attr('font-size', DEFAULT_FONT_SIZE)
      .attr('fill', 'black')
      .attr('text-anchor', 'middle');

    // add badge icons
    const badges = nodeCircles.append('g').attr('class', 'node-state-badge');
    badges.each(function drawStateBadge() {
      // eslint-disable-next-line react/no-this-in-sfc
      const bbox = this.parentNode.getBBox();
      // eslint-disable-next-line react/no-this-in-sfc
      d3.select(this)
        .append('circle')
        .attr('r', 9)
        .attr('cx', bbox.x)
        .attr('cy', bbox.y)
        .attr('fill', (d) => renderStateColor(d.node_state));
      d3.select(this)
        .append('path')
        .attr('class', (d) => `icon-${d.node_state}`)
        .attr('d', (d) => renderLabelIcons(d.node_state))
        .attr('transform', (d) => renderIconPosition(d.node_state, bbox))
        .attr('fill', 'white');
    });
    svg.call(zoom);

    function highlightSiblings(n) {
@@ -330,7 +338,6 @@ function MeshGraph({
      immediate.forEach((s) => {
        svg
          .selectAll(`.link-${s.index}`)
-         .transition()
          .style('stroke', '#0066CC')
          .style('stroke-width', '3px')
          .attr('marker-end', 'url(#end-active)');
@@ -346,13 +353,17 @@ function MeshGraph({
      immediate.forEach((s) => {
        svg
          .selectAll(`.link-${s.index}`)
          .transition()
          .duration(50)
-         .style('stroke', (d) =>
-           d.link_state === 'removing' ? '#C9190B' : '#CCC'
-         )
+         .style('stroke', (d) => renderLinkStatusColor(d.link_state))
          .style('stroke-width', '2px')
-         .attr('marker-end', 'url(#end)');
+         .attr('marker-end', (d) => {
+           if (d.link_state === 'adding') {
+             return 'url(#end-adding)';
+           }
+           if (d.link_state === 'removing') {
+             return 'url(#end-removing)';
+           }
+           return 'url(#end)';
+         });
      });
    }

@@ -361,7 +372,7 @@ function MeshGraph({
    // toggle rings
    svg
      .select(`circle.id-${n.id}`)
-     .attr('stroke', '#ccc')
+     .attr('stroke', (d) => renderStateColor(d.node_state))
      .attr('stroke-width', null);
    // show default empty state of tooltip
    setIsNodeSelected(false);
@@ -370,7 +381,7 @@ function MeshGraph({
    }
    svg
      .selectAll('circle')
-     .attr('stroke', '#ccc')
+     .attr('stroke', (d) => renderStateColor(d.node_state))
      .attr('stroke-width', null);
    svg
      .select(`circle.id-${n.id}`)
@@ -45,6 +45,7 @@ function TopologyView() {
      zoomIn={zoomIn}
      zoomOut={zoomOut}
      zoomFit={zoomFit}
+     refresh={fetchMeshVisualizer}
      resetZoom={resetZoom}
      showZoomControls={showZoomControls}
    />
@@ -13,7 +13,7 @@ export const DEFAULT_NODE_COLOR = 'white';
export const DEFAULT_NODE_HIGHLIGHT_COLOR = '#eee';
export const DEFAULT_NODE_LABEL_TEXT_COLOR = 'white';
export const DEFAULT_NODE_SYMBOL_TEXT_COLOR = 'black';
-export const DEFAULT_NODE_STROKE_COLOR = '#ccc';
+export const DEFAULT_NODE_STROKE_COLOR = '#6A6E73';
export const DEFAULT_FONT_SIZE = '12px';
export const LABEL_TEXT_MAX_LENGTH = 15;
export const MARGIN = 15;
@@ -27,6 +27,12 @@ export const NODE_STATE_COLOR_KEY = {
  deprovisioning: '#666',
};

+export const LINK_STATE_COLOR_KEY = {
+  established: '#6A6E73',
+  adding: '#3E8635',
+  removing: '#C9190B',
+};
+
export const NODE_TYPE_SYMBOL_KEY = {
  hop: 'h',
  execution: 'Ex',
@@ -4,6 +4,7 @@ import { truncateString } from '../../../util/strings';
import {
  NODE_STATE_COLOR_KEY,
  NODE_TYPE_SYMBOL_KEY,
+ LINK_STATE_COLOR_KEY,
  LABEL_TEXT_MAX_LENGTH,
  ICONS,
} from '../constants';
@@ -20,6 +21,12 @@ export function renderStateColor(nodeState) {
  return NODE_STATE_COLOR_KEY[nodeState] ? NODE_STATE_COLOR_KEY[nodeState] : '';
}

+export function renderLinkStatusColor(linkState) {
+  return LINK_STATE_COLOR_KEY[linkState]
+    ? LINK_STATE_COLOR_KEY[linkState]
+    : '#ccc';
+}
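A quick illustration of how the new helper resolves colors (values taken from LINK_STATE_COLOR_KEY above; the import path is an assumption for illustration):

// Sketch: renderLinkStatusColor falls back to '#ccc' for unknown states.
import { renderLinkStatusColor } from './utils/helpers'; // path assumed

console.log(renderLinkStatusColor('adding'));   // '#3E8635'
console.log(renderLinkStatusColor('removing')); // '#C9190B'
console.log(renderLinkStatusColor('bogus'));    // '#ccc' (fallback)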
export function renderLabelText(nodeState, name) {
  if (typeof nodeState === 'string' && typeof name === 'string') {
    return `${truncateString(name, LABEL_TEXT_MAX_LENGTH)}`;
@@ -45,8 +52,8 @@ export function renderLabelIcons(nodeState) {
    ready: 'checkmark',
    installed: 'clock',
    unavailable: 'exclaimation',
-   'provision-fail': 'exclaimation',
    'deprovision-fail': 'exclaimation',
+   'provision-fail': 'exclaimation',
    provisioning: 'plus',
    deprovisioning: 'minus',
  };
@@ -59,15 +66,17 @@ export function renderIconPosition(nodeState, bbox) {
  if (nodeState) {
    const iconPositionMapper = {
-     ready: `translate(${bbox.x - 15}, ${bbox.y + 3}), scale(0.02)`,
-     installed: `translate(${bbox.x - 18}, ${bbox.y + 1}), scale(0.03)`,
-     unavailable: `translate(${bbox.x - 9}, ${bbox.y + 3}), scale(0.02)`,
-     'provision-fail': `translate(${bbox.x - 9}, ${bbox.y + 3}), scale(0.02)`,
-     'deprovision-fail': `translate(${bbox.x - 9}, ${
-       bbox.y + 3
+     ready: `translate(${bbox.x - 4.5}, ${bbox.y - 4.5}), scale(0.02)`,
+     installed: `translate(${bbox.x - 6.5}, ${bbox.y - 6.5}), scale(0.025)`,
+     unavailable: `translate(${bbox.x - 2}, ${bbox.y - 4.4}), scale(0.02)`,
+     'provision-fail': `translate(${bbox.x - 2}, ${bbox.y - 4}), scale(0.02)`,
+     'deprovision-fail': `translate(${bbox.x - 2}, ${
+       bbox.y - 4
      }), scale(0.02)`,
+     provisioning: `translate(${bbox.x - 4.5}, ${bbox.y - 4.5}), scale(0.02)`,
+     deprovisioning: `translate(${bbox.x - 4.5}, ${
+       bbox.y - 4.5
+     }), scale(0.02)`,
-     provisioning: `translate(${bbox.x - 12}, ${bbox.y + 3}), scale(0.02)`,
-     deprovisioning: `translate(${bbox.x - 12}, ${bbox.y + 3}), scale(0.02)`,
    };
    return iconPositionMapper[nodeState] ? iconPositionMapper[nodeState] : ``;
  }
@@ -101,7 +110,7 @@ export function getRandomInt(min, max) {
const generateRandomLinks = (n, r) => {
  const links = [];
  function getRandomLinkState() {
-   return ['established', 'adding', 'removing'][getRandomInt(0, 2)];
+   return ['established', 'adding', 'removing'][getRandomInt(0, 3)];
  }
  for (let i = 0; i < r; i++) {
    const link = {
@@ -142,7 +151,7 @@ export const generateRandomNodes = (n) => {
    hostname: `node-${id}`,
    node_type: randomType,
    node_state: randomState,
-   enabled: Math.random() < 0.5,
+   enabled: Math.random() < 0.9,
  };
  nodes.push(node);
}
@@ -73,7 +73,7 @@ describe('renderIconPosition', () => {
|
||||
const bbox = { x: 400, y: 400, width: 10, height: 20 };
|
||||
test('returns correct label icon', () => {
|
||||
expect(renderIconPosition('ready', bbox)).toBe(
|
||||
`translate(${bbox.x - 15}, ${bbox.y + 3}), scale(0.02)`
|
||||
`translate(${bbox.x - 4.5}, ${bbox.y - 4.5}), scale(0.02)`
|
||||
);
|
||||
});
|
||||
test('returns empty string if state is not found', () => {
|
||||
|
||||
@@ -122,7 +122,7 @@ export const InstanceGroup = shape({

export const Instance = shape({
  id: number.isRequired,
  name: string.isRequired,
  hostname: string.isRequired,
});

export const Label = shape({

@@ -46,6 +46,7 @@ options:
  organization:
    description:
      - Organization name, ID, or named URL that should own the credential.
      - This parameter is mutually exclusive with C(team) and C(user).
    type: str
  credential_type:
    description:
@@ -93,10 +94,12 @@ options:
  user:
    description:
      - User name, ID, or named URL that should own this credential.
      - This parameter is mutually exclusive with C(organization) and C(team).
    type: str
  team:
    description:
      - Team name, ID, or named URL that should own this credential.
      - This parameter is mutually exclusive with C(organization) and C(user).
    type: str
  state:
    description:
@@ -219,8 +222,13 @@ def main():
        state=dict(choices=['present', 'absent', 'exists'], default='present'),
    )

    mutually_exclusive = [("organization", "user", "team")]

    # Create a module for ourselves
    module = ControllerAPIModule(argument_spec=argument_spec)
    module = ControllerAPIModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive
    )

    # Extract our parameters
    name = module.params.get('name')

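With ``mutually_exclusive`` wired into the module spec, supplying more than one owner now fails argument validation before any API call is made. A minimal sketch of an invocation that the new check rejects (the credential name and values are illustrative):

    # Fails with "parameters are mutually exclusive: organization|user|team"
    - name: Create a credential with two owners
      awx.awx.credential:
        name: example-credential
        credential_type: Machine
        organization: Default
        user: admin
        state: present
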
@@ -47,6 +47,7 @@ options:
      - Role that this node plays in the mesh.
    choices:
      - execution
      - hop
    required: False
    type: str
  node_state:
@@ -62,6 +63,18 @@ options:
      - Port that Receptor will listen for incoming connections on.
    required: False
    type: int
  peers:
    description:
      - List of peers to connect outbound to. Only configurable for hop and execution nodes.
      - To remove all current peers, set value to an empty list, [].
    required: False
    type: list
    elements: str
  peers_from_control_nodes:
    description:
      - If enabled, control plane nodes will automatically peer to this node.
    required: False
    type: bool
extends_documentation_fragment: awx.awx.auth
'''

@@ -88,9 +101,11 @@ def main():
        capacity_adjustment=dict(type='float'),
        enabled=dict(type='bool'),
        managed_by_policy=dict(type='bool'),
        node_type=dict(type='str', choices=['execution']),
        node_type=dict(type='str', choices=['execution', 'hop']),
        node_state=dict(type='str', choices=['deprovisioning', 'installed']),
        listener_port=dict(type='int'),
        peers=dict(required=False, type='list', elements='str'),
        peers_from_control_nodes=dict(required=False, type='bool'),
    )

    # Create a module for ourselves
@@ -104,7 +119,8 @@ def main():
    node_type = module.params.get('node_type')
    node_state = module.params.get('node_state')
    listener_port = module.params.get('listener_port')

    peers = module.params.get('peers')
    peers_from_control_nodes = module.params.get('peers_from_control_nodes')
    # Attempt to look up an existing item based on the provided data
    existing_item = module.get_one('instances', name_or_id=hostname)

@@ -122,6 +138,10 @@ def main():
    new_fields['node_state'] = node_state
    if listener_port is not None:
        new_fields['listener_port'] = listener_port
    if peers is not None:
        new_fields['peers'] = peers
    if peers_from_control_nodes is not None:
        new_fields['peers_from_control_nodes'] = peers_from_control_nodes

    module.create_or_update_if_needed(
        existing_item,

@@ -52,7 +52,12 @@ EXAMPLES = '''
  license:
    manifest: "/tmp/my_manifest.zip"

- name: Attach to a pool
- name: Use the subscriptions module to fetch subscriptions from Red Hat or Red Hat Satellite
  subscriptions:
    username: "my_satellite_username"
    password: "my_satellite_password"

- name: Attach to a pool (requires fetching subscriptions at least once before)
  license:
    pool_id: 123456


@@ -83,7 +83,7 @@ options:
    type: bool
  scm_update_on_launch:
    description:
      - Before an update to the local repository before launching a job with this project.
      - Perform an update to the local repository before launching a job with this project.
    type: bool
  scm_update_cache_timeout:
    description:

@@ -517,68 +517,63 @@ EXAMPLES = '''
      workflow_nodes:
        - identifier: node101
          unified_job_template:
            name: example-project
            name: example-inventory
            inventory:
              organization:
                name: Default
            type: inventory_source
          related:
            success_nodes: []
            failure_nodes:
              - identifier: node201
            always_nodes: []
            credentials: []
        - identifier: node201
          unified_job_template:
            organization:
              name: Default
            name: job template 1
            type: job_template
          credentials: []
          related:
            success_nodes:
              - identifier: node301
            failure_nodes: []
            always_nodes: []
            credentials: []
        - identifier: node202
        - identifier: node102
          unified_job_template:
            organization:
              name: Default
            name: example-project
            type: project
          related:
            success_nodes: []
            failure_nodes: []
            always_nodes: []
            credentials: []
        - identifier: node301
          all_parents_must_converge: false
          success_nodes:
            - identifier: node201
        - identifier: node201
          unified_job_template:
            organization:
              name: Default
            name: job template 2
            name: example-job template
            type: job_template
          execution_environment:
            name: My EE
          inventory:
            name: Test inventory
            name: Demo Inventory
            organization:
              name: Default
          related:
            success_nodes:
              - identifier: node401
            failure_nodes:
              - identifier: node301
            always_nodes: []
          credentials:
            - name: cyberark
              organization:
                name: Default
          instance_groups:
            - name: SunCavanaugh Cloud
            - name: default
          labels:
            - name: Custom Label
            - name: Another Custom Label
              organization:
                name: Default
  register: result
        - all_parents_must_converge: false
          identifier: node301
          unified_job_template:
            description: Approval node for example
            timeout: 900
            type: workflow_approval
            name: Approval Node for Demo
          related:
            success_nodes:
              - identifier: node401
        - identifier: node401
          unified_job_template:
            name: Cleanup Activity Stream
            type: system_job_template

'''

53
awx_collection/test/awx/test_instance.py
Normal file
@@ -0,0 +1,53 @@
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import pytest

from awx.main.models import Instance
from django.test.utils import override_settings


@pytest.mark.django_db
def test_peers_adding_and_removing(run_module, admin_user):
    with override_settings(IS_K8S=True):
        result = run_module(
            'instance',
            {'hostname': 'hopnode1', 'node_type': 'hop', 'peers_from_control_nodes': True, 'node_state': 'installed', 'listener_port': 27199},
            admin_user,
        )
        assert result['changed']

        hop_node_1 = Instance.objects.get(pk=result.get('id'))

        assert hop_node_1.peers_from_control_nodes is True
        assert hop_node_1.node_type == 'hop'

        result = run_module(
            'instance',
            {'hostname': 'hopnode2', 'node_type': 'hop', 'peers_from_control_nodes': True, 'node_state': 'installed', 'listener_port': 27199},
            admin_user,
        )
        assert result['changed']

        hop_node_2 = Instance.objects.get(pk=result.get('id'))

        result = run_module(
            'instance',
            {'hostname': 'executionnode', 'node_type': 'execution', 'node_state': 'installed', 'listener_port': 27199, 'peers': ['hopnode1', 'hopnode2']},
            admin_user,
        )
        assert result['changed']

        execution_node = Instance.objects.get(pk=result.get('id'))

        assert set(execution_node.peers.all()) == {hop_node_1, hop_node_2}

        result = run_module(
            'instance',
            {'hostname': 'executionnode', 'node_type': 'execution', 'node_state': 'installed', 'listener_port': 27199, 'peers': []},
            admin_user,
        )

        assert result['changed']
        assert set(execution_node.peers.all()) == set()
@@ -49,8 +49,8 @@
- name: Cancel the command
  ad_hoc_command_cancel:
    command_id: "{{ command.id }}"
    request_timeout: 60
  register: results
  ignore_errors: true

- assert:
    that:

@@ -33,6 +33,7 @@
    name: "localhost"
    inventory: "Demo Inventory"
    state: present
    enabled: true
    variables:
      ansible_connection: local
  register: result

@@ -21,14 +21,14 @@
    name: "{{ inv_name }}"
    organization: Default
    state: present
  register: result
  register: inv_result

- name: Create a Host
  host:
    name: "{{ host_name4 }}"
    inventory: "{{ inv_name }}"
    state: present
  register: result
  register: host_result

- name: Add Host to Group
  group:
@@ -37,16 +37,18 @@
    hosts:
      - "{{ host_name4 }}"
    preserve_existing_hosts: true
  register: result
  register: group_result

- assert:
    that:
      - "result is changed"
      - inv_result is changed
      - host_result is changed
      - group_result is changed

- name: Create Group 1
  group:
    name: "{{ group_name1 }}"
    inventory: "{{ result.id }}"
    inventory: "{{ inv_result.id }}"
    state: present
    variables:
      foo: bar
@@ -165,18 +167,6 @@
    that:
      - group1_host_count == "3"

- name: Delete Group 2
  group:
    name: "{{ group_name2 }}"
    inventory: "{{ inv_name }}"
    state: absent
  register: result

# In this case, group 2 was last a child of group1 so deleting group1 deleted group2
- assert:
    that:
      - "result is not changed"

- name: Delete Group 3
  group:
    name: "{{ group_name3 }}"
@@ -200,6 +190,18 @@
    that:
      - "result is changed"

- name: Delete Group 2
  group:
    name: "{{ group_name2 }}"
    inventory: "{{ inv_name }}"
    state: absent
  register: result

# In this case, group 2 was last a child of group1 so deleting group1 deleted group2
- assert:
    that:
      - "result is not changed"

- name: Check module fails with correct msg
  group:
    name: test-group

@@ -70,3 +70,60 @@
        - "{{ hostname3 }}"

  when: IS_K8S

- block:
    - name: Create hop node 1
      awx.awx.instance:
        hostname: hopnode1
        node_type: hop
        node_state: installed
        listener_port: 27199
        peers_from_control_nodes: True
      register: result

    - assert:
        that:
          - result is changed

    - name: Create hop node 2
      awx.awx.instance:
        hostname: hopnode2
        node_type: hop
        node_state: installed
        listener_port: 27199
        peers_from_control_nodes: True
      register: result

    - assert:
        that:
          - result is changed

    - name: Create execution node
      awx.awx.instance:
        hostname: executionnode
        node_type: execution
        node_state: installed
        listener_port: 27199
        peers:
          - "hopnode1"
          - "hopnode2"
      register: result

    - assert:
        that:
          - result is changed

    - name: Remove execution node peers
      awx.awx.instance:
        hostname: executionnode
        node_type: execution
        node_state: installed
        listener_port: 27199
        peers: []
      register: result

    - assert:
        that:
          - result is changed

  when: IS_K8S

@@ -11,6 +11,7 @@
- name: Cancel the job
  job_cancel:
    job_id: "{{ job.id }}"
    request_timeout: 60
  register: results

- assert:
@@ -23,10 +24,10 @@
    fail_if_not_running: true
  register: results
  ignore_errors: true

- assert:
    that:
      - results is failed
  # This test can be flaky, so we retry it a few times
  until: results is failed and results.msg == 'Job is not running'
  retries: 6
  delay: 5

- name: Check module fails with correct msg
  job_cancel:

@@ -61,6 +61,10 @@
    organization: Default
    state: absent
  register: result
  until: result is changed  # wait for the project update to settle
  retries: 6
  delay: 5


- assert:
    that:

@@ -220,6 +220,7 @@
  user:
    controller_username: "{{ username }}-orgadmin"
    controller_password: "{{ username }}-orgadmin"
    controller_oauthtoken: false  # Hack for CI where we use oauth in config file
    username: "{{ username }}"
    first_name: Joe
    password: "{{ 65535 | random | to_uuid }}"

@@ -169,6 +169,9 @@
    name: "{{ jt1_name }}"
    project: "{{ demo_project_name }}"
    inventory: Demo Inventory
    ask_inventory_on_launch: true
    ask_credential_on_launch: true
    ask_labels_on_launch: true
    playbook: hello_world.yml
    job_type: run
    state: present
@@ -710,7 +713,7 @@
    name: "{{ wfjt_name }}"
    inventory: Demo Inventory
    extra_vars: {'foo': 'bar', 'another-foo': {'barz': 'bar2'}}
    schema:
      workflow_nodes:
        - identifier: node101
          unified_job_template:
            name: "{{ project_inv_source_result.id }}"
@@ -721,30 +724,52 @@
          related:
            failure_nodes:
              - identifier: node201
              - identifier: node102
          unified_job_template:
            organization:
              name: "{{ org_name }}"
            name: "{{ demo_project_name_2 }}"
            type: project
          related:
            success_nodes:
              - identifier: node201
        - identifier: node201
          unified_job_template:
            organization:
              name: Default
            name: "{{ jt1_name }}"
            type: job_template
          credentials: []
          inventory:
            name: Demo Inventory
            organization:
              name: Default
          related:
            success_nodes:
              - identifier: node401
            failure_nodes:
              - identifier: node301
        - identifier: node202
          unified_job_template:
            organization:
              name: "{{ org_name }}"
            name: "{{ project_inv_source }}"
            type: project
          always_nodes: []
          credentials:
            - name: "{{ scm_cred_name }}"
              organization:
                name: Default
          instance_groups:
            - name: "{{ ig1 }}"
          labels:
            - name: "{{ lab1 }}"
              organization:
                name: "{{ org_name }}"
        - all_parents_must_converge: false
          identifier: node301
          unified_job_template:
            organization:
              name: Default
            name: "{{ jt2_name }}"
            type: job_template
        - identifier: Cleanup Job
          description: Approval node for example
          timeout: 900
          type: workflow_approval
          name: "{{ approval_node_name }}"
          related:
            success_nodes:
              - identifier: node401
        - identifier: node401
          unified_job_template:
            name: Cleanup Activity Stream
            type: system_job_template

@@ -18,7 +18,9 @@ documentation: https://github.com/ansible/awx/blob/devel/awx_collection/README.m
homepage: https://www.ansible.com/
issues: https://github.com/ansible/awx/issues?q=is%3Aissue+label%3Acomponent%3Aawx_collection
license:
  - GPL-3.0-only
  - GPL-3.0-or-later
  # plugins/module_utils/tower_legacy.py
  - BSD-2-Clause
name: {{ collection_package }}
namespace: {{ collection_namespace }}
readme: README.md

0
docs/docsite/_static/.gitkeep
Normal file
90
docs/docsite/conf.py
Normal file
@@ -0,0 +1,90 @@
import sys
import os
import shlex

from datetime import datetime
from importlib import import_module

#sys.path.insert(0, os.path.abspath('./rst/rest_api/_swagger'))

project = u'Ansible AWX'
copyright = u'2023, Red Hat'
author = u'Red Hat'

pubdateshort = '2023-08-04'
pubdate = datetime.strptime(pubdateshort, '%Y-%m-%d').strftime('%B %d, %Y')

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
html_title = 'Ansible AWX community documentation'

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
html_short_title = 'AWX community documentation'

htmlhelp_basename = 'AWX_docs'

# include the swagger extension to build rest api reference
#'swagger',
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx_ansible_theme',
]

html_theme = 'sphinx_ansible_theme'
html_theme_path = ["_static"]

pygments_style = "ansible"
highlight_language = "YAML+Jinja"

source_suffix = '.rst'
master_doc = 'index'

version = 'latest'
shortversion = 'latest'
# The full version, including alpha/beta/rc tags.
release = 'AWX latest'

language = 'en'

locale_dirs = ['locale/']  # path is example but recommended.
gettext_compact = False  # optional.

rst_epilog = """
.. |atqi| replace:: *AWX Quick Installation Guide*
.. |atqs| replace:: *AWX Quick Setup Guide*
.. |atir| replace:: *AWX Installation and Reference Guide*
.. |ata| replace:: *AWX Administration Guide*
.. |atu| replace:: *AWX User Guide*
.. |atumg| replace:: *AWX Upgrade and Migration Guide*
.. |atapi| replace:: *AWX API Guide*
.. |atrn| replace:: *AWX Release Notes*
.. |aa| replace:: Ansible Automation
.. |AA| replace:: Automation Analytics
.. |aap| replace:: Ansible Automation Platform
.. |ab| replace:: ansible-builder
.. |ap| replace:: Automation Platform
.. |at| replace:: automation controller
.. |At| replace:: Automation controller
.. |ah| replace:: Automation Hub
.. |EE| replace:: Execution Environment
.. |EEs| replace:: Execution Environments
.. |Ee| replace:: Execution environment
.. |Ees| replace:: Execution environments
.. |ee| replace:: execution environment
.. |ees| replace:: execution environments
.. |versionshortest| replace:: v%s
.. |pubdateshort| replace:: %s
.. |pubdate| replace:: %s
.. |rhel| replace:: Red Hat Enterprise Linux
.. |rhaa| replace:: Red Hat Ansible Automation
.. |rhaap| replace:: Red Hat Ansible Automation Platform
.. |RHAT| replace:: Red Hat Ansible Automation Platform controller

""" % (version, pubdateshort, pubdate)
5
docs/docsite/requirements.txt
Normal file
@@ -0,0 +1,5 @@
sphinx==5.1.1
sphinx-ansible-theme==0.9.1
docutils==0.16
Jinja2<3.1
PyYaml
31
docs/docsite/rst/administration/authentication_timeout.rst
Normal file
@@ -0,0 +1,31 @@
Changing the Default Timeout for Authentication
=================================================

.. index::
   pair: troubleshooting; authentication timeout
   pair: authentication timeout; changing the default
   single: authentication token
   single: authentication expiring
   single: log
   single: login timeout
   single: timeout login
   pair: timeout; session


The default length of time, in seconds, that your supplied token is valid can be changed in the System Settings screen of the AWX user interface:

1. Click **Settings** from the left navigation bar.

2. Click **Miscellaneous Authentication settings** under the System settings.

3. Click **Edit**.

4. Enter the timeout period in seconds in the **Idle Time Force Log Out** text field.

.. image:: ../common/images/configure-awx-system-timeout.png

5. Click **Save** to apply your changes.

.. note::

   If you are accessing AWX directly and your session does not persist, so that you have to keep logging in over and over, try clearing your web browser's cache. In situations like this, it is often found that the authentication token has been cached in the browser session and must be cleared.
198
docs/docsite/rst/administration/awx-manage.rst
Normal file
@@ -0,0 +1,198 @@
.. _ag_manage_utility:

The *awx-manage* Utility
-------------------------------

.. index::
   single: awx-manage

The ``awx-manage`` utility is used to access detailed internal information of AWX. Commands for ``awx-manage`` should run as the ``awx`` or ``root`` user.

.. warning::
    Running awx-manage commands via playbook is not recommended or supported.

Inventory Import
~~~~~~~~~~~~~~~~

.. index::
   single: awx-manage; inventory import

``awx-manage`` is a mechanism by which an AWX administrator can import inventory directly into AWX, for those who cannot use Custom Inventory Scripts.

To use ``awx-manage`` properly, you must first create an inventory in AWX to use as the destination for the import.

For help with ``awx-manage``, run the following command: ``awx-manage inventory_import [--help]``

The ``inventory_import`` command synchronizes an AWX inventory object with a text-based inventory file, dynamic inventory script, or a directory of one or more of the above as supported by core Ansible.

When running this command, specify either an ``--inventory-id`` or ``--inventory-name``, and the path to the Ansible inventory source (``--source``).

::

   awx-manage inventory_import --source=/ansible/inventory/ --inventory-id=1

By default, inventory data already stored in AWX blends with data from the external source. To use only the external data, specify ``--overwrite``. To specify that any existing hosts get variable data exclusively from the ``--source``, specify ``--overwrite_vars``. The default behavior adds any new variables from the external source, overwriting keys that already exist, but preserves any variables that were not sourced from the external data source.

::

   awx-manage inventory_import --source=/ansible/inventory/ --inventory-id=1 --overwrite


.. include:: ../common/overwrite_var_note_2-4-0.rst


Cleanup of old data
~~~~~~~~~~~~~~~~~~~

.. index::
   single: awx-manage, data cleanup

``awx-manage`` has a variety of commands used to clean old data from AWX. AWX administrators can use the Management Jobs interface for access or use the command line.

- ``awx-manage cleanup_jobs [--help]``

  This permanently deletes the job details and job output for jobs older than a specified number of days.

- ``awx-manage cleanup_activitystream [--help]``

  This permanently deletes any :ref:`ug_activitystreams` data older than a specific number of days.

Cluster management
~~~~~~~~~~~~~~~~~~~~

.. index::
   single: awx-manage; cluster management

Refer to the :ref:`ag_clustering` section for details on the
``awx-manage provision_instance`` and ``awx-manage deprovision_instance``
commands.


.. note::
    Do not run other ``awx-manage`` commands unless instructed by Ansible Support.


.. _ag_token_utility:

Token and session management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. index::
   single: awx-manage; token management
   single: awx-manage; session management

AWX supports the following commands for OAuth2 token management:

.. contents::
    :local:


``create_oauth2_token``
^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to create OAuth2 tokens (specify the actual username for ``example_user`` below):

::

    $ awx-manage create_oauth2_token --user example_user

    New OAuth2 token for example_user: j89ia8OO79te6IAZ97L7E8bMgXCON2

Make sure you provide a valid user when creating tokens. Otherwise, an error message indicates that you tried to issue the command without specifying a user, or that you supplied a username that does not exist.


.. _ag_manage_utility_revoke_tokens:


``revoke_oauth2_tokens``
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to revoke OAuth2 tokens, both application tokens and personal access tokens (PAT). By default, it revokes all application tokens (but not their associated refresh tokens) and all personal access tokens. However, you can also specify a user for whom to revoke all tokens.

To revoke all existing OAuth2 tokens:

::

    $ awx-manage revoke_oauth2_tokens

To revoke all OAuth2 tokens and their refresh tokens:

::

    $ awx-manage revoke_oauth2_tokens --revoke_refresh

To revoke all OAuth2 tokens for the user with ``id=example_user`` (specify the actual username for ``example_user`` below):

::

    $ awx-manage revoke_oauth2_tokens --user example_user

To revoke all OAuth2 tokens and refresh tokens for the user with ``id=example_user``:

::

    $ awx-manage revoke_oauth2_tokens --user example_user --revoke_refresh



``cleartokens``
^^^^^^^^^^^^^^^^^^^

Use this command to clear tokens which have already been revoked. Refer to `Django's Oauth Toolkit documentation on cleartokens`_ for more detail.

.. _`Django's Oauth Toolkit documentation on cleartokens`: https://django-oauth-toolkit.readthedocs.io/en/latest/management_commands.html


``expire_sessions``
^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to terminate all sessions or all sessions for a specific user. Consider using this command when a user changes role in an organization, is removed from assorted groups in LDAP/AD, or the administrator wants to ensure the user can no longer execute jobs due to membership in these groups.

::

    $ awx-manage expire_sessions


This command terminates all sessions by default. The users associated with those sessions will consequently be logged out. To only expire the sessions of a specific user, you can pass their username using the ``--user`` flag (specify the actual username for ``example_user`` below):

::

    $ awx-manage expire_sessions --user example_user



``clearsessions``
^^^^^^^^^^^^^^^^^^^^^^^^

Use this command to delete all sessions that have expired. Refer to `Django's documentation on clearsessions`_ for more detail.

.. _`Django's documentation on clearsessions`: https://docs.djangoproject.com/en/2.1/topics/http/sessions/#clearing-the-session-store



For more information on OAuth2 token management in the AWX user interface, see the :ref:`ug_applications_auth` section of the |atu|.


Analytics gathering
~~~~~~~~~~~~~~~~~~~~~

.. index::
   single: awx-manage; data collection
   single: awx-manage; analytics gathering


Use this command to gather analytics on-demand outside of the predefined window (default is 4 hours):

::

   $ awx-manage gather_analytics --ship


For customers with disconnected environments who want to collect usage information about unique hosts automated across a time period, use this command:

::

   awx-manage host_metric --since YYYY-MM-DD --until YYYY-MM-DD --json


The parameters ``--since`` and ``--until`` specify date ranges and are optional, but at least one of them must be provided. The ``--json`` flag specifies the output format and is optional.
222
docs/docsite/rst/administration/clustering.rst
Normal file
@@ -0,0 +1,222 @@

.. _ag_clustering:

Clustering
============

.. index::
   pair: redundancy; instance groups
   pair: redundancy; clustering

Clustering is sharing load between hosts. Each instance should be able to act as an entry point for UI and API access. This should enable AWX administrators to use load balancers in front of as many instances as they wish and maintain good data visibility.

.. note::
    Load balancing is optional, and it is entirely possible to have ingress on one or all instances as needed. The ``CSRF_TRUSTED_ORIGIN`` setting may be required if you are using AWX behind a load balancer. See :ref:`ki_csrf_trusted_origin_setting` for more detail.

Each instance should be able to join the AWX cluster and expand its ability to execute jobs. This is a simple system where jobs can and will run anywhere rather than being directed where to run. Also, clustered instances can be grouped into different pools/queues, called :ref:`ag_instance_groups`.


Setup Considerations
---------------------

.. index::
   single: clustering; setup considerations
   pair: clustering; PostgreSQL

This section covers initial setup of clusters only. For upgrading an existing cluster, refer to the |atumg|.

Important considerations to note in the new clustering environment:

- PostgreSQL is still a standalone instance and is not clustered. AWX does not manage replica configuration or database failover (if the user configures standby replicas).

- When spinning up a cluster, the database node should be a standalone server, and PostgreSQL should not be installed on one of the AWX nodes.

- PgBouncer is not recommended for connection pooling with AWX. Currently, AWX relies heavily on ``pg_notify`` for sending messages across various components, and therefore, PgBouncer cannot readily be used in transaction pooling mode.

- The maximum supported number of instances in a cluster is 20.

- All instances should be reachable from all other instances, and they should be able to reach the database. It is also important for the hosts to have a stable address and/or hostname (depending on how the AWX host is configured).

- All instances must be geographically collocated, with reliable low-latency connections between instances.

- For purposes of upgrading to a clustered environment, your primary instance must be part of the ``default`` group in the inventory *AND* it needs to be the first host listed in the ``default`` group.

- Manual projects must be manually synced to all instances by the customer, and updated on all instances at once.

- The ``inventory`` file for platform deployments should be saved/persisted. If new instances are to be provisioned, the passwords and configuration options, as well as host names, must be made available to the installer.


Scaling the Web and Task pods independently
--------------------------------------------

You can scale replicas up or down for each deployment by using the ``web_replicas`` or ``task_replicas`` keys, respectively. You can also scale all pods across both deployments by using ``replicas``. The logic behind these CRD keys is as follows:

- If you specify the ``replicas`` field, the key passed will scale both the ``web`` and ``task`` replicas to the same number.
- If ``web_replicas`` or ``task_replicas`` is ever passed, it will override the existing ``replicas`` field on the specific deployment with the new key value.

These new replicas can be constrained in a similar manner to previous single deployments by appending the particular deployment name in front of the constraint used. More about those new constraints can be found below in the :ref:`ag_assign_pods_to_nodes` section.
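A minimal sketch of what this looks like in the custom resource spec (the resource name and replica counts are illustrative):

::

    ---
    apiVersion: awx.ansible.com/v1beta1
    kind: AWX
    metadata:
      name: awx-demo
    spec:
      # scale the two deployments independently; either key
      # overrides a plain `replicas` value for its deployment
      web_replicas: 3
      task_replicas: 2
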

.. _ag_assign_pods_to_nodes:

Assigning AWX pods to specific nodes
-------------------------------------

You can constrain the AWX pods created by the operator to run on a certain subset of nodes. ``node_selector`` and ``postgres_selector`` constrain the AWX pods to run only on the nodes that match all the specified key/value pairs. ``tolerations`` and ``postgres_tolerations`` allow the AWX pods to be scheduled onto nodes with matching taints. The ability to specify ``topologySpreadConstraints`` is also allowed through ``topology_spread_constraints``. If you want to use affinity rules for your AWX pod, you can use the ``affinity`` option.

If you want to constrain the web and task pods individually, you can do so by specifying the deployment type before the specific setting. For example, specifying ``task_tolerations`` will allow the AWX task pod to be scheduled onto nodes with matching taints.

+----------------------------------+------------------------------------------+----------+
| Name                             | Description                              | Default  |
+----------------------------------+------------------------------------------+----------+
| postgres_image                   | Path of the image to pull                | postgres |
+----------------------------------+------------------------------------------+----------+
| postgres_image_version           | Image version to pull                    | 13       |
+----------------------------------+------------------------------------------+----------+
| node_selector                    | AWX pods' nodeSelector                   | ''       |
+----------------------------------+------------------------------------------+----------+
| web_node_selector                | AWX web pods' nodeSelector               | ''       |
+----------------------------------+------------------------------------------+----------+
| task_node_selector               | AWX task pods' nodeSelector              | ''       |
+----------------------------------+------------------------------------------+----------+
| topology_spread_constraints      | AWX pods' topologySpreadConstraints      | ''       |
+----------------------------------+------------------------------------------+----------+
| web_topology_spread_constraints  | AWX web pods' topologySpreadConstraints  | ''       |
+----------------------------------+------------------------------------------+----------+
| task_topology_spread_constraints | AWX task pods' topologySpreadConstraints | ''       |
+----------------------------------+------------------------------------------+----------+
| affinity                         | AWX pods' affinity rules                 | ''       |
+----------------------------------+------------------------------------------+----------+
| web_affinity                     | AWX web pods' affinity rules             | ''       |
+----------------------------------+------------------------------------------+----------+
| task_affinity                    | AWX task pods' affinity rules            | ''       |
+----------------------------------+------------------------------------------+----------+
| tolerations                      | AWX pods' tolerations                    | ''       |
+----------------------------------+------------------------------------------+----------+
| web_tolerations                  | AWX web pods' tolerations                | ''       |
+----------------------------------+------------------------------------------+----------+
| task_tolerations                 | AWX task pods' tolerations               | ''       |
+----------------------------------+------------------------------------------+----------+
| annotations                      | AWX pods' annotations                    | ''       |
+----------------------------------+------------------------------------------+----------+
| postgres_selector                | Postgres pods' nodeSelector              | ''       |
+----------------------------------+------------------------------------------+----------+
| postgres_tolerations             | Postgres pods' tolerations               | ''       |
+----------------------------------+------------------------------------------+----------+

An example of customization could be:

::

    ---
    spec:
      ...
      node_selector: |
        disktype: ssd
        kubernetes.io/arch: amd64
        kubernetes.io/os: linux
      topology_spread_constraints: |
        - maxSkew: 100
          topologyKey: "topology.kubernetes.io/zone"
          whenUnsatisfiable: "ScheduleAnyway"
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: "<resourcename>"
      tolerations: |
        - key: "dedicated"
          operator: "Equal"
          value: "AWX"
          effect: "NoSchedule"
      task_tolerations: |
        - key: "dedicated"
          operator: "Equal"
          value: "AWX_task"
          effect: "NoSchedule"
      postgres_selector: |
        disktype: ssd
        kubernetes.io/arch: amd64
        kubernetes.io/os: linux
      postgres_tolerations: |
        - key: "dedicated"
          operator: "Equal"
          value: "AWX"
          effect: "NoSchedule"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              preference:
                matchExpressions:
                  - key: another-node-label-key
                    operator: In
                    values:
                      - another-node-label-value
                      - another-node-label-value
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: security
                      operator: In
                      values:
                        - S2
                topologyKey: topology.kubernetes.io/zone


Status and Monitoring via Browser API
--------------------------------------

AWX itself reports as much status as it can via the Browsable API at ``/api/v2/ping`` in order to provide validation of the health of the cluster, including:

- The instance servicing the HTTP request

- The timestamps of the last heartbeat of all other instances in the cluster

- Instance Groups and Instance membership in those groups

View more details about Instances and Instance Groups, including running jobs and membership information, at ``/api/v2/instances/`` and ``/api/v2/instance_groups/``.


Instance Services and Failure Behavior
----------------------------------------

Each AWX instance is made up of several different services working collaboratively:

- HTTP Services - This includes the AWX application itself as well as external web services.

- Callback Receiver - Receives job events from running Ansible jobs.

- Dispatcher - The worker queue that processes and runs all jobs.

- Redis - This key value store is used as a queue for event data propagated from ansible-playbook to the application.

- Rsyslog - log processing service used to deliver logs to various external logging services.

AWX is configured in such a way that if any of these services or their components fail, then all services are restarted. If these fail sufficiently often in a short span of time, then the entire instance will be placed offline in an automated fashion in order to allow remediation without causing unexpected behavior.


Job Runtime Behavior
---------------------

The way jobs are run and reported to a 'normal' user of AWX does not change. On the system side, some differences are worth noting:

- When a job is submitted from the API interface it gets pushed into the dispatcher queue. Each AWX instance will connect to and receive jobs from that queue using a particular scheduling algorithm. Any instance in the cluster is just as likely to receive the work and execute the task. If an instance fails while executing jobs, then the work is marked as permanently failed.

.. image:: ../common/images/clustering-visual.png

- Project updates run successfully on any instance that could potentially run a job. Projects will sync themselves to the correct version on the instance immediately prior to running the job. If the needed revision is already locally checked out and Galaxy or Collections updates are not needed, then a sync may not be performed.

- When the sync happens, it is recorded in the database as a project update with a ``launch_type = sync`` and ``job_type = run``. Project syncs will not change the status or version of the project; instead, they will update the source tree *only* on the instance where they run.

- If updates are needed from Galaxy or Collections, a sync is performed that downloads the required roles, consuming additional space in ``/tmp``. In cases where you have a big project (around 10 GB), disk space on ``/tmp`` may be an issue.


Job Runs
^^^^^^^^^^^

By default, when a job is submitted to the AWX queue, it can be picked up by any of the workers. However, you can control where a particular job runs, such as restricting which instances a job can run on.

In order to support temporarily taking an instance offline, there is an ``enabled`` property defined on each instance. When this property is disabled, no jobs will be assigned to that instance. Existing jobs will finish, but no new work will be assigned.
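A minimal sketch using the ``awx.awx.instance`` module from this changeset (the hostname is illustrative):

::

    - name: Temporarily take an instance offline
      awx.awx.instance:
        hostname: executionnode
        enabled: false
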
100
docs/docsite/rst/administration/configure_awx.rst
Normal file
@@ -0,0 +1,100 @@
.. _ag_configure_awx:

AWX Configuration
~~~~~~~~~~~~~~~~~~~

.. index::
   single: configure AWX

.. _configure_awx_overview:

You can configure various AWX settings within the Settings screen in the following tabs:

.. image:: ../common/images/ug-settings-menu-screen.png

Each tab contains fields with a **Reset** button, allowing you to revert any value entered back to the default value. **Reset All** allows you to revert all the values to their factory default values.

**Save** applies changes you make, but it does not exit the edit dialog. To return to the Settings screen, click **Settings** from the left navigation bar or use the breadcrumbs at the top of the current view.


Authentication
=================
.. index::
   single: social authentication
   single: authentication
   single: enterprise authentication
   pair: configuration; authentication

.. include:: ./configure_awx_authentication.rst


.. _configure_awx_jobs:

Jobs
=========
.. index::
   single: jobs
   pair: configuration; jobs

The Jobs tab allows you to configure the types of modules that are allowed to be used by AWX's Ad Hoc Commands feature, set limits on the number of jobs that can be scheduled, define their output size, and other details pertaining to working with Jobs in AWX.

1. From the left navigation bar, click **Settings**, then select **Jobs settings** from the Settings screen.

2. Set the configurable options from the fields provided. Click the tooltip |help| icon next to any field for which you need additional information or details. Refer to the :ref:`ug_galaxy` section for details about configuring Galaxy settings.

.. note::

    The values for all the timeouts are in seconds.

.. image:: ../common/images/configure-awx-jobs.png

3. Click **Save** to apply the settings or **Cancel** to abandon the changes.


.. _configure_awx_system:

System
======
.. index::
   pair: configuration; system

The System tab allows you to define the base URL for the AWX host, configure alerts, enable activity capturing, control visibility of users, enable certain AWX features and functionality through a license file, and configure logging aggregation options.

1. From the left navigation bar, click **Settings**.

2. The right side of the Settings window is a set of configurable System settings. Select from the following options:

   - **Miscellaneous System settings**: enable activity streams, specify the default execution environment, define the base URL for the AWX host, enable AWX administration alerts, set user visibility, define analytics, specify usernames and passwords, and configure proxies.
   - **Miscellaneous Authentication settings**: configure options associated with authentication methods (built-in or SSO), sessions (timeout, number of sessions logged in, tokens), and social authentication mapping.
   - **Logging settings**: configure logging options based on the type you choose:

   .. image:: ../common/images/configure-awx-system-logging-types.png

   For more information about each of the logging aggregation types, refer to the :ref:`ag_logging` section of the |ata|.


3. Set the configurable options from the fields provided. Click the tooltip |help| icon next to any field for which you need additional information or details. Below is an example of the System settings window.

.. |help| image:: ../common/images/tooltips-icon.png

.. image:: ../common/images/configure-awx-system.png

.. note::

    The **Allow External Users to Create Oauth2 Tokens** setting is disabled by default. This ensures external users cannot *create* their own tokens. If you enable then disable it, any tokens created by external users in the meantime will still exist, and are not automatically revoked.

4. Click **Save** to apply the settings or **Cancel** to abandon the changes.

.. _configure_awx_ui:

User Interface
================
.. index::
    pair: configuration; UI
    pair: configuration; data collection
    pair: configuration; custom logo
    pair: configuration; custom login message
    pair: logo; custom
    pair: login message; custom

.. include:: ../common/logos_branding.rst
@@ -0,0 +1,19 @@
Through the AWX user interface, you can set up a simplified login through various authentication types: GitHub, Google, LDAP, RADIUS, and SAML. After you create and register your developer application with the appropriate service, you can set up authorizations for them.

1. From the left navigation bar, click **Settings**.

2. The left side of the Settings window is a set of configurable Authentication settings. Select from the following options:

   - :ref:`ag_auth_azure`
   - :ref:`ag_auth_github`
   - :ref:`ag_auth_google_oauth2`
   - :ref:`LDAP settings <ag_auth_ldap>`
   - :ref:`ag_auth_radius`
   - :ref:`ag_auth_saml`
   - :ref:`ag_auth_tacacs`
   - :ref:`ag_auth_oidc`


   Different authentication types require you to enter different information. Be sure to include all of the required information.

3. Click **Save** to apply the settings or **Cancel** to abandon the changes.
442
docs/docsite/rst/administration/containers_instance_groups.rst
Normal file
@@ -0,0 +1,442 @@
.. _ag_ext_exe_env:

Container and Instance Groups
==================================

.. index::
   pair: container; groups
   pair: instance; groups

AWX allows you to execute jobs via ansible playbook runs directly on a member of the cluster or in a namespace of an OpenShift cluster with the necessary service account provisioned, called a Container Group. You can execute jobs in a container group only as-needed per playbook. For more information, see :ref:`ag_container_groups` towards the end of this section.

For |ees|, see :ref:`ug_execution_environments` in the |atu|.

.. _ag_instance_groups:

Instance Groups
------------------

Instances can be grouped into one or more Instance Groups. Instance groups can be assigned to one or more of the resources listed below.

- Organizations
- Inventories
- Job Templates

When a job associated with one of the resources executes, it will be assigned to the instance group associated with the resource. During the execution process, instance groups associated with Job Templates are checked before those associated with Inventories. Similarly, instance groups associated with Inventories are checked before those associated with Organizations. Thus, Instance Group assignments for the three resources form a hierarchy: Job Template **>** Inventory **>** Organization.
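For example, a sketch that pins a job template to a specific instance group, which then takes precedence over any inventory- or organization-level assignment (this assumes the ``awx.awx.job_template`` module's ``instance_groups`` parameter; the names are illustrative):

::

    - name: Pin a job template to an instance group
      awx.awx.job_template:
        name: Demo Job Template
        instance_groups:
          - default
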
|
||||
Here are some of the things to consider when working with instance groups:
|
||||
|
||||
- You may optionally define other groups and group instances in those groups. These groups should be prefixed with ``instance_group_``. Instances are required to be in the ``awx`` or ``execution_nodes`` group alongside other ``instance_group_`` groups. In a clustered setup, at least one instance **must** be present in the ``awx`` group, which will appear as ``controlplane`` in the API instance groups. See :ref:`ag_awx_group_policies` for example scenarios.
|
||||
|
||||
- A ``default`` API instance group is automatically created with all nodes capable of running jobs. Technically, it is like any other instance group but if a specific instance group is not associated with a specific resource, then job execution will always fall back to the ``default`` instance group. The ``default`` instance group always exists (it cannot be deleted nor renamed).
|
||||
|
||||
- Do not create a group named ``instance_group_default``.
|
||||
|
||||
- Do not name any instance the same as a group name.
|
||||
|
||||
|
||||
.. _ag_awx_group_policies:
|
||||
|
||||
``awx`` group policies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. index::
|
||||
pair: policies; awx groups
|
||||
|
||||
Use the following criteria when defining nodes:
|
||||
|
||||
- nodes in the ``awx`` group can define ``node_type`` hostvar to be ``hybrid`` (default) or ``control``
|
||||
- nodes in the ``execution_nodes`` group can define ``node_type`` hostvar to be ``execution`` (default) or ``hop``
|
||||
|
||||
You can define custom groups in the inventory file by naming groups with ``instance_group_*`` where ``*`` becomes the name of the group in the API. Or, you can create custom instance groups in the API after the install has finished.
|
||||
|
||||
The current behavior expects a member of an ``instance_group_*`` be part of ``awx`` or ``execution_nodes`` group. Consider this example scenario:
|
||||
|
||||
::
|
||||
|
||||
[awx]
|
||||
126-addr.tatu.home ansible_host=192.168.111.126 node_type=control
|
||||
|
||||
[awx:vars]
|
||||
peers=execution_nodes
|
||||
|
||||
[execution_nodes]
|
||||
|
||||
[instance_group_test]
|
||||
110-addr.tatu.home ansible_host=192.168.111.110 receptor_listener_port=8928
|
||||
|
||||
|
||||
As a result of running the installer, you will get the error below:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
TASK [ansible.automation_platform_installer.check_config_static : Validate mesh topology] ***
|
||||
fatal: [126-addr.tatu.home -> localhost]: FAILED! => {"msg": "The host '110-addr.tatu.home' is not present in either [awx] or [execution_nodes]"}
|
||||
|
||||
|
||||
To fix this, you could move the box ``110-addr.tatu.home`` to an ``execution_node`` group.
|
||||
|
||||
::
|
||||
|
||||
[awx]
|
||||
126-addr.tatu.home ansible_host=192.168.111.126 node_type=control
|
||||
|
||||
[awx:vars]
|
||||
peers=execution_nodes
|
||||
|
||||
[execution_nodes]
|
||||
110-addr.tatu.home ansible_host=192.168.111.110 receptor_listener_port=8928
|
||||
|
||||
[instance_group_test]
|
||||
110-addr.tatu.home
|
||||
|
||||
|
||||
This results in:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
TASK [ansible.automation_platform_installer.check_config_static : Validate mesh topology] ***
|
||||
ok: [126-addr.tatu.home -> localhost] => {"changed": false, "mesh": {"110-addr.tatu.home": {"node_type": "execution", "peers": [], "receptor_control_filename": "receptor.sock", "receptor_control_service_name": "control", "receptor_listener": true, "receptor_listener_port": 8928, "receptor_listener_protocol": "tcp", "receptor_log_level": "info"}, "126-addr.tatu.home": {"node_type": "control", "peers": ["110-addr.tatu.home"], "receptor_control_filename": "receptor.sock", "receptor_control_service_name": "control", "receptor_listener": false, "receptor_listener_port": 27199, "receptor_listener_protocol": "tcp", "receptor_log_level": "info"}}}
|
||||
|
||||
Upon upgrading from older versions of awx, the legacy ``instance_group_`` member will most likely have the awx code installed, which would cause that node to be placed in the ``awx`` group.
|
||||
|
||||
|
||||
Configuring Instance Groups from the API
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
.. index::
|
||||
pair: instance group; configure
|
||||
pair: instance group; API
|
||||
|
||||
Instance groups can be created by POSTing to ``/api/v2/instance_groups`` as a system administrator.
|
||||
Once created, instances can be associated with an instance group with:

.. code-block:: bash

    HTTP POST /api/v2/instance_groups/x/instances/ {'id': y}

An instance that is added to an instance group will automatically reconfigure itself to listen on the group's work queue. See the following section, :ref:`ag_instance_group_policies`, for more details.


.. _ag_instance_group_policies:

Instance group policies
^^^^^^^^^^^^^^^^^^^^^^^^^

.. index::
   pair: policies; instance groups
   pair: clustering; instance group policies

You can configure AWX instances to automatically join Instance Groups when they come online by defining a :term:`policy`. These policies are evaluated for every new instance that comes online.

Instance Group Policies are controlled by three optional fields on an ``Instance Group``:

- ``policy_instance_percentage``: This is a number between 0 - 100, guaranteeing that this percentage of active AWX instances will be added to this Instance Group. As new instances come online, if the number of instances in this group relative to the total number of instances is less than the given percentage, new ones will be added until the percentage condition is satisfied.

- ``policy_instance_minimum``: This policy attempts to keep at least this many instances in the Instance Group. If the number of available instances is lower than this minimum, then all instances will be placed in this Instance Group.

- ``policy_instance_list``: This is a fixed list of instance names to always include in this Instance Group.
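
These fields can also be set through the API. A minimal sketch, reusing the placeholder host and credentials above and assuming an instance group with ID ``2``:

.. code-block:: bash

    # Keep at least 2 instances, or 50% of all instances, in this group,
    # whichever results in more instances
    curl -k -u admin:password -X PATCH \
         -H "Content-Type: application/json" \
         -d '{"policy_instance_percentage": 50, "policy_instance_minimum": 2}' \
         https://awx.example.com/api/v2/instance_groups/2/
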
The Instance Groups list view in the |at| User Interface provides a summary of the capacity levels for each instance group according to instance group policies:

|Instance Group policy example|

.. |Instance Group policy example| image:: ../common/images/instance-groups_list_view.png

See :ref:`ug_instance_groups_create` for further detail.


Notable policy considerations
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- ``policy_instance_percentage`` and ``policy_instance_minimum`` both set minimum allocations. The rule that results in more instances assigned to the group takes effect. For example, if you have a ``policy_instance_percentage`` of 50% and a ``policy_instance_minimum`` of 2 and you start 6 instances, 3 of them would be assigned to the Instance Group. If you reduce the number of total instances in the cluster to 2, then both of them would be assigned to the Instance Group to satisfy ``policy_instance_minimum``. This way, you can set a lower bound on the amount of available resources.

- Policies do not actively prevent instances from being associated with multiple Instance Groups, but this can effectively be achieved by making the percentages add up to 100. If you have 4 instance groups, assign each a percentage value of 25 and the instances will be distributed among them with no overlap (see the sketch after this list).
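
A minimal sketch of that even split, assuming four instance groups with IDs ``1`` through ``4`` and the same placeholder host and credentials:

.. code-block:: bash

    # Give each of four instance groups 25% of the cluster's instances
    for ig_id in 1 2 3 4; do
        curl -k -u admin:password -X PATCH \
             -H "Content-Type: application/json" \
             -d '{"policy_instance_percentage": 25}' \
             https://awx.example.com/api/v2/instance_groups/${ig_id}/
    done
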

Manually pinning instances to specific groups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. index::
   pair: pinning; instance groups
   pair: clustering; pinning

If you have a special instance which needs to be exclusively assigned to a specific Instance Group but don't want it to automatically join other groups via "percentage" or "minimum" policies:

1. Add the instance to one or more Instance Groups' ``policy_instance_list``.

2. Update the instance's ``managed_by_policy`` property to be ``False``.

This will prevent the instance from being automatically added to other groups based on percentage and minimum policies; it will only belong to the groups you've manually assigned it to:

.. code-block:: bash

    HTTP PATCH /api/v2/instance_groups/N/
    {
        "policy_instance_list": ["special-instance"]
    }

    HTTP PATCH /api/v2/instances/X/
    {
        "managed_by_policy": false
    }


.. _ag_instance_groups_job_runtime_behavior:

Job Runtime Behavior
^^^^^^^^^^^^^^^^^^^^^^

When you run a job associated with an instance group, some behaviors worth noting are:

- If a cluster is divided into separate instance groups, then the behavior is similar to the cluster as a whole. If two instances are assigned to a group, either one is just as likely to receive a job as any other instance in the same group.

- As AWX instances are brought online, the work capacity of the system effectively expands. If those instances are also placed into instance groups, then they also expand that group's capacity. If an instance is performing work and is a member of multiple groups, then capacity is reduced from all groups of which it is a member. De-provisioning an instance removes capacity from the cluster wherever that instance was assigned.

.. note::

   Not all instances are required to be provisioned with an equal capacity.


.. _ag_instance_groups_control_where_job_runs:

Control Where a Job Runs
^^^^^^^^^^^^^^^^^^^^^^^^^

If the job template, inventory, or organization has instance groups associated with it, a job run from that job template is not eligible for the default behavior. This means that if all of the instances inside of the instance groups associated with these three resources are out of capacity, the job will remain in the pending state until capacity becomes available.

The order of preference in determining which instance group to submit the job to is as follows:

1. job template
2. inventory
3. organization (by way of project)

If instance groups are associated with the job template, and all of these are at capacity, then the job will be submitted to instance groups specified on the inventory, and then the organization. Jobs should execute in those groups in preferential order as resources are available.
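
These associations are made on each resource's ``instance_groups`` endpoint. A minimal sketch, assuming a job template with ID ``42``, an instance group with ID ``5``, and the same placeholder host and credentials:

.. code-block:: bash

    # Associate instance group 5 with job template 42
    curl -k -u admin:password -X POST \
         -H "Content-Type: application/json" \
         -d '{"id": 5}' \
         https://awx.example.com/api/v2/job_templates/42/instance_groups/
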
The global ``default`` group can still be associated with a resource, just like any of the custom instance groups defined in the playbook. This can be used to specify a preferred instance group on the job template or inventory, while still allowing the job to be submitted to any instance if those are out of capacity.

As an example, by associating ``group_a`` with a Job Template and also associating the ``default`` group with its inventory, you allow the ``default`` group to be used as a fallback in case ``group_a`` runs out of capacity.

In addition, it is possible to not associate an instance group with one resource but designate another resource as the fallback. For example, not associating an instance group with a job template and having it fall back to the inventory and/or the organization's instance group.

This presents two other useful use cases:

1. Associating instance groups with an inventory (while omitting the job template association) ensures that any playbook run against that inventory runs only on the group associated with it. This is particularly useful in the situation where only those instances have a direct link to the managed nodes.

2. An administrator can assign instance groups to organizations. This effectively allows the administrator to segment out the entire infrastructure and guarantee that each organization has capacity to run jobs without interfering with any other organization's ability to run jobs.

Likewise, an administrator could assign multiple groups to each organization as desired, as in the following scenario:

- There are three instance groups: A, B, and C. There are two organizations: Org1 and Org2.
- The administrator assigns group A to Org1, group B to Org2, and then assigns group C to both Org1 and Org2 as an overflow for any extra capacity that may be needed.
- The organization administrators are then free to assign inventory or job templates to whichever group they want (or just let them inherit the default order from the organization).

|Instance Group example|

.. |Instance Group example| image:: ../common/images/instance-groups-scenarios.png

Arranging resources in this way offers a lot of flexibility. You can also create instance groups with only one instance, allowing you to direct work toward a very specific host in the AWX cluster.

.. _ag_instancegrp_cpacity:

Instance group capacity limits
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. index::
   pair: instance groups; capacity
   pair: instance groups; limits
   pair: instance groups; forks
   pair: instance groups; jobs

Sometimes there is external business logic which may drive the desire to limit the concurrency of jobs sent to an instance group, or the maximum number of forks to be consumed.

For traditional instances and instance groups, there could be a desire to allow two organizations to run jobs on the same underlying instances, but limit each organization's total number of concurrent jobs. This can be achieved by creating an instance group for each organization and assigning a value for ``max_concurrent_jobs``.

For container groups, AWX is generally not aware of the resource limits of the OpenShift cluster. There may be limits set on the number of pods in a namespace, or the cluster may only have resources available to schedule a certain number of pods at a time if no auto-scaling is in place. Again, in this case, we can adjust the value of ``max_concurrent_jobs``.

Another parameter available is ``max_forks``. This provides additional flexibility for capping the capacity consumed on an instance group or container group. This may be used if jobs with a wide variety of inventory sizes and "forks" values are being run. This way, you can limit an organization to run up to 10 jobs concurrently, but consume no more than 50 forks at a time:

::

    max_concurrent_jobs: 10
    max_forks: 50

If 10 jobs that use 5 forks each are run, an 11th job will wait until one of them finishes before running on that group (or it will be scheduled on a different group with capacity).

If 2 jobs are running with 20 forks each, then a 3rd job with a ``task_impact`` of 11 or more will wait until one of them finishes before running on that group (or it will be scheduled on a different group with capacity).
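
These two values map directly to fields on the instance group. A minimal sketch, reusing the placeholder host, credentials, and group ID from the earlier examples:

.. code-block:: bash

    # Cap this group at 10 concurrent jobs and 50 forks in total
    curl -k -u admin:password -X PATCH \
         -H "Content-Type: application/json" \
         -d '{"max_concurrent_jobs": 10, "max_forks": 50}' \
         https://awx.example.com/api/v2/instance_groups/2/
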
For container groups, using the ``max_forks`` value is useful given that all jobs are submitted using the same ``pod_spec`` with the same resource requests, irrespective of the "forks" value of the job. The default ``pod_spec`` sets requests and not limits, so the pods can "burst" above their requested value without being throttled or reaped. By setting the ``max_forks`` value, you can help prevent a scenario where too many jobs with large forks values are scheduled concurrently and cause the OpenShift nodes to be oversubscribed with multiple pods using more resources than their requested value.

To set the maximum values for the concurrent jobs and forks in an instance group, see :ref:`ug_instance_groups_create` in the |atu|.

.. _ag_instancegrp_deprovision:

Deprovision Instance Groups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. index::
   pair: groups; deprovisioning

Re-running the setup playbook does not automatically deprovision instances, since clusters do not currently distinguish between an instance that was taken offline intentionally and one that failed. Instead, shut down all services on the AWX instance and then run the deprovisioning tool from any other instance:

#. Shut down the instance or stop the service with the command ``automation-awx-service stop``.

#. Run the deprovision command ``$ awx-manage deprovision_instance --hostname=<name used in inventory file>`` from another instance to remove it from the AWX cluster registry.

   Example: ``awx-manage deprovision_instance --hostname=hostB``

Similarly, deprovisioning instances in AWX does not automatically deprovision or remove instance groups, even though re-provisioning will often leave them unused. They may still show up in API endpoints and stats monitoring. These groups can be removed with the following command:

Example: ``awx-manage unregister_queue --queuename=<name>``

Removing an instance's membership from an instance group in the inventory file and re-running the setup playbook does not ensure the instance won't be added back to a group. To be sure that an instance will not be added back to a group, remove it via the API and also remove it from your inventory file, or you can stop defining instance groups in the inventory file altogether. You can also manage instance group topology through the |at| User Interface. For more information on managing instance groups in the UI, refer to :ref:`Instance Groups <ug_instance_groups>` in the |atu|.

.. _ag_container_groups:

Container Groups
-----------------

.. index::
   single: container groups
   pair: containers; instance groups

AWX supports :term:`Container Groups`, which allow you to execute jobs in AWX regardless of whether AWX is installed as a standalone, in a virtual environment, or in a container. Container groups act as a pool of resources within a virtual environment. You can create instance groups to point to an OpenShift container, which are job environments that are provisioned on-demand as a Pod that exists only for the duration of the playbook run. This is known as the ephemeral execution model and ensures a clean environment for every job run.

In some cases, it is desirable to have container groups be "always-on", which is configured through the creation of an instance.

.. note::

   Container Groups upgraded from versions prior to |at| 4.0 will revert back to default and completely remove the old pod definition, clearing out all custom pod definitions in the migration.

Container groups are different from |ees| in that |ees| are container images and do not use a virtual environment. See :ref:`ug_execution_environments` in the |atu| for further detail.


Create a container group
^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. include:: ../common/get-creds-from-service-account.rst

To create a container group:

1. Use the AWX user interface to create an :ref:`ug_credentials_ocp_k8s` credential that will be used with your container group. See :ref:`ug_credentials_add` in the |atu| for detail.

2. Create a new container group by navigating to the Instance Groups configuration window: click **Instance Groups** in the left navigation bar.

3. Click the **Add** button and select **Create Container Group**.

|IG - create new CG|

.. |IG - create new CG| image:: ../common/images/instance-group-create-new-cg.png

4. Enter a name for your new container group and select the credential previously created to associate it with the container group.

.. _ag_customize_pod_spec:

Customize the Pod spec
^^^^^^^^^^^^^^^^^^^^^^^^

AWX provides a simple default Pod specification; however, you can provide a custom YAML (or JSON) document that overrides the default Pod spec. This field accepts any custom fields (e.g. ``ImagePullSecrets``) that can be "serialized" as valid Pod JSON or YAML. A full list of options can be found in the `OpenShift documentation <https://docs.openshift.com/online/pro/architecture/core_concepts/pods_and_services.html>`_.

To customize the Pod spec, use the toggle to enable and expand the **Pod Spec Override** field, specify the namespace in that field, and click **Save** when done.

|IG - CG customize pod|

.. |IG - CG customize pod| image:: ../common/images/instance-group-customize-cg-pod.png

You may provide additional customizations, if needed. Click **Expand** to view the entire customization window.

.. image:: ../common/images/instance-group-customize-cg-pod-expanded.png

.. note::

   The image used at job launch time is determined by which |ee| is associated with the job. If a Container Registry credential is associated with the |ee|, then AWX will attempt to make an ``ImagePullSecret`` to pull the image. If you prefer not to give the service account permission to manage secrets, you must pre-create the ``ImagePullSecret`` and specify it on the pod spec, and omit any credential from the |ee| used.
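
A minimal sketch of such an override is shown below. The namespace, secret name, and image are placeholders, and the container name and arguments are assumed to mirror the default job runner; paste the resulting YAML into the **Pod Spec Override** field:

.. code-block:: bash

    # Write a minimal pod spec override that references a pre-created
    # pull secret instead of letting AWX manage secrets
    cat > pod-spec-override.yml <<'EOF'
    apiVersion: v1
    kind: Pod
    metadata:
      namespace: awx-jobs
    spec:
      serviceAccountName: default
      imagePullSecrets:
        - name: my-pull-secret
      containers:
        - name: worker
          image: quay.io/example/custom-ee:latest
          args:
            - ansible-runner
            - worker
            - --private-data-dir=/runner
    EOF
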
Once the container group is successfully created, the **Details** tab of the newly created container group remains open, allowing you to review and edit your container group information. This is the same menu that is opened if the Edit (|edit-button|) button is clicked from the **Instance Group** link. You can also edit **Instances** and review **Jobs** associated with this instance group.

.. |edit-button| image:: ../common/images/edit-button.png

|IG - example CG successfully created|

.. |IG - example CG successfully created| image:: ../common/images/instance-group-example-cg-successfully-created.png

Container groups and instance groups are labeled accordingly.

.. note::

   Despite the fact that customers may have custom Pod specs, upgrades can be difficult if the default ``pod_spec`` changes. Most manifests can be applied to any namespace, with the namespace specified separately; most likely you will only need to override the namespace. Similarly, pinning a default image for different releases of the platform to different versions of the default job runner container is tricky. If the default image is specified in the Pod spec, then upgrades do not pick up the new default when changes are made to the default Pod spec.


Verify container group functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To verify the deployment and termination of your container:

1. Create a mock inventory and associate the container group to it by populating the name of the container group in the **Instance Group** field. See :ref:`ug_inventories_add` in the |atu| for detail.

|Dummy inventory|

.. |Dummy inventory| image:: ../common/images/inventories-create-new-cg-test-inventory.png

2. Create a "localhost" host in the inventory with these variables:

::

    {'ansible_host': '127.0.0.1', 'ansible_connection': 'local'}

|Inventory with localhost|

.. |Inventory with localhost| image:: ../common/images/inventories-create-new-cg-test-localhost.png

3. Launch an ad hoc job against the localhost using the *ping* or *setup* module. Even though the **Machine Credential** field is required, it does not matter which one is selected for this simple test.

|Launch inventory with localhost|

.. |Launch inventory with localhost| image:: ../common/images/inventories-launch-adhoc-cg-test-localhost.png

.. image:: ../common/images/inventories-launch-adhoc-cg-test-localhost2.png

In the job details view, you can see that the container was reached successfully by the ad hoc job.

|Inventory with localhost ping success|

.. |Inventory with localhost ping success| image:: ../common/images/inventories-launch-adhoc-cg-test-localhost-success.png

If you have an OpenShift UI, you can see Pods appear and disappear as they deploy and terminate. Alternatively, you can use the CLI to perform a ``get pod`` operation on your namespace to watch these same events occurring in real-time.
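
For example (``awx-jobs`` is a placeholder namespace; substitute ``oc`` for ``kubectl`` on OpenShift):

.. code-block:: bash

    # Watch job pods appear and disappear as jobs start and finish
    kubectl get pods -n awx-jobs --watch
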

View container group jobs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When you run a job associated with a container group, you can see the details of that job in the **Details** view, along with its associated container group and the execution environment that spun up.

|IG - instances jobs|

.. |IG - instances jobs| image:: ../common/images/instance-group-job-details-with-cgs.png


Kubernetes API failure conditions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When running a container group and the Kubernetes API responds that the resource quota has been exceeded, AWX keeps the job in the pending state. Other failures result in the traceback of the **Error Details** field showing the failure reason, similar to the following example:

::

    Error creating pod: pods is forbidden: User "system: serviceaccount: aap:example" cannot create resource "pods" in API group "" in the namespace "aap"

.. _ag_container_capacity:

Container capacity limits
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. index::
   pair: container groups; capacity
   pair: container groups; limits

Capacity limits and quotas for containers are defined via objects in the Kubernetes API:

- To set limits on all pods within a given namespace, use the ``LimitRange`` object (a minimal sketch follows this list). Refer to the OpenShift documentation for `Quotas and Limit Ranges <https://docs.openshift.com/online/pro/dev_guide/compute_resources.html#overview>`_.

- To set limits directly on the pod definition launched by AWX, see :ref:`ag_customize_pod_spec` and refer to the OpenShift documentation to set the options for `compute resources <https://docs.openshift.com/online/pro/dev_guide/compute_resources.html#dev-compute-resources>`_.
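
A minimal ``LimitRange`` sketch, assuming a namespace named ``awx-jobs`` and placeholder resource values:

.. code-block:: bash

    # Apply default requests and limits to every container in the namespace
    kubectl apply -n awx-jobs -f - <<'EOF'
    apiVersion: v1
    kind: LimitRange
    metadata:
      name: awx-job-limits
    spec:
      limits:
        - type: Container
          default:          # default limits
            cpu: "1"
            memory: 2Gi
          defaultRequest:   # default requests
            cpu: 500m
            memory: 1Gi
    EOF
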
.. note::

   Container groups do not use the capacity algorithm that normal nodes use. You would need to explicitly set the number of forks at the job template level, for instance. If forks are configured in AWX, that setting will be passed along to the container.