Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 19:44:43 -03:30)
Compare commits: stale-acti ... dmzoneill-
210 Commits
Commits (abbreviated SHA1):

5ca017d359 4b6f7e0ebe 370c567be1 9be64f3de5 30500e5a95 bb323c5710 7571df49d5 1559c21033 d9b81731e9 2034cca3a9
0b5e59d9cb f48b2d1ae5 b44bb98c7e 8cafdf0400 3f566c8737 c8021a25bf 934646a0f6 9bb97dd658 7150f5edc6 93da15c0ee
ab593bda45 065bd3ae2a 8ff7260bc6 a635445082 949e7efab1 615f09226f d903c524f5 393d9c39c6 dfab342bb4 12843eccf7
dd9160135d ad96a92fa7 ca8085fe7e b076cb00a9 ee9eac15dc 3f2f7b75a6 b71645f3b1 eb300252b8 2e2cd7f2de 727278aaa3
81825ab755 7f2a1b6b03 1b56d94d30 e1e32c971c a4a2fabc01 b7b7bfa520 887604317e d35d8b6ed7 ec28eff7f7 a5d17539c6
a49d894cf1 b3466d4449 237adc6150 09b028ee3c fb83bfbc31 88e406e121 59d0bcc63f 3fb3125bc3 d70c6b9474 5549516a37
14ac91a8a2 d5753818a0 33010a2e02 14454cc670 7ab2bca16e f0f655f2c3 4286d411a7 06ad32ed8e 1ebff23232 700de14c76
8605e339df e50954ce40 7caca60308 f4e13af056 decdb56288 bcd4c2e8ef d663066ac5 1ceebb275c f78ba282a6 81d88df757
0bdb01a9e9 cd91fbf59f f240e640e5 46f489185e dbb80fb7e3 cb3d357ce1 dfa4db9266 6906a88dc9 1f7be9258c dcce024424
79d7179c72 4d80f886e0 5179333185 362e11aaf2 decff01fa4 a14cc8199d b6436826f6 2109b5039e b6f9b73418 40a8a3cb2f
19f80c0a26 5d1bb2125e 99c512bcef ed0329f5db dd53345397 f66cde51d7 d1c31687fc 38424487f1 b0565e9937 44d85b589c
46f816e7a4 54b32c10f0 20202054cc e84e2962d0 2259047527 f429ef6ca7 4b637c1319 4c41f6b018 3ae72219b4 402c29dc52
8eb4a9a2a0 36f3b46726 55c6a319dc 56b6a07f6e 519fd22bec 2e5306ae8e 068e6acbd5 f9a23a5645 40150a2be8 b79aa5b1ed
b3aeb962ce 2300b8fddf 3a3284b5df 2359004cc1 694d7e98e7 8c9c02c975 8a902debd5 6dcaa09dfb 21fd6af0f9 eeae1d59d4
a252d0ae33 48971411cc 083c05f12a b558397b67 904c6001e9 818e11dfdc 7fc13a0569 92c693f14e f2417f0ed2 8f22188116
05502c0af8 957ce59bf7 cc4cc37d46 1e254c804c 1b44bebed3 a4cf55bdba c333d0e82f b093c89a84 f98493aa61 c36d2b0485
8ddb604bf1 cd9dd43be7 82323390a7 4c5ac1d3da 9c06370e33 449b95d1eb 1712540c8e 7cf639d8eb dbfcc40d7c 73d2c92ae3
24a4242147 92ce85b688 9531f8377a 15a16b3dd1 a37e7bf147 a2fcd2f97a c394ffdd19 69102cf265 a188798543 60108ebd10
8c7c00451a 7a1ed406da f916ffe1e9 901dbd697e d8b4a9825e 6db66c5f81 82ad7dcf40 93500f9fea 9ba70c151d 46dc61253f
6cb2cd18b0 5d1dd8ec41 9f69daf787 16ece5de7e ab0e9265c5 04cbbbccfa d1cacf64de 5385eb0fb3 7d7503279d d860d1d91b
3a17c45b64 bca68bcdf1 c32f234ebb 5cb3d3b078 5199cc5246 387e877485 d54c5934ff 2fa5116197 527755d986 f9c0b97c53
.github/PULL_REQUEST_TEMPLATE.md (8, vendored)

@@ -7,6 +7,14 @@ commit message and your description; but you should still explain what
the change does.
-->

##### Depends on
<!---
Please provide links to any other PR dependanices.
Indicating these should be merged first prior to this PR.
-->
- #12345
- https://github.com/xxx/yyy/pulls/1234

##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Breaking Change
.github/actions/awx_devel_image/action.yml (10, vendored)

@@ -11,6 +11,12 @@ runs:
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Set lower case owner name
shell: bash
run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'

- name: Log in to registry
shell: bash
run: |
@@ -18,11 +24,11 @@ runs:

- name: Pre-pull latest devel image to warm cache
shell: bash
run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}

- name: Build image for current source checkout
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
COMPOSE_TAG=${{ github.base_ref }} \
make docker-compose-build
.github/actions/run_awx_devel/action.yml (2, vendored)

@@ -35,7 +35,7 @@ runs:
- name: Start AWX
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
DEV_DOCKER_OWNER=${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
COMPOSE_UP_OPTS="-d" \
make docker-compose
.github/pr_labeler.yml (3, vendored)

@@ -15,5 +15,4 @@

"dependencies":
- any: ["awx/ui/package.json"]
- any: ["requirements/*.txt"]
- any: ["requirements/requirements.in"]
- any: ["requirements/*"]
.github/workflows/ci.yml (6, vendored)

@@ -107,7 +107,7 @@ jobs:
ansible-galaxy collection install -r molecule/requirements.yml
sudo rm -f $(which kustomize)
make kustomize
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind
KUSTOMIZE_PATH=$(readlink -f bin/kustomize) molecule -v test -s kind -- --skip-tags=replicas
env:
AWX_TEST_IMAGE: awx
AWX_TEST_VERSION: ci
@@ -127,10 +127,6 @@ jobs:

- name: Run sanity tests
run: make test_collection_sanity
env:
# needed due to cgroupsv2. This is fixed, but a stable release
# with the fix has not been made yet.
ANSIBLE_TEST_PREFER_PODMAN: 1

collection-integration:
name: awx_collection integration
.github/workflows/devel_images.yml (64, vendored)

@@ -3,28 +3,50 @@ name: Build/Push Development Images
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
workflow_dispatch:
push:
branches:
- devel
- release_*
- feature_*
jobs:
push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
push-development-images:
runs-on: ubuntu-latest
timeout-minutes: 60
timeout-minutes: 120
permissions:
packages: write
contents: read
strategy:
fail-fast: false
matrix:
build-targets:
- image-name: awx_devel
make-target: docker-compose-buildx
- image-name: awx_kube_devel
make-target: awx-kube-dev-buildx
- image-name: awx
make-target: awx-kube-buildx
steps:

- name: Skipping build of awx image for non-awx repository
run: |
echo "Skipping build of awx image for non-awx repository"
exit 0
if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')

- uses: actions/checkout@v3

- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Set lower case owner name
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Set GITHUB_ENV variables
run: |
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
env:
OWNER: '${{ github.repository_owner }}'

@@ -37,23 +59,19 @@ jobs:
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
- name: Setup node and npm
uses: actions/setup-node@v2
with:
node-version: '16.13.1'
if: matrix.build-targets.image-name == 'awx'

- name: Build images
- name: Prebuild UI for awx image (to speed up build process)
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
sudo apt-get install gettext
make ui-release
make ui-next
if: matrix.build-targets.image-name == 'awx'

- name: Push development images
- name: Build and push AWX devel images
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}

- name: Push AWX k8s image, only for upstream and feature branches
run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
if: endsWith(github.repository, '/awx')
make ${{ matrix.build-targets.make-target }}
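For readers less familiar with bash parameter expansion, the `${OWNER,,}` and `${GITHUB_REF##*/}` expansions used throughout these workflow steps behave as sketched below; this is a hedged Python equivalent, and the function name is illustrative rather than anything in AWX:

```python
def github_env_values(owner: str, github_ref: str) -> dict:
    """Illustrative Python equivalents of the bash expansions used in the workflow above."""
    return {
        # ${OWNER,,} lowercases the repository owner; ghcr.io image names must be lowercase.
        "OWNER_LC": owner.lower(),
        # ${GITHUB_REF##*/} strips everything up to and including the last '/',
        # leaving just the branch name, which becomes the image tag.
        "COMPOSE_TAG": github_ref.rsplit("/", 1)[-1],
    }


# Example: github_env_values("Ansible", "refs/heads/devel")
# -> {"OWNER_LC": "ansible", "COMPOSE_TAG": "devel"}
```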
.github/workflows/feature_branch_deletion.yml (12, vendored)

@@ -2,12 +2,10 @@
name: Feature branch deletion cleanup
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
on:
delete:
branches:
- feature_**
on: delete
jobs:
push:
branch_delete:
if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
@@ -22,6 +20,4 @@ jobs:
run: |
ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
ansible localhost -c local -m aws_s3 \
-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"

-a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
.github/workflows/promote.yml (20, vendored)

@@ -83,11 +83,15 @@ jobs:

- name: Re-tag and promote awx image
run: |
docker pull ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} quay.io/${{ github.repository }}:latest
docker push quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository }}:latest
docker pull ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker push quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
docker buildx imagetools create \
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository }}:${{ github.event.release.tag_name }}
docker buildx imagetools create \
ghcr.io/${{ github.repository }}:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository }}:latest

- name: Re-tag and promote awx-ee image
run: |
docker buildx imagetools create \
ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }} \
--tag quay.io/${{ github.repository_owner }}/awx-ee:${{ github.event.release.tag_name }}
.github/workflows/stage.yml (34, vendored)

@@ -86,27 +86,33 @@ jobs:
-e push=yes \
-e awx_official=yes

- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log into registry ghcr.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: Log into registry quay.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USER }}
password: ${{ secrets.QUAY_TOKEN }}

- name: tag awx-ee:latest with version input
run: |
docker pull quay.io/ansible/awx-ee:latest
docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
docker buildx imagetools create \
quay.io/ansible/awx-ee:latest \
--tag ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}

- name: Build and stage awx-operator
- name: Stage awx-operator image
working-directory: awx-operator
run: |
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \
--build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
IMG=ghcr.io/${{ github.repository_owner }}/awx-operator:${{ github.event.inputs.operator_version }} \
make docker-buildx

- name: Run test deployment with awx-operator
working-directory: awx-operator
.gitignore (8, vendored)

@@ -46,6 +46,11 @@ tools/docker-compose/overrides/
tools/docker-compose-minikube/_sources
tools/docker-compose/keycloak.awx.realm.json

!tools/docker-compose/editable_dependencies
tools/docker-compose/editable_dependencies/*
!tools/docker-compose/editable_dependencies/README.md
!tools/docker-compose/editable_dependencies/install.sh

# Tower setup playbook testing
setup/test/roles/postgresql
**/provision_docker
@@ -169,3 +174,6 @@ awx/ui_next/build
# Docs build stuff
docs/docsite/build/
_readthedocs/

# Pyenv
.python-version
.vscode/launch.json (113, vendored, new file)

@@ -0,0 +1,113 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "run_ws_heartbeat",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_ws_heartbeat"],
"django": true,
"preLaunchTask": "stop awx-ws-heartbeat",
"postDebugTask": "start awx-ws-heartbeat"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_callback_receiver",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_callback_receiver"],
"django": true,
"preLaunchTask": "stop awx-receiver",
"postDebugTask": "start awx-receiver"
},
{
"name": "run_dispatcher",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_dispatcher"],
"django": true,
"preLaunchTask": "stop awx-dispatcher",
"postDebugTask": "start awx-dispatcher"
},
{
"name": "run_rsyslog_configurer",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_rsyslog_configurer"],
"django": true,
"preLaunchTask": "stop awx-rsyslog-configurer",
"postDebugTask": "start awx-rsyslog-configurer"
},
{
"name": "run_cache_clear",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_cache_clear"],
"django": true,
"preLaunchTask": "stop awx-cache-clear",
"postDebugTask": "start awx-cache-clear"
},
{
"name": "run_wsrelay",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["run_wsrelay"],
"django": true,
"preLaunchTask": "stop awx-wsrelay",
"postDebugTask": "start awx-wsrelay"
},
{
"name": "daphne",
"type": "debugpy",
"request": "launch",
"program": "/var/lib/awx/venv/awx/bin/daphne",
"args": ["-b", "127.0.0.1", "-p", "8051", "awx.asgi:channel_layer"],
"django": true,
"preLaunchTask": "stop awx-daphne",
"postDebugTask": "start awx-daphne"
},
{
"name": "runserver(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "runserver_plus(uwsgi alternative)",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["runserver_plus", "127.0.0.1:8052"],
"django": true,
"preLaunchTask": "stop awx-uwsgi and install Werkzeug",
"postDebugTask": "start awx-uwsgi"
},
{
"name": "shell_plus",
"type": "debugpy",
"request": "launch",
"program": "manage.py",
"args": ["shell_plus"],
"django": true,
},
]
}
.vscode/tasks.json (100, vendored, new file)

@@ -0,0 +1,100 @@
{
"version": "2.0.0",
"tasks": [
{
"label": "start awx-cache-clear",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-cache-clear"
},
{
"label": "stop awx-cache-clear",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-cache-clear"
},
{
"label": "start awx-daphne",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-daphne"
},
{
"label": "stop awx-daphne",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-daphne"
},
{
"label": "start awx-dispatcher",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-dispatcher"
},
{
"label": "stop awx-dispatcher",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-dispatcher"
},
{
"label": "start awx-receiver",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-receiver"
},
{
"label": "stop awx-receiver",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-receiver"
},
{
"label": "start awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslog-configurer"
},
{
"label": "stop awx-rsyslog-configurer",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslog-configurer"
},
{
"label": "start awx-rsyslogd",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-rsyslogd"
},
{
"label": "stop awx-rsyslogd",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-rsyslogd"
},
{
"label": "start awx-uwsgi",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "stop awx-uwsgi and install Werkzeug",
"type": "shell",
"command": "pip install Werkzeug; supervisorctl stop tower-processes:awx-uwsgi"
},
{
"label": "start awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-ws-heartbeat"
},
{
"label": "stop awx-ws-heartbeat",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-ws-heartbeat"
},
{
"label": "start awx-wsrelay",
"type": "shell",
"command": "supervisorctl start tower-processes:awx-wsrelay"
},
{
"label": "stop awx-wsrelay",
"type": "shell",
"command": "supervisorctl stop tower-processes:awx-wsrelay"
}
]
}
Makefile (76)

@@ -1,6 +1,6 @@
-include awx/ui_next/Makefile

PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
PYTHON := $(notdir $(shell for i in python3.11 python3; do command -v $$i; done|sed 1q))
SHELL := bash
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
@@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null)

# ansible-test requires semver compatable version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -47,6 +47,8 @@ VAULT ?= false
VAULT_TLS ?= false
# If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false
# If set to true docker-compose will install editable dependencies
EDITABLE_DEPENDENCIES ?= false

VENV_BASE ?= /var/lib/awx/venv

@@ -63,7 +65,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==8.0.4 wheel==0.38.4
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==69.0.2 setuptools_scm[toml]==8.0.4 wheel==0.42.0

NAME ?= awx

@@ -75,6 +77,9 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz

I18N_FLAG_FILE = .i18n_built

## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
PLATFORMS ?= linux/amd64,linux/arm64 # linux/ppc64le,linux/s390x

.PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
develop refresh adduser migrate dbchange \
receiver test test_unit test_coverage coverage_html \
@@ -213,8 +218,6 @@ collectstatic:
fi; \
$(PYTHON) manage.py collectstatic --clear --noinput > /dev/null 2>&1

DEV_RELOAD_COMMAND ?= supervisorctl restart tower-processes:*

uwsgi: collectstatic
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
@@ -222,7 +225,7 @@ uwsgi: collectstatic
uwsgi /etc/tower/uwsgi.ini

awx-autoreload:
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx "$(DEV_RELOAD_COMMAND)"
@/awx_devel/tools/docker-compose/awx-autoreload /awx_devel/awx

daphne:
@if [ "$(VENV_BASE)" ]; then \
@@ -302,7 +305,7 @@ swagger: reports
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs --release=$(VERSION_TARGET) | tee reports/$@.report)
(set -o pipefail && py.test $(PYTEST_ARGS) awx/conf/tests/functional awx/main/tests/functional/api awx/main/tests/docs | tee reports/$@.report)

check: black

@@ -532,15 +535,23 @@ docker-compose-sources: .git/hooks/pre-commit
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS) \
-e enable_tacacs=$(TACACS) \
$(EXTRA_SOURCES_ANSIBLE_OPTS)
-e install_editable_dependencies=$(EDITABLE_DEPENDENCIES) \
$(EXTRA_SOURCES_ANSIBLE_OPTS)

docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-e enable_vault=$(VAULT) \
-e vault_tls=$(VAULT_TLS);
-e vault_tls=$(VAULT_TLS) \
-e enable_ldap=$(LDAP); \
$(MAKE) docker-compose-up

docker-compose-up:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

docker-compose-down:
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) down --remove-orphans

docker-compose-credential-plugins: awx/projects docker-compose-sources
echo -e "\033[0;31mTo generate a CyberArk Conjur API key: docker exec -it tools_conjur_1 conjurctl account create quick-start\033[0m"
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml -f tools/docker-credential-plugins-override.yml up --no-recreate awx_1 --remove-orphans
@@ -585,12 +596,27 @@ docker-compose-build: Dockerfile.dev
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .

.PHONY: docker-compose-buildx
## Build awx_devel image for docker compose development environment for multiple architectures
docker-compose-buildx: Dockerfile.dev
- docker buildx create --name docker-compose-buildx
docker buildx use docker-compose-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEVEL_IMAGE_NAME) \
-f Dockerfile.dev .
- docker buildx rm docker-compose-buildx

docker-clean:
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker volume rm -f tools_var_lib_awx tools_awx_db tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)

docker-refresh: docker-clean docker-compose

@@ -612,9 +638,6 @@ clean-elk:
docker rm tools_elasticsearch_1
docker rm tools_kibana_1

psql-container:
docker run -it --net tools_default --rm postgres:12 sh -c 'exec psql -h "postgres" -p "5432" -U postgres'

VERSION:
@echo "awx: $(VERSION)"

@@ -647,6 +670,21 @@ awx-kube-build: Dockerfile
--build-arg HEADLESS=$(HEADLESS) \
-t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

## Build multi-arch awx image for deployment on Kubernetes environment.
awx-kube-buildx: Dockerfile
- docker buildx create --name awx-kube-buildx
docker buildx use awx-kube-buildx
- docker buildx build \
--push \
--build-arg VERSION=$(VERSION) \
--build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
--build-arg HEADLESS=$(HEADLESS) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
-f Dockerfile .
- docker buildx rm awx-kube-buildx

.PHONY: Dockerfile.kube-dev
## Generate Docker.kube-dev for awx_kube_devel image
Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
@@ -663,6 +701,18 @@ awx-kube-dev-build: Dockerfile.kube-dev
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .

## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
awx-kube-dev-buildx: Dockerfile.kube-dev
- docker buildx create --name awx-kube-dev-buildx
docker buildx use awx-kube-dev-buildx
- docker buildx build \
--push \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
--platform=$(PLATFORMS) \
--tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
-f Dockerfile.kube-dev .
- docker buildx rm awx-kube-dev-buildx

kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
@@ -154,10 +154,12 @@ def manage():
from django.conf import settings
from django.core.management import execute_from_command_line

# enforce the postgres version is equal to 12. if not, then terminate program with exit code of 1
# enforce the postgres version is a minimum of 12 (we need this for partitioning); if not, then terminate program with exit code of 1
# In the future if we require a feature of a version of postgres > 12 this should be updated to reflect that.
# The return of connection.pg_version is something like 12013
if not os.getenv('SKIP_PG_VERSION_CHECK', False) and not MODE == 'development':
if (connection.pg_version // 10000) < 12:
sys.stderr.write("Postgres version 12 is required\n")
sys.stderr.write("At a minimum, postgres version 12 is required\n")
sys.exit(1)

if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'):  # pragma: no cover
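A minimal, self-contained sketch of the version gate in the hunk above, assuming Django's `connection.pg_version` reports the server version as an integer of the form major * 10000 + minor (for example 120013 for PostgreSQL 12.13); the function name is illustrative:

```python
import os
import sys


def enforce_minimum_postgres(pg_version: int, minimum_major: int = 12) -> None:
    """Exit with status 1 when the reported PostgreSQL version is below the minimum major."""
    if os.getenv('SKIP_PG_VERSION_CHECK', False):
        return  # the check can be bypassed explicitly, mirroring the hunk above
    # Integer division by 10000 recovers the major version (e.g. 120013 // 10000 == 12).
    if (pg_version // 10000) < minimum_major:
        sys.stderr.write("At a minimum, postgres version 12 is required\n")
        sys.exit(1)
```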
@@ -93,6 +93,7 @@ register(
default='',
label=_('Login redirect override URL'),
help_text=_('URL to which unauthorized users will be redirected to log in. If blank, users will be sent to the login page.'),
warning_text=_('Changing the redirect URL could impact the ability to login if local authentication is also disabled.'),
category=_('Authentication'),
category_slug='authentication',
)

@@ -36,11 +36,13 @@ class Metadata(metadata.SimpleMetadata):
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
field_info['hidden'] = getattr(field, 'hidden', False)

text_attrs = [
'read_only',
'label',
'help_text',
'warning_text',
'min_length',
'max_length',
'min_value',

@@ -6,7 +6,7 @@ import copy
import json
import logging
import re
from collections import OrderedDict
from collections import Counter, OrderedDict
from datetime import timedelta
from uuid import uuid4

@@ -82,6 +82,7 @@ from awx.main.models import (
Project,
ProjectUpdate,
ProjectUpdateEvent,
ReceptorAddress,
RefreshToken,
Role,
Schedule,
@@ -190,6 +191,7 @@ SUMMARIZABLE_FK_FIELDS = {
'webhook_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
'credential_type': DEFAULT_SUMMARY_FIELDS,
'resource': ('ansible_id', 'resource_type'),
}

@@ -636,7 +638,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k, v in attrs.items():
if k not in exclusions:
if k not in exclusions and k != 'canonical_address_port':
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
@@ -5176,16 +5178,21 @@ class NotificationTemplateSerializer(BaseSerializer):
body = messages[event].get('body', {})
if body:
try:
rendered_body = (
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
)
potential_body = json.loads(rendered_body)
if not isinstance(potential_body, dict):
error_list.append(
_("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
)
except json.JSONDecodeError as exc:
error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc)))
sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())

# https://github.com/ansible/awx/issues/14410

# When rendering something such as "{{ job.id }}"
# the return type is not a dict, unlike "{{ job_metadata }}" which is a dict

# potential_body = json.loads(rendered_body)

# if not isinstance(potential_body, dict):
# error_list.append(
# _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
# )
except Exception as exc:
error_list.append(_("Webhook body for '{}' is not valid. The following gave an error ({}).".format(event, exc)))

if error_list:
raise serializers.ValidationError(error_list)
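The commented-out isinstance() check above relates to the rendering difference described in the hunk's comments; the following standalone sketch (with illustrative context values only, not AWX's context_stub) shows why "{{ job.id }}" renders to something json.loads() would not parse as a dict, while a JSON-shaped value such as "{{ job_metadata }}" does:

```python
import json
from jinja2.sandbox import ImmutableSandboxedEnvironment

env = ImmutableSandboxedEnvironment()
context = {"job": {"id": 42}, "job_metadata": '{"id": 42, "name": "Demo Job"}'}

scalar = env.from_string("{{ job.id }}").render(context)         # -> "42"
mapping = env.from_string("{{ job_metadata }}").render(context)  # -> '{"id": 42, "name": "Demo Job"}'

print(isinstance(json.loads(scalar), dict))   # False: an int, which the old check rejected
print(isinstance(json.loads(mapping), dict))  # True: a JSON object
```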
@@ -5458,17 +5465,25 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer):
class Meta:
model = InstanceLink
fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
fields = ('id', 'related', 'source', 'target', 'target_full_address', 'link_state')

source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())

target = serializers.SerializerMethodField()
target_full_address = serializers.SerializerMethodField()

def get_related(self, obj):
res = super(InstanceLinkSerializer, self).get_related(obj)
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
res['target_address'] = self.reverse('api:receptor_address_detail', kwargs={'pk': obj.target.id})
return res

def get_target(self, obj):
return obj.target.instance.hostname

def get_target_full_address(self, obj):
return obj.target.get_full_address()

class InstanceNodeSerializer(BaseSerializer):
class Meta:
@@ -5476,6 +5491,29 @@ class InstanceNodeSerializer(BaseSerializer):
fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')

class ReceptorAddressSerializer(BaseSerializer):
full_address = serializers.SerializerMethodField()

class Meta:
model = ReceptorAddress
fields = (
'id',
'url',
'address',
'port',
'protocol',
'websocket_path',
'is_internal',
'canonical',
'instance',
'peers_from_control_nodes',
'full_address',
)

def get_full_address(self, obj):
return obj.get_full_address()

class InstanceSerializer(BaseSerializer):
show_capabilities = ['edit']

@@ -5484,11 +5522,17 @@ class InstanceSerializer(BaseSerializer):
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField()
peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
peers = serializers.PrimaryKeyRelatedField(
help_text=_('Primary keys of receptor addresses to peer to.'), many=True, required=False, queryset=ReceptorAddress.objects.all()
)
reverse_peers = serializers.SerializerMethodField()
listener_port = serializers.IntegerField(source='canonical_address_port', required=False, allow_null=True)
peers_from_control_nodes = serializers.BooleanField(source='canonical_address_peers_from_control_nodes', required=False)
protocol = serializers.SerializerMethodField()

class Meta:
model = Instance
read_only_fields = ('ip_address', 'uuid', 'version')
read_only_fields = ('ip_address', 'uuid', 'version', 'managed', 'reverse_peers')
fields = (
'id',
'hostname',
@@ -5519,10 +5563,13 @@ class InstanceSerializer(BaseSerializer):
'managed_by_policy',
'node_type',
'node_state',
'managed',
'ip_address',
'listener_port',
'peers',
'reverse_peers',
'listener_port',
'peers_from_control_nodes',
'protocol',
)
extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5544,16 +5591,54 @@ class InstanceSerializer(BaseSerializer):

def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['receptor_addresses'] = self.reverse('api:instance_receptor_addresses_list', kwargs={'pk': obj.pk})
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed:
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
if obj.node_type == 'execution':
res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
return res

def create_or_update(self, validated_data, obj=None, create=True):
# create a managed receptor address if listener port is defined
port = validated_data.pop('listener_port', -1)
peers_from_control_nodes = validated_data.pop('peers_from_control_nodes', -1)

# delete the receptor address if the port is explicitly set to None
if obj and port == None:
obj.receptor_addresses.filter(address=obj.hostname).delete()

if create:
instance = super(InstanceSerializer, self).create(validated_data)
else:
instance = super(InstanceSerializer, self).update(obj, validated_data)
instance.refresh_from_db()  # instance canonical address lookup is deferred, so needs to be reloaded

# only create or update if port is defined in validated_data or already exists in the
# canonical address
# this prevents creating a receptor address if peers_from_control_nodes is in
# validated_data but a port is not set
if (port != None and port != -1) or instance.canonical_address_port:
kwargs = {}
if port != -1:
kwargs['port'] = port
if peers_from_control_nodes != -1:
kwargs['peers_from_control_nodes'] = peers_from_control_nodes
if kwargs:
kwargs['canonical'] = True
instance.receptor_addresses.update_or_create(address=instance.hostname, defaults=kwargs)

return instance

def create(self, validated_data):
return self.create_or_update(validated_data, create=True)

def update(self, obj, validated_data):
return self.create_or_update(validated_data, obj, create=False)

def get_summary_fields(self, obj):
summary = super().get_summary_fields(obj)
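The -1/None convention used in create_or_update() above distinguishes "field omitted from the request" from "field explicitly set to null"; a minimal sketch of that pattern on its own (the function and return strings are illustrative only):

```python
def resolve_listener_port_action(validated_data: dict) -> str:
    """Illustrate the sentinel convention: -1 means 'key absent', None means 'explicit null'."""
    port = validated_data.pop('listener_port', -1)
    if port == -1:
        return 'leave the canonical receptor address unchanged'
    if port is None:
        return 'delete the canonical receptor address'
    return f'create or update the canonical receptor address on port {port}'


print(resolve_listener_port_action({}))                        # key absent
print(resolve_listener_port_action({'listener_port': None}))   # explicit null
print(resolve_listener_port_action({'listener_port': 27199}))  # concrete port
```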
@@ -5563,6 +5648,16 @@ class InstanceSerializer(BaseSerializer):

return summary

def get_reverse_peers(self, obj):
return Instance.objects.prefetch_related('peers').filter(peers__in=obj.receptor_addresses.all()).values_list('id', flat=True)

def get_protocol(self, obj):
# note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
for addr in obj.receptor_addresses.all():
if addr.canonical:
return addr.protocol
return ""

def get_consumed_capacity(self, obj):
return obj.consumed_capacity

@@ -5576,47 +5671,20 @@ class InstanceSerializer(BaseSerializer):
return obj.health_check_pending

def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)

def check_peers_changed():
'''
return True if
- 'peers' in attrs
- instance peers matches peers in attrs
'''
return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
# Oddly, using 'source' on a DRF field populates attrs with the source name, so we should rename it back
if 'canonical_address_port' in attrs:
attrs['listener_port'] = attrs.pop('canonical_address_port')
if 'canonical_address_peers_from_control_nodes' in attrs:
attrs['peers_from_control_nodes'] = attrs.pop('canonical_address_peers_from_control_nodes')

if not self.instance and not settings.IS_K8S:
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))

node_type = get_field_from_model_or_attrs("node_type")
peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
listener_port = get_field_from_model_or_attrs("listener_port")
peers = attrs.get('peers', [])

if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))

if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
if check_peers_changed():
raise serializers.ValidationError(
_("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
)

if not listener_port and peers_from_control_nodes:
raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))

if not listener_port and self.instance and self.instance.peers_from.exists():
raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))

for peer in peers:
if peer.listener_port is None:
raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")

if not settings.IS_K8S:
if check_peers_changed():
raise serializers.ValidationError(_("Cannot change peers."))
# cannot enable peers_from_control_nodes if listener_port is not set
if attrs.get('peers_from_control_nodes'):
port = attrs.get('listener_port', -1)  # -1 denotes missing, None denotes explicit null
if (port is None) or (port == -1 and self.instance and self.instance.canonical_address is None):
raise serializers.ValidationError(_("Cannot enable peers_from_control_nodes if listener_port is not set."))

return super().validate(attrs)

@@ -5636,8 +5704,8 @@ class InstanceSerializer(BaseSerializer):
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
if self.instance.managed:
raise serializers.ValidationError(_("Cannot deprovision managed nodes."))
else:
if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
@@ -5656,18 +5724,48 @@ class InstanceSerializer(BaseSerializer):
def validate_listener_port(self, value):
"""
Cannot change listener port, unless going from none to integer, and vice versa
If instance is managed, cannot change listener port at all
"""
if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
raise serializers.ValidationError(_("Cannot change listener port."))
if self.instance:
canonical_address_port = self.instance.canonical_address_port
if value and canonical_address_port and canonical_address_port != value:
raise serializers.ValidationError(_("Cannot change listener port."))
if self.instance.managed and value != canonical_address_port:
raise serializers.ValidationError(_("Cannot change listener port for managed nodes."))
return value

def validate_peers(self, value):
# cannot peer to an instance more than once
peers_instances = Counter(p.instance_id for p in value)
if any(count > 1 for count in peers_instances.values()):
raise serializers.ValidationError(_("Cannot peer to the same instance more than once."))

if self.instance:
instance_addresses = set(self.instance.receptor_addresses.all())
setting_peers = set(value)
peers_changed = set(self.instance.peers.all()) != setting_peers

if not settings.IS_K8S and peers_changed:
raise serializers.ValidationError(_("Cannot change peers."))

if self.instance.managed and peers_changed:
raise serializers.ValidationError(_("Setting peers manually for managed nodes is not allowed."))

# cannot peer to self
if instance_addresses & setting_peers:
raise serializers.ValidationError(_("Instance cannot peer to its own address."))

# cannot peer to an instance that is already peered to this instance
if instance_addresses:
for p in setting_peers:
if set(p.instance.peers.all()) & instance_addresses:
raise serializers.ValidationError(_(f"Instance {p.instance.hostname} is already peered to this instance."))

return value

def validate_peers_from_control_nodes(self, value):
"""
Can only enable for K8S based deployments
"""
if value and not settings.IS_K8S:
raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
if self.instance and self.instance.managed and self.instance.canonical_address_peers_from_control_nodes != value:
raise serializers.ValidationError(_("Cannot change peers_from_control_nodes for managed nodes."))

return value
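The duplicate-peer rule at the top of validate_peers() above can be exercised on its own; here is a self-contained sketch of the Counter-based check (the namedtuple stands in for ReceptorAddress objects and is illustrative only):

```python
from collections import Counter, namedtuple

Addr = namedtuple('Addr', ['instance_id', 'address'])


def has_duplicate_instance_peers(peers) -> bool:
    """True when two of the selected peer addresses belong to the same instance."""
    counts = Counter(p.instance_id for p in peers)
    return any(count > 1 for count in counts.values())


peers = [Addr(1, 'hop1:27199'), Addr(2, 'exec1:27199'), Addr(1, 'hop1-alt:27199')]
print(has_duplicate_instance_peers(peers))  # True: instance 1 appears twice
```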
@@ -17,19 +17,18 @@ custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
receptor_protocol: 'tcp'
{% if instance.listener_port %}
{% if listener_port %}
receptor_protocol: {{ listener_protocol }}
receptor_listener: true
receptor_port: {{ instance.listener_port }}
receptor_port: {{ listener_port }}
{% else %}
receptor_listener: false
{% endif %}
{% if peers %}
receptor_peers:
{% for peer in peers %}
- host: {{ peer.host }}
port: {{ peer.port }}
protocol: tcp
- address: {{ peer.address }}
protocol: {{ peer.protocol }}
{% endfor %}
{% endif %}
{% verbatim %}
@@ -1,4 +1,4 @@
---
collections:
- name: ansible.receptor
version: 2.0.2
version: 2.0.3
@@ -10,6 +10,7 @@ from awx.api.views import (
InstanceInstanceGroupsList,
InstanceHealthCheck,
InstancePeersList,
InstanceReceptorAddressesList,
)
from awx.api.views.instance_install_bundle import InstanceInstallBundle

@@ -21,6 +22,7 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
re_path(r'^(?P<pk>[0-9]+)/receptor_addresses/$', InstanceReceptorAddressesList.as_view(), name='instance_receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
]
awx/api/urls/receptor_address.py (17, new file)

@@ -0,0 +1,17 @@
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import (
ReceptorAddressesList,
ReceptorAddressDetail,
)

urls = [
re_path(r'^$', ReceptorAddressesList.as_view(), name='receptor_addresses_list'),
re_path(r'^(?P<pk>[0-9]+)/$', ReceptorAddressDetail.as_view(), name='receptor_address_detail'),
]

__all__ = ['urls']
@@ -85,6 +85,7 @@ from .oauth2_root import urls as oauth2_root_urls
from .workflow_approval_template import urls as workflow_approval_template_urls
from .workflow_approval import urls as workflow_approval_urls
from .analytics import urls as analytics_urls
from .receptor_address import urls as receptor_address_urls

v2_urls = [
re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -155,6 +156,7 @@ v2_urls = [
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
re_path(r'^receptor_addresses/', include(receptor_address_urls)),
]
@@ -24,6 +24,10 @@ def drf_reverse(viewname, args=None, kwargs=None, request=None, format=None, **e
else:
url = _reverse(viewname, args, kwargs, request, format, **extra)

if settings.OPTIONAL_API_URLPATTERN_PREFIX and request:
if request.path.startswith(f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}"):
url = url.replace('/api', f"/api/{settings.OPTIONAL_API_URLPATTERN_PREFIX}")

return url
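A small sketch of the URL rewrite added to drf_reverse() above, with the Django settings lookup replaced by plain parameters (names are illustrative):

```python
def apply_optional_api_prefix(url: str, request_path: str, prefix: str) -> str:
    """Rewrite /api/... links to /api/<prefix>/... when the incoming request used the prefixed path."""
    if prefix and request_path.startswith(f"/api/{prefix}"):
        return url.replace('/api', f"/api/{prefix}")
    return url


print(apply_optional_api_prefix('/api/v2/instances/', '/api/controller/v2/instances/', 'controller'))
# -> /api/controller/v2/instances/
```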
@@ -272,16 +272,24 @@ class DashboardJobsGraphView(APIView):

success_query = user_unified_jobs.filter(status='successful')
failed_query = user_unified_jobs.filter(status='failed')
canceled_query = user_unified_jobs.filter(status='canceled')
error_query = user_unified_jobs.filter(status='error')

if job_type == 'inv_sync':
success_query = success_query.filter(instance_of=models.InventoryUpdate)
failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
error_query = error_query.filter(instance_of=models.InventoryUpdate)
elif job_type == 'playbook_run':
success_query = success_query.filter(instance_of=models.Job)
failed_query = failed_query.filter(instance_of=models.Job)
canceled_query = canceled_query.filter(instance_of=models.Job)
error_query = error_query.filter(instance_of=models.Job)
elif job_type == 'scm_update':
success_query = success_query.filter(instance_of=models.ProjectUpdate)
failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
error_query = error_query.filter(instance_of=models.ProjectUpdate)

end = now()
interval = 'day'
@@ -297,10 +305,12 @@ class DashboardJobsGraphView(APIView):
else:
return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)

dashboard_data = {"jobs": {"successful": [], "failed": []}}
dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}}

succ_list = dashboard_data['jobs']['successful']
fail_list = dashboard_data['jobs']['failed']
canceled_list = dashboard_data['jobs']['canceled']
error_list = dashboard_data['jobs']['error']

qs_s = (
success_query.filter(finished__range=(start, end))
@@ -318,6 +328,22 @@ class DashboardJobsGraphView(APIView):
.annotate(agg=Count('id', distinct=True))
)
data_f = {item['d']: item['agg'] for item in qs_f}
qs_c = (
canceled_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_c = {item['d']: item['agg'] for item in qs_c}
qs_e = (
error_query.filter(finished__range=(start, end))
.annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
.order_by()
.values('d')
.annotate(agg=Count('id', distinct=True))
)
data_e = {item['d']: item['agg'] for item in qs_e}

start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
for d in itertools.count():
@@ -326,6 +352,8 @@ class DashboardJobsGraphView(APIView):
break
succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])

return Response(dashboard_data)
@@ -337,12 +365,20 @@ class InstanceList(ListCreateAPIView):
|
||||
search_fields = ('hostname',)
|
||||
ordering = ('id',)
|
||||
|
||||
def get_queryset(self):
|
||||
qs = super().get_queryset().prefetch_related('receptor_addresses')
|
||||
return qs
|
||||
|
||||
|
||||
class InstanceDetail(RetrieveUpdateAPIView):
|
||||
name = _("Instance Detail")
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
|
||||
def get_queryset(self):
|
||||
qs = super().get_queryset().prefetch_related('receptor_addresses')
|
||||
return qs
|
||||
|
||||
def update_raw_data(self, data):
|
||||
# these fields are only valid on creation of an instance, so they unwanted on detail view
|
||||
data.pop('node_type', None)
|
||||
@@ -375,13 +411,37 @@ class InstanceUnifiedJobsList(SubListAPIView):
|
||||
|
||||
|
||||
class InstancePeersList(SubListAPIView):
|
||||
name = _("Instance Peers")
|
||||
name = _("Peers")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
parent_model = models.Instance
|
||||
model = models.Instance
|
||||
serializer_class = serializers.InstanceSerializer
|
||||
parent_access = 'read'
|
||||
search_fields = {'hostname'}
|
||||
relationship = 'peers'
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class InstanceReceptorAddressesList(SubListAPIView):
|
||||
name = _("Receptor Addresses")
|
||||
model = models.ReceptorAddress
|
||||
parent_key = 'instance'
|
||||
parent_model = models.Instance
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class ReceptorAddressesList(ListAPIView):
|
||||
name = _("Receptor Addresses")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
search_fields = ('address',)
|
||||
|
||||
|
||||
class ReceptorAddressDetail(RetrieveAPIView):
|
||||
name = _("Receptor Address Detail")
|
||||
model = models.ReceptorAddress
|
||||
serializer_class = serializers.ReceptorAddressSerializer
|
||||
parent_model = models.Instance
|
||||
relationship = 'receptor_addresses'
|
||||
|
||||
|
||||
class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
|
||||
|
||||
@@ -124,10 +124,19 @@ def generate_inventory_yml(instance_obj):
|
||||
|
||||
|
||||
def generate_group_vars_all_yml(instance_obj):
|
||||
# get peers
|
||||
peers = []
|
||||
for instance in instance_obj.peers.all():
|
||||
peers.append(dict(host=instance.hostname, port=instance.listener_port))
|
||||
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
|
||||
for addr in instance_obj.peers.select_related('instance'):
|
||||
peers.append(dict(address=addr.get_full_address(), protocol=addr.protocol))
|
||||
context = dict(instance=instance_obj, peers=peers)
|
||||
|
||||
canonical_addr = instance_obj.canonical_address
|
||||
if canonical_addr:
|
||||
context['listener_port'] = canonical_addr.port
|
||||
protocol = canonical_addr.protocol if canonical_addr.protocol != 'wss' else 'ws'
|
||||
context['listener_protocol'] = protocol
|
||||
|
||||
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
|
||||
# convert consecutive newlines with a single newline
|
||||
return re.sub(r'\n+', '\n', all_yaml)
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ class MeshVisualizer(APIView):
|
||||
def get(self, request, format=None):
|
||||
data = {
|
||||
'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
|
||||
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data,
|
||||
'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target__instance', 'source'), many=True).data,
|
||||
}
|
||||
|
||||
return Response(data)
|
||||
|
||||
@@ -13,6 +13,7 @@ from django.utils.decorators import method_decorator
|
||||
from django.views.decorators.csrf import ensure_csrf_cookie
|
||||
from django.template.loader import render_to_string
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.urls import reverse as django_reverse
|
||||
|
||||
from rest_framework.permissions import AllowAny, IsAuthenticated
|
||||
from rest_framework.response import Response
|
||||
@@ -84,6 +85,7 @@ class ApiVersionRootView(APIView):
|
||||
data['ping'] = reverse('api:api_v2_ping_view', request=request)
|
||||
data['instances'] = reverse('api:instance_list', request=request)
|
||||
data['instance_groups'] = reverse('api:instance_group_list', request=request)
|
||||
data['receptor_addresses'] = reverse('api:receptor_addresses_list', request=request)
|
||||
data['config'] = reverse('api:api_v2_config_view', request=request)
|
||||
data['settings'] = reverse('api:setting_category_list', request=request)
|
||||
data['me'] = reverse('api:user_me_list', request=request)
|
||||
@@ -129,6 +131,7 @@ class ApiVersionRootView(APIView):
|
||||
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
|
||||
data['bulk'] = reverse('api:bulk', request=request)
|
||||
data['analytics'] = reverse('api:analytics_root_view', request=request)
|
||||
data['service_index'] = django_reverse('service-index-root')
|
||||
return Response(data)
|
||||
|
||||
|
||||
|
||||
@@ -55,6 +55,7 @@ register(
|
||||
# Optional; category_slug will be slugified version of category if not
|
||||
# explicitly provided.
|
||||
category_slug='cows',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -127,6 +127,8 @@ class SettingsRegistry(object):
|
||||
encrypted = bool(field_kwargs.pop('encrypted', False))
|
||||
defined_in_file = bool(field_kwargs.pop('defined_in_file', False))
|
||||
unit = field_kwargs.pop('unit', None)
|
||||
hidden = field_kwargs.pop('hidden', False)
|
||||
warning_text = field_kwargs.pop('warning_text', None)
|
||||
if getattr(field_kwargs.get('child', None), 'source', None) is not None:
|
||||
field_kwargs['child'].source = None
|
||||
field_instance = field_class(**field_kwargs)
|
||||
@@ -134,12 +136,14 @@ class SettingsRegistry(object):
|
||||
field_instance.category = category
|
||||
field_instance.depends_on = depends_on
|
||||
field_instance.unit = unit
|
||||
field_instance.hidden = hidden
|
||||
if placeholder is not empty:
|
||||
field_instance.placeholder = placeholder
|
||||
field_instance.defined_in_file = defined_in_file
|
||||
if field_instance.defined_in_file:
|
||||
field_instance.help_text = str(_('This value has been set manually in a settings file.')) + '\n\n' + str(field_instance.help_text)
|
||||
field_instance.encrypted = encrypted
|
||||
field_instance.warning_text = warning_text
|
||||
original_field_instance = field_instance
|
||||
if field_class != original_field_class:
|
||||
original_field_instance = original_field_class(**field_kwargs)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Python
|
||||
import contextlib
|
||||
import logging
|
||||
import psycopg
|
||||
import threading
|
||||
import time
|
||||
import os
|
||||
@@ -13,7 +14,7 @@ from django.conf import settings, UserSettingsHolder
|
||||
from django.core.cache import cache as django_cache
|
||||
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
|
||||
from django.db import transaction, connection
|
||||
from django.db.utils import Error as DBError, ProgrammingError
|
||||
from django.db.utils import DatabaseError, ProgrammingError
|
||||
from django.utils.functional import cached_property
|
||||
|
||||
# Django REST Framework
|
||||
@@ -80,18 +81,26 @@ def _ctit_db_wrapper(trans_safe=False):
|
||||
logger.debug('Obtaining database settings in spite of broken transaction.')
|
||||
transaction.set_rollback(False)
|
||||
yield
|
||||
except DBError as exc:
|
||||
except ProgrammingError as e:
|
||||
# Exception raised for programming errors
|
||||
# Examples may be table not found or already exists,
|
||||
# this generally means we can't fetch Tower configuration
|
||||
# because the database hasn't actually finished migrating yet;
|
||||
# this is usually a sign that a service in a container (such as ws_broadcast)
|
||||
# has come up *before* the database has finished migrating, and
|
||||
# especially that the conf.settings table doesn't exist yet
|
||||
# syntax error in the SQL statement, wrong number of parameters specified, etc.
|
||||
if trans_safe:
|
||||
level = logger.warning
|
||||
if isinstance(exc, ProgrammingError):
|
||||
if 'relation' in str(exc) and 'does not exist' in str(exc):
|
||||
# this generally means we can't fetch Tower configuration
|
||||
# because the database hasn't actually finished migrating yet;
|
||||
# this is usually a sign that a service in a container (such as ws_broadcast)
|
||||
# has come up *before* the database has finished migrating, and
|
||||
# especially that the conf.settings table doesn't exist yet
|
||||
level = logger.debug
|
||||
level(f'Database settings are not available, using defaults. error: {str(exc)}')
|
||||
logger.debug(f'Database settings are not available, using defaults. error: {str(e)}')
|
||||
else:
|
||||
logger.exception('Error modifying something related to database settings.')
|
||||
except DatabaseError as e:
|
||||
if trans_safe:
|
||||
cause = e.__cause__
|
||||
if cause and hasattr(cause, 'sqlstate'):
|
||||
sqlstate = cause.sqlstate
|
||||
sqlstate_str = psycopg.errors.lookup(sqlstate)
|
||||
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
|
||||
else:
|
||||
logger.exception('Error modifying something related to database settings.')
|
||||
finally:
|
||||
|
||||
@@ -57,6 +57,7 @@ from awx.main.models import (
|
||||
Project,
|
||||
ProjectUpdate,
|
||||
ProjectUpdateEvent,
|
||||
ReceptorAddress,
|
||||
Role,
|
||||
Schedule,
|
||||
SystemJob,
|
||||
@@ -638,7 +639,10 @@ class UserAccess(BaseAccess):
|
||||
"""
|
||||
|
||||
model = User
|
||||
prefetch_related = ('profile',)
|
||||
prefetch_related = (
|
||||
'profile',
|
||||
'resource',
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()):
|
||||
@@ -834,6 +838,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
prefetch_related = (
|
||||
'created_by',
|
||||
'modified_by',
|
||||
'resource', # dab_resource_registry
|
||||
)
|
||||
# organization admin_role is not a parent of organization auditor_role
|
||||
notification_attach_roles = ['admin_role', 'auditor_role']
|
||||
@@ -1302,6 +1307,7 @@ class TeamAccess(BaseAccess):
|
||||
'created_by',
|
||||
'modified_by',
|
||||
'organization',
|
||||
'resource', # dab_resource_registry
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
@@ -2430,6 +2436,29 @@ class InventoryUpdateEventAccess(BaseAccess):
|
||||
return False
|
||||
|
||||
|
||||
class ReceptorAddressAccess(BaseAccess):
|
||||
"""
|
||||
I can see receptor address records whenever I can access the instance
|
||||
"""
|
||||
|
||||
model = ReceptorAddress
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(Q(instance__in=Instance.accessible_pk_qs(self.user, 'read_role')))
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
return False
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
return False
|
||||
|
||||
@check_superuser
|
||||
def can_delete(self, obj):
|
||||
return False
|
||||
|
||||
|
||||
class SystemJobEventAccess(BaseAccess):
|
||||
"""
|
||||
I can only see manage System Jobs events if I'm a super user
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
import logging
|
||||
|
||||
# AWX
|
||||
from awx.main.analytics.subsystem_metrics import Metrics
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
|
||||
@@ -11,4 +11,5 @@ logger = logging.getLogger('awx.main.scheduler')
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def send_subsystem_metrics():
|
||||
Metrics().send_metrics()
|
||||
DispatcherMetrics().send_metrics()
|
||||
CallbackReceiverMetrics().send_metrics()
|
||||
|
||||
@@ -419,7 +419,7 @@ def _events_table(since, full_path, until, tbl, where_column, project_job_create
|
||||
resolved_action,
|
||||
resolved_role,
|
||||
-- '-' operator listed here:
|
||||
-- https://www.postgresql.org/docs/12/functions-json.html
|
||||
-- https://www.postgresql.org/docs/15/functions-json.html
|
||||
-- note that operator is only supported by jsonb objects
|
||||
-- https://www.postgresql.org/docs/current/datatype-json.html
|
||||
(CASE WHEN event = 'playbook_on_stats' THEN {event_data} - 'artifact_data' END) as playbook_on_stats,
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import itertools
|
||||
import redis
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
|
||||
import prometheus_client
|
||||
from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
|
||||
from prometheus_client.registry import CollectorRegistry
|
||||
from django.conf import settings
|
||||
from django.apps import apps
|
||||
from django.http import HttpRequest
|
||||
from rest_framework.request import Request
|
||||
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main.utils import is_testing
|
||||
@@ -13,6 +18,30 @@ root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
|
||||
logger = logging.getLogger('awx.main.analytics')
|
||||
|
||||
|
||||
class MetricsNamespace:
|
||||
def __init__(self, namespace):
|
||||
self._namespace = namespace
|
||||
|
||||
|
||||
class MetricsServerSettings(MetricsNamespace):
|
||||
def port(self):
|
||||
return settings.METRICS_SUBSYSTEM_CONFIG['server'][self._namespace]['port']
|
||||
|
||||
|
||||
class MetricsServer(MetricsServerSettings):
|
||||
def __init__(self, namespace, registry):
|
||||
MetricsNamespace.__init__(self, namespace)
|
||||
self._registry = registry
|
||||
|
||||
def start(self):
|
||||
try:
|
||||
# TODO: addr for ipv6 ?
|
||||
prometheus_client.start_http_server(self.port(), addr='localhost', registry=self._registry)
|
||||
except Exception:
|
||||
logger.error(f"MetricsServer failed to start for service '{self._namespace}.")
|
||||
raise
|
||||
|
||||
|
||||
class BaseM:
|
||||
def __init__(self, field, help_text):
|
||||
self.field = field
|
||||
@@ -148,76 +177,40 @@ class HistogramM(BaseM):
|
||||
return output_text
|
||||
|
||||
|
||||
class Metrics:
|
||||
def __init__(self, auto_pipe_execute=False, instance_name=None):
|
||||
class Metrics(MetricsNamespace):
|
||||
# metric name, help_text
|
||||
METRICSLIST = []
|
||||
_METRICSLIST = [
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
]
|
||||
|
||||
def __init__(self, namespace, auto_pipe_execute=False, instance_name=None, metrics_have_changed=True, **kwargs):
|
||||
MetricsNamespace.__init__(self, namespace)
|
||||
|
||||
self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
|
||||
self.conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.last_pipe_execute = time.time()
|
||||
# track if metrics have been modified since last saved to redis
|
||||
# start with True so that we get an initial save to redis
|
||||
self.metrics_have_changed = True
|
||||
self.metrics_have_changed = metrics_have_changed
|
||||
self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
|
||||
self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
|
||||
# auto pipe execute will commit transaction of metric data to redis
|
||||
# at a regular interval (pipe_execute_interval). If set to False,
|
||||
# the calling function should call .pipe_execute() explicitly
|
||||
self.auto_pipe_execute = auto_pipe_execute
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
if instance_name:
|
||||
self.instance_name = instance_name
|
||||
elif is_testing():
|
||||
self.instance_name = "awx_testing"
|
||||
else:
|
||||
self.instance_name = Instance.objects.my_hostname()
|
||||
self.instance_name = settings.CLUSTER_HOST_ID # Same as Instance.objects.my_hostname() BUT we do not need to import Instance
|
||||
|
||||
# metric name, help_text
|
||||
METRICSLIST = [
|
||||
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
|
||||
IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
|
||||
FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
|
||||
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
|
||||
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
|
||||
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
|
||||
# dispatcher subsystem metrics
|
||||
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
|
||||
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
|
||||
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
|
||||
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
|
||||
]
|
||||
# turn metric list into dictionary with the metric name as a key
|
||||
self.METRICS = {}
|
||||
for m in METRICSLIST:
|
||||
for m in itertools.chain(self.METRICSLIST, self._METRICSLIST):
|
||||
self.METRICS[m.field] = m
|
||||
|
||||
# track last time metrics were sent to other nodes
|
||||
@@ -230,7 +223,7 @@ class Metrics:
|
||||
m.reset_value(self.conn)
|
||||
self.metrics_have_changed = True
|
||||
self.conn.delete(root_key + "_lock")
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
|
||||
self.conn.delete(m)
|
||||
|
||||
def inc(self, field, value):
|
||||
@@ -297,7 +290,7 @@ class Metrics:
|
||||
def send_metrics(self):
|
||||
# more than one thread could be calling this at the same time, so should
|
||||
# acquire redis lock before sending metrics
|
||||
lock = self.conn.lock(root_key + '_lock')
|
||||
lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
|
||||
if not lock.acquire(blocking=False):
|
||||
return
|
||||
try:
|
||||
@@ -307,9 +300,10 @@ class Metrics:
|
||||
payload = {
|
||||
'instance': self.instance_name,
|
||||
'metrics': serialized_metrics,
|
||||
'metrics_namespace': self._namespace,
|
||||
}
|
||||
# store the serialized data locally as well, so that load_other_metrics will read it
|
||||
self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
|
||||
self.conn.set(root_key + '-' + self._namespace + '_instance_' + self.instance_name, serialized_metrics)
|
||||
emit_channel_notification("metrics", payload)
|
||||
|
||||
self.previous_send_metrics.set(current_time)
|
||||
@@ -331,14 +325,14 @@ class Metrics:
|
||||
instances_filter = request.query_params.getlist("node")
|
||||
# get a sorted list of instance names
|
||||
instance_names = [self.instance_name]
|
||||
for m in self.conn.scan_iter(root_key + '_instance_*'):
|
||||
for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
|
||||
instance_names.append(m.decode('UTF-8').split('_instance_')[1])
|
||||
instance_names.sort()
|
||||
# load data, including data from the this local instance
|
||||
instance_data = {}
|
||||
for instance in instance_names:
|
||||
if len(instances_filter) == 0 or instance in instances_filter:
|
||||
instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance)
|
||||
instance_data_from_redis = self.conn.get(root_key + '-' + self._namespace + '_instance_' + instance)
|
||||
# data from other instances may not be available. That is OK.
|
||||
if instance_data_from_redis:
|
||||
instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8'))
|
||||
@@ -357,6 +351,120 @@ class Metrics:
|
||||
return output_text
|
||||
|
||||
|
||||
class DispatcherMetrics(Metrics):
|
||||
METRICSLIST = [
|
||||
SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
|
||||
SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
|
||||
SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
|
||||
SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
|
||||
SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('task_manager_tasks_started', 'Number of tasks started'),
|
||||
SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
|
||||
SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
|
||||
SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
|
||||
SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
|
||||
SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
|
||||
SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
|
||||
SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
|
||||
IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
|
||||
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
|
||||
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
|
||||
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
|
||||
# dispatcher subsystem metrics
|
||||
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
|
||||
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
|
||||
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
|
||||
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(settings.METRICS_SERVICE_DISPATCHER, *args, **kwargs)
|
||||
|
||||
|
||||
class CallbackReceiverMetrics(Metrics):
|
||||
METRICSLIST = [
|
||||
SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
|
||||
IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
|
||||
IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
|
||||
IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
|
||||
FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
|
||||
IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
|
||||
IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
|
||||
HistogramM(
|
||||
'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
|
||||
),
|
||||
SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, *args, **kwargs)
|
||||
|
||||
|
||||
def metrics(request):
|
||||
m = Metrics()
|
||||
return m.generate_metrics(request)
|
||||
output_text = ''
|
||||
for m in [DispatcherMetrics(), CallbackReceiverMetrics()]:
|
||||
output_text += m.generate_metrics(request)
|
||||
return output_text
|
||||
|
||||
|
||||
class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
|
||||
"""
|
||||
Takes the metric data from redis -> our custom metric fields -> prometheus
|
||||
library metric fields.
|
||||
|
||||
The plan is to get rid of the use of redis, our custom metric fields, and
|
||||
to switch fully to the prometheus library. At that point, this translation
|
||||
code will be deleted.
|
||||
"""
|
||||
|
||||
def __init__(self, metrics_obj, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._metrics = metrics_obj
|
||||
|
||||
def collect(self):
|
||||
my_hostname = settings.CLUSTER_HOST_ID
|
||||
|
||||
instance_data = self._metrics.load_other_metrics(Request(HttpRequest()))
|
||||
if not instance_data:
|
||||
logger.debug(f"No metric data not found in redis for metric namespace '{self._metrics._namespace}'")
|
||||
return None
|
||||
|
||||
host_metrics = instance_data.get(my_hostname)
|
||||
for _, metric in self._metrics.METRICS.items():
|
||||
entry = host_metrics.get(metric.field)
|
||||
if not entry:
|
||||
logger.debug(f"{self._metrics._namespace} metric '{metric.field}' not found in redis data payload {json.dumps(instance_data, indent=2)}")
|
||||
continue
|
||||
if isinstance(metric, HistogramM):
|
||||
buckets = list(zip(metric.buckets, entry['counts']))
|
||||
buckets = [[str(i[0]), str(i[1])] for i in buckets]
|
||||
yield HistogramMetricFamily(metric.field, metric.help_text, buckets=buckets, sum_value=entry['sum'])
|
||||
else:
|
||||
yield GaugeMetricFamily(metric.field, metric.help_text, value=entry)
|
||||
|
||||
|
||||
class CallbackReceiverMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
registry.register(CustomToPrometheusMetricsCollector(DispatcherMetrics(metrics_have_changed=False)))
|
||||
super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, registry)
|
||||
|
||||
|
||||
class DispatcherMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
registry.register(CustomToPrometheusMetricsCollector(CallbackReceiverMetrics(metrics_have_changed=False)))
|
||||
super().__init__(settings.METRICS_SERVICE_DISPATCHER, registry)
|
||||
|
||||
|
||||
class WebsocketsMetricsServer(MetricsServer):
|
||||
def __init__(self):
|
||||
registry = CollectorRegistry(auto_describe=True)
|
||||
# registry.register()
|
||||
super().__init__(settings.METRICS_SERVICE_WEBSOCKETS, registry)
|
||||
|
||||
@@ -92,6 +92,7 @@ register(
|
||||
),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -774,6 +775,7 @@ register(
|
||||
allow_null=True,
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
required=False,
|
||||
)
|
||||
register(
|
||||
'AUTOMATION_ANALYTICS_LAST_ENTRIES',
|
||||
@@ -815,6 +817,7 @@ register(
|
||||
help_text=_('Max jobs to allow bulk jobs to launch'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -825,6 +828,7 @@ register(
|
||||
help_text=_('Max number of hosts to allow to be created in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -835,6 +839,7 @@ register(
|
||||
help_text=_('Max number of hosts to allow to be deleted in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
@@ -845,6 +850,7 @@ register(
|
||||
help_text=_('Enable preview of new user interface.'),
|
||||
category=_('System'),
|
||||
category_slug='system',
|
||||
hidden=True,
|
||||
)
|
||||
|
||||
register(
|
||||
|
||||
@@ -14,7 +14,7 @@ __all__ = [
|
||||
'STANDARD_INVENTORY_UPDATE_ENV',
|
||||
]
|
||||
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights')
|
||||
CLOUD_PROVIDERS = ('azure_rm', 'ec2', 'gce', 'vmware', 'openstack', 'rhv', 'satellite6', 'controller', 'insights', 'terraform')
|
||||
PRIVILEGE_ESCALATION_METHODS = [
|
||||
('sudo', _('Sudo')),
|
||||
('su', _('Su')),
|
||||
|
||||
@@ -106,7 +106,7 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
|
||||
if group == "metrics":
|
||||
message = json.loads(message['text'])
|
||||
conn = redis.Redis.from_url(settings.BROKER_URL)
|
||||
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
|
||||
conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "-" + message['metrics_namespace'] + "_instance_" + message['instance'], message['metrics'])
|
||||
else:
|
||||
await self.channel_layer.group_send(group, message)
|
||||
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
from azure.keyvault.secrets import SecretClient
|
||||
from azure.identity import ClientSecretCredential
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
from .plugin import CredentialPlugin
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
|
||||
from azure.common.credentials import ServicePrincipalCredentials
|
||||
from msrestazure import azure_cloud
|
||||
|
||||
|
||||
# https://github.com/Azure/msrestazure-for-python/blob/master/msrestazure/azure_cloud.py
|
||||
@@ -54,22 +55,9 @@ azure_keyvault_inputs = {
|
||||
|
||||
|
||||
def azure_keyvault_backend(**kwargs):
|
||||
url = kwargs['url']
|
||||
[cloud] = [c for c in clouds if c.name == kwargs.get('cloud_name', default_cloud.name)]
|
||||
|
||||
def auth_callback(server, resource, scope):
|
||||
credentials = ServicePrincipalCredentials(
|
||||
url=url,
|
||||
client_id=kwargs['client'],
|
||||
secret=kwargs['secret'],
|
||||
tenant=kwargs['tenant'],
|
||||
resource=f"https://{cloud.suffixes.keyvault_dns.split('.', 1).pop()}",
|
||||
)
|
||||
token = credentials.token
|
||||
return token['token_type'], token['access_token']
|
||||
|
||||
kv = KeyVaultClient(KeyVaultAuthentication(auth_callback))
|
||||
return kv.get_secret(url, kwargs['secret_field'], kwargs.get('secret_version', '')).value
|
||||
csc = ClientSecretCredential(tenant_id=kwargs['tenant'], client_id=kwargs['client'], client_secret=kwargs['secret'])
|
||||
kv = SecretClient(credential=csc, vault_url=kwargs['url'])
|
||||
return kv.get_secret(name=kwargs['secret_field'], version=kwargs.get('secret_version', '')).value
|
||||
|
||||
|
||||
azure_keyvault_plugin = CredentialPlugin('Microsoft Azure Key Vault', inputs=azure_keyvault_inputs, backend=azure_keyvault_backend)
|
||||
|
||||
@@ -105,7 +105,11 @@ def create_listener_connection():
|
||||
for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
|
||||
conf['OPTIONS'][k] = v
|
||||
|
||||
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
|
||||
# Allow password-less authentication
|
||||
if 'PASSWORD' in conf:
|
||||
conf['OPTIONS']['password'] = conf.pop('PASSWORD')
|
||||
|
||||
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
|
||||
return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
|
||||
|
||||
|
||||
|
||||
@@ -162,13 +162,13 @@ class AWXConsumerRedis(AWXConsumerBase):
|
||||
class AWXConsumerPG(AWXConsumerBase):
|
||||
def __init__(self, *args, schedule=None, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
|
||||
self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE)
|
||||
# if no successful loops have ran since startup, then we should fail right away
|
||||
self.pg_is_down = True # set so that we fail if we get database errors on startup
|
||||
init_time = time.time()
|
||||
self.pg_down_time = init_time - self.pg_max_wait # allow no grace period
|
||||
self.last_cleanup = init_time
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
|
||||
self.last_metrics_gather = init_time
|
||||
self.listen_cumulative_time = 0.0
|
||||
if schedule:
|
||||
@@ -259,6 +259,12 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
current_downtime = time.time() - self.pg_down_time
|
||||
if current_downtime > self.pg_max_wait:
|
||||
logger.exception(f"Postgres event consumer has not recovered in {current_downtime} s, exiting")
|
||||
# Sending QUIT to multiprocess queue to signal workers to exit
|
||||
for worker in self.pool.workers:
|
||||
try:
|
||||
worker.quit()
|
||||
except Exception:
|
||||
logger.exception(f"Error sending QUIT to worker {worker}")
|
||||
raise
|
||||
# Wait for a second before next attempt, but still listen for any shutdown signals
|
||||
for i in range(10):
|
||||
@@ -270,6 +276,12 @@ class AWXConsumerPG(AWXConsumerBase):
|
||||
except Exception:
|
||||
# Log unanticipated exception in addition to writing to stderr to get timestamps and other metadata
|
||||
logger.exception('Encountered unhandled error in dispatcher main loop')
|
||||
# Sending QUIT to multiprocess queue to signal workers to exit
|
||||
for worker in self.pool.workers:
|
||||
try:
|
||||
worker.quit()
|
||||
except Exception:
|
||||
logger.exception(f"Error sending QUIT to worker {worker}")
|
||||
raise
|
||||
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ class CallbackBrokerWorker(BaseWorker):
|
||||
def __init__(self):
|
||||
self.buff = {}
|
||||
self.redis = redis.Redis.from_url(settings.BROKER_URL)
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.subsystem_metrics = s_metrics.CallbackReceiverMetrics(auto_pipe_execute=False)
|
||||
self.queue_pop = 0
|
||||
self.queue_name = settings.CALLBACK_QUEUE
|
||||
self.prof = AWXProfiler("CallbackBrokerWorker")
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
import copy
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import urllib.parse
|
||||
|
||||
from jinja2 import sandbox, StrictUndefined
|
||||
@@ -406,11 +407,13 @@ class SmartFilterField(models.TextField):
|
||||
# https://docs.python.org/2/library/stdtypes.html#truth-value-testing
|
||||
if not value:
|
||||
return None
|
||||
value = urllib.parse.unquote(value)
|
||||
try:
|
||||
SmartFilter().query_from_string(value)
|
||||
except RuntimeError as e:
|
||||
raise models.base.ValidationError(e)
|
||||
# avoid doing too much during migrations
|
||||
if 'migrate' not in sys.argv:
|
||||
value = urllib.parse.unquote(value)
|
||||
try:
|
||||
SmartFilter().query_from_string(value)
|
||||
except RuntimeError as e:
|
||||
raise models.base.ValidationError(e)
|
||||
return super(SmartFilterField, self).get_prep_value(value)
|
||||
|
||||
|
||||
|
||||
53
awx/main/management/commands/add_receptor_address.py
Normal file
53
awx/main/management/commands/add_receptor_address.py
Normal file
@@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from awx.main.models import Instance, ReceptorAddress
|
||||
|
||||
|
||||
def add_address(**kwargs):
|
||||
try:
|
||||
instance = Instance.objects.get(hostname=kwargs.pop('instance'))
|
||||
kwargs['instance'] = instance
|
||||
|
||||
if kwargs.get('canonical') and instance.receptor_addresses.filter(canonical=True).exclude(address=kwargs['address']).exists():
|
||||
print(f"Instance {instance.hostname} already has a canonical address, skipping")
|
||||
return False
|
||||
# if ReceptorAddress already exists with address, just update
|
||||
# otherwise, create new ReceptorAddress
|
||||
addr, _ = ReceptorAddress.objects.update_or_create(address=kwargs.pop('address'), defaults=kwargs)
|
||||
print(f"Successfully added receptor address {addr.get_full_address()}")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"Error adding receptor address: {e}")
|
||||
return False
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
"""
|
||||
Internal controller command.
|
||||
Register receptor address to an already-registered instance.
|
||||
"""
|
||||
|
||||
help = "Add receptor address to an instance."
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--instance', dest='instance', required=True, type=str, help="Instance hostname this address is added to")
|
||||
parser.add_argument('--address', dest='address', required=True, type=str, help="Receptor address")
|
||||
parser.add_argument('--port', dest='port', type=int, help="Receptor listener port")
|
||||
parser.add_argument('--websocket_path', dest='websocket_path', type=str, default="", help="Path for websockets")
|
||||
parser.add_argument('--is_internal', action='store_true', help="If true, address only resolvable within the Kubernetes cluster")
|
||||
parser.add_argument('--protocol', type=str, default='tcp', choices=['tcp', 'ws', 'wss'], help="Protocol to use for the Receptor listener")
|
||||
parser.add_argument('--canonical', action='store_true', help="If true, address is the canonical address for the instance")
|
||||
parser.add_argument('--peers_from_control_nodes', action='store_true', help="If true, control nodes will peer to this address")
|
||||
|
||||
def handle(self, **options):
|
||||
address_options = {
|
||||
k: options[k]
|
||||
for k in ('instance', 'address', 'port', 'websocket_path', 'is_internal', 'protocol', 'peers_from_control_nodes', 'canonical')
|
||||
if options[k]
|
||||
}
|
||||
changed = add_address(**address_options)
|
||||
if changed:
|
||||
print("(changed: True)")
|
||||
179
awx/main/management/commands/dump_auth_config.py
Normal file
179
awx/main/management/commands/dump_auth_config.py
Normal file
@@ -0,0 +1,179 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
from typing import Any
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.conf import settings
|
||||
from awx.conf import settings_registry
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Dump the current auth configuration in django_ansible_base.authenticator format, currently supports LDAP and SAML'
|
||||
|
||||
DAB_SAML_AUTHENTICATOR_KEYS = {
|
||||
"SP_ENTITY_ID": True,
|
||||
"SP_PUBLIC_CERT": True,
|
||||
"SP_PRIVATE_KEY": True,
|
||||
"ORG_INFO": True,
|
||||
"TECHNICAL_CONTACT": True,
|
||||
"SUPPORT_CONTACT": True,
|
||||
"SP_EXTRA": False,
|
||||
"SECURITY_CONFIG": False,
|
||||
"EXTRA_DATA": False,
|
||||
"ENABLED_IDPS": True,
|
||||
"CALLBACK_URL": False,
|
||||
}
|
||||
|
||||
DAB_LDAP_AUTHENTICATOR_KEYS = {
|
||||
"SERVER_URI": True,
|
||||
"BIND_DN": False,
|
||||
"BIND_PASSWORD": False,
|
||||
"CONNECTION_OPTIONS": False,
|
||||
"GROUP_TYPE": True,
|
||||
"GROUP_TYPE_PARAMS": True,
|
||||
"GROUP_SEARCH": False,
|
||||
"START_TLS": False,
|
||||
"USER_DN_TEMPLATE": True,
|
||||
"USER_ATTR_MAP": True,
|
||||
"USER_SEARCH": False,
|
||||
}
|
||||
|
||||
def get_awx_ldap_settings(self) -> dict[str, dict[str, Any]]:
|
||||
awx_ldap_settings = {}
|
||||
|
||||
for awx_ldap_setting in settings_registry.get_registered_settings(category_slug='ldap'):
|
||||
key = awx_ldap_setting.removeprefix("AUTH_LDAP_")
|
||||
value = getattr(settings, awx_ldap_setting, None)
|
||||
awx_ldap_settings[key] = value
|
||||
|
||||
grouped_settings = {}
|
||||
|
||||
for key, value in awx_ldap_settings.items():
|
||||
match = re.search(r'(\d+)', key)
|
||||
index = int(match.group()) if match else 0
|
||||
new_key = re.sub(r'\d+_', '', key)
|
||||
|
||||
if index not in grouped_settings:
|
||||
grouped_settings[index] = {}
|
||||
|
||||
grouped_settings[index][new_key] = value
|
||||
if new_key == "GROUP_TYPE" and value:
|
||||
grouped_settings[index][new_key] = type(value).__name__
|
||||
|
||||
if new_key == "SERVER_URI" and value:
|
||||
value = value.split(", ")
|
||||
|
||||
return grouped_settings
|
||||
|
||||
def is_enabled(self, settings, keys):
|
||||
for key, required in keys.items():
|
||||
if required and not settings.get(key):
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_awx_saml_settings(self) -> dict[str, Any]:
|
||||
awx_saml_settings = {}
|
||||
for awx_saml_setting in settings_registry.get_registered_settings(category_slug='saml'):
|
||||
awx_saml_settings[awx_saml_setting.removeprefix("SOCIAL_AUTH_SAML_")] = getattr(settings, awx_saml_setting, None)
|
||||
|
||||
return awx_saml_settings
|
||||
|
||||
def format_config_data(self, enabled, awx_settings, type, keys, name):
|
||||
config = {
|
||||
"type": f"awx.authentication.authenticator_plugins.{type}",
|
||||
"name": name,
|
||||
"enabled": enabled,
|
||||
"create_objects": True,
|
||||
"users_unique": False,
|
||||
"remove_users": True,
|
||||
"configuration": {},
|
||||
}
|
||||
for k in keys:
|
||||
v = awx_settings.get(k)
|
||||
config["configuration"].update({k: v})
|
||||
|
||||
if type == "saml":
|
||||
idp_to_key_mapping = {
|
||||
"url": "IDP_URL",
|
||||
"x509cert": "IDP_X509_CERT",
|
||||
"entity_id": "IDP_ENTITY_ID",
|
||||
"attr_email": "IDP_ATTR_EMAIL",
|
||||
"attr_groups": "IDP_GROUPS",
|
||||
"attr_username": "IDP_ATTR_USERNAME",
|
||||
"attr_last_name": "IDP_ATTR_LAST_NAME",
|
||||
"attr_first_name": "IDP_ATTR_FIRST_NAME",
|
||||
"attr_user_permanent_id": "IDP_ATTR_USER_PERMANENT_ID",
|
||||
}
|
||||
for idp_name in awx_settings.get("ENABLED_IDPS", {}):
|
||||
for key in idp_to_key_mapping:
|
||||
value = awx_settings["ENABLED_IDPS"][idp_name].get(key)
|
||||
if value is not None:
|
||||
config["name"] = idp_name
|
||||
config["configuration"].update({idp_to_key_mapping[key]: value})
|
||||
|
||||
return config
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
"output_file",
|
||||
nargs="?",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Output JSON file path",
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
try:
|
||||
data = []
|
||||
|
||||
# dump SAML settings
|
||||
awx_saml_settings = self.get_awx_saml_settings()
|
||||
awx_saml_enabled = self.is_enabled(awx_saml_settings, self.DAB_SAML_AUTHENTICATOR_KEYS)
|
||||
if awx_saml_enabled:
|
||||
awx_saml_name = awx_saml_settings["ENABLED_IDPS"]
|
||||
data.append(
|
||||
self.format_config_data(
|
||||
awx_saml_enabled,
|
||||
awx_saml_settings,
|
||||
"saml",
|
||||
self.DAB_SAML_AUTHENTICATOR_KEYS,
|
||||
awx_saml_name,
|
||||
)
|
||||
)
|
||||
|
||||
# dump LDAP settings
|
||||
awx_ldap_group_settings = self.get_awx_ldap_settings()
|
||||
for awx_ldap_name, awx_ldap_settings in enumerate(awx_ldap_group_settings.values()):
|
||||
enabled = self.is_enabled(awx_ldap_settings, self.DAB_LDAP_AUTHENTICATOR_KEYS)
|
||||
if enabled:
|
||||
data.append(
|
||||
self.format_config_data(
|
||||
enabled,
|
||||
awx_ldap_settings,
|
||||
"ldap",
|
||||
self.DAB_LDAP_AUTHENTICATOR_KEYS,
|
||||
str(awx_ldap_name),
|
||||
)
|
||||
)
|
||||
|
||||
# write to file if requested
|
||||
if options["output_file"]:
|
||||
# Define the path for the output JSON file
|
||||
output_file = options["output_file"]
|
||||
|
||||
# Ensure the directory exists
|
||||
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
||||
|
||||
# Write data to the JSON file
|
||||
with open(output_file, "w") as f:
|
||||
json.dump(data, f, indent=4)
|
||||
|
||||
self.stdout.write(self.style.SUCCESS(f"Auth config data dumped to {output_file}"))
|
||||
else:
|
||||
self.stdout.write(json.dumps(data, indent=4))
|
||||
|
||||
except Exception as e:
|
||||
self.stdout.write(self.style.ERROR(f"An error occurred: {str(e)}"))
|
||||
sys.exit(1)
|
||||
@@ -55,7 +55,7 @@ class Command(BaseCommand):
|
||||
|
||||
capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
|
||||
version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
|
||||
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
|
||||
heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.last_seen else ''
|
||||
print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')
|
||||
|
||||
print()
|
||||
|
||||
@@ -25,20 +25,17 @@ class Command(BaseCommand):
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
|
||||
parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
|
||||
parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
|
||||
parser.add_argument('--uuid', type=str, help="Instance UUID")
|
||||
|
||||
def _register_hostname(self, hostname, node_type, uuid, listener_port):
|
||||
def _register_hostname(self, hostname, node_type, uuid):
|
||||
if not hostname:
|
||||
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
|
||||
raise CommandError('Registering with values from settings only intended for use in K8s installs')
|
||||
|
||||
from awx.main.management.commands.register_queue import RegisterQueue
|
||||
|
||||
(changed, instance) = Instance.objects.register(
|
||||
ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
|
||||
)
|
||||
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID)
|
||||
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
|
||||
RegisterQueue(
|
||||
settings.DEFAULT_EXECUTION_QUEUE_NAME,
|
||||
@@ -51,16 +48,17 @@ class Command(BaseCommand):
|
||||
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
|
||||
).register()
|
||||
else:
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
|
||||
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid)
|
||||
if changed:
|
||||
print("Successfully registered instance {}".format(hostname))
|
||||
else:
|
||||
print("Instance already registered {}".format(instance.hostname))
|
||||
|
||||
self.changed = changed
|
||||
|
||||
@transaction.atomic
|
||||
def handle(self, **options):
|
||||
self.changed = False
|
||||
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
|
||||
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
|
||||
if self.changed:
|
||||
print("(changed: True)")
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
import warnings
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.db import transaction
|
||||
|
||||
from awx.main.models import Instance, InstanceLink
|
||||
from awx.main.models import Instance, InstanceLink, ReceptorAddress
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
@@ -28,7 +26,9 @@ class Command(BaseCommand):
|
||||
|
||||
def handle(self, **options):
|
||||
# provides a mapping of hostname to Instance objects
|
||||
nodes = Instance.objects.in_bulk(field_name='hostname')
|
||||
nodes = Instance.objects.all().in_bulk(field_name='hostname')
|
||||
# provides a mapping of address to ReceptorAddress objects
|
||||
addresses = ReceptorAddress.objects.all().in_bulk(field_name='address')
|
||||
|
||||
if options['source'] not in nodes:
|
||||
raise CommandError(f"Host {options['source']} is not a registered instance.")
|
||||
@@ -39,6 +39,14 @@ class Command(BaseCommand):
|
||||
if options['exact'] is not None and options['disconnect']:
|
||||
raise CommandError("The option --disconnect may not be used with --exact.")
|
||||
|
||||
# make sure each target has a receptor address
|
||||
peers = options['peers'] or []
|
||||
disconnect = options['disconnect'] or []
|
||||
exact = options['exact'] or []
|
||||
for peer in peers + disconnect + exact:
|
||||
if peer not in addresses:
|
||||
raise CommandError(f"Peer {peer} does not have a receptor address.")
|
||||
|
||||
# No 1-cycles
|
||||
for collection in ('peers', 'disconnect', 'exact'):
|
||||
if options[collection] is not None and options['source'] in options[collection]:
|
||||
@@ -47,9 +55,12 @@ class Command(BaseCommand):
|
||||
# No 2-cycles
|
||||
if options['peers'] or options['exact'] is not None:
|
||||
peers = set(options['peers'] or options['exact'])
|
||||
incoming = set(InstanceLink.objects.filter(target=nodes[options['source']]).values_list('source__hostname', flat=True))
|
||||
if options['source'] in addresses:
|
||||
incoming = set(InstanceLink.objects.filter(target=addresses[options['source']]).values_list('source__hostname', flat=True))
|
||||
else:
|
||||
incoming = set()
|
||||
if peers & incoming:
|
||||
warnings.warn(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.")
|
||||
raise CommandError(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.")
|
||||
|
||||
if options['peers']:
|
||||
missing_peers = set(options['peers']) - set(nodes)
|
||||
@@ -60,7 +71,7 @@ class Command(BaseCommand):
|
||||
results = 0
|
||||
for target in options['peers']:
|
||||
_, created = InstanceLink.objects.update_or_create(
|
||||
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
|
||||
source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
|
||||
)
|
||||
if created:
|
||||
results += 1
|
||||
@@ -70,9 +81,9 @@ class Command(BaseCommand):
|
||||
if options['disconnect']:
|
||||
results = 0
|
||||
for target in options['disconnect']:
|
||||
if target not in nodes: # Be permissive, the node might have already been de-registered.
|
||||
if target not in addresses: # Be permissive, the node might have already been de-registered.
|
||||
continue
|
||||
n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=nodes[target]).delete()
|
||||
n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=addresses[target]).delete()
|
||||
results += n
|
||||
|
||||
print(f"{results} peer links removed from the database.")
|
||||
@@ -81,11 +92,11 @@ class Command(BaseCommand):
|
||||
additions = 0
|
||||
with transaction.atomic():
|
||||
peers = set(options['exact'])
|
||||
links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
|
||||
removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
|
||||
links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__address', flat=True))
|
||||
removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__instance__hostname__in=links - peers).delete()
|
||||
for target in peers - links:
|
||||
_, created = InstanceLink.objects.update_or_create(
|
||||
source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
|
||||
source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
|
||||
)
|
||||
if created:
|
||||
additions += 1
|
||||
|
||||
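As a rough illustration of the change above: the targets given to --peers, --disconnect, and --exact are now receptor addresses rather than instance hostnames. A hedged sketch of invoking the peer-registration command from a Django shell follows; the node and address names are made up and the exact argument wiring is assumed from the handler shown above.

from django.core.management import call_command

# Link instance 'awx-1' to the receptor address 'hop1.example.org';
# the address must already exist as a ReceptorAddress row.
call_command('register_peers', 'awx-1', peers=['hop1.example.org'])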
awx/main/management/commands/remove_receptor_address.py (new file, 26 lines)
@@ -0,0 +1,26 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved

from django.core.management.base import BaseCommand

from awx.main.models import ReceptorAddress


class Command(BaseCommand):
    """
    Internal controller command.
    Delete a receptor address.
    """

    help = "Remove a receptor address."

    def add_arguments(self, parser):
        parser.add_argument('--address', dest='address', type=str, help="Receptor address to remove")

    def handle(self, **options):
        deleted = ReceptorAddress.objects.filter(address=options['address']).delete()
        if deleted[0]:
            print(f"Successfully removed {options['address']}")
            print("(changed: True)")
        else:
            print(f"Did not remove {options['address']}, not found")
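For reference, the new command can be exercised from a Django shell with call_command (on a deployed system the equivalent is normally run through awx-manage); the address below is a hypothetical example.

from django.core.management import call_command

# Deletes the ReceptorAddress row whose address matches, if one exists.
call_command('remove_receptor_address', address='hop1.example.org')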
@@ -3,6 +3,7 @@
|
||||
|
||||
from django.conf import settings
|
||||
from django.core.management.base import BaseCommand
|
||||
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
|
||||
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
|
||||
@@ -25,6 +26,9 @@ class Command(BaseCommand):
|
||||
print(Control('callback_receiver').status())
|
||||
return
|
||||
consumer = None
|
||||
|
||||
CallbackReceiverMetricsServer().start()
|
||||
|
||||
try:
|
||||
consumer = AWXConsumerRedis(
|
||||
'callback_receiver',
|
||||
|
||||
@@ -10,6 +10,7 @@ from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.control import Control
|
||||
from awx.main.dispatch.pool import AutoscalePool
|
||||
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetricsServer
|
||||
|
||||
logger = logging.getLogger('awx.main.dispatch')
|
||||
|
||||
@@ -62,6 +63,8 @@ class Command(BaseCommand):
|
||||
|
||||
consumer = None
|
||||
|
||||
DispatcherMetricsServer().start()
|
||||
|
||||
try:
|
||||
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
|
||||
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
|
||||
|
||||
@@ -16,6 +16,7 @@ from awx.main.analytics.broadcast_websocket import (
|
||||
RelayWebsocketStatsManager,
|
||||
safe_name,
|
||||
)
|
||||
from awx.main.analytics.subsystem_metrics import WebsocketsMetricsServer
|
||||
from awx.main.wsrelay import WebSocketRelayManager
|
||||
|
||||
|
||||
@@ -163,8 +164,15 @@ class Command(BaseCommand):
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
websocket_relay_manager = WebSocketRelayManager()
|
||||
asyncio.run(websocket_relay_manager.run())
|
||||
except KeyboardInterrupt:
|
||||
logger.info('Terminating Websocket Relayer')
|
||||
WebsocketsMetricsServer().start()
|
||||
websocket_relay_manager = WebSocketRelayManager()
|
||||
|
||||
while True:
|
||||
try:
|
||||
asyncio.run(websocket_relay_manager.run())
|
||||
except KeyboardInterrupt:
|
||||
logger.info('Shutting down Websocket Relayer')
|
||||
break
|
||||
except Exception as e:
|
||||
logger.exception('Error in Websocket Relayer, exception: {}. Restarting in 10 seconds'.format(e))
|
||||
time.sleep(10)
|
||||
|
||||
@@ -115,7 +115,14 @@ class InstanceManager(models.Manager):
|
||||
return node[0]
|
||||
raise RuntimeError("No instance found with the current cluster host id")
|
||||
|
||||
def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
|
||||
def register(
|
||||
self,
|
||||
node_uuid=None,
|
||||
hostname=None,
|
||||
ip_address="",
|
||||
node_type='hybrid',
|
||||
defaults=None,
|
||||
):
|
||||
if not hostname:
|
||||
hostname = settings.CLUSTER_HOST_ID
|
||||
|
||||
@@ -161,9 +168,6 @@ class InstanceManager(models.Manager):
|
||||
if instance.node_type != node_type:
|
||||
instance.node_type = node_type
|
||||
update_fields.append('node_type')
|
||||
if instance.listener_port != listener_port:
|
||||
instance.listener_port = listener_port
|
||||
update_fields.append('listener_port')
|
||||
if update_fields:
|
||||
instance.save(update_fields=update_fields)
|
||||
return (True, instance)
|
||||
@@ -174,11 +178,13 @@ class InstanceManager(models.Manager):
|
||||
create_defaults = {
|
||||
'node_state': Instance.States.INSTALLED,
|
||||
'capacity': 0,
|
||||
'managed': True,
|
||||
}
|
||||
if defaults is not None:
|
||||
create_defaults.update(defaults)
|
||||
uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
|
||||
if node_type == 'execution' and 'version' not in create_defaults:
|
||||
create_defaults['version'] = RECEPTOR_PENDING
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
|
||||
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
|
||||
|
||||
return (True, instance)
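Because listener_port no longer lives on Instance, registering a node and declaring its Receptor listener are now separate steps. A minimal sketch with hypothetical hostnames, based only on the register() signature and the ReceptorAddress fields introduced in this change:

from awx.main.models import Instance, ReceptorAddress

# register() returns a (changed, instance) tuple
changed, hop = Instance.objects.register(hostname='hop1.example.org', node_type='hop')

# the listener details now live on ReceptorAddress, not on Instance
ReceptorAddress.objects.create(
    instance=hop,
    address='hop1.example.org',
    port=27199,
    protocol='tcp',
    canonical=True,
    peers_from_control_nodes=True,
)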
|
||||
|
||||
@@ -5,11 +5,12 @@ import logging
|
||||
import threading
|
||||
import time
|
||||
import urllib.parse
|
||||
from pathlib import Path
|
||||
|
||||
from django.conf import settings
|
||||
from django.contrib.auth import logout
|
||||
from django.contrib.auth.models import User
|
||||
from django.db.migrations.executor import MigrationExecutor
|
||||
from django.db.migrations.recorder import MigrationRecorder
|
||||
from django.db import connection
|
||||
from django.shortcuts import redirect
|
||||
from django.apps import apps
|
||||
@@ -17,9 +18,11 @@ from django.utils.deprecation import MiddlewareMixin
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django.urls import reverse, resolve
|
||||
|
||||
from awx.main import migrations
|
||||
from awx.main.utils.named_url_graph import generate_graph, GraphNode
|
||||
from awx.conf import fields, register
|
||||
from awx.main.utils.profiling import AWXProfiler
|
||||
from awx.main.utils.common import memoize
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.middleware')
|
||||
@@ -198,9 +201,22 @@ class URLModificationMiddleware(MiddlewareMixin):
|
||||
request.path_info = new_path
|
||||
|
||||
|
||||
@memoize(ttl=20)
|
||||
def is_migrating():
|
||||
latest_number = 0
|
||||
latest_name = ''
|
||||
for migration_path in Path(migrations.__path__[0]).glob('[0-9]*.py'):
|
||||
try:
|
||||
migration_number = int(migration_path.name.split('_', 1)[0])
|
||||
except ValueError:
|
||||
continue
|
||||
if migration_number > latest_number:
|
||||
latest_number = migration_number
|
||||
latest_name = migration_path.name[: -len('.py')]
|
||||
return not MigrationRecorder(connection).migration_qs.filter(app='main', name=latest_name).exists()
|
||||
|
||||
|
||||
class MigrationRanCheckMiddleware(MiddlewareMixin):
|
||||
def process_request(self, request):
|
||||
executor = MigrationExecutor(connection)
|
||||
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
|
||||
if bool(plan) and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
|
||||
if is_migrating() and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
|
||||
return redirect(reverse("ui:migrations_notran"))
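Since is_migrating() is wrapped in @memoize(ttl=20), the migration-state lookup is refreshed at most every 20 seconds per process instead of on every request. A minimal sketch of a TTL memoizer with that behavior; the real awx.main.utils.common.memoize helper may be implemented differently.

import functools
import time


def memoize(ttl=60):
    """Cache a function's return value and recompute it only after ttl seconds."""

    def decorator(func):
        cache = {}

        @functools.wraps(func)
        def wrapper(*args):
            now = time.time()
            if args in cache:
                value, stored_at = cache[args]
                if now - stored_at < ttl:
                    return value
            value = func(*args)
            cache[args] = (value, now)
            return value

        return wrapper

    return decorator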
|
||||
|
||||
awx/main/migrations/0189_inbound_hop_nodes.py (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
# Generated by Django 4.2.6 on 2024-01-19 19:24
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
def create_receptor_addresses(apps, schema_editor):
|
||||
"""
|
||||
If listener_port was defined on an instance, create a receptor address for it
|
||||
"""
|
||||
Instance = apps.get_model('main', 'Instance')
|
||||
ReceptorAddress = apps.get_model('main', 'ReceptorAddress')
|
||||
for instance in Instance.objects.exclude(listener_port=None):
|
||||
ReceptorAddress.objects.create(
|
||||
instance=instance,
|
||||
address=instance.hostname,
|
||||
port=instance.listener_port,
|
||||
peers_from_control_nodes=instance.peers_from_control_nodes,
|
||||
protocol='tcp',
|
||||
is_internal=False,
|
||||
canonical=True,
|
||||
)
|
||||
|
||||
|
||||
def link_to_receptor_addresses(apps, schema_editor):
|
||||
"""
|
||||
Modify each InstanceLink to point to the newly created
|
||||
ReceptorAddresses, using the new target field
|
||||
"""
|
||||
InstanceLink = apps.get_model('main', 'InstanceLink')
|
||||
for link in InstanceLink.objects.all():
|
||||
link.target = link.target_old.receptor_addresses.get()
|
||||
link.save()
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0188_add_bitbucket_dc_webhook'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='ReceptorAddress',
|
||||
fields=[
|
||||
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('address', models.CharField(help_text='Routable address for this instance.', max_length=255)),
|
||||
(
|
||||
'port',
|
||||
models.IntegerField(
|
||||
default=27199,
|
||||
help_text='Port for the address.',
|
||||
validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)],
|
||||
),
|
||||
),
|
||||
('websocket_path', models.CharField(blank=True, default='', help_text='Websocket path.', max_length=255)),
|
||||
(
|
||||
'protocol',
|
||||
models.CharField(
|
||||
choices=[('tcp', 'TCP'), ('ws', 'WS'), ('wss', 'WSS')],
|
||||
default='tcp',
|
||||
help_text="Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'.",
|
||||
max_length=10,
|
||||
),
|
||||
),
|
||||
('is_internal', models.BooleanField(default=False, help_text='If True, only routable within the Kubernetes cluster.')),
|
||||
('canonical', models.BooleanField(default=False, help_text='If True, this address is the canonical address for the instance.')),
|
||||
(
|
||||
'peers_from_control_nodes',
|
||||
models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
|
||||
),
|
||||
],
|
||||
),
|
||||
migrations.RemoveConstraint(
|
||||
model_name='instancelink',
|
||||
name='source_and_target_can_not_be_equal',
|
||||
),
|
||||
migrations.RenameField(
|
||||
model_name='instancelink',
|
||||
old_name='target',
|
||||
new_name='target_old',
|
||||
),
|
||||
migrations.AlterUniqueTogether(
|
||||
name='instancelink',
|
||||
unique_together=set(),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='instance',
|
||||
name='managed',
|
||||
field=models.BooleanField(default=False, editable=False, help_text='If True, this instance is managed by the control plane.'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='instancelink',
|
||||
name='source',
|
||||
field=models.ForeignKey(help_text='The source instance of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.instance'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='receptoraddress',
|
||||
name='instance',
|
||||
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receptor_addresses', to='main.instance'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='activitystream',
|
||||
name='receptor_address',
|
||||
field=models.ManyToManyField(blank=True, to='main.receptoraddress'),
|
||||
),
|
||||
migrations.AddConstraint(
|
||||
model_name='receptoraddress',
|
||||
constraint=models.UniqueConstraint(fields=('address',), name='unique_receptor_address', violation_error_message='Receptor address must be unique.'),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='instancelink',
|
||||
name='target',
|
||||
field=models.ForeignKey(
|
||||
help_text='The target receptor address of this peer link.', null=True, on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress'
|
||||
),
|
||||
),
|
||||
migrations.RunPython(create_receptor_addresses),
|
||||
migrations.RunPython(link_to_receptor_addresses),
|
||||
migrations.RemoveField(
|
||||
model_name='instance',
|
||||
name='peers_from_control_nodes',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='instance',
|
||||
name='listener_port',
|
||||
),
|
||||
migrations.RemoveField(
|
||||
model_name='instancelink',
|
||||
name='target_old',
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='instance',
|
||||
name='peers',
|
||||
field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.receptoraddress'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='instancelink',
|
||||
name='target',
|
||||
field=models.ForeignKey(
|
||||
help_text='The target receptor address of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress'
|
||||
),
|
||||
),
|
||||
migrations.AddConstraint(
|
||||
model_name='instancelink',
|
||||
constraint=models.UniqueConstraint(
|
||||
fields=('source', 'target'), name='unique_source_target', violation_error_message='Field source and target must be unique together.'
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,59 @@
|
||||
# Generated by Django 4.2.6 on 2024-02-15 20:51
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('main', '0189_inbound_hop_nodes'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -6,6 +6,8 @@ from django.conf import settings # noqa
|
||||
from django.db import connection
|
||||
from django.db.models.signals import pre_delete # noqa
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.resource_registry.fields import AnsibleResourceField
|
||||
from ansible_base.lib.utils.models import prevent_search
|
||||
|
||||
# AWX
|
||||
@@ -14,6 +16,7 @@ from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutM
|
||||
from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership # noqa
|
||||
from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env # noqa
|
||||
from awx.main.models.projects import Project, ProjectUpdate # noqa
|
||||
from awx.main.models.receptor_address import ReceptorAddress # noqa
|
||||
from awx.main.models.inventory import ( # noqa
|
||||
CustomInventoryScript,
|
||||
Group,
|
||||
@@ -98,6 +101,7 @@ from awx.main.access import get_user_queryset, check_user_access, check_user_acc
|
||||
User.add_to_class('get_queryset', get_user_queryset)
|
||||
User.add_to_class('can_access', check_user_access)
|
||||
User.add_to_class('can_access_with_errors', check_user_access_with_errors)
|
||||
User.add_to_class('resource', AnsibleResourceField(primary_key_field="id"))
|
||||
|
||||
|
||||
def convert_jsonfields():
|
||||
|
||||
@@ -77,6 +77,7 @@ class ActivityStream(models.Model):
|
||||
notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
|
||||
notification = models.ManyToManyField("Notification", blank=True)
|
||||
label = models.ManyToManyField("Label", blank=True)
|
||||
receptor_address = models.ManyToManyField("ReceptorAddress", blank=True)
|
||||
role = models.ManyToManyField("Role", blank=True)
|
||||
instance = models.ManyToManyField("Instance", blank=True)
|
||||
instance_group = models.ManyToManyField("InstanceGroup", blank=True)
|
||||
|
||||
@@ -1216,6 +1216,26 @@ ManagedCredentialType(
|
||||
},
|
||||
)
|
||||
|
||||
ManagedCredentialType(
|
||||
namespace='terraform',
|
||||
kind='cloud',
|
||||
name=gettext_noop('Terraform backend configuration'),
|
||||
managed=True,
|
||||
inputs={
|
||||
'fields': [
|
||||
{
|
||||
'id': 'configuration',
|
||||
'label': gettext_noop('Backend configuration'),
|
||||
'type': 'string',
|
||||
'secret': True,
|
||||
'multiline': True,
|
||||
'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'),
|
||||
},
|
||||
],
|
||||
'required': ['configuration'],
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class CredentialInputSource(PrimordialModel):
|
||||
class Meta:
|
||||
|
||||
@@ -122,3 +122,11 @@ def kubernetes_bearer_token(cred, env, private_data_dir):
|
||||
env['K8S_AUTH_SSL_CA_CERT'] = to_container_path(path, private_data_dir)
|
||||
else:
|
||||
env['K8S_AUTH_VERIFY_SSL'] = 'False'
|
||||
|
||||
|
||||
def terraform(cred, env, private_data_dir):
|
||||
handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
|
||||
with os.fdopen(handle, 'w') as f:
|
||||
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
|
||||
f.write(cred.get_input('configuration'))
|
||||
env['TF_BACKEND_CONFIG_FILE'] = to_container_path(path, private_data_dir)
|
||||
|
||||
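The credential's 'configuration' input is written verbatim to this file and exposed through TF_BACKEND_CONFIG_FILE, so it is expected to hold HCL backend settings of the kind Terraform accepts via -backend-config files. A purely illustrative example of what a user might store in that field (the bucket, key, and region values are made up):

configuration = """
bucket = "example-terraform-state"
key    = "awx/terraform.tfstate"
region = "us-east-1"
"""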
@@ -124,8 +124,6 @@ class BasePlaybookEvent(CreatedModifiedModel):
|
||||
'parent_uuid',
|
||||
'start_line',
|
||||
'end_line',
|
||||
'host_id',
|
||||
'host_name',
|
||||
'verbosity',
|
||||
]
|
||||
WRAPUP_EVENT = 'playbook_on_stats'
|
||||
@@ -473,7 +471,7 @@ class JobEvent(BasePlaybookEvent):
|
||||
An event/message logged from the callback when running a job.
|
||||
"""
|
||||
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']
|
||||
VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created', 'host_id', 'host_name']
|
||||
JOB_REFERENCE = 'job_id'
|
||||
|
||||
objects = DeferJobCreatedManager()
|
||||
|
||||
@@ -5,7 +5,7 @@ from decimal import Decimal
|
||||
import logging
|
||||
import os
|
||||
|
||||
from django.core.validators import MinValueValidator, MaxValueValidator
|
||||
from django.core.validators import MinValueValidator
|
||||
from django.db import models, connection
|
||||
from django.db.models.signals import post_save, post_delete
|
||||
from django.dispatch import receiver
|
||||
@@ -34,6 +34,7 @@ from awx.main.models.rbac import (
|
||||
from awx.main.models.unified_jobs import UnifiedJob
|
||||
from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
|
||||
from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin
|
||||
from awx.main.models.receptor_address import ReceptorAddress
|
||||
|
||||
# ansible-runner
|
||||
from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes
|
||||
@@ -64,8 +65,19 @@ class HasPolicyEditsMixin(HasEditsMixin):
|
||||
|
||||
|
||||
class InstanceLink(BaseModel):
|
||||
source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+')
|
||||
target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers')
|
||||
class Meta:
|
||||
ordering = ("id",)
|
||||
# add constraint for source and target to be unique together
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
fields=["source", "target"],
|
||||
name="unique_source_target",
|
||||
violation_error_message=_("Field source and target must be unique together."),
|
||||
)
|
||||
]
|
||||
|
||||
source = models.ForeignKey('Instance', on_delete=models.CASCADE, help_text=_("The source instance of this peer link."))
|
||||
target = models.ForeignKey('ReceptorAddress', on_delete=models.CASCADE, help_text=_("The target receptor address of this peer link."))
|
||||
|
||||
class States(models.TextChoices):
|
||||
ADDING = 'adding', _('Adding')
|
||||
@@ -76,11 +88,6 @@ class InstanceLink(BaseModel):
|
||||
choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
|
||||
)
|
||||
|
||||
class Meta:
|
||||
unique_together = ('source', 'target')
|
||||
ordering = ("id",)
|
||||
constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')]
|
||||
|
||||
|
||||
class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
"""A model representing an AWX instance running against this database."""
|
||||
@@ -110,6 +117,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
default="",
|
||||
max_length=50,
|
||||
)
|
||||
|
||||
# Auto-fields, implementation is different from BaseModel
|
||||
created = models.DateTimeField(auto_now_add=True)
|
||||
modified = models.DateTimeField(auto_now=True)
|
||||
@@ -185,16 +193,9 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
node_state = models.CharField(
|
||||
choices=States.choices, default=States.READY, max_length=16, help_text=_("Indicates the current life cycle stage of this instance.")
|
||||
)
|
||||
listener_port = models.PositiveIntegerField(
|
||||
blank=True,
|
||||
null=True,
|
||||
default=None,
|
||||
validators=[MinValueValidator(1024), MaxValueValidator(65535)],
|
||||
help_text=_("Port that Receptor will listen for incoming connections on."),
|
||||
)
|
||||
|
||||
peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
|
||||
peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it."))
|
||||
managed = models.BooleanField(help_text=_("If True, this instance is managed by the control plane."), default=False, editable=False)
|
||||
peers = models.ManyToManyField('ReceptorAddress', through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
|
||||
|
||||
POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
|
||||
|
||||
@@ -241,6 +242,26 @@ class Instance(HasPolicyEditsMixin, BaseModel):
|
||||
return True
|
||||
return self.health_check_started > self.last_health_check
|
||||
|
||||
@property
|
||||
def canonical_address(self):
|
||||
return self.receptor_addresses.filter(canonical=True).first()
|
||||
|
||||
@property
|
||||
def canonical_address_port(self):
|
||||
# note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
|
||||
for addr in self.receptor_addresses.all():
|
||||
if addr.canonical:
|
||||
return addr.port
|
||||
return None
|
||||
|
||||
@property
|
||||
def canonical_address_peers_from_control_nodes(self):
|
||||
# note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
|
||||
for addr in self.receptor_addresses.all():
|
||||
if addr.canonical:
|
||||
return addr.peers_from_control_nodes
|
||||
return False
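The notes above are about query count: when the caller prefetches receptor_addresses, iterating .all() inside these properties reads from the prefetch cache instead of issuing one query per instance. A small sketch of the access pattern this is optimized for (the view-side code is assumed, not part of this diff):

# One query for the instances plus one for all of their receptor addresses;
# the canonical_address_* properties then hit the prefetch cache.
for instance in Instance.objects.prefetch_related('receptor_addresses'):
    print(instance.hostname, instance.canonical_address_port)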
|
||||
|
||||
def get_cleanup_task_kwargs(self, **kwargs):
|
||||
"""
|
||||
Produce options to use for the command: ansible-runner worker cleanup
|
||||
@@ -501,6 +522,35 @@ def schedule_write_receptor_config(broadcast=True):
|
||||
write_receptor_config() # just run locally
|
||||
|
||||
|
||||
@receiver(post_save, sender=ReceptorAddress)
|
||||
def receptor_address_saved(sender, instance, **kwargs):
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
address = instance
|
||||
|
||||
control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
|
||||
if address.peers_from_control_nodes:
|
||||
# if control_instances is not a subset of current peers of address, then
|
||||
# that means we need to add some InstanceLinks
|
||||
if not control_instances <= set(address.peers_from.all()):
|
||||
with disable_activity_stream():
|
||||
for control_instance in control_instances:
|
||||
InstanceLink.objects.update_or_create(source=control_instance, target=address)
|
||||
schedule_write_receptor_config()
|
||||
else:
|
||||
if address.peers_from.exists():
|
||||
with disable_activity_stream():
|
||||
address.peers_from.remove(*control_instances)
|
||||
schedule_write_receptor_config()
|
||||
|
||||
|
||||
@receiver(post_delete, sender=ReceptorAddress)
|
||||
def receptor_address_deleted(sender, instance, **kwargs):
|
||||
address = instance
|
||||
if address.peers_from_control_nodes:
|
||||
schedule_write_receptor_config()
|
||||
|
||||
|
||||
@receiver(post_save, sender=Instance)
|
||||
def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
|
||||
'''
|
||||
@@ -511,11 +561,14 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
|
||||
2. a node changes its value of peers_from_control_nodes
|
||||
3. a new control node comes online and has instances to peer to
|
||||
'''
|
||||
from awx.main.signals import disable_activity_stream
|
||||
|
||||
if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
|
||||
inst = Instance.objects.filter(peers_from_control_nodes=True)
|
||||
if set(instance.peers.all()) != set(inst):
|
||||
instance.peers.set(inst)
|
||||
schedule_write_receptor_config(broadcast=False)
|
||||
peers_addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True)
|
||||
if peers_addresses.exists():
|
||||
with disable_activity_stream():
|
||||
instance.peers.add(*peers_addresses)
|
||||
schedule_write_receptor_config(broadcast=False)
|
||||
|
||||
if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]:
|
||||
if instance.node_state == Instance.States.DEPROVISIONING:
|
||||
@@ -524,16 +577,6 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
|
||||
# wait for jobs on the node to complete, then delete the
|
||||
# node and kick off write_receptor_config
|
||||
connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
|
||||
else:
|
||||
control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
|
||||
if instance.peers_from_control_nodes:
|
||||
if (control_instances & set(instance.peers_from.all())) != set(control_instances):
|
||||
instance.peers_from.add(*control_instances)
|
||||
schedule_write_receptor_config() # keep method separate to make pytest mocking easier
|
||||
else:
|
||||
if set(control_instances) & set(instance.peers_from.all()):
|
||||
instance.peers_from.remove(*control_instances)
|
||||
schedule_write_receptor_config()
|
||||
|
||||
if created or instance.has_policy_changes():
|
||||
schedule_policy_task()
|
||||
@@ -548,8 +591,6 @@ def on_instance_group_deleted(sender, instance, using, **kwargs):
|
||||
@receiver(post_delete, sender=Instance)
|
||||
def on_instance_deleted(sender, instance, using, **kwargs):
|
||||
schedule_policy_task()
|
||||
if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes:
|
||||
schedule_write_receptor_config()
|
||||
|
||||
|
||||
class UnifiedJobTemplateInstanceGroupMembership(models.Model):
|
||||
|
||||
@@ -925,6 +925,7 @@ class InventorySourceOptions(BaseModel):
|
||||
('rhv', _('Red Hat Virtualization')),
|
||||
('controller', _('Red Hat Ansible Automation Platform')),
|
||||
('insights', _('Red Hat Insights')),
|
||||
('terraform', _('Terraform State')),
|
||||
]
|
||||
|
||||
# From the options of the Django management base command
|
||||
@@ -1630,6 +1631,20 @@ class satellite6(PluginFileInjector):
|
||||
return ret
|
||||
|
||||
|
||||
class terraform(PluginFileInjector):
|
||||
plugin_name = 'terraform_state'
|
||||
base_injector = 'managed'
|
||||
namespace = 'cloud'
|
||||
collection = 'terraform'
|
||||
use_fqcn = True
|
||||
|
||||
def inventory_as_dict(self, inventory_update, private_data_dir):
|
||||
env = super(terraform, self).get_plugin_env(inventory_update, private_data_dir, None)
|
||||
ret = super().inventory_as_dict(inventory_update, private_data_dir)
|
||||
ret['backend_config_files'] = env["TF_BACKEND_CONFIG_FILE"]
|
||||
return ret
|
||||
|
||||
|
||||
class controller(PluginFileInjector):
|
||||
plugin_name = 'tower' # TODO: relying on routing for now, update after EEs pick up revised collection
|
||||
base_injector = 'template'
|
||||
|
||||
@@ -5,6 +5,7 @@ from copy import deepcopy
|
||||
import datetime
|
||||
import logging
|
||||
import json
|
||||
import traceback
|
||||
|
||||
from django.db import models
|
||||
from django.conf import settings
|
||||
@@ -484,14 +485,29 @@ class JobNotificationMixin(object):
|
||||
if msg_template:
|
||||
try:
|
||||
msg = env.from_string(msg_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
msg = ''
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
|
||||
msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
|
||||
|
||||
if body_template:
|
||||
try:
|
||||
body = env.from_string(body_template).render(**context)
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError):
|
||||
body = ''
|
||||
except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
|
||||
body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
|
||||
|
||||
# https://datatracker.ietf.org/doc/html/rfc2822#section-2.2
|
||||
# Body should have at least 2 CRLF; some clients will interpret
# an email with a blank body incorrectly, so we check for that here.
|
||||
|
||||
if len(body.strip().splitlines()) < 1:
|
||||
# blank body
body = '\r\n'.join(
[
"The template rendering returned a blank body.",
|
||||
"Please check the template.",
|
||||
"Refer to https://github.com/ansible/awx/issues/13983",
|
||||
"for further information.",
|
||||
]
|
||||
)
|
||||
|
||||
return (msg, body)
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@ from django.contrib.sessions.models import Session
|
||||
from django.utils.timezone import now as tz_now
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
# django-ansible-base
|
||||
from ansible_base.resource_registry.fields import AnsibleResourceField
|
||||
|
||||
# AWX
|
||||
from awx.api.versioning import reverse
|
||||
@@ -103,6 +105,7 @@ class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVi
|
||||
approval_role = ImplicitRoleField(
|
||||
parent_role='admin_role',
|
||||
)
|
||||
resource = AnsibleResourceField(primary_key_field="id")
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:organization_detail', kwargs={'pk': self.pk}, request=request)
|
||||
@@ -151,6 +154,7 @@ class Team(CommonModelNameNotUnique, ResourceMixin):
|
||||
read_role = ImplicitRoleField(
|
||||
parent_role=['organization.auditor_role', 'member_role'],
|
||||
)
|
||||
resource = AnsibleResourceField(primary_key_field="id")
|
||||
|
||||
def get_absolute_url(self, request=None):
|
||||
return reverse('api:team_detail', kwargs={'pk': self.pk}, request=request)
|
||||
|
||||
awx/main/models/receptor_address.py (new file, 67 lines)
@@ -0,0 +1,67 @@
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.translation import gettext_lazy as _
from awx.api.versioning import reverse


class Protocols(models.TextChoices):
    TCP = 'tcp', 'TCP'
    WS = 'ws', 'WS'
    WSS = 'wss', 'WSS'


class ReceptorAddress(models.Model):
    class Meta:
        app_label = 'main'
        constraints = [
            models.UniqueConstraint(
                fields=["address"],
                name="unique_receptor_address",
                violation_error_message=_("Receptor address must be unique."),
            )
        ]

    address = models.CharField(help_text=_("Routable address for this instance."), max_length=255)
    port = models.IntegerField(help_text=_("Port for the address."), default=27199, validators=[MinValueValidator(0), MaxValueValidator(65535)])
    websocket_path = models.CharField(help_text=_("Websocket path."), max_length=255, default="", blank=True)
    protocol = models.CharField(
        help_text=_("Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'."), max_length=10, default=Protocols.TCP, choices=Protocols.choices
    )
    is_internal = models.BooleanField(help_text=_("If True, only routable within the Kubernetes cluster."), default=False)
    canonical = models.BooleanField(help_text=_("If True, this address is the canonical address for the instance."), default=False)
    peers_from_control_nodes = models.BooleanField(help_text=_("If True, control plane cluster nodes should automatically peer to it."), default=False)
    instance = models.ForeignKey(
        'Instance',
        related_name='receptor_addresses',
        on_delete=models.CASCADE,
        null=False,
    )

    def __str__(self):
        return self.get_full_address()

    def get_full_address(self):
        scheme = ""
        path = ""
        port = ""
        if self.protocol == "ws":
            scheme = "wss://"

        if self.protocol == "ws" and self.websocket_path:
            path = f"/{self.websocket_path}"

        if self.port:
            port = f":{self.port}"

        return f"{scheme}{self.address}{port}{path}"

    def get_peer_type(self):
        if self.protocol == 'tcp':
            return 'tcp-peer'
        elif self.protocol in ['ws', 'wss']:
            return 'ws-peer'
        else:
            return None

    def get_absolute_url(self, request=None):
        return reverse('api:receptor_address_detail', kwargs={'pk': self.pk}, request=request)
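To make the formatting above concrete, here is what get_full_address() and get_peer_type() return for two hypothetical, unsaved addresses:

tcp_addr = ReceptorAddress(address='hop1.example.org', port=27199, protocol='tcp')
assert tcp_addr.get_full_address() == 'hop1.example.org:27199'
assert tcp_addr.get_peer_type() == 'tcp-peer'

# note: the model maps protocol 'ws' to a wss:// scheme in the full address
ws_addr = ReceptorAddress(address='hop2.example.org', port=443, protocol='ws', websocket_path='receptor')
assert ws_addr.get_full_address() == 'wss://hop2.example.org:443/receptor'
assert ws_addr.get_peer_type() == 'ws-peer'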
@@ -1,5 +1,6 @@
|
||||
# Copyright (c) 2019 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
# -*-coding:utf-8-*-
|
||||
|
||||
|
||||
class CustomNotificationBase(object):
|
||||
|
||||
@@ -4,13 +4,15 @@ import logging
|
||||
from django.conf import settings
|
||||
from django.urls import re_path
|
||||
|
||||
from channels.auth import AuthMiddlewareStack
|
||||
from channels.routing import ProtocolTypeRouter, URLRouter
|
||||
|
||||
from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack
|
||||
|
||||
from . import consumers
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.routing')
|
||||
_application = None
|
||||
|
||||
|
||||
class AWXProtocolTypeRouter(ProtocolTypeRouter):
|
||||
@@ -26,13 +28,91 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class MultipleURLRouterAdapter:
|
||||
"""
|
||||
Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n)
|
||||
This class allows associating a websocket url with an auth
|
||||
Ordering matters. The first matching url will be used.
|
||||
"""
|
||||
|
||||
def __init__(self, *auths):
|
||||
self._auths = [a for a in auths]
|
||||
|
||||
async def __call__(self, scope, receive, send):
|
||||
"""
|
||||
Loop through the list of passed-in URLRouters (they may or may not be wrapped by auth).
|
||||
We know we have exhausted the list of URLRouter patterns when we get a
|
||||
ValueError('No route found for path %s'). When that happens, move onto the next
|
||||
URLRouter.
|
||||
If the final URLRouter raises an error, re-raise it in the end.
|
||||
|
||||
We know that we found a match when no error is raised, end the loop.
|
||||
"""
|
||||
last_index = len(self._auths) - 1
|
||||
for i, auth in enumerate(self._auths):
|
||||
try:
|
||||
return await auth.__call__(scope, receive, send)
|
||||
except ValueError as e:
|
||||
if str(e).startswith('No route found for path'):
|
||||
# Only surface the error if on the last URLRouter
|
||||
if i == last_index:
|
||||
raise
|
||||
|
||||
|
||||
websocket_urlpatterns = [
|
||||
re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()),
|
||||
re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
|
||||
]
|
||||
websocket_relay_urlpatterns = [
|
||||
re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
|
||||
]
|
||||
|
||||
application = AWXProtocolTypeRouter(
|
||||
{
|
||||
'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
|
||||
}
|
||||
)
|
||||
|
||||
def application_func(cls=AWXProtocolTypeRouter) -> ProtocolTypeRouter:
|
||||
return cls(
|
||||
{
|
||||
'websocket': MultipleURLRouterAdapter(
|
||||
URLRouter(websocket_relay_urlpatterns),
|
||||
DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def __getattr__(name: str) -> ProtocolTypeRouter:
|
||||
"""
|
||||
Defer instantiating application.
|
||||
For testing, we just need it to NOT run on import.
|
||||
|
||||
https://peps.python.org/pep-0562/#specification
|
||||
|
||||
Normally, someone would get application from this module via:
|
||||
from awx.main.routing import application
|
||||
|
||||
and do something with the application:
|
||||
application.do_something()
|
||||
|
||||
What does the callstack look like when the import runs?
|
||||
...
|
||||
awx.main.routing.__getattribute__(...) # <-- we don't define this so NOOP as far as we are concerned
|
||||
if '__getattr__' in awx.main.routing.__dict__: # <-- this triggers the function we are in
|
||||
return awx.main.routing.__dict__.__getattr__("application")
|
||||
|
||||
Why isn't this function simply implemented as:
|
||||
def __getattr__(name):
|
||||
if not _application:
|
||||
_application = application_func()
|
||||
return _application
|
||||
|
||||
It could. I manually tested it and it passes test_routing.py.
|
||||
|
||||
But my understanding after reading the PEP-0562 specification link above is that
|
||||
performance would be a bit worse due to the extra __getattribute__ calls when
|
||||
we reference non-global variables.
|
||||
"""
|
||||
if name == "application":
|
||||
globs = globals()
|
||||
if not globs['_application']:
|
||||
globs['_application'] = application_func()
|
||||
return globs['_application']
|
||||
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
||||
|
||||
@@ -68,7 +68,7 @@ class TaskBase:
|
||||
# initialize each metric to 0 and force metric_has_changed to true. This
|
||||
# ensures each task manager metric will be overridden when pipe_execute
|
||||
# is called later.
|
||||
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
|
||||
self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
|
||||
self.start_time = time.time()
|
||||
|
||||
# We want to avoid calling settings in loops, so cache these settings at init time
|
||||
@@ -105,7 +105,7 @@ class TaskBase:
|
||||
try:
|
||||
# increment task_manager_schedule_calls regardless if the other
|
||||
# metrics are recorded
|
||||
s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
|
||||
s_metrics.DispatcherMetrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
|
||||
# Only record metrics if the last time recording was more
|
||||
# than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
|
||||
# Prevents a short-duration task manager that runs directly after a
|
||||
|
||||
@@ -29,7 +29,7 @@ class RunnerCallback:
|
||||
self.safe_env = {}
|
||||
self.event_ct = 0
|
||||
self.model = model
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
|
||||
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
|
||||
self.wrapup_event_dispatched = False
|
||||
self.artifacts_processed = False
|
||||
self.extra_update_fields = {}
|
||||
@@ -95,17 +95,17 @@ class RunnerCallback:
|
||||
if self.parent_workflow_job_id:
|
||||
event_data['workflow_job_id'] = self.parent_workflow_job_id
|
||||
event_data['job_created'] = self.job_created
|
||||
if self.host_map:
|
||||
host = event_data.get('event_data', {}).get('host', '').strip()
|
||||
if host:
|
||||
event_data['host_name'] = host
|
||||
if host in self.host_map:
|
||||
event_data['host_id'] = self.host_map[host]
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
if event_data.get('event') == 'playbook_on_stats':
|
||||
event_data['host_map'] = self.host_map
|
||||
|
||||
host = event_data.get('event_data', {}).get('host', '').strip()
|
||||
if host:
|
||||
event_data['host_name'] = host
|
||||
if host in self.host_map:
|
||||
event_data['host_id'] = self.host_map[host]
|
||||
else:
|
||||
event_data['host_name'] = ''
|
||||
event_data['host_id'] = ''
|
||||
if event_data.get('event') == 'playbook_on_stats':
|
||||
event_data['host_map'] = self.host_map
|
||||
|
||||
if isinstance(self, RunnerCallbackForProjectUpdate):
|
||||
# need a better way to have this check.
|
||||
|
||||
@@ -114,7 +114,7 @@ class BaseTask(object):
|
||||
|
||||
def __init__(self):
|
||||
self.cleanup_paths = []
|
||||
self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
|
||||
self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
|
||||
self.runner_callback = self.callback_class(model=self.model)
|
||||
|
||||
def update_model(self, pk, _attempt=0, **updates):
|
||||
|
||||
@@ -27,7 +27,7 @@ from awx.main.utils.common import (
|
||||
)
|
||||
from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
|
||||
from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
|
||||
from awx.main.models import Instance, InstanceLink, UnifiedJob
|
||||
from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.utils.pglock import advisory_lock
|
||||
@@ -49,6 +49,70 @@ class ReceptorConnectionType(Enum):
|
||||
STREAMTLS = 2
|
||||
|
||||
|
||||
"""
|
||||
Translate receptorctl messages that come in over stdout into
|
||||
structured messages. Currently, these are error messages.
|
||||
"""
|
||||
|
||||
|
||||
class ReceptorErrorBase:
|
||||
_MESSAGE = 'Receptor Error'
|
||||
|
||||
def __init__(self, node: str = 'N/A', state_name: str = 'N/A'):
|
||||
self.node = node
|
||||
self.state_name = state_name
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__} '{self._MESSAGE}' on node '{self.node}' with state '{self.state_name}'"
|
||||
|
||||
|
||||
class WorkUnitError(ReceptorErrorBase):
|
||||
_MESSAGE = 'unknown work unit '
|
||||
|
||||
def __init__(self, work_unit_id: str, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.work_unit_id = work_unit_id
|
||||
|
||||
def __str__(self):
|
||||
return f"{super().__str__()} work unit id '{self.work_unit_id}'"
|
||||
|
||||
|
||||
class WorkUnitCancelError(WorkUnitError):
|
||||
_MESSAGE = 'error cancelling remote unit: unknown work unit '
|
||||
|
||||
|
||||
class WorkUnitResultsError(WorkUnitError):
|
||||
_MESSAGE = 'Failed to get results: unknown work unit '
|
||||
|
||||
|
||||
class UnknownError(ReceptorErrorBase):
|
||||
_MESSAGE = 'Unknown receptor ctl error'
|
||||
|
||||
def __init__(self, msg, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._MESSAGE = msg
|
||||
|
||||
|
||||
class FuzzyError:
|
||||
def __new__(self, e: RuntimeError, node: str, state_name: str):
|
||||
"""
|
||||
At the time of writing this comment, all of the sub-class detection
is centralized in this parent class. It's like a Router().
Someone may find it better to push down the error detection logic into
each sub-class.
|
||||
"""
|
||||
msg = e.args[0]
|
||||
|
||||
common_startswith = (WorkUnitCancelError, WorkUnitResultsError, WorkUnitError)
|
||||
|
||||
for klass in common_startswith:
|
||||
if msg.startswith(klass._MESSAGE):
|
||||
work_unit_id = msg[len(klass._MESSAGE) :]
|
||||
return klass(work_unit_id, node=node, state_name=state_name)
|
||||
|
||||
return UnknownError(msg, node=node, state_name=state_name)
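A short sketch of how this router classifies a receptorctl failure; the message text mirrors the _MESSAGE prefixes defined above and the work unit id is made up:

err = RuntimeError('unknown work unit qLL2JFNT')
receptor_err = FuzzyError(err, node='execution-1', state_name='Failed')

# startswith() matching selects WorkUnitError and captures the trailing id
assert isinstance(receptor_err, WorkUnitError)
assert receptor_err.work_unit_id == 'qLL2JFNT'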
|
||||
|
||||
|
||||
def read_receptor_config():
|
||||
# for K8S deployments, getting a lock is necessary as another process
|
||||
# may be re-writing the config at this time
|
||||
@@ -185,6 +249,7 @@ def run_until_complete(node, timing_data=None, **kwargs):
|
||||
timing_data['transmit_timing'] = run_start - transmit_start
|
||||
run_timing = 0.0
|
||||
stdout = ''
|
||||
state_name = 'local var never set'
|
||||
|
||||
try:
|
||||
resultfile = receptor_ctl.get_work_results(unit_id)
|
||||
@@ -205,13 +270,33 @@ def run_until_complete(node, timing_data=None, **kwargs):
|
||||
stdout = resultfile.read()
|
||||
stdout = str(stdout, encoding='utf-8')
|
||||
|
||||
except RuntimeError as e:
|
||||
receptor_e = FuzzyError(e, node, state_name)
|
||||
if type(receptor_e) in (
|
||||
WorkUnitError,
|
||||
WorkUnitResultsError,
|
||||
):
|
||||
logger.warning(f'While consuming job results: {receptor_e}')
|
||||
else:
|
||||
raise
|
||||
finally:
|
||||
if settings.RECEPTOR_RELEASE_WORK:
|
||||
res = receptor_ctl.simple_command(f"work release {unit_id}")
|
||||
if res != {'released': unit_id}:
|
||||
logger.warning(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')
|
||||
try:
|
||||
res = receptor_ctl.simple_command(f"work release {unit_id}")
|
||||
|
||||
receptor_ctl.close()
|
||||
if res != {'released': unit_id}:
|
||||
logger.warning(f'Could not confirm release of receptor work unit id {unit_id} from {node}, data: {res}')
|
||||
|
||||
receptor_ctl.close()
|
||||
except RuntimeError as e:
|
||||
receptor_e = FuzzyError(e, node, state_name)
|
||||
if type(receptor_e) in (
|
||||
WorkUnitError,
|
||||
WorkUnitCancelError,
|
||||
):
|
||||
logger.warning(f"While releasing work: {receptor_e}")
|
||||
else:
|
||||
logger.error(f"While releasing work: {receptor_e}")
|
||||
|
||||
if state_name.lower() == 'failed':
|
||||
work_detail = status.get('Detail', '')
|
||||
@@ -275,7 +360,7 @@ def _convert_args_to_cli(vargs):
|
||||
args = ['cleanup']
|
||||
for option in ('exclude_strings', 'remove_images'):
|
||||
if vargs.get(option):
|
||||
args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
|
||||
args.append('--{}="{}"'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
|
||||
for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'):
|
||||
if vargs.get(option) is True:
|
||||
args.append('--{}'.format(option.replace('_', '-')))
|
||||
@@ -676,36 +761,44 @@ RECEPTOR_CONFIG_STARTER = (
|
||||
)
|
||||
|
||||
|
||||
def should_update_config(instances):
|
||||
def should_update_config(new_config):
|
||||
'''
|
||||
checks whether the newly generated receptor config matches
the peer entries in the config file on disk
|
||||
'''
|
||||
current_config = read_receptor_config() # this gets receptor conf lock
|
||||
current_peers = []
|
||||
for config_entry in current_config:
|
||||
for key, value in config_entry.items():
|
||||
if key.endswith('-peer'):
|
||||
current_peers.append(value['address'])
|
||||
intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances]
|
||||
logger.debug(f"Peers current {current_peers} intended {intended_peers}")
|
||||
if set(current_peers) == set(intended_peers):
|
||||
return False  # config file is already up to date
|
||||
|
||||
return True
|
||||
current_config = read_receptor_config() # this gets receptor conf lock
|
||||
for config_entry in current_config:
|
||||
if config_entry not in new_config:
|
||||
logger.warning(f"{config_entry} should not be in receptor config. Updating.")
|
||||
return True
|
||||
for config_entry in new_config:
|
||||
if config_entry not in current_config:
|
||||
logger.warning(f"{config_entry} missing from receptor config. Updating.")
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def generate_config_data():
|
||||
# returns two values
|
||||
# receptor config - based on current database peers
|
||||
# should_update - If True, receptor_config differs from the receptor conf file on disk
|
||||
instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True)
|
||||
addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True)
|
||||
|
||||
receptor_config = list(RECEPTOR_CONFIG_STARTER)
|
||||
for instance in instances:
|
||||
peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
|
||||
receptor_config.append(peer)
|
||||
should_update = should_update_config(instances)
|
||||
for address in addresses:
|
||||
if address.get_peer_type():
|
||||
peer = {
|
||||
f'{address.get_peer_type()}': {
|
||||
'address': f'{address.get_full_address()}',
|
||||
'tls': 'tlsclient',
|
||||
}
|
||||
}
|
||||
receptor_config.append(peer)
|
||||
else:
|
||||
logger.warning(f"Receptor address {address} has unsupported peer type, skipping.")
|
||||
should_update = should_update_config(receptor_config)
|
||||
return receptor_config, should_update
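For a single hypothetical hop node address with peers_from_control_nodes=True and the default tcp protocol, the generated config is the RECEPTOR_CONFIG_STARTER entries followed by one peer entry per address, roughly:

receptor_config = [
    # ... entries from RECEPTOR_CONFIG_STARTER ...
    {'tcp-peer': {'address': 'hop1.example.org:27199', 'tls': 'tlsclient'}},
]
should_update = should_update_config(receptor_config)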
|
||||
|
||||
|
||||
@@ -747,14 +840,13 @@ def write_receptor_config():
|
||||
with lock:
|
||||
with open(__RECEPTOR_CONF, 'w') as file:
|
||||
yaml.dump(receptor_config, file, default_flow_style=False)
|
||||
|
||||
reload_receptor()
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def remove_deprovisioned_node(hostname):
|
||||
InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
|
||||
|
||||
node_jobs = UnifiedJob.objects.filter(
|
||||
execution_node=hostname,
|
||||
|
||||
@@ -6,6 +6,7 @@ import itertools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import psycopg
|
||||
from io import StringIO
|
||||
from contextlib import redirect_stdout
|
||||
import shutil
|
||||
@@ -62,7 +63,7 @@ from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanu
|
||||
from awx.main.consumers import emit_channel_notification
|
||||
from awx.main import analytics
|
||||
from awx.conf import settings_registry
|
||||
from awx.main.analytics.subsystem_metrics import Metrics
|
||||
from awx.main.analytics.subsystem_metrics import DispatcherMetrics
|
||||
|
||||
from rest_framework.exceptions import PermissionDenied
|
||||
|
||||
@@ -113,7 +114,7 @@ def dispatch_startup():
|
||||
cluster_node_heartbeat()
|
||||
reaper.startup_reaping()
|
||||
reaper.reap_waiting(grace_period=0)
|
||||
m = Metrics()
|
||||
m = DispatcherMetrics()
|
||||
m.reset_values()
|
||||
|
||||
|
||||
@@ -416,7 +417,7 @@ def handle_removed_image(remove_images=None):
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def cleanup_images_and_files():
|
||||
_cleanup_images_and_files()
|
||||
_cleanup_images_and_files(image_prune=True)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
@@ -495,7 +496,7 @@ def inspect_established_receptor_connections(mesh_status):
|
||||
update_links = []
|
||||
for link in all_links:
|
||||
if link.link_state != InstanceLink.States.REMOVING:
|
||||
if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}):
|
||||
if link.target.instance.hostname in active_receptor_conns.get(link.source.hostname, {}):
|
||||
if link.link_state is not InstanceLink.States.ESTABLISHED:
|
||||
link.link_state = InstanceLink.States.ESTABLISHED
|
||||
update_links.append(link)
|
||||
@@ -630,10 +631,18 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
|
||||
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.last_seen))
|
||||
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
|
||||
cause = e.__cause__
|
||||
if cause and hasattr(cause, 'sqlstate'):
|
||||
sqlstate = cause.sqlstate
|
||||
sqlstate_str = psycopg.errors.lookup(sqlstate)
|
||||
logger.debug('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
|
||||
|
||||
if sqlstate == psycopg.errors.NoData:
|
||||
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
|
||||
else:
|
||||
logger.exception("Error marking {} as lost.".format(other_inst.hostname))
|
||||
else:
|
||||
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
|
||||
logger.exception('No SQL state available. Error marking {} as lost'.format(other_inst.hostname))
|
||||
|
||||
# Run local reaper
|
||||
if worker_tasks is not None:
|
||||
@@ -788,10 +797,19 @@ def update_inventory_computed_fields(inventory_id):
|
||||
try:
|
||||
i.update_computed_fields()
|
||||
except DatabaseError as e:
|
||||
if 'did not affect any rows' in str(e):
|
||||
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
|
||||
return
|
||||
raise
|
||||
# https://github.com/django/django/blob/eff21d8e7a1cb297aedf1c702668b590a1b618f3/django/db/models/base.py#L1105
|
||||
# django raises DatabaseError("Forced update did not affect any rows.")
|
||||
|
||||
# if sqlstate is set then there was a real database error, so log it and re-raise
|
||||
cause = e.__cause__
|
||||
if cause and hasattr(cause, 'sqlstate'):
|
||||
sqlstate = cause.sqlstate
|
||||
sqlstate_str = psycopg.errors.lookup(sqlstate)
|
||||
logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
|
||||
raise
|
||||
|
||||
# otherwise
|
||||
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
|
||||
|
||||
|
||||
def update_smart_memberships_for_inventory(smart_inventory):
|
||||
|
||||
@@ -3,5 +3,5 @@
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Hello Message
|
||||
debug:
|
||||
ansible.builtin.debug:
|
||||
msg: "Hello World!"
|
||||
|
||||
awx/main/tests/data/inventory/plugins/terraform/env.json (new file, 3 lines)
@@ -0,0 +1,3 @@
{
    "TF_BACKEND_CONFIG_FILE": "{{ file_reference }}"
}
@@ -1,13 +1,8 @@
from awx.main.tests.functional.conftest import * # noqa
import os
import pytest


def pytest_addoption(parser):
parser.addoption("--release", action="store", help="a release version number, e.g., 3.3.0")


def pytest_generate_tests(metafunc):
# This is called for every test. Only get/set command line arguments
# if the argument is specified in the list of test "fixturenames".
option_value = metafunc.config.option.release
if 'release' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("release", [option_value])
@pytest.fixture()
def release():
return os.environ.get('VERSION_TARGET', '')
@@ -1,19 +1,16 @@
import pytest
import yaml
import itertools
from unittest import mock

from django.db.utils import IntegrityError

from awx.api.versioning import reverse
from awx.main.models import Instance
from awx.main.models import Instance, ReceptorAddress
from awx.api.views.instance_install_bundle import generate_group_vars_all_yml


def has_peer(group_vars, peer):
peers = group_vars.get('receptor_peers', [])
for p in peers:
if f"{p['host']}:{p['port']}" == peer:
if p['address'] == peer:
return True
return False
@@ -24,119 +21,314 @@ class TestPeers:
|
||||
def configure_settings(self, settings):
|
||||
settings.IS_K8S = True
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_prevent_peering_to_self(self, node_type):
|
||||
@pytest.mark.parametrize('node_type', ['hop', 'execution'])
|
||||
def test_peering_to_self(self, node_type, admin_user, patch):
|
||||
"""
|
||||
cannot peer to self
|
||||
"""
|
||||
control_instance = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
with pytest.raises(IntegrityError):
|
||||
control_instance.peers.add(control_instance)
|
||||
instance = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
addr = ReceptorAddress.objects.create(instance=instance, address='abc', canonical=True)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': instance.pk}),
|
||||
data={"hostname": "abc", "node_type": node_type, "peers": [addr.id]},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert 'Instance cannot peer to its own address.' in str(resp.data)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution'])
|
||||
def test_creating_node(self, node_type, admin_user, post):
|
||||
"""
|
||||
can only add hop and execution nodes via API
|
||||
"""
|
||||
post(
|
||||
resp = post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "node_type": node_type},
|
||||
user=admin_user,
|
||||
expect=400 if node_type in ['control', 'hybrid'] else 201,
|
||||
)
|
||||
if resp.status_code == 400:
|
||||
assert 'Can only create execution or hop nodes.' in str(resp.data)
|
||||
|
||||
def test_changing_node_type(self, admin_user, patch):
|
||||
"""
|
||||
cannot change node type
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='abc', node_type="hop")
|
||||
patch(
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_type": "execution"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert 'Cannot change node type.' in str(resp.data)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['hop', 'execution'])
|
||||
def test_listener_port_null(self, node_type, admin_user, post):
|
||||
"""
|
||||
listener_port can be None
|
||||
"""
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "node_type": node_type, "listener_port": None},
|
||||
@pytest.mark.parametrize(
|
||||
'payload_port, payload_peers_from, initial_port, initial_peers_from',
|
||||
[
|
||||
(-1, -1, None, None),
|
||||
(-1, -1, 27199, False),
|
||||
(-1, -1, 27199, True),
|
||||
(None, -1, None, None),
|
||||
(None, False, None, None),
|
||||
(-1, False, None, None),
|
||||
(27199, True, 27199, True),
|
||||
(27199, False, 27199, False),
|
||||
(27199, -1, 27199, True),
|
||||
(27199, -1, 27199, False),
|
||||
(-1, True, 27199, True),
|
||||
(-1, False, 27199, False),
|
||||
],
|
||||
)
|
||||
def test_no_op(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
|
||||
node = Instance.objects.create(hostname='abc', node_type='hop')
|
||||
if initial_port is not None:
|
||||
ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
else:
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 0
|
||||
|
||||
data = {'enabled': True} # Just to have something to post.
|
||||
if payload_port != -1:
|
||||
data['listener_port'] = payload_port
|
||||
if payload_peers_from != -1:
|
||||
data['peers_from_control_nodes'] = payload_peers_from
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=201,
|
||||
expect=200,
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)])
|
||||
def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user):
|
||||
"""
|
||||
only hop and execution nodes can have peers_from_control_nodes set to True
|
||||
"""
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789},
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == (0 if initial_port is None else 1)
|
||||
if initial_port is not None:
|
||||
ra = ReceptorAddress.objects.get(instance=node, canonical=True)
|
||||
assert ra.port == initial_port
|
||||
assert ra.peers_from_control_nodes == initial_peers_from
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'payload_port, payload_peers_from',
|
||||
[
|
||||
(27199, True),
|
||||
(27199, False),
|
||||
(27199, -1),
|
||||
],
|
||||
)
|
||||
def test_creates_canonical_address(self, payload_port, payload_peers_from, admin_user, patch):
|
||||
node = Instance.objects.create(hostname='abc', node_type='hop')
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 0
|
||||
|
||||
data = {'enabled': True} # Just to have something to post.
|
||||
if payload_port != -1:
|
||||
data['listener_port'] = payload_port
|
||||
if payload_peers_from != -1:
|
||||
data['peers_from_control_nodes'] = payload_peers_from
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=201 if allowed else 400,
|
||||
expect=200,
|
||||
)
|
||||
|
||||
def test_listener_port_is_required(self, admin_user, post):
|
||||
"""
|
||||
if adding instance to peers list, that instance must have listener_port set
|
||||
"""
|
||||
Instance.objects.create(hostname='abc', node_type="hop", listener_port=None)
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]},
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
ra = ReceptorAddress.objects.get(instance=node, canonical=True)
|
||||
assert ra.port == payload_port
|
||||
assert ra.peers_from_control_nodes == (payload_peers_from if payload_peers_from != -1 else False)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'payload_port, payload_peers_from, initial_port, initial_peers_from',
|
||||
[
|
||||
(None, False, 27199, True),
|
||||
(None, -1, 27199, True),
|
||||
(None, False, 27199, False),
|
||||
(None, -1, 27199, False),
|
||||
],
|
||||
)
|
||||
def test_deletes_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
|
||||
node = Instance.objects.create(hostname='abc', node_type='hop')
|
||||
ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
|
||||
data = {'enabled': True} # Just to have something to post.
|
||||
if payload_port != -1:
|
||||
data['listener_port'] = payload_port
|
||||
if payload_peers_from != -1:
|
||||
data['peers_from_control_nodes'] = payload_peers_from
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 0
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'payload_port, payload_peers_from, initial_port, initial_peers_from',
|
||||
[
|
||||
(27199, True, 27199, False),
|
||||
(27199, False, 27199, True),
|
||||
(-1, True, 27199, False),
|
||||
(-1, False, 27199, True),
|
||||
],
|
||||
)
|
||||
def test_updates_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
|
||||
node = Instance.objects.create(hostname='abc', node_type='hop')
|
||||
ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
|
||||
data = {'enabled': True} # Just to have something to post.
|
||||
if payload_port != -1:
|
||||
data['listener_port'] = payload_port
|
||||
if payload_peers_from != -1:
|
||||
data['peers_from_control_nodes'] = payload_peers_from
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
ra = ReceptorAddress.objects.get(instance=node, canonical=True)
|
||||
assert ra.port == initial_port # At the present time, changing ports is not allowed
|
||||
assert ra.peers_from_control_nodes == payload_peers_from
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg',
|
||||
[
|
||||
(-1, True, None, None, "Cannot enable peers_from_control_nodes"),
|
||||
(None, True, None, None, "Cannot enable peers_from_control_nodes"),
|
||||
(None, True, 21799, True, "Cannot enable peers_from_control_nodes"),
|
||||
(None, True, 21799, False, "Cannot enable peers_from_control_nodes"),
|
||||
(21800, -1, 21799, True, "Cannot change listener port"),
|
||||
(21800, True, 21799, True, "Cannot change listener port"),
|
||||
(21800, False, 21799, True, "Cannot change listener port"),
|
||||
(21800, -1, 21799, False, "Cannot change listener port"),
|
||||
(21800, True, 21799, False, "Cannot change listener port"),
|
||||
(21800, False, 21799, False, "Cannot change listener port"),
|
||||
],
|
||||
)
|
||||
def test_canonical_address_validation_error(self, payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg, admin_user, patch):
|
||||
node = Instance.objects.create(hostname='abc', node_type='hop')
|
||||
if initial_port is not None:
|
||||
ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
|
||||
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 1
|
||||
else:
|
||||
assert ReceptorAddress.objects.filter(instance=node).count() == 0
|
||||
|
||||
data = {'enabled': True} # Just to have something to post.
|
||||
if payload_port != -1:
|
||||
data['listener_port'] = payload_port
|
||||
if payload_peers_from != -1:
|
||||
data['peers_from_control_nodes'] = payload_peers_from
|
||||
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
|
||||
data=data,
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post):
|
||||
assert error_msg in str(resp.data)
|
||||
|
||||
def test_changing_managed_listener_port(self, admin_user, patch):
|
||||
"""
|
||||
if peers_from_control_nodes is True, listener_port must an integer
|
||||
Assert that all other combinations are allowed
|
||||
if instance is managed, cannot change listener port at all
|
||||
"""
|
||||
for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])):
|
||||
node_type, peers_from, listener_port = item
|
||||
# only disallowed case is when peers_from is True and listener port is None
|
||||
disallowed = peers_from and not listener_port
|
||||
post(
|
||||
url=reverse('api:instance_list'),
|
||||
data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port},
|
||||
user=admin_user,
|
||||
expect=400 if disallowed else 201,
|
||||
)
|
||||
hop = Instance.objects.create(hostname='abc', node_type="hop", managed=True)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"listener_port": 5678},
|
||||
user=admin_user,
|
||||
expect=400, # cannot set port
|
||||
)
|
||||
assert 'Cannot change listener port for managed nodes.' in str(resp.data)
|
||||
ReceptorAddress.objects.create(instance=hop, address='hop', port=27199, canonical=True)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"listener_port": None},
|
||||
user=admin_user,
|
||||
expect=400, # cannot unset port
|
||||
)
|
||||
assert 'Cannot change listener port for managed nodes.' in str(resp.data)
|
||||
|
||||
def test_bidirectional_peering(self, admin_user, patch):
|
||||
"""
|
||||
cannot peer to node that is already to peered to it
|
||||
if A -> B, then disallow B -> A
|
||||
"""
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||
hop1.peers.add(hop2addr)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||
data={"peers": [hop1addr.id]},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert 'Instance hop1 is already peered to this instance.' in str(resp.data)
|
||||
|
||||
def test_multiple_peers_same_instance(self, admin_user, patch):
|
||||
"""
|
||||
cannot peer to more than one address of the same instance
|
||||
"""
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop1addr1 = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
|
||||
hop1addr2 = ReceptorAddress.objects.create(instance=hop1, address='hop1alternate')
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||
data={"peers": [hop1addr1.id, hop1addr2.id]},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert 'Cannot peer to the same instance more than once.' in str(resp.data)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
|
||||
def test_changing_peers_control_nodes(self, node_type, admin_user, patch):
|
||||
"""
|
||||
for control nodes, peers field should not be
|
||||
modified directly via patch.
|
||||
"""
|
||||
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
|
||||
assert [hop1] == list(control.peers.all()) # only hop1 should be peered
|
||||
patch(
|
||||
control = Instance.objects.create(hostname='abc', node_type=node_type, managed=True)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||
assert [hop1addr] == list(control.peers.all()) # only hop1addr should be peered
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": ["hop2"]},
|
||||
data={"peers": [hop2addr.id]},
|
||||
user=admin_user,
|
||||
expect=400, # cannot add peers directly
|
||||
expect=400, # cannot add peers manually
|
||||
)
|
||||
assert 'Setting peers manually for managed nodes is not allowed.' in str(resp.data)
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": ["hop1"]},
|
||||
data={"peers": [hop1addr.id]},
|
||||
user=admin_user,
|
||||
expect=200, # patching with current peers list should be okay
|
||||
)
|
||||
patch(
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={"peers": []},
|
||||
user=admin_user,
|
||||
expect=400, # cannot remove peers directly
|
||||
)
|
||||
assert 'Setting peers manually for managed nodes is not allowed.' in str(resp.data)
|
||||
|
||||
patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
|
||||
data={},
|
||||
@@ -148,23 +340,25 @@ class TestPeers:
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||
data={"peers_from_control_nodes": True},
|
||||
user=admin_user,
|
||||
expect=200, # patching without data should be fine too
|
||||
expect=200,
|
||||
)
|
||||
assert {hop1, hop2} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node
|
||||
assert {hop1addr, hop2addr} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node
|
||||
|
||||
def test_disallow_changing_hostname(self, admin_user, patch):
|
||||
def test_changing_hostname(self, admin_user, patch):
|
||||
"""
|
||||
cannot change hostname
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
||||
patch(
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"hostname": "hop2"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
def test_disallow_changing_node_state(self, admin_user, patch):
|
||||
assert 'Cannot change hostname.' in str(resp.data)
|
||||
|
||||
def test_changing_node_state(self, admin_user, patch):
|
||||
"""
|
||||
only allow setting to deprovisioning
|
||||
"""
|
||||
@@ -175,12 +369,54 @@ class TestPeers:
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
patch(
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_state": "ready"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
assert "Can only change instances to the 'deprovisioning' state." in str(resp.data)
|
||||
|
||||
def test_changing_managed_node_state(self, admin_user, patch):
|
||||
"""
|
||||
cannot change node state of managed node
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"node_state": "deprovisioning"},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
assert 'Cannot deprovision managed nodes.' in str(resp.data)
|
||||
|
||||
def test_changing_managed_peers_from_control_nodes(self, admin_user, patch):
|
||||
"""
|
||||
cannot change peers_from_control_nodes of managed node
|
||||
"""
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True)
|
||||
ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True)
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"peers_from_control_nodes": False},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
assert 'Cannot change peers_from_control_nodes for managed nodes.' in str(resp.data)
|
||||
|
||||
hop.peers_from_control_nodes = False
|
||||
hop.save()
|
||||
|
||||
resp = patch(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||
data={"peers_from_control_nodes": False},
|
||||
user=admin_user,
|
||||
expect=400,
|
||||
)
|
||||
|
||||
assert 'Cannot change peers_from_control_nodes for managed nodes.' in str(resp.data)
|
||||
|
||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||
def test_control_node_automatically_peers(self, node_type):
|
||||
@@ -191,9 +427,10 @@ class TestPeers:
|
||||
peer to hop should be removed if hop is deleted
|
||||
"""
|
||||
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
|
||||
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
||||
hopaddr = ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True)
|
||||
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||
assert hop in control.peers.all()
|
||||
assert hopaddr in control.peers.all()
|
||||
hop.delete()
|
||||
assert not control.peers.exists()
|
||||
|
||||
@@ -203,26 +440,50 @@ class TestPeers:
|
||||
if a new node comes online, other peer relationships should
|
||||
remain intact
|
||||
"""
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
hop1.peers.add(hop2)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||
hop1.peers.add(hop2addr)
|
||||
|
||||
# a control node is added
|
||||
Instance.objects.create(hostname='control', node_type=node_type, listener_port=None)
|
||||
Instance.objects.create(hostname='control', node_type=node_type)
|
||||
|
||||
assert hop1.peers.exists()
|
||||
|
||||
def test_group_vars(self, get, admin_user):
|
||||
def test_reverse_peers(self, admin_user, get):
|
||||
"""
|
||||
if hop1 peers to hop2, hop1 should
|
||||
be in hop2's reverse_peers list
|
||||
"""
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||
hop1.peers.add(hop2addr)
|
||||
|
||||
resp = get(
|
||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
|
||||
assert hop1.pk in resp.data['reverse_peers']
|
||||
|
||||
def test_group_vars(self):
|
||||
"""
|
||||
control > hop1 > hop2 < execution
|
||||
"""
|
||||
control = Instance.objects.create(hostname='control', node_type='control', listener_port=None)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789)
|
||||
control = Instance.objects.create(hostname='control', node_type='control')
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, port=6789, canonical=True)
|
||||
|
||||
execution.peers.add(hop2)
|
||||
hop1.peers.add(hop2)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, port=6789, canonical=True)
|
||||
|
||||
execution = Instance.objects.create(hostname='execution', node_type='execution')
|
||||
ReceptorAddress.objects.create(instance=execution, address='execution', peers_from_control_nodes=False, port=6789, canonical=True)
|
||||
|
||||
execution.peers.add(hop2addr)
|
||||
hop1.peers.add(hop2addr)
|
||||
|
||||
control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
|
||||
hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
|
||||
@@ -265,13 +526,15 @@ class TestPeers:
|
||||
control = Instance.objects.create(hostname='control1', node_type='control')
|
||||
write_method.assert_not_called()
|
||||
|
||||
# new hop node with peers_from_control_nodes False (no)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
# new address with peers_from_control_nodes False (no)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=False, canonical=True)
|
||||
hop1.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
# new hop node with peers_from_control_nodes True (yes)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
# new address with peers_from_control_nodes True (yes)
|
||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True)
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
@@ -280,20 +543,21 @@ class TestPeers:
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
# new hop node with peers_from_control_nodes False and peered to another hop node (no)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
||||
hop2.peers.add(hop1)
|
||||
# new address with peers_from_control_nodes False and peered to another hop node (no)
|
||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||
ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, canonical=True)
|
||||
hop2.peers.add(hop1addr)
|
||||
hop2.delete()
|
||||
write_method.assert_not_called()
|
||||
|
||||
# changing peers_from_control_nodes to False (yes)
|
||||
hop1.peers_from_control_nodes = False
|
||||
hop1.save()
|
||||
hop1addr.peers_from_control_nodes = False
|
||||
hop1addr.save()
|
||||
write_method.assert_called()
|
||||
write_method.reset_mock()
|
||||
|
||||
# deleting hop node that has peers_from_control_nodes to False (no)
|
||||
hop1.delete()
|
||||
# deleting address that has peers_from_control_nodes to False (no)
|
||||
hop1.delete() # cascade deletes to hop1addr
|
||||
write_method.assert_not_called()
|
||||
|
||||
# deleting control nodes (no)
|
||||
@@ -315,8 +579,8 @@ class TestPeers:
|
||||
|
||||
# not peered, so config file should not be updated
|
||||
for i in range(3):
|
||||
Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False)
|
||||
|
||||
inst = Instance.objects.create(hostname=f"exNo-{i}", node_type='execution')
|
||||
ReceptorAddress.objects.create(instance=inst, address=f"exNo-{i}", port=6789, peers_from_control_nodes=False, canonical=True)
|
||||
_, should_update = generate_config_data()
|
||||
assert not should_update
|
||||
|
||||
@@ -324,11 +588,13 @@ class TestPeers:
|
||||
expected_peers = []
|
||||
for i in range(3):
|
||||
expected_peers.append(f"hop-{i}:6789")
|
||||
Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
||||
inst = Instance.objects.create(hostname=f"hop-{i}", node_type='hop')
|
||||
ReceptorAddress.objects.create(instance=inst, address=f"hop-{i}", port=6789, peers_from_control_nodes=True, canonical=True)
|
||||
|
||||
for i in range(3):
|
||||
expected_peers.append(f"exYes-{i}:6789")
|
||||
Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True)
|
||||
inst = Instance.objects.create(hostname=f"exYes-{i}", node_type='execution')
|
||||
ReceptorAddress.objects.create(instance=inst, address=f"exYes-{i}", port=6789, peers_from_control_nodes=True, canonical=True)
|
||||
|
||||
new_config, should_update = generate_config_data()
|
||||
assert should_update
|
||||
|
||||
@@ -3,15 +3,19 @@ import pytest
from unittest import mock
import urllib.parse
from unittest.mock import PropertyMock
import importlib

# Django
from django.urls import resolve
from django.http import Http404
from django.apps import apps
from django.core.handlers.exception import response_for_exception
from django.contrib.auth.models import User
from django.core.serializers.json import DjangoJSONEncoder
from django.db.backends.sqlite3.base import SQLiteCursorWrapper

from django.db.models.signals import post_migrate

# AWX
from awx.main.models.projects import Project
from awx.main.models.ha import Instance

@@ -41,10 +45,19 @@ from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand
from awx.main.models.oauth import OAuth2Application as Application
from awx.main.models.execution_environments import ExecutionEnvironment
from awx.main.utils import is_testing

__SWAGGER_REQUESTS__ = {}


# HACK: the dab_resource_registry app required ServiceID in migrations which checks do not run
dab_rr_initial = importlib.import_module('ansible_base.resource_registry.migrations.0001_initial')


if is_testing():
post_migrate.connect(lambda **kwargs: dab_rr_initial.create_service_id(apps, None))


@pytest.fixture(scope="session")
def swagger_autogen(requests=__SWAGGER_REQUESTS__):
return requests
@@ -0,0 +1,39 @@
|
||||
import pytest
|
||||
|
||||
from ansible_base.resource_registry.models import Resource
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
|
||||
def assert_has_resource(list_response, obj=None):
|
||||
data = list_response.data
|
||||
assert 'resource' in data['results'][0]['summary_fields']
|
||||
resource_data = data['results'][0]['summary_fields']['resource']
|
||||
assert resource_data['ansible_id']
|
||||
resource = Resource.objects.filter(ansible_id=resource_data['ansible_id']).first()
|
||||
assert resource
|
||||
assert resource.content_object
|
||||
if obj:
|
||||
objects = [Resource.objects.get(ansible_id=entry['summary_fields']['resource']['ansible_id']).content_object for entry in data['results']]
|
||||
assert obj in objects
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_ansible_id(organization, admin_user, get):
|
||||
url = reverse('api:organization_list')
|
||||
response = get(url=url, user=admin_user, expect=200)
|
||||
assert_has_resource(response, obj=organization)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_team_ansible_id(team, admin_user, get):
|
||||
url = reverse('api:team_list')
|
||||
response = get(url=url, user=admin_user, expect=200)
|
||||
assert_has_resource(response, obj=team)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_user_ansible_id(rando, admin_user, get):
|
||||
url = reverse('api:user_list')
|
||||
response = get(url=url, user=admin_user, expect=200)
|
||||
assert_has_resource(response, obj=rando)
|
||||
@@ -193,6 +193,7 @@ class TestInventorySourceInjectors:
('satellite6', 'theforeman.foreman.foreman'),
('insights', 'redhatinsights.insights.insights'),
('controller', 'awx.awx.tower'),
('terraform', 'cloud.terraform.terraform_state'),
],
)
def test_plugin_proper_names(self, source, proper_name):
@@ -101,6 +101,7 @@ def test_default_cred_types():
'satellite6',
'scm',
'ssh',
'terraform',
'thycotic_dsv',
'thycotic_tss',
'vault',
@@ -107,6 +107,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
|
||||
for filename in os.listdir(os.path.join(private_data_dir, subdir)):
|
||||
filename_list.append(os.path.join(subdir, filename))
|
||||
filename_list = sorted(filename_list, key=lambda fn: inverse_env.get(os.path.join(private_data_dir, fn), [fn])[0])
|
||||
inventory_content = ""
|
||||
for filename in filename_list:
|
||||
if filename in ('args', 'project'):
|
||||
continue # Ansible runner
|
||||
@@ -130,6 +131,7 @@ def read_content(private_data_dir, raw_env, inventory_update):
|
||||
dir_contents[abs_file_path] = f.read()
|
||||
# Declare a reference to inventory plugin file if it exists
|
||||
if abs_file_path.endswith('.yml') and 'plugin: ' in dir_contents[abs_file_path]:
|
||||
inventory_content = dir_contents[abs_file_path]
|
||||
referenced_paths.add(abs_file_path) # used as inventory file
|
||||
elif cache_file_regex.match(abs_file_path):
|
||||
file_aliases[abs_file_path] = 'cache_file'
|
||||
@@ -157,7 +159,11 @@ def read_content(private_data_dir, raw_env, inventory_update):
|
||||
content = {}
|
||||
for abs_file_path, file_content in dir_contents.items():
|
||||
# assert that all files laid down are used
|
||||
if abs_file_path not in referenced_paths and abs_file_path not in ignore_files:
|
||||
if (
|
||||
abs_file_path not in referenced_paths
|
||||
and to_container_path(abs_file_path, private_data_dir) not in inventory_content
|
||||
and abs_file_path not in ignore_files
|
||||
):
|
||||
raise AssertionError(
|
||||
"File {} is not referenced. References and files:\n{}\n{}".format(abs_file_path, json.dumps(env, indent=4), json.dumps(dir_contents, indent=4))
|
||||
)
|
||||
|
||||
30
awx/main/tests/functional/test_linkstate.py
Normal file
@@ -0,0 +1,30 @@
import pytest

from awx.main.models import Instance, ReceptorAddress, InstanceLink
from awx.main.tasks.system import inspect_established_receptor_connections


@pytest.mark.django_db
class TestLinkState:
    @pytest.fixture(autouse=True)
    def configure_settings(self, settings):
        settings.IS_K8S = True

    def test_inspect_established_receptor_connections(self):
        '''
        Change link state from ADDING to ESTABLISHED
        if the receptor status KnownConnectionCosts field
        has an entry for the source and target node.
        '''
        hop1 = Instance.objects.create(hostname='hop1')
        hop2 = Instance.objects.create(hostname='hop2')
        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', port=5678)
        InstanceLink.objects.create(source=hop1, target=hop2addr, link_state=InstanceLink.States.ADDING)

        # calling with empty KnownConnectionCosts should not change the link state
        inspect_established_receptor_connections({"KnownConnectionCosts": {}})
        assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ADDING

        mesh_state = {"KnownConnectionCosts": {"hop1": {"hop2": 1}}}
        inspect_established_receptor_connections(mesh_state)
        assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ESTABLISHED
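For reference, the behavior this test exercises mirrors the inspect_established_receptor_connections hunk near the top of this diff; a simplified sketch of that loop (the surrounding queryset and bulk update are omitted, and the helper name here is illustrative):

from awx.main.models import InstanceLink

def established_links_sketch(mesh_status, all_links):
    # mesh_status['KnownConnectionCosts'] maps source hostname -> {target hostname: cost};
    # a link counts as established once receptor reports a cost for its endpoints.
    active_receptor_conns = mesh_status.get('KnownConnectionCosts', {})
    update_links = []
    for link in all_links:
        if link.link_state == InstanceLink.States.REMOVING:
            continue
        if link.target.instance.hostname in active_receptor_conns.get(link.source.hostname, {}):
            if link.link_state is not InstanceLink.States.ESTABLISHED:
                link.link_state = InstanceLink.States.ESTABLISHED
                update_links.append(link)
    return update_links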
@@ -42,3 +42,29 @@ class TestMigrationSmoke:
|
||||
final_state = migrator.apply_tested_migration(final_migration)
|
||||
Instance = final_state.apps.get_model('main', 'Instance')
|
||||
assert Instance.objects.filter(hostname='foobar').count() == 1
|
||||
|
||||
def test_receptor_address(self, migrator):
|
||||
old_state = migrator.apply_initial_migration(('main', '0188_add_bitbucket_dc_webhook'))
|
||||
Instance = old_state.apps.get_model('main', 'Instance')
|
||||
for i in range(3):
|
||||
Instance.objects.create(hostname=f'foobar{i}', node_type='hop')
|
||||
foo = Instance.objects.create(hostname='foo', node_type='execution', listener_port=1234)
|
||||
bar = Instance.objects.create(hostname='bar', node_type='execution', listener_port=None)
|
||||
bar.peers.add(foo)
|
||||
|
||||
new_state = migrator.apply_tested_migration(
|
||||
('main', '0189_inbound_hop_nodes'),
|
||||
)
|
||||
Instance = new_state.apps.get_model('main', 'Instance')
|
||||
ReceptorAddress = new_state.apps.get_model('main', 'ReceptorAddress')
|
||||
|
||||
# We can now test how our migration worked, new field is there:
|
||||
assert ReceptorAddress.objects.filter(address='foo', port=1234).count() == 1
|
||||
assert not ReceptorAddress.objects.filter(address='bar').exists()
|
||||
|
||||
bar = Instance.objects.get(hostname='bar')
|
||||
fooaddr = ReceptorAddress.objects.get(address='foo')
|
||||
|
||||
bar_peers = bar.peers.all()
|
||||
assert len(bar_peers) == 1
|
||||
assert fooaddr in bar_peers
|
||||
|
||||
@@ -411,14 +411,14 @@ def test_project_delete(delete, organization, admin_user):
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'order_by, expected_names, expected_ids',
|
||||
'order_by, expected_names',
|
||||
[
|
||||
('name', ['alice project', 'bob project', 'shared project'], [1, 2, 3]),
|
||||
('-name', ['shared project', 'bob project', 'alice project'], [3, 2, 1]),
|
||||
('name', ['alice project', 'bob project', 'shared project']),
|
||||
('-name', ['shared project', 'bob project', 'alice project']),
|
||||
],
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test_project_list_ordering_by_name(get, order_by, expected_names, expected_ids, organization_factory):
|
||||
def test_project_list_ordering_by_name(get, order_by, expected_names, organization_factory):
|
||||
'ensure sorted order of project list is maintained correctly when the requested order is invalid or not applicable'
|
||||
objects = organization_factory(
|
||||
'org1',
|
||||
@@ -426,13 +426,11 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, expected_i
|
||||
superusers=['admin'],
|
||||
)
|
||||
project_names = []
|
||||
project_ids = []
|
||||
# TODO: ask for an order by here that doesn't apply
|
||||
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
|
||||
for x in range(len(results)):
|
||||
project_names.append(results[x]['name'])
|
||||
project_ids.append(results[x]['id'])
|
||||
assert project_names == expected_names and project_ids == expected_ids
|
||||
assert project_names == expected_names
|
||||
|
||||
|
||||
@pytest.mark.parametrize('order_by', ('name', '-name'))
|
||||
@@ -450,7 +448,8 @@ def test_project_list_ordering_with_duplicate_names(get, order_by, organization_
|
||||
for x in range(3):
|
||||
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
|
||||
project_ids[x] = [proj['id'] for proj in results]
|
||||
assert project_ids[0] == project_ids[1] == project_ids[2] == [1, 2, 3, 4, 5]
|
||||
assert project_ids[0] == project_ids[1] == project_ids[2]
|
||||
assert project_ids[0] == sorted(project_ids[0])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
90
awx/main/tests/functional/test_routing.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import pytest
|
||||
|
||||
from django.contrib.auth.models import AnonymousUser
|
||||
|
||||
from channels.routing import ProtocolTypeRouter
|
||||
from channels.testing.websocket import WebsocketCommunicator
|
||||
|
||||
|
||||
from awx.main.consumers import WebsocketSecretAuthHelper
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def application():
|
||||
# code in routing hits the db on import because .. settings cache
|
||||
from awx.main.routing import application_func
|
||||
|
||||
yield application_func(ProtocolTypeRouter)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def websocket_server_generator(application):
|
||||
def fn(endpoint):
|
||||
return WebsocketCommunicator(application, endpoint)
|
||||
|
||||
return fn
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.django_db
|
||||
class TestWebsocketRelay:
|
||||
@pytest.fixture
|
||||
def websocket_relay_secret_generator(self, settings):
|
||||
def fn(secret, set_broadcast_websocket_secret=False):
|
||||
secret_backup = settings.BROADCAST_WEBSOCKET_SECRET
|
||||
settings.BROADCAST_WEBSOCKET_SECRET = 'foobar'
|
||||
res = ('secret'.encode('utf-8'), WebsocketSecretAuthHelper.construct_secret().encode('utf-8'))
|
||||
if set_broadcast_websocket_secret is False:
|
||||
settings.BROADCAST_WEBSOCKET_SECRET = secret_backup
|
||||
return res
|
||||
|
||||
return fn
|
||||
|
||||
@pytest.fixture
|
||||
def websocket_relay_secret(self, settings, websocket_relay_secret_generator):
|
||||
return websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=True)
|
||||
|
||||
async def test_authorized(self, websocket_server_generator, websocket_relay_secret):
|
||||
server = websocket_server_generator('/websocket/relay/')
|
||||
|
||||
server.scope['headers'] = (websocket_relay_secret,)
|
||||
connected, _ = await server.connect()
|
||||
assert connected is True
|
||||
|
||||
async def test_not_authorized(self, websocket_server_generator):
|
||||
server = websocket_server_generator('/websocket/relay/')
|
||||
connected, _ = await server.connect()
|
||||
assert connected is False, "Connection to the relay websocket without auth. We expected the client to be denied."
|
||||
|
||||
async def test_wrong_secret(self, websocket_server_generator, websocket_relay_secret_generator):
|
||||
server = websocket_server_generator('/websocket/relay/')
|
||||
|
||||
server.scope['headers'] = (websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=False),)
|
||||
connected, _ = await server.connect()
|
||||
assert connected is False
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.django_db
|
||||
class TestWebsocketEventConsumer:
|
||||
async def test_unauthorized_anonymous(self, websocket_server_generator):
|
||||
server = websocket_server_generator('/websocket/')
|
||||
|
||||
server.scope['user'] = AnonymousUser()
|
||||
connected, _ = await server.connect()
|
||||
assert connected is False, "Anonymous user should NOT be allowed to login."
|
||||
|
||||
@pytest.mark.skip(reason="Ran out of coding time.")
|
||||
async def test_authorized(self, websocket_server_generator, application, admin):
|
||||
server = websocket_server_generator('/websocket/')
|
||||
|
||||
"""
|
||||
I ran out of time. Here is what I was thinking ...
|
||||
Inject a valid session into the cookies in the header
|
||||
|
||||
server.scope['headers'] = (
|
||||
(b'cookie', ...),
|
||||
)
|
||||
"""
|
||||
connected, _ = await server.connect()
|
||||
assert connected is True, "User should be allowed in via cookies auth via a session key in the cookies"
|
||||
@@ -1,11 +1,6 @@
# Python
from unittest import mock
import uuid

# patch python-ldap
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap # NOQA

# Load development settings for base variables.
from awx.settings.development import * # NOQA
122
awx/main/tests/unit/commands/test_dump_auth_config.py
Normal file
@@ -0,0 +1,122 @@
|
||||
from io import StringIO
|
||||
import json
|
||||
from django.core.management import call_command
|
||||
from django.test import TestCase, override_settings
|
||||
|
||||
|
||||
settings_dict = {
|
||||
"SOCIAL_AUTH_SAML_SP_ENTITY_ID": "SP_ENTITY_ID",
|
||||
"SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "SP_PUBLIC_CERT",
|
||||
"SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "SP_PRIVATE_KEY",
|
||||
"SOCIAL_AUTH_SAML_ORG_INFO": "ORG_INFO",
|
||||
"SOCIAL_AUTH_SAML_TECHNICAL_CONTACT": "TECHNICAL_CONTACT",
|
||||
"SOCIAL_AUTH_SAML_SUPPORT_CONTACT": "SUPPORT_CONTACT",
|
||||
"SOCIAL_AUTH_SAML_SP_EXTRA": "SP_EXTRA",
|
||||
"SOCIAL_AUTH_SAML_SECURITY_CONFIG": "SECURITY_CONFIG",
|
||||
"SOCIAL_AUTH_SAML_EXTRA_DATA": "EXTRA_DATA",
|
||||
"SOCIAL_AUTH_SAML_ENABLED_IDPS": {
|
||||
"Keycloak": {
|
||||
"attr_last_name": "last_name",
|
||||
"attr_groups": "groups",
|
||||
"attr_email": "email",
|
||||
"attr_user_permanent_id": "name_id",
|
||||
"attr_username": "username",
|
||||
"entity_id": "https://example.com/auth/realms/awx",
|
||||
"url": "https://example.com/auth/realms/awx/protocol/saml",
|
||||
"x509cert": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
|
||||
"attr_first_name": "first_name",
|
||||
}
|
||||
},
|
||||
"SOCIAL_AUTH_SAML_CALLBACK_URL": "CALLBACK_URL",
|
||||
"AUTH_LDAP_1_SERVER_URI": "SERVER_URI",
|
||||
"AUTH_LDAP_1_BIND_DN": "BIND_DN",
|
||||
"AUTH_LDAP_1_BIND_PASSWORD": "BIND_PASSWORD",
|
||||
"AUTH_LDAP_1_GROUP_SEARCH": ["GROUP_SEARCH"],
|
||||
"AUTH_LDAP_1_GROUP_TYPE": "string object",
|
||||
"AUTH_LDAP_1_GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"},
|
||||
"AUTH_LDAP_1_USER_DN_TEMPLATE": "USER_DN_TEMPLATE",
|
||||
"AUTH_LDAP_1_USER_SEARCH": ["USER_SEARCH"],
|
||||
"AUTH_LDAP_1_USER_ATTR_MAP": {
|
||||
"email": "email",
|
||||
"last_name": "last_name",
|
||||
"first_name": "first_name",
|
||||
},
|
||||
"AUTH_LDAP_1_CONNECTION_OPTIONS": {},
|
||||
"AUTH_LDAP_1_START_TLS": None,
|
||||
}
|
||||
|
||||
|
||||
@override_settings(**settings_dict)
|
||||
class TestDumpAuthConfigCommand(TestCase):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.expected_config = [
|
||||
{
|
||||
"type": "awx.authentication.authenticator_plugins.saml",
|
||||
"name": "Keycloak",
|
||||
"enabled": True,
|
||||
"create_objects": True,
|
||||
"users_unique": False,
|
||||
"remove_users": True,
|
||||
"configuration": {
|
||||
"SP_ENTITY_ID": "SP_ENTITY_ID",
|
||||
"SP_PUBLIC_CERT": "SP_PUBLIC_CERT",
|
||||
"SP_PRIVATE_KEY": "SP_PRIVATE_KEY",
|
||||
"ORG_INFO": "ORG_INFO",
|
||||
"TECHNICAL_CONTACT": "TECHNICAL_CONTACT",
|
||||
"SUPPORT_CONTACT": "SUPPORT_CONTACT",
|
||||
"SP_EXTRA": "SP_EXTRA",
|
||||
"SECURITY_CONFIG": "SECURITY_CONFIG",
|
||||
"EXTRA_DATA": "EXTRA_DATA",
|
||||
"ENABLED_IDPS": {
|
||||
"Keycloak": {
|
||||
"attr_last_name": "last_name",
|
||||
"attr_groups": "groups",
|
||||
"attr_email": "email",
|
||||
"attr_user_permanent_id": "name_id",
|
||||
"attr_username": "username",
|
||||
"entity_id": "https://example.com/auth/realms/awx",
|
||||
"url": "https://example.com/auth/realms/awx/protocol/saml",
|
||||
"x509cert": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
|
||||
"attr_first_name": "first_name",
|
||||
}
|
||||
},
|
||||
"CALLBACK_URL": "CALLBACK_URL",
|
||||
"IDP_URL": "https://example.com/auth/realms/awx/protocol/saml",
|
||||
"IDP_X509_CERT": "-----BEGIN CERTIFICATE-----\nMIIDDjCCAfYCCQCPBeVvpo8+VzANBgkqhkiG9w0BAQsFADBJMQswCQYDVQQGEwJV\nUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEMMAoGA1UECgwDYXd4MQ4w\nDAYDVQQDDAVsb2NhbDAeFw0yNDAxMTgxNDA4MzFaFw0yNTAxMTcxNDA4MzFaMEkx\nCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0GA1UEBwwGRHVyaGFtMQwwCgYD\nVQQKDANhd3gxDjAMBgNVBAMMBWxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A\nMIIBCgKCAQEAzouj93oyFXsHEABdPESh3CYpp5QJJBM4TLYIIolk6PFOFIVwBuFY\nfExi5w7Hh4A42lPM6RkrT+u3h7LV39H9MRUfqygOSmaxICTOI0sU9ROHc44fWWzN\n756OP4B5zSiqG82q8X7nYVkcID+2F/3ekPLMOlWn53OrcdfKKDIcqavoTkQJefc2\nggXU3WgVCxGki/qCm+e5cZ1Cpl/ykSLOT8dWMEzDd12kin66zJ3KYz9F2Q5kQTh4\nKRAChnBBoEqzOfENHEAaHALiXOlVSy61VcLbtvskRMMwBtsydlnd9n/HGnktgrid\n3Ca0z5wBTHWjAOBvCKxKJuDa+jmyHEnpcQIDAQABMA0GCSqGSIb3DQEBCwUAA4IB\nAQBXvmyPWgXhC26cHYJBgQqj57dZ+n7p00kM1J+27oDMjGmbmX+XIKXLWazw/rG3\ngDjw9MXI2tVCrQMX0ohjphaULXhb/VBUPDOiW+k7C6AB3nZySFRflcR3cM4f83zF\nMoBd0549h5Red4p72FeOKNJRTN8YO4ooH9YNh5g0FQkgqn7fV9w2CNlomeKIW9zP\nm8tjFw0cJUk2wEYBVl8O7ko5rgNlzhkLoZkMvJhKa99AQJA6MAdyoLl1lv56Kq4X\njk+mMEiz9SaInp+ILQ1uQxZEwuC7DoGRW76rV4Fnie6+DLft4WKZfX1497mx8NV3\noR0abutJaKnCj07dwRu4/EsK\n-----END CERTIFICATE-----",
|
||||
"IDP_ENTITY_ID": "https://example.com/auth/realms/awx",
|
||||
"IDP_ATTR_EMAIL": "email",
|
||||
"IDP_GROUPS": "groups",
|
||||
"IDP_ATTR_USERNAME": "username",
|
||||
"IDP_ATTR_LAST_NAME": "last_name",
|
||||
"IDP_ATTR_FIRST_NAME": "first_name",
|
||||
"IDP_ATTR_USER_PERMANENT_ID": "name_id",
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "awx.authentication.authenticator_plugins.ldap",
|
||||
"name": "1",
|
||||
"enabled": True,
|
||||
"create_objects": True,
|
||||
"users_unique": False,
|
||||
"remove_users": True,
|
||||
"configuration": {
|
||||
"SERVER_URI": "SERVER_URI",
|
||||
"BIND_DN": "BIND_DN",
|
||||
"BIND_PASSWORD": "BIND_PASSWORD",
|
||||
"CONNECTION_OPTIONS": {},
|
||||
"GROUP_TYPE": "str",
|
||||
"GROUP_TYPE_PARAMS": {"member_attr": "member", "name_attr": "cn"},
|
||||
"GROUP_SEARCH": ["GROUP_SEARCH"],
|
||||
"START_TLS": None,
|
||||
"USER_DN_TEMPLATE": "USER_DN_TEMPLATE",
|
||||
"USER_ATTR_MAP": {"email": "email", "last_name": "last_name", "first_name": "first_name"},
|
||||
"USER_SEARCH": ["USER_SEARCH"],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def test_json_returned_from_cmd(self):
|
||||
output = StringIO()
|
||||
call_command("dump_auth_config", stdout=output)
|
||||
assert json.loads(output.getvalue()) == self.expected_config
|
||||
32
awx/main/tests/unit/models/test_receptor_address.py
Normal file
@@ -0,0 +1,32 @@
from awx.main.models import ReceptorAddress
import pytest

ReceptorAddress()


@pytest.mark.parametrize(
    'address, protocol, port, websocket_path, expected',
    [
        ('foo', 'tcp', 27199, '', 'foo:27199'),
        ('bar', 'ws', 6789, '', 'wss://bar:6789'),
        ('mal', 'ws', 6789, 'path', 'wss://mal:6789/path'),
        ('example.com', 'ws', 443, 'path', 'wss://example.com:443/path'),
    ],
)
def test_get_full_address(address, protocol, port, websocket_path, expected):
    receptor_address = ReceptorAddress(address=address, protocol=protocol, port=port, websocket_path=websocket_path)
    assert receptor_address.get_full_address() == expected


@pytest.mark.parametrize(
    'protocol, expected',
    [
        ('tcp', 'tcp-peer'),
        ('ws', 'ws-peer'),
        ('wss', 'ws-peer'),
        ('foo', None),
    ],
)
def test_get_peer_type(protocol, expected):
    receptor_address = ReceptorAddress(protocol=protocol)
    assert receptor_address.get_peer_type() == expected
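The parametrized expectations above pin down the address formatting; a small self-contained sketch that reproduces them is shown below. The real logic lives on the ReceptorAddress model, which this diff does not show in full, so these standalone helpers are illustrative only:

# Hypothetical helpers mirroring the expected outputs in the tests above.
def get_full_address(address, protocol, port, websocket_path=''):
    # tcp peers are plain host:port; websocket peers get a wss:// URL with an optional path
    if protocol == 'tcp':
        return f"{address}:{port}"
    path = f"/{websocket_path}" if websocket_path else ""
    return f"wss://{address}:{port}{path}"

def get_peer_type(protocol):
    # receptor config uses tcp-peer / ws-peer entry types; unknown protocols map to None
    return {'tcp': 'tcp-peer', 'ws': 'ws-peer', 'wss': 'ws-peer'}.get(protocol)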
64
awx/main/tests/unit/tasks/test_system.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from awx.main.tasks.system import update_inventory_computed_fields
|
||||
from awx.main.models import Inventory
|
||||
from django.db import DatabaseError
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_logger():
|
||||
with patch("awx.main.tasks.system.logger") as logger:
|
||||
yield logger
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_inventory():
|
||||
return MagicMock(spec=Inventory)
|
||||
|
||||
|
||||
def test_update_inventory_computed_fields_existing_inventory(mock_logger, mock_inventory):
|
||||
# Mocking the Inventory.objects.filter method to return a non-empty queryset
|
||||
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
|
||||
mock_filter.return_value.exists.return_value = True
|
||||
mock_filter.return_value.__getitem__.return_value = mock_inventory
|
||||
|
||||
# Mocking the update_computed_fields method
|
||||
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
|
||||
update_inventory_computed_fields(1)
|
||||
|
||||
# Assertions
|
||||
mock_filter.assert_called_once_with(id=1)
|
||||
mock_update_computed_fields.assert_called_once()
|
||||
|
||||
# You can add more assertions based on your specific requirements
|
||||
|
||||
|
||||
def test_update_inventory_computed_fields_missing_inventory(mock_logger):
|
||||
# Mocking the Inventory.objects.filter method to return an empty queryset
|
||||
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
|
||||
mock_filter.return_value.exists.return_value = False
|
||||
|
||||
update_inventory_computed_fields(1)
|
||||
|
||||
# Assertions
|
||||
mock_filter.assert_called_once_with(id=1)
|
||||
mock_logger.error.assert_called_once_with("Update Inventory Computed Fields failed due to missing inventory: 1")
|
||||
|
||||
|
||||
def test_update_inventory_computed_fields_database_error_nosqlstate(mock_logger, mock_inventory):
|
||||
# Mocking the Inventory.objects.filter method to return a non-empty queryset
|
||||
with patch("awx.main.tasks.system.Inventory.objects.filter") as mock_filter:
|
||||
mock_filter.return_value.exists.return_value = True
|
||||
mock_filter.return_value.__getitem__.return_value = mock_inventory
|
||||
|
||||
# Mocking the update_computed_fields method
|
||||
with patch.object(mock_inventory, "update_computed_fields") as mock_update_computed_fields:
|
||||
# Simulating the update_computed_fields method to explicitly raise a DatabaseError
|
||||
mock_update_computed_fields.side_effect = DatabaseError("Some error")
|
||||
|
||||
update_inventory_computed_fields(1)
|
||||
|
||||
# Assertions
|
||||
mock_filter.assert_called_once_with(id=1)
|
||||
mock_update_computed_fields.assert_called_once()
|
||||
mock_inventory.update_computed_fields.assert_called_once()
|
||||
@@ -1085,6 +1085,27 @@ class TestJobCredentials(TestJobExecution):
        assert open(env['ANSIBLE_NET_SSH_KEYFILE'], 'r').read() == self.EXAMPLE_PRIVATE_KEY
        assert safe_env['ANSIBLE_NET_PASSWORD'] == HIDDEN_PASSWORD

    def test_terraform_cloud_credentials(self, job, private_data_dir, mock_me):
        terraform = CredentialType.defaults['terraform']()
        hcl_config = '''
        backend "s3" {
            bucket = "s3_sample_bucket"
            key = "/tf_state/"
            region = "us-east-1"
        }
        '''
        credential = Credential(pk=1, credential_type=terraform, inputs={'configuration': hcl_config})
        credential.inputs['configuration'] = encrypt_field(credential, 'configuration')
        job.credentials.add(credential)

        env = {}
        safe_env = {}
        credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)

        local_path = to_host_path(env['TF_BACKEND_CONFIG_FILE'], private_data_dir)
        config = open(local_path, 'r').read()
        assert config == hcl_config

    def test_custom_environment_injectors_with_jinja_syntax_error(self, private_data_dir, mock_me):
        some_cloud = CredentialType(
            kind='cloud',

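test_terraform_cloud_credentials verifies that injecting a Terraform credential writes the HCL 'configuration' input to a file under the job's private data directory and exports its path as TF_BACKEND_CONFIG_FILE. A rough sketch of that write-and-export step is below; the helper name and the exact target directory are assumptions for illustration, not the AWX injector.

# Hypothetical helper, for illustration only: writes the backend HCL to a
# temp file inside private_data_dir and exports its path, which is the
# behaviour the assertions above check via TF_BACKEND_CONFIG_FILE.
import os
import tempfile


def write_terraform_backend_config(hcl_config, private_data_dir, env):
    fd, path = tempfile.mkstemp(dir=private_data_dir, suffix='.tfbackend')
    with os.fdopen(fd, 'w') as f:
        f.write(hcl_config)
    env['TF_BACKEND_CONFIG_FILE'] = path
    return path
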
@@ -121,6 +121,10 @@ def test_get_model_for_valid_type(model_type, model_class):
    assert common.get_model_for_type(model_type) == model_class


def test_is_testing():
    assert common.is_testing() is True


@pytest.mark.parametrize("model_type,model_class", [(name, cls) for cls, name in TEST_MODELS])
def test_get_capacity_type(model_type, model_class):
    if model_type in ('job', 'ad_hoc_command', 'inventory_update', 'job_template'):

@@ -3,7 +3,7 @@ from awx.main.tasks.receptor import _convert_args_to_cli


def test_file_cleanup_scenario():
    args = _convert_args_to_cli({'exclude_strings': ['awx_423_', 'awx_582_'], 'file_pattern': '/tmp/awx_*_*'})
-    assert ' '.join(args) == 'cleanup --exclude-strings=awx_423_ awx_582_ --file-pattern=/tmp/awx_*_*'
+    assert ' '.join(args) == 'cleanup --exclude-strings="awx_423_ awx_582_" --file-pattern=/tmp/awx_*_*'


def test_image_cleanup_scenario():
@@ -17,5 +17,5 @@ def test_image_cleanup_scenario():
        }
    )
    assert (
-        ' '.join(args) == 'cleanup --remove-images=quay.invalid/foo/bar:latest quay.invalid/foo/bar:devel --image-prune --process-isolation-executable=podman'
+        ' '.join(args) == 'cleanup --remove-images="quay.invalid/foo/bar:latest quay.invalid/foo/bar:devel" --image-prune --process-isolation-executable=podman'
    )

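Both assertions were updated to expect list-valued options rendered as one quoted flag value, e.g. --exclude-strings="awx_423_ awx_582_". The sketch below shows the kind of conversion that produces such output; it is an assumed illustration, not the actual _convert_args_to_cli implementation.

# Illustrative conversion only (assumed shape, not awx.main.tasks.receptor):
# list values are space-joined and double-quoted so they travel as a single
# flag value; booleans become bare flags; scalars become key=value.
def convert_args_to_cli_sketch(params):
    args = ['cleanup']
    for key, value in params.items():
        flag = '--' + key.replace('_', '-')
        if isinstance(value, (list, tuple)):
            args.append('{}="{}"'.format(flag, ' '.join(value)))
        elif value is True:
            args.append(flag)
        else:
            args.append('{}={}'.format(flag, value))
    return args


# Matches the first expectation above:
# convert_args_to_cli_sketch({'exclude_strings': ['awx_423_', 'awx_582_'], 'file_pattern': '/tmp/awx_*_*'})
# -> ['cleanup', '--exclude-strings="awx_423_ awx_582_"', '--file-pattern=/tmp/awx_*_*']
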
@@ -7,6 +7,7 @@ import json
import yaml
import logging
import time
+import psycopg
import os
import subprocess
import re
@@ -23,7 +24,7 @@ from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.dateparse import parse_datetime
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
-from django.db import connection, transaction, ProgrammingError, IntegrityError
+from django.db import connection, DatabaseError, transaction, ProgrammingError, IntegrityError
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
@@ -136,7 +137,7 @@ def underscore_to_camelcase(s):
@functools.cache
def is_testing(argv=None):
    '''Return True if running django or py.test unit tests.'''
-    if 'PYTEST_CURRENT_TEST' in os.environ.keys():
+    if os.environ.get('DJANGO_SETTINGS_MODULE') == 'awx.main.tests.settings_for_test':
        return True
    argv = sys.argv if argv is None else argv
    if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
@@ -1155,11 +1156,25 @@ def create_partition(tblname, start=None):
                f'ALTER TABLE {tblname} ATTACH PARTITION {tblname}_{partition_label} '
                f'FOR VALUES FROM (\'{start_timestamp}\') TO (\'{end_timestamp}\');'
            )

    except (ProgrammingError, IntegrityError) as e:
-        if 'already exists' in str(e):
-            logger.info(f'Caught known error due to partition creation race: {e}')
-        else:
-            raise
+        cause = e.__cause__
+        if cause and hasattr(cause, 'sqlstate'):
+            sqlstate = cause.sqlstate
+            sqlstate_cls = psycopg.errors.lookup(sqlstate)
+
+            if psycopg.errors.DuplicateTable == sqlstate_cls or psycopg.errors.UniqueViolation == sqlstate_cls:
+                logger.info(f'Caught known error due to partition creation race: {e}')
+            else:
+                logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_cls))
+                raise
+    except DatabaseError as e:
+        cause = e.__cause__
+        if cause and hasattr(cause, 'sqlstate'):
+            sqlstate = cause.sqlstate
+            sqlstate_str = psycopg.errors.lookup(sqlstate)
+            logger.error('SQL Error state: {} - {}'.format(sqlstate, sqlstate_str))
+        raise


def cleanup_new_process(func):

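The new create_partition handling relies on psycopg 3's SQLSTATE support: the driver exception chained in __cause__ carries .sqlstate, and psycopg.errors.lookup() maps that code to an exception class, so a benign partition-creation race (duplicate table or duplicate key) can be separated from genuine database failures. A quick standalone illustration:

# psycopg 3 maps five-character SQLSTATE codes to exception classes; this is
# the same lookup create_partition() uses to recognise a benign race.
import psycopg

assert psycopg.errors.lookup('42P07') == psycopg.errors.DuplicateTable    # "relation ... already exists"
assert psycopg.errors.lookup('23505') == psycopg.errors.UniqueViolation   # duplicate key value
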
@@ -20,7 +20,6 @@ from awx.main.analytics.broadcast_websocket import (
    RelayWebsocketStats,
    RelayWebsocketStatsManager,
)
-import awx.main.analytics.subsystem_metrics as s_metrics

logger = logging.getLogger('awx.main.wsrelay')

@@ -54,7 +53,6 @@ class WebsocketRelayConnection:
        self.protocol = protocol
        self.verify_ssl = verify_ssl
        self.channel_layer = None
-        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)
        self.producers = dict()
        self.connected = False

@@ -304,20 +302,36 @@ class WebSocketRelayManager(object):
        self.stats_mgr.start()

        # Set up a pg_notify consumer for allowing web nodes to "provision" and "deprovision" themselves gracefully.
-        database_conf = settings.DATABASES['default']
-        async_conn = await psycopg.AsyncConnection.connect(
-            dbname=database_conf['NAME'],
-            host=database_conf['HOST'],
-            user=database_conf['USER'],
-            password=database_conf['PASSWORD'],
-            port=database_conf['PORT'],
-            **database_conf.get("OPTIONS", {}),
-        )
-        await async_conn.set_autocommit(True)
-        event_loop.create_task(self.on_ws_heartbeat(async_conn))
+        database_conf = settings.DATABASES['default'].copy()
+        database_conf['OPTIONS'] = database_conf.get('OPTIONS', {}).copy()
+
+        for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
+            database_conf[k] = v
+        for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
+            database_conf['OPTIONS'][k] = v
+
+        task = None

        # Establishes a websocket connection to /websocket/relay on all API servers
        while True:
+            if not task or task.done():
+                try:
+                    async_conn = await psycopg.AsyncConnection.connect(
+                        dbname=database_conf['NAME'],
+                        host=database_conf['HOST'],
+                        user=database_conf['USER'],
+                        password=database_conf['PASSWORD'],
+                        port=database_conf['PORT'],
+                        **database_conf.get("OPTIONS", {}),
+                    )
+                    await async_conn.set_autocommit(True)
+
+                    task = event_loop.create_task(self.on_ws_heartbeat(async_conn), name="on_ws_heartbeat")
+                    logger.info("Creating `on_ws_heartbeat` task in event loop.")
+
+                except Exception as e:
+                    logger.warning(f"Failed to connect to database for pg_notify: {e}")
+
            future_remote_hosts = self.known_hosts.keys()
            current_remote_hosts = self.relay_connections.keys()
            deleted_remote_hosts = set(current_remote_hosts) - set(future_remote_hosts)
@@ -341,7 +355,7 @@ class WebSocketRelayManager(object):

            if deleted_remote_hosts:
                logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
-                await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts)
+                await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])

            if new_remote_hosts:
                logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")

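The one-line cleanup fix in the last hunk is worth calling out: asyncio.gather() expects awaitables as separate positional arguments, so passing a generator expression gives it a single non-awaitable object, while unpacking the list schedules every cleanup_offline_host coroutine. A minimal standalone demonstration (hypothetical names, not AWX code):

import asyncio


async def cleanup(host):
    await asyncio.sleep(0)
    return f"cleaned {host}"


async def main():
    hosts = ["node1", "node2"]

    # Broken form: gather() receives one generator object, which is not awaitable.
    try:
        await asyncio.gather(cleanup(h) for h in hosts)
    except TypeError as exc:
        print(f"generator form fails: {exc}")

    # Fixed form: each coroutine is passed (and therefore awaited) individually.
    print(await asyncio.gather(*[cleanup(h) for h in hosts]))


asyncio.run(main())
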
@@ -216,42 +216,59 @@
- block:
    - name: Fetch galaxy roles from roles/requirements.(yml/yaml)
      ansible.builtin.command:
-       cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}"
+       cmd: "ansible-galaxy role install -r {{ req_file }} {{ verbosity }}"
      register: galaxy_result
-     with_fileglob:
-       - "{{ project_path | quote }}/roles/requirements.yaml"
-       - "{{ project_path | quote }}/roles/requirements.yml"
+     vars:
+       req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+       req_candidates:
+         files:
+           - "{{ project_path | quote }}/roles/requirements.yml"
+           - "{{ project_path | quote }}/roles/requirements.yaml"
+         skip: True
      changed_when: "'was installed successfully' in galaxy_result.stdout"
-     when: roles_enabled | bool
+     when:
+       - roles_enabled | bool
+       - req_file
      tags:
        - install_roles

    - name: Fetch galaxy collections from collections/requirements.(yml/yaml)
      ansible.builtin.command:
-       cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}"
+       cmd: "ansible-galaxy collection install -r {{ req_file }} {{ verbosity }}"
      register: galaxy_collection_result
-     with_fileglob:
-       - "{{ project_path | quote }}/collections/requirements.yaml"
-       - "{{ project_path | quote }}/collections/requirements.yml"
+     vars:
+       req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+       req_candidates:
+         files:
+           - "{{ project_path | quote }}/collections/requirements.yml"
+           - "{{ project_path | quote }}/collections/requirements.yaml"
+         skip: True
      changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout"
      when:
        - "ansible_version.full is version_compare('2.9', '>=')"
        - collections_enabled | bool
+       - req_file
      tags:
        - install_collections

    # requirements.yml in project root can be either "old" (roles only) or "new" (collections+roles) format
    - name: Fetch galaxy roles and collections from requirements.(yml/yaml)
      ansible.builtin.command:
-       cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}"
+       cmd: "ansible-galaxy install -r {{ req_file }} {{ verbosity }}"
      register: galaxy_combined_result
-     with_fileglob:
-       - "{{ project_path | quote }}/requirements.yaml"
-       - "{{ project_path | quote }}/requirements.yml"
+     vars:
+       req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+       req_candidates:
+         files:
+           - "{{ project_path | quote }}/requirements.yaml"
+           - "{{ project_path | quote }}/requirements.yml"
+         skip: True
      changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout"
      when:
        - "ansible_version.full is version_compare('2.10', '>=')"
        - collections_enabled | bool
        - roles_enabled | bool
+       - req_file
      tags:
        - install_collections
        - install_roles

awx/resource_api.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from ansible_base.resource_registry.registry import ParentResource, ResourceConfig, ServiceAPIConfig, SharedResource
from ansible_base.resource_registry.shared_types import OrganizationType, TeamType, UserType

from awx.main import models


class APIConfig(ServiceAPIConfig):
    service_type = "awx"


RESOURCE_LIST = (
    ResourceConfig(
        models.Organization,
        shared_resource=SharedResource(serializer=OrganizationType, is_provider=False),
    ),
    ResourceConfig(models.User, shared_resource=SharedResource(serializer=UserType, is_provider=False), name_field="username"),
    ResourceConfig(
        models.Team,
        shared_resource=SharedResource(serializer=TeamType, is_provider=False),
        parent_resources=[ParentResource(model=models.Organization, field_name="organization")],
    ),
)

@@ -353,8 +353,11 @@ INSTALLED_APPS = [
    'awx.sso',
    'solo',
    'ansible_base.rest_filters',
    'ansible_base.jwt_consumer',
    'ansible_base.resource_registry',
]


INTERNAL_IPS = ('127.0.0.1',)

MAX_PAGE_SIZE = 200
@@ -362,6 +365,7 @@ REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
    'PAGE_SIZE': 25,
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'ansible_base.jwt_consumer.awx.auth.AwxJWTAuthentication',
        'awx.api.authentication.LoggedOAuth2Authentication',
        'awx.api.authentication.SessionAuthentication',
        'awx.api.authentication.LoggedBasicAuthentication',
@@ -755,6 +759,14 @@ SATELLITE6_INSTANCE_ID_VAR = 'foreman_id,foreman.id'
INSIGHTS_INSTANCE_ID_VAR = 'insights_id'
INSIGHTS_EXCLUDE_EMPTY_GROUPS = False

# ---------------------
# -- Terraform State --
# ---------------------
# TERRAFORM_ENABLED_VAR =
# TERRAFORM_ENABLED_VALUE =
TERRAFORM_INSTANCE_ID_VAR = 'id'
TERRAFORM_EXCLUDE_EMPTY_GROUPS = True

# ---------------------
# ----- Custom -----
# ---------------------
@@ -1076,11 +1088,49 @@ HOST_METRIC_SUMMARY_TASK_LAST_TS = None
HOST_METRIC_SUMMARY_TASK_INTERVAL = 7  # days


# TODO: cmeyers, replace with the register pattern.
# The register pattern is particularly nice for this because we need
# to know the process to start the thread that will be the server.
# The registration location should be the same location as we would
# call MetricsServer.start().
# Note: if we don't get to this TODO, then at least create constants
# for the service strings below.
# TODO: cmeyers, break this out into a separate django app so other
# projects can take advantage.

METRICS_SERVICE_CALLBACK_RECEIVER = 'callback_receiver'
METRICS_SERVICE_DISPATCHER = 'dispatcher'
METRICS_SERVICE_WEBSOCKETS = 'websockets'

METRICS_SUBSYSTEM_CONFIG = {
    'server': {
        METRICS_SERVICE_CALLBACK_RECEIVER: {
            'port': 8014,
        },
        METRICS_SERVICE_DISPATCHER: {
            'port': 8015,
        },
        METRICS_SERVICE_WEBSOCKETS: {
            'port': 8016,
        },
    }
}

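Each service block above currently carries only a port. As a small illustration, a consumer could resolve its port from this setting as sketched below; the helper name is hypothetical and not part of AWX.

from django.conf import settings


def metrics_port_for(service_name):
    # e.g. metrics_port_for(settings.METRICS_SERVICE_WEBSOCKETS) -> 8016
    return settings.METRICS_SUBSYSTEM_CONFIG['server'][service_name]['port']
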
# django-ansible-base
ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'
ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api'

from ansible_base.lib import dynamic_config  # noqa: E402

settings_file = os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py')
include(settings_file)

# Add a postfix to the API URL patterns.
# Example: if set to '', the API pattern will be /api.
# Example: if set to 'controller', the API pattern will be /api AND /api/controller.
OPTIONAL_API_URLPATTERN_PREFIX = ''

# Use the AWX base view, to give 401 on unauthenticated requests
ANSIBLE_BASE_CUSTOM_VIEW_PARENT = 'awx.api.generics.APIView'

@@ -21,7 +21,7 @@ from split_settings.tools import optional, include

from .defaults import *  # NOQA

# awx-manage shell_plus --notebook
-NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '8888', '--allow-root', '--no-browser']
+NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '9888', '--allow-root', '--no-browser']

# print SQL queries in shell_plus
SHELL_PLUS_PRINT_SQL = False
@@ -72,6 +72,8 @@ AWX_CALLBACK_PROFILE = True
# Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False

+# Needed for launching runserver in debug mode
+# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================

# Store a snapshot of default settings at this point before loading any

@@ -39,7 +39,7 @@
      {% else %}
      <li><a href="{% url 'api:login' %}?next={{ request.get_full_path }}" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="Log in"><span class="glyphicon glyphicon-log-in"></span>Log in</a></li>
      {% endif %}
-     <li><a href="//docs.ansible.com/ansible-tower/{{short_tower_version}}/html/towerapi/index.html" target="_blank" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'API Guide' %}"><span class="glyphicon glyphicon-question-sign"></span><span class="visible-xs-inline">{% trans 'API Guide' %}</span></a></li>
+     <li><a href="//ansible.readthedocs.io/projects/awx/en/latest/rest_api/index.html" target="_blank" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'API Guide' %}"><span class="glyphicon glyphicon-question-sign"></span><span class="visible-xs-inline">{% trans 'API Guide' %}</span></a></li>
      <li><a href="/" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Back to application' %}"><span class="glyphicon glyphicon-circle-arrow-left"></span><span class="visible-xs-inline">{% trans 'Back to application' %}</span></a></li>
      <li class="hidden-xs"><a href="#" class="resize" data-toggle="tooltip" data-placement="bottom" data-delay="1000" title="{% trans 'Resize' %}"><span class="glyphicon glyphicon-resize-full"></span></a></li>
    </ul>

@@ -59,6 +59,7 @@ register(
    help_text=_('Maximum number of job events for the UI to retrieve within a single request.'),
    category=_('UI'),
    category_slug='ui',
+   hidden=True,
)

register(
@@ -68,4 +69,5 @@ register(
    help_text=_('If disabled, the page will not refresh when events are received. Reloading the page will be required to get the latest details.'),
    category=_('UI'),
    category_slug='ui',
+   hidden=True,
)

Some files were not shown because too many files have changed in this diff.