Mirror of https://github.com/ansible/awx.git, synced 2026-04-01 08:15:09 -02:30.

Compare commits: 23.6.0...daoneill-i (174 commits)
Commits in this range, by abbreviated SHA1:

bfb0d15e6f 33010a2e02 14454cc670 7ab2bca16e f0f655f2c3 4286d411a7 06ad32ed8e 1ebff23232
700de14c76 8605e339df e50954ce40 7caca60308 f4e13af056 decdb56288 bcd4c2e8ef d663066ac5
1ceebb275c f78ba282a6 81d88df757 0bdb01a9e9 cd91fbf59f f240e640e5 46f489185e dbb80fb7e3
cb3d357ce1 dfa4db9266 6906a88dc9 1f7be9258c dcce024424 79d7179c72 4d80f886e0 5179333185
362e11aaf2 decff01fa4 a14cc8199d b6436826f6 2109b5039e b6f9b73418 40a8a3cb2f 19f80c0a26
5d1bb2125e 99c512bcef ed0329f5db dd53345397 f66cde51d7 d1c31687fc 38424487f1 b0565e9937
44d85b589c 46f816e7a4 54b32c10f0 20202054cc e84e2962d0 2259047527 f429ef6ca7 4b637c1319
4c41f6b018 3ae72219b4 402c29dc52 8eb4a9a2a0 36f3b46726 55c6a319dc 56b6a07f6e 519fd22bec
2e5306ae8e 068e6acbd5 f9a23a5645 40150a2be8 b79aa5b1ed b3aeb962ce 2300b8fddf 3a3284b5df
2359004cc1 694d7e98e7 8c9c02c975 8a902debd5 6dcaa09dfb 21fd6af0f9 eeae1d59d4 a252d0ae33
48971411cc 083c05f12a b558397b67 904c6001e9 818e11dfdc 7fc13a0569 92c693f14e f2417f0ed2
8f22188116 05502c0af8 957ce59bf7 cc4cc37d46 1e254c804c 1b44bebed3 a4cf55bdba c333d0e82f
b093c89a84 f98493aa61 c36d2b0485 8ddb604bf1 cd9dd43be7 82323390a7 4c5ac1d3da 9c06370e33
449b95d1eb 1712540c8e 7cf639d8eb dbfcc40d7c 73d2c92ae3 24a4242147 92ce85b688 9531f8377a
15a16b3dd1 a37e7bf147 a2fcd2f97a c394ffdd19 69102cf265 a188798543 60108ebd10 8c7c00451a
7a1ed406da f916ffe1e9 901dbd697e d8b4a9825e 6db66c5f81 82ad7dcf40 93500f9fea 9ba70c151d
46dc61253f 6cb2cd18b0 5d1dd8ec41 9f69daf787 16ece5de7e ab0e9265c5 04cbbbccfa d1cacf64de
5385eb0fb3 7d7503279d d860d1d91b 3a17c45b64 bca68bcdf1 c32f234ebb 5cb3d3b078 5199cc5246
387e877485 d54c5934ff 2fa5116197 527755d986 f9c0b97c53 65655f84de 9aa3d5584a 266e31d71a
a1bbe75aed 695f1cf892 0ab103d8c4 9ac1c0f6c2 2e168d8177 d4f7bfef18 985a8d499d e3b52f0169
f69f600cff 74cd23be5c 209747d88e d91da39f81 5cd029df96 5a93a519f6 5f5cd960d5 42701f32fe
30d4df788f 1bcd71a8ac 43be90f051 bb1922cdbb 403f545071 a06a2a883c
.github/actions/awx_devel_image/action.yml (10 changed lines, vendored)

@@ -11,6 +11,12 @@ runs:
     shell: bash
     run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

+  - name: Set lower case owner name
+    shell: bash
+    run: echo "OWNER_LC=${OWNER,,}" >> $GITHUB_ENV
+    env:
+      OWNER: '${{ github.repository_owner }}'
+
   - name: Log in to registry
     shell: bash
     run: |
@@ -18,11 +24,11 @@ runs:

   - name: Pre-pull latest devel image to warm cache
     shell: bash
-    run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
+    run: docker pull ghcr.io/${OWNER_LC}/awx_devel:${{ github.base_ref }}

   - name: Build image for current source checkout
     shell: bash
     run: |
-      DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
+      DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
       COMPOSE_TAG=${{ github.base_ref }} \
       make docker-compose-build
.github/actions/issue_metrics/issue_metrics.yml (new file, 40 lines, vendored)

@@ -0,0 +1,40 @@
+name: Monthly issue metrics
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: '3 2 1 * *'
+
+permissions:
+  issues: write
+  pull-requests: read
+
+jobs:
+  build:
+    name: issue metrics
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get dates for last month
+        shell: bash
+        run: |
+          # Calculate the first day of the previous month
+          first_day=$(date -d "last month" +%Y-%m-01)
+
+          # Calculate the last day of the previous month
+          last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d)
+
+          # Set an environment variable with the date range
+          echo "$first_day..$last_day"
+          echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV"
+
+      - name: Run issue-metrics tool
+        uses: github/issue-metrics@v2
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SEARCH_QUERY: 'repo:ansible/awx is:issue created:${{ env.last_month }} -reason:"not planned"'
+
+      - name: Create issue
+        uses: peter-evans/create-issue-from-file@v4
+        with:
+          title: Monthly issue metrics report
+          token: ${{ secrets.GITHUB_TOKEN }}
+          content-filepath: ./issue_metrics.md
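For readers less used to GNU date arithmetic, the "previous calendar month" window this step computes can be sketched in Python; this is illustrative only, the workflow itself uses the shell commands above.

```python
# Illustrative sketch of the date range the workflow builds with GNU date.
from datetime import date, timedelta

today = date.today()
last_day = today.replace(day=1) - timedelta(days=1)  # last day of previous month
first_day = last_day.replace(day=1)                  # first day of previous month

last_month = f"{first_day:%Y-%m-%d}..{last_day:%Y-%m-%d}"
print(last_month)  # e.g. 2024-01-01..2024-01-31
```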
.github/actions/run_awx_devel/action.yml (4 changed lines, vendored)

@@ -35,7 +35,7 @@ runs:
   - name: Start AWX
     shell: bash
     run: |
-      DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
+      DEV_DOCKER_OWNER=${{ github.repository_owner }} \
       COMPOSE_TAG=${{ github.base_ref }} \
      COMPOSE_UP_OPTS="-d" \
       make docker-compose
@@ -71,7 +71,7 @@ runs:
     id: data
     shell: bash
     run: |
-      AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
+      AWX_IP=$(docker inspect -f '{{.NetworkSettings.Networks._sources_awx.IPAddress}}' tools_awx_1)
       ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
       echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
       echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT
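The inspect change is worth unpacking: the old `range` template concatenates the IPAddress of every network the container is attached to, while the new one selects the `_sources_awx` network explicitly. A rough Python equivalent of the new lookup, assuming standard `docker inspect` JSON output:

```python
# Sketch: pull the container IP on the _sources_awx network from docker inspect.
import json
import subprocess

raw = subprocess.check_output(['docker', 'inspect', 'tools_awx_1'])
networks = json.loads(raw)[0]['NetworkSettings']['Networks']

# Old template: concatenate IPs across all networks (ambiguous with >1 network).
all_ips = ''.join(net['IPAddress'] for net in networks.values())

# New template: address the _sources_awx network directly.
awx_ip = networks['_sources_awx']['IPAddress']
print(awx_ip)
```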
.github/pr_labeler.yml (3 changed lines, vendored)

@@ -15,5 +15,4 @@

 "dependencies":
   - any: ["awx/ui/package.json"]
-  - any: ["requirements/*.txt"]
-  - any: ["requirements/requirements.in"]
+  - any: ["requirements/*"]
.github/workflows/devel_images.yml (63 changed lines, vendored)

@@ -9,22 +9,43 @@ on:
       - release_*
       - feature_*
 jobs:
-  push:
-    if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
+  push-development-images:
     runs-on: ubuntu-latest
-    timeout-minutes: 60
+    timeout-minutes: 120
     permissions:
       packages: write
       contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        build-targets:
+          - image-name: awx_devel
+            make-target: docker-compose-buildx
+          - image-name: awx_kube_devel
+            make-target: awx-kube-dev-buildx
+          - image-name: awx
+            make-target: awx-kube-buildx
     steps:
+      - name: Skipping build of awx image for non-awx repository
+        run: |
+          echo "Skipping build of awx image for non-awx repository"
+          exit 0
+        if: matrix.build-targets.image-name == 'awx' && !endsWith(github.repository, '/awx')
+
       - uses: actions/checkout@v3

-      - name: Get python version from Makefile
-        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3

-      - name: Set lower case owner name
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Set GITHUB_ENV variables
         run: |
-          echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
+          echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
+          echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
+          echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
         env:
           OWNER: '${{ github.repository_owner }}'

@@ -37,23 +58,19 @@ jobs:
         run: |
           echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

-      - name: Pre-pull image to warm build cache
-        run: |
-          docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
-          docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
-          docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :
+      - name: Setup node and npm
+        uses: actions/setup-node@v2
+        with:
+          node-version: '16.13.1'
+        if: matrix.build-targets.image-name == 'awx'

-      - name: Build images
+      - name: Prebuild UI for awx image (to speed up build process)
         run: |
-          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
-          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
-          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
+          sudo apt-get install gettext
+          make ui-release
+          make ui-next
+        if: matrix.build-targets.image-name == 'awx'

-      - name: Push development images
-        run: |
-          docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
-          docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
-
-      - name: Push AWX k8s image, only for upstream and feature branches
-        run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
-        if: endsWith(github.repository, '/awx')
+      - name: Build and push AWX devel images
+        run: |
+          make ${{ matrix.build-targets.make-target }}
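The net effect of this rewrite: one serial build-and-push job becomes three parallel matrix jobs, each invoking a single buildx make target. A small Python sketch of how that matrix fans out (names taken from the workflow; the loop itself is illustrative):

```python
# Each matrix entry becomes its own job running `make <make-target>`.
matrix = [
    {'image-name': 'awx_devel', 'make-target': 'docker-compose-buildx'},
    {'image-name': 'awx_kube_devel', 'make-target': 'awx-kube-dev-buildx'},
    {'image-name': 'awx', 'make-target': 'awx-kube-buildx'},
]

for entry in matrix:
    # The awx-image job exits early on forks, mirroring the workflow's `if:` guard.
    print(f"push-development-images ({entry['image-name']}): make {entry['make-target']}")
```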
.github/workflows/feature_branch_deletion.yml (12 changed lines, vendored)

@@ -2,12 +2,10 @@
 name: Feature branch deletion cleanup
 env:
   LC_ALL: "C.UTF-8"  # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
-on:
-  delete:
-    branches:
-      - feature_**
+on: delete
 jobs:
-  push:
+  branch_delete:
+    if: ${{ github.event.ref_type == 'branch' && startsWith(github.event.ref, 'feature_') }}
     runs-on: ubuntu-latest
     timeout-minutes: 20
     permissions:
@@ -22,6 +20,4 @@ jobs:
     run: |
       ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
       ansible localhost -c local -m aws_s3 \
-        -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delete permission=public-read"
+        -a "bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=delobj permission=public-read"
.github/workflows/promote.yml (2 changed lines, vendored)

@@ -66,7 +66,7 @@ jobs:
   - name: Build awxkit and upload to pypi
     run: |
       git reset --hard
-      cd awxkit && python3 setup.py bdist_wheel
+      cd awxkit && python3 setup.py sdist bdist_wheel
       twine upload \
         -r ${{ env.pypi_repo }} \
         -u ${{ secrets.PYPI_USERNAME }} \
.github/workflows/stage.yml (28 changed lines, vendored)

@@ -86,13 +86,19 @@ jobs:
         -e push=yes \
         -e awx_official=yes

-      - name: Log in to GHCR
-        run: |
-          echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
+      - name: Log into registry ghcr.io
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}

-      - name: Log in to Quay
-        run: |
-          echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
+      - name: Log into registry quay.io
+        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USER }}
+          password: ${{ secrets.QUAY_TOKEN }}

       - name: tag awx-ee:latest with version input
         run: |
@@ -100,13 +106,13 @@ jobs:
       docker tag quay.io/ansible/awx-ee:latest ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}
       docker push ghcr.io/${{ github.repository_owner }}/awx-ee:${{ github.event.inputs.version }}

-      - name: Build and stage awx-operator
+      - name: Stage awx-operator image
         working-directory: awx-operator
         run: |
-          BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version }} \
+          BUILD_ARGS="--build-arg DEFAULT_AWX_VERSION=${{ github.event.inputs.version}} \
           --build-arg OPERATOR_VERSION=${{ github.event.inputs.operator_version }}" \
-          IMAGE_TAG_BASE=ghcr.io/${{ github.repository_owner }}/awx-operator \
-          VERSION=${{ github.event.inputs.operator_version }} make docker-build docker-push
+          IMG=ghcr.io/${{ github.repository_owner }}/awx-operator:${{ github.event.inputs.operator_version }} \
+          make docker-buildx

       - name: Run test deployment with awx-operator
         working-directory: awx-operator
.gitignore (3 changed lines, vendored)

@@ -169,3 +169,6 @@ awx/ui_next/build
 # Docs build stuff
 docs/docsite/build/
 _readthedocs/
+
+# Pyenv
+.python-version
Makefile (54 changed lines)

@@ -10,7 +10,7 @@ KIND_BIN ?= $(shell which kind)
 CHROMIUM_BIN=/tmp/chrome-linux/chrome
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
 MANAGEMENT_COMMAND ?= awx-manage
-VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
+VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py 2> /dev/null)

 # ansible-test requires semver compatable version, so we allow overrides to hack it
 COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -75,6 +75,9 @@ SDIST_TAR_FILE ?= $(SDIST_TAR_NAME).tar.gz
 I18N_FLAG_FILE = .i18n_built

+## PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
+PLATFORMS ?= linux/amd64,linux/arm64  # linux/ppc64le,linux/s390x
+
 .PHONY: awx-link clean clean-tmp clean-venv requirements requirements_dev \
     develop refresh adduser migrate dbchange \
     receiver test test_unit test_coverage coverage_html \
@@ -532,13 +535,14 @@ docker-compose-sources: .git/hooks/pre-commit
     -e enable_vault=$(VAULT) \
     -e vault_tls=$(VAULT_TLS) \
     -e enable_tacacs=$(TACACS) \
     $(EXTRA_SOURCES_ANSIBLE_OPTS)

 docker-compose: awx/projects docker-compose-sources
     ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
     ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
         -e enable_vault=$(VAULT) \
-        -e vault_tls=$(VAULT_TLS);
+        -e vault_tls=$(VAULT_TLS) \
+        -e enable_ldap=$(LDAP);
     $(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans

 docker-compose-credential-plugins: awx/projects docker-compose-sources
@@ -585,12 +589,27 @@ docker-compose-build: Dockerfile.dev
       --build-arg BUILDKIT_INLINE_CACHE=1 \
       --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .

+.PHONY: docker-compose-buildx
+## Build awx_devel image for docker compose development environment for multiple architectures
+docker-compose-buildx: Dockerfile.dev
+    - docker buildx create --name docker-compose-buildx
+    docker buildx use docker-compose-buildx
+    - docker buildx build \
+      --push \
+      --build-arg BUILDKIT_INLINE_CACHE=1 \
+      --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) \
+      --platform=$(PLATFORMS) \
+      --tag $(DEVEL_IMAGE_NAME) \
+      -f Dockerfile.dev .
+    - docker buildx rm docker-compose-buildx
+
 docker-clean:
     -$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
     -$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

 docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
-    docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
+    docker volume rm -f tools_awx_db tools_vault_1 tools_ldap_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)

 docker-refresh: docker-clean docker-compose
@@ -647,6 +666,21 @@ awx-kube-build: Dockerfile
       --build-arg HEADLESS=$(HEADLESS) \
       -t $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) .

+## Build multi-arch awx image for deployment on Kubernetes environment.
+awx-kube-buildx: Dockerfile
+    - docker buildx create --name awx-kube-buildx
+    docker buildx use awx-kube-buildx
+    - docker buildx build \
+      --push \
+      --build-arg VERSION=$(VERSION) \
+      --build-arg SETUPTOOLS_SCM_PRETEND_VERSION=$(VERSION) \
+      --build-arg HEADLESS=$(HEADLESS) \
+      --platform=$(PLATFORMS) \
+      --tag $(DEV_DOCKER_TAG_BASE)/awx:$(COMPOSE_TAG) \
+      -f Dockerfile .
+    - docker buildx rm awx-kube-buildx
+
 .PHONY: Dockerfile.kube-dev
 ## Generate Docker.kube-dev for awx_kube_devel image
 Dockerfile.kube-dev: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
@@ -663,6 +697,18 @@ awx-kube-dev-build: Dockerfile.kube-dev
       --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
       -t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .

+## Build and push multi-arch awx_kube_devel image for development on local Kubernetes environment.
+awx-kube-dev-buildx: Dockerfile.kube-dev
+    - docker buildx create --name awx-kube-dev-buildx
+    docker buildx use awx-kube-dev-buildx
+    - docker buildx build \
+      --push \
+      --build-arg BUILDKIT_INLINE_CACHE=1 \
+      --cache-from=$(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
+      --platform=$(PLATFORMS) \
+      --tag $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) \
+      -f Dockerfile.kube-dev .
+    - docker buildx rm awx-kube-dev-buildx
+
 kind-dev-load: awx-kube-dev-build
     $(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
README.md

@@ -7,7 +7,7 @@ AWX provides a web-based user interface, REST API, and task engine built on top

 To install AWX, please view the [Install guide](./INSTALL.md).

-To learn more about using AWX, and Tower, view the [Tower docs site](http://docs.ansible.com/ansible-tower/index.html).
+To learn more about using AWX, view the [AWX docs site](https://ansible.readthedocs.io/projects/awx/en/latest/).

 The AWX Project Frequently Asked Questions can be found [here](https://www.ansible.com/awx-project-faq).
awx/api/generics.py

@@ -30,8 +30,8 @@ from rest_framework.permissions import IsAuthenticated
 from rest_framework.renderers import StaticHTMLRenderer
 from rest_framework.negotiation import DefaultContentNegotiation

-from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend
-from ansible_base.utils.models import get_all_field_names
+from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend
+from ansible_base.lib.utils.models import get_all_field_names

 # AWX
 from awx.main.models import UnifiedJob, UnifiedJobTemplate, User, Role, Credential, WorkflowJobTemplateNode, WorkflowApprovalTemplate
@@ -91,7 +91,7 @@ class LoggedLoginView(auth_views.LoginView):
         ret = super(LoggedLoginView, self).post(request, *args, **kwargs)
         if request.user.is_authenticated:
             logger.info(smart_str(u"User {} logged in from {}".format(self.request.user.username, request.META.get('REMOTE_ADDR', None))))
-            ret.set_cookie('userLoggedIn', 'true')
+            ret.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
             ret.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))

         return ret
@@ -107,7 +107,7 @@ class LoggedLogoutView(auth_views.LogoutView):
         original_user = getattr(request, 'user', None)
         ret = super(LoggedLogoutView, self).dispatch(request, *args, **kwargs)
         current_user = getattr(request, 'user', None)
-        ret.set_cookie('userLoggedIn', 'false')
+        ret.set_cookie('userLoggedIn', 'false', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
         if (not current_user or not getattr(current_user, 'pk', True)) and current_user != original_user:
             logger.info("User {} logged out.".format(original_user.username))
         return ret
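The effect of the added `secure=` argument: when `SESSION_COOKIE_SECURE` is enabled, the `userLoggedIn` cookie now carries the `Secure` attribute, so browsers only transmit it over HTTPS. A minimal standalone sketch using Django's `HttpResponse` directly (settings are configured inline purely for the demo):

```python
# Minimal demo of the secure cookie flag added in this diff.
from django.conf import settings

settings.configure(SESSION_COOKIE_SECURE=True)

from django.http import HttpResponse

resp = HttpResponse()
resp.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
print(resp.cookies['userLoggedIn']['secure'])  # True -> Set-Cookie includes "Secure"
```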
awx/api/serializers.py

@@ -6,7 +6,7 @@ import copy
 import json
 import logging
 import re
-from collections import OrderedDict
+from collections import Counter, OrderedDict
 from datetime import timedelta
 from uuid import uuid4
@@ -43,7 +43,7 @@ from rest_framework.utils.serializer_helpers import ReturnList
 # Django-Polymorphic
 from polymorphic.models import PolymorphicModel

-from ansible_base.utils.models import get_type_for_model
+from ansible_base.lib.utils.models import get_type_for_model

 # AWX
 from awx.main.access import get_user_capabilities
@@ -82,6 +82,7 @@ from awx.main.models import (
     Project,
     ProjectUpdate,
     ProjectUpdateEvent,
+    ReceptorAddress,
     RefreshToken,
     Role,
     Schedule,
@@ -636,7 +637,7 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
         exclusions = self.get_validation_exclusions(self.instance)
         obj = self.instance or self.Meta.model()
         for k, v in attrs.items():
-            if k not in exclusions:
+            if k not in exclusions and k != 'canonical_address_port':
                 setattr(obj, k, v)
         obj.full_clean(exclude=exclusions)
         # full_clean may modify values on the instance; copy those changes
@@ -5176,16 +5177,21 @@ class NotificationTemplateSerializer(BaseSerializer):
                 body = messages[event].get('body', {})
                 if body:
                     try:
-                        rendered_body = (
-                            sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
-                        )
-                        potential_body = json.loads(rendered_body)
-                        if not isinstance(potential_body, dict):
-                            error_list.append(
-                                _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
-                            )
-                    except json.JSONDecodeError as exc:
-                        error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc)))
+                        sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined).from_string(body).render(JobNotificationMixin.context_stub())
+
+                        # https://github.com/ansible/awx/issues/14410
+                        # When rendering something such as "{{ job.id }}"
+                        # the return type is not a dict, unlike "{{ job_metadata }}" which is a dict
+
+                        # potential_body = json.loads(rendered_body)
+                        # if not isinstance(potential_body, dict):
+                        #     error_list.append(
+                        #         _("Webhook body for '{}' should be a json dictionary. Found type '{}'.".format(event, type(potential_body).__name__))
+                        #     )
+                    except Exception as exc:
+                        error_list.append(_("Webhook body for '{}' is not valid. The following gave an error ({}).".format(event, exc)))

                 if error_list:
                     raise serializers.ValidationError(error_list)
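Context for the relaxed validation (awx#14410): a legitimate body template can render to a bare scalar rather than a JSON dictionary, so the old `json.loads` check produced false errors. An isolated sketch with jinja2 showing the problem case:

```python
# Rendering "{{ job.id }}" yields a scalar string, not a JSON dict,
# which is why the strict json.loads() check was commented out.
from jinja2 import sandbox

env = sandbox.ImmutableSandboxedEnvironment()
context = {'job': {'id': 42}}

print(env.from_string('{{ job.id }}').render(context))  # -> '42'
```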
@@ -5458,17 +5464,25 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
 class InstanceLinkSerializer(BaseSerializer):
     class Meta:
         model = InstanceLink
-        fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
+        fields = ('id', 'related', 'source', 'target', 'target_full_address', 'link_state')

     source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
-    target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
+    target = serializers.SerializerMethodField()
+    target_full_address = serializers.SerializerMethodField()

     def get_related(self, obj):
         res = super(InstanceLinkSerializer, self).get_related(obj)
         res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
-        res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
+        res['target_address'] = self.reverse('api:receptor_address_detail', kwargs={'pk': obj.target.id})
         return res

+    def get_target(self, obj):
+        return obj.target.instance.hostname
+
+    def get_target_full_address(self, obj):
+        return obj.target.get_full_address()
+
+
 class InstanceNodeSerializer(BaseSerializer):
     class Meta:
@@ -5476,6 +5490,29 @@ class InstanceNodeSerializer(BaseSerializer):
         fields = ('id', 'hostname', 'node_type', 'node_state', 'enabled')


+class ReceptorAddressSerializer(BaseSerializer):
+    full_address = serializers.SerializerMethodField()
+
+    class Meta:
+        model = ReceptorAddress
+        fields = (
+            'id',
+            'url',
+            'address',
+            'port',
+            'protocol',
+            'websocket_path',
+            'is_internal',
+            'canonical',
+            'instance',
+            'peers_from_control_nodes',
+            'full_address',
+        )
+
+    def get_full_address(self, obj):
+        return obj.get_full_address()
+
+
 class InstanceSerializer(BaseSerializer):
     show_capabilities = ['edit']
@@ -5484,11 +5521,17 @@
     jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
     jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
     health_check_pending = serializers.SerializerMethodField()
-    peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
+    peers = serializers.PrimaryKeyRelatedField(
+        help_text=_('Primary keys of receptor addresses to peer to.'), many=True, required=False, queryset=ReceptorAddress.objects.all()
+    )
+    reverse_peers = serializers.SerializerMethodField()
+    listener_port = serializers.IntegerField(source='canonical_address_port', required=False, allow_null=True)
+    peers_from_control_nodes = serializers.BooleanField(source='canonical_address_peers_from_control_nodes', required=False)
+    protocol = serializers.SerializerMethodField()

     class Meta:
         model = Instance
-        read_only_fields = ('ip_address', 'uuid', 'version')
+        read_only_fields = ('ip_address', 'uuid', 'version', 'managed', 'reverse_peers')
         fields = (
             'id',
             'hostname',
@@ -5519,10 +5562,13 @@
             'managed_by_policy',
             'node_type',
             'node_state',
+            'managed',
             'ip_address',
-            'listener_port',
             'peers',
+            'reverse_peers',
+            'listener_port',
             'peers_from_control_nodes',
+            'protocol',
         )
         extra_kwargs = {
             'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5544,16 +5590,54 @@

     def get_related(self, obj):
         res = super(InstanceSerializer, self).get_related(obj)
+        res['receptor_addresses'] = self.reverse('api:instance_receptor_addresses_list', kwargs={'pk': obj.pk})
         res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
-        res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
-        if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
-            res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
         res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
+        res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
+        if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP] and not obj.managed:
+            res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
         if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
             if obj.node_type == 'execution':
                 res['health_check'] = self.reverse('api:instance_health_check', kwargs={'pk': obj.pk})
         return res

+    def create_or_update(self, validated_data, obj=None, create=True):
+        # create a managed receptor address if listener port is defined
+        port = validated_data.pop('listener_port', -1)
+        peers_from_control_nodes = validated_data.pop('peers_from_control_nodes', -1)
+
+        # delete the receptor address if the port is explicitly set to None
+        if obj and port == None:
+            obj.receptor_addresses.filter(address=obj.hostname).delete()
+
+        if create:
+            instance = super(InstanceSerializer, self).create(validated_data)
+        else:
+            instance = super(InstanceSerializer, self).update(obj, validated_data)
+        instance.refresh_from_db()  # instance canonical address lookup is deferred, so needs to be reloaded
+
+        # only create or update if port is defined in validated_data or already exists in the
+        # canonical address
+        # this prevents creating a receptor address if peers_from_control_nodes is in
+        # validated_data but a port is not set
+        if (port != None and port != -1) or instance.canonical_address_port:
+            kwargs = {}
+            if port != -1:
+                kwargs['port'] = port
+            if peers_from_control_nodes != -1:
+                kwargs['peers_from_control_nodes'] = peers_from_control_nodes
+            if kwargs:
+                kwargs['canonical'] = True
+                instance.receptor_addresses.update_or_create(address=instance.hostname, defaults=kwargs)
+
+        return instance
+
+    def create(self, validated_data):
+        return self.create_or_update(validated_data, create=True)
+
+    def update(self, obj, validated_data):
+        return self.create_or_update(validated_data, obj, create=False)
+
     def get_summary_fields(self, obj):
         summary = super().get_summary_fields(obj)
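The `-1` default in `create_or_update` is doing real work: it lets the serializer distinguish "field absent from the payload" from "explicitly set to null". A standalone sketch of that three-way triage, following the same convention:

```python
# -1 marks "not supplied"; None marks an explicit null; anything else is a port.
MISSING = -1

def triage_listener_port(validated_data):
    port = validated_data.pop('listener_port', MISSING)
    if port == MISSING:
        return 'leave the canonical receptor address unchanged'
    if port is None:
        return 'delete the canonical receptor address'
    return f'create or update the canonical receptor address on port {port}'

print(triage_listener_port({}))                        # absent
print(triage_listener_port({'listener_port': None}))   # explicit null
print(triage_listener_port({'listener_port': 27199}))  # set
```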
@@ -5563,6 +5647,16 @@
         return summary

+    def get_reverse_peers(self, obj):
+        return Instance.objects.prefetch_related('peers').filter(peers__in=obj.receptor_addresses.all()).values_list('id', flat=True)
+
+    def get_protocol(self, obj):
+        # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
+        for addr in obj.receptor_addresses.all():
+            if addr.canonical:
+                return addr.protocol
+        return ""
+
     def get_consumed_capacity(self, obj):
         return obj.consumed_capacity
@@ -5576,47 +5670,20 @@
         return obj.health_check_pending

     def validate(self, attrs):
-        def get_field_from_model_or_attrs(fd):
-            return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
-
-        def check_peers_changed():
-            '''
-            return True if
-            - 'peers' in attrs
-            - instance peers matches peers in attrs
-            '''
-            return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
+        # Oddly, using 'source' on a DRF field populates attrs with the source name, so we should rename it back
+        if 'canonical_address_port' in attrs:
+            attrs['listener_port'] = attrs.pop('canonical_address_port')
+        if 'canonical_address_peers_from_control_nodes' in attrs:
+            attrs['peers_from_control_nodes'] = attrs.pop('canonical_address_peers_from_control_nodes')

         if not self.instance and not settings.IS_K8S:
             raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))

-        node_type = get_field_from_model_or_attrs("node_type")
-        peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
-        listener_port = get_field_from_model_or_attrs("listener_port")
-        peers = attrs.get('peers', [])
-
-        if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
-            raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
-
-        if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
-            if check_peers_changed():
-                raise serializers.ValidationError(
-                    _("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
-                )
-
-        if not listener_port and peers_from_control_nodes:
-            raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
-
-        if not listener_port and self.instance and self.instance.peers_from.exists():
-            raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
-
-        for peer in peers:
-            if peer.listener_port is None:
-                raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
-
-        if not settings.IS_K8S:
-            if check_peers_changed():
-                raise serializers.ValidationError(_("Cannot change peers."))
+        # cannot enable peers_from_control_nodes if listener_port is not set
+        if attrs.get('peers_from_control_nodes'):
+            port = attrs.get('listener_port', -1)  # -1 denotes missing, None denotes explicit null
+            if (port is None) or (port == -1 and self.instance and self.instance.canonical_address is None):
+                raise serializers.ValidationError(_("Cannot enable peers_from_control_nodes if listener_port is not set."))

         return super().validate(attrs)
@@ -5636,8 +5703,8 @@
                 raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
             if value != Instance.States.DEPROVISIONING:
                 raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
-            if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
-                raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
+            if self.instance.managed:
+                raise serializers.ValidationError(_("Cannot deprovision managed nodes."))
         else:
             if value and value != Instance.States.INSTALLED:
                 raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
@@ -5656,18 +5723,48 @@
     def validate_listener_port(self, value):
         """
         Cannot change listener port, unless going from none to integer, and vice versa
+        If instance is managed, cannot change listener port at all
         """
-        if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
-            raise serializers.ValidationError(_("Cannot change listener port."))
+        if self.instance:
+            canonical_address_port = self.instance.canonical_address_port
+            if value and canonical_address_port and canonical_address_port != value:
+                raise serializers.ValidationError(_("Cannot change listener port."))
+            if self.instance.managed and value != canonical_address_port:
+                raise serializers.ValidationError(_("Cannot change listener port for managed nodes."))
+        return value
+
+    def validate_peers(self, value):
+        # cannot peer to an instance more than once
+        peers_instances = Counter(p.instance_id for p in value)
+        if any(count > 1 for count in peers_instances.values()):
+            raise serializers.ValidationError(_("Cannot peer to the same instance more than once."))
+
+        if self.instance:
+            instance_addresses = set(self.instance.receptor_addresses.all())
+            setting_peers = set(value)
+            peers_changed = set(self.instance.peers.all()) != setting_peers
+
+            if not settings.IS_K8S and peers_changed:
+                raise serializers.ValidationError(_("Cannot change peers."))
+
+            if self.instance.managed and peers_changed:
+                raise serializers.ValidationError(_("Setting peers manually for managed nodes is not allowed."))
+
+            # cannot peer to self
+            if instance_addresses & setting_peers:
+                raise serializers.ValidationError(_("Instance cannot peer to its own address."))
+
+            # cannot peer to an instance that is already peered to this instance
+            if instance_addresses:
+                for p in setting_peers:
+                    if set(p.instance.peers.all()) & instance_addresses:
+                        raise serializers.ValidationError(_(f"Instance {p.instance.hostname} is already peered to this instance."))

         return value

     def validate_peers_from_control_nodes(self, value):
-        """
-        Can only enable for K8S based deployments
-        """
-        if value and not settings.IS_K8S:
-            raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
+        if self.instance and self.instance.managed and self.instance.canonical_address_peers_from_control_nodes != value:
+            raise serializers.ValidationError(_("Cannot change peers_from_control_nodes for managed nodes."))

         return value
|||||||
@@ -17,19 +17,18 @@ custom_worksign_public_keyfile: receptor/work_public_key.pem
|
|||||||
custom_tls_certfile: receptor/tls/receptor.crt
|
custom_tls_certfile: receptor/tls/receptor.crt
|
||||||
custom_tls_keyfile: receptor/tls/receptor.key
|
custom_tls_keyfile: receptor/tls/receptor.key
|
||||||
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
|
||||||
receptor_protocol: 'tcp'
|
{% if listener_port %}
|
||||||
{% if instance.listener_port %}
|
receptor_protocol: {{ listener_protocol }}
|
||||||
receptor_listener: true
|
receptor_listener: true
|
||||||
receptor_port: {{ instance.listener_port }}
|
receptor_port: {{ listener_port }}
|
||||||
{% else %}
|
{% else %}
|
||||||
receptor_listener: false
|
receptor_listener: false
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if peers %}
|
{% if peers %}
|
||||||
receptor_peers:
|
receptor_peers:
|
||||||
{% for peer in peers %}
|
{% for peer in peers %}
|
||||||
- host: {{ peer.host }}
|
- address: {{ peer.address }}
|
||||||
port: {{ peer.port }}
|
protocol: {{ peer.protocol }}
|
||||||
protocol: tcp
|
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% verbatim %}
|
{% verbatim %}
|
||||||
|
|||||||
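A quick render of the updated fragment shows what the install bundle now emits when a listener is configured; the variable values here are assumptions for the demo.

```python
# Render the listener portion of the updated group_vars template.
from jinja2 import Template

fragment = (
    '{% if listener_port %}\n'
    'receptor_protocol: {{ listener_protocol }}\n'
    'receptor_listener: true\n'
    'receptor_port: {{ listener_port }}\n'
    '{% else %}\n'
    'receptor_listener: false\n'
    '{% endif %}'
)

print(Template(fragment).render(listener_port=27199, listener_protocol='tcp'))
```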
@@ -1,4 +1,4 @@
 ---
 collections:
   - name: ansible.receptor
-    version: 2.0.2
+    version: 2.0.3
awx/api/urls/instance.py

@@ -10,6 +10,7 @@ from awx.api.views import (
     InstanceInstanceGroupsList,
     InstanceHealthCheck,
     InstancePeersList,
+    InstanceReceptorAddressesList,
 )
 from awx.api.views.instance_install_bundle import InstanceInstallBundle

@@ -21,6 +22,7 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', InstanceInstanceGroupsList.as_view(), name='instance_instance_groups_list'),
     re_path(r'^(?P<pk>[0-9]+)/health_check/$', InstanceHealthCheck.as_view(), name='instance_health_check'),
     re_path(r'^(?P<pk>[0-9]+)/peers/$', InstancePeersList.as_view(), name='instance_peers_list'),
+    re_path(r'^(?P<pk>[0-9]+)/receptor_addresses/$', InstanceReceptorAddressesList.as_view(), name='instance_receptor_addresses_list'),
     re_path(r'^(?P<pk>[0-9]+)/install_bundle/$', InstanceInstallBundle.as_view(), name='instance_install_bundle'),
 ]
awx/api/urls/receptor_address.py (new file, 17 lines)

@@ -0,0 +1,17 @@
+# Copyright (c) 2017 Ansible, Inc.
+# All Rights Reserved.
+
+from django.urls import re_path
+
+from awx.api.views import (
+    ReceptorAddressesList,
+    ReceptorAddressDetail,
+)
+
+
+urls = [
+    re_path(r'^$', ReceptorAddressesList.as_view(), name='receptor_addresses_list'),
+    re_path(r'^(?P<pk>[0-9]+)/$', ReceptorAddressDetail.as_view(), name='receptor_address_detail'),
+]
+
+__all__ = ['urls']
awx/api/urls/urls.py

@@ -85,6 +85,7 @@ from .oauth2_root import urls as oauth2_root_urls
 from .workflow_approval_template import urls as workflow_approval_template_urls
 from .workflow_approval import urls as workflow_approval_urls
 from .analytics import urls as analytics_urls
+from .receptor_address import urls as receptor_address_urls

 v2_urls = [
     re_path(r'^$', ApiV2RootView.as_view(), name='api_v2_root_view'),
@@ -155,6 +156,7 @@ v2_urls = [
     re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
     re_path(r'^bulk/host_delete/$', BulkHostDeleteView.as_view(), name='bulk_host_delete'),
     re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
+    re_path(r'^receptor_addresses/', include(receptor_address_urls)),
 ]
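With the routes wired up, the new endpoints live under `/api/v2/receptor_addresses/`. A hedged smoke test; the base URL and token below are placeholders, not values from this diff:

```python
# Hit the new list and detail endpoints; values below are placeholders.
import requests

base = 'https://awx.example.org/api/v2'
headers = {'Authorization': 'Bearer <token>'}

addresses = requests.get(f'{base}/receptor_addresses/', headers=headers).json()
print(addresses['count'])
if addresses['results']:
    pk = addresses['results'][0]['id']
    print(requests.get(f'{base}/receptor_addresses/{pk}/', headers=headers).json())
```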
awx/api/urls/webhooks.py

@@ -1,10 +1,11 @@
 from django.urls import re_path

-from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver
+from awx.api.views.webhooks import WebhookKeyView, GithubWebhookReceiver, GitlabWebhookReceiver, BitbucketDcWebhookReceiver


 urlpatterns = [
     re_path(r'^webhook_key/$', WebhookKeyView.as_view(), name='webhook_key'),
     re_path(r'^github/$', GithubWebhookReceiver.as_view(), name='webhook_receiver_github'),
     re_path(r'^gitlab/$', GitlabWebhookReceiver.as_view(), name='webhook_receiver_gitlab'),
+    re_path(r'^bitbucket_dc/$', BitbucketDcWebhookReceiver.as_view(), name='webhook_receiver_bitbucket_dc'),
 ]
@@ -272,16 +272,24 @@ class DashboardJobsGraphView(APIView):

     success_query = user_unified_jobs.filter(status='successful')
     failed_query = user_unified_jobs.filter(status='failed')
+    canceled_query = user_unified_jobs.filter(status='canceled')
+    error_query = user_unified_jobs.filter(status='error')

     if job_type == 'inv_sync':
         success_query = success_query.filter(instance_of=models.InventoryUpdate)
         failed_query = failed_query.filter(instance_of=models.InventoryUpdate)
+        canceled_query = canceled_query.filter(instance_of=models.InventoryUpdate)
+        error_query = error_query.filter(instance_of=models.InventoryUpdate)
     elif job_type == 'playbook_run':
         success_query = success_query.filter(instance_of=models.Job)
         failed_query = failed_query.filter(instance_of=models.Job)
+        canceled_query = canceled_query.filter(instance_of=models.Job)
+        error_query = error_query.filter(instance_of=models.Job)
     elif job_type == 'scm_update':
         success_query = success_query.filter(instance_of=models.ProjectUpdate)
         failed_query = failed_query.filter(instance_of=models.ProjectUpdate)
+        canceled_query = canceled_query.filter(instance_of=models.ProjectUpdate)
+        error_query = error_query.filter(instance_of=models.ProjectUpdate)

     end = now()
     interval = 'day'
@@ -297,10 +305,12 @@ class DashboardJobsGraphView(APIView):
     else:
         return Response({'error': _('Unknown period "%s"') % str(period)}, status=status.HTTP_400_BAD_REQUEST)

-    dashboard_data = {"jobs": {"successful": [], "failed": []}}
+    dashboard_data = {"jobs": {"successful": [], "failed": [], "canceled": [], "error": []}}

     succ_list = dashboard_data['jobs']['successful']
     fail_list = dashboard_data['jobs']['failed']
+    canceled_list = dashboard_data['jobs']['canceled']
+    error_list = dashboard_data['jobs']['error']

     qs_s = (
         success_query.filter(finished__range=(start, end))
@@ -318,6 +328,22 @@ class DashboardJobsGraphView(APIView):
         .annotate(agg=Count('id', distinct=True))
     )
     data_f = {item['d']: item['agg'] for item in qs_f}
+    qs_c = (
+        canceled_query.filter(finished__range=(start, end))
+        .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
+        .order_by()
+        .values('d')
+        .annotate(agg=Count('id', distinct=True))
+    )
+    data_c = {item['d']: item['agg'] for item in qs_c}
+    qs_e = (
+        error_query.filter(finished__range=(start, end))
+        .annotate(d=Trunc('finished', interval, tzinfo=end.tzinfo))
+        .order_by()
+        .values('d')
+        .annotate(agg=Count('id', distinct=True))
+    )
+    data_e = {item['d']: item['agg'] for item in qs_e}

     start_date = start.replace(hour=0, minute=0, second=0, microsecond=0)
     for d in itertools.count():
@@ -326,6 +352,8 @@ class DashboardJobsGraphView(APIView):
             break
         succ_list.append([time.mktime(date.timetuple()), data_s.get(date, 0)])
         fail_list.append([time.mktime(date.timetuple()), data_f.get(date, 0)])
+        canceled_list.append([time.mktime(date.timetuple()), data_c.get(date, 0)])
+        error_list.append([time.mktime(date.timetuple()), data_e.get(date, 0)])

     return Response(dashboard_data)
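Note: with the two new series, the graph payload gains canceled and error buckets alongside the existing ones. A sketch of the response shape; timestamps and counts are made up:

    # Sketch: DashboardJobsGraphView response after this change.
    # Each pair is [unix_timestamp, job_count]; values are illustrative.
    dashboard_data = {
        "jobs": {
            "successful": [[1700006400.0, 12], [1700092800.0, 9]],
            "failed":     [[1700006400.0, 2],  [1700092800.0, 1]],
            "canceled":   [[1700006400.0, 0],  [1700092800.0, 3]],
            "error":      [[1700006400.0, 1],  [1700092800.0, 0]],
        }
    }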
@@ -337,12 +365,20 @@ class InstanceList(ListCreateAPIView):
     search_fields = ('hostname',)
     ordering = ('id',)

+    def get_queryset(self):
+        qs = super().get_queryset().prefetch_related('receptor_addresses')
+        return qs
+

 class InstanceDetail(RetrieveUpdateAPIView):
     name = _("Instance Detail")
     model = models.Instance
     serializer_class = serializers.InstanceSerializer

+    def get_queryset(self):
+        qs = super().get_queryset().prefetch_related('receptor_addresses')
+        return qs
+
     def update_raw_data(self, data):
         # these fields are only valid on creation of an instance, so they unwanted on detail view
         data.pop('node_type', None)
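Note: the prefetch is a straightforward query-count optimization for the serializer's related field. A sketch of why it matters here:

    # Sketch: without prefetch_related, serializing N instances issues one
    # receptor_addresses query per instance; with it, Django batches the
    # related lookup into a single extra query.
    instances = models.Instance.objects.prefetch_related('receptor_addresses')
    for inst in instances:
        addresses = list(inst.receptor_addresses.all())  # served from the prefetch cache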
@@ -375,13 +411,37 @@ class InstanceUnifiedJobsList(SubListAPIView):


 class InstancePeersList(SubListAPIView):
-    name = _("Instance Peers")
+    name = _("Peers")
+    model = models.ReceptorAddress
+    serializer_class = serializers.ReceptorAddressSerializer
     parent_model = models.Instance
-    model = models.Instance
-    serializer_class = serializers.InstanceSerializer
     parent_access = 'read'
-    search_fields = {'hostname'}
     relationship = 'peers'
+    search_fields = ('address',)
+
+
+class InstanceReceptorAddressesList(SubListAPIView):
+    name = _("Receptor Addresses")
+    model = models.ReceptorAddress
+    parent_key = 'instance'
+    parent_model = models.Instance
+    serializer_class = serializers.ReceptorAddressSerializer
+    search_fields = ('address',)
+
+
+class ReceptorAddressesList(ListAPIView):
+    name = _("Receptor Addresses")
+    model = models.ReceptorAddress
+    serializer_class = serializers.ReceptorAddressSerializer
+    search_fields = ('address',)
+
+
+class ReceptorAddressDetail(RetrieveAPIView):
+    name = _("Receptor Address Detail")
+    model = models.ReceptorAddress
+    serializer_class = serializers.ReceptorAddressSerializer
+    parent_model = models.Instance
+    relationship = 'receptor_addresses'


 class InstanceInstanceGroupsList(InstanceGroupMembershipMixin, SubListCreateAttachDetachAPIView):
@@ -124,10 +124,19 @@ def generate_inventory_yml(instance_obj):


 def generate_group_vars_all_yml(instance_obj):
+    # get peers
     peers = []
-    for instance in instance_obj.peers.all():
-        peers.append(dict(host=instance.hostname, port=instance.listener_port))
-    all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
+    for addr in instance_obj.peers.select_related('instance'):
+        peers.append(dict(address=addr.get_full_address(), protocol=addr.protocol))
+    context = dict(instance=instance_obj, peers=peers)
+
+    canonical_addr = instance_obj.canonical_address
+    if canonical_addr:
+        context['listener_port'] = canonical_addr.port
+        protocol = canonical_addr.protocol if canonical_addr.protocol != 'wss' else 'ws'
+        context['listener_protocol'] = protocol
+
+    all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=context)
     # convert consecutive newlines with a single newline
     return re.sub(r'\n+', '\n', all_yaml)
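Note: each peer entry now carries an address/protocol pair instead of host/port. A sketch of the template context this produces; hostnames and ports are hypothetical:

    # Sketch: context handed to the all.yml template. get_full_address() and
    # canonical_address come from the ReceptorAddress model in this branch.
    context = {
        'instance': instance_obj,  # the Instance being bundled
        'peers': [
            {'address': 'hop1.example.com:27199', 'protocol': 'tcp'},  # hypothetical
            {'address': 'hop2.example.com:27199', 'protocol': 'ws'},
        ],
        'listener_port': 27199,      # from the canonical address, when one exists
        'listener_protocol': 'tcp',  # 'wss' is mapped down to 'ws' above
    }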
@@ -17,7 +17,7 @@ class MeshVisualizer(APIView):
     def get(self, request, format=None):
         data = {
             'nodes': InstanceNodeSerializer(Instance.objects.all(), many=True).data,
-            'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target', 'source'), many=True).data,
+            'links': InstanceLinkSerializer(InstanceLink.objects.select_related('target__instance', 'source'), many=True).data,
         }

         return Response(data)
@@ -84,6 +84,7 @@ class ApiVersionRootView(APIView):
         data['ping'] = reverse('api:api_v2_ping_view', request=request)
         data['instances'] = reverse('api:instance_list', request=request)
         data['instance_groups'] = reverse('api:instance_group_list', request=request)
+        data['receptor_addresses'] = reverse('api:receptor_addresses_list', request=request)
         data['config'] = reverse('api:api_v2_config_view', request=request)
         data['settings'] = reverse('api:setting_category_list', request=request)
         data['me'] = reverse('api:user_me_list', request=request)
@@ -1,4 +1,4 @@
-from hashlib import sha1
+from hashlib import sha1, sha256
 import hmac
 import logging
 import urllib.parse
@@ -99,14 +99,31 @@ class WebhookReceiverBase(APIView):
     def get_signature(self):
         raise NotImplementedError

+    def must_check_signature(self):
+        return True
+
+    def is_ignored_request(self):
+        return False
+
     def check_signature(self, obj):
         if not obj.webhook_key:
             raise PermissionDenied
+        if not self.must_check_signature():
+            logger.debug("skipping signature validation")
+            return

-        mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
-        logger.debug("header signature: %s", self.get_signature())
+        hash_alg, expected_digest = self.get_signature()
+        if hash_alg == 'sha1':
+            mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha1)
+        elif hash_alg == 'sha256':
+            mac = hmac.new(force_bytes(obj.webhook_key), msg=force_bytes(self.request.body), digestmod=sha256)
+        else:
+            logger.debug("Unsupported signature type, supported: sha1, sha256, received: {}".format(hash_alg))
+            raise PermissionDenied
+
+        logger.debug("header signature: %s", expected_digest)
         logger.debug("calculated signature: %s", force_bytes(mac.hexdigest()))
-        if not hmac.compare_digest(force_bytes(mac.hexdigest()), self.get_signature()):
+        if not hmac.compare_digest(force_bytes(mac.hexdigest()), expected_digest):
            raise PermissionDenied

     @csrf_exempt
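Note: the digest the base class now recomputes is the one a sender produces over the raw request body. A minimal sketch of the sending side, with a made-up key and body:

    # Sketch: how a webhook sender would produce the X-Hub-Signature value
    # that check_signature() above verifies. Key and body are placeholders.
    import hmac
    from hashlib import sha256

    webhook_key = b"my-shared-secret"             # hypothetical shared secret
    body = b'{"eventKey": "repo:refs_changed"}'   # hypothetical request body

    digest = hmac.new(webhook_key, msg=body, digestmod=sha256).hexdigest()
    header_value = f"sha256={digest}"  # split back into (hash_alg, digest) by get_signature()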
@@ -118,6 +135,10 @@ class WebhookReceiverBase(APIView):
         obj = self.get_object()
         self.check_signature(obj)

+        if self.is_ignored_request():
+            # This was an ignored request type (e.g. ping), don't act on it
+            return Response({'message': _("Webhook ignored")}, status=status.HTTP_200_OK)
+
         event_type = self.get_event_type()
         event_guid = self.get_event_guid()
         event_ref = self.get_event_ref()
@@ -186,7 +207,7 @@ class GithubWebhookReceiver(WebhookReceiverBase):
         if hash_alg != 'sha1':
             logger.debug("Unsupported signature type, expected: sha1, received: {}".format(hash_alg))
             raise PermissionDenied
-        return force_bytes(signature)
+        return hash_alg, force_bytes(signature)


 class GitlabWebhookReceiver(WebhookReceiverBase):
@@ -214,15 +235,73 @@ class GitlabWebhookReceiver(WebhookReceiverBase):

         return "{}://{}/api/v4/projects/{}/statuses/{}".format(parsed.scheme, parsed.netloc, project['id'], self.get_event_ref())

-    def get_signature(self):
-        return force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')
-
     def check_signature(self, obj):
         if not obj.webhook_key:
             raise PermissionDenied
+
+        token_from_request = force_bytes(self.request.META.get('HTTP_X_GITLAB_TOKEN') or '')
+
         # GitLab only returns the secret token, not an hmac hash. Use
         # the hmac `compare_digest` helper function to prevent timing
         # analysis by attackers.
-        if not hmac.compare_digest(force_bytes(obj.webhook_key), self.get_signature()):
+        if not hmac.compare_digest(force_bytes(obj.webhook_key), token_from_request):
             raise PermissionDenied
+
+
+class BitbucketDcWebhookReceiver(WebhookReceiverBase):
+    service = 'bitbucket_dc'
+
+    ref_keys = {
+        'repo:refs_changed': 'changes.0.toHash',
+        'mirror:repo_synchronized': 'changes.0.toHash',
+        'pr:opened': 'pullRequest.toRef.latestCommit',
+        'pr:from_ref_updated': 'pullRequest.toRef.latestCommit',
+        'pr:modified': 'pullRequest.toRef.latestCommit',
+    }
+
+    def get_event_type(self):
+        return self.request.META.get('HTTP_X_EVENT_KEY')
+
+    def get_event_guid(self):
+        return self.request.META.get('HTTP_X_REQUEST_ID')
+
+    def get_event_status_api(self):
+        # https://<bitbucket-base-url>/rest/build-status/1.0/commits/<commit-hash>
+        if self.get_event_type() not in self.ref_keys.keys():
+            return
+        if self.get_event_ref() is None:
+            return
+        any_url = None
+        if 'actor' in self.request.data:
+            any_url = self.request.data['actor'].get('links', {}).get('self')
+        if any_url is None and 'repository' in self.request.data:
+            any_url = self.request.data['repository'].get('links', {}).get('self')
+        if any_url is None:
+            return
+        any_url = any_url[0].get('href')
+        if any_url is None:
+            return
+        parsed = urllib.parse.urlparse(any_url)
+
+        return "{}://{}/rest/build-status/1.0/commits/{}".format(parsed.scheme, parsed.netloc, self.get_event_ref())
+
+    def is_ignored_request(self):
+        return self.get_event_type() not in [
+            'repo:refs_changed',
+            'mirror:repo_synchronized',
+            'pr:opened',
+            'pr:from_ref_updated',
+            'pr:modified',
+        ]
+
+    def must_check_signature(self):
+        # Bitbucket does not sign ping requests...
+        return self.get_event_type() != 'diagnostics:ping'
+
+    def get_signature(self):
+        header_sig = self.request.META.get('HTTP_X_HUB_SIGNATURE')
+        if not header_sig:
+            logger.debug("Expected signature missing from header key HTTP_X_HUB_SIGNATURE")
+            raise PermissionDenied
+        hash_alg, signature = header_sig.split('=')
+        return hash_alg, force_bytes(signature)
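Note: the ref_keys values are dotted paths into the Bitbucket event payload, presumably resolved by the base class's get_event_ref(). A sketch of how such a path maps into a payload; the traversal helper and the payload are illustrative, not AWX code:

    # Sketch: resolving 'changes.0.toHash' against a trimmed hypothetical body.
    payload = {"changes": [{"toHash": "deadbeef"}]}

    def dig(data, dotted):
        # walk dict keys and list indices named by the dotted path
        for part in dotted.split('.'):
            data = data[int(part)] if isinstance(data, list) else data[part]
        return data

    assert dig(payload, 'changes.0.toHash') == 'deadbeef'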
@@ -7,7 +7,7 @@ import json
 # Django
 from django.db import models

-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search

 # AWX
 from awx.main.models.base import CreatedModifiedModel
@@ -20,7 +20,7 @@ from rest_framework.exceptions import ParseError, PermissionDenied
 # Django OAuth Toolkit
 from awx.main.models.oauth import OAuth2Application, OAuth2AccessToken

-from ansible_base.utils.validation import to_python_boolean
+from ansible_base.lib.utils.validation import to_python_boolean

 # AWX
 from awx.main.utils import (
@@ -57,6 +57,7 @@ from awx.main.models import (
     Project,
     ProjectUpdate,
     ProjectUpdateEvent,
+    ReceptorAddress,
     Role,
     Schedule,
     SystemJob,
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class ReceptorAddressAccess(BaseAccess):
|
||||||
|
"""
|
||||||
|
I can see receptor address records whenever I can access the instance
|
||||||
|
"""
|
||||||
|
|
||||||
|
model = ReceptorAddress
|
||||||
|
|
||||||
|
def filtered_queryset(self):
|
||||||
|
return self.model.objects.filter(Q(instance__in=Instance.accessible_pk_qs(self.user, 'read_role')))
|
||||||
|
|
||||||
|
@check_superuser
|
||||||
|
def can_add(self, data):
|
||||||
|
return False
|
||||||
|
|
||||||
|
@check_superuser
|
||||||
|
def can_change(self, obj, data):
|
||||||
|
return False
|
||||||
|
|
||||||
|
@check_superuser
|
||||||
|
def can_delete(self, obj):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
class SystemJobEventAccess(BaseAccess):
|
class SystemJobEventAccess(BaseAccess):
|
||||||
"""
|
"""
|
||||||
I can only see manage System Jobs events if I'm a super user
|
I can only see manage System Jobs events if I'm a super user
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
 import logging

 # AWX
-from awx.main.analytics.subsystem_metrics import Metrics
+from awx.main.analytics.subsystem_metrics import DispatcherMetrics, CallbackReceiverMetrics
 from awx.main.dispatch.publish import task
 from awx.main.dispatch import get_task_queuename

@@ -11,4 +11,5 @@ logger = logging.getLogger('awx.main.scheduler')

 @task(queue=get_task_queuename)
 def send_subsystem_metrics():
-    Metrics().send_metrics()
+    DispatcherMetrics().send_metrics()
+    CallbackReceiverMetrics().send_metrics()
@@ -1,10 +1,15 @@
+import itertools
 import redis
 import json
 import time
 import logging

+import prometheus_client
+from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
+from prometheus_client.registry import CollectorRegistry
 from django.conf import settings
-from django.apps import apps
+from django.http import HttpRequest
+from rest_framework.request import Request

 from awx.main.consumers import emit_channel_notification
 from awx.main.utils import is_testing
@@ -13,6 +18,30 @@ root_key = settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX
 logger = logging.getLogger('awx.main.analytics')


+class MetricsNamespace:
+    def __init__(self, namespace):
+        self._namespace = namespace
+
+
+class MetricsServerSettings(MetricsNamespace):
+    def port(self):
+        return settings.METRICS_SUBSYSTEM_CONFIG['server'][self._namespace]['port']
+
+
+class MetricsServer(MetricsServerSettings):
+    def __init__(self, namespace, registry):
+        MetricsNamespace.__init__(self, namespace)
+        self._registry = registry
+
+    def start(self):
+        try:
+            # TODO: addr for ipv6 ?
+            prometheus_client.start_http_server(self.port(), addr='localhost', registry=self._registry)
+        except Exception:
+            logger.error(f"MetricsServer failed to start for service '{self._namespace}'.")
+            raise
+
+
 class BaseM:
     def __init__(self, field, help_text):
         self.field = field
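Note: a minimal sketch of bringing one of these servers up; the settings shape is inferred from the port() lookup above, and the namespace string stands in for one of the METRICS_SERVICE_* values:

    # Sketch: assumes a settings entry like
    # METRICS_SUBSYSTEM_CONFIG = {'server': {'callback_receiver': {'port': 8014}}}
    from prometheus_client.registry import CollectorRegistry

    registry = CollectorRegistry(auto_describe=True)
    server = MetricsServer('callback_receiver', registry)
    server.start()  # start_http_server serves scrapes from a background thread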
@@ -148,76 +177,40 @@ class HistogramM(BaseM):
         return output_text


-class Metrics:
-    def __init__(self, auto_pipe_execute=False, instance_name=None):
+class Metrics(MetricsNamespace):
+    # metric name, help_text
+    METRICSLIST = []
+    _METRICSLIST = [
+        FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
+        IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
+        FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
+    ]
+
+    def __init__(self, namespace, auto_pipe_execute=False, instance_name=None, metrics_have_changed=True, **kwargs):
+        MetricsNamespace.__init__(self, namespace)
+
         self.pipe = redis.Redis.from_url(settings.BROKER_URL).pipeline()
         self.conn = redis.Redis.from_url(settings.BROKER_URL)
         self.last_pipe_execute = time.time()
         # track if metrics have been modified since last saved to redis
         # start with True so that we get an initial save to redis
-        self.metrics_have_changed = True
+        self.metrics_have_changed = metrics_have_changed
         self.pipe_execute_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SAVE_TO_REDIS
         self.send_metrics_interval = settings.SUBSYSTEM_METRICS_INTERVAL_SEND_METRICS
         # auto pipe execute will commit transaction of metric data to redis
         # at a regular interval (pipe_execute_interval). If set to False,
         # the calling function should call .pipe_execute() explicitly
         self.auto_pipe_execute = auto_pipe_execute
-        Instance = apps.get_model('main', 'Instance')
         if instance_name:
             self.instance_name = instance_name
         elif is_testing():
             self.instance_name = "awx_testing"
         else:
-            self.instance_name = Instance.objects.my_hostname()
-
-        # metric name, help_text
-        METRICSLIST = [
-            SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
-            IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
-            IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
-            IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
-            FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
-            IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
-            IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
-            HistogramM(
-                'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
-            ),
-            SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
-            FloatM('subsystem_metrics_pipe_execute_seconds', 'Time spent saving metrics to redis'),
-            IntM('subsystem_metrics_pipe_execute_calls', 'Number of calls to pipe_execute'),
-            FloatM('subsystem_metrics_send_metrics_seconds', 'Time spent sending metrics to other nodes'),
-            SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
-            SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
-            SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
-            SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
-            SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
-            IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
-            SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
-            SetIntM('task_manager_tasks_started', 'Number of tasks started'),
-            SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
-            SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
-            SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
-            SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
-            SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
-            SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
-            SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
-            IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
-            SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
-            SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
-            SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
-            IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
-            SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
-            SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
-            SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
-            # dispatcher subsystem metrics
-            SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
-            SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
-            SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
-            SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
-        ]
+            self.instance_name = settings.CLUSTER_HOST_ID  # Same as Instance.objects.my_hostname() BUT we do not need to import Instance
+
         # turn metric list into dictionary with the metric name as a key
         self.METRICS = {}
-        for m in METRICSLIST:
+        for m in itertools.chain(self.METRICSLIST, self._METRICSLIST):
             self.METRICS[m.field] = m

         # track last time metrics were sent to other nodes
@@ -230,7 +223,7 @@ class Metrics:
             m.reset_value(self.conn)
         self.metrics_have_changed = True
         self.conn.delete(root_key + "_lock")
-        for m in self.conn.scan_iter(root_key + '_instance_*'):
+        for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
             self.conn.delete(m)

     def inc(self, field, value):
@@ -297,7 +290,7 @@ class Metrics:
     def send_metrics(self):
         # more than one thread could be calling this at the same time, so should
         # acquire redis lock before sending metrics
-        lock = self.conn.lock(root_key + '_lock')
+        lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
         if not lock.acquire(blocking=False):
             return
         try:
@@ -307,9 +300,10 @@ class Metrics:
             payload = {
                 'instance': self.instance_name,
                 'metrics': serialized_metrics,
+                'metrics_namespace': self._namespace,
             }
             # store the serialized data locally as well, so that load_other_metrics will read it
-            self.conn.set(root_key + '_instance_' + self.instance_name, serialized_metrics)
+            self.conn.set(root_key + '-' + self._namespace + '_instance_' + self.instance_name, serialized_metrics)
             emit_channel_notification("metrics", payload)

             self.previous_send_metrics.set(current_time)
@@ -331,14 +325,14 @@ class Metrics:
         instances_filter = request.query_params.getlist("node")
         # get a sorted list of instance names
         instance_names = [self.instance_name]
-        for m in self.conn.scan_iter(root_key + '_instance_*'):
+        for m in self.conn.scan_iter(root_key + '-' + self._namespace + '_instance_*'):
             instance_names.append(m.decode('UTF-8').split('_instance_')[1])
         instance_names.sort()
         # load data, including data from the this local instance
         instance_data = {}
         for instance in instance_names:
             if len(instances_filter) == 0 or instance in instances_filter:
-                instance_data_from_redis = self.conn.get(root_key + '_instance_' + instance)
+                instance_data_from_redis = self.conn.get(root_key + '-' + self._namespace + '_instance_' + instance)
                 # data from other instances may not be available. That is OK.
                 if instance_data_from_redis:
                     instance_data[instance] = json.loads(instance_data_from_redis.decode('UTF-8'))
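Note: the effect of the namespacing is easiest to see on the key names themselves. A sketch, assuming a prefix of 'awx_metrics' and a node named 'awx-1' (both placeholders):

    # Sketch: redis key layout before and after the namespace change.
    root_key = 'awx_metrics'
    namespace = 'dispatcher'

    old_key = root_key + '_instance_' + 'awx-1'                    # 'awx_metrics_instance_awx-1'
    new_key = root_key + '-' + namespace + '_instance_' + 'awx-1'  # 'awx_metrics-dispatcher_instance_awx-1'
    # load_other_metrics still splits on '_instance_' to recover the hostname,
    # which works because the namespace lands before that separator.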
@@ -357,6 +351,120 @@ class Metrics:
         return output_text


+class DispatcherMetrics(Metrics):
+    METRICSLIST = [
+        SetFloatM('task_manager_get_tasks_seconds', 'Time spent in loading tasks from db'),
+        SetFloatM('task_manager_start_task_seconds', 'Time spent starting task'),
+        SetFloatM('task_manager_process_running_tasks_seconds', 'Time spent processing running tasks'),
+        SetFloatM('task_manager_process_pending_tasks_seconds', 'Time spent processing pending tasks'),
+        SetFloatM('task_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+        IntM('task_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+        SetFloatM('task_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+        SetIntM('task_manager_tasks_started', 'Number of tasks started'),
+        SetIntM('task_manager_running_processed', 'Number of running tasks processed'),
+        SetIntM('task_manager_pending_processed', 'Number of pending tasks processed'),
+        SetIntM('task_manager_tasks_blocked', 'Number of tasks blocked from running'),
+        SetFloatM('task_manager_commit_seconds', 'Time spent in db transaction, including on_commit calls'),
+        SetFloatM('dependency_manager_get_tasks_seconds', 'Time spent loading pending tasks from db'),
+        SetFloatM('dependency_manager_generate_dependencies_seconds', 'Time spent generating dependencies for pending tasks'),
+        SetFloatM('dependency_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+        IntM('dependency_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+        SetFloatM('dependency_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+        SetIntM('dependency_manager_pending_processed', 'Number of pending tasks processed'),
+        SetFloatM('workflow_manager__schedule_seconds', 'Time spent in running the entire _schedule'),
+        IntM('workflow_manager__schedule_calls', 'Number of calls to _schedule, after lock is acquired'),
+        SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
+        SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
+        SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
+        # dispatcher subsystem metrics
+        SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
+        SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
+        SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
+        SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(settings.METRICS_SERVICE_DISPATCHER, *args, **kwargs)
+
+
+class CallbackReceiverMetrics(Metrics):
+    METRICSLIST = [
+        SetIntM('callback_receiver_events_queue_size_redis', 'Current number of events in redis queue'),
+        IntM('callback_receiver_events_popped_redis', 'Number of events popped from redis'),
+        IntM('callback_receiver_events_in_memory', 'Current number of events in memory (in transfer from redis to db)'),
+        IntM('callback_receiver_batch_events_errors', 'Number of times batch insertion failed'),
+        FloatM('callback_receiver_events_insert_db_seconds', 'Total time spent saving events to database'),
+        IntM('callback_receiver_events_insert_db', 'Number of events batch inserted into database'),
+        IntM('callback_receiver_events_broadcast', 'Number of events broadcast to other control plane nodes'),
+        HistogramM(
+            'callback_receiver_batch_events_insert_db', 'Number of events batch inserted into database', settings.SUBSYSTEM_METRICS_BATCH_INSERT_BUCKETS
+        ),
+        SetFloatM('callback_receiver_event_processing_avg_seconds', 'Average processing time per event per callback receiver batch'),
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, *args, **kwargs)
+
+
 def metrics(request):
-    m = Metrics()
-    return m.generate_metrics(request)
+    output_text = ''
+    for m in [DispatcherMetrics(), CallbackReceiverMetrics()]:
+        output_text += m.generate_metrics(request)
+    return output_text
+
+
+class CustomToPrometheusMetricsCollector(prometheus_client.registry.Collector):
+    """
+    Takes the metric data from redis -> our custom metric fields -> prometheus
+    library metric fields.
+
+    The plan is to get rid of the use of redis, our custom metric fields, and
+    to switch fully to the prometheus library. At that point, this translation
+    code will be deleted.
+    """
+
+    def __init__(self, metrics_obj, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._metrics = metrics_obj
+
+    def collect(self):
+        my_hostname = settings.CLUSTER_HOST_ID
+
+        instance_data = self._metrics.load_other_metrics(Request(HttpRequest()))
+        if not instance_data:
+            logger.debug(f"No metric data found in redis for metric namespace '{self._metrics._namespace}'")
+            return None
+
+        host_metrics = instance_data.get(my_hostname)
+        for _, metric in self._metrics.METRICS.items():
+            entry = host_metrics.get(metric.field)
+            if not entry:
+                logger.debug(f"{self._metrics._namespace} metric '{metric.field}' not found in redis data payload {json.dumps(instance_data, indent=2)}")
+                continue
+            if isinstance(metric, HistogramM):
+                buckets = list(zip(metric.buckets, entry['counts']))
+                buckets = [[str(i[0]), str(i[1])] for i in buckets]
+                yield HistogramMetricFamily(metric.field, metric.help_text, buckets=buckets, sum_value=entry['sum'])
+            else:
+                yield GaugeMetricFamily(metric.field, metric.help_text, value=entry)
+
+
+class CallbackReceiverMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        registry.register(CustomToPrometheusMetricsCollector(CallbackReceiverMetrics(metrics_have_changed=False)))
+        super().__init__(settings.METRICS_SERVICE_CALLBACK_RECEIVER, registry)
+
+
+class DispatcherMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        registry.register(CustomToPrometheusMetricsCollector(DispatcherMetrics(metrics_have_changed=False)))
+        super().__init__(settings.METRICS_SERVICE_DISPATCHER, registry)
+
+
+class WebsocketsMetricsServer(MetricsServer):
+    def __init__(self):
+        registry = CollectorRegistry(auto_describe=True)
+        # registry.register()
+        super().__init__(settings.METRICS_SERVICE_WEBSOCKETS, registry)
@@ -106,7 +106,7 @@ class RelayConsumer(AsyncJsonWebsocketConsumer):
         if group == "metrics":
             message = json.loads(message['text'])
             conn = redis.Redis.from_url(settings.BROKER_URL)
-            conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "_instance_" + message['instance'], message['metrics'])
+            conn.set(settings.SUBSYSTEM_METRICS_REDIS_KEY_PREFIX + "-" + message['metrics_namespace'] + "_instance_" + message['instance'], message['metrics'])
         else:
             await self.channel_layer.group_send(group, message)
@@ -58,7 +58,7 @@ aim_inputs = {
         'id': 'object_property',
         'label': _('Object Property'),
         'type': 'string',
-        'help_text': _('The property of the object to return. Default: Content Ex: Username, Address, etc.'),
+        'help_text': _('The property of the object to return. Available properties: Username, Password and Address.'),
     },
     {
         'id': 'reason',
@@ -111,8 +111,12 @@ def aim_backend(**kwargs):
         object_property = 'Content'
     elif object_property.lower() == 'username':
         object_property = 'UserName'
+    elif object_property.lower() == 'password':
+        object_property = 'Content'
+    elif object_property.lower() == 'address':
+        object_property = 'Address'
     elif object_property not in res:
-        raise KeyError('Property {} not found in object'.format(object_property))
+        raise KeyError('Property {} not found in object, available properties: Username, Password and Address'.format(object_property))
     else:
         object_property = object_property.capitalize()
@@ -87,6 +87,20 @@ base_inputs = {
             ' see https://www.vaultproject.io/docs/auth/kubernetes#configuration'
         ),
     },
+    {
+        'id': 'username',
+        'label': _('Username'),
+        'type': 'string',
+        'secret': False,
+        'help_text': _('Username for user authentication.'),
+    },
+    {
+        'id': 'password',
+        'label': _('Password'),
+        'type': 'string',
+        'secret': True,
+        'help_text': _('Password for user authentication.'),
+    },
     {
         'id': 'default_auth_path',
         'label': _('Path to Auth'),
@@ -185,9 +199,10 @@ hashi_ssh_inputs['required'].extend(['public_key', 'role'])

 def handle_auth(**kwargs):
     token = None

     if kwargs.get('token'):
         token = kwargs['token']
+    elif kwargs.get('username') and kwargs.get('password'):
+        token = method_auth(**kwargs, auth_param=userpass_auth(**kwargs))
     elif kwargs.get('role_id') and kwargs.get('secret_id'):
         token = method_auth(**kwargs, auth_param=approle_auth(**kwargs))
     elif kwargs.get('kubernetes_role'):
@@ -195,11 +210,14 @@ def handle_auth(**kwargs):
     elif kwargs.get('client_cert_public') and kwargs.get('client_cert_private'):
         token = method_auth(**kwargs, auth_param=client_cert_auth(**kwargs))
     else:
-        raise Exception('Either a token or AppRole, Kubernetes, or TLS authentication parameters must be set')
+        raise Exception('Token, Username/Password, AppRole, Kubernetes, or TLS authentication parameters must be set')

     return token


+def userpass_auth(**kwargs):
+    return {'username': kwargs['username'], 'password': kwargs['password']}
+
+
 def approle_auth(**kwargs):
     return {'role_id': kwargs['role_id'], 'secret_id': kwargs['secret_id']}
@@ -227,11 +245,14 @@ def method_auth(**kwargs):
     cacert = kwargs.get('cacert', None)

     sess = requests.Session()
+    sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
+
     # Namespace support
     if kwargs.get('namespace'):
         sess.headers['X-Vault-Namespace'] = kwargs['namespace']
     request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
+    if kwargs['auth_param'].get('username'):
+        request_url = request_url + '/' + (kwargs['username'])
     with CertFiles(cacert) as cert:
         request_kwargs['verify'] = cert
         # TLS client certificate support
@@ -263,6 +284,7 @@ def kv_backend(**kwargs):
     }

     sess = requests.Session()
+    sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
     sess.headers['Authorization'] = 'Bearer {}'.format(token)
     # Compatibility header for older installs of Hashicorp Vault
     sess.headers['X-Vault-Token'] = token
@@ -333,6 +355,7 @@ def ssh_backend(**kwargs):
         request_kwargs['json']['valid_principals'] = kwargs['valid_principals']

     sess = requests.Session()
+    sess.mount(url, requests.adapters.HTTPAdapter(max_retries=5))
     sess.headers['Authorization'] = 'Bearer {}'.format(token)
     if kwargs.get('namespace'):
         sess.headers['X-Vault-Namespace'] = kwargs['namespace']
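Note: Vault's userpass login appends the username to the auth login path, which is what the request_url suffix above produces. A sketch with placeholder values:

    # Sketch: URL shape produced for userpass auth; all values are placeholders.
    url = "https://vault.example.com"          # hypothetical Vault server
    auth_path = "userpass"                     # typical mount path for userpass
    request_url = '/'.join([url, 'auth', auth_path, 'login']).rstrip('/')
    request_url = request_url + '/' + 'alice'  # username appended as in method_auth()
    # -> https://vault.example.com/auth/userpass/login/alice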
@@ -93,6 +93,26 @@ class PubSub(object):
         self.conn.close()


+def create_listener_connection():
+    conf = settings.DATABASES['default'].copy()
+    conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
+    # Modify the application name to distinguish from other connections the process might use
+    conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
+
+    # Apply overrides specifically for the listener connection
+    for k, v in settings.LISTENER_DATABASES.get('default', {}).items():
+        conf[k] = v
+    for k, v in settings.LISTENER_DATABASES.get('default', {}).get('OPTIONS', {}).items():
+        conf['OPTIONS'][k] = v
+
+    # Allow password-less authentication
+    if 'PASSWORD' in conf:
+        conf['OPTIONS']['password'] = conf.pop('PASSWORD')
+
+    connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} port={conf['PORT']}"
+    return psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
+
+
 @contextmanager
 def pg_bus_conn(new_connection=False, select_timeout=None):
     '''
@@ -106,12 +126,7 @@ def pg_bus_conn(new_connection=False, select_timeout=None):
     '''

     if new_connection:
-        conf = settings.DATABASES['default'].copy()
-        conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
-        # Modify the application name to distinguish from other connections the process might use
-        conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
-        connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
-        conn = psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
+        conn = create_listener_connection()
     else:
         if pg_connection.connection is None:
             pg_connection.connect()
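Note: a sketch of the kind of settings override the new helper honors; the keys mirror Django's DATABASES layout, and the values are placeholders:

    # Sketch: hypothetical settings snippet. Only the pg_notify listener
    # connection picks these up; everything else keeps DATABASES['default'].
    LISTENER_DATABASES = {
        'default': {
            'OPTIONS': {
                'keepalives': 1,       # example libpq option
                'keepalives_idle': 5,
            },
        }
    }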
@@ -162,13 +162,13 @@ class AWXConsumerRedis(AWXConsumerBase):
 class AWXConsumerPG(AWXConsumerBase):
     def __init__(self, *args, schedule=None, **kwargs):
         super().__init__(*args, **kwargs)
-        self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
+        self.pg_max_wait = getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE)
         # if no successful loops have ran since startup, then we should fail right away
         self.pg_is_down = True  # set so that we fail if we get database errors on startup
         init_time = time.time()
         self.pg_down_time = init_time - self.pg_max_wait  # allow no grace period
         self.last_cleanup = init_time
-        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
+        self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
         self.last_metrics_gather = init_time
         self.listen_cumulative_time = 0.0
         if schedule:
@@ -214,7 +214,10 @@ class AWXConsumerPG(AWXConsumerBase):
             # bypasses pg_notify for scheduled tasks
             self.dispatch_task(body)

-        self.pg_is_down = False
+        if self.pg_is_down:
+            logger.info('Dispatcher listener connection established')
+            self.pg_is_down = False

         self.listen_start = time.time()

         return self.scheduler.time_until_next_run()
@@ -72,7 +72,7 @@ class CallbackBrokerWorker(BaseWorker):
     def __init__(self):
         self.buff = {}
         self.redis = redis.Redis.from_url(settings.BROKER_URL)
-        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
+        self.subsystem_metrics = s_metrics.CallbackReceiverMetrics(auto_pipe_execute=False)
         self.queue_pop = 0
         self.queue_name = settings.CALLBACK_QUEUE
         self.prof = AWXProfiler("CallbackBrokerWorker")
@@ -5,6 +5,7 @@
 import copy
 import json
 import re
+import sys
 import urllib.parse

 from jinja2 import sandbox, StrictUndefined
@@ -406,11 +407,13 @@ class SmartFilterField(models.TextField):
         # https://docs.python.org/2/library/stdtypes.html#truth-value-testing
         if not value:
             return None
-        value = urllib.parse.unquote(value)
-        try:
-            SmartFilter().query_from_string(value)
-        except RuntimeError as e:
-            raise models.base.ValidationError(e)
+        # avoid doing too much during migrations
+        if 'migrate' not in sys.argv:
+            value = urllib.parse.unquote(value)
+            try:
+                SmartFilter().query_from_string(value)
+            except RuntimeError as e:
+                raise models.base.ValidationError(e)
         return super(SmartFilterField, self).get_prep_value(value)
awx/main/management/commands/add_receptor_address.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+# Copyright (c) 2015 Ansible, Inc.
+# All Rights Reserved
+
+from django.core.management.base import BaseCommand
+
+from awx.main.models import Instance, ReceptorAddress
+
+
+def add_address(**kwargs):
+    try:
+        instance = Instance.objects.get(hostname=kwargs.pop('instance'))
+        kwargs['instance'] = instance
+
+        if kwargs.get('canonical') and instance.receptor_addresses.filter(canonical=True).exclude(address=kwargs['address']).exists():
+            print(f"Instance {instance.hostname} already has a canonical address, skipping")
+            return False
+        # if ReceptorAddress already exists with address, just update
+        # otherwise, create new ReceptorAddress
+        addr, _ = ReceptorAddress.objects.update_or_create(address=kwargs.pop('address'), defaults=kwargs)
+        print(f"Successfully added receptor address {addr.get_full_address()}")
+        return True
+    except Exception as e:
+        print(f"Error adding receptor address: {e}")
+        return False
+
+
+class Command(BaseCommand):
+    """
+    Internal controller command.
+    Register receptor address to an already-registered instance.
+    """
+
+    help = "Add receptor address to an instance."
+
+    def add_arguments(self, parser):
+        parser.add_argument('--instance', dest='instance', required=True, type=str, help="Instance hostname this address is added to")
+        parser.add_argument('--address', dest='address', required=True, type=str, help="Receptor address")
+        parser.add_argument('--port', dest='port', type=int, help="Receptor listener port")
+        parser.add_argument('--websocket_path', dest='websocket_path', type=str, default="", help="Path for websockets")
+        parser.add_argument('--is_internal', action='store_true', help="If true, address only resolvable within the Kubernetes cluster")
+        parser.add_argument('--protocol', type=str, default='tcp', choices=['tcp', 'ws', 'wss'], help="Protocol to use for the Receptor listener")
+        parser.add_argument('--canonical', action='store_true', help="If true, address is the canonical address for the instance")
+        parser.add_argument('--peers_from_control_nodes', action='store_true', help="If true, control nodes will peer to this address")
+
+    def handle(self, **options):
+        address_options = {
+            k: options[k]
+            for k in ('instance', 'address', 'port', 'websocket_path', 'is_internal', 'protocol', 'peers_from_control_nodes', 'canonical')
+            if options[k]
+        }
+        changed = add_address(**address_options)
+        if changed:
+            print("(changed: True)")
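Note: a minimal usage sketch for the new command; the instance hostname and address below are hypothetical, and the command can also be driven through Django's call_command:

    # Django shell on a control node; assumes an instance named 'hop1' is already registered
    from django.core.management import call_command

    call_command(
        'add_receptor_address',
        instance='hop1',
        address='hop1.example.org',
        port=27199,
        protocol='tcp',
        canonical=True,
    )
    # on success prints "Successfully added receptor address ..." and "(changed: True)"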
@@ -55,7 +55,7 @@ class Command(BaseCommand):
 
             capacity = f' capacity={x.capacity}' if x.node_type != 'hop' else ''
             version = f" version={x.version or '?'}" if x.node_type != 'hop' else ''
-            heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.capacity or x.node_type == 'hop' else ''
+            heartbeat = f' heartbeat="{x.last_seen:%Y-%m-%d %H:%M:%S}"' if x.last_seen else ''
             print(f'\t{color}{x.hostname}{capacity} node_type={x.node_type}{version}{heartbeat}{end_color}')
 
         print()
@@ -25,20 +25,17 @@ class Command(BaseCommand):
 
     def add_arguments(self, parser):
         parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
-        parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
         parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
         parser.add_argument('--uuid', type=str, help="Instance UUID")
 
-    def _register_hostname(self, hostname, node_type, uuid, listener_port):
+    def _register_hostname(self, hostname, node_type, uuid):
         if not hostname:
             if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                 raise CommandError('Registering with values from settings only intended for use in K8s installs')
 
             from awx.main.management.commands.register_queue import RegisterQueue
 
-            (changed, instance) = Instance.objects.register(
-                ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
-            )
+            (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', node_uuid=settings.SYSTEM_UUID)
             RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
             RegisterQueue(
                 settings.DEFAULT_EXECUTION_QUEUE_NAME,
@@ -51,16 +48,17 @@ class Command(BaseCommand):
                 max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
             ).register()
         else:
-            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
+            (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid)
             if changed:
                 print("Successfully registered instance {}".format(hostname))
             else:
                 print("Instance already registered {}".format(instance.hostname))
 
         self.changed = changed
 
     @transaction.atomic
     def handle(self, **options):
         self.changed = False
-        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
+        self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
         if self.changed:
             print("(changed: True)")
@@ -1,9 +1,7 @@
-import warnings
-
 from django.core.management.base import BaseCommand, CommandError
 from django.db import transaction
 
-from awx.main.models import Instance, InstanceLink
+from awx.main.models import Instance, InstanceLink, ReceptorAddress
 
 
 class Command(BaseCommand):
@@ -28,7 +26,9 @@ class Command(BaseCommand):
 
     def handle(self, **options):
         # provides a mapping of hostname to Instance objects
-        nodes = Instance.objects.in_bulk(field_name='hostname')
+        nodes = Instance.objects.all().in_bulk(field_name='hostname')
+        # provides a mapping of address to ReceptorAddress objects
+        addresses = ReceptorAddress.objects.all().in_bulk(field_name='address')
 
         if options['source'] not in nodes:
             raise CommandError(f"Host {options['source']} is not a registered instance.")
@@ -39,6 +39,14 @@
         if options['exact'] is not None and options['disconnect']:
             raise CommandError("The option --disconnect may not be used with --exact.")
 
+        # make sure each target has a receptor address
+        peers = options['peers'] or []
+        disconnect = options['disconnect'] or []
+        exact = options['exact'] or []
+        for peer in peers + disconnect + exact:
+            if peer not in addresses:
+                raise CommandError(f"Peer {peer} does not have a receptor address.")
+
         # No 1-cycles
         for collection in ('peers', 'disconnect', 'exact'):
             if options[collection] is not None and options['source'] in options[collection]:
@@ -47,9 +55,12 @@
         # No 2-cycles
         if options['peers'] or options['exact'] is not None:
             peers = set(options['peers'] or options['exact'])
-            incoming = set(InstanceLink.objects.filter(target=nodes[options['source']]).values_list('source__hostname', flat=True))
+            if options['source'] in addresses:
+                incoming = set(InstanceLink.objects.filter(target=addresses[options['source']]).values_list('source__hostname', flat=True))
+            else:
+                incoming = set()
             if peers & incoming:
-                warnings.warn(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.")
+                raise CommandError(f"Source node {options['source']} should not link to nodes already peering to it: {peers & incoming}.")
 
         if options['peers']:
             missing_peers = set(options['peers']) - set(nodes)
@@ -60,7 +71,7 @@
             results = 0
             for target in options['peers']:
                 _, created = InstanceLink.objects.update_or_create(
-                    source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
+                    source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
                 )
                 if created:
                     results += 1
@@ -70,9 +81,9 @@
         if options['disconnect']:
             results = 0
             for target in options['disconnect']:
-                if target not in nodes:  # Be permissive, the node might have already been de-registered.
+                if target not in addresses:  # Be permissive, the node might have already been de-registered.
                     continue
-                n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=nodes[target]).delete()
+                n, _ = InstanceLink.objects.filter(source=nodes[options['source']], target=addresses[target]).delete()
                 results += n
 
             print(f"{results} peer links removed from the database.")
@@ -81,11 +92,11 @@
             additions = 0
             with transaction.atomic():
                 peers = set(options['exact'])
-                links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__hostname', flat=True))
-                removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__hostname__in=links - peers).delete()
+                links = set(InstanceLink.objects.filter(source=nodes[options['source']]).values_list('target__address', flat=True))
+                removals, _ = InstanceLink.objects.filter(source=nodes[options['source']], target__instance__hostname__in=links - peers).delete()
                 for target in peers - links:
                     _, created = InstanceLink.objects.update_or_create(
-                        source=nodes[options['source']], target=nodes[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
+                        source=nodes[options['source']], target=addresses[target], defaults={'link_state': InstanceLink.States.ESTABLISHED}
                     )
                     if created:
                         additions += 1
awx/main/management/commands/remove_receptor_address.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# Copyright (c) 2015 Ansible, Inc.
+# All Rights Reserved
+
+from django.core.management.base import BaseCommand
+
+from awx.main.models import ReceptorAddress
+
+
+class Command(BaseCommand):
+    """
+    Internal controller command.
+    Delete a receptor address.
+    """
+
+    help = "Add receptor address to an instance."
+
+    def add_arguments(self, parser):
+        parser.add_argument('--address', dest='address', type=str, help="Receptor address to remove")
+
+    def handle(self, **options):
+        deleted = ReceptorAddress.objects.filter(address=options['address']).delete()
+        if deleted[0]:
+            print(f"Successfully removed {options['address']}")
+            print("(changed: True)")
+        else:
+            print(f"Did not remove {options['address']}, not found")
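Note: the matching removal, sketched under the same hypothetical names as above:

    from django.core.management import call_command

    call_command('remove_receptor_address', address='hop1.example.org')
    # prints "Successfully removed hop1.example.org" and "(changed: True)", or "... not found"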
@@ -3,6 +3,7 @@
 
 from django.conf import settings
 from django.core.management.base import BaseCommand
+from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
 
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
@@ -25,6 +26,9 @@ class Command(BaseCommand):
             print(Control('callback_receiver').status())
             return
         consumer = None
+
+        CallbackReceiverMetricsServer().start()
+
         try:
             consumer = AWXConsumerRedis(
                 'callback_receiver',
@@ -10,6 +10,7 @@ from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control
 from awx.main.dispatch.pool import AutoscalePool
 from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
+from awx.main.analytics.subsystem_metrics import DispatcherMetricsServer
 
 logger = logging.getLogger('awx.main.dispatch')
 
@@ -62,6 +63,8 @@ class Command(BaseCommand):
 
         consumer = None
 
+        DispatcherMetricsServer().start()
+
         try:
             queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
             consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
@@ -16,6 +16,7 @@ from awx.main.analytics.broadcast_websocket import (
     RelayWebsocketStatsManager,
     safe_name,
 )
+from awx.main.analytics.subsystem_metrics import WebsocketsMetricsServer
 from awx.main.wsrelay import WebSocketRelayManager
 
 
@@ -91,6 +92,8 @@ class Command(BaseCommand):
         return host_stats
 
     def handle(self, *arg, **options):
+        WebsocketsMetricsServer().start()
+
         # it's necessary to delay this import in case
         # database migrations are still running
         from awx.main.models.ha import Instance
@@ -115,7 +115,14 @@ class InstanceManager(models.Manager):
                 return node[0]
         raise RuntimeError("No instance found with the current cluster host id")
 
-    def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
+    def register(
+        self,
+        node_uuid=None,
+        hostname=None,
+        ip_address="",
+        node_type='hybrid',
+        defaults=None,
+    ):
         if not hostname:
             hostname = settings.CLUSTER_HOST_ID
 
@@ -161,9 +168,6 @@ class InstanceManager(models.Manager):
             if instance.node_type != node_type:
                 instance.node_type = node_type
                 update_fields.append('node_type')
-            if instance.listener_port != listener_port:
-                instance.listener_port = listener_port
-                update_fields.append('listener_port')
             if update_fields:
                 instance.save(update_fields=update_fields)
             return (True, instance)
@@ -174,11 +178,13 @@ class InstanceManager(models.Manager):
         create_defaults = {
             'node_state': Instance.States.INSTALLED,
             'capacity': 0,
+            'managed': True,
         }
         if defaults is not None:
             create_defaults.update(defaults)
         uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
         if node_type == 'execution' and 'version' not in create_defaults:
             create_defaults['version'] = RECEPTOR_PENDING
-        instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
+        instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
 
         return (True, instance)
@@ -5,11 +5,12 @@ import logging
 import threading
 import time
 import urllib.parse
+from pathlib import Path
 
 from django.conf import settings
 from django.contrib.auth import logout
 from django.contrib.auth.models import User
-from django.db.migrations.executor import MigrationExecutor
+from django.db.migrations.recorder import MigrationRecorder
 from django.db import connection
 from django.shortcuts import redirect
 from django.apps import apps
@@ -17,9 +18,11 @@ from django.utils.deprecation import MiddlewareMixin
 from django.utils.translation import gettext_lazy as _
 from django.urls import reverse, resolve
 
+from awx.main import migrations
 from awx.main.utils.named_url_graph import generate_graph, GraphNode
 from awx.conf import fields, register
 from awx.main.utils.profiling import AWXProfiler
+from awx.main.utils.common import memoize
 
 
 logger = logging.getLogger('awx.main.middleware')
@@ -198,9 +201,22 @@ class URLModificationMiddleware(MiddlewareMixin):
         request.path_info = new_path
 
 
+@memoize(ttl=20)
+def is_migrating():
+    latest_number = 0
+    latest_name = ''
+    for migration_path in Path(migrations.__path__[0]).glob('[0-9]*.py'):
+        try:
+            migration_number = int(migration_path.name.split('_', 1)[0])
+        except ValueError:
+            continue
+        if migration_number > latest_number:
+            latest_number = migration_number
+            latest_name = migration_path.name[: -len('.py')]
+    return not MigrationRecorder(connection).migration_qs.filter(app='main', name=latest_name).exists()
+
+
 class MigrationRanCheckMiddleware(MiddlewareMixin):
     def process_request(self, request):
-        executor = MigrationExecutor(connection)
-        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
-        if bool(plan) and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
+        if is_migrating() and getattr(resolve(request.path), 'url_name', '') != 'migrations_notran':
             return redirect(reverse("ui:migrations_notran"))
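Note: this swaps a per-request MigrationExecutor plan computation for a much cheaper check, memoized for 20 seconds: compare the highest-numbered migration file on disk against Django's recorder table. A sketch of the underlying query (the migration name shown is the latest one added in this changeset):

    from django.db import connection
    from django.db.migrations.recorder import MigrationRecorder

    # False -> still migrating; True -> the latest 'main' migration is recorded as applied
    MigrationRecorder(connection).migration_qs.filter(app='main', name='0189_inbound_hop_nodes').exists()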
awx/main/migrations/0188_add_bitbucket_dc_webhook.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# Generated by Django 4.2.6 on 2023-11-16 21:00
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ('main', '0187_hop_nodes'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='job',
+            name='webhook_service',
+            field=models.CharField(
+                blank=True,
+                choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')],
+                help_text='Service that webhook requests will be accepted from',
+                max_length=16,
+            ),
+        ),
+        migrations.AlterField(
+            model_name='jobtemplate',
+            name='webhook_service',
+            field=models.CharField(
+                blank=True,
+                choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')],
+                help_text='Service that webhook requests will be accepted from',
+                max_length=16,
+            ),
+        ),
+        migrations.AlterField(
+            model_name='workflowjob',
+            name='webhook_service',
+            field=models.CharField(
+                blank=True,
+                choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')],
+                help_text='Service that webhook requests will be accepted from',
+                max_length=16,
+            ),
+        ),
+        migrations.AlterField(
+            model_name='workflowjobtemplate',
+            name='webhook_service',
+            field=models.CharField(
+                blank=True,
+                choices=[('github', 'GitHub'), ('gitlab', 'GitLab'), ('bitbucket_dc', 'BitBucket DataCenter')],
+                help_text='Service that webhook requests will be accepted from',
+                max_length=16,
+            ),
+        ),
+    ]
awx/main/migrations/0189_inbound_hop_nodes.py (new file, 150 lines)
@@ -0,0 +1,150 @@
+# Generated by Django 4.2.6 on 2024-01-19 19:24
+
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+def create_receptor_addresses(apps, schema_editor):
+    """
+    If listener_port was defined on an instance, create a receptor address for it
+    """
+    Instance = apps.get_model('main', 'Instance')
+    ReceptorAddress = apps.get_model('main', 'ReceptorAddress')
+    for instance in Instance.objects.exclude(listener_port=None):
+        ReceptorAddress.objects.create(
+            instance=instance,
+            address=instance.hostname,
+            port=instance.listener_port,
+            peers_from_control_nodes=instance.peers_from_control_nodes,
+            protocol='tcp',
+            is_internal=False,
+            canonical=True,
+        )
+
+
+def link_to_receptor_addresses(apps, schema_editor):
+    """
+    Modify each InstanceLink to point to the newly created
+    ReceptorAddresses, using the new target field
+    """
+    InstanceLink = apps.get_model('main', 'InstanceLink')
+    for link in InstanceLink.objects.all():
+        link.target = link.target_old.receptor_addresses.get()
+        link.save()
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ('main', '0188_add_bitbucket_dc_webhook'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='ReceptorAddress',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('address', models.CharField(help_text='Routable address for this instance.', max_length=255)),
+                (
+                    'port',
+                    models.IntegerField(
+                        default=27199,
+                        help_text='Port for the address.',
+                        validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(65535)],
+                    ),
+                ),
+                ('websocket_path', models.CharField(blank=True, default='', help_text='Websocket path.', max_length=255)),
+                (
+                    'protocol',
+                    models.CharField(
+                        choices=[('tcp', 'TCP'), ('ws', 'WS'), ('wss', 'WSS')],
+                        default='tcp',
+                        help_text="Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'.",
+                        max_length=10,
+                    ),
+                ),
+                ('is_internal', models.BooleanField(default=False, help_text='If True, only routable within the Kubernetes cluster.')),
+                ('canonical', models.BooleanField(default=False, help_text='If True, this address is the canonical address for the instance.')),
+                (
+                    'peers_from_control_nodes',
+                    models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
+                ),
+            ],
+        ),
+        migrations.RemoveConstraint(
+            model_name='instancelink',
+            name='source_and_target_can_not_be_equal',
+        ),
+        migrations.RenameField(
+            model_name='instancelink',
+            old_name='target',
+            new_name='target_old',
+        ),
+        migrations.AlterUniqueTogether(
+            name='instancelink',
+            unique_together=set(),
+        ),
+        migrations.AddField(
+            model_name='instance',
+            name='managed',
+            field=models.BooleanField(default=False, editable=False, help_text='If True, this instance is managed by the control plane.'),
+        ),
+        migrations.AlterField(
+            model_name='instancelink',
+            name='source',
+            field=models.ForeignKey(help_text='The source instance of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.instance'),
+        ),
+        migrations.AddField(
+            model_name='receptoraddress',
+            name='instance',
+            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receptor_addresses', to='main.instance'),
+        ),
+        migrations.AddField(
+            model_name='activitystream',
+            name='receptor_address',
+            field=models.ManyToManyField(blank=True, to='main.receptoraddress'),
+        ),
+        migrations.AddConstraint(
+            model_name='receptoraddress',
+            constraint=models.UniqueConstraint(fields=('address',), name='unique_receptor_address', violation_error_message='Receptor address must be unique.'),
+        ),
+        migrations.AddField(
+            model_name='instancelink',
+            name='target',
+            field=models.ForeignKey(
+                help_text='The target receptor address of this peer link.', null=True, on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress'
+            ),
+        ),
+        migrations.RunPython(create_receptor_addresses),
+        migrations.RunPython(link_to_receptor_addresses),
+        migrations.RemoveField(
+            model_name='instance',
+            name='peers_from_control_nodes',
+        ),
+        migrations.RemoveField(
+            model_name='instance',
+            name='listener_port',
+        ),
+        migrations.RemoveField(
+            model_name='instancelink',
+            name='target_old',
+        ),
+        migrations.AlterField(
+            model_name='instance',
+            name='peers',
+            field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.receptoraddress'),
+        ),
+        migrations.AlterField(
+            model_name='instancelink',
+            name='target',
+            field=models.ForeignKey(
+                help_text='The target receptor address of this peer link.', on_delete=django.db.models.deletion.CASCADE, to='main.receptoraddress'
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name='instancelink',
+            constraint=models.UniqueConstraint(
+                fields=('source', 'target'), name='unique_source_target', violation_error_message='Field source and target must be unique together.'
+            ),
+        ),
+    ]
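Note on ordering: target is first added with null=True, the two RunPython steps backfill ReceptorAddress rows and repoint every InstanceLink, and only then are target_old, listener_port, and peers_from_control_nodes dropped and target altered to be non-null. A quick post-migration sanity check (hypothetical shell session):

    from awx.main.models import ReceptorAddress

    # expect one canonical address per instance that previously had a listener_port
    ReceptorAddress.objects.filter(canonical=True).count()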
@@ -76,7 +76,7 @@ class azure_rm(PluginFileInjector):
         user_filters = []
         old_filterables = [
             ('resource_groups', 'resource_group'),
-            ('tags', 'tags')
+            ('tags', 'tags'),
             # locations / location would be an entry
             # but this would conflict with source_regions
         ]
@@ -6,7 +6,7 @@ from django.conf import settings  # noqa
 from django.db import connection
 from django.db.models.signals import pre_delete  # noqa
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx.main.models.base import BaseModel, PrimordialModel, accepts_json, CLOUD_INVENTORY_SOURCES, VERBOSITY_CHOICES  # noqa
@@ -14,6 +14,7 @@ from awx.main.models.unified_jobs import UnifiedJob, UnifiedJobTemplate, StdoutM
 from awx.main.models.organization import Organization, Profile, Team, UserSessionMembership  # noqa
 from awx.main.models.credential import Credential, CredentialType, CredentialInputSource, ManagedCredentialType, build_safe_env  # noqa
 from awx.main.models.projects import Project, ProjectUpdate  # noqa
+from awx.main.models.receptor_address import ReceptorAddress  # noqa
 from awx.main.models.inventory import (  # noqa
     CustomInventoryScript,
     Group,
@@ -77,6 +77,7 @@ class ActivityStream(models.Model):
     notification_template = models.ManyToManyField("NotificationTemplate", blank=True)
     notification = models.ManyToManyField("Notification", blank=True)
     label = models.ManyToManyField("Label", blank=True)
+    receptor_address = models.ManyToManyField("ReceptorAddress", blank=True)
     role = models.ManyToManyField("Role", blank=True)
     instance = models.ManyToManyField("Instance", blank=True)
     instance_group = models.ManyToManyField("InstanceGroup", blank=True)
@@ -12,7 +12,7 @@ from django.utils.text import Truncator
 from django.utils.translation import gettext_lazy as _
 from django.core.exceptions import ValidationError
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx.api.versioning import reverse
@@ -953,6 +953,25 @@ ManagedCredentialType(
     },
 )
 
+ManagedCredentialType(
+    namespace='bitbucket_dc_token',
+    kind='token',
+    name=gettext_noop('Bitbucket Data Center HTTP Access Token'),
+    managed=True,
+    inputs={
+        'fields': [
+            {
+                'id': 'token',
+                'label': gettext_noop('Token'),
+                'type': 'string',
+                'secret': True,
+                'help_text': gettext_noop('This token needs to come from your user settings in Bitbucket'),
+            }
+        ],
+        'required': ['token'],
+    },
+)
+
 ManagedCredentialType(
     namespace='insights',
     kind='insights',
@@ -1197,6 +1216,26 @@ ManagedCredentialType(
     },
 )
 
+ManagedCredentialType(
+    namespace='terraform',
+    kind='cloud',
+    name=gettext_noop('Terraform backend configuration'),
+    managed=True,
+    inputs={
+        'fields': [
+            {
+                'id': 'configuration',
+                'label': gettext_noop('Backend configuration'),
+                'type': 'string',
+                'secret': True,
+                'multiline': True,
+                'help_text': gettext_noop('Terraform backend config as Hashicorp configuration language.'),
+            },
+        ],
+        'required': ['configuration'],
+    },
+)
+
 
 class CredentialInputSource(PrimordialModel):
     class Meta:
@@ -122,3 +122,11 @@ def kubernetes_bearer_token(cred, env, private_data_dir):
         env['K8S_AUTH_SSL_CA_CERT'] = to_container_path(path, private_data_dir)
     else:
         env['K8S_AUTH_VERIFY_SSL'] = 'False'
+
+
+def terraform(cred, env, private_data_dir):
+    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
+    with os.fdopen(handle, 'w') as f:
+        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
+        f.write(cred.get_input('configuration'))
+    env['TF_BACKEND_CONFIG_FILE'] = to_container_path(path, private_data_dir)
@@ -124,8 +124,6 @@ class BasePlaybookEvent(CreatedModifiedModel):
         'parent_uuid',
         'start_line',
         'end_line',
-        'host_id',
-        'host_name',
         'verbosity',
     ]
     WRAPUP_EVENT = 'playbook_on_stats'
@@ -473,7 +471,7 @@ class JobEvent(BasePlaybookEvent):
     An event/message logged from the callback when running a job.
     """
 
-    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created']
+    VALID_KEYS = BasePlaybookEvent.VALID_KEYS + ['job_id', 'workflow_job_id', 'job_created', 'host_id', 'host_name']
     JOB_REFERENCE = 'job_id'
 
     objects = DeferJobCreatedManager()
@@ -5,7 +5,7 @@ from decimal import Decimal
 import logging
 import os
 
-from django.core.validators import MinValueValidator, MaxValueValidator
+from django.core.validators import MinValueValidator
 from django.db import models, connection
 from django.db.models.signals import post_save, post_delete
 from django.dispatch import receiver
@@ -17,7 +17,7 @@ from django.db.models import Sum, Q
 import redis
 from solo.models import SingletonModel
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx import __version__ as awx_application_version
@@ -34,6 +34,7 @@ from awx.main.models.rbac import (
 from awx.main.models.unified_jobs import UnifiedJob
 from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
 from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin
+from awx.main.models.receptor_address import ReceptorAddress
 
 # ansible-runner
 from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes
@@ -64,8 +65,19 @@ class HasPolicyEditsMixin(HasEditsMixin):
 
 
 class InstanceLink(BaseModel):
-    source = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='+')
-    target = models.ForeignKey('Instance', on_delete=models.CASCADE, related_name='reverse_peers')
+    class Meta:
+        ordering = ("id",)
+        # add constraint for source and target to be unique together
+        constraints = [
+            models.UniqueConstraint(
+                fields=["source", "target"],
+                name="unique_source_target",
+                violation_error_message=_("Field source and target must be unique together."),
+            )
+        ]
+
+    source = models.ForeignKey('Instance', on_delete=models.CASCADE, help_text=_("The source instance of this peer link."))
+    target = models.ForeignKey('ReceptorAddress', on_delete=models.CASCADE, help_text=_("The target receptor address of this peer link."))
 
     class States(models.TextChoices):
         ADDING = 'adding', _('Adding')
@@ -76,11 +88,6 @@ class InstanceLink(BaseModel):
         choices=States.choices, default=States.ADDING, max_length=16, help_text=_("Indicates the current life cycle stage of this peer link.")
     )
 
-    class Meta:
-        unique_together = ('source', 'target')
-        ordering = ("id",)
-        constraints = [models.CheckConstraint(check=~models.Q(source=models.F('target')), name='source_and_target_can_not_be_equal')]
-
 
 class Instance(HasPolicyEditsMixin, BaseModel):
     """A model representing an AWX instance running against this database."""
@@ -110,6 +117,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
         default="",
         max_length=50,
     )
+
     # Auto-fields, implementation is different from BaseModel
     created = models.DateTimeField(auto_now_add=True)
     modified = models.DateTimeField(auto_now=True)
@@ -185,16 +193,9 @@
     node_state = models.CharField(
         choices=States.choices, default=States.READY, max_length=16, help_text=_("Indicates the current life cycle stage of this instance.")
     )
-    listener_port = models.PositiveIntegerField(
-        blank=True,
-        null=True,
-        default=None,
-        validators=[MinValueValidator(1024), MaxValueValidator(65535)],
-        help_text=_("Port that Receptor will listen for incoming connections on."),
-    )
 
-    peers = models.ManyToManyField('self', symmetrical=False, through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
-    peers_from_control_nodes = models.BooleanField(default=False, help_text=_("If True, control plane cluster nodes should automatically peer to it."))
+    managed = models.BooleanField(help_text=_("If True, this instance is managed by the control plane."), default=False, editable=False)
+    peers = models.ManyToManyField('ReceptorAddress', through=InstanceLink, through_fields=('source', 'target'), related_name='peers_from')
 
     POLICY_FIELDS = frozenset(('managed_by_policy', 'hostname', 'capacity_adjustment'))
 
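Note: Instance.peers is no longer a self-referential M2M; it now points at ReceptorAddress through InstanceLink. A minimal sketch of wiring a peer by hand (hostnames and the address are hypothetical):

    from awx.main.models import Instance

    hop = Instance.objects.get(hostname='hop1')
    addr = hop.receptor_addresses.get(address='hop1.example.org')

    control = Instance.objects.get(hostname='awx-task-1')
    control.peers.add(addr)  # creates an InstanceLink(source=control, target=addr)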
@@ -241,6 +242,26 @@ class Instance(HasPolicyEditsMixin, BaseModel):
             return True
         return self.health_check_started > self.last_health_check
 
+    @property
+    def canonical_address(self):
+        return self.receptor_addresses.filter(canonical=True).first()
+
+    @property
+    def canonical_address_port(self):
+        # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
+        for addr in self.receptor_addresses.all():
+            if addr.canonical:
+                return addr.port
+        return None
+
+    @property
+    def canonical_address_peers_from_control_nodes(self):
+        # note: don't create a different query for receptor addresses, as this is prefetched on the View for optimization
+        for addr in self.receptor_addresses.all():
+            if addr.canonical:
+                return addr.peers_from_control_nodes
+        return False
+
     def get_cleanup_task_kwargs(self, **kwargs):
         """
         Produce options to use for the command: ansible-runner worker cleanup
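Note: canonical_address_port and canonical_address_peers_from_control_nodes deliberately walk receptor_addresses.all() rather than issuing a filtered query, so the prefetch cache is reused. A sketch of the access pattern they are tuned for (assuming a list-style view):

    instances = Instance.objects.prefetch_related('receptor_addresses')
    for i in instances:
        i.canonical_address_port  # no per-instance query; iterates the prefetched rows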
@@ -501,6 +522,35 @@ def schedule_write_receptor_config(broadcast=True):
         write_receptor_config()  # just run locally
 
 
+@receiver(post_save, sender=ReceptorAddress)
+def receptor_address_saved(sender, instance, **kwargs):
+    from awx.main.signals import disable_activity_stream
+
+    address = instance
+
+    control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
+    if address.peers_from_control_nodes:
+        # if control_instances is not a subset of current peers of address, then
+        # that means we need to add some InstanceLinks
+        if not control_instances <= set(address.peers_from.all()):
+            with disable_activity_stream():
+                for control_instance in control_instances:
+                    InstanceLink.objects.update_or_create(source=control_instance, target=address)
+            schedule_write_receptor_config()
+    else:
+        if address.peers_from.exists():
+            with disable_activity_stream():
+                address.peers_from.remove(*control_instances)
+            schedule_write_receptor_config()
+
+
+@receiver(post_delete, sender=ReceptorAddress)
+def receptor_address_deleted(sender, instance, **kwargs):
+    address = instance
+    if address.peers_from_control_nodes:
+        schedule_write_receptor_config()
+
+
 @receiver(post_save, sender=Instance)
 def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
     '''
@@ -511,11 +561,14 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
     2. a node changes its value of peers_from_control_nodes
     3. a new control node comes online and has instances to peer to
     '''
+    from awx.main.signals import disable_activity_stream
+
     if created and settings.IS_K8S and instance.node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
-        inst = Instance.objects.filter(peers_from_control_nodes=True)
-        if set(instance.peers.all()) != set(inst):
-            instance.peers.set(inst)
-            schedule_write_receptor_config(broadcast=False)
+        peers_addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True)
+        if peers_addresses.exists():
+            with disable_activity_stream():
+                instance.peers.add(*peers_addresses)
+            schedule_write_receptor_config(broadcast=False)
 
     if settings.IS_K8S and instance.node_type in [Instance.Types.HOP, Instance.Types.EXECUTION]:
         if instance.node_state == Instance.States.DEPROVISIONING:
@@ -524,16 +577,6 @@ def on_instance_saved(sender, instance, created=False, raw=False, **kwargs):
             # wait for jobs on the node to complete, then delete the
             # node and kick off write_receptor_config
             connection.on_commit(lambda: remove_deprovisioned_node.apply_async([instance.hostname]))
-        else:
-            control_instances = set(Instance.objects.filter(node_type__in=[Instance.Types.CONTROL, Instance.Types.HYBRID]))
-            if instance.peers_from_control_nodes:
-                if (control_instances & set(instance.peers_from.all())) != set(control_instances):
-                    instance.peers_from.add(*control_instances)
-                    schedule_write_receptor_config()  # keep method separate to make pytest mocking easier
-            else:
-                if set(control_instances) & set(instance.peers_from.all()):
-                    instance.peers_from.remove(*control_instances)
-                    schedule_write_receptor_config()
 
     if created or instance.has_policy_changes():
         schedule_policy_task()
@@ -548,8 +591,6 @@ def on_instance_group_deleted(sender, instance, using, **kwargs):
 @receiver(post_delete, sender=Instance)
 def on_instance_deleted(sender, instance, using, **kwargs):
     schedule_policy_task()
-    if settings.IS_K8S and instance.node_type in (Instance.Types.EXECUTION, Instance.Types.HOP) and instance.peers_from_control_nodes:
-        schedule_write_receptor_config()
 
 
 class UnifiedJobTemplateInstanceGroupMembership(models.Model):
@@ -25,7 +25,7 @@ from django.db.models import Q
 # REST Framework
 from rest_framework.exceptions import ParseError
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx.api.versioning import reverse
@@ -20,7 +20,7 @@ from django.core.exceptions import FieldDoesNotExist
 # REST Framework
 from rest_framework.exceptions import ParseError
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx.api.versioning import reverse
@@ -16,7 +16,7 @@ from django.db.models.query import QuerySet
 from django.utils.crypto import get_random_string
 from django.utils.translation import gettext_lazy as _
 
-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search
 
 # AWX
 from awx.main.models.rbac import Role, RoleAncestorEntry
@@ -527,7 +527,6 @@ class CustomVirtualEnvMixin(models.Model):
 
 
 class RelatedJobsMixin(object):
-
     """
     This method is intended to be overwritten.
     Called by get_active_jobs()
@@ -562,6 +561,7 @@ class WebhookTemplateMixin(models.Model):
     SERVICES = [
         ('github', "GitHub"),
         ('gitlab', "GitLab"),
+        ('bitbucket_dc', "BitBucket DataCenter"),
     ]
 
     webhook_service = models.CharField(max_length=16, choices=SERVICES, blank=True, help_text=_('Service that webhook requests will be accepted from'))
@@ -622,6 +622,7 @@ class WebhookMixin(models.Model):
         service_header = {
             'github': ('Authorization', 'token {}'),
             'gitlab': ('PRIVATE-TOKEN', '{}'),
+            'bitbucket_dc': ('Authorization', 'Bearer {}'),
         }
         service_statuses = {
             'github': {
@@ -639,6 +640,14 @@
                 'error': 'failed',  # GitLab doesn't have an 'error' status distinct from 'failed' :(
                 'canceled': 'canceled',
             },
+            'bitbucket_dc': {
+                'pending': 'INPROGRESS',  # Bitbucket DC doesn't have any other statuses distinct from INPROGRESS, SUCCESSFUL, FAILED :(
+                'running': 'INPROGRESS',
+                'successful': 'SUCCESSFUL',
+                'failed': 'FAILED',
+                'error': 'FAILED',
+                'canceled': 'FAILED',
+            },
         }
 
         statuses = service_statuses[self.webhook_service]
@@ -647,11 +656,18 @@
             return
         try:
             license_type = get_licenser().validate().get('license_type')
-            data = {
-                'state': statuses[status],
-                'context': 'ansible/awx' if license_type == 'open' else 'ansible/tower',
-                'target_url': self.get_ui_url(),
-            }
+            if self.webhook_service == 'bitbucket_dc':
+                data = {
+                    'state': statuses[status],
+                    'key': 'ansible/awx' if license_type == 'open' else 'ansible/tower',
+                    'url': self.get_ui_url(),
+                }
+            else:
+                data = {
+                    'state': statuses[status],
+                    'context': 'ansible/awx' if license_type == 'open' else 'ansible/tower',
+                    'target_url': self.get_ui_url(),
+                }
             k, v = service_header[self.webhook_service]
             headers = {k: v.format(self.webhook_credential.get_input('token')), 'Content-Type': 'application/json'}
             response = requests.post(status_api, data=json.dumps(data), headers=headers, timeout=30)
|
|||||||
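The hunk above keys both the authorization header and the status payload shape off of webhook_service: Bitbucket Data Center wants 'key'/'url' where GitHub and GitLab take 'context'/'target_url'. A minimal standalone sketch of that selection logic, with placeholder token and URL values (build_status_request is an illustrative helper, not an AWX function):

import json

SERVICE_HEADER = {
    'github': ('Authorization', 'token {}'),
    'gitlab': ('PRIVATE-TOKEN', '{}'),
    'bitbucket_dc': ('Authorization', 'Bearer {}'),
}


def build_status_request(service, state, key, url, token):
    # Bitbucket DC expects 'key'/'url'; GitHub and GitLab expect
    # 'context'/'target_url' for the same two pieces of information.
    if service == 'bitbucket_dc':
        data = {'state': state, 'key': key, 'url': url}
    else:
        data = {'state': state, 'context': key, 'target_url': url}
    header_name, header_fmt = SERVICE_HEADER[service]
    headers = {header_name: header_fmt.format(token), 'Content-Type': 'application/json'}
    return headers, json.dumps(data)


headers, body = build_status_request('bitbucket_dc', 'SUCCESSFUL', 'ansible/awx', 'https://awx.example.org/#/jobs/1', 'placeholder-token')
# The mixin then POSTs this to the service's status API, along the lines of:
# requests.post(status_api, data=body, headers=headers, timeout=30)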
@@ -5,6 +5,7 @@ from copy import deepcopy
 import datetime
 import logging
 import json
+import traceback

 from django.db import models
 from django.conf import settings
@@ -15,7 +16,7 @@ from django.utils.encoding import smart_str, force_str
 from jinja2 import sandbox, ChainableUndefined
 from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search

 # AWX
 from awx.api.versioning import reverse
@@ -484,14 +485,29 @@ class JobNotificationMixin(object):
         if msg_template:
             try:
                 msg = env.from_string(msg_template).render(**context)
-            except (TemplateSyntaxError, UndefinedError, SecurityError):
-                msg = ''
+            except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
+                msg = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])

         if body_template:
             try:
                 body = env.from_string(body_template).render(**context)
-            except (TemplateSyntaxError, UndefinedError, SecurityError):
-                body = ''
+            except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
+                body = '\r\n'.join([e.message, ''.join(traceback.format_exception(None, e, e.__traceback__).replace('\n', '\r\n'))])
+
+        # https://datatracker.ietf.org/doc/html/rfc2822#section-2.2
+        # Body should have at least 2 CRLF, some clients will interpret
+        # the email incorrectly with blank body. So we will check that
+
+        if len(body.strip().splitlines()) <= 2:
+            # blank body
+            body = '\r\n'.join(
+                [
+                    "The template rendering return a blank body.",
+                    "Please check the template.",
+                    "Refer to https://github.com/ansible/awx/issues/13983",
+                    "for further information.",
+                ]
+            )

         return (msg, body)

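The change above stops hiding template failures: instead of an empty message, the notification now carries the exception and its formatted traceback, CRLF-joined per the email convention the hunk cites. A standalone sketch of the same pattern, under the assumptions that str(e) stands in for e.message and that the traceback lines are joined before the newline substitution (the order that actually executes); the templates are made-up examples:

import traceback

from jinja2 import sandbox
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

env = sandbox.ImmutableSandboxedEnvironment()


def render_or_explain(template_source, context):
    # Render in a sandbox; on failure, return the error plus traceback
    # instead of silently producing an empty string.
    try:
        return env.from_string(template_source).render(**context)
    except (TemplateSyntaxError, UndefinedError, SecurityError) as e:
        trace = ''.join(traceback.format_exception(None, e, e.__traceback__))
        return '\r\n'.join([str(e), trace.replace('\n', '\r\n')])


print(render_or_explain('{{ job.status }}', {'job': {'status': 'successful'}}))  # renders normally
print(render_or_explain('{% broken', {}))  # returns the syntax error plus its traceback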
awx/main/models/receptor_address.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+from django.db import models
+from django.core.validators import MinValueValidator, MaxValueValidator
+from django.utils.translation import gettext_lazy as _
+from awx.api.versioning import reverse
+
+
+class Protocols(models.TextChoices):
+    TCP = 'tcp', 'TCP'
+    WS = 'ws', 'WS'
+    WSS = 'wss', 'WSS'
+
+
+class ReceptorAddress(models.Model):
+    class Meta:
+        app_label = 'main'
+        constraints = [
+            models.UniqueConstraint(
+                fields=["address"],
+                name="unique_receptor_address",
+                violation_error_message=_("Receptor address must be unique."),
+            )
+        ]
+
+    address = models.CharField(help_text=_("Routable address for this instance."), max_length=255)
+    port = models.IntegerField(help_text=_("Port for the address."), default=27199, validators=[MinValueValidator(0), MaxValueValidator(65535)])
+    websocket_path = models.CharField(help_text=_("Websocket path."), max_length=255, default="", blank=True)
+    protocol = models.CharField(
+        help_text=_("Protocol to use for the Receptor listener, 'tcp', 'wss', or 'ws'."), max_length=10, default=Protocols.TCP, choices=Protocols.choices
+    )
+    is_internal = models.BooleanField(help_text=_("If True, only routable within the Kubernetes cluster."), default=False)
+    canonical = models.BooleanField(help_text=_("If True, this address is the canonical address for the instance."), default=False)
+    peers_from_control_nodes = models.BooleanField(help_text=_("If True, control plane cluster nodes should automatically peer to it."), default=False)
+    instance = models.ForeignKey(
+        'Instance',
+        related_name='receptor_addresses',
+        on_delete=models.CASCADE,
+        null=False,
+    )
+
+    def __str__(self):
+        return self.get_full_address()
+
+    def get_full_address(self):
+        scheme = ""
+        path = ""
+        port = ""
+        if self.protocol == "ws":
+            scheme = "wss://"
+
+        if self.protocol == "ws" and self.websocket_path:
+            path = f"/{self.websocket_path}"
+
+        if self.port:
+            port = f":{self.port}"
+
+        return f"{scheme}{self.address}{port}{path}"
+
+    def get_peer_type(self):
+        if self.protocol == 'tcp':
+            return 'tcp-peer'
+        elif self.protocol in ['ws', 'wss']:
+            return 'ws-peer'
+        else:
+            return None
+
+    def get_absolute_url(self, request=None):
+        return reverse('api:receptor_address_detail', kwargs={'pk': self.pk}, request=request)
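get_full_address() above assembles the printable form of an address from the protocol, port, and websocket path. A pure-function restatement of those rules for illustration (note that, as committed, only protocol 'ws' gains the wss:// scheme and path; 'wss' and 'tcp' both render bare host:port):

def full_address(protocol, address, port=None, websocket_path=""):
    # Mirrors the conditionals in ReceptorAddress.get_full_address().
    scheme = "wss://" if protocol == "ws" else ""
    path = f"/{websocket_path}" if protocol == "ws" and websocket_path else ""
    port_part = f":{port}" if port else ""
    return f"{scheme}{address}{port_part}{path}"


assert full_address("tcp", "hop1", 27199) == "hop1:27199"
assert full_address("ws", "hop1", 27199, "receptor") == "wss://hop1:27199/receptor"
assert full_address("wss", "hop1", 27199) == "hop1:27199"  # no scheme for 'wss' as written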
@@ -30,7 +30,7 @@ from rest_framework.exceptions import ParseError
 # Django-Polymorphic
 from polymorphic.models import PolymorphicModel

-from ansible_base.utils.models import prevent_search, get_type_for_model
+from ansible_base.lib.utils.models import prevent_search, get_type_for_model

 # AWX
 from awx.main.models.base import CommonModelNameNotUnique, PasswordFieldsModel, NotificationFieldsModel
@@ -23,7 +23,7 @@ from crum import get_current_user
 from jinja2 import sandbox
 from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError

-from ansible_base.utils.models import prevent_search
+from ansible_base.lib.utils.models import prevent_search

 # AWX
 from awx.api.versioning import reverse
@@ -1,5 +1,6 @@
 # Copyright (c) 2019 Ansible, Inc.
 # All Rights Reserved.
+# -*-coding:utf-8-*-


 class CustomNotificationBase(object):
@@ -4,13 +4,15 @@ import logging
 from django.conf import settings
 from django.urls import re_path

-from channels.auth import AuthMiddlewareStack
 from channels.routing import ProtocolTypeRouter, URLRouter

+from ansible_base.lib.channels.middleware import DrfAuthMiddlewareStack
+
 from . import consumers


 logger = logging.getLogger('awx.main.routing')
+_application = None


 class AWXProtocolTypeRouter(ProtocolTypeRouter):
@@ -26,13 +28,91 @@ class AWXProtocolTypeRouter(ProtocolTypeRouter):
         super().__init__(*args, **kwargs)


+class MultipleURLRouterAdapter:
+    """
+    Django channels doesn't nicely support Auth_1(urls_1), Auth_2(urls_2), ..., Auth_n(urls_n)
+    This class allows assocating a websocket url with an auth
+    Ordering matters. The first matching url will be used.
+    """
+
+    def __init__(self, *auths):
+        self._auths = [a for a in auths]
+
+    async def __call__(self, scope, receive, send):
+        """
+        Loop through the list of passed in URLRouter's (they may or may not be wrapped by auth).
+        We know we have exhausted the list of URLRouter patterns when we get a
+        ValueError('No route found for path %s'). When that happens, move onto the next
+        URLRouter.
+        If the final URLRouter raises an error, re-raise it in the end.

+        We know that we found a match when no error is raised, end the loop.
+        """
+        last_index = len(self._auths) - 1
+        for i, auth in enumerate(self._auths):
+            try:
+                return await auth.__call__(scope, receive, send)
+            except ValueError as e:
+                if str(e).startswith('No route found for path'):
+                    # Only surface the error if on the last URLRouter
+                    if i == last_index:
+                        raise


 websocket_urlpatterns = [
+    re_path(r'api/websocket/$', consumers.EventConsumer.as_asgi()),
     re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
+]
+
+websocket_relay_urlpatterns = [
     re_path(r'websocket/relay/$', consumers.RelayConsumer.as_asgi()),
 ]

-application = AWXProtocolTypeRouter(
-    {
-        'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
-    }
-)
+
+def application_func(cls=AWXProtocolTypeRouter) -> ProtocolTypeRouter:
+    return cls(
+        {
+            'websocket': MultipleURLRouterAdapter(
+                URLRouter(websocket_relay_urlpatterns),
+                DrfAuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
+            )
+        }
+    )
+
+
+def __getattr__(name: str) -> ProtocolTypeRouter:
+    """
+    Defer instantiating application.
+    For testing, we just need it to NOT run on import.
+
+    https://peps.python.org/pep-0562/#specification
+
+    Normally, someone would get application from this module via:
+        from awx.main.routing import application
+
+    and do something with the application:
+        application.do_something()
+
+    What does the callstack look like when the import runs?
+        ...
+        awx.main.routing.__getattribute__(...)  # <-- we don't define this so NOOP as far as we are concerned
+        if '__getattr__' in awx.main.routing.__dict__:  # <-- this triggers the function we are in
+            return awx.main.routing.__dict__.__getattr__("application")
+
+    Why isn't this function simply implemented as:
+        def __getattr__(name):
+            if not _application:
+                _application = application_func()
+            return _application
+
+    It could. I manually tested it and it passes test_routing.py.
+
+    But my understanding after reading the PEP-0562 specification link above is that
+    performance would be a bit worse due to the extra __getattribute__ calls when
+    we reference non-global variables.
+    """
+    if name == "application":
+        globs = globals()
+        if not globs['_application']:
+            globs['_application'] = application_func()
+        return globs['_application']
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
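The module-level __getattr__ added above is the PEP 562 hook: Python consults it only after a normal module attribute lookup fails, so 'application' is built on first access instead of at import time. A self-contained demonstration of the same pattern, using a hypothetical module name lazymod.py and a stand-in for the router:

# lazymod.py (hypothetical module, for illustration only)
_application = None


def application_func():
    print("building application")  # runs once, on first access
    return object()  # stand-in for AWXProtocolTypeRouter({...})


def __getattr__(name):
    # PEP 562: called only when 'name' is not found in the module dict.
    if name == "application":
        globs = globals()
        if not globs['_application']:
            globs['_application'] = application_func()
        return globs['_application']
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# elsewhere:
#   import lazymod
#   lazymod.application  # prints "building application" only the first time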
@@ -17,7 +17,7 @@ from django.utils.timezone import now as tz_now
 from django.conf import settings
 from django.contrib.contenttypes.models import ContentType

-from ansible_base.utils.models import get_type_for_model
+from ansible_base.lib.utils.models import get_type_for_model

 # AWX
 from awx.main.dispatch.reaper import reap_job
@@ -68,7 +68,7 @@ class TaskBase:
         # initialize each metric to 0 and force metric_has_changed to true. This
         # ensures each task manager metric will be overridden when pipe_execute
         # is called later.
-        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
+        self.subsystem_metrics = s_metrics.DispatcherMetrics(auto_pipe_execute=False)
         self.start_time = time.time()

         # We want to avoid calling settings in loops, so cache these settings at init time
@@ -105,7 +105,7 @@ class TaskBase:
         try:
             # increment task_manager_schedule_calls regardless if the other
             # metrics are recorded
-            s_metrics.Metrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
+            s_metrics.DispatcherMetrics(auto_pipe_execute=True).inc(f"{self.prefix}__schedule_calls", 1)
             # Only record metrics if the last time recording was more
             # than SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL ago.
             # Prevents a short-duration task manager that runs directly after a
@@ -29,7 +29,7 @@ class RunnerCallback:
         self.safe_env = {}
         self.event_ct = 0
         self.model = model
-        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
+        self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
         self.wrapup_event_dispatched = False
         self.artifacts_processed = False
         self.extra_update_fields = {}
@@ -95,17 +95,17 @@ class RunnerCallback:
         if self.parent_workflow_job_id:
             event_data['workflow_job_id'] = self.parent_workflow_job_id
         event_data['job_created'] = self.job_created
-        if self.host_map:
         host = event_data.get('event_data', {}).get('host', '').strip()
         if host:
             event_data['host_name'] = host
             if host in self.host_map:
                 event_data['host_id'] = self.host_map[host]
         else:
             event_data['host_name'] = ''
             event_data['host_id'] = ''
         if event_data.get('event') == 'playbook_on_stats':
             event_data['host_map'] = self.host_map
+
         if isinstance(self, RunnerCallbackForProjectUpdate):
             # need a better way to have this check.
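The getattr() lookup above appears to keep any deployment that still defines the old, misspelled DISPATCHER_DB_DOWNTOWN_TOLLERANCE setting working, while falling back to the corrected DISPATCHER_DB_DOWNTIME_TOLERANCE. A tiny sketch of that precedence with a stand-in settings object (not Django's):

class settings:  # stand-in; the real code reads django.conf.settings
    DISPATCHER_DB_DOWNTIME_TOLERANCE = 40


# The misspelled name wins if a deployment defines it; otherwise the
# corrected name is used as the default.
update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
assert update_attempts == 8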
@@ -114,7 +114,7 @@ class BaseTask(object):

     def __init__(self):
         self.cleanup_paths = []
-        self.update_attempts = int(settings.DISPATCHER_DB_DOWNTIME_TOLERANCE / 5)
+        self.update_attempts = int(getattr(settings, 'DISPATCHER_DB_DOWNTOWN_TOLLERANCE', settings.DISPATCHER_DB_DOWNTIME_TOLERANCE) / 5)
         self.runner_callback = self.callback_class(model=self.model)

     def update_model(self, pk, _attempt=0, **updates):
@@ -27,7 +27,7 @@ from awx.main.utils.common import (
 )
 from awx.main.constants import MAX_ISOLATED_PATH_COLON_DELIMITER
 from awx.main.tasks.signals import signal_state, signal_callback, SignalExit
-from awx.main.models import Instance, InstanceLink, UnifiedJob
+from awx.main.models import Instance, InstanceLink, UnifiedJob, ReceptorAddress
 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.publish import task
 from awx.main.utils.pglock import advisory_lock
@@ -676,36 +676,44 @@ RECEPTOR_CONFIG_STARTER = (
 )


-def should_update_config(instances):
+def should_update_config(new_config):
     '''
     checks that the list of instances matches the list of
     tcp-peers in the config
     '''
-    current_config = read_receptor_config()  # this gets receptor conf lock
-    current_peers = []
-    for config_entry in current_config:
-        for key, value in config_entry.items():
-            if key.endswith('-peer'):
-                current_peers.append(value['address'])
-    intended_peers = [f"{i.hostname}:{i.listener_port}" for i in instances]
-    logger.debug(f"Peers current {current_peers} intended {intended_peers}")
-    if set(current_peers) == set(intended_peers):
-        return False  # config file is already update to date

-    return True
+    current_config = read_receptor_config()  # this gets receptor conf lock
+    for config_entry in current_config:
+        if config_entry not in new_config:
+            logger.warning(f"{config_entry} should not be in receptor config. Updating.")
+            return True
+    for config_entry in new_config:
+        if config_entry not in current_config:
+            logger.warning(f"{config_entry} missing from receptor config. Updating.")
+            return True
+
+    return False


 def generate_config_data():
     # returns two values
     # receptor config - based on current database peers
     # should_update - If True, receptor_config differs from the receptor conf file on disk
-    instances = Instance.objects.filter(node_type__in=(Instance.Types.EXECUTION, Instance.Types.HOP), peers_from_control_nodes=True)
+    addresses = ReceptorAddress.objects.filter(peers_from_control_nodes=True)

     receptor_config = list(RECEPTOR_CONFIG_STARTER)
-    for instance in instances:
-        peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}
-        receptor_config.append(peer)
-    should_update = should_update_config(instances)
+    for address in addresses:
+        if address.get_peer_type():
+            peer = {
+                f'{address.get_peer_type()}': {
+                    'address': f'{address.get_full_address()}',
+                    'tls': 'tlsclient',
+                }
+            }
+            receptor_config.append(peer)
+        else:
+            logger.warning(f"Receptor address {address} has unsupported peer type, skipping.")
+    should_update = should_update_config(receptor_config)
     return receptor_config, should_update


@@ -747,14 +755,13 @@ def write_receptor_config():
     with lock:
         with open(__RECEPTOR_CONF, 'w') as file:
             yaml.dump(receptor_config, file, default_flow_style=False)
-
     reload_receptor()


 @task(queue=get_task_queuename)
 def remove_deprovisioned_node(hostname):
     InstanceLink.objects.filter(source__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
-    InstanceLink.objects.filter(target__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)
+    InstanceLink.objects.filter(target__instance__hostname=hostname).update(link_state=InstanceLink.States.REMOVING)

     node_jobs = UnifiedJob.objects.filter(
         execution_node=hostname,
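should_update_config() above now decides by two-way containment: the on-disk config must be rewritten if either side holds an entry the other lacks, which also catches stale entries that the old peer-list comparison could miss. A standalone sketch of that check with invented receptor-style entries (configs_differ is illustrative, not the AWX function):

def configs_differ(current, new):
    for entry in current:
        if entry not in new:
            return True  # entry on disk that should no longer be there
    for entry in new:
        if entry not in current:
            return True  # intended entry missing from disk
    return False


current = [{'tcp-peer': {'address': 'hop1:27199', 'tls': 'tlsclient'}}]
new = current + [{'ws-peer': {'address': 'wss://hop2:27199', 'tls': 'tlsclient'}}]
assert configs_differ(current, new)  # new peer added, so rewrite
assert not configs_differ(current, list(current))  # identical, leave as is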
@@ -62,7 +62,7 @@ from awx.main.tasks.receptor import get_receptor_ctl, worker_info, worker_cleanu
 from awx.main.consumers import emit_channel_notification
 from awx.main import analytics
 from awx.conf import settings_registry
-from awx.main.analytics.subsystem_metrics import Metrics
+from awx.main.analytics.subsystem_metrics import DispatcherMetrics

 from rest_framework.exceptions import PermissionDenied

@@ -113,7 +113,7 @@ def dispatch_startup():
     cluster_node_heartbeat()
     reaper.startup_reaping()
     reaper.reap_waiting(grace_period=0)
-    m = Metrics()
+    m = DispatcherMetrics()
     m.reset_values()


@@ -495,7 +495,7 @@ def inspect_established_receptor_connections(mesh_status):
     update_links = []
     for link in all_links:
         if link.link_state != InstanceLink.States.REMOVING:
-            if link.target.hostname in active_receptor_conns.get(link.source.hostname, {}):
+            if link.target.instance.hostname in active_receptor_conns.get(link.source.hostname, {}):
                 if link.link_state is not InstanceLink.States.ESTABLISHED:
                     link.link_state = InstanceLink.States.ESTABLISHED
                     update_links.append(link)
@@ -1,19 +1,16 @@
 import pytest
 import yaml
-import itertools
 from unittest import mock

-from django.db.utils import IntegrityError
-
 from awx.api.versioning import reverse
-from awx.main.models import Instance
+from awx.main.models import Instance, ReceptorAddress
 from awx.api.views.instance_install_bundle import generate_group_vars_all_yml


 def has_peer(group_vars, peer):
     peers = group_vars.get('receptor_peers', [])
     for p in peers:
-        if f"{p['host']}:{p['port']}" == peer:
+        if p['address'] == peer:
             return True
     return False

@@ -24,119 +21,314 @@ class TestPeers:
     def configure_settings(self, settings):
         settings.IS_K8S = True

-    @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
-    def test_prevent_peering_to_self(self, node_type):
+    @pytest.mark.parametrize('node_type', ['hop', 'execution'])
+    def test_peering_to_self(self, node_type, admin_user, patch):
         """
         cannot peer to self
         """
-        control_instance = Instance.objects.create(hostname='abc', node_type=node_type)
-        with pytest.raises(IntegrityError):
-            control_instance.peers.add(control_instance)
+        instance = Instance.objects.create(hostname='abc', node_type=node_type)
+        addr = ReceptorAddress.objects.create(instance=instance, address='abc', canonical=True)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': instance.pk}),
+            data={"hostname": "abc", "node_type": node_type, "peers": [addr.id]},
+            user=admin_user,
+            expect=400,
+        )
+        assert 'Instance cannot peer to its own address.' in str(resp.data)

     @pytest.mark.parametrize('node_type', ['control', 'hybrid', 'hop', 'execution'])
     def test_creating_node(self, node_type, admin_user, post):
         """
         can only add hop and execution nodes via API
         """
-        post(
+        resp = post(
             url=reverse('api:instance_list'),
             data={"hostname": "abc", "node_type": node_type},
             user=admin_user,
             expect=400 if node_type in ['control', 'hybrid'] else 201,
         )
+        if resp.status_code == 400:
+            assert 'Can only create execution or hop nodes.' in str(resp.data)

     def test_changing_node_type(self, admin_user, patch):
         """
         cannot change node type
         """
         hop = Instance.objects.create(hostname='abc', node_type="hop")
-        patch(
+        resp = patch(
             url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
             data={"node_type": "execution"},
             user=admin_user,
             expect=400,
         )
+        assert 'Cannot change node type.' in str(resp.data)

-    @pytest.mark.parametrize('node_type', ['hop', 'execution'])
-    def test_listener_port_null(self, node_type, admin_user, post):
-        """
-        listener_port can be None
-        """
-        post(
-            url=reverse('api:instance_list'),
-            data={"hostname": "abc", "node_type": node_type, "listener_port": None},
+    @pytest.mark.parametrize(
+        'payload_port, payload_peers_from, initial_port, initial_peers_from',
+        [
+            (-1, -1, None, None),
+            (-1, -1, 27199, False),
+            (-1, -1, 27199, True),
+            (None, -1, None, None),
+            (None, False, None, None),
+            (-1, False, None, None),
+            (27199, True, 27199, True),
+            (27199, False, 27199, False),
+            (27199, -1, 27199, True),
+            (27199, -1, 27199, False),
+            (-1, True, 27199, True),
+            (-1, False, 27199, False),
+        ],
+    )
+    def test_no_op(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
+        node = Instance.objects.create(hostname='abc', node_type='hop')
+        if initial_port is not None:
+            ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
+
+            assert ReceptorAddress.objects.filter(instance=node).count() == 1
+        else:
+            assert ReceptorAddress.objects.filter(instance=node).count() == 0
+
+        data = {'enabled': True}  # Just to have something to post.
+        if payload_port != -1:
+            data['listener_port'] = payload_port
+        if payload_peers_from != -1:
+            data['peers_from_control_nodes'] = payload_peers_from
+
+        patch(
+            url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
+            data=data,
             user=admin_user,
-            expect=201,
+            expect=200,
         )

-    @pytest.mark.parametrize('node_type, allowed', [('control', False), ('hybrid', False), ('hop', True), ('execution', True)])
-    def test_peers_from_control_nodes_allowed(self, node_type, allowed, post, admin_user):
-        """
-        only hop and execution nodes can have peers_from_control_nodes set to True
-        """
-        post(
-            url=reverse('api:instance_list'),
-            data={"hostname": "abc", "peers_from_control_nodes": True, "node_type": node_type, "listener_port": 6789},
+        assert ReceptorAddress.objects.filter(instance=node).count() == (0 if initial_port is None else 1)
+        if initial_port is not None:
+            ra = ReceptorAddress.objects.get(instance=node, canonical=True)
+            assert ra.port == initial_port
+            assert ra.peers_from_control_nodes == initial_peers_from
+
+    @pytest.mark.parametrize(
+        'payload_port, payload_peers_from',
+        [
+            (27199, True),
+            (27199, False),
+            (27199, -1),
+        ],
+    )
+    def test_creates_canonical_address(self, payload_port, payload_peers_from, admin_user, patch):
+        node = Instance.objects.create(hostname='abc', node_type='hop')
+        assert ReceptorAddress.objects.filter(instance=node).count() == 0
+
+        data = {'enabled': True}  # Just to have something to post.
+        if payload_port != -1:
+            data['listener_port'] = payload_port
+        if payload_peers_from != -1:
+            data['peers_from_control_nodes'] = payload_peers_from
+
+        patch(
+            url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
+            data=data,
             user=admin_user,
-            expect=201 if allowed else 400,
+            expect=200,
         )

-    def test_listener_port_is_required(self, admin_user, post):
-        """
-        if adding instance to peers list, that instance must have listener_port set
-        """
-        Instance.objects.create(hostname='abc', node_type="hop", listener_port=None)
-        post(
-            url=reverse('api:instance_list'),
-            data={"hostname": "ex", "peers_from_control_nodes": False, "node_type": "execution", "listener_port": None, "peers": ["abc"]},
+        assert ReceptorAddress.objects.filter(instance=node).count() == 1
+        ra = ReceptorAddress.objects.get(instance=node, canonical=True)
+        assert ra.port == payload_port
+        assert ra.peers_from_control_nodes == (payload_peers_from if payload_peers_from != -1 else False)
+
+    @pytest.mark.parametrize(
+        'payload_port, payload_peers_from, initial_port, initial_peers_from',
+        [
+            (None, False, 27199, True),
+            (None, -1, 27199, True),
+            (None, False, 27199, False),
+            (None, -1, 27199, False),
+        ],
+    )
+    def test_deletes_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
+        node = Instance.objects.create(hostname='abc', node_type='hop')
+        ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
+
+        assert ReceptorAddress.objects.filter(instance=node).count() == 1
+
+        data = {'enabled': True}  # Just to have something to post.
+        if payload_port != -1:
+            data['listener_port'] = payload_port
+        if payload_peers_from != -1:
+            data['peers_from_control_nodes'] = payload_peers_from
+
+        patch(
+            url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
+            data=data,
+            user=admin_user,
+            expect=200,
+        )
+
+        assert ReceptorAddress.objects.filter(instance=node).count() == 0
+
+    @pytest.mark.parametrize(
+        'payload_port, payload_peers_from, initial_port, initial_peers_from',
+        [
+            (27199, True, 27199, False),
+            (27199, False, 27199, True),
+            (-1, True, 27199, False),
+            (-1, False, 27199, True),
+        ],
+    )
+    def test_updates_canonical_address(self, payload_port, payload_peers_from, initial_port, initial_peers_from, admin_user, patch):
+        node = Instance.objects.create(hostname='abc', node_type='hop')
+        ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
+
+        assert ReceptorAddress.objects.filter(instance=node).count() == 1
+
+        data = {'enabled': True}  # Just to have something to post.
+        if payload_port != -1:
+            data['listener_port'] = payload_port
+        if payload_peers_from != -1:
+            data['peers_from_control_nodes'] = payload_peers_from
+
+        patch(
+            url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
+            data=data,
+            user=admin_user,
+            expect=200,
+        )
+
+        assert ReceptorAddress.objects.filter(instance=node).count() == 1
+        ra = ReceptorAddress.objects.get(instance=node, canonical=True)
+        assert ra.port == initial_port  # At the present time, changing ports is not allowed
+        assert ra.peers_from_control_nodes == payload_peers_from
+
+    @pytest.mark.parametrize(
+        'payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg',
+        [
+            (-1, True, None, None, "Cannot enable peers_from_control_nodes"),
+            (None, True, None, None, "Cannot enable peers_from_control_nodes"),
+            (None, True, 21799, True, "Cannot enable peers_from_control_nodes"),
+            (None, True, 21799, False, "Cannot enable peers_from_control_nodes"),
+            (21800, -1, 21799, True, "Cannot change listener port"),
+            (21800, True, 21799, True, "Cannot change listener port"),
+            (21800, False, 21799, True, "Cannot change listener port"),
+            (21800, -1, 21799, False, "Cannot change listener port"),
+            (21800, True, 21799, False, "Cannot change listener port"),
+            (21800, False, 21799, False, "Cannot change listener port"),
+        ],
+    )
+    def test_canonical_address_validation_error(self, payload_port, payload_peers_from, initial_port, initial_peers_from, error_msg, admin_user, patch):
+        node = Instance.objects.create(hostname='abc', node_type='hop')
+        if initial_port is not None:
+            ReceptorAddress.objects.create(address=node.hostname, port=initial_port, canonical=True, peers_from_control_nodes=initial_peers_from, instance=node)
+
+            assert ReceptorAddress.objects.filter(instance=node).count() == 1
+        else:
+            assert ReceptorAddress.objects.filter(instance=node).count() == 0
+
+        data = {'enabled': True}  # Just to have something to post.
+        if payload_port != -1:
+            data['listener_port'] = payload_port
+        if payload_peers_from != -1:
+            data['peers_from_control_nodes'] = payload_peers_from
+
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': node.pk}),
+            data=data,
             user=admin_user,
             expect=400,
         )

-    def test_peers_from_control_nodes_listener_port_enabled(self, admin_user, post):
+        assert error_msg in str(resp.data)
+
+    def test_changing_managed_listener_port(self, admin_user, patch):
         """
-        if peers_from_control_nodes is True, listener_port must an integer
-        Assert that all other combinations are allowed
+        if instance is managed, cannot change listener port at all
         """
-        for index, item in enumerate(itertools.product(['hop', 'execution'], [True, False], [None, 6789])):
-            node_type, peers_from, listener_port = item
-            # only disallowed case is when peers_from is True and listener port is None
-            disallowed = peers_from and not listener_port
-            post(
-                url=reverse('api:instance_list'),
-                data={"hostname": f"abc{index}", "peers_from_control_nodes": peers_from, "node_type": node_type, "listener_port": listener_port},
-                user=admin_user,
-                expect=400 if disallowed else 201,
-            )
+        hop = Instance.objects.create(hostname='abc', node_type="hop", managed=True)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
+            data={"listener_port": 5678},
+            user=admin_user,
+            expect=400,  # cannot set port
+        )
+        assert 'Cannot change listener port for managed nodes.' in str(resp.data)
+        ReceptorAddress.objects.create(instance=hop, address='hop', port=27199, canonical=True)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
+            data={"listener_port": None},
+            user=admin_user,
+            expect=400,  # cannot unset port
+        )
+        assert 'Cannot change listener port for managed nodes.' in str(resp.data)
+
+    def test_bidirectional_peering(self, admin_user, patch):
+        """
+        cannot peer to node that is already to peered to it
+        if A -> B, then disallow B -> A
+        """
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
+        hop1.peers.add(hop2addr)
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
+            data={"peers": [hop1addr.id]},
+            user=admin_user,
+            expect=400,
+        )
+        assert 'Instance hop1 is already peered to this instance.' in str(resp.data)
+
+    def test_multiple_peers_same_instance(self, admin_user, patch):
+        """
+        cannot peer to more than one address of the same instance
+        """
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr1 = ReceptorAddress.objects.create(instance=hop1, address='hop1', canonical=True)
+        hop1addr2 = ReceptorAddress.objects.create(instance=hop1, address='hop1alternate')
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        resp = patch(
+            url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
+            data={"peers": [hop1addr1.id, hop1addr2.id]},
+            user=admin_user,
+            expect=400,
+        )
+        assert 'Cannot peer to the same instance more than once.' in str(resp.data)

     @pytest.mark.parametrize('node_type', ['control', 'hybrid'])
-    def test_disallow_modifying_peers_control_nodes(self, node_type, admin_user, patch):
+    def test_changing_peers_control_nodes(self, node_type, admin_user, patch):
         """
         for control nodes, peers field should not be
         modified directly via patch.
         """
-        control = Instance.objects.create(hostname='abc', node_type=node_type)
-        hop1 = Instance.objects.create(hostname='hop1', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
-        hop2 = Instance.objects.create(hostname='hop2', node_type='hop', peers_from_control_nodes=False, listener_port=6789)
-        assert [hop1] == list(control.peers.all())  # only hop1 should be peered
-        patch(
+        control = Instance.objects.create(hostname='abc', node_type=node_type, managed=True)
+        hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
+        hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True)
+        hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
+        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
+        assert [hop1addr] == list(control.peers.all())  # only hop1addr should be peered
+        resp = patch(
             url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
-            data={"peers": ["hop2"]},
+            data={"peers": [hop2addr.id]},
             user=admin_user,
-            expect=400,  # cannot add peers directly
+            expect=400,  # cannot add peers manually
         )
+        assert 'Setting peers manually for managed nodes is not allowed.' in str(resp.data)
+
         patch(
             url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
-            data={"peers": ["hop1"]},
+            data={"peers": [hop1addr.id]},
             user=admin_user,
             expect=200,  # patching with current peers list should be okay
         )
-        patch(
+        resp = patch(
             url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
             data={"peers": []},
             user=admin_user,
             expect=400,  # cannot remove peers directly
         )
+        assert 'Setting peers manually for managed nodes is not allowed.' in str(resp.data)
+
         patch(
             url=reverse('api:instance_detail', kwargs={'pk': control.pk}),
             data={},
@@ -148,23 +340,25 @@ class TestPeers:
|
|||||||
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||||
data={"peers_from_control_nodes": True},
|
data={"peers_from_control_nodes": True},
|
||||||
user=admin_user,
|
user=admin_user,
|
||||||
expect=200, # patching without data should be fine too
|
expect=200,
|
||||||
)
|
)
|
||||||
assert {hop1, hop2} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node
|
assert {hop1addr, hop2addr} == set(control.peers.all()) # hop1 and hop2 should now be peered from control node
|
||||||
|
|
||||||
def test_disallow_changing_hostname(self, admin_user, patch):
|
def test_changing_hostname(self, admin_user, patch):
|
||||||
"""
|
"""
|
||||||
cannot change hostname
|
cannot change hostname
|
||||||
"""
|
"""
|
||||||
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
||||||
patch(
|
resp = patch(
|
||||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||||
data={"hostname": "hop2"},
|
data={"hostname": "hop2"},
|
||||||
user=admin_user,
|
user=admin_user,
|
||||||
expect=400,
|
expect=400,
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_disallow_changing_node_state(self, admin_user, patch):
|
assert 'Cannot change hostname.' in str(resp.data)
|
||||||
|
|
||||||
|
def test_changing_node_state(self, admin_user, patch):
|
||||||
"""
|
"""
|
||||||
only allow setting to deprovisioning
|
only allow setting to deprovisioning
|
||||||
"""
|
"""
|
||||||
@@ -175,12 +369,54 @@ class TestPeers:
|
|||||||
user=admin_user,
|
user=admin_user,
|
||||||
expect=200,
|
expect=200,
|
||||||
)
|
)
|
||||||
patch(
|
resp = patch(
|
||||||
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||||
data={"node_state": "ready"},
|
data={"node_state": "ready"},
|
||||||
user=admin_user,
|
user=admin_user,
|
||||||
expect=400,
|
expect=400,
|
||||||
)
|
)
|
||||||
|
assert "Can only change instances to the 'deprovisioning' state." in str(resp.data)
|
||||||
|
|
||||||
|
def test_changing_managed_node_state(self, admin_user, patch):
|
||||||
|
"""
|
||||||
|
cannot change node state of managed node
|
||||||
|
"""
|
||||||
|
hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True)
|
||||||
|
resp = patch(
|
||||||
|
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||||
|
data={"node_state": "deprovisioning"},
|
||||||
|
user=admin_user,
|
||||||
|
expect=400,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert 'Cannot deprovision managed nodes.' in str(resp.data)
|
||||||
|
|
||||||
|
def test_changing_managed_peers_from_control_nodes(self, admin_user, patch):
|
||||||
|
"""
|
||||||
|
cannot change peers_from_control_nodes of managed node
|
||||||
|
"""
|
||||||
|
hop = Instance.objects.create(hostname='hop', node_type='hop', managed=True)
|
||||||
|
ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True)
|
||||||
|
resp = patch(
|
||||||
|
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||||
|
data={"peers_from_control_nodes": False},
|
||||||
|
user=admin_user,
|
||||||
|
expect=400,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert 'Cannot change peers_from_control_nodes for managed nodes.' in str(resp.data)
|
||||||
|
|
||||||
|
hop.peers_from_control_nodes = False
|
||||||
|
hop.save()
|
||||||
|
|
||||||
|
resp = patch(
|
||||||
|
url=reverse('api:instance_detail', kwargs={'pk': hop.pk}),
|
||||||
|
data={"peers_from_control_nodes": False},
|
||||||
|
user=admin_user,
|
||||||
|
expect=400,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert 'Cannot change peers_from_control_nodes for managed nodes.' in str(resp.data)
|
||||||
|
|
||||||
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
@pytest.mark.parametrize('node_type', ['control', 'hybrid'])
|
||||||
def test_control_node_automatically_peers(self, node_type):
|
def test_control_node_automatically_peers(self, node_type):
|
||||||
@@ -191,9 +427,10 @@ class TestPeers:
|
|||||||
peer to hop should be removed if hop is deleted
|
peer to hop should be removed if hop is deleted
|
||||||
"""
|
"""
|
||||||
|
|
||||||
hop = Instance.objects.create(hostname='hop', node_type='hop', peers_from_control_nodes=True, listener_port=6789)
|
hop = Instance.objects.create(hostname='hop', node_type='hop')
|
||||||
|
hopaddr = ReceptorAddress.objects.create(instance=hop, address='hop', peers_from_control_nodes=True, canonical=True)
|
||||||
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
control = Instance.objects.create(hostname='abc', node_type=node_type)
|
||||||
assert hop in control.peers.all()
|
assert hopaddr in control.peers.all()
|
||||||
hop.delete()
|
hop.delete()
|
||||||
assert not control.peers.exists()
|
assert not control.peers.exists()
|
||||||
|
|
||||||
@@ -203,26 +440,50 @@ class TestPeers:
|
|||||||
if a new node comes online, other peer relationships should
|
if a new node comes online, other peer relationships should
|
||||||
remain intact
|
remain intact
|
||||||
"""
|
"""
|
||||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||||
hop1.peers.add(hop2)
|
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||||
|
hop1.peers.add(hop2addr)
|
||||||
|
|
||||||
# a control node is added
|
# a control node is added
|
||||||
Instance.objects.create(hostname='control', node_type=node_type, listener_port=None)
|
Instance.objects.create(hostname='control', node_type=node_type)
|
||||||
|
|
||||||
assert hop1.peers.exists()
|
assert hop1.peers.exists()
|
||||||
|
|
||||||
def test_group_vars(self, get, admin_user):
|
def test_reverse_peers(self, admin_user, get):
|
||||||
|
"""
|
||||||
|
if hop1 peers to hop2, hop1 should
|
||||||
|
be in hop2's reverse_peers list
|
||||||
|
"""
|
||||||
|
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||||
|
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||||
|
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', canonical=True)
|
||||||
|
hop1.peers.add(hop2addr)
|
||||||
|
|
||||||
|
resp = get(
|
||||||
|
url=reverse('api:instance_detail', kwargs={'pk': hop2.pk}),
|
||||||
|
user=admin_user,
|
||||||
|
expect=200,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert hop1.pk in resp.data['reverse_peers']
|
||||||
|
|
||||||
|
def test_group_vars(self):
|
||||||
"""
|
"""
|
||||||
control > hop1 > hop2 < execution
|
control > hop1 > hop2 < execution
|
||||||
"""
|
"""
|
||||||
control = Instance.objects.create(hostname='control', node_type='control', listener_port=None)
|
control = Instance.objects.create(hostname='control', node_type='control')
|
||||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, port=6789, canonical=True)
|
||||||
execution = Instance.objects.create(hostname='execution', node_type='execution', listener_port=6789)
|
|
||||||
|
|
||||||
execution.peers.add(hop2)
|
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||||
hop1.peers.add(hop2)
|
hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, port=6789, canonical=True)
|
||||||
|
|
||||||
|
execution = Instance.objects.create(hostname='execution', node_type='execution')
|
||||||
|
ReceptorAddress.objects.create(instance=execution, address='execution', peers_from_control_nodes=False, port=6789, canonical=True)
|
||||||
|
|
||||||
|
execution.peers.add(hop2addr)
|
||||||
|
hop1.peers.add(hop2addr)
|
||||||
|
|
||||||
control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
|
control_vars = yaml.safe_load(generate_group_vars_all_yml(control))
|
||||||
hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
|
hop1_vars = yaml.safe_load(generate_group_vars_all_yml(hop1))
|
||||||
@@ -265,13 +526,15 @@ class TestPeers:
|
|||||||
control = Instance.objects.create(hostname='control1', node_type='control')
|
control = Instance.objects.create(hostname='control1', node_type='control')
|
||||||
write_method.assert_not_called()
|
write_method.assert_not_called()
|
||||||
|
|
||||||
# new hop node with peers_from_control_nodes False (no)
|
# new address with peers_from_control_nodes False (no)
|
||||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||||
|
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=False, canonical=True)
|
||||||
hop1.delete()
|
hop1.delete()
|
||||||
write_method.assert_not_called()
|
write_method.assert_not_called()
|
||||||
|
|
||||||
# new hop node with peers_from_control_nodes True (yes)
|
# new address with peers_from_control_nodes True (yes)
|
||||||
hop1 = Instance.objects.create(hostname='hop1', node_type='hop', listener_port=6789, peers_from_control_nodes=True)
|
hop1 = Instance.objects.create(hostname='hop1', node_type='hop')
|
||||||
|
hop1addr = ReceptorAddress.objects.create(instance=hop1, address='hop1', peers_from_control_nodes=True, canonical=True)
|
||||||
write_method.assert_called()
|
write_method.assert_called()
|
||||||
write_method.reset_mock()
|
write_method.reset_mock()
|
||||||
|
|
||||||
@@ -280,20 +543,21 @@ class TestPeers:
|
|||||||
write_method.assert_called()
|
write_method.assert_called()
|
||||||
write_method.reset_mock()
|
write_method.reset_mock()
|
||||||
|
|
||||||
# new hop node with peers_from_control_nodes False and peered to another hop node (no)
|
# new address with peers_from_control_nodes False and peered to another hop node (no)
|
||||||
hop2 = Instance.objects.create(hostname='hop2', node_type='hop', listener_port=6789, peers_from_control_nodes=False)
|
hop2 = Instance.objects.create(hostname='hop2', node_type='hop')
|
||||||
hop2.peers.add(hop1)
|
ReceptorAddress.objects.create(instance=hop2, address='hop2', peers_from_control_nodes=False, canonical=True)
|
||||||
|
hop2.peers.add(hop1addr)
|
||||||
hop2.delete()
|
hop2.delete()
|
||||||
write_method.assert_not_called()
|
write_method.assert_not_called()
|
||||||
|
|
||||||
# changing peers_from_control_nodes to False (yes)
|
# changing peers_from_control_nodes to False (yes)
|
||||||
hop1.peers_from_control_nodes = False
|
hop1addr.peers_from_control_nodes = False
|
||||||
hop1.save()
|
hop1addr.save()
|
||||||
write_method.assert_called()
|
write_method.assert_called()
|
||||||
write_method.reset_mock()
|
write_method.reset_mock()
|
||||||
|
|
||||||
# deleting hop node that has peers_from_control_nodes to False (no)
|
# deleting address that has peers_from_control_nodes to False (no)
|
||||||
hop1.delete()
|
hop1.delete() # cascade deletes to hop1addr
|
||||||
write_method.assert_not_called()
|
write_method.assert_not_called()
|
||||||
|
|
||||||
# deleting control nodes (no)
|
# deleting control nodes (no)
|
||||||
@@ -315,8 +579,8 @@ class TestPeers:
|
|||||||
|
|
||||||
# not peered, so config file should not be updated
|
# not peered, so config file should not be updated
|
||||||
for i in range(3):
|
for i in range(3):
|
||||||
Instance.objects.create(hostname=f"exNo-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=False)
|
inst = Instance.objects.create(hostname=f"exNo-{i}", node_type='execution')
|
||||||
|
ReceptorAddress.objects.create(instance=inst, address=f"exNo-{i}", port=6789, peers_from_control_nodes=False, canonical=True)
|
||||||
_, should_update = generate_config_data()
|
_, should_update = generate_config_data()
|
||||||
assert not should_update
|
assert not should_update
|
||||||
|
|
||||||
@@ -324,11 +588,13 @@ class TestPeers:
         expected_peers = []
         for i in range(3):
             expected_peers.append(f"hop-{i}:6789")
-            Instance.objects.create(hostname=f"hop-{i}", node_type='hop', listener_port=6789, peers_from_control_nodes=True)
+            inst = Instance.objects.create(hostname=f"hop-{i}", node_type='hop')
+            ReceptorAddress.objects.create(instance=inst, address=f"hop-{i}", port=6789, peers_from_control_nodes=True, canonical=True)

         for i in range(3):
             expected_peers.append(f"exYes-{i}:6789")
-            Instance.objects.create(hostname=f"exYes-{i}", node_type='execution', listener_port=6789, peers_from_control_nodes=True)
+            inst = Instance.objects.create(hostname=f"exYes-{i}", node_type='execution')
+            ReceptorAddress.objects.create(instance=inst, address=f"exYes-{i}", port=6789, peers_from_control_nodes=True, canonical=True)

         new_config, should_update = generate_config_data()
         assert should_update
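Taken together, the TestPeers changes above move the trigger for receptor config writes from fields on Instance onto canonical ReceptorAddress rows. A minimal sketch of the rule the cases encode (the helper name is illustrative, not AWX's actual implementation):

def control_node_peers(receptor_addresses):
    # Control nodes dial out to every canonical address that opts in via
    # peers_from_control_nodes; a receptor.conf rewrite is expected whenever
    # this set changes, and skipped when it does not (e.g. plain deletes).
    return {f'{a.address}:{a.port}' for a in receptor_addresses if a.canonical and a.peers_from_control_nodes}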
@@ -2,7 +2,7 @@ from unittest import mock
 import pytest
 import json

-from ansible_base.utils.models import get_type_for_model
+from ansible_base.lib.utils.models import get_type_for_model

 from awx.api.versioning import reverse
 from awx.main.models.jobs import JobTemplate, Job
@@ -81,6 +81,7 @@ def test_default_cred_types():
         'aws_secretsmanager_credential',
         'azure_kv',
         'azure_rm',
+        'bitbucket_dc_token',
         'centrify_vault_kv',
         'conjur',
         'controller',
@@ -100,6 +101,7 @@ def test_default_cred_types():
         'satellite6',
         'scm',
         'ssh',
+        'terraform',
         'thycotic_dsv',
         'thycotic_tss',
         'vault',
@@ -60,6 +60,13 @@ def test_hashivault_client_cert_auth_no_role():
     assert res == expected_res


+def test_hashivault_userpass_auth():
+    kwargs = {'username': 'the_username', 'password': 'the_password'}
+    expected_res = {'username': 'the_username', 'password': 'the_password'}
+    res = hashivault.userpass_auth(**kwargs)
+    assert res == expected_res
+
+
 def test_hashivault_handle_auth_token():
     kwargs = {
         'token': 'the_token',
awx/main/tests/functional/test_linkstate.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+import pytest
+
+from awx.main.models import Instance, ReceptorAddress, InstanceLink
+from awx.main.tasks.system import inspect_established_receptor_connections
+
+
+@pytest.mark.django_db
+class TestLinkState:
+    @pytest.fixture(autouse=True)
+    def configure_settings(self, settings):
+        settings.IS_K8S = True
+
+    def test_inspect_established_receptor_connections(self):
+        '''
+        Change link state from ADDING to ESTABLISHED
+        if the receptor status KnownConnectionCosts field
+        has an entry for the source and target node.
+        '''
+        hop1 = Instance.objects.create(hostname='hop1')
+        hop2 = Instance.objects.create(hostname='hop2')
+        hop2addr = ReceptorAddress.objects.create(instance=hop2, address='hop2', port=5678)
+        InstanceLink.objects.create(source=hop1, target=hop2addr, link_state=InstanceLink.States.ADDING)
+
+        # calling with empty KnownConnectionCosts should not change the link state
+        inspect_established_receptor_connections({"KnownConnectionCosts": {}})
+        assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ADDING
+
+        mesh_state = {"KnownConnectionCosts": {"hop1": {"hop2": 1}}}
+        inspect_established_receptor_connections(mesh_state)
+        assert InstanceLink.objects.get(source=hop1, target=hop2addr).link_state == InstanceLink.States.ESTABLISHED
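The task under test walks receptor's reported mesh state; a condensed, standalone sketch of the check (illustrative only, not the function's actual body):

def link_is_established(mesh_state, source_hostname, target_hostname):
    # Receptor reports known connection costs as a nested mapping
    # {source: {target: cost}}; a present entry means the link is up.
    costs = mesh_state.get('KnownConnectionCosts', {})
    return target_hostname in costs.get(source_hostname, {})

assert link_is_established({'KnownConnectionCosts': {'hop1': {'hop2': 1}}}, 'hop1', 'hop2')
assert not link_is_established({'KnownConnectionCosts': {}}, 'hop1', 'hop2')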
@@ -42,3 +42,29 @@ class TestMigrationSmoke:
         final_state = migrator.apply_tested_migration(final_migration)
         Instance = final_state.apps.get_model('main', 'Instance')
         assert Instance.objects.filter(hostname='foobar').count() == 1
+
+    def test_receptor_address(self, migrator):
+        old_state = migrator.apply_initial_migration(('main', '0188_add_bitbucket_dc_webhook'))
+        Instance = old_state.apps.get_model('main', 'Instance')
+        for i in range(3):
+            Instance.objects.create(hostname=f'foobar{i}', node_type='hop')
+        foo = Instance.objects.create(hostname='foo', node_type='execution', listener_port=1234)
+        bar = Instance.objects.create(hostname='bar', node_type='execution', listener_port=None)
+        bar.peers.add(foo)
+
+        new_state = migrator.apply_tested_migration(
+            ('main', '0189_inbound_hop_nodes'),
+        )
+        Instance = new_state.apps.get_model('main', 'Instance')
+        ReceptorAddress = new_state.apps.get_model('main', 'ReceptorAddress')
+
+        # We can now test how our migration worked, new field is there:
+        assert ReceptorAddress.objects.filter(address='foo', port=1234).count() == 1
+        assert not ReceptorAddress.objects.filter(address='bar').exists()
+
+        bar = Instance.objects.get(hostname='bar')
+        fooaddr = ReceptorAddress.objects.get(address='foo')
+
+        bar_peers = bar.peers.all()
+        assert len(bar_peers) == 1
+        assert fooaddr in bar_peers
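For the assertions above to hold, the 0189_inbound_hop_nodes migration has to create a ReceptorAddress only for instances that have a listener_port, and rewire instance-to-instance peer links to point at the new address rows (bar's single peer becomes fooaddr). A hedged sketch of that forward step, a reconstruction rather than the migration's actual code:

def forwards(apps, schema_editor):
    Instance = apps.get_model('main', 'Instance')
    ReceptorAddress = apps.get_model('main', 'ReceptorAddress')
    # 'foo' (listener_port=1234) gets an address; 'bar' (listener_port=None) does not.
    for instance in Instance.objects.exclude(listener_port=None):
        ReceptorAddress.objects.create(instance=instance, address=instance.hostname, port=instance.listener_port)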
awx/main/tests/functional/test_routing.py (new file, 90 lines)
@@ -0,0 +1,90 @@
+import pytest
+
+from django.contrib.auth.models import AnonymousUser
+
+from channels.routing import ProtocolTypeRouter
+from channels.testing.websocket import WebsocketCommunicator
+
+
+from awx.main.consumers import WebsocketSecretAuthHelper
+
+
+@pytest.fixture
+def application():
+    # code in routing hits the db on import because .. settings cache
+    from awx.main.routing import application_func
+
+    yield application_func(ProtocolTypeRouter)
+
+
+@pytest.fixture
+def websocket_server_generator(application):
+    def fn(endpoint):
+        return WebsocketCommunicator(application, endpoint)
+
+    return fn
+
+
+@pytest.mark.asyncio
+@pytest.mark.django_db
+class TestWebsocketRelay:
+    @pytest.fixture
+    def websocket_relay_secret_generator(self, settings):
+        def fn(secret, set_broadcast_websocket_secret=False):
+            secret_backup = settings.BROADCAST_WEBSOCKET_SECRET
+            settings.BROADCAST_WEBSOCKET_SECRET = 'foobar'
+            res = ('secret'.encode('utf-8'), WebsocketSecretAuthHelper.construct_secret().encode('utf-8'))
+            if set_broadcast_websocket_secret is False:
+                settings.BROADCAST_WEBSOCKET_SECRET = secret_backup
+            return res
+
+        return fn
+
+    @pytest.fixture
+    def websocket_relay_secret(self, settings, websocket_relay_secret_generator):
+        return websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=True)
+
+    async def test_authorized(self, websocket_server_generator, websocket_relay_secret):
+        server = websocket_server_generator('/websocket/relay/')
+
+        server.scope['headers'] = (websocket_relay_secret,)
+        connected, _ = await server.connect()
+        assert connected is True
+
+    async def test_not_authorized(self, websocket_server_generator):
+        server = websocket_server_generator('/websocket/relay/')
+        connected, _ = await server.connect()
+        assert connected is False, "Connection to the relay websocket without auth. We expected the client to be denied."
+
+    async def test_wrong_secret(self, websocket_server_generator, websocket_relay_secret_generator):
+        server = websocket_server_generator('/websocket/relay/')
+
+        server.scope['headers'] = (websocket_relay_secret_generator('foobar', set_broadcast_websocket_secret=False),)
+        connected, _ = await server.connect()
+        assert connected is False
+
+
+@pytest.mark.asyncio
+@pytest.mark.django_db
+class TestWebsocketEventConsumer:
+    async def test_unauthorized_anonymous(self, websocket_server_generator):
+        server = websocket_server_generator('/websocket/')
+
+        server.scope['user'] = AnonymousUser()
+        connected, _ = await server.connect()
+        assert connected is False, "Anonymous user should NOT be allowed to login."
+
+    @pytest.mark.skip(reason="Ran out of coding time.")
+    async def test_authorized(self, websocket_server_generator, application, admin):
+        server = websocket_server_generator('/websocket/')
+
+        """
+        I ran out of time. Here is what I was thinking ...
+        Inject a valid session into the cookies in the header
+
+        server.scope['headers'] = (
+            (b'cookie', ...),
+        )
+        """
+        connected, _ = await server.connect()
+        assert connected is True, "User should be allowed in via cookies auth via a session key in the cookies"
@@ -5,7 +5,7 @@ import pytest
 # Django
 from rest_framework.exceptions import PermissionDenied

-from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend
+from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend

 from awx.main.models import (
     AdHocCommand,
awx/main/tests/unit/models/test_receptor_address.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+from awx.main.models import ReceptorAddress
+import pytest
+
+ReceptorAddress()
+
+
+@pytest.mark.parametrize(
+    'address, protocol, port, websocket_path, expected',
+    [
+        ('foo', 'tcp', 27199, '', 'foo:27199'),
+        ('bar', 'ws', 6789, '', 'wss://bar:6789'),
+        ('mal', 'ws', 6789, 'path', 'wss://mal:6789/path'),
+        ('example.com', 'ws', 443, 'path', 'wss://example.com:443/path'),
+    ],
+)
+def test_get_full_address(address, protocol, port, websocket_path, expected):
+    receptor_address = ReceptorAddress(address=address, protocol=protocol, port=port, websocket_path=websocket_path)
+    assert receptor_address.get_full_address() == expected
+
+
+@pytest.mark.parametrize(
+    'protocol, expected',
+    [
+        ('tcp', 'tcp-peer'),
+        ('ws', 'ws-peer'),
+        ('wss', 'ws-peer'),
+        ('foo', None),
+    ],
+)
+def test_get_peer_type(protocol, expected):
+    receptor_address = ReceptorAddress(protocol=protocol)
+    assert receptor_address.get_peer_type() == expected
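The parametrized table above pins down get_full_address(): tcp addresses render as host:port, websocket addresses as wss:// URLs with an optional path. A sketch consistent with those cases, not the model's actual method body:

def get_full_address(address, protocol, port, websocket_path=''):
    if protocol == 'tcp':
        return f'{address}:{port}'
    # websocket addresses become wss:// URLs, with an optional path segment
    suffix = f'/{websocket_path}' if websocket_path else ''
    return f'wss://{address}:{port}{suffix}'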
@@ -1085,6 +1085,27 @@ class TestJobCredentials(TestJobExecution):
         assert open(env['ANSIBLE_NET_SSH_KEYFILE'], 'r').read() == self.EXAMPLE_PRIVATE_KEY
         assert safe_env['ANSIBLE_NET_PASSWORD'] == HIDDEN_PASSWORD

+    def test_terraform_cloud_credentials(self, job, private_data_dir, mock_me):
+        terraform = CredentialType.defaults['terraform']()
+        hcl_config = '''
+        backend "s3" {
+            bucket = "s3_sample_bucket"
+            key = "/tf_state/"
+            region = "us-east-1"
+        }
+        '''
+        credential = Credential(pk=1, credential_type=terraform, inputs={'configuration': hcl_config})
+        credential.inputs['configuration'] = encrypt_field(credential, 'configuration')
+        job.credentials.add(credential)
+
+        env = {}
+        safe_env = {}
+        credential.credential_type.inject_credential(credential, env, safe_env, [], private_data_dir)
+
+        local_path = to_host_path(env['TF_BACKEND_CONFIG_FILE'], private_data_dir)
+        config = open(local_path, 'r').read()
+        assert config == hcl_config
+
     def test_custom_environment_injectors_with_jinja_syntax_error(self, private_data_dir, mock_me):
         some_cloud = CredentialType(
             kind='cloud',
@@ -12,7 +12,7 @@ from unittest import mock

 from rest_framework.exceptions import ParseError

-from ansible_base.utils.models import get_type_for_model
+from ansible_base.lib.utils.models import get_type_for_model

 from awx.main.utils import common
 from awx.api.validators import HostnameRegexValidator
@@ -69,7 +69,7 @@ class mockHost:
 @mock.patch('awx.main.utils.filters.get_model', return_value=mockHost())
 class TestSmartFilterQueryFromString:
     @mock.patch(
-        'ansible_base.filters.rest_framework.field_lookup_backend.get_fields_from_path', lambda model, path: ([model], path)
+        'ansible_base.rest_filters.rest_framework.field_lookup_backend.get_fields_from_path', lambda model, path: ([model], path)
     )  # disable field filtering, because a__b isn't a real Host field
     @pytest.mark.parametrize(
         "filter_string,q_expected",
@@ -161,7 +161,7 @@ class SmartFilter(object):
             else:
                 # detect loops and restrict access to sensitive fields
                 # this import is intentional here to avoid a circular import
-                from ansible_base.filters.rest_framework.field_lookup_backend import FieldLookupBackend
+                from ansible_base.rest_filters.rest_framework.field_lookup_backend import FieldLookupBackend

                 FieldLookupBackend().get_field_from_lookup(Host, k)
                 kwargs[k] = v
@@ -20,7 +20,6 @@ from awx.main.analytics.broadcast_websocket import (
     RelayWebsocketStats,
     RelayWebsocketStatsManager,
 )
-import awx.main.analytics.subsystem_metrics as s_metrics

 logger = logging.getLogger('awx.main.wsrelay')

@@ -54,7 +53,6 @@ class WebsocketRelayConnection:
         self.protocol = protocol
         self.verify_ssl = verify_ssl
         self.channel_layer = None
-        self.subsystem_metrics = s_metrics.Metrics(instance_name=name)
         self.producers = dict()
         self.connected = False

@@ -341,7 +339,7 @@ class WebSocketRelayManager(object):

         if deleted_remote_hosts:
             logger.info(f"Removing {deleted_remote_hosts} from websocket broadcast list")
-            await asyncio.gather(self.cleanup_offline_host(h) for h in deleted_remote_hosts)
+            await asyncio.gather(*[self.cleanup_offline_host(h) for h in deleted_remote_hosts])

         if new_remote_hosts:
             logger.info(f"Adding {new_remote_hosts} to websocket broadcast list")
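The one-line wsrelay fix above matters because asyncio.gather() takes awaitables as separate positional arguments; the old code handed it a single generator object, which is not itself awaitable and raises TypeError at runtime. A minimal, runnable illustration (the coroutine here is a stand-in for the real cleanup):

import asyncio

async def cleanup_offline_host(hostname):
    await asyncio.sleep(0)  # stand-in for the real cleanup work
    return hostname

async def main():
    hosts = ['awx-1', 'awx-2']
    # Fixed form: unpack so each coroutine is its own positional argument.
    print(await asyncio.gather(*[cleanup_offline_host(h) for h in hosts]))
    # Buggy form from the old code: gather() receives one generator object,
    # which is not awaitable, so it raises TypeError.
    try:
        await asyncio.gather(cleanup_offline_host(h) for h in hosts)
    except TypeError as exc:
        print(f'old form fails: {exc}')

asyncio.run(main())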
@@ -216,42 +216,54 @@
     - block:
         - name: Fetch galaxy roles from roles/requirements.(yml/yaml)
           ansible.builtin.command:
-            cmd: "ansible-galaxy role install -r {{ item }} {{ verbosity }}"
+            cmd: "ansible-galaxy role install -r {{ req_file }} {{ verbosity }}"
           register: galaxy_result
-          with_fileglob:
-            - "{{ project_path | quote }}/roles/requirements.yaml"
-            - "{{ project_path | quote }}/roles/requirements.yml"
+          vars:
+            req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+            req_candidates:
+              - "{{ project_path | quote }}/roles/requirements.yml"
+              - "{{ project_path | quote }}/roles/requirements.yaml"
           changed_when: "'was installed successfully' in galaxy_result.stdout"
-          when: roles_enabled | bool
+          when:
+            - roles_enabled | bool
+            - req_file
           tags:
             - install_roles

         - name: Fetch galaxy collections from collections/requirements.(yml/yaml)
           ansible.builtin.command:
-            cmd: "ansible-galaxy collection install -r {{ item }} {{ verbosity }}"
+            cmd: "ansible-galaxy collection install -r {{ req_file }} {{ verbosity }}"
           register: galaxy_collection_result
-          with_fileglob:
-            - "{{ project_path | quote }}/collections/requirements.yaml"
-            - "{{ project_path | quote }}/collections/requirements.yml"
+          vars:
+            req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+            req_candidates:
+              - "{{ project_path | quote }}/collections/requirements.yml"
+              - "{{ project_path | quote }}/collections/requirements.yaml"
+              - "{{ project_path | quote }}/requirements.yml"
+              - "{{ project_path | quote }}/requirements.yaml"
           changed_when: "'Nothing to do.' not in galaxy_collection_result.stdout"
           when:
             - "ansible_version.full is version_compare('2.9', '>=')"
             - collections_enabled | bool
+            - req_file
           tags:
             - install_collections

         - name: Fetch galaxy roles and collections from requirements.(yml/yaml)
           ansible.builtin.command:
-            cmd: "ansible-galaxy install -r {{ item }} {{ verbosity }}"
+            cmd: "ansible-galaxy install -r {{ req_file }} {{ verbosity }}"
           register: galaxy_combined_result
-          with_fileglob:
-            - "{{ project_path | quote }}/requirements.yaml"
-            - "{{ project_path | quote }}/requirements.yml"
+          vars:
+            req_file: "{{ lookup('ansible.builtin.first_found', req_candidates, skip=True) }}"
+            req_candidates:
+              - "{{ project_path | quote }}/requirements.yaml"
+              - "{{ project_path | quote }}/requirements.yml"
           changed_when: "'Nothing to do.' not in galaxy_combined_result.stdout"
           when:
             - "ansible_version.full is version_compare('2.10', '>=')"
            - collections_enabled | bool
             - roles_enabled | bool
+            - req_file
           tags:
             - install_collections
             - install_roles
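Replacing with_fileglob with a first_found lookup changes two things: each task now runs at most once even when both the .yml and .yaml variants exist, and the candidate order (not glob order) decides which file wins. Roughly, in Python terms (an illustrative helper, not the lookup plugin itself):

import os

def first_found(candidates, skip=True):
    for path in candidates:
        if os.path.exists(path):
            return path  # first existing candidate wins; order matters
    if skip:
        return None  # with skip=True the lookup returns empty instead of failing
    raise FileNotFoundError('no requirements file found')

Because req_file is empty when no candidate exists, the added '- req_file' condition skips the task cleanly instead of invoking ansible-galaxy with a missing file.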
@@ -37,6 +37,18 @@ DATABASES = {
     }
 }

+# Special database overrides for dispatcher connections listening to pg_notify
+LISTENER_DATABASES = {
+    'default': {
+        'OPTIONS': {
+            'keepalives': 1,
+            'keepalives_idle': 5,
+            'keepalives_interval': 5,
+            'keepalives_count': 5,
+        },
+    }
+}
+
 # Whether or not the deployment is a K8S-based deployment
 # In K8S-based deployments, instances have zero capacity - all playbook
 # automation is intended to flow through defined Container Groups that
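The keepalive OPTIONS above map directly onto libpq connection parameters, so the same settings can be expressed outside Django; a minimal sketch using psycopg2 (database name and credentials are placeholders):

import psycopg2

conn = psycopg2.connect(
    dbname='awx',
    keepalives=1,            # enable TCP keepalive probes on the socket
    keepalives_idle=5,       # seconds of inactivity before the first probe
    keepalives_interval=5,   # seconds between unanswered probes
    keepalives_count=5,      # unanswered probes before the connection is declared dead
)

Aggressive keepalives suit long-lived pg_notify listener connections, which otherwise sit idle and can be dropped silently by NAT devices or load balancers.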
@@ -340,7 +352,7 @@ INSTALLED_APPS = [
     'awx.ui',
     'awx.sso',
     'solo',
-    'ansible_base',
+    'ansible_base.rest_filters',
 ]

 INTERNAL_IPS = ('127.0.0.1',)
@@ -1064,10 +1076,40 @@ HOST_METRIC_SUMMARY_TASK_LAST_TS = None
 HOST_METRIC_SUMMARY_TASK_INTERVAL = 7  # days


+# TODO: cmeyers, replace with register pattern
+# The register pattern is particularly nice for this because we need
+# to know the process to start the thread that will be the server.
+# The registration location should be the same location as we would
+# call MetricsServer.start()
+# Note: if we don't get to this TODO, then at least create constants
+# for the services strings below.
+# TODO: cmeyers, break this out into a separate django app so other
+# projects can take advantage.
+
+METRICS_SERVICE_CALLBACK_RECEIVER = 'callback_receiver'
+METRICS_SERVICE_DISPATCHER = 'dispatcher'
+METRICS_SERVICE_WEBSOCKETS = 'websockets'
+
+METRICS_SUBSYSTEM_CONFIG = {
+    'server': {
+        METRICS_SERVICE_CALLBACK_RECEIVER: {
+            'port': 8014,
+        },
+        METRICS_SERVICE_DISPATCHER: {
+            'port': 8015,
+        },
+        METRICS_SERVICE_WEBSOCKETS: {
+            'port': 8016,
+        },
+    }
+}
+
+
 # django-ansible-base
-ANSIBLE_BASE_FEATURES = {'AUTHENTICATION': False, 'SWAGGER': False, 'FILTERING': True}
+ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
+ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'

-from ansible_base import settings  # noqa: E402
+from ansible_base.lib import dynamic_config  # noqa: E402

-settings_file = os.path.join(os.path.dirname(settings.__file__), 'dynamic_settings.py')
+settings_file = os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py')
 include(settings_file)
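Until the register pattern described in the TODO lands, a consumer of the per-service metrics config would read its port straight from settings; a hypothetical helper (not shipped in this diff):

from django.conf import settings

def metrics_port(service):
    # service is one of the METRICS_SERVICE_* constants,
    # e.g. metrics_port('dispatcher') -> 8015
    return settings.METRICS_SUBSYSTEM_CONFIG['server'][service]['port']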
@@ -21,7 +21,7 @@ from split_settings.tools import optional, include
 from .defaults import *  # NOQA

 # awx-manage shell_plus --notebook
-NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '8888', '--allow-root', '--no-browser']
+NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '9888', '--allow-root', '--no-browser']

 # print SQL queries in shell_plus
 SHELL_PLUS_PRINT_SQL = False
@@ -68,7 +68,6 @@ class LDAPSettings(BaseLDAPSettings):


 class LDAPBackend(BaseLDAPBackend):
-
     """
     Custom LDAP backend for AWX.
     """
@@ -38,7 +38,7 @@ class CompleteView(BaseRedirectView):
         response = super(CompleteView, self).dispatch(request, *args, **kwargs)
         if self.request.user and self.request.user.is_authenticated:
             logger.info(smart_str(u"User {} logged in".format(self.request.user.username)))
-            response.set_cookie('userLoggedIn', 'true')
+            response.set_cookie('userLoggedIn', 'true', secure=getattr(settings, 'SESSION_COOKIE_SECURE', False))
             response.setdefault('X-API-Session-Cookie-Name', getattr(settings, 'SESSION_COOKIE_NAME', 'awx_sessionid'))
         return response

@@ -29,6 +29,7 @@ import Notifications from './models/Notifications';
 import Organizations from './models/Organizations';
 import ProjectUpdates from './models/ProjectUpdates';
 import Projects from './models/Projects';
+import ReceptorAddresses from './models/Receptor';
 import Roles from './models/Roles';
 import Root from './models/Root';
 import Schedules from './models/Schedules';

@@ -79,6 +80,7 @@ const NotificationsAPI = new Notifications();
 const OrganizationsAPI = new Organizations();
 const ProjectUpdatesAPI = new ProjectUpdates();
 const ProjectsAPI = new Projects();
+const ReceptorAPI = new ReceptorAddresses();
 const RolesAPI = new Roles();
 const RootAPI = new Root();
 const SchedulesAPI = new Schedules();

@@ -130,6 +132,7 @@ export {
   OrganizationsAPI,
   ProjectUpdatesAPI,
   ProjectsAPI,
+  ReceptorAPI,
   RolesAPI,
   RootAPI,
   SchedulesAPI,
@@ -8,6 +8,7 @@ class Instances extends Base {
     this.readHealthCheckDetail = this.readHealthCheckDetail.bind(this);
     this.healthCheck = this.healthCheck.bind(this);
     this.readInstanceGroup = this.readInstanceGroup.bind(this);
+    this.readReceptorAddresses = this.readReceptorAddresses.bind(this);
     this.deprovisionInstance = this.deprovisionInstance.bind(this);
   }

@@ -27,6 +28,17 @@ class Instances extends Base {
     return this.http.get(`${this.baseUrl}${instanceId}/instance_groups/`);
   }

+  readReceptorAddresses(instanceId) {
+    return this.http.get(`${this.baseUrl}${instanceId}/receptor_addresses/`);
+  }
+
+  updateReceptorAddresses(instanceId, data) {
+    return this.http.post(
+      `${this.baseUrl}${instanceId}/receptor_addresses/`,
+      data
+    );
+  }
+
   deprovisionInstance(instanceId) {
     return this.http.patch(`${this.baseUrl}${instanceId}/`, {
       node_state: 'deprovisioning',
awx/ui/src/api/models/Receptor.js (new file, 14 lines)
@@ -0,0 +1,14 @@
+import Base from '../Base';
+
+class ReceptorAddresses extends Base {
+  constructor(http) {
+    super(http);
+    this.baseUrl = 'api/v2/receptor_addresses/';
+  }
+
+  updateReceptorAddresses(instanceId, data) {
+    return this.http.post(`${this.baseUrl}`, data);
+  }
+}
+
+export default ReceptorAddresses;
@@ -257,12 +257,17 @@ function PromptDetail({
               numChips={5}
               ouiaId="prompt-job-tag-chips"
               totalChips={
-                !overrides.job_tags || overrides.job_tags === ''
+                overrides.job_tags === undefined ||
+                overrides.job_tags === null ||
+                overrides.job_tags === ''
                   ? 0
                   : overrides.job_tags.split(',').length
               }
             >
-              {overrides.job_tags.length > 0 &&
+              {overrides.job_tags !== undefined &&
+                overrides.job_tags !== null &&
+                overrides.job_tags !== '' &&
+                overrides.job_tags.length > 0 &&
                 overrides.job_tags.split(',').map((jobTag) => (
                   <Chip
                     key={jobTag}

@@ -284,13 +289,18 @@ function PromptDetail({
             <ChipGroup
               numChips={5}
               totalChips={
-                !overrides.skip_tags || overrides.skip_tags === ''
+                overrides.skip_tags === undefined ||
+                overrides.skip_tags === null ||
+                overrides.skip_tags === ''
                   ? 0
                   : overrides.skip_tags.split(',').length
               }
               ouiaId="prompt-skip-tag-chips"
             >
-              {overrides.skip_tags.length > 0 &&
+              {overrides.skip_tags !== undefined &&
+                overrides.skip_tags !== null &&
+                overrides.skip_tags !== '' &&
+                overrides.skip_tags.length > 0 &&
                 overrides.skip_tags.split(',').map((skipTag) => (
                   <Chip
                     key={skipTag}
@@ -115,8 +115,11 @@ function SessionProvider({ children }) {
   }, [setSessionTimeout, setSessionCountdown]);

   useEffect(() => {
+    const isRedirectCondition = (location, histLength) =>
+      location.pathname === '/login' && histLength === 2;
+
     const unlisten = history.listen((location, action) => {
-      if (action === 'POP') {
+      if (action === 'POP' || isRedirectCondition(location, history.length)) {
         setIsRedirectLinkReceived(true);
       }
     });
@@ -784,7 +784,7 @@ msgstr "Branche à utiliser dans l’exécution de la tâche. Projet par défaut

 #: screens/Inventory/shared/Inventory.helptext.js:155
 msgid "Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true."
-msgstr ""
+msgstr "Branche à utiliser pour la synchronisation de l'inventaire. La valeur par défaut du projet est utilisée si elle est vide. Cette option n'est autorisée que si le champ allow_override du projet est défini sur vrai."

 #: components/About/About.js:45
 msgid "Brand Image"

@@ -2832,7 +2832,7 @@ msgstr "Entrez les variables avec la syntaxe JSON ou YAML. Consultez la documen

 #: screens/Inventory/shared/SmartInventoryForm.js:94
 msgid "Enter inventory variables using either JSON or YAML syntax. Use the radio button to toggle between the two. Refer to the Ansible Controller documentation for example syntax."
-msgstr ""
+msgstr "Entrez les variables d'inventaire en utilisant la syntaxe JSON ou YAML. Utilisez le bouton d'option pour basculer entre les deux. Référez-vous à la documentation du contrôleur Ansible pour les exemples de syntaxe."

 #: screens/CredentialType/CredentialTypeDetails/CredentialTypeDetails.js:87
 msgid "Environment variables or extra variables that specify the values a credential type can inject."

@@ -3015,7 +3015,7 @@ msgstr "Recherche exacte sur le champ d'identification."

 #: components/Search/RelatedLookupTypeInput.js:38
 msgid "Exact search on name field."
-msgstr ""
+msgstr "Recherche exacte sur le champ nom."

 #: screens/Project/shared/Project.helptext.js:23
 msgid "Example URLs for GIT Source Control include:"

@@ -3242,7 +3242,7 @@ msgstr "Jobs ayant échoué"

 #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:262
 msgid "Failed to approve one or more workflow approval."
-msgstr ""
+msgstr "Échec de l'approbation d'une ou plusieurs validations de flux de travail."

 #: screens/WorkflowApproval/shared/WorkflowApprovalButton.js:56
 msgid "Failed to approve {0}."

@@ -3474,7 +3474,7 @@ msgstr "N'a pas réussi à supprimer {name}."

 #: screens/WorkflowApproval/WorkflowApprovalList/WorkflowApprovalList.js:263
 msgid "Failed to deny one or more workflow approval."
-msgstr ""
+msgstr "Échec du refus d'une ou plusieurs validations de flux de travail."

 #: screens/WorkflowApproval/shared/WorkflowDenyButton.js:51
 msgid "Failed to deny {0}."

@@ -3520,7 +3520,7 @@ msgstr "Echec du lancement du Job."

 #: screens/Inventory/InventoryHosts/InventoryHostItem.js:121
 msgid "Failed to load related groups."
-msgstr ""
+msgstr "Impossible de charger les groupes associés."

 #: screens/Instances/InstanceDetail/InstanceDetail.js:388
 #: screens/Instances/InstanceList/InstanceList.js:266

@@ -3972,12 +3972,12 @@ msgstr "Demande(s) de bilan de santé soumise(s). Veuillez patienter et recharge
 #: screens/Instances/InstanceDetail/InstanceDetail.js:234
 #: screens/Instances/InstanceList/InstanceListItem.js:242
 msgid "Health checks are asynchronous tasks. See the"
-msgstr ""
+msgstr "Les bilans de santé sont des tâches asynchrones. Veuillez consulter la documentation pour plus d'informations."

 #: screens/InstanceGroup/Instances/InstanceList.js:286
 #: screens/Instances/InstanceList/InstanceList.js:219
 msgid "Health checks can only be run on execution nodes."
-msgstr ""
+msgstr "Les bilans de santé ne peuvent être exécutées que sur les nœuds d'exécution."

 #: components/StatusLabel/StatusLabel.js:42
 msgid "Healthy"

@@ -5048,7 +5048,7 @@ msgstr "Lancer"

 #: components/TemplateList/TemplateListItem.js:214
 msgid "Launch Template"
-msgstr "Lacer le modèle."
+msgstr "Lancer le modèle."

 #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:32
 #: screens/ManagementJob/ManagementJobList/LaunchManagementPrompt.js:34

@@ -9637,7 +9637,7 @@ msgstr "Utilisateur"

 #: components/AppContainer/PageHeaderToolbar.js:160
 msgid "User Details"
-msgstr "Détails de l'erreur"
+msgstr "Détails de l'utilisateur"

 #: screens/Setting/SettingList.js:121
 #: screens/Setting/Settings.js:118
@@ -12,6 +12,7 @@ import { SettingsAPI } from 'api';
 import ContentLoading from 'components/ContentLoading';
 import InstanceDetail from './InstanceDetail';
 import InstancePeerList from './InstancePeers';
+import InstanceListenerAddressList from './InstanceListenerAddressList';

 function Instance({ setBreadcrumb }) {
   const { me } = useConfig();

@@ -54,7 +55,12 @@ function Instance({ setBreadcrumb }) {
   }, [request]);

   if (isK8s) {
-    tabsArray.push({ name: t`Peers`, link: `${match.url}/peers`, id: 1 });
+    tabsArray.push({
+      name: t`Listener Addresses`,
+      link: `${match.url}/listener_addresses`,
+      id: 1,
+    });
+    tabsArray.push({ name: t`Peers`, link: `${match.url}/peers`, id: 2 });
   }
   if (isLoading) {
     return <ContentLoading />;

@@ -72,6 +78,14 @@ function Instance({ setBreadcrumb }) {
         <Route path="/instances/:id/details" key="details">
           <InstanceDetail isK8s={isK8s} setBreadcrumb={setBreadcrumb} />
         </Route>
+        {isK8s && (
+          <Route
+            path="/instances/:id/listener_addresses"
+            key="listener_addresses"
+          >
+            <InstanceListenerAddressList setBreadcrumb={setBreadcrumb} />
+          </Route>
+        )}
         {isK8s && (
           <Route path="/instances/:id/peers" key="peers">
             <InstancePeerList setBreadcrumb={setBreadcrumb} />
@@ -9,6 +9,10 @@ function InstanceAdd() {
   const [formError, setFormError] = useState();
   const handleSubmit = async (values) => {
     try {
+      if (values.listener_port === undefined) {
+        values.listener_port = null;
+      }
+
       const {
         data: { id },
       } = await InstancesAPI.create(values);
@@ -36,6 +36,7 @@ describe('<InstanceAdd />', () => {
       });
     });
     expect(InstancesAPI.create).toHaveBeenCalledWith({
+      listener_port: null, // injected if listener_port is not set
       node_type: 'hop',
     });
    expect(history.location.pathname).toBe('/instances/13/details');
@@ -183,6 +183,7 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
   }
   const isHopNode = instance.node_type === 'hop';
   const isExecutionNode = instance.node_type === 'execution';
+  const isManaged = instance.managed;

   return (
     <>

@@ -208,33 +209,31 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
           <Detail label={t`Node Type`} value={instance.node_type} />
           <Detail label={t`Host`} value={instance.ip_address} />
           <Detail label={t`Listener Port`} value={instance.listener_port} />
+          {!isManaged && instance.related?.install_bundle && (
+            <Detail
+              label={t`Install Bundle`}
+              value={
+                <Tooltip content={t`Click to download bundle`}>
+                  <Button
+                    component="a"
+                    isSmall
+                    href={`${instance.related?.install_bundle}`}
+                    target="_blank"
+                    variant="secondary"
+                    dataCy="install-bundle-download-button"
+                    rel="noopener noreferrer"
+                  >
+                    <DownloadIcon />
+                  </Button>
+                </Tooltip>
+              }
+            />
+          )}
           {(isExecutionNode || isHopNode) && (
-            <>
-              {instance.related?.install_bundle && (
-                <Detail
-                  label={t`Install Bundle`}
-                  value={
-                    <Tooltip content={t`Click to download bundle`}>
-                      <Button
-                        component="a"
-                        isSmall
-                        href={`${instance.related?.install_bundle}`}
-                        target="_blank"
-                        variant="secondary"
-                        dataCy="install-bundle-download-button"
-                        rel="noopener noreferrer"
-                      >
-                        <DownloadIcon />
-                      </Button>
-                    </Tooltip>
-                  }
-                />
-              )}
-              <Detail
-                label={t`Peers from control nodes`}
-                value={instance.peers_from_control_nodes ? t`On` : t`Off`}
-              />
-            </>
+            <Detail
+              label={t`Peers from control nodes`}
+              value={instance.peers_from_control_nodes ? t`On` : t`Off`}
+            />
           )}
           {!isHopNode && (
             <>

@@ -294,7 +293,9 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
             value={instance.capacity_adjustment}
             onChange={handleChangeValue}
             isDisabled={
-              !config?.me?.is_superuser || !instance.enabled
+              !config?.me?.is_superuser ||
+              !instance.enabled ||
+              !isManaged
             }
             data-cy="slider"
           />

@@ -338,31 +339,31 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
         )}
       </DetailList>
       <CardActionsRow>
-        {config?.me?.is_superuser && isK8s && (isExecutionNode || isHopNode) && (
-          <Button
-            ouiaId="instance-detail-edit-button"
-            aria-label={t`edit`}
-            component={Link}
-            to={`/instances/${id}/edit`}
-          >
-            {t`Edit`}
-          </Button>
-        )}
-        {config?.me?.is_superuser &&
-          isK8s &&
-          (isExecutionNode || isHopNode) && (
+        {config?.me?.is_superuser && isK8s && !isManaged && (
+          <>
+            <Button
+              ouiaId="instance-detail-edit-button"
+              aria-label={t`edit`}
+              component={Link}
+              to={`/instances/${id}/edit`}
+            >
+              {t`Edit`}
+            </Button>
             <RemoveInstanceButton
               dataCy="remove-instance-button"
               itemsToRemove={[instance]}
               isK8s={isK8s}
               onRemove={removeInstances}
             />
-          )}
+          </>
+        )}
         {isExecutionNode && (
           <Tooltip content={t`Run a health check on the instance`}>
             <Button
               isDisabled={
-                !config?.me?.is_superuser || instance.health_check_pending
+                !config?.me?.is_superuser ||
+                instance.health_check_pending ||
+                instance.managed
               }
               variant="primary"
               ouiaId="health-check-button"

@@ -376,12 +377,14 @@ function InstanceDetail({ setBreadcrumb, isK8s }) {
             </Button>
           </Tooltip>
         )}
-        <InstanceToggle
-          css="display: inline-flex;"
-          fetchInstances={fetchDetails}
-          instance={instance}
-          dataCy="enable-instance"
-        />
+        {!isHopNode && (
+          <InstanceToggle
+            css="display: inline-flex;"
+            fetchInstances={fetchDetails}
+            instance={instance}
+            dataCy="enable-instance"
+          />
+        )}
       </CardActionsRow>

       {error && (
@@ -48,6 +48,7 @@ describe('<InstanceDetail/>', () => {
       cpu_capacity: 32,
       mem_capacity: 38,
       enabled: true,
+      managed: false,
      managed_by_policy: true,
       node_type: 'execution',
       node_state: 'ready',
@@ -114,7 +114,8 @@ function InstanceListItem({
   );

   const isHopNode = instance.node_type === 'hop';
-  const isExecutionNode = instance.node_type === 'execution';
+  const isManaged = instance.managed;

   return (
     <>
       <Tr

@@ -138,7 +139,7 @@ function InstanceListItem({
         rowIndex,
         isSelected,
         onSelect,
-        disable: !(isExecutionNode || isHopNode),
+        disable: isManaged,
       }}
       dataLabel={t`Selected`}
     />
Some files were not shown because too many files have changed in this diff.