Compare commits


15 Commits

Author SHA1 Message Date
John Westcott IV
af59abbbc4 Fixing NUL characters in event data 2023-05-02 14:37:35 -04:00
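PostgreSQL rejects NUL (\x00) bytes in text and jsonb columns, which is what makes NUL characters in event data a problem. A minimal sketch of the kind of sanitizing this commit implies; the helper name and call site are hypothetical, not AWX's actual code:

def strip_nul(value):
    """Recursively drop NUL (\x00) bytes, which PostgreSQL text/jsonb columns reject."""
    if isinstance(value, str):
        return value.replace('\x00', '')
    if isinstance(value, dict):
        return {strip_nul(k): strip_nul(v) for k, v in value.items()}
    if isinstance(value, list):
        return [strip_nul(v) for v in value]
    return value

# Hypothetical usage before persisting job event data:
# event_data = strip_nul(event_data)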
John Westcott IV
8ab3514428 Fixing ValueError becoming DataError 2023-05-02 11:47:12 -04:00
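The commit title suggests that code expecting a ValueError now sees django.db.DataError once psycopg 3 is in play, so exception handling had to be widened. A hedged illustration, with a made-up call rather than AWX's real code path:

from django.db import DataError

def save_event(event):
    # Under psycopg2 some bad values surfaced as ValueError; with psycopg 3 and
    # Django 4.2 the same failures can arrive wrapped as DataError.
    try:
        event.save()
    except (ValueError, DataError):
        # clean or reject the offending value instead of letting the worker crash
        raise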
John Westcott IV
98781a82c7 Merge branch 'feature-django-upgrade' of github.com:ansible/awx into feature-django-upgrade 2023-05-02 11:45:51 -04:00
John Westcott IV
d3fabe81d1 Fixing using QuerySet.iterator() after prefetch_related() without specifying chunk_size is deprecated 2023-04-28 15:32:20 -04:00
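Since Django 4.1, calling QuerySet.iterator() after prefetch_related() without a chunk_size raises a deprecation warning, because prefetching is only performed when a chunk size is supplied. The fix is to pass one explicitly; the queryset and names below are illustrative, not AWX's actual code:

# Deprecated: the prefetch is skipped and a warning is emitted.
# for job in Job.objects.prefetch_related('labels').iterator():
#     process(job)

# Fixed: an explicit chunk_size lets iterator() honor the prefetch.
for job in Job.objects.prefetch_related('labels').iterator(chunk_size=1000):
    process(job)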
John Westcott IV
b274d0e5ef Removing deprecated django.utils.timezone.utc alias in favor of datetime.timezone.utc 2023-04-28 15:32:20 -04:00
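django.utils.timezone.utc was deprecated in Django 4.1 in favor of the standard library's datetime.timezone.utc, so this is a straight import swap. For example:

import datetime

# Before (deprecated):
# from django.utils.timezone import utc
# created = datetime.datetime(2023, 4, 28, tzinfo=utc)

# After:
created = datetime.datetime(2023, 4, 28, tzinfo=datetime.timezone.utc)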
John Westcott IV
4494412f0c Replacing deprecated index_together with new indexes 2023-04-28 15:31:28 -04:00
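Meta.index_together is deprecated as of Django 4.2 in favor of Meta.indexes. The equivalent declaration looks like this; the model and fields are hypothetical stand-ins, not AWX's models:

from django.db import models

class JobEvent(models.Model):
    job = models.ForeignKey('Job', on_delete=models.CASCADE)
    counter = models.PositiveIntegerField(default=0)

    class Meta:
        # Old, deprecated form:
        # index_together = [('job', 'counter')]
        # New form:
        indexes = [models.Index(fields=['job', 'counter'])]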
John Westcott IV
b82bec7d04 Replacing psycopg2.copy_expert with psycopg3.copy 2023-04-28 12:35:49 -04:00
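psycopg 3 drops cursor.copy_expert(); bulk COPY goes through the cursor.copy() context manager instead. A hedged sketch of the two styles, with an illustrative table and payload:

import io

# psycopg2 style:
# buf = io.StringIO('1\thello\n')
# cursor.copy_expert('COPY main_jobevent (id, stdout) FROM STDIN', buf)

# psycopg 3 style:
buf = io.StringIO('1\thello\n')
with cursor.copy('COPY main_jobevent (id, stdout) FROM STDIN') as copy:
    copy.write(buf.read())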
John Westcott IV
2cee1caad2 Fixing final CI error 2023-04-28 12:35:49 -04:00
John Westcott IV
c3045b1169 Updating old migrations for psycopg3
We have both psycopg2 and psycopg3 installed in the AWX venv.

Older versions of Django only used psycopg2, but 4.2 now supports psycopg3 as well.

Django 4.2 detects psycopg3 first and will use it over psycopg2.

Old migrations therefore needed to be updated to support psycopg3.
2023-04-28 12:35:49 -04:00
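With both drivers installed, a quick way to confirm which one Django 4.2 actually selected (not part of AWX's code, just a check) is to inspect the DB-API module the postgresql backend imported; it prefers psycopg 3 when available:

from django.db import connection

# Prints 'psycopg' when Django 4.2 picked psycopg 3, 'psycopg2' for the legacy driver.
print(connection.Database.__name__)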
John Westcott IV
27024378bc Upgrading Django to 4.2 LTS 2023-04-28 12:35:49 -04:00
John Westcott IV
8eff90d4c0 Adding upgrade to django-oauth-toolkit pre-migration 2023-04-28 12:35:49 -04:00
John Westcott IV
9b633b6492 Fixing final CI error 2023-04-27 08:00:56 -04:00
John Westcott IV
11dbc56ecb Updating old migrations for psycopg3
We have both psycopg2 and psycopg3 installed in the AWX venv.

Older versions of Django only used psycopg2, but 4.2 now supports psycopg3 as well.

Django 4.2 detects psycopg3 first and will use it over psycopg2.

Old migrations therefore needed to be updated to support psycopg3.
2023-04-26 09:10:25 -04:00
John Westcott IV
4c1bd1e88e Upgrading Django to 4.2 LTS 2023-04-26 09:10:25 -04:00
John Westcott IV
865cb7518e Adding upgrade to django-oauth-toolkit pre-migration 2023-04-26 09:10:25 -04:00
1269 changed files with 11431 additions and 39134 deletions

View File

@@ -19,8 +19,6 @@ body:
required: true
- label: I understand that AWX is open source software provided for free and that I might not receive a timely response.
required: true
- label: I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `security@ansible.com` instead.)
required: true
- type: textarea
id: summary
@@ -44,7 +42,6 @@ body:
label: Select the relevant components
options:
- label: UI
- label: UI (tech preview)
- label: API
- label: Docs
- label: Collection

View File

@@ -1,28 +0,0 @@
name: Setup images for AWX
description: Builds new awx_devel image
inputs:
github-token:
description: GitHub Token for registry access
required: true
runs:
using: composite
steps:
- name: Get python version from Makefile
shell: bash
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Log in to registry
shell: bash
run: |
echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull latest devel image to warm cache
shell: bash
run: docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
- name: Build image for current source checkout
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
make docker-compose-build

View File

@@ -1,73 +0,0 @@
name: Run AWX docker-compose
description: Runs AWX with `make docker-compose`
inputs:
github-token:
description: GitHub Token to pass to awx_devel_image
required: true
build-ui:
description: Should the UI be built?
required: false
default: false
type: boolean
outputs:
ip:
description: The IP of the tools_awx_1 container
value: ${{ steps.data.outputs.ip }}
admin-token:
description: OAuth token for admin user
value: ${{ steps.data.outputs.admin_token }}
runs:
using: composite
steps:
- name: Build awx_devel image for running checks
uses: ./.github/actions/awx_devel_image
with:
github-token: ${{ inputs.github-token }}
- name: Upgrade ansible-core
shell: bash
run: python3 -m pip install --upgrade ansible-core
- name: Install system deps
shell: bash
run: sudo apt-get install -y gettext
- name: Start AWX
shell: bash
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} \
COMPOSE_TAG=${{ github.base_ref }} \
COMPOSE_UP_OPTS="-d" \
make docker-compose
- name: Update default AWX password
shell: bash
run: |
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
do
echo "Waiting for AWX..."
sleep 5
done
echo "AWX is up, updating the password..."
docker exec -i tools_awx_1 sh <<-EOSH
awx-manage update_password --username=admin --password=password
EOSH
- name: Build UI
# This must be a string comparison in composite actions:
# https://github.com/actions/runner/issues/2238
if: ${{ inputs.build-ui == 'true' }}
shell: bash
run: |
docker exec -i tools_awx_1 sh <<-EOSH
make ui-devel
EOSH
- name: Get instance data
id: data
shell: bash
run: |
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
ADMIN_TOKEN=$(docker exec -i tools_awx_1 awx-manage create_oauth2_token --user admin)
echo "ip=$AWX_IP" >> $GITHUB_OUTPUT
echo "admin_token=$ADMIN_TOKEN" >> $GITHUB_OUTPUT

View File

@@ -1,19 +0,0 @@
name: Upload logs
description: Upload logs from `make docker-compose` devel environment to GitHub as an artifact
inputs:
log-filename:
description: "*Unique* name of the log file"
required: true
runs:
using: composite
steps:
- name: Get AWX logs
shell: bash
run: |
docker logs tools_awx_1 > ${{ inputs.log-filename }}
- name: Upload AWX logs as artifact
uses: actions/upload-artifact@v3
with:
name: docker-compose-logs
path: ${{ inputs.log-filename }}

.github/dependabot.yml (vendored, new file, 19 additions)
View File

@@ -0,0 +1,19 @@
version: 2
updates:
- package-ecosystem: "npm"
directory: "/awx/ui"
schedule:
interval: "monthly"
open-pull-requests-limit: 5
allow:
- dependency-type: "production"
reviewers:
- "AlexSCorey"
- "keithjgrant"
- "kialam"
- "mabashian"
- "marshmalien"
labels:
- "component:ui"
- "dependencies"
target-branch: "devel"

View File

@@ -6,8 +6,6 @@ needs_triage:
- "Feature Summary"
"component:ui":
- "\\[X\\] UI"
"component:ui_next":
- "\\[X\\] UI \\(tech preview\\)"
"component:api":
- "\\[X\\] API"
"component:docs":

View File

@@ -15,5 +15,5 @@
"dependencies":
- any: ["awx/ui/package.json"]
- any: ["requirements/*.txt"]
- any: ["requirements/requirements.in"]
- any: ["awx/requirements/*.txt"]
- any: ["awx/requirements/requirements.in"]

View File

@@ -7,8 +7,8 @@
## PRs/Issues
### Visit the Forum or Matrix
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on either the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) or the [Ansible Community Forum](https://forum.ansible.com/tag/awx)?
### Visit our mailing list
- Hello, this appears to be less of a bug report or feature request and more of a question. Could you please ask this on our mailing list? See https://github.com/ansible/awx/#get-involved for information for ways to connect with us.
### Denied Submission

View File

@@ -3,7 +3,7 @@ name: CI
env:
LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEV_DOCKER_OWNER: ${{ github.repository_owner }}
DEV_DOCKER_TAG_BASE: ghcr.io/${{ github.repository_owner }}
COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
on:
pull_request:
@@ -20,8 +20,6 @@ jobs:
tests:
- name: api-test
command: /start_tests.sh
- name: api-migrations
command: /start_tests.sh test_migrations
- name: api-lint
command: /var/lib/awx/venv/awx/bin/tox -e linters
- name: api-swagger
@@ -37,40 +35,29 @@ jobs:
- name: ui-test-general
command: make ui-test-general
steps:
- uses: actions/checkout@v3
- name: Build awx_devel image for running checks
uses: ./.github/actions/awx_devel_image
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@v2
- name: Run check ${{ matrix.tests.name }}
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make docker-runner
run: AWX_DOCKER_CMD='${{ matrix.tests.command }}' make github_ci_runner
dev-env:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/run_awx_devel
id: awx
with:
build-ui: false
github-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@v2
- name: Run smoke test
run: ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
run: make github_ci_setup && ansible-playbook tools/docker-compose/ansible/smoke-test.yml -v
awx-operator:
runs-on: ubuntu-latest
steps:
- name: Checkout awx
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
path: awx
- name: Checkout awx-operator
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
repository: ansible/awx-operator
path: awx-operator
@@ -80,7 +67,7 @@ jobs:
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
@@ -115,7 +102,7 @@ jobs:
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
# The containers that GitHub Actions use have Ansible installed, so upgrade to make sure we have the latest version.
- name: Upgrade ansible-core
@@ -127,137 +114,3 @@ jobs:
# needed due to cgroupsv2. This is fixed, but a stable release
# with the fix has not been made yet.
ANSIBLE_TEST_PREFER_PODMAN: 1
collection-integration:
name: awx_collection integration
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target-regex:
- name: a-h
regex: ^[a-h]
- name: i-p
regex: ^[i-p]
- name: r-z0-9
regex: ^[r-z0-9]
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/run_awx_devel
id: awx
with:
build-ui: false
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install dependencies for running tests
run: |
python3 -m pip install -e ./awxkit/
python3 -m pip install -r awx_collection/requirements.txt
- name: Run integration tests
run: |
echo "::remove-matcher owner=python::" # Disable annoying annotations from setup-python
echo '[general]' > ~/.tower_cli.cfg
echo 'host = https://${{ steps.awx.outputs.ip }}:8043' >> ~/.tower_cli.cfg
echo 'oauth_token = ${{ steps.awx.outputs.admin-token }}' >> ~/.tower_cli.cfg
echo 'verify_ssl = false' >> ~/.tower_cli.cfg
TARGETS="$(ls awx_collection/tests/integration/targets | grep '${{ matrix.target-regex.regex }}' | tr '\n' ' ')"
make COLLECTION_VERSION=100.100.100-git COLLECTION_TEST_TARGET="--coverage --requirements $TARGETS" test_collection_integration
env:
ANSIBLE_TEST_PREFER_PODMAN: 1
# Upload coverage report as artifact
- uses: actions/upload-artifact@v3
if: always()
with:
name: coverage-${{ matrix.target-regex.name }}
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
- uses: ./.github/actions/upload_awx_devel_logs
if: always()
with:
log-filename: collection-integration-${{ matrix.target-regex.name }}.log
collection-integration-coverage-combine:
name: combine awx_collection integration coverage
runs-on: ubuntu-latest
needs:
- collection-integration
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Upgrade ansible-core
run: python3 -m pip install --upgrade ansible-core
- name: Download coverage artifacts
uses: actions/download-artifact@v3
with:
path: coverage
- name: Combine coverage
run: |
make COLLECTION_VERSION=100.100.100-git install_collection
mkdir -p ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage
cd coverage
for i in coverage-*; do
cp -rv $i/* ~/.ansible/collections/ansible_collections/awx/awx/tests/output/coverage/
done
cd ~/.ansible/collections/ansible_collections/awx/awx
ansible-test coverage combine --requirements
ansible-test coverage html
echo '## AWX Collection Integration Coverage' >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
ansible-test coverage report >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
echo '## AWX Collection Integration Coverage HTML' >> $GITHUB_STEP_SUMMARY
echo 'Download the HTML artifacts to view the coverage report.' >> $GITHUB_STEP_SUMMARY
# This is a huge hack, there's no official action for removing artifacts currently.
# Also ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN aren't available in normal run
# steps, so we have to use github-script to get them.
#
# The advantage of doing this, though, is that we save on artifact storage space.
- name: Get secret artifact runtime URL
uses: actions/github-script@v6
id: get-runtime-url
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_URL } = process.env;
return ACTIONS_RUNTIME_URL;
- name: Get secret artifact runtime token
uses: actions/github-script@v6
id: get-runtime-token
with:
result-encoding: string
script: |
const { ACTIONS_RUNTIME_TOKEN } = process.env;
return ACTIONS_RUNTIME_TOKEN;
- name: Remove intermediary artifacts
env:
ACTIONS_RUNTIME_URL: ${{ steps.get-runtime-url.outputs.result }}
ACTIONS_RUNTIME_TOKEN: ${{ steps.get-runtime-token.outputs.result }}
run: |
echo "::add-mask::${ACTIONS_RUNTIME_TOKEN}"
artifacts=$(
curl -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" \
${ACTIONS_RUNTIME_URL}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview \
| jq -r '.value | .[] | select(.name | startswith("coverage-")) | .url'
)
for artifact in $artifacts; do
curl -i -X DELETE -H "Accept: application/json;api-version=6.0-preview" -H "Authorization: Bearer $ACTIONS_RUNTIME_TOKEN" "$artifact"
done
- name: Upload coverage report as artifact
uses: actions/upload-artifact@v3
with:
name: awx-collection-integration-coverage-html
path: ~/.ansible/collections/ansible_collections/awx/awx/tests/output/reports/coverage

View File

@@ -16,7 +16,7 @@ jobs:
packages: write
contents: read
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
@@ -28,7 +28,7 @@ jobs:
OWNER: '${{ github.repository_owner }}'
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
@@ -48,11 +48,8 @@ jobs:
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build
- name: Push development images
- name: Push image
run: |
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
- name: Push AWX k8s image, only for upstream and feature branches
run: docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
if: endsWith(github.repository, '/awx')
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}

View File

@@ -1,16 +0,0 @@
---
name: Docsite CI
on:
pull_request:
jobs:
docsite-build:
name: docsite test build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: install tox
run: pip install tox
- name: Assure docs can be built
run: tox -e docs

View File

@@ -19,20 +19,41 @@ jobs:
job: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: ./.github/actions/run_awx_devel
id: awx
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
build-ui: true
github-token: ${{ secrets.GITHUB_TOKEN }}
python-version: ${{ env.py_version }}
- name: Install system deps
run: sudo apt-get install -y gettext
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${{ github.base_ref }}
- name: Build UI
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make ui-devel
- name: Start AWX
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${{ github.base_ref }} make docker-compose &> make-docker-compose-output.log &
- name: Pull awx_cypress_base image
run: |
docker pull quay.io/awx/awx_cypress_base:latest
- name: Checkout test project
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
repository: ${{ github.repository_owner }}/tower-qa
ssh-key: ${{ secrets.QA_REPO_KEY }}
@@ -44,6 +65,18 @@ jobs:
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
docker build -t awx-pf-tests .
- name: Update default AWX password
run: |
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' -k https://localhost:8043/api/v2/ping/)" != "200" ]]
do
echo "Waiting for AWX..."
sleep 5;
done
echo "AWX is up, updating the password..."
docker exec -i tools_awx_1 sh <<-EOSH
awx-manage update_password --username=admin --password=password
EOSH
- name: Run E2E tests
env:
CYPRESS_RECORD_KEY: ${{ secrets.CYPRESS_RECORD_KEY }}
@@ -53,7 +86,7 @@ jobs:
export COMMIT_INFO_SHA=$GITHUB_SHA
export COMMIT_INFO_REMOTE=$GITHUB_REPOSITORY_OWNER
cd ${{ secrets.E2E_PROJECT }}/ui-tests/awx-pf-tests
AWX_IP=${{ steps.awx.outputs.ip }}
AWX_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' tools_awx_1)
printenv > .env
echo "Executing tests:"
docker run \
@@ -69,7 +102,8 @@ jobs:
-w /e2e \
awx-pf-tests run --project .
- uses: ./.github/actions/upload_awx_devel_logs
if: always()
- name: Save AWX logs
uses: actions/upload-artifact@v2
with:
log-filename: e2e-${{ matrix.job }}.log
name: AWX-logs-${{ matrix.job }}
path: make-docker-compose-output.log

View File

@@ -6,9 +6,9 @@ on:
- opened
- reopened
permissions:
contents: write # to fetch code
issues: write # to label issues
permissions:
contents: read # to fetch code
issues: write # to label issues
jobs:
triage:
@@ -17,7 +17,7 @@ jobs:
steps:
- name: Label Issue
uses: github/issue-labeler@v3.1
uses: github/issue-labeler@v2.4.1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
not-before: 2021-12-07T07:00:00Z
@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
name: Label Issue - Community
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests

View File

@@ -8,7 +8,7 @@ on:
- synchronize
permissions:
contents: write # to determine modified files (actions/labeler)
contents: read # to determine modified files (actions/labeler)
pull-requests: write # to add labels to PRs (actions/labeler)
jobs:
@@ -27,7 +27,7 @@ jobs:
runs-on: ubuntu-latest
name: Label PR - Community
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- uses: actions/setup-python@v4
- name: Install python requests
run: pip install requests

View File

@@ -7,7 +7,6 @@ on:
types: [opened, edited, reopened, synchronize]
jobs:
pr-check:
if: github.repository_owner == 'ansible' && endsWith(github.repository, 'awx')
name: Scan PR description for semantic versioning keywords
runs-on: ubuntu-latest
permissions:

View File

@@ -17,13 +17,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout awx
uses: actions/checkout@v3
uses: actions/checkout@v2
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
@@ -40,12 +40,8 @@ jobs:
if: ${{ github.repository_owner != 'ansible' }}
- name: Build collection and publish to galaxy
env:
COLLECTION_NAMESPACE: ${{ env.collection_namespace }}
COLLECTION_VERSION: ${{ github.event.release.tag_name }}
COLLECTION_TEMPLATE_VERSION: true
run: |
make build_collection
COLLECTION_TEMPLATE_VERSION=true COLLECTION_NAMESPACE=${{ env.collection_namespace }} make build_collection
if [ "$(curl --head -sw '%{http_code}' https://galaxy.ansible.com/download/${{ env.collection_namespace }}-awx-${{ github.event.release.tag_name }}.tar.gz | tail -1)" == "302" ] ; then \
echo "Galaxy release already done"; \
else \

View File

@@ -44,7 +44,7 @@ jobs:
exit 0
- name: Checkout awx
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
path: awx
@@ -52,18 +52,18 @@ jobs:
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}
- name: Checkout awx-logos
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
repository: ansible/awx-logos
path: awx-logos
- name: Checkout awx-operator
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
repository: ${{ github.repository_owner }}/awx-operator
path: awx-operator

View File

@@ -17,13 +17,13 @@ jobs:
packages: write
contents: read
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v4
uses: actions/setup-python@v2
with:
python-version: ${{ env.py_version }}

.gitignore (vendored, 4 lines changed)
View File

@@ -165,7 +165,3 @@ use_dev_supervisor.txt
awx/ui_next/src
awx/ui_next/build
# Docs build stuff
docs/docsite/build/
_readthedocs/

View File

@@ -1,5 +0,0 @@
[allowlist]
description = "Documentation contains example secrets and passwords"
paths = [
"docs/docsite/rst/administration/oauth2_token_auth.rst",
]

View File

@@ -1,5 +0,0 @@
[tool.pip-tools]
resolver = "backtracking"
allow-unsafe = true
strip-extras = true
quiet = true

View File

@@ -1,15 +0,0 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
build:
os: ubuntu-22.04
tools:
python: >-
3.11
commands:
- pip install --user tox
- python3 -m tox -e docs
- mkdir -p _readthedocs/html/
- mv docs/docsite/build/html/* _readthedocs/html/

View File

@@ -10,7 +10,6 @@ ignore: |
tools/docker-compose/_sources
# django template files
awx/api/templates/instance_install_bundle/**
.readthedocs.yaml
extends: default

View File

@@ -4,6 +4,6 @@
Early versions of AWX did not support seamless upgrades between major versions and required the use of a backup and restore tool to perform upgrades.
As of version 18.0, `awx-operator` is the preferred install/upgrade method. Users who wish to upgrade modern AWX installations should follow the instructions at:
Users who wish to upgrade modern AWX installations should follow the instructions at:
https://github.com/ansible/awx-operator/blob/devel/docs/upgrade/upgrading.md
https://github.com/ansible/awx/blob/devel/INSTALL.md#upgrading-from-previous-versions

View File

@@ -31,7 +31,7 @@ If your issue isn't considered high priority, then please be patient as it may t
`state:needs_info` The issue needs more information. This could be more debug output, more specifics out the system such as version information. Any detail that is currently preventing this issue from moving forward. This should be considered a blocked state.
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familiar with an area of the code base the issue is for.
`state:needs_review` The issue/pull request needs to be reviewed by other maintainers and contributors. This is usually used when there is a question out to another maintainer or when a person is less familar with an area of the code base the issue is for.
`state:needs_revision` More commonly used on pull requests, this state represents that there are changes that are being waited on.

View File

@@ -1,16 +1,14 @@
-include awx/ui_next/Makefile
PYTHON := $(notdir $(shell for i in python3.9 python3; do command -v $$i; done|sed 1q))
SHELL := bash
DOCKER_COMPOSE ?= docker-compose
OFFICIAL ?= no
NODE ?= node
NPM_BIN ?= npm
KIND_BIN ?= $(shell which kind)
CHROMIUM_BIN=/tmp/chrome-linux/chrome
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
MANAGEMENT_COMMAND ?= awx-manage
VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py)
VERSION := $(shell $(PYTHON) tools/scripts/scm_version.py)
# ansible-test requires semver compatable version, so we allow overrides to hack it
COLLECTION_VERSION ?= $(shell $(PYTHON) tools/scripts/scm_version.py | cut -d . -f 1-3)
@@ -29,8 +27,6 @@ COLLECTION_TEMPLATE_VERSION ?= false
# NOTE: This defaults the container image version to the branch that's active
COMPOSE_TAG ?= $(GIT_BRANCH)
MAIN_NODE_TYPE ?= hybrid
# If set to true docker-compose will also start a pgbouncer instance and use it
PGBOUNCER ?= false
# If set to true docker-compose will also start a keycloak instance
KEYCLOAK ?= false
# If set to true docker-compose will also start an ldap instance
@@ -41,24 +37,19 @@ SPLUNK ?= false
PROMETHEUS ?= false
# If set to true docker-compose will also start a grafana instance
GRAFANA ?= false
# If set to true docker-compose will also start a hashicorp vault instance
VAULT ?= false
# If set to true docker-compose will also start a tacacs+ instance
TACACS ?= false
VENV_BASE ?= /var/lib/awx/venv
DEV_DOCKER_OWNER ?= ansible
# Docker will only accept lowercase, so github names like Paul need to be paul
DEV_DOCKER_OWNER_LOWER = $(shell echo $(DEV_DOCKER_OWNER) | tr A-Z a-z)
DEV_DOCKER_TAG_BASE ?= ghcr.io/$(DEV_DOCKER_OWNER_LOWER)
DEV_DOCKER_TAG_BASE ?= ghcr.io/ansible
DEVEL_IMAGE_NAME ?= $(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG)
RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
# Python packages to install only from source (not from binary wheels)
# Comma separated list
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
SRC_ONLY_PKGS ?= cffi,pycparser,psycopg2,twilio
# These should be upgraded in the AWX and Ansible venv before attempting
# to install the actual requirements
VENV_BOOTSTRAP ?= pip==21.2.4 setuptools==65.6.3 setuptools_scm[toml]==7.0.5 wheel==0.38.4
@@ -79,7 +70,7 @@ I18N_FLAG_FILE = .i18n_built
sdist \
ui-release ui-devel \
VERSION PYTHON_VERSION docker-compose-sources \
.git/hooks/pre-commit
.git/hooks/pre-commit github_ci_setup github_ci_runner
clean-tmp:
rm -rf tmp/
@@ -273,11 +264,11 @@ run-wsrelay:
$(PYTHON) manage.py run_wsrelay
## Start the heartbeat process in background in development environment.
run-ws-heartbeat:
run-heartbeet:
@if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
$(PYTHON) manage.py run_ws_heartbeat
$(PYTHON) manage.py run_heartbeet
reports:
mkdir -p $@
@@ -324,16 +315,21 @@ test:
cd awxkit && $(VENV_BASE)/awx/bin/tox -re py3
awx-manage check_migrations --dry-run --check -n 'missing_migration_file'
test_migrations:
if [ "$(VENV_BASE)" ]; then \
. $(VENV_BASE)/awx/bin/activate; \
fi; \
PYTHONDONTWRITEBYTECODE=1 py.test -p no:cacheprovider --migrations -m migration_test $(PYTEST_ARGS) $(TEST_DIRS)
## Login to Github container image registry, pull image, then build image.
github_ci_setup:
# GITHUB_ACTOR is automatic github actions env var
# CI_GITHUB_TOKEN is defined in .github files
echo $(CI_GITHUB_TOKEN) | docker login ghcr.io -u $(GITHUB_ACTOR) --password-stdin
docker pull $(DEVEL_IMAGE_NAME) || : # Pre-pull image to warm build cache
$(MAKE) docker-compose-build
## Runs AWX_DOCKER_CMD inside a new docker container.
docker-runner:
docker run -u $(shell id -u) --rm -v $(shell pwd):/awx_devel/:Z --workdir=/awx_devel $(DEVEL_IMAGE_NAME) $(AWX_DOCKER_CMD)
## Builds image and runs AWX_DOCKER_CMD in it, mainly for .github checks.
github_ci_runner: github_ci_setup docker-runner
test_collection:
rm -f $(shell ls -d $(VENV_BASE)/awx/lib/python* | head -n 1)/no-global-site-packages.txt
if [ "$(VENV_BASE)" ]; then \
@@ -379,7 +375,7 @@ test_collection_sanity:
cd $(COLLECTION_INSTALL) && ansible-test sanity $(COLLECTION_SANITY_ARGS)
test_collection_integration: install_collection
cd $(COLLECTION_INSTALL) && ansible-test integration -vvv $(COLLECTION_TEST_TARGET)
cd $(COLLECTION_INSTALL) && ansible-test integration $(COLLECTION_TEST_TARGET)
test_unit:
@if [ "$(VENV_BASE)" ]; then \
@@ -521,20 +517,15 @@ docker-compose-sources: .git/hooks/pre-commit
-e control_plane_node_count=$(CONTROL_PLANE_NODE_COUNT) \
-e execution_node_count=$(EXECUTION_NODE_COUNT) \
-e minikube_container_group=$(MINIKUBE_CONTAINER_GROUP) \
-e enable_pgbouncer=$(PGBOUNCER) \
-e enable_keycloak=$(KEYCLOAK) \
-e enable_ldap=$(LDAP) \
-e enable_splunk=$(SPLUNK) \
-e enable_prometheus=$(PROMETHEUS) \
-e enable_grafana=$(GRAFANA) \
-e enable_vault=$(VAULT) \
-e enable_tacacs=$(TACACS) \
$(EXTRA_SOURCES_ANSIBLE_OPTS)
docker-compose: awx/projects docker-compose-sources
ansible-galaxy install --ignore-certs -r tools/docker-compose/ansible/requirements.yml;
ansible-playbook -i tools/docker-compose/inventory tools/docker-compose/ansible/initialize_containers.yml \
-e enable_vault=$(VAULT);
$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml $(COMPOSE_OPTS) up $(COMPOSE_UP_OPTS) --remove-orphans
docker-compose-credential-plugins: awx/projects docker-compose-sources
@@ -586,7 +577,7 @@ docker-clean:
-$(foreach image_id,$(shell docker images --filter=reference='*/*/*awx_devel*' --filter=reference='*/*awx_devel*' --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)
docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_vault_1 tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
docker-refresh: docker-clean docker-compose
@@ -660,21 +651,16 @@ awx-kube-dev-build: Dockerfile.kube-dev
-t $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG) .
kind-dev-load: awx-kube-dev-build
$(KIND_BIN) load docker-image $(DEV_DOCKER_TAG_BASE)/awx_kube_devel:$(COMPOSE_TAG)
# Translation TASKS
# --------------------------------------
## generate UI .pot file, an empty template of strings yet to be translated
pot: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-template --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-template --clean
## generate UI .po files for each locale (will update translated strings for `en`)
po: $(UI_BUILD_FLAG_FILE)
$(NPM_BIN) --prefix awx/ui --loglevel warn run extract-strings -- --clean
$(NPM_BIN) --prefix awx/ui_next --loglevel warn run extract-strings -- --clean
## generate API django .pot .po
messages:

View File

@@ -1,5 +1,5 @@
[![CI](https://github.com/ansible/awx/actions/workflows/ci.yml/badge.svg?branch=devel)](https://github.com/ansible/awx/actions/workflows/ci.yml) [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-yellow.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) [![Apache v2 License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](https://github.com/ansible/awx/blob/devel/LICENSE.md) [![AWX Mailing List](https://img.shields.io/badge/mailing%20list-AWX-orange.svg)](https://groups.google.com/g/awx-project)
[![Ansible Matrix](https://img.shields.io/badge/matrix-Ansible%20Community-blueviolet.svg?logo=matrix)](https://chat.ansible.im/#/welcome) [![Ansible Discourse](https://img.shields.io/badge/discourse-Ansible%20Community-yellowgreen.svg?logo=discourse)](https://forum.ansible.com)
[![IRC Chat - #ansible-awx](https://img.shields.io/badge/IRC-%23ansible--awx-blueviolet.svg)](https://libera.chat)
<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
@@ -30,12 +30,12 @@ If you're experiencing a problem that you feel is a bug in AWX or have ideas for
Code of Conduct
---------------
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com)
Get Involved
------------
We welcome your feedback and ideas. Here's how to reach us with feedback and questions:
- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com)
- Join the [Ansible Community Forum](https://forum.ansible.com)
- Join the `#ansible-awx` channel on irc.libera.chat
- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project)

View File

@@ -52,14 +52,39 @@ try:
except ImportError: # pragma: no cover
MODE = 'production'
import hashlib
try:
import django # noqa: F401
HAS_DJANGO = True
except ImportError:
pass
HAS_DJANGO = False
else:
from django.db.backends.base import schema
from django.db.models import indexes
from django.db.backends.utils import names_digest
from django.db import connection
if HAS_DJANGO is True:
# See upgrade blocker note in requirements/README.md
try:
names_digest('foo', 'bar', 'baz', length=8)
except ValueError:
def names_digest(*args, length):
"""
Generate a 32-bit digest of a set of arguments that can be used to shorten
identifying names. Support for use in FIPS environments.
"""
h = hashlib.md5(usedforsecurity=False)
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
schema.names_digest = names_digest
indexes.names_digest = names_digest
def find_commands(management_dir):
# Modified version of function from django/core/management/__init__.py.

View File

@@ -347,7 +347,7 @@ class FieldLookupBackend(BaseFilterBackend):
args.append(Q(**{k: v}))
for role_name in role_filters:
if not hasattr(queryset.model, 'accessible_pk_qs'):
raise ParseError(_('Cannot apply role_level filter to this list because its model does not use roles for access control.'))
raise ParseError(_('Cannot apply role_level filter to this list because its model ' 'does not use roles for access control.'))
args.append(Q(pk__in=queryset.model.accessible_pk_qs(request.user, role_name)))
if or_filters:
q = Q()

View File

@@ -169,7 +169,7 @@ class APIView(views.APIView):
self.__init_request_error__ = exc
except UnsupportedMediaType as exc:
exc.detail = _(
'You did not use correct Content-Type in your HTTP request. If you are using our REST API, the Content-Type must be application/json'
'You did not use correct Content-Type in your HTTP request. ' 'If you are using our REST API, the Content-Type must be application/json'
)
self.__init_request_error__ = exc
return drf_request
@@ -232,8 +232,7 @@ class APIView(views.APIView):
response = super(APIView, self).finalize_response(request, response, *args, **kwargs)
time_started = getattr(self, 'time_started', None)
if request.user.is_authenticated:
response['X-API-Product-Version'] = get_awx_version()
response['X-API-Product-Version'] = get_awx_version()
response['X-API-Product-Name'] = server_product_name()
response['X-API-Node'] = settings.CLUSTER_HOST_ID
@@ -523,16 +522,14 @@ class SubListAPIView(ParentMixin, ListAPIView):
def get_queryset(self):
parent = self.get_parent_object()
self.check_parent_access(parent)
sublist_qs = self.get_sublist_queryset(parent)
if not self.filter_read_permission:
return optimize_queryset(self.get_sublist_queryset(parent))
qs = self.request.user.get_queryset(self.model)
if hasattr(self, 'parent_key'):
# This is vastly preferable for ReverseForeignKey relationships
return qs.filter(**{self.parent_key: parent})
return qs.distinct() & self.get_sublist_queryset(parent).distinct()
return optimize_queryset(sublist_qs)
qs = self.request.user.get_queryset(self.model).distinct()
return qs & sublist_qs
def get_sublist_queryset(self, parent):
return getattrd(parent, self.relationship)
return getattrd(parent, self.relationship).distinct()
class DestroyAPIView(generics.DestroyAPIView):
@@ -581,6 +578,15 @@ class SubListCreateAPIView(SubListAPIView, ListCreateAPIView):
d.update({'parent_key': getattr(self, 'parent_key', None)})
return d
def get_queryset(self):
if hasattr(self, 'parent_key'):
# Prefer this filtering because ForeignKey allows us more assumptions
parent = self.get_parent_object()
self.check_parent_access(parent)
qs = self.request.user.get_queryset(self.model)
return qs.filter(**{self.parent_key: parent})
return super(SubListCreateAPIView, self).get_queryset()
def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to

View File

@@ -71,7 +71,7 @@ class Metadata(metadata.SimpleMetadata):
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _(
'Data structure with name/description for related resources. The output for some objects may be limited for performance reasons.'
'Data structure with name/description for related resources. ' 'The output for some objects may be limited for performance reasons.'
),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),

View File

@@ -220,7 +220,7 @@ class CopySerializer(serializers.Serializer):
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_('The original object is already named {}, a copy from it cannot have the same name.'.format(name)))
raise serializers.ValidationError(_('The original object is already named {}, a copy from' ' it cannot have the same name.'.format(name)))
return attrs
@@ -760,7 +760,7 @@ class UnifiedJobTemplateSerializer(BaseSerializer):
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this unified job have been saved to the database.'), read_only=True
help_text=_('Indicates whether all of the events generated by this ' 'unified job have been saved to the database.'), read_only=True
)
class Meta:
@@ -1579,7 +1579,7 @@ class ProjectPlaybooksSerializer(ProjectSerializer):
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, not comprehensive.'))
inventory_files = serializers.ReadOnlyField(help_text=_('Array of inventory files and directories available within this project, ' 'not comprehensive.'))
class Meta:
model = Project
@@ -1629,8 +1629,8 @@ class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
fields = ('*', 'host_status_counts', 'playbook_counts')
def get_playbook_counts(self, obj):
task_count = obj.get_event_queryset().filter(event='playbook_on_task_start').count()
play_count = obj.get_event_queryset().filter(event='playbook_on_play_start').count()
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
@@ -2905,7 +2905,7 @@ class CredentialSerializer(BaseSerializer):
):
if getattr(self.instance, related_objects).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality of the resources using it.')
_('You cannot change the credential type of the credential, as it may break the functionality' ' of the resources using it.')
)
return credential_type
@@ -2925,7 +2925,7 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None,
write_only=True,
allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, do not give either team or organization. Only valid for creation.'),
help_text=_('Write-only field used to add user to owner role. If provided, ' 'do not give either team or organization. Only valid for creation.'),
)
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
@@ -2933,14 +2933,14 @@ class CredentialSerializerCreate(CredentialSerializer):
default=None,
write_only=True,
allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, do not give either user or organization. Only valid for creation.'),
help_text=_('Write-only field used to add team to owner role. If provided, ' 'do not give either user or organization. Only valid for creation.'),
)
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False,
default=None,
allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, do not give either user or team.'),
help_text=_('Inherit permissions from organization roles. If provided on creation, ' 'do not give either user or team.'),
)
class Meta:
@@ -2962,7 +2962,7 @@ class CredentialSerializerCreate(CredentialSerializer):
if len(owner_fields) > 1:
received = ", ".join(sorted(owner_fields))
raise serializers.ValidationError(
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, received {} fields.".format(received))}
{"detail": _("Only one of 'user', 'team', or 'organization' should be provided, " "received {} fields.".format(received))}
)
if attrs.get('team'):
@@ -3233,7 +3233,7 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _("Cannot enable provisioning callback without an inventory set.")})
prompting_error_message = _("You must either set a default value or ask to prompt on launch.")
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
@@ -3622,7 +3622,7 @@ class SystemJobSerializer(UnifiedJobSerializer):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _("Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes.").format(
return _("Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
@@ -4536,7 +4536,7 @@ class JobLaunchSerializer(BaseSerializer):
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(
_('Removing {} credential at launch time without replacement is not supported. Provided list lacked credential(s): {}.').format(
_('Removing {} credential at launch time without replacement is not supported. ' 'Provided list lacked credential(s): {}.').format(
cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])
)
)
@@ -4686,11 +4686,12 @@ class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
# many-to-many fields
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credentials', 'labels', 'instance_groups') # m2m fields are not canonical for WJ nodes
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
def validate(self, attrs):
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
@@ -4750,21 +4751,21 @@ class BulkJobLaunchSerializer(serializers.Serializer):
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
requested_use_credentials = set()
requested_use_labels = set()
requested_use_instance_groups = set()
# requested_use_instance_groups = set()
for job in attrs['jobs']:
for cred in job.get('credentials', []):
requested_use_credentials.add(cred)
for label in job.get('labels', []):
requested_use_labels.add(label)
for instance_group in job.get('instance_groups', []):
requested_use_instance_groups.add(instance_group)
# for instance_group in job.get('instance_groups', []):
# requested_use_instance_groups.add(instance_group)
key_to_obj_map = {
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
"instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
}
@@ -4791,7 +4792,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
self.check_list_permission(Label, requested_use_labels)
self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
@@ -4838,7 +4839,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
node_m2m_object_types_to_through_model = {
'credentials': WorkflowJobNode.credentials.through,
'labels': WorkflowJobNode.labels.through,
'instance_groups': WorkflowJobNode.instance_groups.through,
# 'instance_groups': WorkflowJobNode.instance_groups.through,
}
node_deferred_attr_names = (
'limit',
@@ -4891,9 +4892,9 @@ class BulkJobLaunchSerializer(serializers.Serializer):
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
for label in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if field_name in node_m2m_objects[node_identifier] and field_name == 'instance_groups':
for instance_group in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if through_model_objects:
through_model.objects.bulk_create(through_model_objects)
@@ -5018,7 +5019,7 @@ class NotificationTemplateSerializer(BaseSerializer):
for subevent in event_messages:
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
error_list.append(
_("Workflow Approval event '{}' invalid, must be one of 'running', 'approved', 'timed_out', or 'denied'").format(subevent)
_("Workflow Approval event '{}' invalid, must be one of " "'running', 'approved', 'timed_out', or 'denied'").format(subevent)
)
continue
subevent_messages = event_messages[subevent]
@@ -5356,16 +5357,10 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
class InstanceLinkSerializer(BaseSerializer):
class Meta:
model = InstanceLink
fields = ('id', 'url', 'related', 'source', 'target', 'link_state')
fields = ('source', 'target', 'link_state')
source = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
target = serializers.SlugRelatedField(slug_field="hostname", queryset=Instance.objects.all())
def get_related(self, obj):
res = super(InstanceLinkSerializer, self).get_related(obj)
res['source_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.source.id})
res['target_instance'] = self.reverse('api:instance_detail', kwargs={'pk': obj.target.id})
return res
source = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
target = serializers.SlugRelatedField(slug_field="hostname", read_only=True)
class InstanceNodeSerializer(BaseSerializer):
@@ -5382,7 +5377,6 @@ class InstanceSerializer(BaseSerializer):
jobs_running = serializers.IntegerField(help_text=_('Count of jobs in the running or waiting state that are targeted for this instance'), read_only=True)
jobs_total = serializers.IntegerField(help_text=_('Count of all jobs that target this instance'), read_only=True)
health_check_pending = serializers.SerializerMethodField()
peers = serializers.SlugRelatedField(many=True, required=False, slug_field="hostname", queryset=Instance.objects.all())
class Meta:
model = Instance
@@ -5419,8 +5413,6 @@ class InstanceSerializer(BaseSerializer):
'node_state',
'ip_address',
'listener_port',
'peers',
'peers_from_control_nodes',
)
extra_kwargs = {
'node_type': {'initial': Instance.Types.EXECUTION, 'default': Instance.Types.EXECUTION},
@@ -5444,7 +5436,7 @@ class InstanceSerializer(BaseSerializer):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
if obj.node_type in [Instance.Types.EXECUTION, Instance.Types.HOP]:
if settings.IS_K8S and obj.node_type in (Instance.Types.EXECUTION,):
res['install_bundle'] = self.reverse('api:instance_install_bundle', kwargs={'pk': obj.pk})
res['peers'] = self.reverse('api:instance_peers_list', kwargs={"pk": obj.pk})
if self.context['request'].user.is_superuser or self.context['request'].user.is_system_auditor:
@@ -5473,57 +5465,22 @@ class InstanceSerializer(BaseSerializer):
def get_health_check_pending(self, obj):
return obj.health_check_pending
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
def check_peers_changed():
'''
return True if
- 'peers' in attrs
- instance peers matches peers in attrs
'''
return self.instance and 'peers' in attrs and set(self.instance.peers.all()) != set(attrs['peers'])
if not self.instance and not settings.IS_K8S:
raise serializers.ValidationError(_("Can only create instances on Kubernetes or OpenShift."))
node_type = get_field_from_model_or_attrs("node_type")
peers_from_control_nodes = get_field_from_model_or_attrs("peers_from_control_nodes")
listener_port = get_field_from_model_or_attrs("listener_port")
peers = attrs.get('peers', [])
if peers_from_control_nodes and node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
raise serializers.ValidationError(_("peers_from_control_nodes can only be enabled for execution or hop nodes."))
if node_type in [Instance.Types.CONTROL, Instance.Types.HYBRID]:
if check_peers_changed():
raise serializers.ValidationError(
_("Setting peers manually for control nodes is not allowed. Enable peers_from_control_nodes on the hop and execution nodes instead.")
)
if not listener_port and peers_from_control_nodes:
raise serializers.ValidationError(_("Field listener_port must be a valid integer when peers_from_control_nodes is enabled."))
if not listener_port and self.instance and self.instance.peers_from.exists():
raise serializers.ValidationError(_("Field listener_port must be a valid integer when other nodes peer to it."))
for peer in peers:
if peer.listener_port is None:
raise serializers.ValidationError(_("Field listener_port must be set on peer ") + peer.hostname + ".")
if not settings.IS_K8S:
if check_peers_changed():
raise serializers.ValidationError(_("Cannot change peers."))
return super().validate(attrs)
def validate(self, data):
if self.instance:
if self.instance.node_type == Instance.Types.HOP:
raise serializers.ValidationError("Hop node instances may not be changed.")
else:
if not settings.IS_K8S:
raise serializers.ValidationError("Can only create instances on Kubernetes or OpenShift.")
return data
def validate_node_type(self, value):
if not self.instance and value not in [Instance.Types.HOP, Instance.Types.EXECUTION]:
raise serializers.ValidationError(_("Can only create execution or hop nodes."))
if self.instance and self.instance.node_type != value:
raise serializers.ValidationError(_("Cannot change node type."))
if not self.instance:
if value not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only create execution nodes.")
else:
if self.instance.node_type != value:
raise serializers.ValidationError("Cannot change node type.")
return value
@@ -5531,41 +5488,30 @@ class InstanceSerializer(BaseSerializer):
if self.instance:
if value != self.instance.node_state:
if not settings.IS_K8S:
raise serializers.ValidationError(_("Can only change the state on Kubernetes or OpenShift."))
raise serializers.ValidationError("Can only change the state on Kubernetes or OpenShift.")
if value != Instance.States.DEPROVISIONING:
raise serializers.ValidationError(_("Can only change instances to the 'deprovisioning' state."))
if self.instance.node_type not in (Instance.Types.EXECUTION, Instance.Types.HOP):
raise serializers.ValidationError(_("Can only deprovision execution or hop nodes."))
raise serializers.ValidationError("Can only change instances to the 'deprovisioning' state.")
if self.instance.node_type not in (Instance.Types.EXECUTION,):
raise serializers.ValidationError("Can only deprovision execution nodes.")
else:
if value and value != Instance.States.INSTALLED:
raise serializers.ValidationError(_("Can only create instances in the 'installed' state."))
raise serializers.ValidationError("Can only create instances in the 'installed' state.")
return value
def validate_hostname(self, value):
"""
Cannot change the hostname
- Hostname cannot be "localhost" - but can be something like localhost.domain
- Cannot change the hostname of an-already instantiated & initialized Instance object
"""
if self.instance and self.instance.hostname != value:
raise serializers.ValidationError(_("Cannot change hostname."))
raise serializers.ValidationError("Cannot change hostname.")
return value
def validate_listener_port(self, value):
"""
Cannot change listener port, unless going from none to integer, and vice versa
"""
if value and self.instance and self.instance.listener_port and self.instance.listener_port != value:
raise serializers.ValidationError(_("Cannot change listener port."))
return value
def validate_peers_from_control_nodes(self, value):
"""
Can only enable for K8S based deployments
"""
if value and not settings.IS_K8S:
raise serializers.ValidationError(_("Can only be enabled on Kubernetes or Openshift."))
if self.instance and self.instance.listener_port != value:
raise serializers.ValidationError("Cannot change listener port.")
return value
@@ -5573,19 +5519,7 @@ class InstanceSerializer(BaseSerializer):
class InstanceHealthCheckSerializer(BaseSerializer):
class Meta:
model = Instance
read_only_fields = (
'uuid',
'hostname',
'ip_address',
'version',
'last_health_check',
'errors',
'cpu',
'memory',
'cpu_capacity',
'mem_capacity',
'capacity',
)
read_only_fields = ('uuid', 'hostname', 'version', 'last_health_check', 'errors', 'cpu', 'memory', 'cpu_capacity', 'mem_capacity', 'capacity')
fields = read_only_fields
@@ -5625,7 +5559,7 @@ class InstanceGroupSerializer(BaseSerializer):
instances = serializers.SerializerMethodField()
is_container_group = serializers.BooleanField(
required=False,
help_text=_('Indicates whether instances in this group are containerized.Containerized groups have a designated Openshift or Kubernetes cluster.'),
help_text=_('Indicates whether instances in this group are containerized.' 'Containerized groups have a designated Openshift or Kubernetes cluster.'),
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
@@ -5636,7 +5570,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to this group when new instances come online."),
help_text=_("Minimum percentage of all instances that will be automatically assigned to " "this group when new instances come online."),
)
policy_instance_minimum = serializers.IntegerField(
default=0,
@@ -5644,7 +5578,7 @@ class InstanceGroupSerializer(BaseSerializer):
required=False,
initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to this group when new instances come online."),
help_text=_("Static minimum number of Instances that will be automatically assign to " "this group when new instances come online."),
)
max_concurrent_jobs = serializers.IntegerField(
default=0,

View File

@@ -1,10 +1,16 @@
import json
import warnings
from rest_framework.permissions import AllowAny
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
from coreapi.document import Object, Link
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import exceptions
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator, AutoSchema as DRFAuthSchema
from rest_framework.views import APIView
from rest_framework_swagger import renderers
class SuperUserSchemaGenerator(SchemaGenerator):
@@ -49,15 +55,43 @@ class AutoSchema(DRFAuthSchema):
return description
schema_view = get_schema_view(
openapi.Info(
title="Snippets API",
default_version='v1',
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="contact@snippets.local"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=[AllowAny],
)
class SwaggerSchemaView(APIView):
_ignore_model_permissions = True
exclude_from_schema = True
permission_classes = [AllowAny]
renderer_classes = [CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer]
def get(self, request):
generator = SuperUserSchemaGenerator(title='Ansible Automation Platform controller API', patterns=None, urlconf=None)
schema = generator.get_schema(request=request)
# python core-api doesn't support the deprecation yet, so track it
# ourselves and return it in a response header
_deprecated = []
# By default, DRF OpenAPI serialization places all endpoints in
# a single node based on their root path (/api). Instead, we want to
# group them by topic/tag so that they're categorized in the rendered
# output
document = schema._data.pop('api')
for path, node in document.items():
if isinstance(node, Object):
for action in node.values():
topic = getattr(action, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node
if isinstance(action, Object):
for link in action.links.values():
if link.deprecated:
_deprecated.append(link.url)
elif isinstance(node, Link):
topic = getattr(node, 'topic', None)
if topic:
schema._data.setdefault(topic, Object())
schema._data[topic]._data[path] = node
if not schema:
raise exceptions.ValidationError('The schema generator did not return a schema Document')
return Response(schema, headers={'X-Deprecated-Paths': json.dumps(_deprecated)})
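The view above does not mark deprecated endpoints inside the schema itself; it tracks them and returns them in an X-Deprecated-Paths response header. A minimal, hypothetical client-side sketch of reading that header (host, port and credentials are placeholders, not values from this diff):
import json
import requests
# The swagger view is only wired up in development mode (see the urls.py hunk further down).
resp = requests.get('http://localhost:8013/api/swagger/', auth=('admin', 'password'))
deprecated_paths = json.loads(resp.headers.get('X-Deprecated-Paths', '[]'))
for path in deprecated_paths:
    print('deprecated endpoint:', path)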

View File

@@ -3,35 +3,21 @@ receptor_group: awx
receptor_verify: true
receptor_tls: true
receptor_mintls13: false
{% if instance.node_type == "execution" %}
receptor_work_commands:
ansible-runner:
command: ansible-runner
params: worker
allowruntimeparams: true
verifysignature: true
additional_python_packages:
- ansible-runner
{% endif %}
custom_worksign_public_keyfile: receptor/work_public_key.pem
custom_worksign_public_keyfile: receptor/work-public-key.pem
custom_tls_certfile: receptor/tls/receptor.crt
custom_tls_keyfile: receptor/tls/receptor.key
custom_ca_certfile: receptor/tls/ca/mesh-CA.crt
custom_ca_certfile: receptor/tls/ca/receptor-ca.crt
receptor_protocol: 'tcp'
{% if instance.listener_port %}
receptor_listener: true
receptor_port: {{ instance.listener_port }}
{% else %}
receptor_listener: false
{% endif %}
{% if peers %}
receptor_peers:
{% for peer in peers %}
- host: {{ peer.host }}
port: {{ peer.port }}
protocol: tcp
{% endfor %}
{% endif %}
receptor_dependencies:
- python39-pip
{% verbatim %}
podman_user: "{{ receptor_user }}"
podman_group: "{{ receptor_group }}"

View File

@@ -1,16 +1,20 @@
{% verbatim %}
---
- hosts: all
become: yes
tasks:
- name: Create the receptor user
user:
{% verbatim %}
name: "{{ receptor_user }}"
{% endverbatim %}
shell: /bin/bash
{% if instance.node_type == "execution" %}
- name: Enable Copr repo for Receptor
command: dnf copr enable ansible-awx/receptor -y
- import_role:
name: ansible.receptor.podman
{% endif %}
- import_role:
name: ansible.receptor.setup
- name: Install ansible-runner
pip:
name: ansible-runner
executable: pip3.9
{% endverbatim %}

View File

@@ -1,4 +1,4 @@
---
collections:
- name: ansible.receptor
version: 2.0.2
version: 1.1.0

View File

@@ -30,7 +30,7 @@ from awx.api.views import (
OAuth2TokenList,
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
HostMetricSummaryMonthlyList,
# HostMetricSummaryMonthlyList, # It will be enabled in a future version of AWX
)
from awx.api.views.bulk import (
@@ -123,7 +123,8 @@ v2_urls = [
re_path(r'^constructed_inventories/', include(constructed_inventory_urls)),
re_path(r'^hosts/', include(host_urls)),
re_path(r'^host_metrics/', include(host_metric_urls)),
re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
# It will be enabled in a future version of AWX
# re_path(r'^host_metric_summary_monthly/$', HostMetricSummaryMonthlyList.as_view(), name='host_metric_summary_monthly_list'),
re_path(r'^groups/', include(group_urls)),
re_path(r'^inventory_sources/', include(inventory_source_urls)),
re_path(r'^inventory_updates/', include(inventory_update_urls)),
@@ -166,13 +167,10 @@ urlpatterns = [
]
if MODE == 'development':
# Only include these if we are in the development environment
from awx.api.swagger import schema_view
from awx.api.swagger import SwaggerSchemaView
urlpatterns += [re_path(r'^swagger/$', SwaggerSchemaView.as_view(), name='swagger_view')]
from awx.api.urls.debug import urls as debug_urls
urlpatterns += [re_path(r'^debug/', include(debug_urls))]
urlpatterns += [
re_path(r'^swagger(?P<format>\.json|\.yaml)/$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]

View File

@@ -128,10 +128,6 @@ logger = logging.getLogger('awx.api.views')
def unpartitioned_event_horizon(cls):
with connection.cursor() as cursor:
cursor.execute(f"SELECT 1 FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '_unpartitioned_{cls._meta.db_table}';")
if not cursor.fetchone():
return 0
with connection.cursor() as cursor:
try:
cursor.execute(f'SELECT MAX(id) FROM _unpartitioned_{cls._meta.db_table}')
@@ -345,18 +341,17 @@ class InstanceDetail(RetrieveUpdateAPIView):
def update_raw_data(self, data):
# these fields are only valid on creation of an instance, so they are unwanted on detail view
data.pop('listener_port', None)
data.pop('node_type', None)
data.pop('hostname', None)
data.pop('ip_address', None)
return super(InstanceDetail, self).update_raw_data(data)
def update(self, request, *args, **kwargs):
r = super(InstanceDetail, self).update(request, *args, **kwargs)
if status.is_success(r.status_code):
obj = self.get_object()
capacity_changed = obj.set_capacity_value()
if capacity_changed:
obj.save(update_fields=['capacity'])
obj.set_capacity_value()
obj.save(update_fields=['capacity'])
r.data = serializers.InstanceSerializer(obj, context=self.get_serializer_context()).to_representation(obj)
return r
@@ -570,7 +565,7 @@ class LaunchConfigCredentialsBase(SubListAttachDetachAPIView):
if self.relationship not in ask_mapping:
return {"msg": _("Related template cannot accept {} on launch.").format(self.relationship)}
elif sub.passwords_needed:
return {"msg": _("Credential that requires user input on launch cannot be used in saved launch configuration.")}
return {"msg": _("Credential that requires user input on launch " "cannot be used in saved launch configuration.")}
ask_field_name = ask_mapping[self.relationship]
@@ -1569,15 +1564,16 @@ class HostMetricDetail(RetrieveDestroyAPIView):
return Response(status=status.HTTP_204_NO_CONTENT)
class HostMetricSummaryMonthlyList(ListAPIView):
name = _("Host Metrics Summary Monthly")
model = models.HostMetricSummaryMonthly
serializer_class = serializers.HostMetricSummaryMonthlySerializer
permission_classes = (IsSystemAdminOrAuditor,)
search_fields = ('date',)
def get_queryset(self):
return self.model.objects.all()
# It will be enabled in a future version of AWX
# class HostMetricSummaryMonthlyList(ListAPIView):
# name = _("Host Metrics Summary Monthly")
# model = models.HostMetricSummaryMonthly
# serializer_class = serializers.HostMetricSummaryMonthlySerializer
# permission_classes = (IsSystemAdminOrAuditor,)
# search_fields = ('date',)
#
# def get_queryset(self):
# return self.model.objects.all()
class HostList(HostRelatedSearchMixin, ListCreateAPIView):
@@ -2505,7 +2501,7 @@ class JobTemplateSurveySpec(GenericAPIView):
return Response(
dict(
error=_(
"$encrypted$ is a reserved keyword for password question defaults, survey question {idx} is type {survey_item[type]}."
"$encrypted$ is a reserved keyword for password question defaults, " "survey question {idx} is type {survey_item[type]}."
).format(**context)
),
status=status.HTTP_400_BAD_REQUEST,
@@ -3337,6 +3333,7 @@ class JobLabelList(SubListAPIView):
serializer_class = serializers.LabelSerializer
parent_model = models.Job
relationship = 'labels'
parent_key = 'job'
class WorkflowJobLabelList(JobLabelList):
@@ -4059,7 +4056,7 @@ class UnifiedJobStdout(RetrieveAPIView):
return super(UnifiedJobStdout, self).retrieve(request, *args, **kwargs)
except models.StdoutMaxBytesExceeded as e:
response_message = _(
"Standard Output too large to display ({text_size} bytes), only download supported for sizes over {supported_size} bytes."
"Standard Output too large to display ({text_size} bytes), " "only download supported for sizes over {supported_size} bytes."
).format(text_size=e.total, supported_size=e.supported)
if request.accepted_renderer.format == 'json':
return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message})

View File

@@ -1,7 +1,5 @@
from collections import OrderedDict
from django.utils.translation import gettext_lazy as _
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
@@ -20,9 +18,6 @@ from awx.api import (
class BulkView(APIView):
name = _('Bulk')
swagger_topic = 'Bulk'
permission_classes = [IsAuthenticated]
renderer_classes = [
renderers.BrowsableAPIRenderer,

View File

@@ -6,8 +6,6 @@ import io
import ipaddress
import os
import tarfile
import time
import re
import asn1
from awx.api import serializers
@@ -42,8 +40,6 @@ RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
# │ │ └── receptor.key
# │ └── work-public-key.pem
# └── requirements.yml
class InstanceInstallBundle(GenericAPIView):
name = _('Install Bundle')
model = models.Instance
@@ -53,54 +49,56 @@ class InstanceInstallBundle(GenericAPIView):
def get(self, request, *args, **kwargs):
instance_obj = self.get_object()
if instance_obj.node_type not in ('execution', 'hop'):
if instance_obj.node_type not in ('execution',):
return Response(
data=dict(msg=_('Install bundle can only be generated for execution or hop nodes.')),
data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
status=status.HTTP_400_BAD_REQUEST,
)
with io.BytesIO() as f:
with tarfile.open(fileobj=f, mode='w:gz') as tar:
# copy /etc/receptor/tls/ca/mesh-CA.crt to receptor/tls/ca in the tar file
tar.add(os.path.realpath('/etc/receptor/tls/ca/mesh-CA.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/mesh-CA.crt")
# copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
tar.add(
os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
)
# copy /etc/receptor/work_public_key.pem to receptor/work_public_key.pem
tar.add('/etc/receptor/work_public_key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work_public_key.pem")
# copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
# generate and write the receptor key to receptor/tls/receptor.key in the tar file
key, cert = generate_receptor_tls(instance_obj)
def tar_addfile(tarinfo, filecontent):
tarinfo.mtime = time.time()
tarinfo.size = len(filecontent)
tar.addfile(tarinfo, io.BytesIO(filecontent))
key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
tar_addfile(key_tarinfo, key)
key_tarinfo.size = len(key)
tar.addfile(key_tarinfo, io.BytesIO(key))
cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
cert_tarinfo.size = len(cert)
tar_addfile(cert_tarinfo, cert)
tar.addfile(cert_tarinfo, io.BytesIO(cert))
# generate and write install_receptor.yml to the tar file
playbook = generate_playbook(instance_obj).encode('utf-8')
playbook = generate_playbook().encode('utf-8')
playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
tar_addfile(playbook_tarinfo, playbook)
playbook_tarinfo.size = len(playbook)
tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
# generate and write inventory.yml to the tar file
inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
tar_addfile(inventory_yml_tarinfo, inventory_yml)
inventory_yml_tarinfo.size = len(inventory_yml)
tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
# generate and write group_vars/all.yml to the tar file
group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
tar_addfile(group_vars_tarinfo, group_vars)
group_vars_tarinfo.size = len(group_vars)
tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
# generate and write requirements.yml to the tar file
requirements_yml = generate_requirements_yml().encode('utf-8')
requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
tar_addfile(requirements_yml_tarinfo, requirements_yml)
requirements_yml_tarinfo.size = len(requirements_yml)
tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
# respond with the tarfile
f.seek(0)
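The hunk above assembles the install bundle entirely in memory: each generated artifact is wrapped in a tarfile.TarInfo (with its size set explicitly) and written with tar.addfile. A self-contained sketch of that pattern, with made-up file names and contents:
import io
import tarfile
import time

def build_bundle(files):
    """Pack a mapping of {archive path: bytes} into an in-memory .tar.gz and return its bytes."""
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w:gz') as tar:
        for name, content in files.items():
            info = tarfile.TarInfo(name)
            info.size = len(content)   # required; without it the member is truncated to 0 bytes
            info.mtime = time.time()   # otherwise members are dated 1970-01-01
            tar.addfile(info, io.BytesIO(content))
    buf.seek(0)
    return buf.read()

bundle = build_bundle(
    {
        'node1_install_bundle/inventory.yml': b'all:\n  hosts:\n    node1.example.org:\n',
        'node1_install_bundle/receptor/tls/receptor.key': b'-----BEGIN PRIVATE KEY-----\n',
    }
)
print(len(bundle), 'bytes of gzipped tar')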
@@ -109,10 +107,8 @@ class InstanceInstallBundle(GenericAPIView):
return response
def generate_playbook(instance_obj):
playbook_yaml = render_to_string("instance_install_bundle/install_receptor.yml", context=dict(instance=instance_obj))
# collapse consecutive newlines into a single newline
return re.sub(r'\n+', '\n', playbook_yaml)
def generate_playbook():
return render_to_string("instance_install_bundle/install_receptor.yml")
def generate_requirements_yml():
@@ -124,12 +120,7 @@ def generate_inventory_yml(instance_obj):
def generate_group_vars_all_yml(instance_obj):
peers = []
for instance in instance_obj.peers.all():
peers.append(dict(host=instance.hostname, port=instance.listener_port))
all_yaml = render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj, peers=peers))
# collapse consecutive newlines into a single newline
return re.sub(r'\n+', '\n', all_yaml)
return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
def generate_receptor_tls(instance_obj):
@@ -170,14 +161,14 @@ def generate_receptor_tls(instance_obj):
.sign(key, hashes.SHA256())
)
# sign csr with the receptor ca key from /etc/receptor/tls/ca/mesh-CA.key
with open('/etc/receptor/tls/ca/mesh-CA.key', 'rb') as f:
# sign csr with the receptor ca key from /etc/receptor/tls/ca/receptor-ca.key
with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
ca_key = serialization.load_pem_private_key(
f.read(),
password=None,
)
with open('/etc/receptor/tls/ca/mesh-CA.crt', 'rb') as f:
with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
ca_cert = x509.load_pem_x509_certificate(f.read())
cert = (

View File

@@ -50,7 +50,7 @@ class UnifiedJobDeletionMixin(object):
return Response({"error": _("Job has not finished processing events.")}, status=status.HTTP_400_BAD_REQUEST)
else:
# if it has been > 1 minute, events are probably lost
logger.warning('Allowing deletion of {} through the API without all events processed.'.format(obj.log_format))
logger.warning('Allowing deletion of {} through the API without all events ' 'processed.'.format(obj.log_format))
# Manually cascade delete events if unpartitioned job
if obj.has_unpartitioned_events:

View File

@@ -20,7 +20,6 @@ from rest_framework import status
import requests
from awx import MODE
from awx.api.generics import APIView
from awx.conf.registry import settings_registry
from awx.main.analytics import all_collectors
@@ -55,8 +54,6 @@ class ApiRootView(APIView):
data['custom_logo'] = settings.CUSTOM_LOGO
data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
if MODE == 'development':
data['swagger'] = drf_reverse('api:schema-swagger-ui')
return Response(data)
@@ -107,7 +104,8 @@ class ApiVersionRootView(APIView):
data['groups'] = reverse('api:group_list', request=request)
data['hosts'] = reverse('api:host_list', request=request)
data['host_metrics'] = reverse('api:host_metric_list', request=request)
data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
# It will be enabled in a future version of AWX
# data['host_metric_summary_monthly'] = reverse('api:host_metric_summary_monthly_list', request=request)
data['job_templates'] = reverse('api:job_template_list', request=request)
data['jobs'] = reverse('api:job_list', request=request)
data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)

View File

@@ -114,7 +114,7 @@ class WebhookReceiverBase(APIView):
# Ensure that the full contents of the request are captured for multiple uses.
request.body
logger.debug("headers: {}\ndata: {}\n".format(request.headers, request.data))
logger.debug("headers: {}\n" "data: {}\n".format(request.headers, request.data))
obj = self.get_object()
self.check_signature(obj)

View File

@@ -14,7 +14,7 @@ class ConfConfig(AppConfig):
def ready(self):
self.module.autodiscover()
if not set(sys.argv) & {'migrate', 'check_migrations', 'showmigrations'}:
if not set(sys.argv) & {'migrate', 'check_migrations'}:
from .settings import SettingsWrapper
SettingsWrapper.initialize()

View File

@@ -1,17 +0,0 @@
# Generated by Django 4.2 on 2023-06-09 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('conf', '0009_rename_proot_settings'),
]
operations = [
migrations.AlterField(
model_name='setting',
name='value',
field=models.JSONField(null=True),
),
]

View File

@@ -8,6 +8,7 @@ import json
from django.db import models
# AWX
from awx.main.fields import JSONBlob
from awx.main.models.base import CreatedModifiedModel, prevent_search
from awx.main.utils import encrypt_field
from awx.conf import settings_registry
@@ -17,7 +18,7 @@ __all__ = ['Setting']
class Setting(CreatedModifiedModel):
key = models.CharField(max_length=255)
value = models.JSONField(null=True)
value = JSONBlob(null=True)
user = prevent_search(models.ForeignKey('auth.User', related_name='settings', default=None, null=True, editable=False, on_delete=models.CASCADE))
def __str__(self):

View File

@@ -418,10 +418,6 @@ class SettingsWrapper(UserSettingsHolder):
"""Get value while accepting the in-memory cache if key is available"""
with _ctit_db_wrapper(trans_safe=True):
return self._get_local(name)
# If the last line did not return, that means we hit a database error
# in that case, we should not have a local cache value
# thus, return empty as a signal to use the default
return empty
def __getattr__(self, name):
value = empty

View File

@@ -35,7 +35,7 @@ class TestStringListBooleanField:
field = StringListBooleanField()
with pytest.raises(ValidationError) as e:
field.to_internal_value(value)
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
@pytest.mark.parametrize("value_in, value_known", FIELD_VALUES)
def test_to_representation_valid(self, value_in, value_known):
@@ -48,7 +48,7 @@ class TestStringListBooleanField:
field = StringListBooleanField()
with pytest.raises(ValidationError) as e:
field.to_representation(value)
assert e.value.detail[0] == "Expected None, True, False, a string or list of strings but got {} instead.".format(type(value))
assert e.value.detail[0] == "Expected None, True, False, a string or list " "of strings but got {} instead.".format(type(value))
class TestListTuplesField:
@@ -67,7 +67,7 @@ class TestListTuplesField:
field = ListTuplesField()
with pytest.raises(ValidationError) as e:
field.to_internal_value(value)
assert e.value.detail[0] == "Expected a list of tuples of max length 2 but got {} instead.".format(t)
assert e.value.detail[0] == "Expected a list of tuples of max length 2 " "but got {} instead.".format(t)
class TestStringListPathField:

View File

@@ -13,7 +13,6 @@ from unittest import mock
from django.conf import LazySettings
from django.core.cache.backends.locmem import LocMemCache
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import Error as DBError
from django.utils.translation import gettext_lazy as _
import pytest
@@ -332,18 +331,3 @@ def test_in_memory_cache_works(settings):
with mock.patch.object(settings, '_get_local') as mock_get:
assert settings.AWX_VAR == 'DEFAULT'
mock_get.assert_not_called()
@pytest.mark.defined_in_file(AWX_VAR=[])
def test_getattr_with_database_error(settings):
"""
If a setting is defined via the registry and has a null-ish default which is not None,
then referencing that setting during a database outage should give that default.
This is regression testing for a bug where it would return None.
"""
settings.registry.register('AWX_VAR', field_class=fields.StringListField, default=[], category=_('System'), category_slug='system')
settings._awx_conf_memoizedcache.clear()
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection') as mock_ensure:
mock_ensure.side_effect = DBError('for test')
assert settings.AWX_VAR == []

View File

@@ -79,6 +79,7 @@ __all__ = [
'get_user_queryset',
'check_user_access',
'check_user_access_with_errors',
'user_accessible_objects',
'consumer_access',
]
@@ -135,6 +136,10 @@ def register_access(model_class, access_class):
access_registry[model_class] = access_class
def user_accessible_objects(user, role_name):
return ResourceMixin._accessible_objects(User, user, role_name)
def get_user_queryset(user, model_class):
"""
Return a queryset for the given model_class containing only the instances
@@ -361,9 +366,9 @@ class BaseAccess(object):
report_violation = lambda message: None
else:
report_violation = lambda message: logger.warning(message)
if validation_info.get('trial', False) is True:
if validation_info.get('trial', False) is True or validation_info['instance_count'] == 10: # basic 10 license
def report_violation(message): # noqa
def report_violation(message):
raise PermissionDenied(message)
if check_expiration and validation_info.get('time_remaining', None) is None:
@@ -2229,7 +2234,7 @@ class WorkflowJobAccess(BaseAccess):
if not node_access.can_add({'reference_obj': node}):
wj_add_perm = False
if not wj_add_perm and self.save_messages:
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job resources required for relaunch.')
self.messages['workflow_job_template'] = _('You do not have permission to the workflow job ' 'resources required for relaunch.')
return wj_add_perm
def can_cancel(self, obj):

View File

@@ -613,20 +613,3 @@ def host_metric_table(since, full_path, until, **kwargs):
since.isoformat(), until.isoformat(), since.isoformat(), until.isoformat()
)
return _copy_table(table='host_metric', query=host_metric_query, path=full_path)
@register('host_metric_summary_monthly_table', '1.0', format='csv', description=_('HostMetricSummaryMonthly export, full sync'), expensive=trivial_slicing)
def host_metric_summary_monthly_table(since, full_path, **kwargs):
query = '''
COPY (SELECT main_hostmetricsummarymonthly.id,
main_hostmetricsummarymonthly.date,
main_hostmetricsummarymonthly.license_capacity,
main_hostmetricsummarymonthly.license_consumed,
main_hostmetricsummarymonthly.hosts_added,
main_hostmetricsummarymonthly.hosts_deleted,
main_hostmetricsummarymonthly.indirectly_managed_hosts
FROM main_hostmetricsummarymonthly
ORDER BY main_hostmetricsummarymonthly.id ASC) TO STDOUT WITH CSV HEADER
'''
return _copy_table(table='host_metric_summary_monthly', query=query, path=full_path)

View File

@@ -209,11 +209,6 @@ class Metrics:
SetFloatM('workflow_manager_recorded_timestamp', 'Unix timestamp when metrics were last recorded'),
SetFloatM('workflow_manager_spawn_workflow_graph_jobs_seconds', 'Time spent spawning workflow tasks'),
SetFloatM('workflow_manager_get_tasks_seconds', 'Time spent loading workflow tasks from db'),
# dispatcher subsystem metrics
SetIntM('dispatcher_pool_scale_up_events', 'Number of times local dispatcher scaled up a worker since startup'),
SetIntM('dispatcher_pool_active_task_count', 'Number of active tasks in the worker pool when last task was submitted'),
SetIntM('dispatcher_pool_max_worker_count', 'Highest number of workers in worker pool in last collection interval, about 20s'),
SetFloatM('dispatcher_availability', 'Fraction of time (in last collection interval) dispatcher was able to receive messages'),
]
# turn metric list into dictionary with the metric name as a key
self.METRICS = {}

View File

@@ -1,87 +0,0 @@
import functools
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.core.cache.backends.redis import RedisCache
from redis.exceptions import ConnectionError, ResponseError, TimeoutError
import socket
# This list comes from what django-redis ignores and the behavior we are trying
# to retain while dropping the dependency on django-redis.
IGNORED_EXCEPTIONS = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
CONNECTION_INTERRUPTED_SENTINEL = object()
def optionally_ignore_exceptions(func=None, return_value=None):
if func is None:
return functools.partial(optionally_ignore_exceptions, return_value=return_value)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except IGNORED_EXCEPTIONS as e:
if settings.DJANGO_REDIS_IGNORE_EXCEPTIONS:
return return_value
raise e.__cause__ or e
return wrapper
class AWXRedisCache(RedisCache):
"""
We just want to wrap the upstream RedisCache class so that we can ignore
the exceptions that it raises when the cache is unavailable.
"""
@optionally_ignore_exceptions
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
return super().add(key, value, timeout, version)
@optionally_ignore_exceptions(return_value=CONNECTION_INTERRUPTED_SENTINEL)
def _get(self, key, default=None, version=None):
return super().get(key, default, version)
def get(self, key, default=None, version=None):
value = self._get(key, default, version)
if value is CONNECTION_INTERRUPTED_SENTINEL:
return default
return value
@optionally_ignore_exceptions
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
return super().set(key, value, timeout, version)
@optionally_ignore_exceptions
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
return super().touch(key, timeout, version)
@optionally_ignore_exceptions
def delete(self, key, version=None):
return super().delete(key, version)
@optionally_ignore_exceptions
def get_many(self, keys, version=None):
return super().get_many(keys, version)
@optionally_ignore_exceptions
def has_key(self, key, version=None):
return super().has_key(key, version)
@optionally_ignore_exceptions
def incr(self, key, delta=1, version=None):
return super().incr(key, delta, version)
@optionally_ignore_exceptions
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
return super().set_many(data, timeout, version)
@optionally_ignore_exceptions
def delete_many(self, keys, version=None):
return super().delete_many(keys, version)
@optionally_ignore_exceptions
def clear(self):
return super().clear()

View File

@@ -94,20 +94,6 @@ register(
category_slug='system',
)
register(
'CSRF_TRUSTED_ORIGINS',
default=[],
field_class=fields.StringListField,
label=_('CSRF Trusted Origins List'),
help_text=_(
"If the service is behind a reverse proxy/load balancer, use this setting "
"to configure the schema://addresses from which the service should trust "
"Origin header values. "
),
category=_('System'),
category_slug='system',
)
register(
'LICENSE',
field_class=fields.DictField,
@@ -694,33 +680,15 @@ register(
category_slug='logging',
)
register(
'LOG_AGGREGATOR_ACTION_QUEUE_SIZE',
field_class=fields.IntegerField,
default=131072,
min_value=1,
label=_('Maximum number of messages that can be stored in the log action queue'),
help_text=_(
'Defines how large the rsyslog action queue can grow in number of messages '
'stored. This can have an impact on memory utilization. When the queue '
'reaches 75% of this number, the queue will start writing to disk '
'(queue.highWatermark in rsyslog). When it reaches 90%, NOTICE, INFO, and '
'DEBUG messages will start to be discarded (queue.discardMark with '
'queue.discardSeverity=5).'
),
category=_('Logging'),
category_slug='logging',
)
register(
'LOG_AGGREGATOR_ACTION_MAX_DISK_USAGE_GB',
'LOG_AGGREGATOR_MAX_DISK_USAGE_GB',
field_class=fields.IntegerField,
default=1,
min_value=1,
label=_('Maximum disk persistence for rsyslogd action queuing (in GB)'),
label=_('Maximum disk persistence for external log aggregation (in GB)'),
help_text=_(
'Amount of data to store (in gigabytes) if an rsyslog action takes time '
'to process an incoming message (defaults to 1). '
'Equivalent to the rsyslogd queue.maxdiskspace setting on the action (e.g. omhttp). '
'It stores files in the directory specified by LOG_AGGREGATOR_MAX_DISK_USAGE_PATH.'
'Amount of data to store (in gigabytes) during an outage of '
'the external log aggregator (defaults to 1). '
'Equivalent to the rsyslogd queue.maxdiskspace setting.'
),
category=_('Logging'),
category_slug='logging',
@@ -863,55 +831,6 @@ register(
category_slug='system',
)
register(
'HOST_METRIC_SUMMARY_TASK_LAST_TS',
field_class=fields.DateTimeField,
label=_('Last computing date of HostMetricSummaryMonthly'),
allow_null=True,
category=_('System'),
category_slug='system',
)
register(
'AWX_CLEANUP_PATHS',
field_class=fields.BooleanField,
label=_('Enable or Disable tmp dir cleanup'),
default=True,
help_text=_('Enable or Disable TMP Dir cleanup'),
category=('Debug'),
category_slug='debug',
)
register(
'AWX_REQUEST_PROFILE',
field_class=fields.BooleanField,
label=_('Debug Web Requests'),
default=False,
help_text=_('Debug web request python timing'),
category=('Debug'),
category_slug='debug',
)
register(
'DEFAULT_CONTAINER_RUN_OPTIONS',
field_class=fields.StringListField,
label=_('Container Run Options'),
default=['--network', 'slirp4netns:enable_ipv6=true'],
help_text=_("List of options to pass to podman run example: ['--network', 'slirp4netns:enable_ipv6=true', '--log-level', 'debug']"),
category=('Jobs'),
category_slug='jobs',
)
register(
'RECEPTOR_RELEASE_WORK',
field_class=fields.BooleanField,
label=_('Release Receptor Work'),
default=True,
help_text=_('Release receptor work'),
category=('Debug'),
category_slug='debug',
)
def logging_validate(serializer, attrs):
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):

View File

@@ -1,65 +0,0 @@
import boto3
from botocore.exceptions import ClientError
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _
secrets_manager_inputs = {
'fields': [
{
'id': 'aws_access_key',
'label': _('AWS Access Key'),
'type': 'string',
},
{
'id': 'aws_secret_key',
'label': _('AWS Secret Key'),
'type': 'string',
'secret': True,
},
],
'metadata': [
{
'id': 'region_name',
'label': _('AWS Secrets Manager Region'),
'type': 'string',
'help_text': _('Region which the secrets manager is located'),
},
{
'id': 'secret_name',
'label': _('AWS Secret Name'),
'type': 'string',
},
],
'required': ['aws_access_key', 'aws_secret_key', 'region_name', 'secret_name'],
}
def aws_secretsmanager_backend(**kwargs):
secret_name = kwargs['secret_name']
region_name = kwargs['region_name']
aws_secret_access_key = kwargs['aws_secret_key']
aws_access_key_id = kwargs['aws_access_key']
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager', region_name=region_name, aws_secret_access_key=aws_secret_access_key, aws_access_key_id=aws_access_key_id
)
try:
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
except ClientError as e:
raise e
# Secrets Manager decrypts the secret value using the associated KMS CMK
# Depending on whether the secret was a string or binary, only one of these fields will be populated
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
secret = get_secret_value_response['SecretBinary']
return secret
aws_secretmanager_plugin = CredentialPlugin('AWS Secrets Manager lookup', inputs=secrets_manager_inputs, backend=aws_secretsmanager_backend)

View File

@@ -4,8 +4,6 @@ from urllib.parse import urljoin, quote
from django.utils.translation import gettext_lazy as _
import requests
import base64
import binascii
conjur_inputs = {
@@ -52,13 +50,6 @@ conjur_inputs = {
}
def _is_base64(s: str) -> bool:
try:
return base64.b64encode(base64.b64decode(s.encode("utf-8"))) == s.encode("utf-8")
except binascii.Error:
return False
def conjur_backend(**kwargs):
url = kwargs['url']
api_key = kwargs['api_key']
@@ -86,7 +77,7 @@ def conjur_backend(**kwargs):
token = resp.content.decode('utf-8')
lookup_kwargs = {
'headers': {'Authorization': 'Token token="{}"'.format(token if _is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8'))},
'headers': {'Authorization': 'Token token="{}"'.format(token)},
'allow_redirects': False,
}
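The _is_base64 helper toggled in this hunk uses a decode/re-encode round trip: a token only counts as base64 if re-encoding the decoded bytes reproduces the original string exactly. A standalone illustration with made-up token values:
import base64
import binascii

def is_base64(s):
    try:
        return base64.b64encode(base64.b64decode(s.encode('utf-8'))) == s.encode('utf-8')
    except binascii.Error:
        return False

plain = 'session-token'
already_encoded = base64.b64encode(b'session-token').decode('utf-8')

for token in (plain, already_encoded):
    value = token if is_base64(token) else base64.b64encode(token.encode('utf-8')).decode('utf-8')
    # Either way the header value ends up base64 exactly once, never double-encoded.
    print('Token token="{}"'.format(value))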

View File

@@ -2,29 +2,25 @@ from .plugin import CredentialPlugin
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from delinea.secrets.vault import PasswordGrantAuthorizer, SecretsVault
from base64 import b64decode
from thycotic.secrets.vault import SecretsVault
dsv_inputs = {
'fields': [
{
'id': 'tenant',
'label': _('Tenant'),
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretsvaultcloud.com'),
'help_text': _('The tenant e.g. "ex" when the URL is https://ex.secretservercloud.com'),
'type': 'string',
},
{
'id': 'tld',
'label': _('Top-level Domain (TLD)'),
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretsvaultcloud.com'),
'choices': ['ca', 'com', 'com.au', 'eu'],
'help_text': _('The TLD of the tenant e.g. "com" when the URL is https://ex.secretservercloud.com'),
'choices': ['ca', 'com', 'com.au', 'com.sg', 'eu'],
'default': 'com',
},
{
'id': 'client_id',
'label': _('Client ID'),
'type': 'string',
},
{'id': 'client_id', 'label': _('Client ID'), 'type': 'string'},
{
'id': 'client_secret',
'label': _('Client Secret'),
@@ -45,16 +41,8 @@ dsv_inputs = {
'help_text': _('The field to extract from the secret'),
'type': 'string',
},
{
'id': 'secret_decoding',
'label': _('Should the secret be base64 decoded?'),
'help_text': _('Specify whether the secret should be base64 decoded, typically used for storing files, such as SSH keys'),
'choices': ['No Decoding', 'Decode Base64'],
'type': 'string',
'default': 'No Decoding',
},
],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field', 'secret_decoding'],
'required': ['tenant', 'client_id', 'client_secret', 'path', 'secret_field'],
}
if settings.DEBUG:
@@ -63,32 +51,12 @@ if settings.DEBUG:
'id': 'url_template',
'label': _('URL template'),
'type': 'string',
'default': 'https://{}.secretsvaultcloud.{}',
'default': 'https://{}.secretsvaultcloud.{}/v1',
}
)
def dsv_backend(**kwargs):
tenant_name = kwargs['tenant']
tenant_tld = kwargs.get('tld', 'com')
tenant_url_template = kwargs.get('url_template', 'https://{}.secretsvaultcloud.{}')
client_id = kwargs['client_id']
client_secret = kwargs['client_secret']
secret_path = kwargs['path']
secret_field = kwargs['secret_field']
# providing a default value to remain backward compatible for secrets that have not specified this option
secret_decoding = kwargs.get('secret_decoding', 'No Decoding')
tenant_url = tenant_url_template.format(tenant_name, tenant_tld.strip("."))
authorizer = PasswordGrantAuthorizer(tenant_url, client_id, client_secret)
dsv_secret = SecretsVault(tenant_url, authorizer).get_secret(secret_path)
# files are typically uploaded to DSV base64 encoded, so only decode when explicitly asked to
if secret_decoding == 'Decode Base64':
return b64decode(dsv_secret['data'][secret_field]).decode()
return dsv_secret['data'][secret_field]
dsv_plugin = CredentialPlugin(name='Thycotic DevOps Secrets Vault', inputs=dsv_inputs, backend=dsv_backend)
dsv_plugin = CredentialPlugin(
'Thycotic DevOps Secrets Vault',
dsv_inputs,
lambda **kwargs: SecretsVault(**{k: v for (k, v) in kwargs.items() if k in [field['id'] for field in dsv_inputs['fields']]}).get_secret(kwargs['path'])['data'][kwargs['secret_field']], # fmt: skip
)
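The secret_decoding option in this hunk exists because file-type secrets (SSH keys, certificates) are commonly stored in DSV base64-encoded, while decoding stays opt-in so existing plain-text secrets keep working. A small illustration of that opt-in decode, using sample data only:
from base64 import b64decode, b64encode

# Pretend this is the value found at dsv_secret['data'][secret_field].
stored_value = b64encode(b'-----BEGIN OPENSSH PRIVATE KEY-----\nabc123\n').decode()

for secret_decoding in ('No Decoding', 'Decode Base64'):
    value = b64decode(stored_value).decode() if secret_decoding == 'Decode Base64' else stored_value
    print(secret_decoding, '->', value.splitlines()[0])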

View File

@@ -265,8 +265,6 @@ def kv_backend(**kwargs):
if secret_key:
try:
if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
return json['data']['data'][secret_key]
return json['data'][secret_key]
except KeyError:
raise RuntimeError('{} is not present at {}'.format(secret_key, secret_path))
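The conditional above handles the difference between HashiCorp Vault's KV version 1 and version 2 engines: KV v2 nests the payload one level deeper under data.data. A toy illustration of the two response shapes and the extraction rule (values are made up):
kv_v1_response = {'data': {'password': 's3cret'}}
kv_v2_response = {'data': {'data': {'password': 's3cret'}, 'metadata': {'version': 3}}}

def extract(json, secret_key):
    if (secret_key != 'data') and (secret_key not in json['data']) and ('data' in json['data']):
        return json['data']['data'][secret_key]   # KV v2: the real payload lives under data.data
    return json['data'][secret_key]               # KV v1, or a key stored literally at the top level

assert extract(kv_v1_response, 'password') == 's3cret'
assert extract(kv_v2_response, 'password') == 's3cret'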

View File

@@ -1,10 +1,7 @@
from .plugin import CredentialPlugin
from django.utils.translation import gettext_lazy as _
try:
from delinea.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
except ImportError:
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
from thycotic.secrets.server import DomainPasswordGrantAuthorizer, PasswordGrantAuthorizer, SecretServer, ServerSecret
tss_inputs = {
'fields': [
@@ -53,10 +50,8 @@ tss_inputs = {
def tss_backend(**kwargs):
if kwargs.get("domain"):
authorizer = DomainPasswordGrantAuthorizer(
base_url=kwargs['server_url'], username=kwargs['username'], domain=kwargs['domain'], password=kwargs['password']
)
if 'domain' in kwargs:
authorizer = DomainPasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'], kwargs['domain'])
else:
authorizer = PasswordGrantAuthorizer(kwargs['server_url'], kwargs['username'], kwargs['password'])
secret_server = SecretServer(kwargs['server_url'], authorizer)

View File

@@ -87,7 +87,7 @@ class RecordedQueryLog(object):
)
log.commit()
log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) VALUES (?, ?, ?, ?, ?, ?, ?);',
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) ' 'VALUES (?, ?, ?, ?, ?, ?, ?);',
(os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt),
)
log.commit()

View File

@@ -1,5 +1,5 @@
import os
import psycopg
import psycopg2
import select
from contextlib import contextmanager
@@ -40,12 +40,8 @@ def get_task_queuename():
class PubSub(object):
def __init__(self, conn, select_timeout=None):
def __init__(self, conn):
self.conn = conn
if select_timeout is None:
self.select_timeout = 5
else:
self.select_timeout = select_timeout
def listen(self, channel):
with self.conn.cursor() as cur:
@@ -59,42 +55,25 @@ class PubSub(object):
with self.conn.cursor() as cur:
cur.execute('SELECT pg_notify(%s, %s);', (channel, payload))
@staticmethod
def current_notifies(conn):
"""
Altered version of .notifies method from psycopg library
This removes the outer while True loop so that we only process
queued notifications
"""
with conn.lock:
try:
ns = conn.wait(psycopg.generators.notifies(conn.pgconn))
except psycopg.errors._NO_TRACEBACK as ex:
raise ex.with_traceback(None)
enc = psycopg._encodings.pgconn_encoding(conn.pgconn)
for pgn in ns:
n = psycopg.connection.Notify(pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid)
yield n
def events(self, yield_timeouts=False):
def events(self, select_timeout=5, yield_timeouts=False):
if not self.conn.autocommit:
raise RuntimeError('Listening for events can only be done in autocommit mode')
while True:
if select.select([self.conn], [], [], self.select_timeout) == NOT_READY:
if select.select([self.conn], [], [], select_timeout) == NOT_READY:
if yield_timeouts:
yield None
else:
notification_generator = self.current_notifies(self.conn)
for notification in notification_generator:
yield notification
self.conn.poll()
while self.conn.notifies:
yield self.conn.notifies.pop(0)
def close(self):
self.conn.close()
@contextmanager
def pg_bus_conn(new_connection=False, select_timeout=None):
def pg_bus_conn(new_connection=False):
'''
Any listeners probably want to establish a new database connection,
separate from the Django connection used for queries, because that will prevent
@@ -110,8 +89,9 @@ def pg_bus_conn(new_connection=False, select_timeout=None):
conf['OPTIONS'] = conf.get('OPTIONS', {}).copy()
# Modify the application name to distinguish from other connections the process might use
conf['OPTIONS']['application_name'] = get_application_name(settings.CLUSTER_HOST_ID, function='listener')
connection_data = f"dbname={conf['NAME']} host={conf['HOST']} user={conf['USER']} password={conf['PASSWORD']} port={conf['PORT']}"
conn = psycopg.connect(connection_data, autocommit=True, **conf['OPTIONS'])
conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf['OPTIONS'])
# Django connection.cursor().connection doesn't have autocommit=True on by default
conn.set_session(autocommit=True)
else:
if pg_connection.connection is None:
pg_connection.connect()
@@ -119,7 +99,7 @@ def pg_bus_conn(new_connection=False, select_timeout=None):
raise RuntimeError('Unexpectedly could not connect to postgres for pg_notify actions')
conn = pg_connection.connection
pubsub = PubSub(conn, select_timeout=select_timeout)
pubsub = PubSub(conn)
yield pubsub
if new_connection:
conn.close()
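The psycopg2 flavour of PubSub in this hunk is the classic select()-based LISTEN/NOTIFY loop. A minimal standalone sketch of that loop; the connection parameters and channel name are placeholders, not values from this diff:
import select

import psycopg2

conn = psycopg2.connect(dbname='awx', host='localhost', user='awx', password='secret', port=5432)
conn.set_session(autocommit=True)     # LISTEN/NOTIFY needs autocommit

with conn.cursor() as cur:
    cur.execute('LISTEN my_channel;')

while True:
    # Wait up to 5 seconds for the connection socket to become readable.
    if select.select([conn], [], [], 5) == ([], [], []):
        continue                      # timeout: nothing arrived, loop again
    conn.poll()                       # pull any pending notifications off the wire
    while conn.notifies:
        notify = conn.notifies.pop(0)
        print(notify.channel, notify.payload)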

View File

@@ -37,14 +37,8 @@ class Control(object):
def running(self, *args, **kwargs):
return self.control_with_reply('running', *args, **kwargs)
def cancel(self, task_ids, with_reply=True):
if with_reply:
return self.control_with_reply('cancel', extra_data={'task_ids': task_ids})
else:
self.control({'control': 'cancel', 'task_ids': task_ids, 'reply_to': None}, extra_data={'task_ids': task_ids})
def schedule(self, *args, **kwargs):
return self.control_with_reply('schedule', *args, **kwargs)
def cancel(self, task_ids, *args, **kwargs):
return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)
@classmethod
def generate_reply_queue_name(cls):
@@ -58,14 +52,14 @@ class Control(object):
if not connection.get_autocommit():
raise RuntimeError('Control-with-reply messages can only be done in autocommit mode')
with pg_bus_conn(select_timeout=timeout) as conn:
with pg_bus_conn() as conn:
conn.listen(reply_queue)
send_data = {'control': command, 'reply_to': reply_queue}
if extra_data:
send_data.update(extra_data)
conn.notify(self.queuename, json.dumps(send_data))
for reply in conn.events(yield_timeouts=True):
for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
if reply is None:
logger.error(f'{self.service} did not reply within {timeout}s')
raise RuntimeError(f"{self.service} did not reply within {timeout}s")

View File

@@ -1,142 +1,57 @@
import logging
import os
import time
import yaml
from datetime import datetime
from multiprocessing import Process
from django.conf import settings
from django.db import connections
from schedule import Scheduler
from django_guid import set_guid
from django_guid.utils import generate_guid
from awx.main.dispatch.worker import TaskWorker
from awx.main.utils.db import set_connection_name
logger = logging.getLogger('awx.main.dispatch.periodic')
class ScheduledTask:
"""
Class representing schedules, very loosely modeled after python schedule library Job
the idea of this class is to:
- only deal in relative times (time since the scheduler global start)
- only deal in integer math for target runtimes, but float for current relative time
class Scheduler(Scheduler):
def run_continuously(self):
idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)
Missed schedule policy:
Invariant target times are maintained, meaning that if interval=10s offset=0
and it runs at t=7s, then it calls for next run in 3s.
However, if a complete interval has passed, that is counted as a missed run,
and missed runs are abandoned (no catch-up runs).
"""
def run():
ppid = os.getppid()
logger.warning('periodic beat started')
def __init__(self, name: str, data: dict):
# parameters need for schedule computation
self.interval = int(data['schedule'].total_seconds())
self.offset = 0 # offset relative to start time this schedule begins
self.index = 0 # number of periods of the schedule that has passed
set_connection_name('periodic') # set application_name to distinguish from other dispatcher processes
# parameters that do not affect scheduling logic
self.last_run = None # time of last run, only used for debug
self.completed_runs = 0 # number of times schedule is known to run
self.name = name
self.data = data # used by caller to know what to run
while True:
if os.getppid() != ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
pid = os.getpid()
logger.warning(f'periodic beat exiting gracefully pid:{pid}')
raise SystemExit()
try:
for conn in connections.all():
# If the database connection has a hiccup, re-establish a new
# connection
conn.close_if_unusable_or_obsolete()
set_guid(generate_guid())
self.run_pending()
except Exception:
logger.exception('encountered an error while scheduling periodic tasks')
time.sleep(idle_seconds)
@property
def next_run(self):
"Time until the next run with t=0 being the global_start of the scheduler class"
return (self.index + 1) * self.interval + self.offset
def due_to_run(self, relative_time):
return bool(self.next_run <= relative_time)
def expected_runs(self, relative_time):
return int((relative_time - self.offset) / self.interval)
def mark_run(self, relative_time):
self.last_run = relative_time
self.completed_runs += 1
new_index = self.expected_runs(relative_time)
if new_index > self.index + 1:
logger.warning(f'Missed {new_index - self.index - 1} schedules of {self.name}')
self.index = new_index
def missed_runs(self, relative_time):
"Number of times job was supposed to ran but failed to, only used for debug"
missed_ct = self.expected_runs(relative_time) - self.completed_runs
# if this is currently due to run do not count that as a missed run
if missed_ct and self.due_to_run(relative_time):
missed_ct -= 1
return missed_ct
process = Process(target=run)
process.daemon = True
process.start()
class Scheduler:
def __init__(self, schedule):
"""
Expects schedule in the form of a dictionary like
{
'job1': {'schedule': timedelta(seconds=50), 'other': 'stuff'}
}
Only the schedule nearest-second value is used for scheduling,
the rest of the data is for use by the caller to know what to run.
"""
self.jobs = [ScheduledTask(name, data) for name, data in schedule.items()]
min_interval = min(job.interval for job in self.jobs)
num_jobs = len(self.jobs)
# this is intentionally opinionated against spammy schedules
# a core goal is to spread out the scheduled tasks (for worker management)
# and high-frequency schedules just do not work with that
if num_jobs > min_interval:
raise RuntimeError(f'Number of schedules ({num_jobs}) is more than the shortest schedule interval ({min_interval} seconds).')
# even space out jobs over the base interval
for i, job in enumerate(self.jobs):
job.offset = (i * min_interval) // num_jobs
# internally times are all referenced relative to startup time, add grace period
self.global_start = time.time() + 2.0
def get_and_mark_pending(self):
relative_time = time.time() - self.global_start
to_run = []
for job in self.jobs:
if job.due_to_run(relative_time):
to_run.append(job)
logger.debug(f'scheduler found {job.name} to run, {relative_time - job.next_run} seconds after target')
job.mark_run(relative_time)
return to_run
def time_until_next_run(self):
relative_time = time.time() - self.global_start
next_job = min(self.jobs, key=lambda j: j.next_run)
delta = next_job.next_run - relative_time
if delta <= 0.1:
# careful not to give 0 or negative values to the select timeout, which has unclear interpretation
logger.warning(f'Scheduler next run of {next_job.name} is {-delta} seconds in the past')
return 0.1
elif delta > 20.0:
logger.warning(f'Scheduler next run unexpectedly over 20 seconds in future: {delta}')
return 20.0
logger.debug(f'Scheduler next run is {next_job.name} in {delta} seconds')
return delta
def debug(self, *args, **kwargs):
data = dict()
data['title'] = 'Scheduler status'
now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
relative_time = time.time() - self.global_start
data['started_time'] = start_time
data['current_time'] = now
data['current_time_relative'] = round(relative_time, 3)
data['total_schedules'] = len(self.jobs)
data['schedule_list'] = dict(
[
(
job.name,
dict(
last_run_seconds_ago=round(relative_time - job.last_run, 3) if job.last_run else None,
next_run_in_seconds=round(job.next_run - relative_time, 3),
offset_in_seconds=job.offset,
completed_runs=job.completed_runs,
missed_runs=job.missed_runs(relative_time),
),
)
for job in sorted(self.jobs, key=lambda job: job.interval)
]
)
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
def run_continuously():
scheduler = Scheduler()
for task in settings.CELERYBEAT_SCHEDULE.values():
apply_async = TaskWorker.resolve_callable(task['task']).apply_async
total_seconds = task['schedule'].total_seconds()
scheduler.every(total_seconds).seconds.do(apply_async)
scheduler.run_continuously()

View File

@@ -339,17 +339,6 @@ class AutoscalePool(WorkerPool):
# but if the task takes longer than the time defined here, we will force it to stop here
self.task_manager_timeout = settings.TASK_MANAGER_TIMEOUT + settings.TASK_MANAGER_TIMEOUT_GRACE_PERIOD
# initialize some things for subsystem metrics periodic gathering
# the AutoscalePool class does not save these to redis directly, but reports via produce_subsystem_metrics
self.scale_up_ct = 0
self.worker_count_max = 0
def produce_subsystem_metrics(self, metrics_object):
metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
metrics_object.set('dispatcher_pool_max_worker_count', self.worker_count_max)
self.worker_count_max = len(self.workers)
@property
def should_grow(self):
if len(self.workers) < self.min_workers:
@@ -417,16 +406,16 @@ class AutoscalePool(WorkerPool):
# the task manager to never do more work
current_task = w.current_task
if current_task and isinstance(current_task, dict):
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')
endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
current_task_name = current_task.get('task', '')
if current_task_name.endswith(endings):
if any(current_task_name.endswith(e) for e in endings):
if 'started' not in current_task:
w.managed_tasks[current_task['uuid']]['started'] = time.time()
age = time.time() - current_task['started']
w.managed_tasks[current_task['uuid']]['age'] = age
if age > self.task_manager_timeout:
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGUSR1 to {w.pid}')
os.kill(w.pid, signal.SIGUSR1)
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
os.kill(w.pid, signal.SIGTERM)
for m in orphaned:
# if all the workers are dead, spawn at least one
@@ -454,12 +443,7 @@ class AutoscalePool(WorkerPool):
idx = random.choice(range(len(self.workers)))
return idx, self.workers[idx]
else:
self.scale_up_ct += 1
ret = super(AutoscalePool, self).up()
new_worker_ct = len(self.workers)
if new_worker_ct > self.worker_count_max:
self.worker_count_max = new_worker_ct
return ret
return super(AutoscalePool, self).up()
def write(self, preferred_queue, body):
if 'guid' in body:
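The tuple-versus-list change for 'endings' in this hunk hinges on a detail of str.endswith: it accepts a tuple of suffixes natively but raises TypeError for a list, which is why the list variant needs any(). A quick demonstration with a made-up task name:
name = 'example.tasks.task_manager'
endings = ('tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager')

print(name.endswith(endings))                          # True: a tuple of suffixes is accepted directly
print(any(name.endswith(e) for e in list(endings)))    # True: the equivalent spelling when endings is a list
# name.endswith(list(endings))  # raises TypeError: endswith first arg must be str or a tuple of str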

View File

@@ -73,15 +73,15 @@ class task:
return cls.apply_async(args, kwargs)
@classmethod
def get_async_body(cls, args=None, kwargs=None, uuid=None, **kw):
"""
Get the python dict to become JSON data in the pg_notify message
This same message gets passed over the dispatcher IPC queue to workers
If a task is submitted to a multiprocessing pool, skipping pg_notify, this might be used directly
"""
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
task_id = uuid or str(uuid4())
args = args or []
kwargs = kwargs or {}
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name, 'time_pub': time.time()}
guid = get_guid()
if guid:
@@ -89,16 +89,6 @@ class task:
if bind_kwargs:
obj['bind_kwargs'] = bind_kwargs
obj.update(**kw)
return obj
@classmethod
def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
queue = queue or getattr(cls.queue, 'im_func', cls.queue)
if not queue:
msg = f'{cls.name}: Queue value required and may not be None'
logger.error(msg)
raise ValueError(msg)
obj = cls.get_async_body(args=args, kwargs=kwargs, uuid=uuid, **kw)
if callable(queue):
queue = queue()
if not is_testing():
@@ -126,5 +116,4 @@ class task:
setattr(fn, 'name', cls.name)
setattr(fn, 'apply_async', cls.apply_async)
setattr(fn, 'delay', cls.delay)
setattr(fn, 'get_async_body', cls.get_async_body)
return fn
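get_async_body above builds the dict that is later json.dumps()'d into a pg_notify payload (and handed to pool workers over IPC). Roughly, the body looks like the sketch below; every value shown is illustrative, including the task name, and 'guid'/'bind_kwargs' only appear when set:
import time
from uuid import uuid4

body = {
    'uuid': str(uuid4()),      # task id, later used for tracking and cancellation
    'args': [],
    'kwargs': {},
    'task': 'awx.main.tasks.system.cluster_node_heartbeat',   # dotted path the worker resolves to a callable
    'time_pub': time.time(),   # publish timestamp, used to measure queue latency
    'guid': 'f3a1c1f0c5d94b2d9d6f5f1e2ab34cd1',               # request correlation id, present only if one is set
}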

View File

@@ -7,21 +7,18 @@ import signal
import sys
import redis
import json
import psycopg
import psycopg2
import time
from uuid import UUID
from queue import Empty as QueueEmpty
from datetime import timedelta
from django import db
from django.conf import settings
from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch.periodic import Scheduler
from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name
import awx.main.analytics.subsystem_metrics as s_metrics
if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -66,12 +63,10 @@ class AWXConsumerBase(object):
def control(self, body):
logger.warning(f'Received control signal:\n{body}')
control = body.get('control')
if control in ('status', 'schedule', 'running', 'cancel'):
if control in ('status', 'running', 'cancel'):
reply_queue = body['reply_to']
if control == 'status':
msg = '\n'.join([self.listening_on, self.pool.debug()])
if control == 'schedule':
msg = self.scheduler.debug()
elif control == 'running':
msg = []
for worker in self.pool.workers:
@@ -89,20 +84,24 @@ class AWXConsumerBase(object):
if task_ids and not msg:
logger.info(f'Could not locate running tasks to cancel with ids={task_ids}')
if reply_queue is not None:
with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg))
with pg_bus_conn() as conn:
conn.notify(reply_queue, json.dumps(msg))
elif control == 'reload':
for worker in self.pool.workers:
worker.quit()
else:
logger.error('unrecognized control message: {}'.format(control))
def dispatch_task(self, body):
"""This will place the given body into a worker queue to run method decorated as a task"""
def process_task(self, body):
if isinstance(body, dict):
body['time_ack'] = time.time()
if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
if len(self.pool):
if "uuid" in body and body['uuid']:
try:
@@ -116,24 +115,15 @@ class AWXConsumerBase(object):
self.pool.write(queue, body)
self.total_messages += 1
def process_task(self, body):
"""Routes the task details in body as either a control task or a task-task"""
if 'control' in body:
try:
return self.control(body)
except Exception:
logger.exception(f"Exception handling control message: {body}")
return
self.dispatch_task(body)
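For context, the control messages routed above are small dicts; a sketch (the reply queue name is a made-up example):

# Illustrative only -- keys taken from the control() method above.
status_request = {'control': 'status', 'reply_to': 'reply_to_2f6c1d'}
running_request = {'control': 'running', 'reply_to': 'reply_to_2f6c1d'}
# The dispatcher answers by calling conn.notify(reply_queue, json.dumps(msg)) as shown above.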
@log_excess_runtime(logger)
def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second
try:
self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
self.last_stats = time.time()
except Exception:
logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
self.last_stats = time.time()
self.last_stats = time.time()
def run(self, *args, **kwargs):
signal.signal(signal.SIGINT, self.stop)
@@ -152,72 +142,29 @@ class AWXConsumerRedis(AWXConsumerBase):
def run(self, *args, **kwargs):
super(AWXConsumerRedis, self).run(*args, **kwargs)
self.worker.on_start()
logger.info(f'Callback receiver started with pid={os.getpid()}')
db.connection.close() # logs use database, so close connection
while True:
logger.debug(f'{os.getpid()} is alive')
time.sleep(60)
class AWXConsumerPG(AWXConsumerBase):
def __init__(self, *args, schedule=None, **kwargs):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTIME_TOLERANCE
self.pg_max_wait = settings.DISPATCHER_DB_DOWNTOWN_TOLLERANCE
# if no successful loops have run since startup, then we should fail right away
self.pg_is_down = True # set so that we fail if we get database errors on startup
init_time = time.time()
self.pg_down_time = init_time - self.pg_max_wait # allow no grace period
self.last_cleanup = init_time
self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
self.last_metrics_gather = init_time
self.listen_cumulative_time = 0.0
if schedule:
schedule = schedule.copy()
else:
schedule = {}
# add control tasks to be run at regular schedules
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
schedule['pool_cleanup'] = {'control': self.pool.cleanup, 'schedule': timedelta(seconds=60)}
# record subsystem metrics for the dispatcher
schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
self.scheduler = Scheduler(schedule)
def record_metrics(self):
current_time = time.time()
self.pool.produce_subsystem_metrics(self.subsystem_metrics)
self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
self.subsystem_metrics.pipe_execute()
self.listen_cumulative_time = 0.0
self.last_metrics_gather = current_time
self.pg_down_time = time.time() - self.pg_max_wait # allow no grace period
self.last_cleanup = time.time()
def run_periodic_tasks(self):
"""
Run general periodic logic, and return maximum time in seconds before
the next requested run
This may be called more often than that when events are consumed,
so it should be very efficient in that case
"""
try:
self.record_statistics() # maintains time buffer in method
except Exception as exc:
logger.warning(f'Failed to save dispatcher statistics {exc}')
self.record_statistics() # maintains time buffer in method
for job in self.scheduler.get_and_mark_pending():
if 'control' in job.data:
try:
job.data['control']()
except Exception:
logger.exception(f'Error running control task {job.data}')
elif 'task' in job.data:
body = self.worker.resolve_callable(job.data['task']).get_async_body()
# bypasses pg_notify for scheduled tasks
self.dispatch_task(body)
self.pg_is_down = False
self.listen_start = time.time()
return self.scheduler.time_until_next_run()
if time.time() - self.last_cleanup > 60: # same as cluster_node_heartbeat
# NOTE: if we run out of database connections, it is important to still run cleanup
# so that we scale down workers and free up connections
self.pool.cleanup()
self.last_cleanup = time.time()
def run(self, *args, **kwargs):
super(AWXConsumerPG, self).run(*args, **kwargs)
@@ -233,21 +180,17 @@ class AWXConsumerPG(AWXConsumerBase):
if init is False:
self.worker.on_start()
init = True
# run_periodic_tasks run scheduled actions and gives time until next scheduled action
# this is saved to the conn (PubSub) object in order to modify read timeout in-loop
conn.select_timeout = self.run_periodic_tasks()
# this is the main operational loop for awx-manage run_dispatcher
for e in conn.events(yield_timeouts=True):
self.listen_cumulative_time += time.time() - self.listen_start # for metrics
if e is not None:
self.process_task(json.loads(e.payload))
conn.select_timeout = self.run_periodic_tasks()
self.run_periodic_tasks()
self.pg_is_down = False
if self.should_stop:
return
except psycopg.InterfaceError:
except psycopg2.InterfaceError:
logger.warning("Stale Postgres message bus connection, reconnecting")
continue
except (db.DatabaseError, psycopg.OperationalError):
except (db.DatabaseError, psycopg2.OperationalError):
# If we have attained steady state operation, tolerate short-term database hiccups
if not self.pg_is_down:
logger.exception(f"Error consuming new events from postgres, will retry for {self.pg_max_wait} s")
@@ -289,8 +232,8 @@ class BaseWorker(object):
break
except QueueEmpty:
continue
except Exception:
logger.exception("Exception on worker {}, reconnecting: ".format(idx))
except Exception as e:
logger.error("Exception on worker {}, restarting: ".format(idx) + str(e))
continue
try:
for conn in db.connections.all():

View File

@@ -9,6 +9,7 @@ from django.conf import settings
from django.utils.functional import cached_property
from django.utils.timezone import now as tz_now
from django.db import transaction, connection as django_connection
from django.db.utils import DataError
from django_guid import set_guid
import psutil
@@ -191,12 +192,16 @@ class CallbackBrokerWorker(BaseWorker):
e._retry_count = retry_count
# special sanitization logic for postgres treatment of NUL 0x00 char
# This used to check the class of the exception, but with the psycopg3 upgrade it could appear
# as either DataError or ValueError, so now let's just check whether it's there.
if (retry_count == 1) and ("\x00" in e.stdout):
e.stdout = e.stdout.replace("\x00", "")
if retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
if (retry_count == 1) and isinstance(exc_indv, DataError):
# The easiest place is in stdout. This raises as an error stating that it can't save a NUL character
if "\x00" in e.stdout:
e.stdout = e.stdout.replace("\x00", "")
# There is also a chance that a NUL char is embedded in event data, which is part of a JSON blob. In that case we, thankfully, get a different exception
if 'unsupported Unicode escape sequence' in str(exc_indv):
e.event_data = json.loads(
json.dumps(e.event_data).replace("\x00", "").replace("\\x00", "").replace("\u0000", "").replace("\\u0000", "")
)
elif retry_count >= self.INDIVIDUAL_EVENT_RETRIES:
logger.error(f'Hit max retries ({retry_count}) saving individual Event error: {str(exc_indv)}\ndata:\n{e.__dict__}')
events.remove(e)
else:
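A self-contained sketch of the two sanitization paths described in the comments above (plain Python, no AWX models involved):

# Illustrative only -- how a NUL character is stripped from stdout and from JSON-encoded event data.
import json

stdout = "task output with a stray \x00 byte"
stdout = stdout.replace("\x00", "")              # the stdout path

event_data = {"res": {"msg": "value with \x00 inside"}}
# json.dumps escapes the NUL as the text \u0000, so the replace chain removes it before re-loading
cleaned = json.loads(
    json.dumps(event_data).replace("\x00", "").replace("\\x00", "").replace("\u0000", "").replace("\\u0000", "")
)
print(stdout, cleaned)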

View File

@@ -67,60 +67,10 @@ def __enum_validate__(validator, enums, instance, schema):
Draft4Validator.VALIDATORS['enum'] = __enum_validate__
import logging
logger = logging.getLogger('awx.main.fields')
class JSONBlob(JSONField):
# Cringe... a JSONField that is backed by a TextField.
# This field was a legacy custom field type that, tl;dr, was a TextField
# Over the years, with Django upgrades, we were able to move to a JSONField instead of the custom field
# However, we didn't want to force large customers with millions of events to convert from text to json during an upgrade
# So we keep this field type backed by a TextField.
def get_internal_type(self):
return "TextField"
# postgres uses a Jsonb field as the default backend
# with psycopg2 it was using a psycopg2._json.Json class internally
# with psycopg3 it uses a psycopg.types.json.Jsonb class internally
# The binary class was not compatible with a text field, so we are going to override these next two methods and ensure we are using a string
def from_db_value(self, value, expression, connection):
if value is None:
return value
if isinstance(value, str):
try:
return json.loads(value)
except Exception as e:
logger.error(f"Failed to load JSONField {self.name}: {e}")
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
try:
# Null characters are not allowed in text fields and JSONBlobs are JSON data but saved as text
# So we want to make sure we strip out any null characters. Also note, these "should" be escaped by the dumps process:
# >>> my_obj = { 'test': '\x00' }
# >>> import json
# >>> json.dumps(my_obj)
# '{"test": "\\u0000"}'
# But just to be safe, let's remove them if they are there. \x00 and \u0000 are the same:
# >>> string = "\x00"
# >>> "\u0000" in string
# True
dumped_value = json.dumps(value)
if "\x00" in dumped_value:
dumped_value = dumped_value.replace("\x00", '')
return dumped_value
except Exception as e:
logger.error(f"Failed to dump JSONField {self.name}: {e} value: {value}")
return value
# Based on AutoOneToOneField from django-annoying:
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py
@@ -850,7 +800,7 @@ class CredentialTypeInjectorField(JSONSchemaField):
def validate_env_var_allowed(self, env_var):
if env_var.startswith('ANSIBLE_'):
raise django_exceptions.ValidationError(
_('Environment variable {} may affect Ansible configuration so its use is not allowed in credentials.').format(env_var),
_('Environment variable {} may affect Ansible configuration so its ' 'use is not allowed in credentials.').format(env_var),
code='invalid',
params={'value': env_var},
)

View File

@@ -23,10 +23,7 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove activity stream events more than N days old')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=500, metavar='X', help='Remove activity stream events in batch of X events. Defaults to 500.'
)
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
def init_logging(self):
log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0]))
@@ -51,7 +48,7 @@ class Command(BaseCommand):
else:
pks_to_delete.add(asobj.pk)
# Cleanup objects in batches instead of deleting each one individually.
if len(pks_to_delete) >= self.batch_size:
if len(pks_to_delete) >= 500:
ActivityStream.objects.filter(pk__in=pks_to_delete).delete()
n_deleted_items += len(pks_to_delete)
pks_to_delete.clear()
@@ -66,5 +63,4 @@ class Command(BaseCommand):
self.days = int(options.get('days', 30))
self.cutoff = now() - datetime.timedelta(days=self.days)
self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 500))
self.cleanup_activitystream()
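A generic sketch of the batching pattern used above, with the Django delete swapped for a plain callback:

# Illustrative only -- same shape as the pk accumulation loop above, without the ORM.
def delete_in_batches(pks, delete_fn, batch_size=500):
    pending, deleted = set(), 0
    for pk in pks:
        pending.add(pk)
        if len(pending) >= batch_size:
            delete_fn(pending)
            deleted += len(pending)
            pending.clear()
    if pending:                                  # flush the final partial batch
        delete_fn(pending)
        deleted += len(pending)
    return deleted

print(delete_in_batches(range(1200), lambda batch: None, batch_size=500))  # -> 1200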

View File

@@ -1,22 +1,22 @@
from awx.main.models import HostMetric
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.tasks.host_metrics import HostMetricTask
class Command(BaseCommand):
"""
This command provides a cleanup task for the HostMetric model.
There are two modes, which run in the following order:
- soft cleanup
- - Perform soft-deletion of all host metrics last automated 12 months ago or before.
This is the same as issuing a DELETE request to /api/v2/host_metrics/N/ for all host metrics that match the criteria.
- - updates columns delete, deleted_counter and last_deleted
- hard cleanup
- - Permanently erase from the database all host metrics last automated 36 months ago or before.
This operation happens after the soft deletion has finished.
Run soft-deleting of HostMetrics
"""
help = 'Run soft and hard-deletion of HostMetrics'
help = 'Run soft-deleting of HostMetrics'
def add_arguments(self, parser):
parser.add_argument('--months-ago', type=int, dest='months-ago', action='store', help='Threshold in months for soft-deleting')
def handle(self, *args, **options):
HostMetricTask().cleanup(soft_threshold=settings.CLEANUP_HOST_METRICS_SOFT_THRESHOLD, hard_threshold=settings.CLEANUP_HOST_METRICS_HARD_THRESHOLD)
months_ago = options.get('months-ago') or None
if not months_ago:
months_ago = getattr(settings, 'CLEANUP_HOST_METRICS_THRESHOLD', 12)
HostMetric.cleanup_task(months_ago)
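A rough sketch of how the soft/hard thresholds in the docstring above translate into cutoff dates (12 and 36 months mirror the help text; the month arithmetic is approximate and is not the HostMetricTask implementation):

# Illustrative only.
from datetime import datetime, timedelta, timezone

SOFT_THRESHOLD_MONTHS = 12    # soft-delete metrics last automated at or before this cutoff
HARD_THRESHOLD_MONTHS = 36    # permanently erase metrics at or before this cutoff

now = datetime.now(timezone.utc)
soft_cutoff = now - timedelta(days=30 * SOFT_THRESHOLD_MONTHS)
hard_cutoff = now - timedelta(days=30 * HARD_THRESHOLD_MONTHS)
print(soft_cutoff.date(), hard_cutoff.date())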

View File

@@ -9,7 +9,6 @@ import re
# Django
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, connection
from django.db.models import Min, Max
@@ -18,7 +17,10 @@ from django.utils.timezone import now
# AWX
from awx.main.models import Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification
from awx.main.utils import unified_job_class_to_event_table_name
def unified_job_class_to_event_table_name(job_class):
return f'main_{job_class().event_class.__name__.lower()}'
def partition_table_name(job_class, dt):
@@ -150,10 +152,7 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int, default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would be removed)')
parser.add_argument(
'--batch-size', dest='batch_size', type=int, default=100000, metavar='X', help='Remove jobs in batch of X jobs. Defaults to 100000.'
)
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)')
parser.add_argument('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs')
parser.add_argument('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands')
parser.add_argument('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates')
@@ -199,58 +198,18 @@ class Command(BaseCommand):
delete_meta.delete_jobs()
return (delete_meta.jobs_no_delete_count, delete_meta.jobs_to_delete_count)
def has_unpartitioned_table(self, model):
tblname = unified_job_class_to_event_table_name(model)
with connection.cursor() as cursor:
cursor.execute(f"SELECT 1 FROM pg_tables WHERE tablename = '_unpartitioned_{tblname}';")
row = cursor.fetchone()
if row is None:
return False
return True
def _delete_unpartitioned_table(self, model):
"If the unpartitioned table is no longer necessary, it will drop the table"
tblname = unified_job_class_to_event_table_name(model)
if not self.has_unpartitioned_table(model):
self.logger.debug(f'Table _unpartitioned_{tblname} does not exist, you are fully migrated.')
return
with connection.cursor() as cursor:
# same as UnpartitionedJobEvent.objects.aggregate(Max('created'))
cursor.execute(f'SELECT MAX("_unpartitioned_{tblname}"."created") FROM "_unpartitioned_{tblname}";')
row = cursor.fetchone()
last_created = row[0]
if last_created:
self.logger.info(f'Last event created in _unpartitioned_{tblname} was {last_created.isoformat()}')
else:
self.logger.info(f'Table _unpartitioned_{tblname} has no events in it')
if (last_created is None) or (last_created < self.cutoff):
self.logger.warning(
f'Dropping table _unpartitioned_{tblname} since no records are newer than {self.cutoff}\n'
'WARNING - this will happen in a separate transaction so a failure will not roll back prior cleanup'
)
with connection.cursor() as cursor:
cursor.execute(f'DROP TABLE _unpartitioned_{tblname};')
def _delete_unpartitioned_events(self, model, pk_list):
"If unpartitioned job events remain, it will cascade those from jobs in pk_list"
tblname = unified_job_class_to_event_table_name(model)
rel_name = model().event_parent_key
# Bail if the unpartitioned table does not exist anymore
if not self.has_unpartitioned_table(model):
return
# Table still exists, delete individual unpartitioned events
def _cascade_delete_job_events(self, model, pk_list):
if pk_list:
with connection.cursor() as cursor:
self.logger.debug(f'Deleting {len(pk_list)} events from _unpartitioned_{tblname}, use a longer cleanup window to delete the table.')
tblname = unified_job_class_to_event_table_name(model)
pk_list_csv = ','.join(map(str, pk_list))
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv});")
rel_name = model().event_parent_key
cursor.execute(f"DELETE FROM _unpartitioned_{tblname} WHERE {rel_name} IN ({pk_list_csv})")
def cleanup_jobs(self):
batch_size = 100000
# Hack to avoid doing N+1 queries as each item in the Job query set does
# an individual query to get the underlying UnifiedJob.
Job.polymorphic_super_sub_accessors_replaced = True
@@ -265,14 +224,13 @@ class Command(BaseCommand):
deleted = 0
info = qs.aggregate(min=Min('id'), max=Max('id'))
if info['min'] is not None:
for start in range(info['min'], info['max'] + 1, self.batch_size):
qs_batch = qs.filter(id__gte=start, id__lte=start + self.batch_size)
for start in range(info['min'], info['max'] + 1, batch_size):
qs_batch = qs.filter(id__gte=start, id__lte=start + batch_size)
pk_list = qs_batch.values_list('id', flat=True)
_, results = qs_batch.delete()
deleted += results['main.Job']
# Avoid dropping the job event table in case we have interacted with it already
self._delete_unpartitioned_events(Job, pk_list)
self._cascade_delete_job_events(Job, pk_list)
return skipped, deleted
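The job cleanup above walks the id space in fixed-size windows rather than accumulating primary keys; a minimal sketch of that pattern:

# Illustrative only -- mirrors the range(info['min'], info['max'] + 1, batch_size) loop above.
def iter_id_windows(min_id, max_id, batch_size):
    for start in range(min_id, max_id + 1, batch_size):
        yield start, start + batch_size          # the queryset filters id >= start and id <= start + batch_size

print(list(iter_id_windows(1, 250000, 100000)))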
@@ -295,7 +253,7 @@ class Command(BaseCommand):
deleted += 1
if not self.dry_run:
self._delete_unpartitioned_events(AdHocCommand, pk_list)
self._cascade_delete_job_events(AdHocCommand, pk_list)
skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -323,7 +281,7 @@ class Command(BaseCommand):
deleted += 1
if not self.dry_run:
self._delete_unpartitioned_events(ProjectUpdate, pk_list)
self._cascade_delete_job_events(ProjectUpdate, pk_list)
skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -351,7 +309,7 @@ class Command(BaseCommand):
deleted += 1
if not self.dry_run:
self._delete_unpartitioned_events(InventoryUpdate, pk_list)
self._cascade_delete_job_events(InventoryUpdate, pk_list)
skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -375,7 +333,7 @@ class Command(BaseCommand):
deleted += 1
if not self.dry_run:
self._delete_unpartitioned_events(SystemJob, pk_list)
self._cascade_delete_job_events(SystemJob, pk_list)
skipped += SystemJob.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@@ -420,12 +378,12 @@ class Command(BaseCommand):
skipped += Notification.objects.filter(created__gte=self.cutoff).count()
return skipped, deleted
@transaction.atomic
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
self.init_logging()
self.days = int(options.get('days', 90))
self.dry_run = bool(options.get('dry_run', False))
self.batch_size = int(options.get('batch_size', 100000))
try:
self.cutoff = now() - datetime.timedelta(days=self.days)
except OverflowError:
@@ -447,29 +405,19 @@ class Command(BaseCommand):
del s.receivers[:]
s.sender_receivers_cache.clear()
with transaction.atomic():
for m in models_to_cleanup:
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
for m in model_names:
if m not in models_to_cleanup:
continue
func = getattr(self, 'cleanup_%s_partition' % m, None)
if func:
skipped_partition, deleted_partition = func()
skipped += skipped_partition
deleted += deleted_partition
skipped, deleted = getattr(self, 'cleanup_%s' % m)()
if self.dry_run:
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
else:
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
func = getattr(self, 'cleanup_%s_partition' % m, None)
if func:
skipped_partition, deleted_partition = func()
skipped += skipped_partition
deleted += deleted_partition
# Deleting unpartitioned tables cannot be done in the same transaction as updates to related tables
if not self.dry_run:
with transaction.atomic():
for m in models_to_cleanup:
unified_job_class_name = m[:-1].title().replace('Management', 'System').replace('_', '')
unified_job_class = apps.get_model('main', unified_job_class_name)
try:
unified_job_class().event_class
except (NotImplementedError, AttributeError):
continue # no need to run this for models without events
self._delete_unpartitioned_table(unified_job_class)
if self.dry_run:
self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped)
else:
self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)

View File

@@ -44,7 +44,7 @@ class Command(BaseCommand):
'- To list all (now deprecated) custom virtual environments run:',
'awx-manage list_custom_venvs',
'',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv',
'',
'- Run these commands with `-q` to remove tool tips.',

View File

@@ -13,7 +13,7 @@ class Command(BaseCommand):
Deprovision a cluster node
"""
help = 'Remove instance from the database. Specify `--hostname` to use this command.'
help = 'Remove instance from the database. ' 'Specify `--hostname` to use this command.'
def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help='Hostname used during provisioning')

View File

@@ -1,9 +0,0 @@
from django.core.management.base import BaseCommand
from awx.main.tasks.host_metrics import HostMetricSummaryMonthlyTask
class Command(BaseCommand):
help = 'Computing of HostMetricSummaryMonthly'
def handle(self, *args, **options):
HostMetricSummaryMonthlyTask().execute()

View File

@@ -22,7 +22,7 @@ class Command(BaseCommand):
'# Discovered Virtual Environments:',
'\n'.join(venvs),
'',
'- To export the contents of a (deprecated) virtual environment, run the following command while supplying the path as an argument:',
'- To export the contents of a (deprecated) virtual environment, ' 'run the following command while supplying the path as an argument:',
'awx-manage export_custom_venv /path/to/venv',
'',
'- To view the connections a (deprecated) virtual environment had in the database, run the following command while supplying the path as an argument:',

View File

@@ -1,27 +0,0 @@
from django.utils.timezone import now
from django.core.management.base import BaseCommand, CommandParser
from datetime import timedelta
from awx.main.utils.common import create_partition, unified_job_class_to_event_table_name
from awx.main.models import Job, SystemJob, ProjectUpdate, InventoryUpdate, AdHocCommand
class Command(BaseCommand):
"""Command used to precreate database partitions to avoid pg_dump locks"""
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument('--count', dest='count', action='store', help='The number of hours of partitions to create', type=int, default=1)
def _create_partitioned_tables(self, count):
tables = list()
for model in (Job, SystemJob, ProjectUpdate, InventoryUpdate, AdHocCommand):
tables.append(unified_job_class_to_event_table_name(model))
start = now()
while count > 0:
for table in tables:
create_partition(table, start)
print(f'Created partitions for {table} {start}')
start = start + timedelta(hours=1)
count -= 1
def handle(self, **options):
self._create_partitioned_tables(count=options.get('count'))
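A sketch of what the command above would have created, printed instead of executed (the event table names match the models it loops over; create_partition itself is not reproduced here):

# Illustrative only.
from datetime import datetime, timedelta, timezone

tables = ['main_jobevent', 'main_systemjobevent', 'main_projectupdateevent', 'main_inventoryupdateevent', 'main_adhoccommandevent']
count = 3                                        # e.g. --count 3 => three hours of partitions
start = datetime.now(timezone.utc)
while count > 0:
    for table in tables:
        print(f'would create partition for {table} starting at {start:%Y-%m-%d %H:00}')
    start += timedelta(hours=1)
    count -= 1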

View File

@@ -25,20 +25,17 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
parser.add_argument('--listener_port', dest='listener_port', type=int, help="Receptor listener port")
parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
parser.add_argument('--uuid', type=str, help="Instance UUID")
def _register_hostname(self, hostname, node_type, uuid, listener_port):
def _register_hostname(self, hostname, node_type, uuid):
if not hostname:
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
raise CommandError('Registering with values from settings only intended for use in K8s installs')
from awx.main.management.commands.register_queue import RegisterQueue
(changed, instance) = Instance.objects.register(
ip_address=os.environ.get('MY_POD_IP'), listener_port=listener_port, node_type='control', node_uuid=settings.SYSTEM_UUID
)
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
RegisterQueue(
settings.DEFAULT_EXECUTION_QUEUE_NAME,
@@ -51,7 +48,7 @@ class Command(BaseCommand):
max_concurrent_jobs=settings.DEFAULT_EXECUTION_QUEUE_MAX_CONCURRENT_JOBS,
).register()
else:
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, node_uuid=uuid, listener_port=listener_port)
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
if changed:
print("Successfully registered instance {}".format(hostname))
else:
@@ -61,6 +58,6 @@ class Command(BaseCommand):
@transaction.atomic
def handle(self, **options):
self.changed = False
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'), options.get('listener_port'))
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
if self.changed:
print("(changed: True)")

View File

@@ -2,7 +2,6 @@ import logging
import json
from django.core.management.base import BaseCommand
from awx.main.dispatch import pg_bus_conn
from awx.main.dispatch.worker.task import TaskWorker
@@ -19,7 +18,7 @@ class Command(BaseCommand):
def handle(self, *arg, **options):
try:
with pg_bus_conn() as conn:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("tower_settings_change")
for e in conn.events(yield_timeouts=True):
if e is not None:

View File

@@ -4,22 +4,28 @@ import logging
import yaml
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumerPG, TaskWorker
from awx.main.dispatch import periodic
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
return common_name + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running dispatchers')
parser.add_argument('--schedule', dest='schedule', action='store_true', help='print the current status of schedules being run by the dispatcher')
parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasks managed by this dispatcher')
parser.add_argument(
'--reload',
@@ -41,9 +47,6 @@ class Command(BaseCommand):
if options.get('status'):
print(Control('dispatcher').status())
return
if options.get('schedule'):
print(Control('dispatcher').schedule())
return
if options.get('running'):
print(Control('dispatcher').running())
return
@@ -60,11 +63,21 @@ class Command(BaseCommand):
print(Control('dispatcher').cancel(cancel_data))
return
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and cache connections (that way lies race conditions)
django_connection.close()
django_cache.close()
# spawn a daemon thread to periodically enqueue scheduled tasks
# (like the node heartbeat)
periodic.run_continuously()
consumer = None
try:
queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4), schedule=settings.CELERYBEAT_SCHEDULE)
consumer = AWXConsumerPG('dispatcher', TaskWorker(), queues, AutoscalePool(min_workers=4))
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
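For context on the schedule argument added above, a sketch of the mapping shape AWXConsumerPG consumes: each entry carries either a dotted 'task' path or a local 'control' callable, plus a 'schedule' timedelta (the task name here is hypothetical):

# Illustrative only -- shape mirrors the schedule handling shown for AWXConsumerPG above.
from datetime import timedelta

schedule = {
    'example_heartbeat': {'task': 'awx.main.tasks.system.example_heartbeat', 'schedule': timedelta(seconds=30)},
    'pool_cleanup': {'control': (lambda: None), 'schedule': timedelta(seconds=60)},  # control entries call a local callable
}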

View File

@@ -0,0 +1,74 @@
import json
import logging
import os
import time
import signal
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.dispatch import pg_bus_conn
logger = logging.getLogger('awx.main.commands.run_heartbeet')
class Command(BaseCommand):
help = 'Launch the web server beacon (heartbeet)'
def print_banner(self):
heartbeet = r"""
********** **********
************* *************
*****************************
***********HEART***********
*************************
*******************
*************** _._
*********** /`._ `'. __
******* \ .\| \ _'` `)
*** (``_) \| ).'` /`- /
* `\ `;\_ `\\//`-'` /
\ `'.'.| / __/`
`'--v_|/`'`
__||-._
/'` `-`` `'\\
/ .'` )
\ BEET ' )
\. /
'. /'`
`) |
//
'(.
`\`.
``"""
print(heartbeet)
def construct_payload(self, action='online'):
payload = {
'hostname': settings.CLUSTER_HOST_ID,
'ip': os.environ.get('MY_POD_IP'),
'action': action,
}
return json.dumps(payload)
def notify_listener_and_exit(self, *args):
with pg_bus_conn(new_connection=False) as conn:
conn.notify('web_heartbeet', self.construct_payload(action='offline'))
sys.exit(0)
def do_hearbeat_loop(self):
with pg_bus_conn(new_connection=True) as conn:
while True:
logger.debug('Sending heartbeat')
conn.notify('web_heartbeet', self.construct_payload())
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
def handle(self, *arg, **options):
self.print_banner()
signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
signal.signal(signal.SIGINT, self.notify_listener_and_exit)
# Note: We don't really try any reconnect logic to pg_notify here,
# just let supervisor restart if we fail.
self.do_hearbeat_loop()
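The payload published on the 'web_heartbeet' channel, per construct_payload() above, is a small JSON document; the hostname and IP below are made-up values:

{"hostname": "awx-web-6b9f7c", "ip": "10.42.0.17", "action": "online"}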

View File

@@ -22,7 +22,7 @@ class Command(BaseCommand):
def handle(self, *arg, **options):
try:
with pg_bus_conn() as conn:
with pg_bus_conn(new_connection=True) as conn:
conn.listen("rsyslog_configurer")
# reconfigure rsyslog on start up
reconfigure_rsyslog()

View File

@@ -1,45 +0,0 @@
import json
import logging
import os
import time
import signal
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
from awx.main.dispatch import pg_bus_conn
logger = logging.getLogger('awx.main.commands.run_ws_heartbeat')
class Command(BaseCommand):
help = 'Launch the web server beacon (ws_heartbeat)'
def construct_payload(self, action='online'):
payload = {
'hostname': settings.CLUSTER_HOST_ID,
'ip': os.environ.get('MY_POD_IP'),
'action': action,
}
return json.dumps(payload)
def notify_listener_and_exit(self, *args):
with pg_bus_conn(new_connection=False) as conn:
conn.notify('web_ws_heartbeat', self.construct_payload(action='offline'))
sys.exit(0)
def do_heartbeat_loop(self):
while True:
with pg_bus_conn() as conn:
logger.debug('Sending heartbeat')
conn.notify('web_ws_heartbeat', self.construct_payload())
time.sleep(settings.BROADCAST_WEBSOCKET_BEACON_FROM_WEB_RATE_SECONDS)
def handle(self, *arg, **options):
signal.signal(signal.SIGTERM, self.notify_listener_and_exit)
signal.signal(signal.SIGINT, self.notify_listener_and_exit)
# Note: We don't really try any reconnect logic to pg_notify here,
# just let supervisor restart if we fail.
self.do_heartbeat_loop()

View File

@@ -2,7 +2,6 @@
# All Rights Reserved.
import logging
import uuid
from django.db import models
from django.conf import settings
from django.db.models.functions import Lower
@@ -115,29 +114,25 @@ class InstanceManager(models.Manager):
return node[0]
raise RuntimeError("No instance found with the current cluster host id")
def register(self, node_uuid=None, hostname=None, ip_address="", listener_port=None, node_type='hybrid', defaults=None):
def register(self, uuid=None, hostname=None, ip_address=None, node_type='hybrid', defaults=None):
if not hostname:
hostname = settings.CLUSTER_HOST_ID
if not ip_address:
ip_address = ""
with advisory_lock('instance_registration_%s' % hostname):
if settings.AWX_AUTO_DEPROVISION_INSTANCES:
# detect any instances with the same IP address.
# if one exists, set it to ""
if ip_address:
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
if inst_conflicting_ip.exists():
for other_inst in inst_conflicting_ip:
other_hostname = other_inst.hostname
other_inst.ip_address = ""
other_inst.save(update_fields=['ip_address'])
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
# if one exists, set it to None
inst_conflicting_ip = self.filter(ip_address=ip_address).exclude(hostname=hostname)
if inst_conflicting_ip.exists():
for other_inst in inst_conflicting_ip:
other_hostname = other_inst.hostname
other_inst.ip_address = None
other_inst.save(update_fields=['ip_address'])
logger.warning("IP address {0} conflict detected, ip address unset for host {1}.".format(ip_address, other_hostname))
# Return existing instance that matches hostname or UUID (default to UUID)
if node_uuid is not None and node_uuid != UUID_DEFAULT and self.filter(uuid=node_uuid).exists():
instance = self.filter(uuid=node_uuid)
if uuid is not None and uuid != UUID_DEFAULT and self.filter(uuid=uuid).exists():
instance = self.filter(uuid=uuid)
else:
# if instance was not retrieved by uuid and hostname was, use the hostname
instance = self.filter(hostname=hostname)
@@ -161,9 +156,6 @@ class InstanceManager(models.Manager):
if instance.node_type != node_type:
instance.node_type = node_type
update_fields.append('node_type')
if instance.listener_port != listener_port:
instance.listener_port = listener_port
update_fields.append('listener_port')
if update_fields:
instance.save(update_fields=update_fields)
return (True, instance)
@@ -174,11 +166,14 @@ class InstanceManager(models.Manager):
create_defaults = {
'node_state': Instance.States.INSTALLED,
'capacity': 0,
'listener_port': 27199,
}
if defaults is not None:
create_defaults.update(defaults)
uuid_option = {'uuid': node_uuid if node_uuid is not None else uuid.uuid4()}
uuid_option = {}
if uuid is not None:
uuid_option = {'uuid': uuid}
if node_type == 'execution' and 'version' not in create_defaults:
create_defaults['version'] = RECEPTOR_PENDING
instance = self.create(hostname=hostname, ip_address=ip_address, listener_port=listener_port, node_type=node_type, **create_defaults, **uuid_option)
instance = self.create(hostname=hostname, ip_address=ip_address, node_type=node_type, **create_defaults, **uuid_option)
return (True, instance)

View File

@@ -122,7 +122,7 @@ class URLModificationMiddleware(MiddlewareMixin):
field_class=fields.DictField,
read_only=True,
label=_('Formats of all available named urls'),
help_text=_('Read-only list of key-value pairs that shows the standard format of all available named URLs.'),
help_text=_('Read-only list of key-value pairs that shows the standard format of all ' 'available named URLs.'),
category=_('Named URL'),
category_slug='named-url',
)

View File

@@ -9,11 +9,13 @@ from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
import taggit.managers
import awx.main.fields
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
@@ -182,6 +184,12 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('kind', 'name'),
@@ -521,6 +529,12 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
('users', models.ManyToManyField(related_name='organizations', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
@@ -575,6 +589,12 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
),
migrations.CreateModel(
@@ -624,6 +644,12 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ['-next_run'],
@@ -661,6 +687,12 @@ class Migration(migrations.Migration):
),
),
('organization', models.ForeignKey(related_name='teams', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
('users', models.ManyToManyField(related_name='teams', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
@@ -1235,6 +1267,13 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='unifiedjob',
name='created_by',
@@ -1280,6 +1319,13 @@ class Migration(migrations.Migration):
name='schedule',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to='main.Schedule', null=True),
),
migrations.AddField(
model_name='unifiedjob',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='unifiedjob',
name='unified_job_template',
@@ -1324,6 +1370,13 @@ class Migration(migrations.Migration):
help_text='Organization containing this inventory.',
),
),
migrations.AddField(
model_name='inventory',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='host',
name='inventory',
@@ -1354,6 +1407,13 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='host',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='group',
name='hosts',
@@ -1381,6 +1441,13 @@ class Migration(migrations.Migration):
name='parents',
field=models.ManyToManyField(related_name='children', to='main.Group', blank=True),
),
migrations.AddField(
model_name='group',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='custominventoryscript',
name='organization',
@@ -1392,6 +1459,13 @@ class Migration(migrations.Migration):
null=True,
),
),
migrations.AddField(
model_name='custominventoryscript',
name='tags',
field=taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
migrations.AddField(
model_name='credential',
name='team',

View File

@@ -12,6 +12,8 @@ import django.db.models.deletion
from django.conf import settings
from django.utils.timezone import now
import taggit.managers
def create_system_job_templates(apps, schema_editor):
"""
@@ -123,6 +125,7 @@ class Migration(migrations.Migration):
]
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
@@ -253,6 +256,12 @@ class Migration(migrations.Migration):
'organization',
models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
),
migrations.AddField(
@@ -712,6 +721,12 @@ class Migration(migrations.Migration):
help_text='Organization this label belongs to.',
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('organization', 'name'),

View File

@@ -5,11 +5,11 @@ from __future__ import unicode_literals
# Django
from django.db import connection, migrations, models, OperationalError, ProgrammingError
from django.conf import settings
import taggit.managers
# AWX
import awx.main.fields
from awx.main.models import Host
from ._sqlite_helper import dbawaremigrations
def replaces():
@@ -132,11 +132,9 @@ class Migration(migrations.Migration):
help_text='If enabled, Tower will act as an Ansible Fact Cache Plugin; persisting facts at the end of a playbook run to the database and caching facts for use by Ansible.',
),
),
dbawaremigrations.RunSQL(
migrations.RunSQL(
sql="CREATE INDEX host_ansible_facts_default_gin ON {} USING gin(ansible_facts jsonb_path_ops);".format(Host._meta.db_table),
reverse_sql='DROP INDEX host_ansible_facts_default_gin;',
sqlite_sql=dbawaremigrations.RunSQL.noop,
sqlite_reverse_sql=dbawaremigrations.RunSQL.noop,
),
# SCM file-based inventories
migrations.AddField(
@@ -319,6 +317,10 @@ class Migration(migrations.Migration):
model_name='permission',
name='project',
),
migrations.RemoveField(
model_name='permission',
name='tags',
),
migrations.RemoveField(
model_name='permission',
name='team',
@@ -508,6 +510,12 @@ class Migration(migrations.Migration):
null=True,
),
),
(
'tags',
taggit.managers.TaggableManager(
to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'
),
),
],
options={
'ordering': ('kind', 'name'),

View File

@@ -3,27 +3,24 @@ from __future__ import unicode_literals
from django.db import migrations
from ._sqlite_helper import dbawaremigrations
tables_to_drop = [
'celery_taskmeta',
'celery_tasksetmeta',
'djcelery_crontabschedule',
'djcelery_intervalschedule',
'djcelery_periodictask',
'djcelery_periodictasks',
'djcelery_taskstate',
'djcelery_workerstate',
'djkombu_message',
'djkombu_queue',
]
postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)
class Migration(migrations.Migration):
dependencies = [
('main', '0049_v330_validate_instance_capacity_adjustment'),
]
operations = [dbawaremigrations.RunSQL(p, sqlite_sql=s) for p, s in zip(postgres_sql, sqlite_sql)]
operations = [
migrations.RunSQL([("DROP TABLE IF EXISTS {} CASCADE;".format(table))])
for table in (
'celery_taskmeta',
'celery_tasksetmeta',
'djcelery_crontabschedule',
'djcelery_intervalschedule',
'djcelery_periodictask',
'djcelery_periodictasks',
'djcelery_taskstate',
'djcelery_workerstate',
'djkombu_message',
'djkombu_queue',
)
]
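A quick sanity check of what the two generator expressions above produce when zipped, trimmed to two tables:

# Illustrative only -- same construction as the migration above, outside Django.
tables_to_drop = ['celery_taskmeta', 'celery_tasksetmeta']
postgres_sql = ([("DROP TABLE IF EXISTS {} CASCADE;".format(table))] for table in tables_to_drop)
sqlite_sql = ([("DROP TABLE IF EXISTS {};".format(table))] for table in tables_to_drop)
for p, s in zip(postgres_sql, sqlite_sql):
    print(p, s)   # each pair becomes one dbawaremigrations.RunSQL(p, sqlite_sql=s) operation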

View File

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
# AWX
import awx.main.fields
@@ -19,6 +20,7 @@ def setup_tower_managed_defaults(apps, schema_editor):
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0002_auto_20150616_2121'),
('main', '0066_v350_inventorysource_custom_virtualenv'),
]
@@ -58,6 +60,12 @@ class Migration(migrations.Migration):
'source_credential',
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_input_sources', to='main.Credential'),
),
(
'tags',
taggit.managers.TaggableManager(
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
),
),
(
'target_credential',
models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='input_sources', to='main.Credential'),

View File

@@ -2,8 +2,6 @@
from django.db import migrations, models, connection
from ._sqlite_helper import dbawaremigrations
def migrate_event_data(apps, schema_editor):
# see: https://github.com/ansible/awx/issues/6010
@@ -14,21 +12,19 @@ def migrate_event_data(apps, schema_editor):
# https://www.postgresql.org/docs/9.1/datatype-numeric.html)
for tblname in ('main_jobevent', 'main_inventoryupdateevent', 'main_projectupdateevent', 'main_adhoccommandevent', 'main_systemjobevent'):
with connection.cursor() as cursor:
# This loop used to do roughly the following:
# Rename the table to _old_<tablename>
# Create a new table from the old table (it would have no rows)
# Drop the old sequence and create a new one tied to the new table, and set the sequence to the last number from the old table
# This used to work with postgres spitting out a NOTICE and DETAIL
# With the django 4.2 upgrade that changed to an ERROR and HINT
# By the time we hit the 4.2 upgrade, no one should be upgrading a database this old directly to this new schema
# So we no longer really care about having to do all of this work, we only need a table with a bigint ID field
# And this can be achieved by just changing the id column type...
# rename the current event table
cursor.execute(f'ALTER TABLE {tblname} RENAME TO _old_{tblname};')
# create a *new* table with the same schema
cursor.execute(f'CREATE TABLE {tblname} (LIKE _old_{tblname} INCLUDING ALL);')
# alter the *new* table so that the primary key is a big int
cursor.execute(f'ALTER TABLE {tblname} ALTER COLUMN id TYPE bigint USING id::bigint;')
def migrate_event_data_sqlite(apps, schema_editor):
# TODO: cmeyers fill this in
return
# recreate counter for the new table's primary key to
# start where the *old* table left off (we have to do this because the
# counter changed from an int to a bigint)
cursor.execute(f'CREATE SEQUENCE IF NOT EXISTS "{tblname}_id_seq";')
cursor.execute(f"SELECT setval('{tblname}_id_seq', COALESCE((SELECT MAX(id)+1 FROM _old_{tblname}), 1), false);")
cursor.execute(f'DROP TABLE _old_{tblname};')
class FakeAlterField(migrations.AlterField):
@@ -44,7 +40,7 @@ class Migration(migrations.Migration):
]
operations = [
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
migrations.RunPython(migrate_event_data),
FakeAlterField(
model_name='adhoccommandevent',
name='id',

View File

@@ -4,10 +4,12 @@ from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0123_drop_hg_support'),
]
@@ -67,6 +69,12 @@ class Migration(migrations.Migration):
to='main.Organization',
),
),
(
'tags',
taggit.managers.TaggableManager(
blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'
),
),
],
options={
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('organization_id'), nulls_first=True), 'image'),

View File

@@ -1,7 +1,5 @@
from django.db import migrations, models, connection
from ._sqlite_helper import dbawaremigrations
def migrate_event_data(apps, schema_editor):
# see: https://github.com/ansible/awx/issues/9039
@@ -32,7 +30,7 @@ def migrate_event_data(apps, schema_editor):
# otherwise, the schema changes we would make on the old jobevents table
# (namely, dropping the primary key constraint) would cause the migration
# to suffer a serious performance degradation
cursor.execute(f'CREATE TABLE tmp_{tblname} (LIKE _unpartitioned_{tblname} INCLUDING ALL)')
cursor.execute(f'CREATE TABLE tmp_{tblname} ' f'(LIKE _unpartitioned_{tblname} INCLUDING ALL)')
# drop primary key constraint; in a partitioned table
# constraints must include the partition key itself
@@ -50,7 +48,7 @@ def migrate_event_data(apps, schema_editor):
cursor.execute(f'DROP TABLE tmp_{tblname}')
# recreate primary key constraint
cursor.execute(f'ALTER TABLE ONLY {tblname} ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')
cursor.execute(f'ALTER TABLE ONLY {tblname} ' f'ADD CONSTRAINT {tblname}_pkey_new PRIMARY KEY (id, job_created);')
with connection.cursor() as cursor:
"""
@@ -61,10 +59,6 @@ def migrate_event_data(apps, schema_editor):
cursor.execute('DROP INDEX IF EXISTS main_jobevent_job_id_idx')
def migrate_event_data_sqlite(apps, schema_editor):
return None
class FakeAddField(migrations.AddField):
def database_forwards(self, *args):
# this is intentionally left blank, because we're
@@ -78,7 +72,7 @@ class Migration(migrations.Migration):
]
operations = [
dbawaremigrations.RunPython(migrate_event_data, sqlite_code=migrate_event_data_sqlite),
migrations.RunPython(migrate_event_data),
FakeAddField(
model_name='jobevent',
name='job_created',

View File

@@ -1,4 +1,4 @@
# Generated by Django 4.2 on 2023-05-09 19:02
# Generated by Django 4.2 on 2023-04-21 14:43
import awx.main.fields
import awx.main.utils.polymorphic
@@ -9,102 +9,12 @@ import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0183_pre_django_upgrade'),
]
operations = [
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__1e4d24_idx',
old_fields=('ad_hoc_command', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__e72142_idx',
old_fields=('ad_hoc_command', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__a57777_idx',
old_fields=('ad_hoc_command', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='inventoryupdateevent',
new_name='main_invent_invento_f72b21_idx',
old_fields=('inventory_update', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='inventoryupdateevent',
new_name='main_invent_invento_364dcb_idx',
old_fields=('inventory_update', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_40a56d_idx',
old_fields=('job', 'job_created', 'parent_uuid'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_3c4a4a_idx',
old_fields=('job', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_51c382_idx',
old_fields=('job', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_0ddc6b_idx',
old_fields=('job', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_449bbd_idx',
old_fields=('project_update', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_69559a_idx',
old_fields=('project_update', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_c44b7c_idx',
old_fields=('project_update', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='role',
new_name='main_rbac_r_content_979bdd_idx',
old_fields=('content_type', 'object_id'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_b44606_idx',
old_fields=('ancestor', 'content_type_id', 'role_field'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_22b9f0_idx',
old_fields=('ancestor', 'content_type_id', 'object_id'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_c87b87_idx',
old_fields=('ancestor', 'descendent'),
),
migrations.RenameIndex(
model_name='systemjobevent',
new_name='main_system_system__e39825_idx',
old_fields=('system_job', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='systemjobevent',
new_name='main_system_system__73537a_idx',
old_fields=('system_job', 'job_created', 'counter'),
),
migrations.AlterField(
model_name='activitystream',
name='unified_job',

View File

@@ -0,0 +1,102 @@
# Generated by Django 4.2 on 2023-04-28 19:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0184_django_upgrade'),
]
operations = [
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__a57777_idx',
old_fields=('ad_hoc_command', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__e72142_idx',
old_fields=('ad_hoc_command', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='adhoccommandevent',
new_name='main_adhocc_ad_hoc__1e4d24_idx',
old_fields=('ad_hoc_command', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='inventoryupdateevent',
new_name='main_invent_invento_f72b21_idx',
old_fields=('inventory_update', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='inventoryupdateevent',
new_name='main_invent_invento_364dcb_idx',
old_fields=('inventory_update', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_51c382_idx',
old_fields=('job', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_0ddc6b_idx',
old_fields=('job', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_40a56d_idx',
old_fields=('job', 'job_created', 'parent_uuid'),
),
migrations.RenameIndex(
model_name='jobevent',
new_name='main_jobeve_job_id_3c4a4a_idx',
old_fields=('job', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_c44b7c_idx',
old_fields=('project_update', 'job_created', 'event'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_449bbd_idx',
old_fields=('project_update', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='projectupdateevent',
new_name='main_projec_project_69559a_idx',
old_fields=('project_update', 'job_created', 'counter'),
),
migrations.RenameIndex(
model_name='role',
new_name='main_rbac_r_content_979bdd_idx',
old_fields=('content_type', 'object_id'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_22b9f0_idx',
old_fields=('ancestor', 'content_type_id', 'object_id'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_b44606_idx',
old_fields=('ancestor', 'content_type_id', 'role_field'),
),
migrations.RenameIndex(
model_name='roleancestorentry',
new_name='main_rbac_r_ancesto_c87b87_idx',
old_fields=('ancestor', 'descendent'),
),
migrations.RenameIndex(
model_name='systemjobevent',
new_name='main_system_system__e39825_idx',
old_fields=('system_job', 'job_created', 'uuid'),
),
migrations.RenameIndex(
model_name='systemjobevent',
new_name='main_system_system__73537a_idx',
old_fields=('system_job', 'job_created', 'counter'),
),
]
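The RenameIndex operations above, keyed by old_fields, are what Django emits when a model's deprecated Meta.index_together tuples are rewritten as Meta.indexes entries. A minimal illustration follows; the model and fields are hypothetical and do not appear in this diff:

from django.db import models


class ExampleEvent(models.Model):
    # Hypothetical model used only to illustrate the Meta change behind
    # RenameIndex(..., old_fields=...); it is not part of the AWX schema.
    job_created = models.DateTimeField()
    counter = models.PositiveIntegerField()

    class Meta:
        # Before (deprecated in Django 4.2):
        #     index_together = [('job_created', 'counter')]
        # After: the same composite index expressed as an Index object, which
        # makemigrations renames in place rather than dropping and recreating.
        indexes = [models.Index(fields=['job_created', 'counter'])]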

View File

@@ -1,289 +0,0 @@
# Generated by Django 4.2.3 on 2023-08-02 13:18
import awx.main.models.notifications
from django.db import migrations, models
from ._sqlite_helper import dbawaremigrations
class Migration(migrations.Migration):
dependencies = [
('main', '0184_django_indexes'),
('conf', '0010_change_to_JSONField'),
]
operations = [
migrations.AlterField(
model_name='instancegroup',
name='policy_instance_list',
field=models.JSONField(
blank=True, default=list, help_text='List of exact-match Instances that will always be automatically assigned to this group'
),
),
migrations.AlterField(
model_name='jobtemplate',
name='survey_spec',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='notificationtemplate',
name='messages',
field=models.JSONField(
blank=True,
default=awx.main.models.notifications.NotificationTemplate.default_messages,
help_text='Optional custom messages for notification template.',
null=True,
),
),
migrations.AlterField(
model_name='notificationtemplate',
name='notification_configuration',
field=models.JSONField(default=dict),
),
migrations.AlterField(
model_name='project',
name='inventory_files',
field=models.JSONField(
blank=True,
default=list,
editable=False,
help_text='Suggested list of content that could be Ansible inventory in the project',
verbose_name='Inventory Files',
),
),
migrations.AlterField(
model_name='project',
name='playbook_files',
field=models.JSONField(blank=True, default=list, editable=False, help_text='List of playbooks found in the project', verbose_name='Playbook Files'),
),
migrations.AlterField(
model_name='schedule',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='schedule',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
migrations.AlterField(
model_name='workflowjobtemplate',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='workflowjobtemplate',
name='survey_spec',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='workflowjobtemplatenode',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name='workflowjobtemplatenode',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
# These are potentially a problem. Move the existing fields
# aside while pretending like they've been deleted, then add
# in fresh empty fields. Make the old fields nullable where
# needed while we are at it, so that new rows don't hit
# IntegrityError. We'll do the data migration out-of-band
# using a task.
migrations.RunSQL( # Already nullable
"ALTER TABLE main_activitystream RENAME deleted_actor TO deleted_actor_old;",
state_operations=[
migrations.RemoveField(
model_name='activitystream',
name='deleted_actor',
),
],
),
migrations.AddField(
model_name='activitystream',
name='deleted_actor',
field=models.JSONField(null=True),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_activitystream RENAME setting TO setting_old;
ALTER TABLE main_activitystream ALTER COLUMN setting_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_activitystream RENAME setting TO setting_old",
state_operations=[
migrations.RemoveField(
model_name='activitystream',
name='setting',
),
],
),
migrations.AddField(
model_name='activitystream',
name='setting',
field=models.JSONField(blank=True, default=dict),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old;
ALTER TABLE main_job ALTER COLUMN survey_passwords_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_job RENAME survey_passwords TO survey_passwords_old",
state_operations=[
migrations.RemoveField(
model_name='job',
name='survey_passwords',
),
],
),
migrations.AddField(
model_name='job',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old;
ALTER TABLE main_joblaunchconfig ALTER COLUMN char_prompts_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME char_prompts TO char_prompts_old",
state_operations=[
migrations.RemoveField(
model_name='joblaunchconfig',
name='char_prompts',
),
],
),
migrations.AddField(
model_name='joblaunchconfig',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;
ALTER TABLE main_joblaunchconfig ALTER COLUMN survey_passwords_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_joblaunchconfig RENAME survey_passwords TO survey_passwords_old;",
state_operations=[
migrations.RemoveField(
model_name='joblaunchconfig',
name='survey_passwords',
),
],
),
migrations.AddField(
model_name='joblaunchconfig',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_notification RENAME body TO body_old;
ALTER TABLE main_notification ALTER COLUMN body_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_notification RENAME body TO body_old",
state_operations=[
migrations.RemoveField(
model_name='notification',
name='body',
),
],
),
migrations.AddField(
model_name='notification',
name='body',
field=models.JSONField(blank=True, default=dict),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old;
ALTER TABLE main_unifiedjob ALTER COLUMN job_env_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_unifiedjob RENAME job_env TO job_env_old",
state_operations=[
migrations.RemoveField(
model_name='unifiedjob',
name='job_env',
),
],
),
migrations.AddField(
model_name='unifiedjob',
name='job_env',
field=models.JSONField(blank=True, default=dict, editable=False),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old;
ALTER TABLE main_workflowjob ALTER COLUMN char_prompts_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_workflowjob RENAME char_prompts TO char_prompts_old",
state_operations=[
migrations.RemoveField(
model_name='workflowjob',
name='char_prompts',
),
],
),
migrations.AddField(
model_name='workflowjob',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old;
ALTER TABLE main_workflowjob ALTER COLUMN survey_passwords_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_workflowjob RENAME survey_passwords TO survey_passwords_old",
state_operations=[
migrations.RemoveField(
model_name='workflowjob',
name='survey_passwords',
),
],
),
migrations.AddField(
model_name='workflowjob',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old;
ALTER TABLE main_workflowjobnode ALTER COLUMN char_prompts_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME char_prompts TO char_prompts_old",
state_operations=[
migrations.RemoveField(
model_name='workflowjobnode',
name='char_prompts',
),
],
),
migrations.AddField(
model_name='workflowjobnode',
name='char_prompts',
field=models.JSONField(blank=True, default=dict),
),
dbawaremigrations.RunSQL(
"""
ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old;
ALTER TABLE main_workflowjobnode ALTER COLUMN survey_passwords_old DROP NOT NULL;
""",
sqlite_sql="ALTER TABLE main_workflowjobnode RENAME survey_passwords TO survey_passwords_old",
state_operations=[
migrations.RemoveField(
model_name='workflowjobnode',
name='survey_passwords',
),
],
),
migrations.AddField(
model_name='workflowjobnode',
name='survey_passwords',
field=models.JSONField(blank=True, default=dict, editable=False),
),
]
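Per the comment near the top of this migration, each affected column is renamed to <column>_old (and made nullable where needed), a fresh JSONField column is added in its place, and the data is copied over later by a task. A rough sketch of that copy step, assuming PostgreSQL and JSON-encoded text in the old columns, might look like the following; it is illustrative only and not the actual awx.main.tasks.system.migrate_jsonfield implementation:

from django.db import connection


def migrate_jsonfield_sketch(table, pkfield, columns):
    """Copy values from the renamed *_old columns into the new JSONField columns."""
    # Table and column names come from a fixed list in the migration, never from
    # user input, so f-string interpolation is tolerable in this sketch. A real
    # task would likely chunk the UPDATE by pkfield ranges to avoid long locks.
    with connection.cursor() as cursor:
        for column in columns:
            cursor.execute(
                f"UPDATE {table} SET {column} = {column}_old::jsonb "
                f"WHERE {column}_old IS NOT NULL;"
            )
        for column in columns:
            # Once every column has been copied, the *_old columns can be dropped.
            cursor.execute(f"ALTER TABLE {table} DROP COLUMN IF EXISTS {column}_old;")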

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ._sqlite_helper import dbawaremigrations
def delete_taggit_contenttypes(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
ContentType.objects.filter(app_label='taggit').delete()
def delete_taggit_migration_records(apps, schema_editor):
recorder = migrations.recorder.MigrationRecorder(connection=schema_editor.connection)
recorder.migration_qs.filter(app='taggit').delete()
class Migration(migrations.Migration):
dependencies = [
('main', '0185_move_JSONBlob_to_JSONField'),
]
operations = [
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_tag CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_tag;"),
dbawaremigrations.RunSQL("DROP TABLE IF EXISTS taggit_taggeditem CASCADE;", sqlite_sql="DROP TABLE IF EXISTS taggit_taggeditem;"),
migrations.RunPython(delete_taggit_contenttypes),
migrations.RunPython(delete_taggit_migration_records),
]

View File

@@ -1,75 +0,0 @@
# Generated by Django 4.2.3 on 2023-08-04 20:50
import django.core.validators
from django.db import migrations, models
from django.conf import settings
def automatically_peer_from_control_plane(apps, schema_editor):
if settings.IS_K8S:
Instance = apps.get_model('main', 'Instance')
Instance.objects.filter(node_type='execution').update(peers_from_control_nodes=True)
Instance.objects.filter(node_type='control').update(listener_port=None)
class Migration(migrations.Migration):
dependencies = [
('main', '0186_drop_django_taggit'),
]
operations = [
migrations.AlterModelOptions(
name='instancelink',
options={'ordering': ('id',)},
),
migrations.AddField(
model_name='instance',
name='peers_from_control_nodes',
field=models.BooleanField(default=False, help_text='If True, control plane cluster nodes should automatically peer to it.'),
),
migrations.AlterField(
model_name='instance',
name='ip_address',
field=models.CharField(blank=True, default='', max_length=50),
),
migrations.AlterField(
model_name='instance',
name='listener_port',
field=models.PositiveIntegerField(
blank=True,
default=None,
help_text='Port that Receptor will listen for incoming connections on.',
null=True,
validators=[django.core.validators.MinValueValidator(1024), django.core.validators.MaxValueValidator(65535)],
),
),
migrations.AlterField(
model_name='instance',
name='peers',
field=models.ManyToManyField(related_name='peers_from', through='main.InstanceLink', to='main.instance'),
),
migrations.AlterField(
model_name='instancelink',
name='link_state',
field=models.CharField(
choices=[('adding', 'Adding'), ('established', 'Established'), ('removing', 'Removing')],
default='adding',
help_text='Indicates the current life cycle stage of this peer link.',
max_length=16,
),
),
migrations.AddConstraint(
model_name='instance',
constraint=models.UniqueConstraint(
condition=models.Q(('ip_address', ''), _negated=True),
fields=('ip_address',),
name='unique_ip_address_not_empty',
violation_error_message='Field ip_address must be unique.',
),
),
migrations.AddConstraint(
model_name='instancelink',
constraint=models.CheckConstraint(check=models.Q(('source', models.F('target')), _negated=True), name='source_and_target_can_not_be_equal'),
),
migrations.RunPython(automatically_peer_from_control_plane),
]

View File

@@ -158,7 +158,7 @@ class ec2(PluginFileInjector):
return {
# vars that change
'ec2_block_devices': (
"dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))"
"dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings " "| map(attribute='ebs.volume_id') | list))"
),
'ec2_dns_name': 'public_dns_name',
'ec2_group_name': 'placement.group_name',
@@ -635,7 +635,7 @@ class satellite6(PluginFileInjector):
"environment": {
"prefix": "{}environment_".format(group_prefix),
"separator": "",
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')",
"key": "foreman['environment_name'] | lower | regex_replace(' ', '') | " "regex_replace('[^A-Za-z0-9_]', '_') | regex_replace('none', '')",
},
"location": {
"prefix": "{}location_".format(group_prefix),
@@ -656,7 +656,7 @@ class satellite6(PluginFileInjector):
"content_view": {
"prefix": "{}content_view_".format(group_prefix),
"separator": "",
"key": "foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')",
"key": "foreman['content_facet_attributes']['content_view_name'] | " "lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9_]', '_')",
},
}

View File

@@ -1,61 +0,0 @@
from django.db import migrations
class RunSQL(migrations.operations.special.RunSQL):
"""
Bit of a hack here. Django actually wants this decision made in the router
and we can pass **hints.
"""
def __init__(self, *args, **kwargs):
if 'sqlite_sql' not in kwargs:
raise ValueError("sqlite_sql parameter required")
sqlite_sql = kwargs.pop('sqlite_sql')
self.sqlite_sql = sqlite_sql
self.sqlite_reverse_sql = kwargs.pop('sqlite_reverse_sql', None)
super().__init__(*args, **kwargs)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if not schema_editor.connection.vendor.startswith('postgres'):
self.sql = self.sqlite_sql or migrations.RunSQL.noop
super().database_forwards(app_label, schema_editor, from_state, to_state)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if not schema_editor.connection.vendor.startswith('postgres'):
self.reverse_sql = self.sqlite_reverse_sql or migrations.RunSQL.noop
super().database_backwards(app_label, schema_editor, from_state, to_state)
class RunPython(migrations.operations.special.RunPython):
"""
Bit of a hack here. Django actually wants this decision made in the router
and we can pass **hints.
"""
def __init__(self, *args, **kwargs):
if 'sqlite_code' not in kwargs:
raise ValueError("sqlite_code parameter required")
sqlite_code = kwargs.pop('sqlite_code')
self.sqlite_code = sqlite_code
self.sqlite_reverse_code = kwargs.pop('sqlite_reverse_code', None)
super().__init__(*args, **kwargs)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if not schema_editor.connection.vendor.startswith('postgres'):
self.code = self.sqlite_code or migrations.RunPython.noop
super().database_forwards(app_label, schema_editor, from_state, to_state)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if not schema_editor.connection.vendor.startswith('postgres'):
self.reverse_code = self.sqlite_reverse_code or migrations.RunPython.noop
super().database_backwards(app_label, schema_editor, from_state, to_state)
class _sqlitemigrations:
RunPython = RunPython
RunSQL = RunSQL
dbawaremigrations = _sqlitemigrations()
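Call sites pass the PostgreSQL statement positionally and the SQLite variant through the required sqlite_sql / sqlite_code keyword arguments, as in this illustrative migration (the table name and dependency below are placeholders, not AWX code):

from django.db import migrations

from ._sqlite_helper import dbawaremigrations


def _postgres_only_fixup(apps, schema_editor):
    # Backend-specific data fix would go here; skipped entirely on SQLite below.
    pass


class Migration(migrations.Migration):
    dependencies = [('main', '0001_initial')]  # placeholder dependency

    operations = [
        # PostgreSQL gets the CASCADE form; SQLite falls back to a plain DROP.
        dbawaremigrations.RunSQL(
            "DROP TABLE IF EXISTS example_table CASCADE;",
            sqlite_sql="DROP TABLE IF EXISTS example_table;",
        ),
        # sqlite_code is mandatory; noop makes the step a no-op on SQLite.
        dbawaremigrations.RunPython(
            _postgres_only_fixup,
            sqlite_code=migrations.RunPython.noop,
        ),
    ]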

View File

@@ -3,7 +3,6 @@
# Django
from django.conf import settings # noqa
from django.db import connection
from django.db.models.signals import pre_delete # noqa
# AWX
@@ -57,6 +56,7 @@ from awx.main.models.ha import ( # noqa
from awx.main.models.rbac import ( # noqa
Role,
batch_role_ancestor_rebuilding,
get_roles_on_resource,
role_summary_fields_generator,
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
@@ -90,64 +90,13 @@ from oauth2_provider.models import Grant, RefreshToken # noqa -- needed django-
# Add custom methods to User model for permissions checks.
from django.contrib.auth.models import User # noqa
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors # noqa
from awx.main.access import get_user_queryset, check_user_access, check_user_access_with_errors, user_accessible_objects # noqa
User.add_to_class('get_queryset', get_user_queryset)
User.add_to_class('can_access', check_user_access)
User.add_to_class('can_access_with_errors', check_user_access_with_errors)
def convert_jsonfields():
if connection.vendor != 'postgresql':
return
# fmt: off
fields = [
('main_activitystream', 'id', (
'deleted_actor',
'setting',
)),
('main_job', 'unifiedjob_ptr_id', (
'survey_passwords',
)),
('main_joblaunchconfig', 'id', (
'char_prompts',
'survey_passwords',
)),
('main_notification', 'id', (
'body',
)),
('main_unifiedjob', 'id', (
'job_env',
)),
('main_workflowjob', 'unifiedjob_ptr_id', (
'char_prompts',
'survey_passwords',
)),
('main_workflowjobnode', 'id', (
'char_prompts',
'survey_passwords',
)),
]
# fmt: on
with connection.cursor() as cursor:
for table, pkfield, columns in fields:
# Do the renamed old columns still exist? If so, run the task.
old_columns = ','.join(f"'{column}_old'" for column in columns)
cursor.execute(
f"""
select count(1) from information_schema.columns
where
table_name = %s and column_name in ({old_columns});
""",
(table,),
)
if cursor.fetchone()[0]:
from awx.main.tasks.system import migrate_jsonfield
migrate_jsonfield.apply_async([table, pkfield, columns])
User.add_to_class('accessible_objects', user_accessible_objects)
def cleanup_created_modified_by(sender, **kwargs):

Some files were not shown because too many files have changed in this diff.