Mirror of https://github.com/ansible/awx.git
Synced 2026-03-03 17:51:06 -03:30

Compare commits: feature_an...AAP-63318- (29 commits)
| SHA1 |
|---|
| d2912c3bdd |
| d1f4fc3e97 |
| 0f2692b504 |
| e1e2c60f2e |
| d8a2aa1dc3 |
| 9d61e42ede |
| 2c71bcda32 |
| a21f9fbdb8 |
| 2a35ce5524 |
| 567a980a03 |
| 9059cfbda6 |
| d8fd953732 |
| 39851c392a |
| aeba4a1a3f |
| 915deca78c |
| 1a79e853fe |
| 08f1507f70 |
| 994a2b3c04 |
| 7ccc14daeb |
| 9700fb01f2 |
| c515b86fa6 |
| 01293f1b45 |
| fd847862a7 |
| 980d9db192 |
| f2438a0e86 |
| 707f2fa5da |
| 1f18396438 |
| 6f0cfb5ace |
| fc0a4cddce |
.github/PULL_REQUEST_TEMPLATE.md (2 changes, vendored)
@@ -24,7 +24,7 @@ in as the first entry for your PR title.
-##### ADDITIONAL INFORMATION
+##### STEPS TO REPRODUCE AND EXTRA INFO
 <!---
 Include additional information to help people understand the change here.
 For bugs that don't have a linked bug report, a step-by-step reproduction
.github/workflows/api_schema_check.yml (40 changes, vendored)
@@ -45,15 +45,45 @@ jobs:
           make docker-runner 2>&1 | tee schema-diff.txt
           exit ${PIPESTATUS[0]}

-      - name: Add schema diff to job summary
+      - name: Validate OpenAPI schema
+        id: schema-validation
+        continue-on-error: true
+        run: |
+          AWX_DOCKER_ARGS='-e GITHUB_ACTIONS' \
+          AWX_DOCKER_CMD='make validate-openapi-schema' \
+          make docker-runner 2>&1 | tee schema-validation.txt
+          exit ${PIPESTATUS[0]}
+
+      - name: Add schema validation and diff to job summary
         if: always()
         # show text and if for some reason, it can't be generated, state that it can't be.
         run: |
-          echo "## API Schema Change Detection Results" >> $GITHUB_STEP_SUMMARY
+          echo "## API Schema Check Results" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
+
+          # Show validation status
+          echo "### OpenAPI Validation" >> $GITHUB_STEP_SUMMARY
+          if [ -f schema-validation.txt ] && grep -q "✓ Schema is valid" schema-validation.txt; then
+            echo "✅ **Status:** PASSED - Schema is valid OpenAPI 3.0.3" >> $GITHUB_STEP_SUMMARY
+          else
+            echo "❌ **Status:** FAILED - Schema validation failed" >> $GITHUB_STEP_SUMMARY
+            if [ -f schema-validation.txt ]; then
+              echo "" >> $GITHUB_STEP_SUMMARY
+              echo "<details><summary>Validation errors</summary>" >> $GITHUB_STEP_SUMMARY
+              echo "" >> $GITHUB_STEP_SUMMARY
+              echo '```' >> $GITHUB_STEP_SUMMARY
+              cat schema-validation.txt >> $GITHUB_STEP_SUMMARY
+              echo '```' >> $GITHUB_STEP_SUMMARY
+              echo "</details>" >> $GITHUB_STEP_SUMMARY
+            fi
+          fi
+          echo "" >> $GITHUB_STEP_SUMMARY
+
+          # Show schema changes
+          echo "### Schema Changes" >> $GITHUB_STEP_SUMMARY
           if [ -f schema-diff.txt ]; then
             if grep -q "^+" schema-diff.txt || grep -q "^-" schema-diff.txt; then
-              echo "### Schema changes detected" >> $GITHUB_STEP_SUMMARY
+              echo "**Changes detected** between this PR and the base branch" >> $GITHUB_STEP_SUMMARY
               echo "" >> $GITHUB_STEP_SUMMARY
               # Truncate to first 1000 lines to stay under GitHub's 1MB summary limit
               TOTAL_LINES=$(wc -l < schema-diff.txt)
@@ -65,8 +95,8 @@ jobs:
               head -n 1000 schema-diff.txt >> $GITHUB_STEP_SUMMARY
               echo '```' >> $GITHUB_STEP_SUMMARY
             else
-              echo "### No schema changes detected" >> $GITHUB_STEP_SUMMARY
+              echo "No schema changes detected" >> $GITHUB_STEP_SUMMARY
             fi
           else
-            echo "### Unable to generate schema diff" >> $GITHUB_STEP_SUMMARY
+            echo "Unable to generate schema diff" >> $GITHUB_STEP_SUMMARY
           fi
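A note on the `make docker-runner 2>&1 | tee schema-validation.txt` followed by `exit ${PIPESTATUS[0]}` idiom used in both steps: a plain pipeline would report `tee`'s exit status, so `PIPESTATUS[0]` is needed to keep the step's result bound to `make`. A rough Python equivalent of the same idea, as an illustrative sketch only (file and command names mirror the step above, and `make` is assumed to be on PATH):

```python
import subprocess
import sys

# Run `make docker-runner` with stderr folded into stdout (the 2>&1 part).
proc = subprocess.run(
    ["make", "docker-runner"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)

# The `tee` half: persist the log to a file...
with open("schema-validation.txt", "wb") as f:
    f.write(proc.stdout)

# ...and still show it in the job output.
sys.stdout.buffer.write(proc.stdout)

# Exit with make's status, not the log writer's (what ${PIPESTATUS[0]} does).
sys.exit(proc.returncode)
```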
.github/workflows/ci.yml (84 changes, vendored)
@@ -4,14 +4,46 @@ env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
   CI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
   DEV_DOCKER_OWNER: ${{ github.repository_owner }}
-  COMPOSE_TAG: ${{ github.base_ref || 'devel' }}
+  COMPOSE_TAG: ${{ github.base_ref || github.ref_name || 'devel' }}
   UPSTREAM_REPOSITORY_ID: 91594105
 on:
   pull_request:
   push:
     branches:
       - devel # needed to publish code coverage post-merge
+  schedule:
+    - cron: '0 12,18 * * 1-5'
+  workflow_dispatch: {}
 jobs:
+  trigger-release-branches:
+    name: "Dispatch CI to release branches"
+    if: github.event_name == 'schedule'
+    runs-on: ubuntu-latest
+    permissions:
+      actions: write
+    steps:
+      - name: Trigger CI on release_4.6
+        id: dispatch_release_46
+        continue-on-error: true
+        run: gh workflow run ci.yml --ref release_4.6
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GH_REPO: ${{ github.repository }}
+      - name: Trigger CI on stable-2.6
+        id: dispatch_stable_26
+        continue-on-error: true
+        run: gh workflow run ci.yml --ref stable-2.6
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          GH_REPO: ${{ github.repository }}
+      - name: Check dispatch results
+        if: steps.dispatch_release_46.outcome == 'failure' || steps.dispatch_stable_26.outcome == 'failure'
+        run: |
+          echo "One or more dispatches failed:"
+          echo " release_4.6: ${{ steps.dispatch_release_46.outcome }}"
+          echo " stable-2.6: ${{ steps.dispatch_stable_26.outcome }}"
+          exit 1
+
   common-tests:
     name: ${{ matrix.tests.name }}
     runs-on: ubuntu-latest
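The dispatch steps above each use `continue-on-error: true` so one unavailable branch cannot block the other dispatch; only the final step fails the job, after both outcomes are known. A minimal Python sketch of that pattern (the `dispatch` helper is illustrative, not part of the repo, and assumes the `gh` CLI is installed and authenticated):

```python
import subprocess

def dispatch(ref: str) -> bool:
    """Attempt one workflow dispatch; report success instead of raising."""
    result = subprocess.run(
        ["gh", "workflow", "run", "ci.yml", "--ref", ref],
        capture_output=True, text=True,
    )
    return result.returncode == 0

# Try every branch independently (the continue-on-error part)...
outcomes = {ref: dispatch(ref) for ref in ("release_4.6", "stable-2.6")}

# ...then fail once, at the end, if anything went wrong (the check step).
if not all(outcomes.values()):
    print("One or more dispatches failed:")
    for ref, ok in outcomes.items():
        print(f"  {ref}: {'success' if ok else 'failure'}")
    raise SystemExit(1)
```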
@@ -112,25 +144,27 @@ jobs:
           path: reports/coverage.xml
           retention-days: 5

-      - name: Upload awx jUnit test reports
+      - name: >-
+          Upload ${{
+            matrix.tests.coverage-upload-name || 'awx'
+          }} jUnit test reports to the unified dashboard
         if: >-
           !cancelled()
           && steps.make-run.outputs.test-result-files != ''
           && github.event_name == 'push'
           && env.UPSTREAM_REPOSITORY_ID == github.repository_id
           && github.ref_name == github.event.repository.default_branch
-        run: |
-          for junit_file in $(echo '${{ steps.make-run.outputs.test-result-files }}' | sed 's/,/ /')
-          do
-            curl \
-              -v \
-              --user "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}:${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}" \
-              --form "xunit_xml=@${junit_file}" \
-              --form "component_name=${{ matrix.tests.coverage-upload-name || 'awx' }}" \
-              --form "git_commit_sha=${{ github.sha }}" \
-              --form "git_repository_url=https://github.com/${{ github.repository }}" \
-              "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}/api/results/upload/"
-          done
+        uses: ansible/gh-action-record-test-results@fc552f81bf7e734cdebe6d04f9f608e2e2b4759e
+        with:
+          aggregation-server-url: ${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}
+          http-auth-password: >-
+            ${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}
+          http-auth-username: >-
+            ${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}
+          project-component-name: >-
+            ${{ matrix.tests.coverage-upload-name || 'awx' }}
+          test-result-files: >-
+            ${{ steps.make-run.outputs.test-result-files }}

   dev-env:
     runs-on: ubuntu-latest
@@ -294,18 +328,16 @@ jobs:
           && github.event_name == 'push'
           && env.UPSTREAM_REPOSITORY_ID == github.repository_id
           && github.ref_name == github.event.repository.default_branch
-        run: |
-          for junit_file in $(echo '${{ steps.make-run.outputs.test-result-files }}' | sed 's/,/ /')
-          do
-            curl \
-              -v \
-              --user "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}:${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}" \
-              --form "xunit_xml=@${junit_file}" \
-              --form "component_name=awx" \
-              --form "git_commit_sha=${{ github.sha }}" \
-              --form "git_repository_url=https://github.com/${{ github.repository }}" \
-              "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}/api/results/upload/"
-          done
+        uses: ansible/gh-action-record-test-results@fc552f81bf7e734cdebe6d04f9f608e2e2b4759e
+        with:
+          aggregation-server-url: ${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}
+          http-auth-password: >-
+            ${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}
+          http-auth-username: >-
+            ${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}
+          project-component-name: awx
+          test-result-files: >-
+            ${{ steps.make-run.outputs.test-result-files }}

   collection-integration:
     name: awx_collection integration
.github/workflows/spec-sync-on-merge.yml (176 changes, new file, vendored)
@@ -0,0 +1,176 @@
# Sync OpenAPI Spec on Merge
#
# This workflow runs when code is merged to the devel branch.
# It runs the dev environment to generate the OpenAPI spec, then syncs it to
# the central spec repository.
#
# FLOW: PR merged → push to branch → dev environment runs → spec synced to central repo
#
# NOTE: This is an inlined version for testing with private forks.
# Production version will use a reusable workflow from the org repos.
name: Sync OpenAPI Spec on Merge

env:
  LC_ALL: "C.UTF-8"
  DEV_DOCKER_OWNER: ${{ github.repository_owner }}

on:
  push:
    branches:
      - devel
  workflow_dispatch: # Allow manual triggering for testing

jobs:
  sync-openapi-spec:
    name: Sync OpenAPI spec to central repo
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Checkout Controller repository
        uses: actions/checkout@v4
        with:
          show-progress: false

      - name: Build awx_devel image to use for schema gen
        uses: ./.github/actions/awx_devel_image
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          private-github-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

      - name: Generate API Schema
        run: |
          DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} \
          COMPOSE_TAG=${{ github.base_ref || github.ref_name }} \
          docker run -u $(id -u) --rm -v ${{ github.workspace }}:/awx_devel/:Z \
          --workdir=/awx_devel `make print-DEVEL_IMAGE_NAME` /start_tests.sh genschema

      - name: Verify spec file exists
        run: |
          SPEC_FILE="./schema.json"
          if [ ! -f "$SPEC_FILE" ]; then
            echo "❌ Spec file not found at $SPEC_FILE"
            echo "Contents of workspace:"
            ls -la .
            exit 1
          fi
          echo "✅ Found spec file at $SPEC_FILE"

      - name: Checkout spec repo
        id: checkout_spec_repo
        continue-on-error: true
        uses: actions/checkout@v4
        with:
          repository: ansible-automation-platform/aap-openapi-specs
          ref: ${{ github.ref_name }}
          path: spec-repo
          token: ${{ secrets.OPENAPI_SPEC_SYNC_TOKEN }}

      - name: Fail if branch doesn't exist
        if: steps.checkout_spec_repo.outcome == 'failure'
        run: |
          echo "##[error]❌ Branch '${{ github.ref_name }}' does not exist in the central spec repository."
          echo "##[error]Expected branch: ${{ github.ref_name }}"
          echo "##[error]This branch must be created in the spec repo before specs can be synced."
          exit 1

      - name: Compare specs
        id: compare
        run: |
          COMPONENT_SPEC="./schema.json"
          SPEC_REPO_FILE="spec-repo/controller.json"

          # Check if spec file exists in spec repo
          if [ ! -f "$SPEC_REPO_FILE" ]; then
            echo "Spec file doesn't exist in spec repo - will create new file"
            echo "has_diff=true" >> $GITHUB_OUTPUT
            echo "is_new_file=true" >> $GITHUB_OUTPUT
          else
            # Compare files
            if diff -q "$COMPONENT_SPEC" "$SPEC_REPO_FILE" > /dev/null; then
              echo "✅ No differences found - specs are identical"
              echo "has_diff=false" >> $GITHUB_OUTPUT
            else
              echo "📝 Differences found - spec has changed"
              echo "has_diff=true" >> $GITHUB_OUTPUT
              echo "is_new_file=false" >> $GITHUB_OUTPUT
            fi
          fi

      - name: Update spec file
        if: steps.compare.outputs.has_diff == 'true'
        run: |
          cp "./schema.json" "spec-repo/controller.json"
          echo "✅ Updated spec-repo/controller.json"

      - name: Create PR in spec repo
        if: steps.compare.outputs.has_diff == 'true'
        working-directory: spec-repo
        env:
          GH_TOKEN: ${{ secrets.OPENAPI_SPEC_SYNC_TOKEN }}
          COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
        run: |
          # Configure git
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          # Create branch for PR
          SHORT_SHA="${{ github.sha }}"
          SHORT_SHA="${SHORT_SHA:0:7}"
          BRANCH_NAME="update-Controller-${{ github.ref_name }}-${SHORT_SHA}"
          git checkout -b "$BRANCH_NAME"

          # Add and commit changes
          git add "controller.json"

          if [ "${{ steps.compare.outputs.is_new_file }}" == "true" ]; then
            COMMIT_MSG="Add Controller OpenAPI spec for ${{ github.ref_name }}"
          else
            COMMIT_MSG="Update Controller OpenAPI spec for ${{ github.ref_name }}"
          fi

          git commit -m "$COMMIT_MSG

          Synced from ${{ github.repository }}@${{ github.sha }}
          Source branch: ${{ github.ref_name }}

          Co-Authored-By: github-actions[bot] <github-actions[bot]@users.noreply.github.com>"

          # Push branch
          git push origin "$BRANCH_NAME"

          # Create PR
          PR_TITLE="[${{ github.ref_name }}] Update Controller spec from merged commit"
          PR_BODY="## Summary
          Automated OpenAPI spec sync from component repository merge.

          **Source:** ${{ github.repository }}@${{ github.sha }}
          **Branch:** \`${{ github.ref_name }}\`
          **Component:** \`Controller\`
          **Spec File:** \`controller.json\`

          ## Changes
          $(if [ "${{ steps.compare.outputs.is_new_file }}" == "true" ]; then echo "- 🆕 New spec file created"; else echo "- 📝 Spec file updated with latest changes"; fi)

          ## Source Commit
          \`\`\`
          ${COMMIT_MESSAGE}
          \`\`\`

          ---
          🤖 This PR was automatically generated by the OpenAPI spec sync workflow."

          gh pr create \
            --title "$PR_TITLE" \
            --body "$PR_BODY" \
            --base "${{ github.ref_name }}" \
            --head "$BRANCH_NAME"

          echo "✅ Created PR in spec repo"

      - name: Report results
        if: always()
        run: |
          if [ "${{ steps.compare.outputs.has_diff }}" == "true" ]; then
            echo "📝 Spec sync completed - PR created in spec repo"
          else
            echo "✅ Spec sync completed - no changes needed"
          fi
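One design note on the Compare specs step: `diff -q` is a byte-level comparison, so a regenerated spec that differs only in formatting or key order would still be treated as a change and open a PR. A minimal sketch of a structural comparison instead, assuming both files are valid JSON (paths match the workflow above; this is not part of the workflow itself):

```python
import json

def specs_differ(component_spec: str, spec_repo_file: str) -> bool:
    """Compare two OpenAPI specs as parsed JSON rather than as raw bytes."""
    with open(component_spec) as a, open(spec_repo_file) as b:
        # dict equality ignores key order and whitespace; list order still matters
        return json.load(a) != json.load(b)

if __name__ == "__main__":
    print(specs_differ("schema.json", "spec-repo/controller.json"))
```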
Makefile (6 changes)
@@ -79,7 +79,7 @@ RECEPTOR_IMAGE ?= quay.io/ansible/receptor:devel
 SRC_ONLY_PKGS ?= cffi,pycparser,psycopg,twilio
 # These should be upgraded in the AWX and Ansible venv before attempting
 # to install the actual requirements
-VENV_BOOTSTRAP ?= pip==25.3 setuptools==80.9.0 setuptools_scm[toml]==9.2.2 wheel==0.45.1 cython==3.1.3
+VENV_BOOTSTRAP ?= pip==25.3 setuptools==80.9.0 setuptools_scm[toml]==9.2.2 wheel==0.46.3 cython==3.1.3

 NAME ?= awx

@@ -579,6 +579,10 @@ detect-schema-change: genschema
	# diff exits with 1 when files differ - capture but don't fail
	-diff -u -b reference-schema.json schema.json

validate-openapi-schema: genschema
	@echo "Validating OpenAPI schema from schema.json..."
	@python3 -c "from openapi_spec_validator import validate; import json; spec = json.load(open('schema.json')); validate(spec); print('✓ OpenAPI Schema is valid!')"

docker-compose-clean: awx/projects
	$(DOCKER_COMPOSE) -f tools/docker-compose/_sources/docker-compose.yml rm -sf
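The new `validate-openapi-schema` target packs the whole check into one `python3 -c` line. Unrolled for readability, with the same `openapi_spec_validator` call the Makefile uses; the explicit exception handling here is an illustrative addition, and the exception import path is as found in recent openapi-spec-validator releases:

```python
import json

from openapi_spec_validator import validate
from openapi_spec_validator.validation.exceptions import OpenAPIValidationError

# Load the generated schema, exactly as the Makefile one-liner does.
with open('schema.json') as f:
    spec = json.load(f)

try:
    validate(spec)  # raises if the document is not a valid OpenAPI spec
    print('✓ OpenAPI Schema is valid!')
except OpenAPIValidationError as exc:
    raise SystemExit(f'Schema validation failed: {exc}')
```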
@@ -89,7 +89,7 @@ class DeprecatedCredentialField(serializers.IntegerField):
     def to_internal_value(self, pk):
         try:
             pk = int(pk)
-        except ValueError:
+        except (ValueError, TypeError):
             self.fail('invalid')
         try:
             Credential.objects.get(pk=pk)
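The widened except clause is the whole fix: `int()` raises `ValueError` for malformed strings but `TypeError` for non-scalar input, so catching only `ValueError` let dicts and lists bubble up as a 500. A quick standalone illustration:

```python
# int() raises different exceptions for different bad inputs; only the
# first of these was being caught before this change.
for bad in ("abc", {"username": "admin"}, [1, 2, 3]):
    try:
        int(bad)
    except (ValueError, TypeError) as exc:
        print(type(exc).__name__)  # ValueError, TypeError, TypeError
```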
@@ -25,7 +25,6 @@ import requests

 from ansible_base.lib.utils.schema import extend_schema_if_available

-from awx import MODE
 from awx.api.generics import APIView
 from awx.conf.registry import settings_registry
 from awx.main.analytics import all_collectors
@@ -33,7 +32,7 @@ from awx.main.ha import is_ha_environment
 from awx.main.tasks.system import clear_setting_cache
 from awx.main.utils import get_awx_version, get_custom_venv_choices
 from awx.main.utils.licensing import validate_entitlement_manifest
-from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
+from awx.api.versioning import URLPathVersioning, reverse
 from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
 from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
 from awx.main.utils import set_environ
@@ -62,8 +61,6 @@ class ApiRootView(APIView):
         data['custom_logo'] = settings.CUSTOM_LOGO
         data['custom_login_info'] = settings.CUSTOM_LOGIN_INFO
         data['login_redirect_override'] = settings.LOGIN_REDIRECT_OVERRIDE
-        if MODE == 'development':
-            data['docs'] = drf_reverse('api:schema-swagger-ui')
         return Response(data)
@@ -27,6 +27,10 @@ def get_dispatcherd_config(for_service: bool = False, mock_publish: bool = False
         "pool_kwargs": {
             "min_workers": settings.JOB_EVENT_WORKERS,
             "max_workers": max_workers,
+            # This must be less than max_workers to make sense, which is usually 4
+            # With reserve of 1, after a burst of tasks, load needs to down to 4-1=3
+            # before we return to min_workers
+            "scaledown_reserve": 1,
         },
         "main_kwargs": {"node_id": settings.CLUSTER_HOST_ID},
         "process_manager_cls": "ForkServerManager",
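A sketch of the scale-down hysteresis the new comment describes; the real policy lives in dispatcherd's worker pool, and the helper below is purely illustrative. With `min_workers=4` and `scaledown_reserve=1`, the pool only shrinks back toward `min_workers` once load drops to `4 - 1 = 3`, so a brief lull right after a burst does not immediately tear workers down:

```python
def should_scale_down(active_tasks: int, min_workers: int, reserve: int) -> bool:
    """Illustrative: scale down only once load falls below min_workers - reserve."""
    return active_tasks <= min_workers - reserve

print(should_scale_down(4, 4, 1))  # False: still at the burst level
print(should_scale_down(3, 4, 1))  # True: load has genuinely subsided
```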
@@ -21,6 +21,6 @@ class Migration(migrations.Migration):
     ]

     operations = [
-        migrations.RunPython(setup_tower_managed_defaults),
-        migrations.RunPython(setup_rbac_role_system_administrator),
+        migrations.RunPython(setup_tower_managed_defaults, migrations.RunPython.noop),
+        migrations.RunPython(setup_rbac_role_system_administrator, migrations.RunPython.noop),
     ]
@@ -98,5 +98,5 @@ class Migration(migrations.Migration):
     ]

     operations = [
-        migrations.RunPython(convert_controller_role_definitions),
+        migrations.RunPython(convert_controller_role_definitions, migrations.RunPython.noop),
     ]
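Both migration edits apply the same pattern: giving `RunPython` an explicit no-op reverse callable makes the operation reversible, so `manage.py migrate` can step backward past it instead of raising `IrreversibleError`. A minimal sketch with a hypothetical data-fix function:

```python
from django.db import migrations

def forwards(apps, schema_editor):
    # hypothetical one-way data fix; nothing sensible to undo on reverse
    pass

class Migration(migrations.Migration):
    dependencies = []
    operations = [
        # Without the second argument, reversing this migration raises
        # IrreversibleError; RunPython.noop makes the reverse a no-op.
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]
```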
@@ -3,19 +3,15 @@ from django.db import migrations, models
 from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt


 # --- START of function merged from 0203_rename_github_app_kind.py ---
 def update_github_app_kind(apps, schema_editor):
     """
-    Updates the 'kind' field for CredentialType records
+    Updates the 'namespace' field for CredentialType records
     from 'github_app' to 'github_app_lookup'.
     This addresses a change in the entry point key for the GitHub App plugin.
     """
     CredentialType = apps.get_model('main', 'CredentialType')
     db_alias = schema_editor.connection.alias
-    CredentialType.objects.using(db_alias).filter(kind='github_app').update(kind='github_app_lookup')
-
-
-# --- END of function merged from 0203_rename_github_app_kind.py ---
+    CredentialType.objects.using(db_alias).filter(namespace='github_app').update(namespace='github_app_lookup')


 class Migration(migrations.Migration):
@@ -118,7 +114,5 @@ class Migration(migrations.Migration):
                 max_length=32,
             ),
         ),
-        # --- START of operations merged from 0203_rename_github_app_kind.py ---
         migrations.RunPython(update_github_app_kind, migrations.RunPython.noop),
-        # --- END of operations merged from 0203_rename_github_app_kind.py ---
     ]
@@ -188,6 +188,16 @@ class SurveyJobTemplateMixin(models.Model):
                     runtime_extra_vars.pop(variable_key)

             if default is not None:
+                # do not add variables that contain an empty string, are not required and are not present in extra_vars
+                # password fields must be skipped, because default values have special behaviour
+                if (
+                    default == ''
+                    and not survey_element.get('required')
+                    and survey_element.get('type') != 'password'
+                    and variable_key not in runtime_extra_vars
+                ):
+                    continue
+
                 decrypted_default = default
                 if survey_element['type'] == "password" and isinstance(decrypted_default, str) and decrypted_default.startswith('$encrypted$'):
                     decrypted_default = decrypt_value(get_encryption_key('value', pk=None), decrypted_default)
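The effect of the new guard, sketched with plain dicts: an optional text question whose default is an empty string is no longer injected at launch, while an empty string that the user supplies at runtime still survives, because by then the variable is present in `runtime_extra_vars` (the regression test for this appears further down):

```python
# Illustrative stand-ins for the survey element and runtime vars above.
survey_element = {'variable': 'c', 'default': '', 'required': False, 'type': 'text'}
runtime_extra_vars = {}

skip = (
    survey_element['default'] == ''
    and not survey_element.get('required')
    and survey_element.get('type') != 'password'
    and survey_element['variable'] not in runtime_extra_vars
)
print(skip)  # True: the empty default stays out of extra_vars

runtime_extra_vars = {'c': ''}  # user explicitly passed an empty string
print(survey_element['variable'] not in runtime_extra_vars)  # False: kept
```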
@@ -17,7 +17,6 @@ import urllib.parse as urlparse

 # Django
 from django.conf import settings
-from django.db import transaction

 # Shared code for the AWX platform
 from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
@@ -84,6 +83,7 @@ from awx.main.utils.common import (
     create_partition,
     ScheduleWorkflowManager,
     ScheduleTaskManager,
+    getattr_dne,
 )
 from awx.conf.license import get_license
 from awx.main.utils.handlers import SpecialInventoryHandler
@@ -92,9 +92,92 @@ from awx.main.utils.update_model import update_model
 # Django flags
 from flags.state import flag_enabled

+# Workload Identity
+from ansible_base.lib.workload_identity.controller import AutomationControllerJobScope
+
+from ansible_base.resource_registry.workload_identity_client import (
+    get_workload_identity_client,
+)
+
 logger = logging.getLogger('awx.main.tasks.jobs')


+def populate_claims_for_workload(unified_job) -> dict:
+    """
+    Extract JWT claims from a Controller workload for the aap_controller_automation_job scope.
+    """
+
+    # Related objects in the UnifiedJob model, applies to all job types
+    organization = getattr_dne(unified_job, 'organization')
+    ujt = getattr_dne(unified_job, 'unified_job_template')
+    instance_group = getattr_dne(unified_job, 'instance_group')
+
+    claims = {
+        AutomationControllerJobScope.CLAIM_JOB_ID: unified_job.id,
+        AutomationControllerJobScope.CLAIM_JOB_NAME: unified_job.name,
+        AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: unified_job.launch_type,
+    }
+
+    # Related objects in the UnifiedJob model, applies to all job types
+    # null cases are omitted because of OIDC
+    if organization := getattr_dne(unified_job, 'organization'):
+        claims[AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME] = organization.name
+        claims[AutomationControllerJobScope.CLAIM_ORGANIZATION_ID] = organization.id
+
+    if ujt := getattr_dne(unified_job, 'unified_job_template'):
+        claims[AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_NAME] = ujt.name
+        claims[AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_ID] = ujt.id
+
+    if instance_group := getattr_dne(unified_job, 'instance_group'):
+        claims[AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_NAME] = instance_group.name
+        claims[AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_ID] = instance_group.id
+
+    # Related objects on concrete models, may not be valid for type of unified_job
+    if inventory := getattr_dne(unified_job, 'inventory', None):
+        claims[AutomationControllerJobScope.CLAIM_INVENTORY_NAME] = inventory.name
+        claims[AutomationControllerJobScope.CLAIM_INVENTORY_ID] = inventory.id
+
+    if execution_environment := getattr_dne(unified_job, 'execution_environment', None):
+        claims[AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_NAME] = execution_environment.name
+        claims[AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_ID] = execution_environment.id
+
+    if project := getattr_dne(unified_job, 'project', None):
+        claims[AutomationControllerJobScope.CLAIM_PROJECT_NAME] = project.name
+        claims[AutomationControllerJobScope.CLAIM_PROJECT_ID] = project.id
+
+    if jt := getattr_dne(unified_job, 'job_template', None):
+        claims[AutomationControllerJobScope.CLAIM_JOB_TEMPLATE_NAME] = jt.name
+        claims[AutomationControllerJobScope.CLAIM_JOB_TEMPLATE_ID] = jt.id
+
+    # Only valid for job templates
+    if hasattr(unified_job, 'playbook'):
+        claims[AutomationControllerJobScope.CLAIM_PLAYBOOK_NAME] = unified_job.playbook
+
+    # Not valid for inventory updates and system jobs
+    if hasattr(unified_job, 'job_type'):
+        claims[AutomationControllerJobScope.CLAIM_JOB_TYPE] = unified_job.job_type
+
+    launched_by: dict = unified_job.launched_by
+    if 'name' in launched_by:
+        claims[AutomationControllerJobScope.CLAIM_LAUNCHED_BY_NAME] = launched_by['name']
+    if 'id' in launched_by:
+        claims[AutomationControllerJobScope.CLAIM_LAUNCHED_BY_ID] = launched_by['id']
+
+    return claims
+
+
+def retrieve_workload_identity_jwt(unified_job: UnifiedJob, audience: str, scope: str) -> str:
+    """Retrieve JWT token from workload claims.
+    Raises:
+        RuntimeError: if the workload identity client is not configured.
+    """
+    client = get_workload_identity_client()
+    if client is None:
+        raise RuntimeError("Workload identity client is not configured")
+    claims = populate_claims_for_workload(unified_job)
+    return client.request_workload_jwt(claims=claims, scope=scope, audience=audience).jwt
+
+
 def with_path_cleanup(f):
     @functools.wraps(f)
     def _wrapped(self, *args, **kwargs):
@@ -121,6 +204,7 @@ def dispatch_waiting_jobs(binder):
         if not kwargs:
             kwargs = {}
         binder.control('run', data={'task': serialize_task(uj._get_task_class()), 'args': [uj.id], 'kwargs': kwargs, 'uuid': uj.celery_task_id})
+        UnifiedJob.objects.filter(pk=uj.pk, status='waiting').update(status='running', start_args='')


 class BaseTask(object):
@@ -467,48 +551,32 @@ class BaseTask(object):
     def should_use_fact_cache(self):
         return False

-    def transition_status(self, pk: int) -> bool:
-        """Atomically transition status to running, if False returned, another process got it"""
-        with transaction.atomic():
-            # Explanation of parts for the fetch:
-            # .values - avoid loading a full object, this is known to lead to deadlocks due to signals
-            # the signals load other related rows which another process may be locking, and happens in practice
-            # of=('self',) - keeps FK tables out of the lock list, another way deadlocks can happen
-            # .get - just load the single job
-            instance_data = UnifiedJob.objects.select_for_update(of=('self',)).values('status', 'cancel_flag').get(pk=pk)
-
-            # If status is not waiting (obtained under lock) then this process does not have clearence to run
-            if instance_data['status'] == 'waiting':
-                if instance_data['cancel_flag']:
-                    updated_status = 'canceled'
-                else:
-                    updated_status = 'running'
-                # Explanation of the update:
-                # .filter - again, do not load the full object
-                # .update - a bulk update on just that one row, avoid loading unintended data
-                UnifiedJob.objects.filter(pk=pk).update(status=updated_status, start_args='')
-            elif instance_data['status'] == 'running':
-                logger.info(f'Job {pk} is being ran by another process, exiting')
-                return False
-        return True
-
     @with_path_cleanup
     @with_signal_handling
     def run(self, pk, **kwargs):
         """
         Run the job/task and capture its output.
         """
-        if not self.instance:  # Used to skip fetch for local runs
-            if not self.transition_status(pk):
-                logger.info(f'Job {pk} is being ran by another process, exiting')
-                return
-
-            # Load the instance
-            self.instance = self.update_model(pk)
+        if not self.instance:  # Used to skip fetch for local runs
+            # Load the instance
+            self.instance = self.update_model(pk)
+
+        # status should be "running" from dispatch_waiting_jobs,
+        # but may still be "waiting" if the worker picked this up before the status update landed.
+        if self.instance.status == 'waiting':
+            UnifiedJob.objects.filter(pk=pk).update(status="running", start_args='')
+            self.instance.refresh_from_db()
+
+        if self.instance.status != 'running':
+            logger.error(f'Not starting {self.instance.status} task pk={pk} because its status "{self.instance.status}" is not expected')
+            return
+
         if self.instance.cancel_flag:
             self.instance = self.update_model(pk, status='canceled')
             self.instance.websocket_emit_status('canceled')
             return

         self.instance.websocket_emit_status("running")
         status, rc = 'error', None
         self.runner_callback.event_ct = 0
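The replacement flow moves the waiting-to-running transition into `dispatch_waiting_jobs`, while `run()` tolerates a worker that observes the job before that update lands. Both sides lean on the fact that a filtered bulk update is a single SQL statement (`UPDATE ... WHERE status = 'waiting'`), which acts as an atomic compare-and-set without the `select_for_update` lock the removed `transition_status` needed. A hedged sketch of that primitive, usable only inside an AWX environment (the function name is illustrative):

```python
from awx.main.models import UnifiedJob

def claim_waiting_job(pk: int) -> bool:
    """Sketch: atomically claim a waiting job via a filtered bulk update.

    At most one caller sees a nonzero row count, even if the dispatcher
    and a worker race on the same row, because the WHERE clause and the
    SET happen in one statement.
    """
    rows = UnifiedJob.objects.filter(pk=pk, status='waiting').update(
        status='running', start_args=''
    )
    return rows == 1
```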
@@ -463,6 +463,26 @@ class TestInventorySourceCredential:
         assert 'Cloud-based inventory sources (such as ec2)' in r.data['credential'][0]
         assert 'require credentials for the matching cloud service' in r.data['credential'][0]

+    def test_credential_dict_value_returns_400(self, inventory, admin_user, put):
+        """Passing a dict for the credential field should return 400, not 500.
+
+        Reproduces a bug where int() raises TypeError on non-scalar types
+        (dict, list) which was uncaught, resulting in a 500 Internal Server Error.
+        """
+        inv_src = InventorySource.objects.create(name='test-src', inventory=inventory, source='ec2')
+        r = put(
+            url=reverse('api:inventory_source_detail', kwargs={'pk': inv_src.pk}),
+            data={
+                'name': 'test-src',
+                'inventory': inventory.pk,
+                'source': 'ec2',
+                'credential': {'username': 'admin', 'password': 'secret'},
+            },
+            user=admin_user,
+            expect=400,
+        )
+        assert r.status_code == 400
+
     def test_vault_credential_not_allowed(self, project, inventory, vault_credential, admin_user, post):
         """Vault credentials cannot be associated via the deprecated field"""
         # TODO: when feature is added, add tests to use the related credentials
@@ -1,4 +1,3 @@
-from datetime import date
 from unittest import mock

 import pytest
@@ -253,7 +252,7 @@ def test_user_verify_attribute_created(admin, get):
     resp = get(reverse('api:user_detail', kwargs={'pk': admin.pk}), admin)
     assert resp.data['created'] == admin.date_joined

-    past = date(2020, 1, 1).isoformat()
+    past = "2020-01-01T00:00:00Z"
     for op, count in (('gt', 1), ('lt', 0)):
         resp = get(reverse('api:user_list') + f'?created__{op}={past}', admin)
         assert resp.data['count'] == count
@@ -29,3 +29,30 @@ def test_cancel_flag_on_start(jt_linked, caplog):

     job = Job.objects.get(id=job.id)
     assert job.status == 'canceled'
+
+
+@pytest.mark.django_db
+def test_runjob_run_can_accept_waiting_status(jt_linked, mocker):
+    """Test that RunJob.run() can accept a job in 'waiting' status and transition it to 'running'
+    before the pre_run_hook is called"""
+    job = jt_linked.create_unified_job()
+    job.status = 'waiting'
+    job.save()
+
+    status_at_pre_run = None
+
+    def capture_status(instance, private_data_dir):
+        nonlocal status_at_pre_run
+        instance.refresh_from_db()
+        status_at_pre_run = instance.status
+
+    mock_pre_run = mocker.patch.object(RunJob, 'pre_run_hook', side_effect=capture_status)
+
+    task = RunJob()
+    try:
+        task.run(job.id)
+    except Exception:
+        pass
+
+    mock_pre_run.assert_called_once()
+    assert status_at_pre_run == 'running'
@@ -173,3 +173,54 @@ class TestMigrationSmoke:
         assert Role.objects.filter(
             singleton_name='system_administrator', role_field='system_administrator'
         ).exists(), "expected to find a system_administrator singleton role"
+
+
+@pytest.mark.django_db
+class TestGithubAppBug:
+    """
+    Tests that `awx-manage createsuperuser` runs successfully after
+    the `github_app` CredentialType kind is updated to `github_app_lookup`
+    via the migration.
+    """
+
+    def test_after_github_app_kind_migration(self, migrator):
+        """
+        Verifies that `createsuperuser` does not raise a KeyError
+        after the 0204_squashed_deletions migration (which includes
+        the `update_github_app_kind` logic) is applied.
+        """
+        # 1. Apply migrations up to the point *before* the 0204_squashed_deletions migration.
+        # This simulates the state where the problematic CredentialType might exist.
+        # We use 0203_remove_team_of_teams as the direct predecessor.
+        old_state = migrator.apply_tested_migration(('main', '0203_remove_team_of_teams'))
+
+        # Get the CredentialType model from the historical state.
+        CredentialType = old_state.apps.get_model('main', 'CredentialType')
+
+        # Create a CredentialType with the old, problematic 'namespace' value
+        CredentialType.objects.create(
+            name='Legacy GitHub App Credential',
+            kind='external',
+            namespace='github_app',  # The namespace that causes the KeyError in the registry lookup
+            managed=True,
+            created=now(),
+            modified=now(),
+        )
+
+        # Apply the migration that includes the fix (0204_squashed_deletions).
+        new_state = migrator.apply_tested_migration(('main', '0204_squashed_deletions'))
+
+        # Verify that the CredentialType with the old 'kind' no longer exists
+        # and the 'kind' has been updated to the new value.
+        CredentialType = new_state.apps.get_model('main', 'CredentialType')  # Get CredentialType model from the new state
+
+        # Assertion 1: The CredentialType with the old 'github_app' kind should no longer exist.
+        assert not CredentialType.objects.filter(
+            namespace='github_app'
+        ).exists(), "CredentialType with old 'github_app' kind should no longer exist after migration."
+
+        # Assertion 2: The CredentialType should now exist with the new 'github_app_lookup' kind
+        # and retain its original name.
+        assert CredentialType.objects.filter(
+            namespace='github_app_lookup', name='Legacy GitHub App Credential'
+        ).exists(), "CredentialType should be updated to 'github_app_lookup' and retain its name."
@@ -18,7 +18,7 @@ from awx.main.tests.functional.conftest import *  # noqa
 from awx.main.tests.conftest import load_all_credentials  # noqa: F401; pylint: disable=unused-import
 from awx.main.tests import data

-from awx.main.models import Project, JobTemplate, Organization, Inventory
+from awx.main.models import Project, JobTemplate, Organization, Inventory, WorkflowJob, UnifiedJob
 from awx.main.tasks.system import clear_setting_cache

 logger = logging.getLogger(__name__)
@@ -100,6 +100,21 @@ def wait_for_events(uj, timeout=2):


 def unified_job_stdout(uj):
+    if type(uj) is UnifiedJob:
+        uj = uj.get_real_instance()
+    if isinstance(uj, WorkflowJob):
+        outputs = []
+        for node in uj.workflow_job_nodes.all().select_related('job').order_by('id'):
+            if node.job is None:
+                continue
+            outputs.append(
+                'workflow node {node_id} job {job_id} output:\n{output}'.format(
+                    node_id=node.id,
+                    job_id=node.job.id,
+                    output=unified_job_stdout(node.job),
+                )
+            )
+        return '\n'.join(outputs)
     wait_for_events(uj)
     return '\n'.join([event.stdout for event in uj.get_event_queryset().order_by('created')])
awx/main/tests/unit/api/test_fields.py (49 changes, new file)
@@ -0,0 +1,49 @@
import pytest
from collections import OrderedDict
from unittest import mock

from rest_framework.exceptions import ValidationError

from awx.api.fields import DeprecatedCredentialField


class TestDeprecatedCredentialField:
    """Test that DeprecatedCredentialField handles unexpected input types gracefully."""

    def test_dict_value_raises_validation_error(self):
        """Passing a dict instead of an integer should return a 400 validation error, not a 500 TypeError."""
        field = DeprecatedCredentialField()
        with pytest.raises(ValidationError):
            field.to_internal_value({"username": "admin", "password": "secret"})

    def test_ordered_dict_value_raises_validation_error(self):
        """Passing an OrderedDict should return a 400 validation error, not a 500 TypeError."""
        field = DeprecatedCredentialField()
        with pytest.raises(ValidationError):
            field.to_internal_value(OrderedDict([("username", "admin")]))

    def test_list_value_raises_validation_error(self):
        """Passing a list should return a 400 validation error, not a 500 TypeError."""
        field = DeprecatedCredentialField()
        with pytest.raises(ValidationError):
            field.to_internal_value([1, 2, 3])

    def test_string_value_raises_validation_error(self):
        """Passing a non-numeric string should return a 400 validation error."""
        field = DeprecatedCredentialField()
        with pytest.raises(ValidationError):
            field.to_internal_value("not_a_number")

    @mock.patch('awx.api.fields.Credential.objects')
    def test_valid_integer_value_works(self, mock_cred_objects):
        """Passing a valid integer PK should work when the credential exists."""
        mock_cred_objects.get.return_value = mock.MagicMock()
        field = DeprecatedCredentialField()
        assert field.to_internal_value(42) == 42

    @mock.patch('awx.api.fields.Credential.objects')
    def test_valid_string_integer_value_works(self, mock_cred_objects):
        """Passing a numeric string PK should work when the credential exists."""
        mock_cred_objects.get.return_value = mock.MagicMock()
        field = DeprecatedCredentialField()
        assert field.to_internal_value("42") == 42
@@ -176,22 +176,22 @@ def test_display_survey_spec_encrypts_default(survey_spec_factory):

 @pytest.mark.survey
 @pytest.mark.parametrize(
-    "question_type,default,min,max,expect_use,expect_value",
+    "question_type,default,min,max,expect_valid,expect_use,expect_value",
     [
-        ("text", "", 0, 0, True, ''),  # default used
-        ("text", "", 1, 0, False, 'N/A'),  # value less than min length
-        ("password", "", 1, 0, False, 'N/A'),  # passwords behave the same as text
-        ("multiplechoice", "", 0, 0, False, 'N/A'),  # historical bug
-        ("multiplechoice", "zeb", 0, 0, False, 'N/A'),  # zeb not in choices
-        ("multiplechoice", "coffee", 0, 0, True, 'coffee'),
-        ("multiselect", None, 0, 0, False, 'N/A'),  # NOTE: Behavior is arguable, value of [] may be prefered
-        ("multiselect", "", 0, 0, False, 'N/A'),
-        ("multiselect", ["zeb"], 0, 0, False, 'N/A'),
-        ("multiselect", ["milk"], 0, 0, True, ["milk"]),
-        ("multiselect", ["orange\nmilk"], 0, 0, False, 'N/A'),  # historical bug
+        ("text", "", 0, 0, True, False, 'N/A'),  # valid but empty default not sent for optional question
+        ("text", "", 1, 0, False, False, 'N/A'),  # value less than min length
+        ("password", "", 1, 0, False, False, 'N/A'),  # passwords behave the same as text
+        ("multiplechoice", "", 0, 0, False, False, 'N/A'),  # historical bug
+        ("multiplechoice", "zeb", 0, 0, False, False, 'N/A'),  # zeb not in choices
+        ("multiplechoice", "coffee", 0, 0, True, True, 'coffee'),
+        ("multiselect", None, 0, 0, False, False, 'N/A'),  # NOTE: Behavior is arguable, value of [] may be prefered
+        ("multiselect", "", 0, 0, False, False, 'N/A'),
+        ("multiselect", ["zeb"], 0, 0, False, False, 'N/A'),
+        ("multiselect", ["milk"], 0, 0, True, True, ["milk"]),
+        ("multiselect", ["orange\nmilk"], 0, 0, False, False, 'N/A'),  # historical bug
     ],
 )
-def test_optional_survey_question_defaults(survey_spec_factory, question_type, default, min, max, expect_use, expect_value):
+def test_optional_survey_question_defaults(survey_spec_factory, question_type, default, min, max, expect_valid, expect_use, expect_value):
     spec = survey_spec_factory(
         [
             {
@@ -208,7 +208,7 @@ def test_optional_survey_question_defaults(survey_spec_factory, question_type, d
     jt = JobTemplate(name="test-jt", survey_spec=spec, survey_enabled=True)
     defaulted_extra_vars = jt._update_unified_job_kwargs({}, {})
     element = spec['spec'][0]
-    if expect_use:
+    if expect_valid:
         assert jt._survey_element_validation(element, {element['variable']: element['default']}) == []
     else:
         assert jt._survey_element_validation(element, {element['variable']: element['default']})
@@ -218,6 +218,28 @@ def test_optional_survey_question_defaults(survey_spec_factory, question_type, d
         assert 'c' not in defaulted_extra_vars['extra_vars']


+@pytest.mark.survey
+def test_optional_survey_empty_default_with_runtime_extra_var(survey_spec_factory):
+    """When a user explicitly provides an empty string at runtime for an optional
+    survey question, the variable should still be included in extra_vars."""
+    spec = survey_spec_factory(
+        [
+            {
+                "required": False,
+                "default": "",
+                "choices": "",
+                "variable": "c",
+                "min": 0,
+                "max": 0,
+                "type": "text",
+            },
+        ]
+    )
+    jt = JobTemplate(name="test-jt", survey_spec=spec, survey_enabled=True)
+    defaulted_extra_vars = jt._update_unified_job_kwargs({}, {'extra_vars': json.dumps({'c': ''})})
+    assert json.loads(defaulted_extra_vars['extra_vars'])['c'] == ''
+
+
 @pytest.mark.survey
 @pytest.mark.parametrize(
     "question_type,default,maxlen,kwargs,expected",
@@ -18,8 +18,17 @@ from awx.main.models import (
     Job,
     Organization,
     Project,
     JobTemplate,
+    UnifiedJobTemplate,
+    InstanceGroup,
+    ExecutionEnvironment,
+    ProjectUpdate,
+    InventoryUpdate,
+    InventorySource,
+    AdHocCommand,
 )
 from awx.main.tasks import jobs
+from ansible_base.lib.workload_identity.controller import AutomationControllerJobScope


 @pytest.fixture
@@ -188,3 +197,289 @@ def test_invalid_host_facts(mock_facts_settings, bulk_update_sorted_by_id, private_data_dir):
     with pytest.raises(pytest.fail.Exception):
         if failures:
             pytest.fail(f" {len(failures)} facts cleared failures : {','.join(failures)}")
+
+
+@pytest.mark.parametrize(
+    "job_attrs,expected_claims",
+    [
+        (
+            {
+                'id': 100,
+                'name': 'Test Job',
+                'job_type': 'run',
+                'launch_type': 'manual',
+                'playbook': 'site.yml',
+                'organization': Organization(id=1, name='Test Org'),
+                'inventory': Inventory(id=2, name='Test Inventory'),
+                'project': Project(id=3, name='Test Project'),
+                'execution_environment': ExecutionEnvironment(id=4, name='Test EE'),
+                'job_template': JobTemplate(id=5, name='Test Job Template'),
+                'unified_job_template': UnifiedJobTemplate(pk=6, id=6, name='Test Unified Job Template'),
+                'instance_group': InstanceGroup(id=7, name='Test Instance Group'),
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 100,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Test Job',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'run',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+                AutomationControllerJobScope.CLAIM_PLAYBOOK_NAME: 'site.yml',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME: 'Test Org',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_ID: 1,
+                AutomationControllerJobScope.CLAIM_INVENTORY_NAME: 'Test Inventory',
+                AutomationControllerJobScope.CLAIM_INVENTORY_ID: 2,
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_NAME: 'Test EE',
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_ID: 4,
+                AutomationControllerJobScope.CLAIM_PROJECT_NAME: 'Test Project',
+                AutomationControllerJobScope.CLAIM_PROJECT_ID: 3,
+                AutomationControllerJobScope.CLAIM_JOB_TEMPLATE_NAME: 'Test Job Template',
+                AutomationControllerJobScope.CLAIM_JOB_TEMPLATE_ID: 5,
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_NAME: 'Test Unified Job Template',
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_ID: 6,
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_NAME: 'Test Instance Group',
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_ID: 7,
+            },
+        ),
+        (
+            {'id': 100, 'name': 'Test', 'job_type': 'run', 'launch_type': 'manual', 'organization': Organization(id=1, name='')},
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 100,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Test',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'run',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_ID: 1,
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME: '',
+                AutomationControllerJobScope.CLAIM_PLAYBOOK_NAME: '',
+            },
+        ),
+    ],
+)
+def test_populate_claims_for_workload(job_attrs, expected_claims):
+    job = Job()
+
+    for attr, value in job_attrs.items():
+        setattr(job, attr, value)
+
+    claims = jobs.populate_claims_for_workload(job)
+    assert claims == expected_claims
+
+
+@pytest.mark.parametrize(
+    "workload_attrs,expected_claims",
+    [
+        (
+            {
+                'id': 200,
+                'name': 'Git Sync',
+                'job_type': 'check',
+                'launch_type': 'sync',
+                'organization': Organization(id=1, name='Test Org'),
+                'project': Project(pk=3, id=3, name='Test Project'),
+                'unified_job_template': Project(pk=3, id=3, name='Test Project'),
+                'execution_environment': ExecutionEnvironment(id=4, name='Test EE'),
+                'instance_group': InstanceGroup(id=7, name='Test Instance Group'),
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 200,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Git Sync',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'check',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'sync',
+                AutomationControllerJobScope.CLAIM_LAUNCHED_BY_NAME: 'Test Project',
+                AutomationControllerJobScope.CLAIM_LAUNCHED_BY_ID: 3,
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME: 'Test Org',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_ID: 1,
+                AutomationControllerJobScope.CLAIM_PROJECT_NAME: 'Test Project',
+                AutomationControllerJobScope.CLAIM_PROJECT_ID: 3,
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_NAME: 'Test Project',
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_ID: 3,
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_NAME: 'Test EE',
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_ID: 4,
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_NAME: 'Test Instance Group',
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_ID: 7,
+            },
+        ),
+        (
+            {
+                'id': 201,
+                'name': 'Minimal Project Update',
+                'job_type': 'run',
+                'launch_type': 'manual',
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 201,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Minimal Project Update',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'run',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+            },
+        ),
+    ],
+)
+def test_populate_claims_for_project_update(workload_attrs, expected_claims):
+    project_update = ProjectUpdate()
+    for attr, value in workload_attrs.items():
+        setattr(project_update, attr, value)
+
+    claims = jobs.populate_claims_for_workload(project_update)
+    assert claims == expected_claims
+
+
+@pytest.mark.parametrize(
+    "workload_attrs,expected_claims",
+    [
+        (
+            {
+                'id': 300,
+                'name': 'AWS Sync',
+                'launch_type': 'scheduled',
+                'organization': Organization(id=1, name='Test Org'),
+                'inventory': Inventory(id=2, name='AWS Inventory'),
+                'unified_job_template': InventorySource(pk=8, id=8, name='AWS Source'),
+                'execution_environment': ExecutionEnvironment(id=4, name='Test EE'),
+                'instance_group': InstanceGroup(id=7, name='Test Instance Group'),
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 300,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'AWS Sync',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'scheduled',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME: 'Test Org',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_ID: 1,
+                AutomationControllerJobScope.CLAIM_INVENTORY_NAME: 'AWS Inventory',
+                AutomationControllerJobScope.CLAIM_INVENTORY_ID: 2,
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_NAME: 'AWS Source',
+                AutomationControllerJobScope.CLAIM_UNIFIED_JOB_TEMPLATE_ID: 8,
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_NAME: 'Test EE',
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_ID: 4,
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_NAME: 'Test Instance Group',
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_ID: 7,
+            },
+        ),
+        (
+            {
+                'id': 301,
+                'name': 'Minimal Inventory Update',
+                'launch_type': 'manual',
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 301,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Minimal Inventory Update',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+            },
+        ),
+    ],
+)
+def test_populate_claims_for_inventory_update(workload_attrs, expected_claims):
+    inventory_update = InventoryUpdate()
+    for attr, value in workload_attrs.items():
+        setattr(inventory_update, attr, value)
+
+    claims = jobs.populate_claims_for_workload(inventory_update)
+    assert claims == expected_claims
+
+
+@pytest.mark.parametrize(
+    "workload_attrs,expected_claims",
+    [
+        (
+            {
+                'id': 400,
+                'name': 'Ping All Hosts',
+                'job_type': 'run',
+                'launch_type': 'manual',
+                'organization': Organization(id=1, name='Test Org'),
+                'inventory': Inventory(id=2, name='Test Inventory'),
+                'execution_environment': ExecutionEnvironment(id=4, name='Test EE'),
+                'instance_group': InstanceGroup(id=7, name='Test Instance Group'),
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 400,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Ping All Hosts',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'run',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_NAME: 'Test Org',
+                AutomationControllerJobScope.CLAIM_ORGANIZATION_ID: 1,
+                AutomationControllerJobScope.CLAIM_INVENTORY_NAME: 'Test Inventory',
+                AutomationControllerJobScope.CLAIM_INVENTORY_ID: 2,
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_NAME: 'Test EE',
+                AutomationControllerJobScope.CLAIM_EXECUTION_ENVIRONMENT_ID: 4,
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_NAME: 'Test Instance Group',
+                AutomationControllerJobScope.CLAIM_INSTANCE_GROUP_ID: 7,
+            },
+        ),
+        (
+            {
+                'id': 401,
+                'name': 'Minimal Ad Hoc',
+                'job_type': 'run',
+                'launch_type': 'manual',
+            },
+            {
+                AutomationControllerJobScope.CLAIM_JOB_ID: 401,
+                AutomationControllerJobScope.CLAIM_JOB_NAME: 'Minimal Ad Hoc',
+                AutomationControllerJobScope.CLAIM_JOB_TYPE: 'run',
+                AutomationControllerJobScope.CLAIM_LAUNCH_TYPE: 'manual',
+            },
+        ),
+    ],
+)
+def test_populate_claims_for_adhoc_command(workload_attrs, expected_claims):
+    adhoc_command = AdHocCommand()
+    for attr, value in workload_attrs.items():
+        setattr(adhoc_command, attr, value)
+
+    claims = jobs.populate_claims_for_workload(adhoc_command)
+    assert claims == expected_claims
+
+
+@mock.patch('awx.main.tasks.jobs.get_workload_identity_client')
+def test_retrieve_workload_identity_jwt_returns_jwt_from_client(mock_get_client):
+    """retrieve_workload_identity_jwt returns the JWT string from the client."""
+    mock_client = mock.MagicMock()
+    mock_response = mock.MagicMock()
+    mock_response.jwt = 'eyJ.test.jwt'
+    mock_client.request_workload_jwt.return_value = mock_response
+    mock_get_client.return_value = mock_client
+
+    unified_job = Job()
+    unified_job.id = 42
+    unified_job.name = 'Test Job'
+    unified_job.launch_type = 'manual'
+    unified_job.organization = Organization(id=1, name='Test Org')
+    unified_job.unified_job_template = None
+    unified_job.instance_group = None
+
+    result = jobs.retrieve_workload_identity_jwt(unified_job, audience='https://api.example.com', scope='aap_controller_automation_job')
+
+    assert result == 'eyJ.test.jwt'
+    mock_client.request_workload_jwt.assert_called_once()
+    call_kwargs = mock_client.request_workload_jwt.call_args[1]
+    assert call_kwargs['audience'] == 'https://api.example.com'
+    assert call_kwargs['scope'] == 'aap_controller_automation_job'
+    assert 'claims' in call_kwargs
+    assert call_kwargs['claims'][AutomationControllerJobScope.CLAIM_JOB_ID] == 42
+    assert call_kwargs['claims'][AutomationControllerJobScope.CLAIM_JOB_NAME] == 'Test Job'
+
+
+@mock.patch('awx.main.tasks.jobs.get_workload_identity_client')
+def test_retrieve_workload_identity_jwt_passes_audience_and_scope(mock_get_client):
+    """retrieve_workload_identity_jwt passes audience and scope to the client."""
+    mock_client = mock.MagicMock()
+    mock_client.request_workload_jwt.return_value = mock.MagicMock(jwt='token')
+    mock_get_client.return_value = mock_client
+
+    unified_job = mock.MagicMock()
+    audience = 'custom_audience'
+    scope = 'custom_scope'
+    with mock.patch('awx.main.tasks.jobs.populate_claims_for_workload', return_value={'job_id': 1}):
+        jobs.retrieve_workload_identity_jwt(unified_job, audience=audience, scope=scope)
+
+    mock_client.request_workload_jwt.assert_called_once_with(claims={'job_id': 1}, scope=scope, audience=audience)
+
+
+@mock.patch('awx.main.tasks.jobs.get_workload_identity_client')
+def test_retrieve_workload_identity_jwt_raises_when_client_not_configured(mock_get_client):
+    """retrieve_workload_identity_jwt raises RuntimeError when client is None."""
+    mock_get_client.return_value = None
+
+    unified_job = mock.MagicMock()
+
+    with pytest.raises(RuntimeError, match="Workload identity client is not configured"):
+        jobs.retrieve_workload_identity_jwt(unified_job, audience='test_audience', scope='test_scope')
@@ -330,17 +330,13 @@ class TestHostnameRegexValidator:

     def test_bad_call(self, regex_expr, re_flags):
         h = HostnameRegexValidator(regex=regex_expr, flags=re_flags)
-        try:
-            with pytest.raises(ValidationError, match=r"^\['illegal characters detected in hostname=@#\$%\)\$#\(TUFAS_DG. Please verify.'\]$"):
-                h("@#$%)$#(TUFAS_DG")
-        except ValidationError as e:
-            assert e.message is not None
+        with pytest.raises(ValidationError, match=r"^\['illegal characters detected in hostname=@#\$%\)\$#\(TUFAS_DG. Please verify.'\]$"):
+            h("@#$%)$#(TUFAS_DG")

     def test_good_call_with_inverse(self, regex_expr, re_flags, inverse_match=True):
         h = HostnameRegexValidator(regex=regex_expr, flags=re_flags, inverse_match=inverse_match)
-        try:
-            with pytest.raises(ValidationError, match=r"^\['Enter a valid value.'\]$"):
-                h("1.2.3.4")
-        except ValidationError as e:
-            assert e.message is not None
+        with pytest.raises(ValidationError, match=r"^\['Enter a valid value.'\]$"):
+            h("1.2.3.4")

     def test_bad_call_with_inverse(self, regex_expr, re_flags, inverse_match=True):
         h = HostnameRegexValidator(regex=regex_expr, flags=re_flags, inverse_match=inverse_match)
@@ -48,15 +48,16 @@ def could_be_playbook(project_path, dir_path, filename):
    # show up.
    matched = False
    try:
        for n, line in enumerate(codecs.open(playbook_path, 'r', encoding='utf-8', errors='ignore')):
            if valid_playbook_re.match(line):
                matched = True
                break
            # Any YAML file can also be encrypted with vault;
            # allow these to be used as the main playbook.
            elif n == 0 and line.startswith('$ANSIBLE_VAULT;'):
                matched = True
                break
        with codecs.open(playbook_path, 'r', encoding='utf-8', errors='ignore') as f:
            for n, line in enumerate(f):
                if valid_playbook_re.match(line):
                    matched = True
                    break
                # Any YAML file can also be encrypted with vault;
                # allow these to be used as the main playbook.
                elif n == 0 and line.startswith('$ANSIBLE_VAULT;'):
                    matched = True
                    break
    except IOError:
        return None
    if not matched:

@@ -1000,9 +1000,15 @@ def getattrd(obj, name, default=NoDefaultProvided):
        raise


def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
empty = object()


def getattr_dne(obj, name, default=empty, notfound=ObjectDoesNotExist):
    try:
        return getattr(obj, name)
        if default is empty:
            return getattr(obj, name)
        else:
            return getattr(obj, name, default)
    except notfound:
        return None

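The change above gives `getattr_dne` an optional `default`, letting callers distinguish a missing attribute from a related object that raises `ObjectDoesNotExist`. A minimal, self-contained sketch of the new semantics (the `Widget` class is hypothetical; the function body mirrors the diff):

```python
from django.core.exceptions import ObjectDoesNotExist

empty = object()


def getattr_dne(obj, name, default=empty, notfound=ObjectDoesNotExist):
    # Mirrors the updated helper: no default -> plain getattr (AttributeError
    # propagates); with a default -> getattr with fallback; a notfound
    # exception raised by the attribute access still maps to None.
    try:
        if default is empty:
            return getattr(obj, name)
        else:
            return getattr(obj, name, default)
    except notfound:
        return None


class Widget:
    color = 'blue'


assert getattr_dne(Widget(), 'color') == 'blue'
assert getattr_dne(Widget(), 'size', default=0) == 0
```
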
@@ -21,8 +21,11 @@ DOCUMENTATION = '''

import os
import json
import re
from importlib.resources import files

from packaging.version import Version, InvalidVersion

from ansible.plugins.callback import CallbackBase

# NOTE: in Ansible 1.2 or later general logging is available without
@@ -52,6 +55,91 @@ def list_collections(artifacts_manager=None):
    return collections


# External query path constants
EXTERNAL_QUERY_COLLECTION = 'ansible_collections.redhat.indirect_accounting'
EXTERNAL_QUERY_PATH = 'extensions/audit/external_queries'


def list_external_queries(namespace, name):
    """List all available external query versions for a collection.

    Returns a list of Version objects for all available query files
    matching the namespace.name pattern.
    """
    versions = []

    try:
        queries_dir = files(EXTERNAL_QUERY_COLLECTION) / 'extensions' / 'audit' / 'external_queries'
    except ModuleNotFoundError:
        return versions

    # Pattern: namespace.name.X.Y.Z.yml where X.Y.Z is the version
    pattern = re.compile(rf'^{re.escape(namespace)}\.{re.escape(name)}\.(.+)\.yml$')

    for query_file in queries_dir.iterdir():
        match = pattern.match(query_file.name)
        if match:
            version_str = match.group(1)
            try:
                versions.append(Version(version_str))
            except InvalidVersion:
                # Skip files with invalid version strings
                pass

    return versions


def find_external_query_with_fallback(namespace, name, installed_version, display=None):
    """Find external query file with semantic version fallback.

    Args:
        namespace: Collection namespace (e.g., 'community')
        name: Collection name (e.g., 'vmware')
        installed_version: Version string of installed collection (e.g., '4.5.0')
        display: Ansible display object for logging

    Returns:
        Tuple of (query_content, fallback_used, fallback_version) or (None, False, None)
        - query_content: The query file content if found
        - fallback_used: True if a fallback version was used instead of exact match
        - fallback_version: The version string used (for logging)
    """
    try:
        installed_version_object = Version(installed_version)
    except InvalidVersion:
        # Invalid version string - can't do version comparison
        return None, False, None
    try:
        queries_dir = files(EXTERNAL_QUERY_COLLECTION) / 'extensions' / 'audit' / 'external_queries'
    except ModuleNotFoundError:
        return None, False, None

    # 1. Try exact version match first (AC5.2)
    exact_file = queries_dir / f'{namespace}.{name}.{installed_version}.yml'
    if exact_file.exists():
        with exact_file.open('r') as f:
            return f.read(), False, installed_version

    # 2. Find compatible fallback (same major version, nearest lower version)
    available_versions = list_external_queries(namespace, name)
    if not available_versions:
        return None, False, None
    # Filter to same major version and versions <= installed version (AC5.3, AC5.5)
    compatible_versions = [v for v in available_versions if v.major == installed_version_object.major and v <= installed_version_object]
    if not compatible_versions:
        # No compatible fallback exists (AC5.7)
        return None, False, None
    # Select nearest lower version - highest compatible version (AC5.4)
    fallback_version_object = max(compatible_versions)
    fallback_version_str = str(fallback_version_object)
    fallback_file = queries_dir / f'{namespace}.{name}.{fallback_version_str}.yml'
    if fallback_file.exists():
        with fallback_file.open('r') as f:
            return f.read(), True, fallback_version_str

    return None, False, None


class CallbackModule(CallbackBase):
    """
    logs playbook results, per host, in /var/log/ansible/hosts
@@ -81,6 +169,17 @@ class CallbackModule(CallbackBase):
            if query_file.exists():
                with query_file.open('r') as f:
                    collection_print['host_query'] = f.read()
                self._display.vv(f"Using embedded query for {candidate.fqcn} v{candidate.ver}")
            else:
                # 2. Check for external query file with version fallback
                query_content, fallback_used, version_used = find_external_query_with_fallback(candidate.namespace, candidate.name, candidate.ver)
                if query_content:
                    collection_print['host_query'] = query_content
                    if fallback_used:
                        # AC5.6: Log when fallback is used
                        self._display.v(f"Using external query {version_used} for {candidate.fqcn} v{candidate.ver}.")
                    else:
                        self._display.v(f"Using external query for {candidate.fqcn} v{candidate.ver}")

            collections_print[candidate.fqcn] = collection_print

@@ -774,7 +774,7 @@ LOGGING = {
        'awx.conf.settings': {'handlers': ['null'], 'level': 'WARNING'},
        'awx.main': {'handlers': ['null']},
        'awx.main.commands.run_callback_receiver': {'handlers': ['callback_receiver'], 'level': 'INFO'},  # very noisey debug-level logs
        'awx.main.dispatch': {'handlers': ['dispatcher']},
        'awx.main.dispatch': {'handlers': ['task_system']},
        'awx.main.consumers': {'handlers': ['console', 'file', 'tower_warnings'], 'level': 'INFO'},
        'awx.main.rsyslog_configurer': {'handlers': ['rsyslog_configurer']},
        'awx.main.cache_clear': {'handlers': ['cache_clear']},

@@ -49,6 +49,11 @@ class Connection(object):
            self.session_cookie_name = historical_response.headers.get('X-API-Session-Cookie-Name')

            self.session_id = self.session.cookies.get(self.session_cookie_name, None)
            if self.session_id is None and config.get("api_base_path") == "/api/controller/":
                # Use gateway session cookie name if controller session cookie name is not found
                self.session_cookie_name = "gateway_sessionid"
                self.session_id = self.session.cookies.get(self.session_cookie_name, None)

            self.uses_session_cookie = True
        else:
            self.session.auth = (username, password)

@@ -31,7 +31,23 @@ class User(HasCreate, base.Base):
        payload = self.create_payload(username=username, password=password, **kwargs)
        self.password = payload.password

        self.update_identity(Users(self.connection).post(payload))
        ctrl_users_api = Users(self.connection)
        # Check if API base path is set to controller, then use gateway endpoint
        if config.get("api_base_path") == "/api/controller/":
            # Use gateway endpoint for user creation
            gw_users_api = Users(self.connection)
            gw_users_api.endpoint = "/api/gateway/v1/users/"
            # Cleanup controller attributes
            payload["is_platform_auditor"] = payload.get("is_system_auditor")
            payload.pop("is_system_auditor")
            # Create gw user
            gw_user = gw_users_api.post(payload)
            user = ctrl_users_api.get(username=gw_user.username).results.pop()
            user.json["password"] = payload.password
            self.update_identity(user)
        else:
            # Use default endpoint
            self.update_identity(ctrl_users_api.post(payload))

        if organization:
            organization.add_user(self)

@@ -251,7 +251,13 @@ class CLI(object):
        if self.resource != 'settings':
            for method in ('list', 'modify', 'create'):
                if method in parser.parser.choices:
                    parser.build_query_arguments(method, 'GET' if method == 'list' else 'POST')
                    if method == 'list':
                        http_method = 'GET'
                    elif method == 'modify' and 'PUT' in parser.options:
                        http_method = 'PUT'
                    else:
                        http_method = 'POST'
                    parser.build_query_arguments(method, http_method)
        if from_sphinx:
            parsed, extra = self.parser.parse_known_args(self.argv)
        else:

@@ -102,6 +102,18 @@ class ResourceOptionsParser(object):
            if '299' in warning and 'deprecated' in warning:
                self.deprecated = True
        self.allowed_options = options.headers.get('Allow', '').split(', ')
        # If the user can PUT on the detail endpoint but doesn't have
        # POST on the list endpoint, use the detail endpoint's
        # action schema so that 'modify' fields are populated.
        if 'POST' not in self.options and 'PUT' in self.allowed_options:
            try:
                detail_actions = options.json().get('actions', {})
            except Exception:
                detail_actions = {}
            if 'PUT' in detail_actions:
                self.options['PUT'] = detail_actions['PUT']
            elif 'GET' in detail_actions:
                self.options['PUT'] = detail_actions['GET']

    def build_list_actions(self):
        action_map = {
@@ -109,6 +121,10 @@ class ResourceOptionsParser(object):
            'POST': 'create',
        }
        for method, action in self.options.items():
            # Skip 'PUT', which may be added by get_allowed_options
            # and is handled separately by build_detail_actions
            if method not in action_map:
                continue
            method = action_map[method]
            parser = self.parser.add_parser(method, help='')
            if method == 'list':

@@ -11,6 +11,24 @@ class ResourceOptionsParser(ResourceOptionsParser):
        self.allowed_options = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']


class NoPostResourceOptionsParser(ResourceOptionsParser):
    """Simulates a user with object-level PUT but no list-level POST."""

    detail_put_actions = {}

    def get_allowed_options(self):
        self.allowed_options = ['GET', 'PUT', 'PATCH', 'DELETE']
        # Simulate the logic from the real get_allowed_options that
        # falls back to the detail endpoint's PUT schema when POST
        # is not available on the list endpoint.
        if 'POST' not in self.options and 'PUT' in self.allowed_options:
            if self.detail_put_actions:
                self.options['PUT'] = self.detail_put_actions

    def handle_custom_actions(self):
        pass


class OptionsPage(Page):
    def options(self):
        return self
@@ -185,6 +203,30 @@ class TestOptions(unittest.TestCase):
            self.parser.choices[method].print_help(out)
        assert 'positional arguments:\n id' in out.getvalue()

    def test_modify_without_list_post(self):
        """User with object-level PUT but no list-level POST can still modify."""
        page = OptionsPage.from_json(
            {
                'actions': {
                    'GET': {},
                }
            }
        )
        NoPostResourceOptionsParser.detail_put_actions = {
            'scm_branch': {'type': 'string', 'help_text': 'SCM branch'},
            'description': {'type': 'string', 'help_text': 'Description'},
        }
        options = NoPostResourceOptionsParser(None, page, 'projects', self.parser)

        assert 'modify' in self.parser.choices
        assert 'create' not in self.parser.choices

        options.build_query_arguments('modify', 'PUT')
        out = StringIO()
        self.parser.choices['modify'].print_help(out)
        assert '--scm_branch TEXT' in out.getvalue()
        assert '--description TEXT' in out.getvalue()


class TestSettingsOptions(unittest.TestCase):
    def setUp(self):

195
docs/indirect_node_counting/external_query_files.md
Normal file
@@ -0,0 +1,195 @@
# External Query Files for Indirect Node Counting

This document describes how to create query files for the Indirect Node Counting feature. Query files define how to extract managed node information from Ansible module execution results.

## Overview

When Ansible modules interact with external systems (VMware, cloud providers, network devices, etc.), they may manage nodes that aren't in the Ansible inventory. Query files tell the Controller how to extract information about these "indirect" managed nodes from module execution data.

## Query File Types

There are two types of query files:

1. **Embedded Query Files**: Shipped within a collection at `extensions/audit/event_query.yml`
2. **External Query Files**: Shipped in the `redhat.indirect_accounting` collection at `extensions/audit/external_queries/<namespace>.<name>.<version>.yml`

Embedded queries take precedence over external queries. External queries support version fallback within the same major version.

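For a hypothetical `community.vmware` 4.5.0 installation, the two locations would look roughly like this (illustrative paths):

```
# Embedded: ships inside the collection itself
ansible_collections/community/vmware/extensions/audit/event_query.yml

# External: ships inside the redhat.indirect_accounting collection
ansible_collections/redhat/indirect_accounting/extensions/audit/external_queries/community.vmware.4.5.0.yml
```
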
## File Format

Query files are YAML documents that map fully-qualified module names to jq expressions.

### Basic Structure

```yaml
---
<namespace>.<collection>.<module_name>:
  query: >-
    <jq_expression>
```

### Example

```yaml
---
community.vmware.vmware_guest:
  query: >-
    {name: .instance.hw_name, canonical_facts: {host_name: .instance.hw_name, uuid: .instance.hw_product_uuid}, facts: {guest_id: .instance.hw_guest_id}}
```

## jq Expression Requirements

The jq expression processes the module's result data (`event_data.res`) and must output a JSON object with the following fields:

### Required Fields

| Field | Type | Description |
|-------|------|-------------|
| `name` | string | Display name of the indirect managed node |
| `canonical_facts` | object | Facts used for node deduplication across jobs |

### Optional Fields

| Field | Type | Description |
|-------|------|-------------|
| `facts` | object | Additional information about the managed node |

### canonical_facts

The `canonical_facts` object should contain fields that uniquely identify the managed node. Common examples:

- `host_name`: The hostname of the managed node
- `uuid`: A unique identifier (VM UUID, device serial number, etc.)
- `ip_address`: IP address if it uniquely identifies the node

These facts are used to deduplicate nodes across multiple job runs. Choose facts that remain stable across the node's lifecycle.

### facts

The `facts` object contains additional metadata that doesn't affect deduplication:

- `device_type`: Type of device (e.g., "virtual_machine", "network_switch")
- `guest_id`: Guest OS identifier
- `platform`: Platform information

## jq Expression Input

The jq expression receives the module's result data as input. This is the `res` field from Ansible's job event data, which typically contains:

- The module's return values
- Any registered variables
- Status information

To understand what data is available, examine the module's documentation or run a test playbook and inspect the job events.

## Module Matching

### Exact Match

Queries are matched by fully-qualified module name:

```yaml
community.vmware.vmware_guest:
  query: >-
    ...
```

This matches only `community.vmware.vmware_guest` module invocations.

### Wildcard Match

You can use wildcards to match all modules in a collection:

```yaml
community.vmware.*:
  query: >-
    ...
```

Exact matches take precedence over wildcard matches.

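For instance, a single file can combine both forms. In this sketch (the jq bodies are placeholders, not tested queries), `vmware_guest` keeps its dedicated query while every other `community.vmware` module falls through to the wildcard entry:

```yaml
---
# Exact match: wins for vmware_guest because exact matches take precedence
community.vmware.vmware_guest:
  query: >-
    {name: .instance.hw_name, canonical_facts: {host_name: .instance.hw_name}}

# Wildcard: applies to every other module in the collection
community.vmware.*:
  query: >-
    {name: .hostname, canonical_facts: {host_name: .hostname}}
```
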
## External Query File Naming

External query files must follow this naming convention:

```
<namespace>.<collection_name>.<version>.yml
```

Examples:
- `community.vmware.4.5.0.yml`
- `cisco.ios.8.0.0.yml`
- `amazon.aws.7.2.1.yml`

## Version Fallback

When no exact version match exists for an external query, the system falls back to the nearest compatible version:

1. Only versions with the **same major version** are considered
2. The **highest version less than or equal to** the installed version is selected
3. Major version boundaries are never crossed

### Examples

| Installed Version | Available Queries | Query Used | Reason |
|-------------------|-------------------|------------|--------|
| 4.5.0 | 4.0.0, 4.1.0, 5.0.0 | 4.1.0 | Highest v4.x <= 4.5.0 |
| 4.0.5 | 4.0.0, 4.1.0, 5.0.0 | 4.0.0 | 4.1.0 > 4.0.5, so 4.0.0 |
| 5.2.0 | 4.0.0, 4.1.0, 5.0.0 | 5.0.0 | Highest v5.x <= 5.2.0 |
| 3.8.0 | 4.0.0, 4.1.0, 5.0.0 | None | No v3.x queries available |
| 6.0.0 | 4.0.0, 4.1.0, 5.0.0 | None | No v6.x queries available |

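The selection rule compresses to a few lines of Python; this is a condensed sketch of the filtering done by `find_external_query_with_fallback` in the callback-plugin diff above, using `packaging.version`:

```python
from packaging.version import Version


def pick_fallback_version(installed, available):
    """Return the highest available version <= installed within the same major, else None."""
    inst = Version(installed)
    compatible = [v for v in map(Version, available) if v.major == inst.major and v <= inst]
    return str(max(compatible)) if compatible else None


available = ['4.0.0', '4.1.0', '5.0.0']
assert pick_fallback_version('4.5.0', available) == '4.1.0'  # highest v4.x <= 4.5.0
assert pick_fallback_version('4.0.5', available) == '4.0.0'  # 4.1.0 is too new
assert pick_fallback_version('3.8.0', available) is None     # major boundary never crossed
```
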
## Complete Example

Here's a complete external query file for `community.vmware` version 4.5.0:

**File**: `extensions/audit/external_queries/community.vmware.4.5.0.yml`

```yaml
---
# Query for vmware_guest module - extracts VM information
community.vmware.vmware_guest:
  query: >-
    {name: .instance.hw_name, canonical_facts: {host_name: .instance.hw_name, uuid: .instance.hw_product_uuid}, facts: {guest_id: .instance.hw_guest_id, num_cpus: .instance.hw_processor_count}}

# Query for vmware_guest_info module
community.vmware.vmware_guest_info:
  query: >-
    {name: .instance.hw_name, canonical_facts: {host_name: .instance.hw_name, uuid: .instance.hw_product_uuid}, facts: {power_state: .instance.hw_power_status}}
```

## Testing Query Files

To test a query file:

1. Run a playbook that uses the target module
2. Examine the job events to see the module's result data
3. Test your jq expression against the result data using the `jq` command-line tool
4. Verify the output contains valid `name` and `canonical_facts` fields

Example testing with jq:

```bash
# Sample module result data (from job event)
echo '{"instance": {"hw_name": "test-vm", "hw_product_uuid": "abc-123"}}' | \
  jq '{name: .instance.hw_name, canonical_facts: {host_name: .instance.hw_name, uuid: .instance.hw_product_uuid}}'
```

## Troubleshooting

### Query Not Being Applied

1. Verify the file is in the correct location
2. Check the file naming matches the collection namespace, name, and version exactly
3. Ensure the module name in the query matches the fully-qualified module name

### No Indirect Nodes Counted

1. Verify the jq expression produces valid output with `canonical_facts`
2. Check the Controller logs for jq parsing errors
3. Ensure the module's result data contains the expected fields

### Version Fallback Not Working

1. Verify the fallback version has the same major version as the installed collection
2. Check that the fallback version is less than or equal to the installed version
15
pytest.ini
@@ -35,10 +35,6 @@ filterwarnings =
    # FIXME: and is no longer imported at runtime.
    once:CoreAPI compatibility is deprecated and will be removed in DRF 3.17:rest_framework.RemovedInDRF317Warning:rest_framework.schemas.coreapi

    # FIXME: Delete this entry once naive dates aren't passed to DB lookup
    # FIXME: methods. Not sure where, might be in awx's views or in DAB.
    once:DateTimeField User.date_joined received a naive datetime .2020-01-01 00.00.00. while time zone support is active.:RuntimeWarning:django.db.models.fields

    # FIXME: Delete this entry once the deprecation is acted upon.
    # Note: RemovedInDjango51Warning may not exist in newer Django versions
    ignore:'index_together' is deprecated. Use 'Meta.indexes' in 'main.\w+' instead.
@@ -47,12 +43,6 @@ filterwarnings =
    # Note: RemovedInDjango50Warning may not exist in newer Django versions
    ignore:Using QuerySet.iterator.. after prefetch_related.. without specifying chunk_size is deprecated.

    # FIXME: Delete this entry once the **broken** always-true assertions in the
    # FIXME: following tests are fixed:
    # * `awx/main/tests/unit/utils/test_common.py::TestHostnameRegexValidator::test_good_call`
    # * `awx/main/tests/unit/utils/test_common.py::TestHostnameRegexValidator::test_bad_call_with_inverse`
    once:assertion is always true, perhaps remove parentheses\?:pytest.PytestAssertRewriteWarning:

    # FIXME: Figure this out, fix and then delete the entry. It's not entirely
    # FIXME: clear what emits it and where.
    once:Pagination may yield inconsistent results with an unordered object_list. .class 'awx.main.models.workflow.WorkflowJobTemplateNode'. QuerySet.:django.core.paginator.UnorderedObjectListWarning:django.core.paginator
@@ -60,11 +50,6 @@ filterwarnings =
    # FIXME: Figure this out, fix and then delete the entry.
    once::django.core.paginator.UnorderedObjectListWarning:rest_framework.pagination

    # FIXME: Use `codecs.open()` via a context manager
    # FIXME: in `awx/main/utils/ansible.py` to close hanging file descriptors
    # FIXME: and then delete the entry.
    once:unclosed file <_io.BufferedReader name='[^']+'>:ResourceWarning:awx.main.utils.ansible

    # FIXME: Use `open()` via a context manager
    # FIXME: in `awx/main/tests/unit/test_tasks.py` to close hanging file
    # FIXME: descriptors and then delete the entry.

@@ -66,7 +66,7 @@ twisted[tls]>=24.7.0 # CVE-2024-41810
urllib3>=2.6.3 # CVE-2024-37891
uWSGI>=2.0.28
uwsgitop
wheel>=0.38.1 # CVE-2022-40898
wheel>=0.46.2 # CVE-2026-24049
pip==25.3 # see UPGRADE BLOCKERs
setuptools==80.9.0 # see UPGRADE BLOCKERs
setuptools-scm[toml]

@@ -116,7 +116,7 @@ cython==3.1.3
    # via -r /awx_devel/requirements/requirements.in
daphne==4.2.1
    # via -r /awx_devel/requirements/requirements.in
dispatcherd[pg-notify]==2026.01.27
dispatcherd[pg-notify]==2026.02.26
    # via -r /awx_devel/requirements/requirements.in
distro==1.9.0
    # via -r /awx_devel/requirements/requirements.in
@@ -336,6 +336,7 @@ packaging==25.0
    # django-guid
    # opentelemetry-instrumentation
    # setuptools-scm
    # wheel
pbr==7.0.1
    # via -r /awx_devel/requirements/requirements.in
pexpect==4.9.0
@@ -534,7 +535,7 @@ uwsgitop==0.12
    # via -r /awx_devel/requirements/requirements.in
websocket-client==1.8.0
    # via kubernetes
wheel==0.45.1
wheel==0.46.3
    # via -r /awx_devel/requirements/requirements.in
wrapt==1.17.3
    # via opentelemetry-instrumentation

@@ -2,6 +2,7 @@ build
django-debug-toolbar>=6.0 # Django 5.2 compatibility
django-test-migrations
drf-spectacular>=0.27.0 # Modern OpenAPI 3.0 schema generator
openapi-spec-validator # OpenAPI 3.0 schema validation
# pprofile - re-add once https://github.com/vpelletier/pprofile/issues/41 is addressed
ipython>=7.31.1 # https://github.com/ansible/awx/security/dependabot/30
unittest2
@@ -22,7 +23,7 @@ gprof2dot
atomicwrites
flake8
yamllint
pip>=21.3,<=24.0 # PEP 660 – Editable installs for pyproject.toml based builds (wheel based)
pip>=25.3 # PEP 660 – Editable installs for pyproject.toml based builds (wheel based)

# python debuggers
debugpy
