Compare commits

..

1 commit

Author:  Peter Braun
SHA1:    293abc8b35
Message: fix: make indirect host counting live test more reliable
Date:    2025-03-04 23:00:50 +01:00
100 changed files with 708 additions and 2105 deletions

View File

@@ -19,8 +19,6 @@ exclude_also =
 branch = True
 omit =
     awx/main/migrations/*
-    awx/settings/defaults.py
-    awx/settings/*_defaults.py
 source =
     .
 source_pkgs =

View File

@@ -11,7 +11,9 @@ inputs:
 runs:
   using: composite
   steps:
-    - uses: ./.github/actions/setup-python
+    - name: Get python version from Makefile
+      shell: bash
+      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
     - name: Set lower case owner name
       shell: bash
@@ -24,9 +26,26 @@ runs:
       run: |
         echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
-    - uses: ./.github/actions/setup-ssh-agent
+    - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
+      id: generate_key
+      shell: bash
+      run: |
+        if [[ -z "${{ inputs.private-github-key }}" ]]; then
+          ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
+          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+          cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+        else
+          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+          echo "${{ inputs.private-github-key }}" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+        fi
+    - name: Add private GitHub key to SSH agent
+      uses: webfactory/ssh-agent@v0.9.0
       with:
-        ssh-private-key: ${{ inputs.private-github-key }}
+        ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
     - name: Pre-pull latest devel image to warm cache
       shell: bash

View File

@@ -1,27 +0,0 @@
-name: 'Setup Python from Makefile'
-description: 'Extract and set up Python version from Makefile'
-inputs:
-  python-version:
-    description: 'Override Python version (optional)'
-    required: false
-    default: ''
-  working-directory:
-    description: 'Directory containing the Makefile'
-    required: false
-    default: '.'
-runs:
-  using: composite
-  steps:
-    - name: Get python version from Makefile
-      shell: bash
-      run: |
-        if [ -n "${{ inputs.python-version }}" ]; then
-          echo "py_version=${{ inputs.python-version }}" >> $GITHUB_ENV
-        else
-          cd ${{ inputs.working-directory }}
-          echo "py_version=`make PYTHON_VERSION`" >> $GITHUB_ENV
-        fi
-    - name: Install python
-      uses: actions/setup-python@v5
-      with:
-        python-version: ${{ env.py_version }}

View File

@@ -1,29 +0,0 @@
-name: 'Setup SSH for GitHub'
-description: 'Configure SSH for private repository access'
-inputs:
-  ssh-private-key:
-    description: 'SSH private key for repository access'
-    required: false
-    default: ''
-runs:
-  using: composite
-  steps:
-    - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
-      id: generate_key
-      shell: bash
-      run: |
-        if [[ -z "${{ inputs.ssh-private-key }}" ]]; then
-          ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
-          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
-          cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
-          echo "EOF" >> $GITHUB_OUTPUT
-        else
-          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
-          echo "${{ inputs.ssh-private-key }}" >> $GITHUB_OUTPUT
-          echo "EOF" >> $GITHUB_OUTPUT
-        fi
-    - name: Add private GitHub key to SSH agent
-      uses: webfactory/ssh-agent@v0.9.0
-      with:
-        ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}

View File

@@ -130,7 +130,7 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
@@ -161,10 +161,6 @@
        show-progress: false
        path: awx
-      - uses: ./awx/.github/actions/setup-ssh-agent
-        with:
-          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
      - name: Checkout awx-operator
        uses: actions/checkout@v4
        with:
@@ -172,14 +168,39 @@
          repository: ansible/awx-operator
          path: awx-operator
-      - uses: ./awx/.github/actions/setup-python
+      - name: Get python version from Makefile
+        working-directory: awx
+        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v4
        with:
-          working-directory: awx
+          python-version: ${{ env.py_version }}
      - name: Install playbook dependencies
        run: |
          python3 -m pip install docker
+      - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
+        id: generate_key
+        shell: bash
+        run: |
+          if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
+            ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
+            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+            cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
+            echo "EOF" >> $GITHUB_OUTPUT
+          else
+            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+            echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
+            echo "EOF" >> $GITHUB_OUTPUT
+          fi
+      - name: Add private GitHub key to SSH agent
+        uses: webfactory/ssh-agent@v0.9.0
+        with:
+          ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
      - name: Build AWX image
        working-directory: awx
        run: |
@@ -278,7 +299,7 @@
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'
@@ -354,7 +375,7 @@
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'

View File

@@ -49,10 +49,14 @@ jobs:
        run: |
          echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
          echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
+          echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
        env:
          OWNER: '${{ github.repository_owner }}'
-      - uses: ./.github/actions/setup-python
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.py_version }}
      - name: Log in to registry
        run: |
@@ -69,9 +73,25 @@
          make ui
        if: matrix.build-targets.image-name == 'awx'
-      - uses: ./.github/actions/setup-ssh-agent
+      - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
+        id: generate_key
+        shell: bash
+        run: |
+          if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
+            ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
+            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+            cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
+            echo "EOF" >> $GITHUB_OUTPUT
+          else
+            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
+            echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
+            echo "EOF" >> $GITHUB_OUTPUT
+          fi
+      - name: Add private GitHub key to SSH agent
+        uses: webfactory/ssh-agent@v0.9.0
        with:
-          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
+          ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
      - name: Build and push AWX devel images
        run: |

View File

@@ -12,7 +12,7 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'

View File

@@ -34,11 +34,9 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v4
      - name: Install python requests
        run: pip install requests
      - name: Check if user is a member of Ansible org
        uses: jannekem/run-python-script-action@v1
        id: check_user

View File

@@ -33,7 +33,7 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'

View File

@@ -36,7 +36,13 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - name: Get python version from Makefile
+        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.py_version }}
      - name: Install dependencies
        run: |

View File

@@ -64,9 +64,14 @@ jobs:
          repository: ansible/awx-logos
          path: awx-logos
-      - uses: ./awx/.github/actions/setup-python
+      - name: Get python version from Makefile
+        working-directory: awx
+        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v4
        with:
-          working-directory: awx
+          python-version: ${{ env.py_version }}
      - name: Install playbook dependencies
        run: |

View File

@@ -5,7 +5,6 @@ env:
   LC_ALL: "C.UTF-8" # prevent ERROR: Ansible could not initialize the preferred locale: unsupported locale setting
 on:
-  workflow_dispatch:
   push:
     branches:
       - devel
@@ -23,16 +22,18 @@ jobs:
       with:
         show-progress: false
-      - uses: ./.github/actions/setup-python
+      - name: Get python version from Makefile
+        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
+      - name: Install python ${{ env.py_version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.py_version }}
      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
-      - uses: ./.github/actions/setup-ssh-agent
-        with:
-          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}
      - name: Pre-pull image to warm build cache
        run: |
          docker pull -q ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
@@ -55,3 +56,5 @@ jobs:
          ansible localhost -c local, -m command -a "{{ ansible_python_interpreter + ' -m pip install boto3'}}"
          ansible localhost -c local -m aws_s3 \
            -a "src=${{ github.workspace }}/schema.json bucket=awx-public-ci-files object=${GITHUB_REF##*/}/schema.json mode=put permission=public-read"

.gitignore (vendored)
View File

@@ -150,8 +150,6 @@ use_dev_supervisor.txt
 awx/ui/src
 awx/ui/build
-awx/ui/.ui-built
-awx/ui_next
 # Docs build stuff
 docs/docsite/build/

View File

@@ -3,17 +3,6 @@
 <img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />
-> [!CAUTION]
-> The last release of this repository was released on Jul 2, 2024.
-> **Releases of this project are now paused during a large scale refactoring.**
-> For more information, follow [the Forum](https://forum.ansible.com/) and - more specifically - see the various communications on the matter:
->
-> * [Blog: Upcoming Changes to the AWX Project](https://www.ansible.com/blog/upcoming-changes-to-the-awx-project/)
-> * [Streamlining AWX Releases](https://forum.ansible.com/t/streamlining-awx-releases/6894) Primary update
-> * [Refactoring AWX into a Pluggable, Service-Oriented Architecture](https://forum.ansible.com/t/refactoring-awx-into-a-pluggable-service-oriented-architecture/7404)
-> * [Upcoming changes to AWX Operator installation methods](https://forum.ansible.com/t/upcoming-changes-to-awx-operator-installation-methods/7598)
-> * [AWX UI and credential types transitioning to the new pluggable architecture](https://forum.ansible.com/t/awx-ui-and-credential-types-transitioning-to-the-new-pluggable-architecture/8027)
 AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).
 To install AWX, please view the [Install guide](./INSTALL.md).

View File

@@ -62,8 +62,7 @@ else:
 def prepare_env():
     # Update the default settings environment variable based on current mode.
-    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings')
-    os.environ.setdefault('AWX_MODE', MODE)
+    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
     # Hide DeprecationWarnings when running in production. Need to first load
     # settings to apply our filter after Django's own warnings filter.
     from django.conf import settings

View File

@@ -161,7 +161,7 @@ def get_view_description(view, html=False):
 def get_default_schema():
-    if settings.DYNACONF.is_development_mode:
+    if settings.SETTINGS_MODULE == 'awx.settings.development':
         from awx.api.swagger import schema_view
         return schema_view

View File

@@ -6,7 +6,6 @@ import copy
 import json
 import logging
 import re
-import yaml
 from collections import Counter, OrderedDict
 from datetime import timedelta
 from uuid import uuid4
@@ -627,41 +626,15 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
         return exclusions

     def validate(self, attrs):
-        """
-        Apply serializer validation. Called by DRF.
-        Can be extended by subclasses. Or consider overwriting
-        `validate_with_obj` in subclasses, which provides access to the model
-        object and exception handling for field validation.
-        :param dict attrs: The names and values of the model form fields.
-        :raise rest_framework.exceptions.ValidationError: If the validation
-            fails.
-            The exception must contain a dict with the names of the form fields
-            which failed validation as keys, and a list of error messages as
-            values. This ensures that the error messages are rendered near the
-            relevant fields.
-        :return: The names and values from the model form fields, possibly
-            modified by the validations.
-        :rtype: dict
-        """
         attrs = super(BaseSerializer, self).validate(attrs)
-        # Create/update a model instance and run its full_clean() method to
-        # do any validation implemented on the model class.
-        exclusions = self.get_validation_exclusions(self.instance)
-        # Create a new model instance or take the existing one if it exists,
-        # and update its attributes with the respective field values from
-        # attrs.
-        obj = self.instance or self.Meta.model()
-        for k, v in attrs.items():
-            if k not in exclusions and k != 'canonical_address_port':
-                setattr(obj, k, v)
         try:
-            # Run serializer validators which need the model object for
-            # validation.
-            self.validate_with_obj(attrs, obj)
-            # Apply any validations implemented on the model class.
+            # Create/update a model instance and run its full_clean() method to
+            # do any validation implemented on the model class.
+            exclusions = self.get_validation_exclusions(self.instance)
+            obj = self.instance or self.Meta.model()
+            for k, v in attrs.items():
+                if k not in exclusions and k != 'canonical_address_port':
+                    setattr(obj, k, v)
             obj.full_clean(exclude=exclusions)
             # full_clean may modify values on the instance; copy those changes
             # back to attrs so they are saved.
@@ -690,32 +663,6 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
             raise ValidationError(d)
         return attrs
-    def validate_with_obj(self, attrs, obj):
-        """
-        Overwrite this if you need the model instance for your validation.
-        :param dict attrs: The names and values of the model form fields.
-        :param obj: An instance of the class's meta model.
-            If the serializer runs on a newly created object, obj contains only
-            the attrs from its serializer. If the serializer runs because an
-            object has been edited, obj is the existing model instance with all
-            attributes and values available.
-        :raise django.core.exceptionsValidationError: Raise this if your
-            validation fails.
-            To make the error appear at the respective form field, instantiate
-            the Exception with a dict containing the field name as key and the
-            error message as value.
-            Example: ``ValidationError({"password": "Not good enough!"})``
-            If the exception contains just a string, the message cannot be
-            related to a field and is rendered at the top of the model form.
-        :return: None
-        """
-        return
     def reverse(self, *args, **kwargs):
         kwargs['request'] = self.context.get('request')
         return reverse(*args, **kwargs)
@@ -1037,6 +984,7 @@
         return ret

     def validate_password(self, value):
+        django_validate_password(value)
         if not self.instance and value in (None, ''):
             raise serializers.ValidationError(_('Password required for new User.'))
@@ -1059,50 +1007,6 @@
         return value
-    def validate_with_obj(self, attrs, obj):
-        """
-        Validate the password with the Django password validators
-        To enable the Django password validators, configure
-        `settings.AUTH_PASSWORD_VALIDATORS` as described in the [Django
-        docs](https://docs.djangoproject.com/en/5.1/topics/auth/passwords/#enabling-password-validation)
-        :param dict attrs: The User form field names and their values as a dict.
-            Example::
-                {
-                    'username': 'TestUsername', 'first_name': 'FirstName',
-                    'last_name': 'LastName', 'email': 'First.Last@my.org',
-                    'is_superuser': False, 'is_system_auditor': False,
-                    'password': 'secret123'
-                }
-        :param obj: The User model instance.
-        :raises django.core.exceptions.ValidationError: Raise this if at least
-            one Django password validator fails.
-            The exception contains a dict ``{"password": <error-message>``}
-            which indicates that the password field has failed validation, and
-            the reason for failure.
-        :return: None.
-        """
-        # We must do this here instead of in `validate_password` bacause some
-        # django password validators need access to other model instance fields,
-        # e.g. ``username`` for the ``UserAttributeSimilarityValidator``.
-        password = attrs.get("password")
-        # Skip validation if no password has been entered. This may happen when
-        # an existing User is edited.
-        if password and password != '$encrypted$':
-            # Apply validators from settings.AUTH_PASSWORD_VALIDATORS. This may
-            # raise ValidationError.
-            #
-            # If the validation fails, re-raise the exception with adjusted
-            # content to make the error appear near the password field.
-            try:
-                django_validate_password(password, user=obj)
-            except DjangoValidationError as exc:
-                raise DjangoValidationError({"password": exc.messages})
     def _update_password(self, obj, new_password):
         if new_password and new_password != '$encrypted$':
             obj.set_password(new_password)
@@ -3448,17 +3352,11 @@
         choices=[('all', _('No change to job limit')), ('failed', _('All failed and unreachable hosts'))],
         write_only=True,
     )
-    job_type = serializers.ChoiceField(
-        required=False,
-        allow_null=True,
-        choices=NEW_JOB_TYPE_CHOICES,
-        write_only=True,
-    )
     credential_passwords = VerbatimField(required=True, write_only=True)

     class Meta:
         model = Job
-        fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'job_type', 'credential_passwords')
+        fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords')

     def validate_credential_passwords(self, value):
         pnts = self.instance.passwords_needed_to_start
@@ -5917,34 +5815,6 @@
             raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
         return value
-    def validate_pod_spec_override(self, value):
-        if not value:
-            return value
-        # value should be empty for non-container groups
-        if self.instance and not self.instance.is_container_group:
-            raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))
-        pod_spec_override_json = None
-        # defect if the value is yaml or json if yaml convert to json
-        try:
-            # convert yaml to json
-            pod_spec_override_json = yaml.safe_load(value)
-        except yaml.YAMLError:
-            try:
-                pod_spec_override_json = json.loads(value)
-            except json.JSONDecodeError:
-                raise serializers.ValidationError(_('pod_spec_override must be valid yaml or json'))
-        # validate the
-        spec = pod_spec_override_json.get('spec', {})
-        automount_service_account_token = spec.get('automountServiceAccountToken', False)
-        if automount_service_account_token:
-            raise serializers.ValidationError(_('automountServiceAccountToken is not allowed for security reasons'))
-        return value
     def validate(self, attrs):
         attrs = super(InstanceGroupSerializer, self).validate(attrs)

View File

@@ -3435,7 +3435,6 @@ class JobRelaunch(RetrieveAPIView):
         copy_kwargs = {}
         retry_hosts = serializer.validated_data.get('hosts', None)
-        job_type = serializer.validated_data.get('job_type', None)
         if retry_hosts and retry_hosts != 'all':
             if obj.status in ACTIVE_STATES:
                 return Response(
@@ -3456,8 +3455,6 @@
                 )
             copy_kwargs['limit'] = ','.join(retry_host_list)
-        if job_type:
-            copy_kwargs['job_type'] = job_type
         new_job = obj.copy_unified_job(**copy_kwargs)
         result = new_job.signal_start(**serializer.validated_data['credential_passwords'])
         if not result:

View File

@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
 from awx.api.permissions import AnalyticsPermission
 from awx.api.versioning import reverse
 from awx.main.utils import get_awx_version
-from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
+from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_ENDPOINT
 from rest_framework import status
 from collections import OrderedDict
@@ -205,7 +205,7 @@ class AnalyticsGenericView(APIView):
         try:
             rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
             rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
-            client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
+            client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_ENDPOINT, ['api.console'])
             response = client.make_request(
                 method,
                 url,

View File

@@ -2098,7 +2098,7 @@ class WorkflowJobAccess(BaseAccess):
     def filtered_queryset(self):
         return WorkflowJob.objects.filter(
             Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
-            | Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
+            | Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
         )

     def can_read(self, obj):
@@ -2496,11 +2496,12 @@ class UnifiedJobAccess(BaseAccess):
     def filtered_queryset(self):
         inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role')
+        org_auditor_qs = Organization.objects.filter(Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
         qs = self.model.objects.filter(
             Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
             | Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs)
             | Q(adhoccommand__inventory__id__in=inv_pk_qs)
-            | Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
+            | Q(organization__in=org_auditor_qs)
         )
         return qs

View File

@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
 from awx.main.models import Job
 from awx.main.access import access_registry
 from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
-from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
+from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_ENDPOINT

 __all__ = ['register', 'gather', 'ship']
@@ -379,7 +379,7 @@ def ship(path):
     with set_environ(**settings.AWX_TASK_ENV):
         if rh_user and rh_password:
             try:
-                client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
+                client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_ENDPOINT, ['api.console'])
                 response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
             except requests.RequestException:
                 logger.error("Automation Analytics API request failed, trying base auth method")

View File

@@ -9,7 +9,6 @@ from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
 from prometheus_client.registry import CollectorRegistry
 from django.conf import settings
 from django.http import HttpRequest
-import redis.exceptions
 from rest_framework.request import Request

 from awx.main.consumers import emit_channel_notification
@@ -291,12 +290,8 @@ class Metrics(MetricsNamespace):
     def send_metrics(self):
         # more than one thread could be calling this at the same time, so should
         # acquire redis lock before sending metrics
-        try:
-            lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
-            if not lock.acquire(blocking=False):
-                return
-        except redis.exceptions.ConnectionError as exc:
-            logger.warning(f'Connection error in send_metrics: {exc}')
+        lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
+        if not lock.acquire(blocking=False):
             return
         try:
             current_time = time.time()
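
Reviewer note: both sides of the hunk above rely on redis-py's non-blocking lock acquisition to make sure only one process sends metrics at a time. A minimal standalone sketch of that pattern, with illustrative names (`send_metrics_once`, the key name) that are not from the AWX code and assuming a reachable Redis server:

import redis

def send_metrics_once(conn: redis.Redis, namespace: str) -> bool:
    # acquire(blocking=False) returns immediately instead of waiting for the lock
    lock = conn.lock(f"metrics-{namespace}_lock", timeout=30)
    if not lock.acquire(blocking=False):
        return False  # another worker is already sending metrics
    try:
        # ... gather and push metrics here ...
        return True
    finally:
        lock.release()

# usage (requires a local Redis): send_metrics_once(redis.Redis(), "callback_receiver")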

View File

@@ -88,10 +88,8 @@ class Scheduler:
         # internally times are all referenced relative to startup time, add grace period
         self.global_start = time.time() + 2.0

-    def get_and_mark_pending(self, reftime=None):
-        if reftime is None:
-            reftime = time.time()  # mostly for tests
-        relative_time = reftime - self.global_start
+    def get_and_mark_pending(self):
+        relative_time = time.time() - self.global_start
         to_run = []
         for job in self.jobs:
             if job.due_to_run(relative_time):
@@ -100,10 +98,8 @@
                 job.mark_run(relative_time)
         return to_run

-    def time_until_next_run(self, reftime=None):
-        if reftime is None:
-            reftime = time.time()  # mostly for tests
-        relative_time = reftime - self.global_start
+    def time_until_next_run(self):
+        relative_time = time.time() - self.global_start
         next_job = min(self.jobs, key=lambda j: j.next_run)
         delta = next_job.next_run - relative_time
         if delta <= 0.1:
@@ -119,11 +115,10 @@
     def debug(self, *args, **kwargs):
         data = dict()
         data['title'] = 'Scheduler status'
-        reftime = time.time()
-        now = datetime.fromtimestamp(reftime).strftime('%Y-%m-%d %H:%M:%S UTC')
+        now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
         start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
-        relative_time = reftime - self.global_start
+        relative_time = time.time() - self.global_start
         data['started_time'] = start_time
         data['current_time'] = now
         data['current_time_relative'] = round(relative_time, 3)
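
Reviewer note: the `reftime` keyword visible on the left-hand side of the hunk above lets the caller take one timestamp per wake-up and reuse it for both "what is due" and "how long to sleep", so slow callbacks do not skew either answer. A toy, self-contained sketch of that pattern (ToyJob/ToyScheduler are illustrative stand-ins, not the AWX classes):

import time

class ToyJob:
    def __init__(self, interval):
        self.interval = interval
        self.next_run = interval
    def due_to_run(self, relative_time):
        return relative_time >= self.next_run
    def mark_run(self, relative_time):
        self.next_run = relative_time + self.interval

class ToyScheduler:
    def __init__(self, jobs):
        self.global_start = time.time()
        self.jobs = jobs
    def get_and_mark_pending(self, reftime=None):
        if reftime is None:
            reftime = time.time()
        relative_time = reftime - self.global_start
        due = [j for j in self.jobs if j.due_to_run(relative_time)]
        for j in due:
            j.mark_run(relative_time)
        return due
    def time_until_next_run(self, reftime=None):
        if reftime is None:
            reftime = time.time()
        relative_time = reftime - self.global_start
        return max(min(j.next_run for j in self.jobs) - relative_time, 0.1)

# one reference timestamp per cycle keeps dispatch and sleep calculations consistent
sched = ToyScheduler([ToyJob(5), ToyJob(30)])
reftime = time.time()
pending = sched.get_and_mark_pending(reftime=reftime)
sleep_for = sched.time_until_next_run(reftime=reftime)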

View File

@@ -7,7 +7,6 @@ import time
 import traceback
 from datetime import datetime
 from uuid import uuid4
-import json
 import collections
 from multiprocessing import Process
@@ -26,10 +25,7 @@ from ansible_base.lib.logging.runtime import log_excess_runtime
 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity
-
-# ansible-runner
-from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -311,41 +307,6 @@
             logger.exception('could not kill {}'.format(worker.pid))

-def get_auto_max_workers():
-    """Method we normally rely on to get max_workers
-    Uses almost same logic as Instance.local_health_check
-    The important thing is to be MORE than Instance.capacity
-    so that the task-manager does not over-schedule this node
-    Ideally we would just use the capacity from the database plus reserve workers,
-    but this poses some bootstrap problems where OCP task containers
-    register themselves after startup
-    """
-    # Get memory from ansible-runner
-    total_memory_gb = get_mem_in_bytes()
-    # This may replace memory calculation with a user override
-    corrected_memory = get_corrected_memory(total_memory_gb)
-    # Get same number as max forks based on memory, this function takes memory as bytes
-    mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)
-    # Follow same process for CPU capacity constraint
-    cpu_count = get_cpu_count()
-    corrected_cpu = get_corrected_cpu(cpu_count)
-    cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)
-    # Here is what is different from health checks,
-    auto_max = max(mem_capacity, cpu_capacity)
-    # add magic number of extra workers to ensure
-    # we have a few extra workers to run the heartbeat
-    auto_max += 7
-    return auto_max
-
 class AutoscalePool(WorkerPool):
     """
     An extended pool implementation that automatically scales workers up and
@@ -359,7 +320,19 @@
         super(AutoscalePool, self).__init__(*args, **kwargs)

         if self.max_workers is None:
-            self.max_workers = get_auto_max_workers()
+            settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
+            if settings_absmem is not None:
+                # There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
+                total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
+            else:
+                total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up
+            # Get same number as max forks based on memory, this function takes memory as bytes
+            self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
+            # add magic prime number of extra workers to ensure
+            # we have a few extra workers to run the heartbeat
+            self.max_workers += 7

         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)
@@ -373,9 +346,6 @@
         self.scale_up_ct = 0
         self.worker_count_max = 0

-        # last time we wrote current tasks, to avoid too much log spam
-        self.last_task_list_log = time.monotonic()
-
     def produce_subsystem_metrics(self, metrics_object):
         metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
         metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
@@ -493,14 +463,6 @@
             self.worker_count_max = new_worker_ct
         return ret

-    @staticmethod
-    def fast_task_serialization(current_task):
-        try:
-            return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
-        except Exception:
-            # just make sure this does not make things worse
-            return str(current_task)
-
     def write(self, preferred_queue, body):
         if 'guid' in body:
             set_guid(body['guid'])
@@ -522,15 +484,6 @@
                 if isinstance(body, dict):
                     task_name = body.get('task')
                     logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
-                    # Once every 10 seconds write out task list for debugging
-                    if time.monotonic() - self.last_task_list_log >= 10.0:
-                        task_counts = {}
-                        for worker in self.workers:
-                            task_slug = self.fast_task_serialization(worker.current_task)
-                            task_counts.setdefault(task_slug, 0)
-                            task_counts[task_slug] += 1
-                        logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
-                        self.last_task_list_log = time.monotonic()
             return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
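
Reviewer note on the max_workers arithmetic above: shifting a byte count right by 30 bits is integer division by 2**30 (one GiB), and the +7 is fixed headroom for housekeeping work such as the heartbeat. A minimal hedged sketch under those assumptions (psutil is assumed available, as in the diff; `forks_per_gb` stands in for the real capacity formula in awx.main.utils.common and is illustrative only):

import psutil

def auto_max_workers(forks_per_gb: int = 10, reserve: int = 7) -> int:
    # bytes -> whole GiB; >> 30 is the same as // 2**30
    total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # round up
    # memory-based worker budget plus a small fixed reserve for heartbeat work
    return total_memory_gb * forks_per_gb + reserve

print(auto_max_workers())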

View File

@@ -15,7 +15,6 @@ from datetime import timedelta
 from django import db
 from django.conf import settings
-import redis.exceptions

 from ansible_base.lib.logging.runtime import log_excess_runtime
@@ -131,13 +130,10 @@ class AWXConsumerBase(object):
     @log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
     def record_statistics(self):
         if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
-            save_data = self.pool.debug()
             try:
-                self.redis.set(f'awx_{self.name}_statistics', save_data)
-            except redis.exceptions.ConnectionError as exc:
-                logger.warning(f'Redis connection error saving {self.name} status data:\n{exc}\nmissed data:\n{save_data}')
+                self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
             except Exception:
-                logger.exception(f"Unknown redis error saving {self.name} status data:\nmissed data:\n{save_data}")
+                logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
             self.last_stats = time.time()

     def run(self, *args, **kwargs):
@@ -193,10 +189,7 @@ class AWXConsumerPG(AWXConsumerBase):
             current_time = time.time()
             self.pool.produce_subsystem_metrics(self.subsystem_metrics)
             self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
-            try:
-                self.subsystem_metrics.pipe_execute()
-            except redis.exceptions.ConnectionError as exc:
-                logger.warning(f'Redis connection error saving dispatcher metrics, error:\n{exc}')
+            self.subsystem_metrics.pipe_execute()
             self.listen_cumulative_time = 0.0
             self.last_metrics_gather = current_time
@@ -212,11 +205,7 @@
         except Exception as exc:
             logger.warning(f'Failed to save dispatcher statistics {exc}')

-        # Everything benchmarks to the same original time, so that skews due to
-        # runtime of the actions, themselves, do not mess up scheduling expectations
-        reftime = time.time()
-        for job in self.scheduler.get_and_mark_pending(reftime=reftime):
+        for job in self.scheduler.get_and_mark_pending():
             if 'control' in job.data:
                 try:
                     job.data['control']()
@@ -233,12 +222,12 @@
                 self.listen_start = time.time()

-        return self.scheduler.time_until_next_run(reftime=reftime)
+        return self.scheduler.time_until_next_run()

     def run(self, *args, **kwargs):
         super(AWXConsumerPG, self).run(*args, **kwargs)
-        logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
+        logger.info(f"Running worker {self.name} listening to queues {self.queues}")
         init = False
         while True:
View File

@@ -86,7 +86,6 @@ class CallbackBrokerWorker(BaseWorker):
         return os.getpid()

     def read(self, queue):
-        has_redis_error = False
         try:
             res = self.redis.blpop(self.queue_name, timeout=1)
             if res is None:
@@ -96,21 +95,14 @@
             self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
             self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
             return json.loads(res[1])
-        except redis.exceptions.ConnectionError as exc:
-            # Low noise log, because very common and many workers will write this
-            logger.error(f"redis connection error: {exc}")
-            has_redis_error = True
-            time.sleep(5)
         except redis.exceptions.RedisError:
             logger.exception("encountered an error communicating with redis")
-            has_redis_error = True
             time.sleep(1)
         except (json.JSONDecodeError, KeyError):
             logger.exception("failed to decode JSON message from redis")
         finally:
-            if not has_redis_error:
-                self.record_statistics()
-                self.record_read_metrics()
+            self.record_statistics()
+            self.record_read_metrics()

         return {'event': 'FLUSH'}
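
For context on the read() loop above: redis-py's `blpop` returns None when the timeout expires and a `(key, payload)` tuple otherwise, which is why the method can fall through to a FLUSH event. A small hedged sketch of that behaviour (the queue name and function are illustrative, not the AWX worker; requires a reachable Redis to call):

import json
import redis

def read_one_event(conn: redis.Redis, queue_name: str = "callback_tasks"):
    # blpop blocks for up to 1 second; None on timeout, (key, payload) otherwise
    res = conn.blpop(queue_name, timeout=1)
    if res is None:
        return {"event": "FLUSH"}
    try:
        return json.loads(res[1])
    except json.JSONDecodeError:
        return {"event": "FLUSH"}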

View File

@@ -1,13 +1,10 @@
# Copyright (c) 2015 Ansible, Inc. # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import redis
from django.conf import settings from django.conf import settings
from django.core.management.base import BaseCommand, CommandError from django.core.management.base import BaseCommand
import redis.exceptions
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
from awx.main.dispatch.control import Control from awx.main.dispatch.control import Control
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker
@@ -30,10 +27,7 @@ class Command(BaseCommand):
return return
consumer = None consumer = None
try: CallbackReceiverMetricsServer().start()
CallbackReceiverMetricsServer().start()
except redis.exceptions.ConnectionError as exc:
raise CommandError(f'Callback receiver could not connect to redis, error: {exc}')
try: try:
consumer = AWXConsumerRedis( consumer = AWXConsumerRedis(

View File

@@ -3,10 +3,8 @@
 import logging
 import yaml

-import redis
-
 from django.conf import settings
-from django.core.management.base import BaseCommand, CommandError
+from django.core.management.base import BaseCommand

 from awx.main.dispatch import get_task_queuename
 from awx.main.dispatch.control import Control
@@ -65,10 +63,7 @@ class Command(BaseCommand):
         consumer = None
-        try:
-            DispatcherMetricsServer().start()
-        except redis.exceptions.ConnectionError as exc:
-            raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')
+        DispatcherMetricsServer().start()

         try:
             queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]

View File

@@ -1,46 +0,0 @@
-# Generated by Django 4.2.18 on 2025-03-17 16:10
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('main', '0196_indirect_managed_node_audit'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='inventory',
-            name='opa_query_path',
-            field=models.CharField(
-                blank=True,
-                default=None,
-                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
-                max_length=128,
-                null=True,
-            ),
-        ),
-        migrations.AddField(
-            model_name='jobtemplate',
-            name='opa_query_path',
-            field=models.CharField(
-                blank=True,
-                default=None,
-                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
-                max_length=128,
-                null=True,
-            ),
-        ),
-        migrations.AddField(
-            model_name='organization',
-            name='opa_query_path',
-            field=models.CharField(
-                blank=True,
-                default=None,
-                help_text='The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule.',
-                max_length=128,
-                null=True,
-            ),
-        ),
-    ]

View File

@@ -5,7 +5,7 @@ from django.db import migrations
 class Migration(migrations.Migration):
     dependencies = [
-        ('main', '0197_add_opa_query_path'),
+        ('main', '0196_indirect_managed_node_audit'),
     ]
     operations = [

View File

@@ -5,7 +5,7 @@ from django.db import migrations
 class Migration(migrations.Migration):
     dependencies = [
-        ('main', '0198_delete_profile'),
+        ('main', '0197_delete_profile'),
     ]
     operations = [

View File

@@ -6,7 +6,7 @@ from django.db import migrations, models
 class Migration(migrations.Migration):
     dependencies = [
-        ('main', '0199_remove_sso_app_content'),
+        ('main', '0198_remove_sso_app_content'),
     ]
     operations = [

View File

@@ -6,7 +6,7 @@ from django.db import migrations
 class Migration(migrations.Migration):
     dependencies = [
-        ('main', '0200_alter_inventorysource_source_and_more'),
+        ('main', '0199_alter_inventorysource_source_and_more'),
     ]
     operations = [

View File

@@ -8,7 +8,7 @@ from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt
 class Migration(migrations.Migration):
     dependencies = [
-        ('main', '0201_alter_oauth2application_unique_together_and_more'),
+        ('main', '0200_alter_oauth2application_unique_together_and_more'),
     ]
     operations = [

View File

@@ -550,10 +550,10 @@ class CredentialType(CommonModelNameNotUnique):
         # TODO: User "side-loaded" credential custom_injectors isn't supported
         ManagedCredentialType.registry[ns] = SimpleNamespace(namespace=ns, name=plugin.name, kind='external', inputs=plugin.inputs, backend=plugin.backend)

-    def inject_credential(self, credential, env, safe_env, args, private_data_dir, container_root=None):
+    def inject_credential(self, credential, env, safe_env, args, private_data_dir):
         from awx_plugins.interfaces._temporary_private_inject_api import inject_credential

-        inject_credential(self, credential, env, safe_env, args, private_data_dir, container_root=container_root)
+        inject_credential(self, credential, env, safe_env, args, private_data_dir)


 class CredentialTypeHelper:

View File

@@ -565,6 +565,7 @@ class JobEvent(BasePlaybookEvent):
         summaries = dict()
         updated_hosts_list = list()
         for host in hostnames:
+            updated_hosts_list.append(host.lower())
             host_id = host_map.get(host)
             if host_id not in existing_host_ids:
                 host_id = None
@@ -581,12 +582,6 @@
             summary.failed = bool(summary.dark or summary.failures)
             summaries[(host_id, host)] = summary

-            # do not count dark / unreachable hosts as updated
-            if not bool(summary.dark):
-                updated_hosts_list.append(host.lower())
-            else:
-                logger.warning(f'host {host.lower()} is dark / unreachable, not marking it as updated')
-
         JobHostSummary.objects.bulk_create(summaries.values())

         # update the last_job_id and last_job_host_summary_id
@@ -602,7 +597,7 @@
                 h.last_job_host_summary_id = host_mapping[h.id]
                 updated_hosts.add(h)
-        Host.objects.bulk_update(sorted(updated_hosts, key=lambda host: host.id), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
+        Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

         # Create/update Host Metrics
         self._update_host_metrics(updated_hosts_list)

View File

@@ -43,7 +43,6 @@ from awx.main.models.mixins import (
     TaskManagerInventoryUpdateMixin,
     RelatedJobsMixin,
     CustomVirtualEnvMixin,
-    OpaQueryPathMixin,
 )
 from awx.main.models.notifications import (
     NotificationTemplate,
@@ -69,7 +68,7 @@ class InventoryConstructedInventoryMembership(models.Model):
 )


-class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin, OpaQueryPathMixin):
+class Inventory(CommonModelNameNotUnique, ResourceMixin, RelatedJobsMixin):
     """
     an inventory source contains lists and hosts.
     """

View File

@@ -51,7 +51,6 @@ from awx.main.models.mixins import (
     RelatedJobsMixin,
     WebhookMixin,
     WebhookTemplateMixin,
-    OpaQueryPathMixin,
 )
 from awx.main.constants import JOB_VARIABLE_PREFIXES
@@ -193,9 +192,7 @@ class JobOptions(BaseModel):
         return needed


-class JobTemplate(
-    UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, WebhookTemplateMixin, OpaQueryPathMixin
-):
+class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, WebhookTemplateMixin):
     """
     A job template is a reusable job definition for applying a project (with
     playbook) to an inventory source with a given credential.

View File

@@ -42,7 +42,6 @@ __all__ = [
     'TaskManagerInventoryUpdateMixin',
     'ExecutionEnvironmentMixin',
     'CustomVirtualEnvMixin',
-    'OpaQueryPathMixin',
 ]
@@ -693,16 +692,3 @@ class WebhookMixin(models.Model):
                 logger.debug("Webhook status update sent.")
             else:
                 logger.error("Posting webhook status failed, code: {}\n" "{}\nPayload sent: {}".format(response.status_code, response.text, json.dumps(data)))
-
-
-class OpaQueryPathMixin(models.Model):
-    class Meta:
-        abstract = True
-
-    opa_query_path = models.CharField(
-        max_length=128,
-        blank=True,
-        null=True,
-        default=None,
-        help_text=_("The query path for the OPA policy to evaluate prior to job execution. The query path should be formatted as package/rule."),
-    )

View File

@@ -22,12 +22,12 @@ from awx.main.models.rbac import (
     ROLE_SINGLETON_SYSTEM_AUDITOR,
 )
 from awx.main.models.unified_jobs import UnifiedJob
-from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, OpaQueryPathMixin
+from awx.main.models.mixins import ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin

 __all__ = ['Organization', 'Team', 'UserSessionMembership']


-class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin, OpaQueryPathMixin):
+class Organization(CommonModel, NotificationFieldsModel, ResourceMixin, CustomVirtualEnvMixin, RelatedJobsMixin):
     """
     An organization is the basic unit of multi-tenancy divisions
     """

View File

@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
     ):
         super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
         self.grafana_key = grafana_key
-        self.dashboardId = int(dashboardId) if dashboardId is not None and panelId != "" else None
-        self.panelId = int(panelId) if panelId is not None and panelId != "" else None
+        self.dashboardId = int(dashboardId) if dashboardId is not None else None
+        self.panelId = int(panelId) if panelId is not None else None
         self.annotation_tags = annotation_tags if annotation_tags is not None else []
         self.grafana_no_verify_ssl = grafana_no_verify_ssl
         self.isRegion = isRegion
@@ -97,7 +97,6 @@
             r = requests.post(
                 "{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl)
             )
             if r.status_code >= 400:
                 logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
                 if not self.fail_silently:
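
Reviewer note: the extra empty-string guard on the left-hand side of the hunk above matters because `int("")` raises ValueError (and `int(None)` raises TypeError), so a notification configured with an empty panel/dashboard id would otherwise blow up in the constructor. A minimal hedged sketch of the idea (`parse_panel_id` is illustrative, not AWX code):

def parse_panel_id(raw):
    # treat empty or missing values as "not set" instead of letting int() raise
    if raw in (None, ""):
        return None
    return int(raw)

assert parse_panel_id("") is None
assert parse_panel_id("42") == 42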

View File

@@ -174,9 +174,6 @@ class PodManager(object):
         )

         pod_spec['spec']['containers'][0]['name'] = self.pod_name

-        # Prevent mounting of service account token in job pods in order to prevent job pods from accessing the k8s API via in cluster service account auth
-        pod_spec['spec']['automountServiceAccountToken'] = False

         return pod_spec

View File

@@ -10,8 +10,6 @@ import time
import sys import sys
import signal import signal
import redis
# Django # Django
from django.db import transaction from django.db import transaction
from django.utils.translation import gettext_lazy as _, gettext_noop from django.utils.translation import gettext_lazy as _, gettext_noop
@@ -122,8 +120,6 @@ class TaskBase:
self.subsystem_metrics.pipe_execute() self.subsystem_metrics.pipe_execute()
else: else:
logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago") logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
except redis.exceptions.ConnectionError as exc:
logger.warning(f"Redis connection error saving metrics for {self.prefix}, error: {exc}")
except Exception: except Exception:
logger.exception(f"Error saving metrics for {self.prefix}") logger.exception(f"Error saving metrics for {self.prefix}")

View File

@@ -6,6 +6,7 @@ import logging
# Django # Django
from django.conf import settings from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str from django.utils.encoding import smart_str
from django.utils.timezone import now from django.utils.timezone import now
from django.db import OperationalError from django.db import OperationalError
@@ -25,7 +26,6 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None): def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
log_data['inventory_id'] = inventory_id log_data['inventory_id'] = inventory_id
log_data['written_ct'] = 0 log_data['written_ct'] = 0
hosts_cached = list()
try: try:
os.makedirs(destination, mode=0o700) os.makedirs(destination, mode=0o700)
except FileExistsError: except FileExistsError:
@@ -34,17 +34,17 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=No
if timeout is None: if timeout is None:
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
if isinstance(hosts, QuerySet):
hosts = hosts.iterator()
last_filepath_written = None last_filepath_written = None
for host in hosts: for host in hosts:
hosts_cached.append(host) if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
continue # facts are expired - do not write them continue # facts are expired - do not write them
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
try: try:
with codecs.open(filepath, 'w', encoding='utf-8') as f: with codecs.open(filepath, 'w', encoding='utf-8') as f:
os.chmod(f.name, 0o600) os.chmod(f.name, 0o600)
@@ -54,16 +54,14 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=No
except IOError: except IOError:
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
continue continue
# make note of the time we wrote the last file so we can check if any file changed later
if last_filepath_written: if last_filepath_written:
return os.path.getmtime(last_filepath_written), hosts_cached return os.path.getmtime(last_filepath_written)
return None
return None, hosts_cached
def raw_update_hosts(host_list): def raw_update_hosts(host_list):
host_list = sorted(host_list, key=lambda host: host.id) Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'], batch_size=100)
def update_hosts(host_list, max_tries=5): def update_hosts(host_list, max_tries=5):
@@ -90,14 +88,17 @@ def update_hosts(host_list, max_tries=5):
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s', msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
add_log_data=True, add_log_data=True,
) )
def finish_fact_cache(hosts_cached, destination, facts_write_time, log_data, job_id=None, inventory_id=None): def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
log_data['inventory_id'] = inventory_id log_data['inventory_id'] = inventory_id
log_data['updated_ct'] = 0 log_data['updated_ct'] = 0
log_data['unmodified_ct'] = 0 log_data['unmodified_ct'] = 0
log_data['cleared_ct'] = 0 log_data['cleared_ct'] = 0
if isinstance(hosts, QuerySet):
hosts = hosts.iterator()
hosts_to_update = [] hosts_to_update = []
for host in hosts_cached: for host in hosts:
filepath = os.sep.join(map(str, [destination, host.name])) filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination): if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name))) system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
@@ -129,7 +130,6 @@ def finish_fact_cache(hosts_cached, destination, facts_write_time, log_data, job
log_data['unmodified_ct'] += 1 log_data['unmodified_ct'] += 1
else: else:
# if the file goes missing, ansible removed it (likely via clear_facts) # if the file goes missing, ansible removed it (likely via clear_facts)
# if the file goes missing, but the host has not started facts, then we should not clear the facts
host.ansible_facts = {} host.ansible_facts = {}
host.ansible_facts_modified = now() host.ansible_facts_modified = now()
hosts_to_update.append(host) hosts_to_update.append(host)
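Both sides of the hunks above keep the same per-host path check before reading or writing a fact file. A standalone sketch of that guard (the helper name is invented):

import os

def fact_cache_path(destination, host_name):
    # Refuse any host name that would escape the cache directory
    # (e.g. "../../etc/passwd") once the path is resolved.
    filepath = os.path.join(destination, host_name)
    if not os.path.realpath(filepath).startswith(os.path.realpath(destination)):
        return None
    return filepath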

View File

@@ -45,35 +45,22 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
facts_missing_logged = False facts_missing_logged = False
unhashable_facts_logged = False unhashable_facts_logged = False
job_event_queries_fqcn = {}
for query_k, query_v in job_event_queries.items():
if len(parts := query_k.split('.')) != 3:
logger.info(f"Skiping malformed query '{query_k}'. Expected to be of the form 'a.b.c'")
continue
if parts[2] != '*':
continue
job_event_queries_fqcn['.'.join(parts[0:2])] = query_v
for event in job.job_events.filter(event_data__isnull=False).iterator(): for event in job.job_events.filter(event_data__isnull=False).iterator():
if 'res' not in event.event_data: if 'res' not in event.event_data:
continue continue
if not (resolved_action := event.event_data.get('resolved_action', None)): if 'resolved_action' not in event.event_data or event.event_data['resolved_action'] not in job_event_queries.keys():
continue continue
if len(resolved_action_parts := resolved_action.split('.')) != 3: resolved_action = event.event_data['resolved_action']
logger.debug(f"Malformed invocation module name '{resolved_action}'. Expected to be of the form 'a.b.c'")
continue
resolved_action_fqcn = '.'.join(resolved_action_parts[0:2]) # We expect a dict with a 'query' key for the resolved_action
if 'query' not in job_event_queries[resolved_action]:
# Match module invocation to collection queries
# First match against fully qualified query names i.e. a.b.c
# Then try and match against wildcard queries i.e. a.b.*
if not (jq_str_for_event := job_event_queries.get(resolved_action, job_event_queries_fqcn.get(resolved_action_fqcn, {})).get('query')):
continue continue
# Recall from cache, or process the jq expression, and loop over the jq results # Recall from cache, or process the jq expression, and loop over the jq results
jq_str_for_event = job_event_queries[resolved_action]['query']
if jq_str_for_event not in compiled_jq_expressions: if jq_str_for_event not in compiled_jq_expressions:
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event) compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
compiled_jq = compiled_jq_expressions[resolved_action] compiled_jq = compiled_jq_expressions[resolved_action]
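The left-hand side above resolves a module invocation against registered queries in two steps: an exact 'ns.collection.module' entry first, then a wildcard 'ns.collection.*' entry for the collection; an exact entry without a 'query' key deliberately does not fall through to the wildcard (see the ordering_should_not_matter test case later in this compare). A compact sketch of that lookup, with invented names:

def resolve_query(resolved_action, queries):
    # queries maps 'ns.collection.module' or 'ns.collection.*' -> {'query': <jq string>}
    parts = resolved_action.split('.')
    if len(parts) != 3:
        return None  # malformed module name such as 'demo.query' or 'a.b.c.d'
    wildcard_key = '.'.join(parts[:2]) + '.*'
    entry = queries.get(resolved_action, queries.get(wildcard_key, {}))
    return entry.get('query')

queries = {'demo.query.*': {'query': '{name: .name}'}}
assert resolve_query('demo.query.example', queries) == '{name: .name}'
assert resolve_query('demo.query.example', {'demo.query.example': {}, **queries}) is None
assert resolve_query('other.ns.module', queries) is None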

View File

@@ -522,13 +522,9 @@ class BaseTask(object):
credentials = self.build_credentials_list(self.instance) credentials = self.build_credentials_list(self.instance)
container_root = None
if settings.IS_K8S and isinstance(self.instance, ProjectUpdate):
container_root = private_data_dir
for credential in credentials: for credential in credentials:
if credential: if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir, container_root=container_root) credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.runner_callback.safe_env.update(self.safe_cred_env) self.runner_callback.safe_env.update(self.safe_cred_env)
@@ -921,6 +917,7 @@ class RunJob(SourceControlMixin, BaseTask):
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='') env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = [ path_vars = [
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'), ('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
('ANSIBLE_COLLECTIONS_PATH', 'collections_path', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'), ('ANSIBLE_COLLECTIONS_PATH', 'collections_path', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
] ]
@@ -1091,7 +1088,7 @@ class RunJob(SourceControlMixin, BaseTask):
# where ansible expects to find it # where ansible expects to find it
if self.should_use_fact_cache(): if self.should_use_fact_cache():
job.log_lifecycle("start_job_fact_cache") job.log_lifecycle("start_job_fact_cache")
self.facts_write_time, self.hosts_with_facts_cached = start_fact_cache( self.facts_write_time = start_fact_cache(
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
) )
@@ -1110,7 +1107,7 @@ class RunJob(SourceControlMixin, BaseTask):
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed: if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
job.log_lifecycle("finish_job_fact_cache") job.log_lifecycle("finish_job_fact_cache")
finish_fact_cache( finish_fact_cache(
self.hosts_with_facts_cached, job.get_hosts_for_fact_cache(),
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
facts_write_time=self.facts_write_time, facts_write_time=self.facts_write_time,
job_id=job.id, job_id=job.id,
@@ -1523,7 +1520,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
raise NotImplementedError('Cannot update file sources through the task system.') raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update: if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATH' env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths' config_setting = 'collections_paths'
folder = 'requirements_collections' folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections' default = '~/.ansible/collections:/usr/share/ansible/collections'
@@ -1541,12 +1538,12 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
paths = [config_values[config_setting]] + paths paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths) env[env_key] = os.pathsep.join(paths)
if 'ANSIBLE_COLLECTIONS_PATH' in env: if 'ANSIBLE_COLLECTIONS_PATHS' in env:
paths = env['ANSIBLE_COLLECTIONS_PATH'].split(':') paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
else: else:
paths = ['~/.ansible/collections', '/usr/share/ansible/collections'] paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
paths.append('/usr/share/automation-controller/collections') paths.append('/usr/share/automation-controller/collections')
env['ANSIBLE_COLLECTIONS_PATH'] = os.pathsep.join(paths) env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
return env return env
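The only substantive difference in this hunk is the variable name (ANSIBLE_COLLECTIONS_PATH on one side, the older plural ANSIBLE_COLLECTIONS_PATHS on the other); the path-building logic is the same on both. A small sketch of that logic with an invented helper:

import os

def extend_collections_path(env, extra_path, key='ANSIBLE_COLLECTIONS_PATH'):
    # Preserve any paths the environment already carries, then append ours.
    if key in env:
        paths = env[key].split(':')
    else:
        paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
    paths.append(extra_path)
    env[key] = os.pathsep.join(paths)
    return env

env = extend_collections_path({}, '/usr/share/automation-controller/collections')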

View File

@@ -1,7 +0,0 @@
---
- hosts: all
gather_facts: false
connection: local
tasks:
- meta: clear_facts

View File

@@ -1,17 +0,0 @@
---
- hosts: all
vars:
extra_value: ""
gather_facts: false
connection: local
tasks:
- name: set a custom fact
set_fact:
foo: "bar{{ extra_value }}"
bar:
a:
b:
- "c"
- "d"
cacheable: true

View File

@@ -1,9 +0,0 @@
---
- hosts: all
gather_facts: false
connection: local
vars:
msg: 'hello'
tasks:
- debug: var=msg

View File

@@ -1,17 +0,0 @@
import time
import logging
from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task
logger = logging.getLogger(__name__)
@task(queue=get_task_queuename)
def sleep_task(seconds=10, log=False):
if log:
logger.info('starting sleep_task')
time.sleep(seconds)
if log:
logger.info('finished sleep_task')

View File

@@ -210,39 +210,6 @@ def test_disallowed_http_update_methods(put, patch, post, inventory, project, ad
patch(url=reverse('api:job_detail', kwargs={'pk': job.pk}), data={}, user=admin_user, expect=405) patch(url=reverse('api:job_detail', kwargs={'pk': job.pk}), data={}, user=admin_user, expect=405)
@pytest.mark.django_db
@pytest.mark.parametrize(
"job_type",
[
'run',
'check',
],
)
def test_job_relaunch_with_job_type(post, inventory, project, machine_credential, admin_user, job_type):
# Create a job template
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
# Set initial job type
init_job_type = 'check' if job_type == 'run' else 'run'
# Create a job instance
job = jt.create_unified_job(_eager_fields={'job_type': init_job_type})
# Perform the POST request
url = reverse('api:job_relaunch', kwargs={'pk': job.pk})
r = post(url=url, data={'job_type': job_type}, user=admin_user, expect=201)
# Assert that the response status code is 201 (Created)
assert r.status_code == 201
# Retrieve the newly created job from the response
new_job_id = r.data.get('id')
new_job = Job.objects.get(id=new_job_id)
# Assert that the new job has the correct job type
assert new_job.job_type == job_type
class TestControllerNode: class TestControllerNode:
@pytest.fixture @pytest.fixture
def project_update(self, project): def project_update(self, project):

View File

@@ -56,175 +56,6 @@ def test_user_create(post, admin):
assert not response.data['is_system_auditor'] assert not response.data['is_system_auditor']
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
@override_settings(
LOCAL_PASSWORD_MIN_LENGTH=1,
LOCAL_PASSWORD_MIN_DIGITS=0,
LOCAL_PASSWORD_MIN_UPPER=0,
LOCAL_PASSWORD_MIN_SPECIAL=0,
)
@pytest.mark.django_db
def test_user_create_with_django_password_validation_basic(post, admin):
"""Test if the Django password validators are applied correctly."""
with override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 3,
},
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
],
):
# This user should fail the UserAttrSimilarity, MinLength and CommonPassword validators.
user_attrs = (
{
"password": "Password", # NOSONAR
"username": "Password",
"is_superuser": False,
},
)
print(f"Create user with invalid password {user_attrs=}")
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
assert response.status_code == 400
# This user should pass all Django validators.
user_attrs = {
"password": "r$TyKiOCb#ED", # NOSONAR
"username": "TestUser",
"is_superuser": False,
}
print(f"Create user with valid password {user_attrs=}")
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
assert response.status_code == 201
@pytest.mark.parametrize(
"user_attrs,validators,expected_status_code",
[
# Test password similarity with username.
(
{"password": "TestUser1", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
400,
),
(
{"password": "abc", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
201,
),
# Test password min length criterion.
(
{"password": "TooShort", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
],
400,
),
(
{"password": "LongEnough", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
],
201,
),
# Test password is too common criterion.
(
{"password": "Password", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
],
400,
),
(
{"password": "aEArV$5Vkdw", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
],
201,
),
# Test if password is only numeric.
(
{"password": "1234567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
],
400,
),
(
{"password": "abc4567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
[
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
],
201,
),
],
)
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
@override_settings(
LOCAL_PASSWORD_MIN_LENGTH=1,
LOCAL_PASSWORD_MIN_DIGITS=0,
LOCAL_PASSWORD_MIN_UPPER=0,
LOCAL_PASSWORD_MIN_SPECIAL=0,
)
@pytest.mark.django_db
def test_user_create_with_django_password_validation_ext(post, delete, admin, user_attrs, validators, expected_status_code):
"""Test the functionality of the single Django password validators."""
#
default_parameters = {
# Default values for input parameters which are None.
"user_attrs": {
"password": "r$TyKiOCb#ED", # NOSONAR
"username": "DefaultUser",
"is_superuser": False,
},
"validators": [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 8,
},
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
],
}
user_attrs = user_attrs if user_attrs is not None else default_parameters["user_attrs"]
validators = validators if validators is not None else default_parameters["validators"]
with override_settings(AUTH_PASSWORD_VALIDATORS=validators):
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
assert response.status_code == expected_status_code
# Delete user if it was created successfully.
if response.status_code == 201:
response = delete(reverse('api:user_detail', kwargs={'pk': response.data['id']}), admin, middleware=SessionMiddleware(mock.Mock()))
assert response.status_code == 204
else:
# Catch the unexpected behavior that sometimes the user is written
# into the database before the validation fails. This actually can
# happen if UserSerializer.validate instantiates User(**attrs)!
username = user_attrs['username']
assert not User.objects.filter(username=username)
@pytest.mark.django_db @pytest.mark.django_db
def test_fail_double_create_user(post, admin): def test_fail_double_create_user(post, admin):
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock())) response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
@@ -251,10 +82,6 @@ def test_updating_own_password_refreshes_session(patch, admin):
Updating your own password should refresh the session id. Updating your own password should refresh the session id.
''' '''
with mock.patch('awx.api.serializers.update_session_auth_hash') as update_session_auth_hash: with mock.patch('awx.api.serializers.update_session_auth_hash') as update_session_auth_hash:
# Attention: If the Django password validator `CommonPasswordValidator`
# is active, this test case will fail because this validator raises on
# password 'newpassword'. Consider changing the hard-coded password to
# something uncommon.
patch(reverse('api:user_detail', kwargs={'pk': admin.pk}), {'password': 'newpassword'}, admin, middleware=SessionMiddleware(mock.Mock())) patch(reverse('api:user_detail', kwargs={'pk': admin.pk}), {'password': 'newpassword'}, admin, middleware=SessionMiddleware(mock.Mock()))
assert update_session_auth_hash.called assert update_session_auth_hash.called

View File

@@ -34,18 +34,40 @@ def test_wrapup_does_send_notifications(mocker):
mock.assert_called_once_with('succeeded') mock.assert_called_once_with('succeeded')
class FakeRedis:
def keys(self, *args, **kwargs):
return []
def set(self):
pass
def get(self):
return None
@classmethod
def from_url(cls, *args, **kwargs):
return cls()
def pipeline(self):
return self
class TestCallbackBrokerWorker(TransactionTestCase): class TestCallbackBrokerWorker(TransactionTestCase):
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def turn_off_websockets_and_redis(self, fake_redis): def turn_off_websockets(self):
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None): with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
yield yield
def get_worker(self):
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
return CallbackBrokerWorker()
def event_create_kwargs(self): def event_create_kwargs(self):
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file')) inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
return dict(inventory_update=inventory_update, created=inventory_update.created) return dict(inventory_update=inventory_update, created=inventory_update.created)
def test_flush_with_valid_event(self): def test_flush_with_valid_event(self):
worker = CallbackBrokerWorker() worker = self.get_worker()
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())] events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
worker.buff = {InventoryUpdateEvent: events} worker.buff = {InventoryUpdateEvent: events}
worker.flush() worker.flush()
@@ -53,7 +75,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1 assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
def test_flush_with_invalid_event(self): def test_flush_with_invalid_event(self):
worker = CallbackBrokerWorker() worker = self.get_worker()
kwargs = self.event_create_kwargs() kwargs = self.event_create_kwargs()
events = [ events = [
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs), InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
@@ -68,7 +90,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
assert worker.buff == {InventoryUpdateEvent: [events[1]]} assert worker.buff == {InventoryUpdateEvent: [events[1]]}
def test_duplicate_key_not_saved_twice(self): def test_duplicate_key_not_saved_twice(self):
worker = CallbackBrokerWorker() worker = self.get_worker()
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())] events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
worker.buff = {InventoryUpdateEvent: events.copy()} worker.buff = {InventoryUpdateEvent: events.copy()}
worker.flush() worker.flush()
@@ -82,7 +104,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
assert worker.buff.get(InventoryUpdateEvent, []) == [] assert worker.buff.get(InventoryUpdateEvent, []) == []
def test_give_up_on_bad_event(self): def test_give_up_on_bad_event(self):
worker = CallbackBrokerWorker() worker = self.get_worker()
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())] events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
worker.buff = {InventoryUpdateEvent: events.copy()} worker.buff = {InventoryUpdateEvent: events.copy()}
@@ -95,7 +117,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
def test_flush_with_empty_buffer(self): def test_flush_with_empty_buffer(self):
worker = CallbackBrokerWorker() worker = self.get_worker()
worker.buff = {InventoryUpdateEvent: []} worker.buff = {InventoryUpdateEvent: []}
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock: with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
worker.flush() worker.flush()
@@ -105,7 +127,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
# In postgres, text fields reject NUL character, 0x00 # In postgres, text fields reject NUL character, 0x00
# tests use sqlite3 which will not raise an error # tests use sqlite3 which will not raise an error
# but we can still test that it is sanitized before saving # but we can still test that it is sanitized before saving
worker = CallbackBrokerWorker() worker = self.get_worker()
kwargs = self.event_create_kwargs() kwargs = self.event_create_kwargs()
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)] events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
assert "\x00" in events[0].stdout # sanity assert "\x00" in events[0].stdout # sanity

View File

@@ -63,33 +63,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
return requests return requests
class FakeRedis:
def keys(self, *args, **kwargs):
return []
def set(self):
pass
def get(self):
return None
@classmethod
def from_url(cls, *args, **kwargs):
return cls()
def pipeline(self):
return self
def ping(self):
return
@pytest.fixture
def fake_redis():
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
yield
@pytest.fixture @pytest.fixture
def user(): def user():
def u(name, is_superuser=False): def u(name, is_superuser=False):

View File

@@ -106,17 +106,6 @@ def test_compat_role_naming(setup_managed_roles, job_template, rando, alice):
assert rd.created_by is None assert rd.created_by is None
@pytest.mark.django_db
def test_organization_admin_has_audit(setup_managed_roles):
"""This formalizes a behavior change from old to new RBAC system
Previously, the auditor_role did not list admin_role as a parent
this made various queries hard to deal with, requiring adding 2 conditions
The new system should explicitly list the auditor permission in org admin role"""
rd = RoleDefinition.objects.get(name='Organization Admin')
assert 'audit_organization' in rd.permissions.values_list('codename', flat=True)
@pytest.mark.django_db @pytest.mark.django_db
def test_organization_level_permissions(organization, inventory, setup_managed_roles): def test_organization_level_permissions(organization, inventory, setup_managed_roles):
u1 = User.objects.create(username='alice') u1 = User.objects.create(username='alice')

View File

@@ -135,9 +135,8 @@ class TestEvents:
self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames)) self._create_job_event(ok=dict((hostname, len(hostname)) for hostname in self.hostnames))
# Soft delete 6 of the 12 host metrics, every even host like "Host 2" or "Host 4" # Soft delete 6 host metrics
for host_name in self.hostnames[::2]: for hm in HostMetric.objects.filter(id__in=[1, 3, 5, 7, 9, 11]):
hm = HostMetric.objects.get(hostname=host_name.lower())
hm.soft_delete() hm.soft_delete()
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6 assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
@@ -166,9 +165,7 @@ class TestEvents:
skipped=dict((hostname, len(hostname)) for hostname in self.hostnames[10:12]), skipped=dict((hostname, len(hostname)) for hostname in self.hostnames[10:12]),
) )
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6 assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=0) & Q(last_deleted__isnull=True))) == 6
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 6
# one of those 6 hosts is dark, so will not be counted
assert len(HostMetric.objects.filter(Q(deleted=False) & Q(deleted_counter=1) & Q(last_deleted__isnull=False))) == 5
def _generate_hosts(self, cnt, id_from=0): def _generate_hosts(self, cnt, id_from=0):
self.hostnames = [f'Host {i}' for i in range(id_from, id_from + cnt)] self.hostnames = [f'Host {i}' for i in range(id_from, id_from + cnt)]

View File

@@ -3,10 +3,6 @@ import pytest
# AWX # AWX
from awx.main.ha import is_ha_environment from awx.main.ha import is_ha_environment
from awx.main.models.ha import Instance from awx.main.models.ha import Instance
from awx.main.dispatch.pool import get_auto_max_workers
# Django
from django.test.utils import override_settings
@pytest.mark.django_db @pytest.mark.django_db
@@ -21,25 +17,3 @@ def test_db_localhost():
Instance.objects.create(hostname='foo', node_type='hybrid') Instance.objects.create(hostname='foo', node_type='hybrid')
Instance.objects.create(hostname='bar', node_type='execution') Instance.objects.create(hostname='bar', node_type='execution')
assert is_ha_environment() is False assert is_ha_environment() is False
@pytest.mark.django_db
@pytest.mark.parametrize(
'settings',
[
dict(SYSTEM_TASK_ABS_MEM='16Gi', SYSTEM_TASK_ABS_CPU='24', SYSTEM_TASK_FORKS_MEM=400, SYSTEM_TASK_FORKS_CPU=4),
dict(SYSTEM_TASK_ABS_MEM='124Gi', SYSTEM_TASK_ABS_CPU='2', SYSTEM_TASK_FORKS_MEM=None, SYSTEM_TASK_FORKS_CPU=None),
],
ids=['cpu_dominated', 'memory_dominated'],
)
def test_dispatcher_max_workers_reserve(settings, fake_redis):
"""This tests that the dispatcher max_workers matches instance capacity
Assumes capacity_adjustment is 1,
plus reserve worker count
"""
with override_settings(**settings):
i = Instance.objects.create(hostname='test-1', node_type='hybrid')
i.local_health_check()
assert get_auto_max_workers() == i.capacity + 7, (i.cpu, i.memory, i.cpu_capacity, i.mem_capacity)

View File

@@ -1,7 +1,6 @@
import pytest import pytest
from awx.main.access import ( from awx.main.access import (
UnifiedJobAccess,
WorkflowJobTemplateAccess, WorkflowJobTemplateAccess,
WorkflowJobTemplateNodeAccess, WorkflowJobTemplateNodeAccess,
WorkflowJobAccess, WorkflowJobAccess,
@@ -246,30 +245,6 @@ class TestWorkflowJobAccess:
inventory.use_role.members.add(rando) inventory.use_role.members.add(rando)
assert WorkflowJobAccess(rando).can_start(workflow_job) assert WorkflowJobAccess(rando).can_start(workflow_job)
@pytest.mark.parametrize('org_role', ['admin_role', 'auditor_role'])
def test_workflow_job_org_audit_access(self, workflow_job_template, rando, org_role):
assert workflow_job_template.organization # sanity
workflow_job = workflow_job_template.create_unified_job()
assert workflow_job.organization # sanity
assert not UnifiedJobAccess(rando).can_read(workflow_job)
assert not WorkflowJobAccess(rando).can_read(workflow_job)
assert workflow_job not in WorkflowJobAccess(rando).filtered_queryset()
org = workflow_job.organization
role = getattr(org, org_role)
role.members.add(rando)
assert UnifiedJobAccess(rando).can_read(workflow_job)
assert WorkflowJobAccess(rando).can_read(workflow_job)
assert workflow_job in WorkflowJobAccess(rando).filtered_queryset()
# Organization-level permissions should persist after deleting the WFJT
workflow_job_template.delete()
assert UnifiedJobAccess(rando).can_read(workflow_job)
assert WorkflowJobAccess(rando).can_read(workflow_job)
assert workflow_job in WorkflowJobAccess(rando).filtered_queryset()
@pytest.mark.django_db @pytest.mark.django_db
class TestWFJTCopyAccess: class TestWFJTCopyAccess:

View File

@@ -1,5 +1,4 @@
import yaml import yaml
from functools import reduce
from unittest import mock from unittest import mock
import pytest import pytest
@@ -21,46 +20,6 @@ from awx.main.models.indirect_managed_node_audit import IndirectManagedNodeAudit
TEST_JQ = "{name: .name, canonical_facts: {host_name: .direct_host_name}, facts: {another_host_name: .direct_host_name}}" TEST_JQ = "{name: .name, canonical_facts: {host_name: .direct_host_name}, facts: {another_host_name: .direct_host_name}}"
class Query(dict):
def __init__(self, resolved_action: str, query_jq: dict):
self._resolved_action = resolved_action.split('.')
self._collection_ns, self._collection_name, self._module_name = self._resolved_action
super().__init__({self.resolve_key: {'query': query_jq}})
def get_fqcn(self):
return f'{self._collection_ns}.{self._collection_name}'
@property
def resolve_value(self):
return self[self.resolve_key]
@property
def resolve_key(self):
return f'{self.get_fqcn()}.{self._module_name}'
def resolve(self, module_name=None):
return {f'{self.get_fqcn()}.{module_name or self._module_name}': self.resolve_value}
def create_event_query(self, module_name=None):
if (module_name := module_name or self._module_name) == '*':
raise ValueError('Invalid module name *')
return self.create_event_queries([module_name])
def create_event_queries(self, module_names):
queries = {}
for name in module_names:
queries |= self.resolve(name)
return EventQuery.objects.create(
fqcn=self.get_fqcn(),
collection_version='1.0.1',
event_query=yaml.dump(queries, default_flow_style=False),
)
def create_registered_event(self, job, module_name):
job.job_events.create(event_data={'resolved_action': f'{self.get_fqcn()}.{module_name}', 'res': {'direct_host_name': 'foo_host', 'name': 'vm-foo'}})
@pytest.fixture @pytest.fixture
def bare_job(job_factory): def bare_job(job_factory):
job = job_factory() job = job_factory()
@@ -80,6 +39,11 @@ def job_with_counted_event(bare_job):
return bare_job return bare_job
def create_event_query(fqcn='demo.query'):
module_name = f'{fqcn}.example'
return EventQuery.objects.create(fqcn=fqcn, collection_version='1.0.1', event_query=yaml.dump({module_name: {'query': TEST_JQ}}, default_flow_style=False))
def create_audit_record(name, job, organization, created=now()): def create_audit_record(name, job, organization, created=now()):
record = IndirectManagedNodeAudit.objects.create(name=name, job=job, organization=organization) record = IndirectManagedNodeAudit.objects.create(name=name, job=job, organization=organization)
record.created = created record.created = created
@@ -90,7 +54,7 @@ def create_audit_record(name, job, organization, created=now()):
@pytest.fixture @pytest.fixture
def event_query(): def event_query():
"This is ordinarily created by the artifacts callback" "This is ordinarily created by the artifacts callback"
return Query('demo.query.example', TEST_JQ).create_event_query() return create_event_query()
@pytest.fixture @pytest.fixture
@@ -108,211 +72,105 @@ def new_audit_record(bare_job, organization):
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize( def test_build_with_no_results(bare_job):
'queries,expected_matches', # never filled in events, should do nothing
( assert build_indirect_host_data(bare_job, {}) == []
pytest.param(
[],
0,
id='no_results',
),
pytest.param(
[Query('demo.query.example', TEST_JQ)],
1,
id='fully_qualified',
),
pytest.param(
[Query('demo.query.*', TEST_JQ)],
1,
id='wildcard',
),
pytest.param(
[
Query('demo.query.*', TEST_JQ),
Query('demo.query.example', TEST_JQ),
],
1,
id='wildcard_and_fully_qualified',
),
pytest.param(
[
Query('demo.query.*', TEST_JQ),
Query('demo.query.example', {}),
],
0,
id='wildcard_and_fully_qualified',
),
pytest.param(
[
Query('demo.query.example', {}),
Query('demo.query.*', TEST_JQ),
],
0,
id='ordering_should_not_matter',
),
),
)
def test_build_indirect_host_data(job_with_counted_event, queries: Query, expected_matches: int):
data = build_indirect_host_data(job_with_counted_event, {k: v for d in queries for k, v in d.items()})
assert len(data) == expected_matches
@mock.patch('awx.main.tasks.host_indirect.logger.debug')
@pytest.mark.django_db
@pytest.mark.parametrize(
'task_name',
(
pytest.param(
'demo.query',
id='no_results',
),
pytest.param(
'demo',
id='no_results',
),
pytest.param(
'a.b.c.d',
id='no_results',
),
),
)
def test_build_indirect_host_data_malformed_module_name(mock_logger_debug, bare_job, task_name: str):
create_registered_event(bare_job, task_name)
assert build_indirect_host_data(bare_job, Query('demo.query.example', TEST_JQ)) == []
mock_logger_debug.assert_called_once_with(f"Malformed invocation module name '{task_name}'. Expected to be of the form 'a.b.c'")
@mock.patch('awx.main.tasks.host_indirect.logger.info')
@pytest.mark.django_db
@pytest.mark.parametrize(
'query',
(
pytest.param(
'demo.query',
id='no_results',
),
pytest.param(
'demo',
id='no_results',
),
pytest.param(
'a.b.c.d',
id='no_results',
),
),
)
def test_build_indirect_host_data_malformed_query(mock_logger_info, job_with_counted_event, query: str):
assert build_indirect_host_data(job_with_counted_event, {query: {'query': TEST_JQ}}) == []
mock_logger_info.assert_called_once_with(f"Skiping malformed query '{query}'. Expected to be of the form 'a.b.c'")
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize( def test_collect_an_event(job_with_counted_event):
'query', records = build_indirect_host_data(job_with_counted_event, {'demo.query.example': {'query': TEST_JQ}})
( assert len(records) == 1
pytest.param(
Query('demo.query.example', TEST_JQ),
id='fully_qualified',
),
pytest.param(
Query('demo.query.*', TEST_JQ),
id='wildcard',
),
),
)
def test_fetch_job_event_query(bare_job, query: Query):
query.create_event_query(module_name='example')
assert fetch_job_event_query(bare_job) == query.resolve('example')
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize( def test_fetch_job_event_query(bare_job, event_query):
'queries', assert fetch_job_event_query(bare_job) == {'demo.query.example': {'query': TEST_JQ}}
(
[
Query('demo.query.example', TEST_JQ),
Query('demo2.query.example', TEST_JQ),
],
[
Query('demo.query.*', TEST_JQ),
Query('demo2.query.example', TEST_JQ),
],
),
)
def test_fetch_multiple_job_event_query(bare_job, queries: list[Query]):
for q in queries:
q.create_event_query(module_name='example')
assert fetch_job_event_query(bare_job) == reduce(lambda acc, q: acc | q.resolve('example'), queries, {})
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.parametrize( def test_fetch_multiple_job_event_query(bare_job):
('state',), create_event_query(fqcn='demo.query')
( create_event_query(fqcn='demo2.query')
pytest.param( assert fetch_job_event_query(bare_job) == {'demo.query.example': {'query': TEST_JQ}, 'demo2.query.example': {'query': TEST_JQ}}
[
(
Query('demo.query.example', TEST_JQ), @pytest.mark.django_db
['example'], def test_save_indirect_host_entries(job_with_counted_event, event_query):
), assert job_with_counted_event.event_queries_processed is False
], save_indirect_host_entries(job_with_counted_event.id)
id='fully_qualified', job_with_counted_event.refresh_from_db()
), assert job_with_counted_event.event_queries_processed is True
pytest.param( assert IndirectManagedNodeAudit.objects.filter(job=job_with_counted_event).count() == 1
[ host_audit = IndirectManagedNodeAudit.objects.filter(job=job_with_counted_event).first()
( assert host_audit.count == 1
Query('demo.query.example', TEST_JQ), assert host_audit.canonical_facts == {'host_name': 'foo_host'}
['example'] * 3, assert host_audit.facts == {'another_host_name': 'foo_host'}
), assert host_audit.organization == job_with_counted_event.organization
], assert host_audit.name == 'vm-foo'
id='multiple_events_same_module_same_host',
),
pytest.param( @pytest.mark.django_db
[ def test_multiple_events_same_module_same_host(bare_job, event_query):
( "This tests that the count field gives correct answers"
Query('demo.query.example', TEST_JQ), create_registered_event(bare_job)
['example'], create_registered_event(bare_job)
), create_registered_event(bare_job)
(
Query('demo2.query.example', TEST_JQ),
['example'],
),
],
id='multiple_modules',
),
pytest.param(
[
(
Query('demo.query.*', TEST_JQ),
['example', 'example2'],
),
],
id='multiple_modules_same_collection',
),
),
)
def test_save_indirect_host_entries(bare_job, state):
all_task_names = []
for entry in state:
query, module_names = entry
all_task_names.extend([f'{query.get_fqcn()}.{module_name}' for module_name in module_names])
query.create_event_queries(module_names)
[query.create_registered_event(bare_job, n) for n in module_names]
save_indirect_host_entries(bare_job.id) save_indirect_host_entries(bare_job.id)
bare_job.refresh_from_db()
assert bare_job.event_queries_processed is True
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1 assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first() host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
assert host_audit.count == len(all_task_names) assert host_audit.count == 3
assert host_audit.canonical_facts == {'host_name': 'foo_host'} assert host_audit.events == ['demo.query.example']
assert host_audit.facts == {'another_host_name': 'foo_host'}
assert host_audit.organization == bare_job.organization
assert host_audit.name == 'vm-foo' @pytest.mark.django_db
assert set(host_audit.events) == set(all_task_names) def test_multiple_registered_modules(bare_job):
"This tests that the events will list multiple modules if more than 1 module from different collections is registered and used"
create_registered_event(bare_job, task_name='demo.query.example')
create_registered_event(bare_job, task_name='demo2.query.example')
# These take the place of using the event_query fixture
create_event_query(fqcn='demo.query')
create_event_query(fqcn='demo2.query')
save_indirect_host_entries(bare_job.id)
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
assert host_audit.count == 2
assert set(host_audit.events) == {'demo.query.example', 'demo2.query.example'}
@pytest.mark.django_db
def test_multiple_registered_modules_same_collection(bare_job):
"This tests that the events will list multiple modules if more than 1 module in same collection is registered and used"
create_registered_event(bare_job, task_name='demo.query.example')
create_registered_event(bare_job, task_name='demo.query.example2')
# Takes place of event_query fixture, doing manually here
EventQuery.objects.create(
fqcn='demo.query',
collection_version='1.0.1',
event_query=yaml.dump(
{
'demo.query.example': {'query': TEST_JQ},
'demo.query.example2': {'query': TEST_JQ},
},
default_flow_style=False,
),
)
save_indirect_host_entries(bare_job.id)
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
assert host_audit.count == 2
assert set(host_audit.events) == {'demo.query.example', 'demo.query.example2'}
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -231,7 +231,7 @@ def test_inventory_update_injected_content(product_name, this_kind, inventory, f
len([True for k in content.keys() if k.endswith(inventory_filename)]) > 0 len([True for k in content.keys() if k.endswith(inventory_filename)]) > 0
), f"'{inventory_filename}' file not found in inventory update runtime files {content.keys()}" ), f"'{inventory_filename}' file not found in inventory update runtime files {content.keys()}"
env.pop('ANSIBLE_COLLECTIONS_PATH', None) env.pop('ANSIBLE_COLLECTIONS_PATHS', None) # collection paths not relevant to this test
base_dir = os.path.join(DATA, 'plugins') base_dir = os.path.join(DATA, 'plugins')
if not os.path.exists(base_dir): if not os.path.exists(base_dir):
os.mkdir(base_dir) os.mkdir(base_dir)

View File

@@ -129,7 +129,7 @@ def podman_image_generator():
@pytest.fixture @pytest.fixture
def run_job_from_playbook(default_org, demo_inv, post, admin): def run_job_from_playbook(default_org, demo_inv, post, admin):
def _rf(test_name, playbook, local_path=None, scm_url=None, jt_params=None): def _rf(test_name, playbook, local_path=None, scm_url=None):
project_name = f'{test_name} project' project_name = f'{test_name} project'
jt_name = f'{test_name} JT: {playbook}' jt_name = f'{test_name} JT: {playbook}'
@@ -166,13 +166,9 @@ def run_job_from_playbook(default_org, demo_inv, post, admin):
assert proj.get_project_path() assert proj.get_project_path()
assert playbook in proj.playbooks assert playbook in proj.playbooks
jt_data = {'name': jt_name, 'project': proj.id, 'playbook': playbook, 'inventory': demo_inv.id}
if jt_params:
jt_data.update(jt_params)
result = post( result = post(
reverse('api:job_template_list'), reverse('api:job_template_list'),
jt_data, {'name': jt_name, 'project': proj.id, 'playbook': playbook, 'inventory': demo_inv.id},
admin, admin,
expect=201, expect=201,
) )

View File

@@ -1,64 +0,0 @@
import pytest
from awx.main.tests.live.tests.conftest import wait_for_events
from awx.main.models import Job, Inventory
def assert_facts_populated(name):
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
assert job is not None
wait_for_events(job)
inventory = job.inventory
assert inventory.hosts.count() > 0 # sanity
for host in inventory.hosts.all():
assert host.ansible_facts
@pytest.fixture
def general_facts_test(live_tmp_folder, run_job_from_playbook):
def _rf(slug, jt_params):
jt_params['use_fact_cache'] = True
standard_kwargs = dict(scm_url=f'file://{live_tmp_folder}/facts', jt_params=jt_params)
# GATHER FACTS
name = f'test_gather_ansible_facts_{slug}'
run_job_from_playbook(name, 'gather.yml', **standard_kwargs)
assert_facts_populated(name)
# KEEP FACTS
name = f'test_clear_ansible_facts_{slug}'
run_job_from_playbook(name, 'no_op.yml', **standard_kwargs)
assert_facts_populated(name)
# CLEAR FACTS
name = f'test_clear_ansible_facts_{slug}'
run_job_from_playbook(name, 'clear.yml', **standard_kwargs)
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
assert job is not None
wait_for_events(job)
inventory = job.inventory
assert inventory.hosts.count() > 0 # sanity
for host in inventory.hosts.all():
assert not host.ansible_facts
return _rf
def test_basic_ansible_facts(general_facts_test):
general_facts_test('basic', {})
@pytest.fixture
def sliced_inventory():
inv, _ = Inventory.objects.get_or_create(name='inventory-to-slice')
if not inv.hosts.exists():
for i in range(10):
inv.hosts.create(name=f'sliced_host_{i}')
return inv
def test_slicing_with_facts(general_facts_test, sliced_inventory):
general_facts_test('sliced', {'job_slice_count': 3, 'inventory': sliced_inventory.id})

View File

@@ -1,79 +0,0 @@
import multiprocessing
import random
from django.db import connection
from django.utils.timezone import now
from awx.main.models import Inventory, Host
def worker_delete_target(ready_event, continue_event, field_name):
"""Runs the bulk update, will be called in duplicate, in parallel"""
inv = Inventory.objects.get(organization__name='Default', name='test_host_update_contention')
host_list = list(inv.hosts.all())
random.shuffle(host_list)
for i, host in enumerate(host_list):
setattr(host, field_name, f'my_var: {i}')
# ready to do the bulk_update
print('worker has loaded all the hosts needed')
ready_event.set()
# wait for the coordination message
continue_event.wait()
# # presumed fix
# host_list = sorted(host_list, key=lambda host: host.id)
# NOTE: did not reproduce the bug without batch_size
Host.objects.bulk_update(host_list, [field_name], batch_size=100)
print('finished doing the bulk update in worker')
def test_host_update_contention(default_org):
inv_kwargs = dict(organization=default_org, name='test_host_update_contention')
if Inventory.objects.filter(**inv_kwargs).exists():
inv = Inventory.objects.get(**inv_kwargs).delete()
inv = Inventory.objects.create(**inv_kwargs)
right_now = now()
hosts = [Host(inventory=inv, name=f'host-{i}', created=right_now, modified=right_now) for i in range(1000)]
print('bulk creating hosts')
Host.objects.bulk_create(hosts)
# sanity check
for host in hosts:
assert not host.variables
# Force our worker pool to make their own connection
connection.close()
ready_events = [multiprocessing.Event() for _ in range(2)]
continue_event = multiprocessing.Event()
print('spawning processes for concurrent bulk updates')
processes = []
fields = ['variables', 'ansible_facts']
for i in range(2):
p = multiprocessing.Process(target=worker_delete_target, args=(ready_events[i], continue_event, fields[i]))
processes.append(p)
p.start()
# Assure both processes are connected and have loaded their host list
for e in ready_events:
print('waiting on subprocess ready event')
e.wait()
# Begin the bulk_update queries
print('setting the continue event for the workers')
continue_event.set()
# if a Deadlock happens it will probably be surfaced by the result here
print('waiting on the workers to finish the bulk_update')
for p in processes:
p.join()
print('checking workers have variables set')
for host in inv.hosts.all():
assert host.variables.startswith('my_var:')
assert host.ansible_facts.startswith('my_var:')

View File

@@ -49,15 +49,22 @@ def test_indirect_host_counting(live_tmp_folder, run_job_from_playbook):
# Task might not run due to race condition, so make it run here # Task might not run due to race condition, so make it run here
job.refresh_from_db() job.refresh_from_db()
if job.event_queries_processed is False: if job.event_queries_processed is False:
save_indirect_host_entries.delay(job.id, wait_for_events=False) for _ in range(10):
save_indirect_host_entries.delay(job.id, wait_for_events=True)
job.refresh_from_db()
if job.event_queries_processed is True:
break
time.sleep(0.5)
else:
raise RuntimeError(f'Job events not received for job_id={job.id}')
# event_queries_processed only assures the task has started, it might take a minor amount of time to finish # This will poll for the background task to finish
for _ in range(10): for _ in range(10):
if IndirectManagedNodeAudit.objects.filter(job=job).exists(): if IndirectManagedNodeAudit.objects.filter(job=job).exists():
break break
time.sleep(0.2) time.sleep(0.2)
else: else:
raise RuntimeError(f'No IndirectManagedNodeAudit records ever populated for job_id={job.id}') raise RuntimeError(f'No IndirectManagedNodeAudit records ever populated for job_id={job.id}')
assert IndirectManagedNodeAudit.objects.filter(job=job).count() == 1 assert IndirectManagedNodeAudit.objects.filter(job=job).count() == 1
host_audit = IndirectManagedNodeAudit.objects.filter(job=job).first() host_audit = IndirectManagedNodeAudit.objects.filter(job=job).first()
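Both versions of this live test end up hand-rolling the same wait-and-retry loop; a generic sketch of that helper (names invented), shown only to make the polling pattern explicit:

import time

def wait_until(predicate, tries=10, delay=0.2):
    # Re-evaluate `predicate` up to `tries` times, sleeping `delay` seconds
    # between attempts; True as soon as it holds, False if it never does.
    for _ in range(tries):
        if predicate():
            return True
        time.sleep(delay)
    return False

# e.g. wait_until(lambda: IndirectManagedNodeAudit.objects.filter(job=job).exists())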

View File

@@ -34,7 +34,7 @@ def hosts(ref_time):
def test_start_job_fact_cache(hosts, tmpdir): def test_start_job_fact_cache(hosts, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0) last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
for host in hosts: for host in hosts:
filepath = os.path.join(fact_cache, host.name) filepath = os.path.join(fact_cache, host.name)
@@ -61,7 +61,7 @@ def test_fact_cache_with_invalid_path_traversal(tmpdir):
def test_start_job_fact_cache_past_timeout(hosts, tmpdir): def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
# the hosts fixture was modified 5s ago, which is more than 2s # the hosts fixture was modified 5s ago, which is more than 2s
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=2) last_modified = start_fact_cache(hosts, fact_cache, timeout=2)
assert last_modified is None assert last_modified is None
for host in hosts: for host in hosts:
@@ -71,7 +71,7 @@ def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
def test_start_job_fact_cache_within_timeout(hosts, tmpdir): def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
# the hosts fixture was modified 5s ago, which is less than 7s # the hosts fixture was modified 5s ago, which is less than 7s
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=7) last_modified = start_fact_cache(hosts, fact_cache, timeout=7)
assert last_modified assert last_modified
for host in hosts: for host in hosts:
@@ -80,7 +80,7 @@ def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time): def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0) last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
@@ -103,12 +103,12 @@ def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_tim
assert host.ansible_facts_modified == ref_time assert host.ansible_facts_modified == ref_time
assert hosts[1].ansible_facts == ansible_facts_new assert hosts[1].ansible_facts == ansible_facts_new
assert hosts[1].ansible_facts_modified > ref_time assert hosts[1].ansible_facts_modified > ref_time
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'], batch_size=100) bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir): def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0) last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
@@ -127,7 +127,7 @@ def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir): def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
fact_cache = os.path.join(tmpdir, 'facts') fact_cache = os.path.join(tmpdir, 'facts')
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0) last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update') bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
@@ -139,4 +139,4 @@ def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
assert host.ansible_facts_modified == ref_time assert host.ansible_facts_modified == ref_time
assert hosts[1].ansible_facts == {} assert hosts[1].ansible_facts == {}
assert hosts[1].ansible_facts_modified > ref_time assert hosts[1].ansible_facts_modified > ref_time
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'], batch_size=100) bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])

View File

@@ -1,162 +0,0 @@
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
import pytest
from unittest import mock
from awx.main.models import (
Inventory,
Host,
)
from django.utils.timezone import now
from django.db.models.query import QuerySet
from awx.main.models import (
Job,
Organization,
Project,
)
from awx.main.tasks import jobs
@pytest.fixture
def private_data_dir():
private_data = tempfile.mkdtemp(prefix='awx_')
for subfolder in ('inventory', 'env'):
runner_subfolder = os.path.join(private_data, subfolder)
os.makedirs(runner_subfolder, exist_ok=True)
yield private_data
shutil.rmtree(private_data, True)
@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
def test_pre_post_run_hook_facts(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
# creates inventory_object with two hosts
inventory = Inventory(pk=1)
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
mock_inventory._state = mock.MagicMock()
qs_hosts = QuerySet()
hosts = [
Host(id=1, name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
Host(id=2, name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
]
qs_hosts._result_cache = hosts
qs_hosts.only = mock.MagicMock(return_value=hosts)
mock_inventory.hosts = qs_hosts
assert mock_inventory.hosts.count() == 2
# creates job object with fact_cache enabled
org = Organization(pk=1)
proj = Project(pk=1, organization=org)
job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=1)
job.inventory = mock_inventory
job.execution_environment = execution_environment
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job) # to run original method
job.job_env.get = mock.MagicMock(return_value=private_data_dir)
# creates the task object with job object as instance
mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False # defines timeout to false
task = jobs.RunJob()
task.instance = job
task.update_model = mock.Mock(return_value=job)
task.model.objects.get = mock.Mock(return_value=job)
# run pre_run_hook
task.facts_write_time = task.pre_run_hook(job, private_data_dir)
# updates inventory with one more host
hosts.append(Host(id=3, name='host3', ansible_facts={"added": True}, ansible_facts_modified=now(), inventory=mock_inventory))
assert mock_inventory.hosts.count() == 3
# run post_run_hook
task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)
task.post_run_hook(job, "success")
assert mock_inventory.hosts[2].ansible_facts == {"added": True}
@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
def test_pre_post_run_hook_facts_deleted_sliced(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
# creates inventory_object with two hosts
inventory = Inventory(pk=1)
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
mock_inventory._state = mock.MagicMock()
qs_hosts = QuerySet()
hosts = [Host(id=num, name=f'host{num}', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory) for num in range(999)]
qs_hosts._result_cache = hosts
qs_hosts.only = mock.MagicMock(return_value=hosts)
mock_inventory.hosts = qs_hosts
assert mock_inventory.hosts.count() == 999
# creates job object with fact_cache enabled
org = Organization(pk=1)
proj = Project(pk=1, organization=org)
job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=3)
job.inventory = mock_inventory
job.execution_environment = execution_environment
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job) # to run original method
job.job_env.get = mock.MagicMock(return_value=private_data_dir)
# creates the task object with job object as instance
mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False
task = jobs.RunJob()
task.instance = job
task.update_model = mock.Mock(return_value=job)
task.model.objects.get = mock.Mock(return_value=job)
# run pre_run_hook
task.facts_write_time = task.pre_run_hook(job, private_data_dir)
hosts.pop(1)
assert mock_inventory.hosts.count() == 998
# run post_run_hook
task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)
task.post_run_hook(job, "success")
for host in hosts:
assert host.ansible_facts == {"a": 1, "b": 2}
failures = []
for host in hosts:
try:
assert host.ansible_facts == {"a": 1, "b": 2, "unexpected_key": "bad"}
except AssertionError:
failures.append("Host named {} has facts {}".format(host.name, host.ansible_facts))
assert len(failures) > 0, f"Failures occurred for the following hosts: {failures}"
@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
def test_invalid_host_facts(mock_facts_settings, update_hosts, private_data_dir, execution_environment):
inventory = Inventory(pk=1)
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
mock_inventory._state = mock.MagicMock()
hosts = [
Host(id=0, name='host0', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
Host(id=1, name='host1', ansible_facts={"a": 1, "b": 2, "unexpected_key": "bad"}, ansible_facts_modified=now(), inventory=mock_inventory),
]
mock_inventory.hosts = hosts
failures = []
for host in mock_inventory.hosts:
assert "a" in host.ansible_facts
if "unexpected_key" in host.ansible_facts:
failures.append(host.name)
mock_facts_settings.SOME_SETTING = True
update_hosts(mock_inventory.hosts)
with pytest.raises(pytest.fail.Exception):
if failures:
pytest.fail(f" {len(failures)} facts cleared failures : {','.join(failures)}")

View File

@@ -1,3 +1,6 @@
from split_settings.tools import include
LOCAL_SETTINGS = ( LOCAL_SETTINGS = (
'ALLOWED_HOSTS', 'ALLOWED_HOSTS',
'BROADCAST_WEBSOCKET_PORT', 'BROADCAST_WEBSOCKET_PORT',
@@ -13,14 +16,13 @@ LOCAL_SETTINGS = (
def test_postprocess_auth_basic_enabled(): def test_postprocess_auth_basic_enabled():
"""The final loaded settings should have basic auth enabled.""" locals().update({'__file__': __file__})
from awx.settings import REST_FRAMEWORK
assert 'awx.api.authentication.LoggedBasicAuthentication' in REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] include('../../../settings/defaults.py', scope=locals())
assert 'awx.api.authentication.LoggedBasicAuthentication' in locals()['REST_FRAMEWORK']['DEFAULT_AUTHENTICATION_CLASSES']
def test_default_settings(): def test_default_settings():
"""Ensure that all default settings are present in the snapshot."""
from django.conf import settings from django.conf import settings
for k in dir(settings): for k in dir(settings):
@@ -29,43 +31,3 @@ def test_default_settings():
default_val = getattr(settings.default_settings, k, None) default_val = getattr(settings.default_settings, k, None)
snapshot_val = settings.DEFAULTS_SNAPSHOT[k] snapshot_val = settings.DEFAULTS_SNAPSHOT[k]
assert default_val == snapshot_val, f'Setting for {k} does not match snapshot:\nsnapshot: {snapshot_val}\ndefault: {default_val}' assert default_val == snapshot_val, f'Setting for {k} does not match snapshot:\nsnapshot: {snapshot_val}\ndefault: {default_val}'
def test_django_conf_settings_is_awx_settings():
"""Ensure that the settings loaded from dynaconf are the same as the settings delivered to django."""
from django.conf import settings
from awx.settings import REST_FRAMEWORK
assert settings.REST_FRAMEWORK == REST_FRAMEWORK
def test_dynaconf_is_awx_settings():
"""Ensure that the settings loaded from dynaconf are the same as the settings delivered to django."""
from django.conf import settings
from awx.settings import REST_FRAMEWORK
assert settings.DYNACONF.REST_FRAMEWORK == REST_FRAMEWORK
def test_development_settings_can_be_directly_imported(monkeypatch):
"""Ensure that the development settings can be directly imported."""
monkeypatch.setenv('AWX_MODE', 'development')
from django.conf import settings
from awx.settings.development import REST_FRAMEWORK
from awx.settings.development import DEBUG # actually set on defaults.py and not overridden in development.py
assert settings.REST_FRAMEWORK == REST_FRAMEWORK
assert DEBUG is True
def test_merge_application_name():
"""Ensure that the merge_application_name function works as expected."""
from awx.settings.functions import merge_application_name
settings = {
"DATABASES__default__ENGINE": "django.db.backends.postgresql",
"CLUSTER_HOST_ID": "test-cluster-host-id",
}
result = merge_application_name(settings)["DATABASES__default__OPTIONS__application_name"]
assert result.startswith("awx-")
assert "test-cluster" in result

View File

@@ -10,7 +10,7 @@ from typing import Optional, Any
import requests import requests
DEFAULT_OIDC_TOKEN_ENDPOINT = 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token' DEFAULT_OIDC_ENDPOINT = 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token'
class TokenError(requests.RequestException): class TokenError(requests.RequestException):

View File

@@ -4,7 +4,6 @@
# Python # Python
import base64 import base64
import logging import logging
import logging.handlers
import sys import sys
import traceback import traceback
import os import os
@@ -28,9 +27,6 @@ from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.resources import Resource
__all__ = ['RSysLogHandler', 'SpecialInventoryHandler', 'ColorHandler']
class RSysLogHandler(logging.handlers.SysLogHandler): class RSysLogHandler(logging.handlers.SysLogHandler):
append_nul = False append_nul = False
@@ -113,35 +109,39 @@ class SpecialInventoryHandler(logging.Handler):
if settings.COLOR_LOGS is True: if settings.COLOR_LOGS is True:
from logutils.colorize import ColorizingStreamHandler try:
import colorama from logutils.colorize import ColorizingStreamHandler
import colorama
colorama.deinit() colorama.deinit()
colorama.init(wrap=False, convert=False, strip=False) colorama.init(wrap=False, convert=False, strip=False)
class ColorHandler(ColorizingStreamHandler): class ColorHandler(ColorizingStreamHandler):
def colorize(self, line, record): def colorize(self, line, record):
# comment out this method if you don't like the job_lifecycle # comment out this method if you don't like the job_lifecycle
# logs rendered with cyan text # logs rendered with cyan text
previous_level_map = self.level_map.copy() previous_level_map = self.level_map.copy()
if record.name == "awx.analytics.job_lifecycle": if record.name == "awx.analytics.job_lifecycle":
self.level_map[logging.INFO] = (None, 'cyan', True) self.level_map[logging.INFO] = (None, 'cyan', True)
msg = super(ColorHandler, self).colorize(line, record) msg = super(ColorHandler, self).colorize(line, record)
self.level_map = previous_level_map self.level_map = previous_level_map
return msg return msg
def format(self, record): def format(self, record):
message = logging.StreamHandler.format(self, record) message = logging.StreamHandler.format(self, record)
return '\n'.join([self.colorize(line, record) for line in message.splitlines()]) return '\n'.join([self.colorize(line, record) for line in message.splitlines()])
level_map = { level_map = {
logging.DEBUG: (None, 'green', True), logging.DEBUG: (None, 'green', True),
logging.INFO: (None, None, True), logging.INFO: (None, None, True),
logging.WARNING: (None, 'yellow', True), logging.WARNING: (None, 'yellow', True),
logging.ERROR: (None, 'red', True), logging.ERROR: (None, 'red', True),
logging.CRITICAL: (None, 'red', True), logging.CRITICAL: (None, 'red', True),
} }
except ImportError:
# logutils is only used for colored logs in the dev environment
pass
else: else:
ColorHandler = logging.StreamHandler ColorHandler = logging.StreamHandler
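
A condensed sketch of the optional-dependency fallback shown in this hunk: use the colorized handler when logutils is installed (dev environments only), otherwise fall back to the standard stream handler. This is an illustration of the pattern, not the full AWX handler:

```python
import logging

try:
    from logutils.colorize import ColorizingStreamHandler

    class ColorHandler(ColorizingStreamHandler):
        def format(self, record):
            # Colorize each line of the formatted message individually.
            message = logging.StreamHandler.format(self, record)
            return "\n".join(self.colorize(line, record) for line in message.splitlines())

except ImportError:
    # logutils is only needed for colored logs in the dev environment
    ColorHandler = logging.StreamHandler
```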

View File

@@ -201,7 +201,7 @@
# additional_galaxy_env contains environment variables that are used for installing roles and collections and will take precedence over items in galaxy_task_env # additional_galaxy_env contains environment variables that are used for installing roles and collections and will take precedence over items in galaxy_task_env
additional_galaxy_env: additional_galaxy_env:
# These paths control where ansible-galaxy installs collections and roles on top the filesystem # These paths control where ansible-galaxy installs collections and roles on top the filesystem
ANSIBLE_COLLECTIONS_PATH: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections" ANSIBLE_COLLECTIONS_PATHS: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_collections"
ANSIBLE_ROLES_PATH: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_roles" ANSIBLE_ROLES_PATH: "{{ projects_root }}/.__awx_cache/{{ local_path }}/stage/requirements_roles"
# Put the local tmp directory in same volume as collection destination # Put the local tmp directory in same volume as collection destination
# otherwise, files cannot be moved across volumes and will cause error # otherwise, files cannot be moved across volumes and will cause error

View File

@@ -1,82 +1,2 @@
# Copyright (c) 2015 Ansible, Inc. # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved. # All Rights Reserved.
import os
import copy
from ansible_base.lib.dynamic_config import (
factory,
export,
load_envvars,
load_python_file_with_injected_context,
load_standard_settings_files,
toggle_feature_flags,
)
from .functions import (
assert_production_settings,
merge_application_name,
add_backwards_compatibility,
load_extra_development_files,
)
add_backwards_compatibility()
# Create the standard DYNACONF instance which will come with DAB defaults
# This loads defaults.py and the environment-specific file, e.g. development_defaults.py
DYNACONF = factory(
__name__,
"AWX",
environments=("development", "production", "quiet", "kube"),
settings_files=["defaults.py"],
)
# Store snapshot before loading any custom config file
DYNACONF.set(
"DEFAULTS_SNAPSHOT",
copy.deepcopy(DYNACONF.as_dict(internal=False)),
loader_identifier="awx.settings:DEFAULTS_SNAPSHOT",
)
#############################################################################################
# Settings loaded before this point will be allowed to be overridden by the database settings
# Any settings loaded after this point will be marked as a read_only database setting
#############################################################################################
# Load extra settings files from the following directories
# /etc/tower/conf.d/ and /etc/tower/
# this is the legacy location, kept for backwards compatibility
settings_dir = os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/')
settings_files_path = os.path.join(settings_dir, '*.py')
settings_file_path = os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py')
load_python_file_with_injected_context(settings_files_path, settings=DYNACONF)
load_python_file_with_injected_context(settings_file_path, settings=DYNACONF)
# Load extra settings files from the following directories
# /etc/ansible-automation-platform/{settings,flags,.secrets}.yaml
# and /etc/ansible-automation-platform/awx/{settings,flags,.secrets}.yaml
# this is the new standard location for all services
load_standard_settings_files(DYNACONF)
# Load optional development only settings files
load_extra_development_files(DYNACONF)
# Check at least one setting file has been loaded in production mode
assert_production_settings(DYNACONF, settings_dir, settings_file_path)
# Load envvars at the end to allow them to override everything loaded so far
load_envvars(DYNACONF)
# This must run after all custom settings are loaded
DYNACONF.update(
merge_application_name(DYNACONF),
loader_identifier="awx.settings:merge_application_name",
merge=True,
)
# Toggle feature flags based on installer settings
DYNACONF.update(
toggle_feature_flags(DYNACONF),
loader_identifier="awx.settings:toggle_feature_flags",
merge=True,
)
# Update django.conf.settings with DYNACONF values
export(__name__, DYNACONF)
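
The module being removed here layers several loaders on top of a frozen defaults snapshot. A minimal sketch of that snapshot idea with plain Dynaconf follows; the file list and prefix are illustrative, and the real module uses helpers from ansible_base.lib.dynamic_config rather than this bare constructor:

```python
from copy import deepcopy

from dynaconf import Dynaconf

settings = Dynaconf(envvar_prefix="AWX", settings_files=["defaults.py"])  # illustrative file list
# Freeze the defaults before any site-specific files or environment variables
# are applied, so later code can tell which values were overridden.
settings.set("DEFAULTS_SNAPSHOT", deepcopy(settings.as_dict(internal=False)))
```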

View File

@@ -25,7 +25,6 @@ def get_application_name(CLUSTER_HOST_ID, function=''):
def set_application_name(DATABASES, CLUSTER_HOST_ID, function=''): def set_application_name(DATABASES, CLUSTER_HOST_ID, function=''):
"""In place modification of DATABASES to set the application name for the connection."""
# If settings files were not properly passed DATABASES could be {} at which point we don't need to set the app name. # If settings files were not properly passed DATABASES could be {} at which point we don't need to set the app name.
if not DATABASES or 'default' not in DATABASES: if not DATABASES or 'default' not in DATABASES:
return return

View File

@@ -9,6 +9,9 @@ import tempfile
import socket import socket
from datetime import timedelta from datetime import timedelta
from split_settings.tools import include
DEBUG = True DEBUG = True
SQL_DEBUG = DEBUG SQL_DEBUG = DEBUG
@@ -80,6 +83,10 @@ LANGUAGE_CODE = 'en-us'
# to load the internationalization machinery. # to load the internationalization machinery.
USE_I18N = True USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True USE_TZ = True
STATICFILES_DIRS = [ STATICFILES_DIRS = [
@@ -1008,15 +1015,16 @@ METRICS_SUBSYSTEM_CONFIG = {
} }
} }
# django-ansible-base # django-ansible-base
ANSIBLE_BASE_TEAM_MODEL = 'main.Team' ANSIBLE_BASE_TEAM_MODEL = 'main.Team'
ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization' ANSIBLE_BASE_ORGANIZATION_MODEL = 'main.Organization'
ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api' ANSIBLE_BASE_RESOURCE_CONFIG_MODULE = 'awx.resource_api'
ANSIBLE_BASE_PERMISSION_MODEL = 'main.Permission' ANSIBLE_BASE_PERMISSION_MODEL = 'main.Permission'
# Defaults to be overridden by DAB from ansible_base.lib import dynamic_config # noqa: E402
SPECTACULAR_SETTINGS = {}
OAUTH2_PROVIDER = {} include(os.path.join(os.path.dirname(dynamic_config.__file__), 'dynamic_settings.py'))
# Add a postfix to the API URL patterns # Add a postfix to the API URL patterns
# example if set to '' API pattern will be /api # example if set to '' API pattern will be /api

View File

@@ -1,13 +1,129 @@
# This file exists for backwards compatibility only # Copyright (c) 2015 Ansible, Inc.
# the current way of running AWX is to point settings to # All Rights Reserved.
# awx/settings/__init__.py as the entry point for the settings
# that is done by exporting: export DJANGO_SETTINGS_MODULE=awx.settings # Development settings for AWX project.
# Python
import os import os
import socket
import copy
import sys
import traceback
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings") # Centos-7 doesn't include the svg mime type
os.environ.setdefault("AWX_MODE", "development") # /usr/lib64/python/mimetypes.py
import mimetypes
from ansible_base.lib.dynamic_config import export # Django Split Settings
from . import DYNACONF # noqa from split_settings.tools import optional, include
export(__name__, DYNACONF) # Load default settings.
from .defaults import * # NOQA
# awx-manage shell_plus --notebook
NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '9888', '--allow-root', '--no-browser']
# print SQL queries in shell_plus
SHELL_PLUS_PRINT_SQL = False
# show colored logs in the dev environment
# to disable this, set `COLOR_LOGS = False` in awx/settings/local_settings.py
COLOR_LOGS = True
LOGGING['handlers']['console']['()'] = 'awx.main.utils.handlers.ColorHandler' # noqa
ALLOWED_HOSTS = ['*']
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
# Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = False
# Disallow sending csrf cookies over insecure connections
CSRF_COOKIE_SECURE = False
# Disable Pendo on the UI for development/test.
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
INSIGHTS_TRACKING_STATE = False
# debug toolbar and swagger assume that requirements/requirements_dev.txt are installed
INSTALLED_APPS += ['drf_yasg', 'debug_toolbar'] # NOQA
MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware'] + MIDDLEWARE # NOQA
DEBUG_TOOLBAR_CONFIG = {'ENABLE_STACKTRACES': True}
# Configure a default UUID for development only.
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'
INSTALL_UUID = '00000000-0000-0000-0000-000000000000'
# Ansible base virtualenv paths and enablement
# only used for deprecated fields and management commands for them
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
CLUSTER_HOST_ID = socket.gethostname()
AWX_CALLBACK_PROFILE = True
# this modifies FLAGS set by defaults
FLAGS['FEATURE_INDIRECT_NODE_COUNTING_ENABLED'] = [{'condition': 'boolean', 'value': True}] # noqa
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Disable normal scheduled/triggered task managers (DependencyManager, TaskManager, WorkflowManager).
# Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False
# Needed for launching runserver in debug mode
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Store a snapshot of default settings at this point before loading any
# customizable config files.
this_module = sys.modules[__name__]
local_vars = dir(this_module)
DEFAULTS_SNAPSHOT = {} # define after we save local_vars so we do not snapshot the snapshot
for setting in local_vars:
if setting.isupper():
DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting))
del local_vars # avoid temporary variables from showing up in dir(settings)
del this_module
#
###############################################################################################
#
# Any settings defined after this point will be marked as a read_only database setting
#
################################################################################################
# If there is an `/etc/tower/settings.py`, include it.
# If there is a `/etc/tower/conf.d/*.py`, include them.
include(optional('/etc/tower/settings.py'), scope=locals())
include(optional('/etc/tower/conf.d/*.py'), scope=locals())
# If any local_*.py files are present in awx/settings/, use them to override
# default settings for development. If not present, we can still run using
# only the defaults.
# this needs to stay at the bottom of this file
try:
if os.getenv('AWX_KUBE_DEVEL', False):
include(optional('development_kube.py'), scope=locals())
else:
include(optional('local_*.py'), scope=locals())
except ImportError:
traceback.print_exc()
sys.exit(1)
# The below runs AFTER all of the custom settings are imported
# because conf.d files will define DATABASES and this should modify that
from .application_name import set_application_name
set_application_name(DATABASES, CLUSTER_HOST_ID) # NOQA
del set_application_name
# Set the value of any feature flags that are defined in the local settings
for feature in list(FLAGS.keys()): # noqa: F405
if feature in locals():
FLAGS[feature][0]['value'] = locals()[feature] # noqa: F405

View File

@@ -1,76 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Development settings for AWX project.
# Python
import os
import socket
# Centos-7 doesn't include the svg mime type
# /usr/lib64/python/mimetypes.py
import mimetypes
from dynaconf import post_hook
# awx-manage shell_plus --notebook
NOTEBOOK_ARGUMENTS = ['--NotebookApp.token=', '--ip', '0.0.0.0', '--port', '9888', '--allow-root', '--no-browser']
# print SQL queries in shell_plus
SHELL_PLUS_PRINT_SQL = False
# show colored logs in the dev environment
# to disable this, set `COLOR_LOGS = False` in awx/settings/local_settings.py
COLOR_LOGS = True
LOGGING__handlers__console = '@merge {"()": "awx.main.utils.handlers.ColorHandler"}'
ALLOWED_HOSTS = ['*']
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
# Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = False
# Disallow sending csrf cookies over insecure connections
CSRF_COOKIE_SECURE = False
# Disable Pendo on the UI for development/test.
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
INSIGHTS_TRACKING_STATE = False
# debug toolbar and swagger assume that requirements/requirements_dev.txt are installed
INSTALLED_APPS = "@merge drf_yasg,debug_toolbar"
MIDDLEWARE = "@insert 0 debug_toolbar.middleware.DebugToolbarMiddleware"
DEBUG_TOOLBAR_CONFIG = {'ENABLE_STACKTRACES': True}
# Configure a default UUID for development only.
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'
INSTALL_UUID = '00000000-0000-0000-0000-000000000000'
# Ansible base virtualenv paths and enablement
# only used for deprecated fields and management commands for them
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
CLUSTER_HOST_ID = socket.gethostname()
AWX_CALLBACK_PROFILE = True
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# Disable normal scheduled/triggered task managers (DependencyManager, TaskManager, WorkflowManager).
# Allows user to trigger task managers directly for debugging and profiling purposes.
# Only works in combination with settings.SETTINGS_MODULE == 'awx.settings.development'
AWX_DISABLE_TASK_MANAGERS = False
# Needed for launching runserver in debug mode
# ======================!!!!!!! FOR DEVELOPMENT ONLY !!!!!!!=================================
# This modifies FLAGS set by defaults, must be deferred to run later
@post_hook
def set_dev_flags(settings):
defaults_flags = settings.get("FLAGS", {})
defaults_flags['FEATURE_INDIRECT_NODE_COUNTING_ENABLED'] = [{'condition': 'boolean', 'value': True}]
return {'FLAGS': defaults_flags}

View File

@@ -1,13 +1,4 @@
# This file exists for backwards compatibility only BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
# the current way of running AWX is to point settings to BROADCAST_WEBSOCKET_PORT = 8052
# awx/settings/__init__.py as the entry point for the settings BROADCAST_WEBSOCKET_VERIFY_CERT = False
# that is done by exporting: export DJANGO_SETTINGS_MODULE=awx.settings BROADCAST_WEBSOCKET_PROTOCOL = 'http'
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings")
os.environ.setdefault("AWX_MODE", "development,kube")
from ansible_base.lib.dynamic_config import export
from . import DYNACONF # noqa
export(__name__, DYNACONF)

View File

@@ -1,13 +1,15 @@
# This file exists for backwards compatibility only # Copyright (c) 2015 Ansible, Inc.
# the current way of running AWX is to point settings to # All Rights Reserved.
# awx/settings/__init__.py as the entry point for the settings
# that is done by exporting: export DJANGO_SETTINGS_MODULE=awx.settings
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings") # Development settings for AWX project, but with DEBUG disabled
os.environ.setdefault("AWX_MODE", "development,quiet")
from ansible_base.lib.dynamic_config import export # Load development settings.
from . import DYNACONF # noqa from defaults import * # NOQA
export(__name__, DYNACONF) # Load development settings.
from development import * # NOQA
# Disable capturing DEBUG
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SQL_DEBUG = DEBUG

View File

@@ -1,86 +0,0 @@
import os
from ansible_base.lib.dynamic_config import load_python_file_with_injected_context
from dynaconf import Dynaconf
from .application_name import get_application_name
def merge_application_name(settings):
"""Return a dynaconf merge dict to set the application name for the connection."""
data = {}
if "sqlite3" not in settings.get("DATABASES__default__ENGINE", ""):
data["DATABASES__default__OPTIONS__application_name"] = get_application_name(settings.get("CLUSTER_HOST_ID"))
return data
def add_backwards_compatibility():
"""Add backwards compatibility for AWX_MODE.
Before the dynaconf integration, AWX settings were selected by setting
DJANGO_SETTINGS_MODULE=awx.settings.production or DJANGO_SETTINGS_MODULE=awx.settings.development
(development_quiet and development_kube were also supported).
With dynaconf, DJANGO_SETTINGS_MODULE should always be set to "awx.settings" as the only entry point
for settings, and then "AWX_MODE" can be set to any of production, development, quiet, kube,
or a combination of them separated by commas.
E.g:
export DJANGO_SETTINGS_MODULE=awx.settings
export AWX_MODE=production
awx-manage [command]
dynaconf [command]
If pointing `DJANGO_SETTINGS_MODULE` to `awx.settings.production` or `awx.settings.development` then
this function will set `AWX_MODE` to the correct value.
"""
django_settings_module = os.getenv("DJANGO_SETTINGS_MODULE", "awx.settings")
if django_settings_module == "awx.settings":
return
current_mode = os.getenv("AWX_MODE", "")
for _module_name in ["development", "production", "development_quiet", "development_kube"]:
if django_settings_module == f"awx.settings.{_module_name}":
_mode = current_mode.split(",")
if "development_" in _module_name and "development" not in current_mode:
_mode.append("development")
_mode_fragment = _module_name.replace("development_", "")
if _mode_fragment not in _mode:
_mode.append(_mode_fragment)
os.environ["AWX_MODE"] = ",".join(_mode)
def load_extra_development_files(settings: Dynaconf):
"""Load optional development only settings files."""
if not settings.is_development_mode:
return
if settings.get_environ("AWX_KUBE_DEVEL"):
load_python_file_with_injected_context("kube_defaults.py", settings=settings)
else:
load_python_file_with_injected_context("local_*.py", settings=settings)
def assert_production_settings(settings: Dynaconf, settings_dir: str, settings_file_path: str): # pragma: no cover
"""Ensure at least one setting file has been loaded in production mode.
Current systems will require /etc/tower/settings.py and
new systems will require /etc/ansible-automation-platform/*.yaml
"""
if "production" not in settings.current_env.lower():
return
required_settings_paths = [
os.path.dirname(settings_file_path),
"/etc/ansible-automation-platform/",
settings_dir,
]
for path in required_settings_paths:
if any([path in os.path.dirname(f) for f in settings._loaded_files]):
break
else:
from django.core.exceptions import ImproperlyConfigured # noqa
msg = 'No AWX configuration found at %s.' % required_settings_paths
msg += '\nDefine the AWX_SETTINGS_FILE environment variable to '
msg += 'specify an alternate path.'
raise ImproperlyConfigured(msg)
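
For clarity, a small hypothetical helper expressing the legacy-module-to-AWX_MODE mapping described in the docstring above; the real add_backwards_compatibility mutates os.environ in place rather than returning a value:

```python
def mode_for(django_settings_module: str) -> str:
    """Hypothetical helper: map a legacy settings module path to the AWX_MODE string."""
    mapping = {
        "awx.settings.production": "production",
        "awx.settings.development": "development",
        "awx.settings.development_quiet": "development,quiet",
        "awx.settings.development_kube": "development,kube",
    }
    # "awx.settings" itself needs no mapping; the env var is left untouched in that case.
    return mapping.get(django_settings_module, "")


assert mode_for("awx.settings.development_kube") == "development,kube"
```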

View File

@@ -1,4 +0,0 @@
BROADCAST_WEBSOCKET_SECRET = '🤖starscream🤖'
BROADCAST_WEBSOCKET_PORT = 8052
BROADCAST_WEBSOCKET_VERIFY_CERT = False
BROADCAST_WEBSOCKET_PROTOCOL = 'http'

View File

@@ -1,13 +1,111 @@
# This file exists for backwards compatibility only # Copyright (c) 2015 Ansible, Inc.
# the current way of running AWX is to point settings to # All Rights Reserved.
# awx/settings/__init__.py as the entry point for the settings
# that is done by exporting: export DJANGO_SETTINGS_MODULE=awx.settings # Production settings for AWX project.
# Python
import os import os
import copy
import errno
import sys
import traceback
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awx.settings") # Django Split Settings
os.environ.setdefault("AWX_MODE", "production") from split_settings.tools import optional, include
from ansible_base.lib.dynamic_config import export # Load default settings.
from . import DYNACONF # noqa from .defaults import * # NOQA
export(__name__, DYNACONF) DEBUG = False
TEMPLATE_DEBUG = DEBUG
SQL_DEBUG = DEBUG
# Clear database settings to force production environment to define them.
DATABASES = {}
# Clear the secret key to force production environment to define it.
SECRET_KEY = None
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Ansible base virtualenv paths and enablement
# only used for deprecated fields and management commands for them
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
# Very important that this is editable (not read_only) in the API
AWX_ISOLATION_SHOW_PATHS = [
'/etc/pki/ca-trust:/etc/pki/ca-trust:O',
'/usr/share/pki:/usr/share/pki:O',
]
# Store a snapshot of default settings at this point before loading any
# customizable config files.
this_module = sys.modules[__name__]
local_vars = dir(this_module)
DEFAULTS_SNAPSHOT = {} # define after we save local_vars so we do not snapshot the snapshot
for setting in local_vars:
if setting.isupper():
DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting))
del local_vars # avoid temporary variables from showing up in dir(settings)
del this_module
#
###############################################################################################
#
# Any settings defined after this point will be marked as a read_only database setting
#
################################################################################################
# Load settings from any .py files in the global conf.d directory specified in
# the environment, defaulting to /etc/tower/conf.d/.
settings_dir = os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/')
settings_files = os.path.join(settings_dir, '*.py')
# Load remaining settings from the global settings file specified in the
# environment, defaulting to /etc/tower/settings.py.
settings_file = os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py')
# Attempt to load settings from /etc/tower/settings.py first, followed by
# /etc/tower/conf.d/*.py.
try:
include(settings_file, optional(settings_files), scope=locals())
except ImportError:
traceback.print_exc()
sys.exit(1)
except IOError:
from django.core.exceptions import ImproperlyConfigured
included_file = locals().get('__included_file__', '')
if not included_file or included_file == settings_file:
# The import doesn't always give permission denied, so try to open the
# settings file directly.
try:
e = None
open(settings_file)
except IOError:
pass
if e and e.errno == errno.EACCES:
SECRET_KEY = 'permission-denied'
LOGGING = {}
else:
msg = 'No AWX configuration found at %s.' % settings_file
msg += '\nDefine the AWX_SETTINGS_FILE environment variable to '
msg += 'specify an alternate path.'
raise ImproperlyConfigured(msg)
else:
raise
# The below runs AFTER all of the custom settings are imported
# because conf.d files will define DATABASES and this should modify that
from .application_name import set_application_name
set_application_name(DATABASES, CLUSTER_HOST_ID) # NOQA
del set_application_name
# Set the value of any feature flags that are defined in the local settings
for feature in list(FLAGS.keys()): # noqa: F405
if feature in locals():
FLAGS[feature][0]['value'] = locals()[feature] # noqa: F405

View File

@@ -1,30 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Production settings for AWX project.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SQL_DEBUG = DEBUG
# Clear database settings to force production environment to define them.
DATABASES = {}
# Clear the secret key to force production environment to define it.
SECRET_KEY = None
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Ansible base virtualenv paths and enablement
# only used for deprecated fields and management commands for them
BASE_VENV_PATH = os.path.realpath("/var/lib/awx/venv")
# Very important that this is editable (not read_only) in the API
AWX_ISOLATION_SHOW_PATHS = [
'/etc/pki/ca-trust:/etc/pki/ca-trust:O',
'/usr/share/pki:/usr/share/pki:O',
]

View File

@@ -1,8 +0,0 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Development settings for AWX project, but with DEBUG disabled
# Disable capturing DEBUG
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SQL_DEBUG = DEBUG

View File

@@ -37,7 +37,7 @@ def get_urlpatterns(prefix=None):
re_path(r'^(?!api/).*', include('awx.ui.urls', namespace='ui')), re_path(r'^(?!api/).*', include('awx.ui.urls', namespace='ui')),
] ]
if settings.DYNACONF.is_development_mode: if settings.SETTINGS_MODULE == 'awx.settings.development':
try: try:
import debug_toolbar import debug_toolbar

View File

@@ -29,7 +29,7 @@ DOCUMENTATION = """
description: description:
- The date to start the rule - The date to start the rule
- Used for all frequencies - Used for all frequencies
- Format should be 'YYYY-MM-DD HH:MM:SS' - Format should be YYYY-MM-DD [HH:MM:SS]
type: str type: str
timezone: timezone:
description: description:
@@ -47,8 +47,8 @@ DOCUMENTATION = """
description: description:
- How to end this schedule - How to end this schedule
- If this is not defined, this schedule will never end - If this is not defined, this schedule will never end
- If this is a positive number, specified as a string, this schedule will end after this number of occurrences - If this is a positive integer, this schedule will end after this number of occurrences
- If this is a date in the format 'YYYY-MM-DD HH:MM:SS', this schedule ends after this date - If this is a date in the format YYYY-MM-DD [HH:MM:SS], this schedule ends after this date
- Used for all types except none - Used for all types except none
type: str type: str
on_days: on_days:

View File

@@ -257,8 +257,6 @@ def main():
copy_lookup_data = lookup_data copy_lookup_data = lookup_data
if organization: if organization:
lookup_data['organization'] = org_id lookup_data['organization'] = org_id
if user:
lookup_data['organization'] = None
credential = module.get_one('credentials', name_or_id=name, check_exists=(state == 'exists'), **{'data': lookup_data}) credential = module.get_one('credentials', name_or_id=name, check_exists=(state == 'exists'), **{'data': lookup_data})
@@ -292,11 +290,8 @@ def main():
if inputs: if inputs:
credential_fields['inputs'] = inputs credential_fields['inputs'] = inputs
if description is not None: if description:
if description == '': credential_fields['description'] = description
credential_fields['description'] = ''
else:
credential_fields['description'] = description
if organization: if organization:
credential_fields['organization'] = org_id credential_fields['organization'] = org_id
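
The distinction in this hunk matters because an empty string is falsy in Python: a truthiness check silently drops a request to clear the description, while a `is not None` check still sends it. A minimal illustration with a stand-in dict (not the real module code):

```python
credential_fields = {}
description = ""  # a user explicitly clearing the description

if description:  # falsy check: the empty string is skipped, so the field is never cleared
    credential_fields["description"] = description

if description is not None:  # explicit None check: the empty string is still recorded
    credential_fields["description"] = description

print(credential_fields)  # {'description': ''}
```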

View File

@@ -116,11 +116,8 @@ def main():
} }
if kind: if kind:
credential_type_params['kind'] = kind credential_type_params['kind'] = kind
if module.params.get('description') is not None: if module.params.get('description'):
if module.params.get('description') == '': credential_type_params['description'] = module.params.get('description')
credential_type_params['description'] = ''
else:
credential_type_params['description'] = module.params.get('description')
if module.params.get('inputs'): if module.params.get('inputs'):
credential_type_params['inputs'] = module.params.get('inputs') credential_type_params['inputs'] = module.params.get('inputs')
if module.params.get('injectors'): if module.params.get('injectors'):

View File

@@ -268,7 +268,7 @@ def main():
for resource in value: for resource in value:
# Attempt to look up project based on the provided name, ID, or named URL and lookup data # Attempt to look up project based on the provided name, ID, or named URL and lookup data
lookup_key = key lookup_key = key
if key == 'organizations' or key == 'users' or key == 'teams': if key == 'organizations' or key == 'users':
lookup_data_populated = {} lookup_data_populated = {}
else: else:
lookup_data_populated = lookup_data lookup_data_populated = lookup_data

View File

@@ -13,7 +13,6 @@
wfjt_name: "AWX-Collection-tests-role-project-wfjt-{{ test_id }}" wfjt_name: "AWX-Collection-tests-role-project-wfjt-{{ test_id }}"
team_name: "AWX-Collection-tests-team-team-{{ test_id }}" team_name: "AWX-Collection-tests-team-team-{{ test_id }}"
team2_name: "AWX-Collection-tests-team-team-{{ test_id }}2" team2_name: "AWX-Collection-tests-team-team-{{ test_id }}2"
org2_name: "AWX-Collection-tests-organization-{{ test_id }}2"
- block: - block:
- name: Create a User - name: Create a User
@@ -210,40 +209,6 @@
that: that:
- "result is changed" - "result is changed"
- name: Create a 2nd organization
organization:
name: "{{ org2_name }}"
- name: Create a project in 2nd Organization
project:
name: "{{ project_name }}"
organization: "{{ org2_name }}"
scm_type: git
scm_url: https://github.com/ansible/test-playbooks
wait: true
register: project_info
- name: Add Joe and teams to the update role of the default Project with lookup from the 2nd Organization
role:
user: "{{ username }}"
users:
- "{{ username }}2"
teams:
- "{{ team_name }}"
- "{{ team2_name }}"
role: update
lookup_organization: "{{ org2_name }}"
project: "{{ project_name }}"
state: "{{ item }}"
register: result
with_items:
- "present"
- "absent"
- assert:
that:
- "result is changed"
always: always:
- name: Delete a User - name: Delete a User
user: user:
@@ -287,16 +252,3 @@
organization: Default organization: Default
state: absent state: absent
register: result register: result
- name: Delete the 2nd project
project:
name: "{{ project_name }}"
organization: "{{ org2_name }}"
state: absent
register: result
- name: Delete the 2nd organization
organization:
name: "{{ org2_name }}"
state: absent
register: result

View File

@@ -47,7 +47,6 @@ These can be specified via (from highest to lowest precedence):
- direct module parameters - direct module parameters
- environment variables (most useful when running against localhost) - environment variables (most useful when running against localhost)
- a config file path specified by the `tower_config_file` parameter - a config file path specified by the `tower_config_file` parameter
- a config file at `./tower_cli.cfg`, i.e. in the current directory
- a config file at `~/.tower_cli.cfg` - a config file at `~/.tower_cli.cfg`
- a config file at `/etc/tower/tower_cli.cfg` - a config file at `/etc/tower/tower_cli.cfg`
@@ -61,15 +60,6 @@ username = foo
password = bar password = bar
``` ```
or like this:
```
host: https://localhost:8043
verify_ssl: true
oauth_token: <token>
```
## Release and Upgrade Notes ## Release and Upgrade Notes
Notable releases of the `{{ collection_namespace }}.{{ collection_package }}` collection: Notable releases of the `{{ collection_namespace }}.{{ collection_package }}` collection:

View File

@@ -0,0 +1,27 @@
Copyright (c) 2013, 2General Oy
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of django-split-settings nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Bruno Rocha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -15,6 +15,10 @@ markers =
filterwarnings = filterwarnings =
error error
# NOTE: The following are introduced upgrading python 3.11 to python 3.12
# FIXME: Upgrade django-polymorphic https://github.com/jazzband/django-polymorphic/pull/541
once:Deprecated call to `pkg_resources.declare_namespace\('sphinxcontrib'\)`.\nImplementing implicit namespace packages \(as specified in PEP 420\) is preferred to `pkg_resources.declare_namespace`.:DeprecationWarning
# FIXME: Upgrade protobuf https://github.com/protocolbuffers/protobuf/issues/15077 # FIXME: Upgrade protobuf https://github.com/protocolbuffers/protobuf/issues/15077
once:Type google._upb._message.* uses PyType_Spec with a metaclass that has custom tp_new:DeprecationWarning once:Type google._upb._message.* uses PyType_Spec with a metaclass that has custom tp_new:DeprecationWarning
@@ -25,6 +29,9 @@ filterwarnings =
# FIXME: Set `USE_TZ` to `True`. # FIXME: Set `USE_TZ` to `True`.
once:The default value of USE_TZ will change from False to True in Django 5.0. Set USE_TZ to False in your project settings if you want to keep the current default behavior.:django.utils.deprecation.RemovedInDjango50Warning:django.conf once:The default value of USE_TZ will change from False to True in Django 5.0. Set USE_TZ to False in your project settings if you want to keep the current default behavior.:django.utils.deprecation.RemovedInDjango50Warning:django.conf
# FIXME: Delete this entry once `USE_L10N` use is removed.
once:The USE_L10N setting is deprecated. Starting with Django 5.0, localized formatting of data will always be enabled. For example Django will display numbers and dates using the format of the current locale.:django.utils.deprecation.RemovedInDjango50Warning:django.conf
# FIXME: Delete this entry once `pyparsing` is updated. # FIXME: Delete this entry once `pyparsing` is updated.
once:module 'sre_constants' is deprecated:DeprecationWarning:_pytest.assertion.rewrite once:module 'sre_constants' is deprecated:DeprecationWarning:_pytest.assertion.rewrite
@@ -34,6 +41,9 @@ filterwarnings =
# FIXME: Delete this entry once `zope` is updated. # FIXME: Delete this entry once `zope` is updated.
once:Deprecated call to `pkg_resources.declare_namespace.'zope'.`.\nImplementing implicit namespace packages .as specified in PEP 420. is preferred to `pkg_resources.declare_namespace`. See https.//setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages:DeprecationWarning: once:Deprecated call to `pkg_resources.declare_namespace.'zope'.`.\nImplementing implicit namespace packages .as specified in PEP 420. is preferred to `pkg_resources.declare_namespace`. See https.//setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages:DeprecationWarning:
# FIXME: Delete this entry once `coreapi` is updated.
once:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning:_pytest.assertion.rewrite
# FIXME: Delete this entry once the use of `distutils` is exterminated from the repo. # FIXME: Delete this entry once the use of `distutils` is exterminated from the repo.
once:The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives:DeprecationWarning:_pytest.assertion.rewrite once:The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives:DeprecationWarning:_pytest.assertion.rewrite
@@ -69,6 +79,12 @@ filterwarnings =
# FIXME: in `awx/main/analytics/collectors.py` and then delete the entry. # FIXME: in `awx/main/analytics/collectors.py` and then delete the entry.
once:distro.linux_distribution.. is deprecated. It should only be used as a compatibility shim with Python's platform.linux_distribution... Please use distro.id.., distro.version.. and distro.name.. instead.:DeprecationWarning:awx.main.analytics.collectors once:distro.linux_distribution.. is deprecated. It should only be used as a compatibility shim with Python's platform.linux_distribution... Please use distro.id.., distro.version.. and distro.name.. instead.:DeprecationWarning:awx.main.analytics.collectors
# FIXME: Figure this out, fix and then delete the entry.
once:\nUsing ProtocolTypeRouter without an explicit "http" key is deprecated.\nGiven that you have not passed the "http" you likely should use Django's\nget_asgi_application...:DeprecationWarning:awx.main.routing
# FIXME: Figure this out, fix and then delete the entry.
once:Channel's inbuilt http protocol AsgiHandler is deprecated. Use Django's get_asgi_application.. instead.:DeprecationWarning:channels.routing
# FIXME: Use `codecs.open()` via a context manager # FIXME: Use `codecs.open()` via a context manager
# FIXME: in `awx/main/utils/ansible.py` to close hanging file descriptors # FIXME: in `awx/main/utils/ansible.py` to close hanging file descriptors
# FIXME: and then delete the entry. # FIXME: and then delete the entry.

View File

@@ -14,7 +14,7 @@ cryptography<42.0.0 # investigation is needed for 42+ to work with OpenSSL v3.0
Cython Cython
daphne daphne
distro distro
django==4.2.20 # CVE-2025-26699 django==4.2.16 # CVE-2024-24680
django-cors-headers django-cors-headers
django-crum django-crum
django-extensions django-extensions
@@ -22,9 +22,9 @@ django-guid
django-oauth-toolkit<2.0.0 # Version 2.0.0 has breaking changes that will need to be worked out before upgrading django-oauth-toolkit<2.0.0 # Version 2.0.0 has breaking changes that will need to be worked out before upgrading
django-polymorphic django-polymorphic
django-solo django-solo
django-split-settings
djangorestframework>=3.15.0 djangorestframework>=3.15.0
djangorestframework-yaml djangorestframework-yaml
dynaconf<4
filelock filelock
GitPython>=3.1.37 # CVE-2023-41040 GitPython>=3.1.37 # CVE-2023-41040
grpcio grpcio
@@ -69,3 +69,6 @@ setuptools_scm[toml] # see UPGRADE BLOCKERs, xmlsec build dep
setuptools-rust>=0.11.4 # cryptography build dep setuptools-rust>=0.11.4 # cryptography build dep
pkgconfig>=1.5.1 # xmlsec build dep - needed for offline build pkgconfig>=1.5.1 # xmlsec build dep - needed for offline build
django-flags>=5.0.13 django-flags>=5.0.13
# Temporarily added to use ansible-runner from git branch, to be removed
# when ansible-runner moves from requirements_git.txt to here
pbr

View File

@@ -122,7 +122,7 @@ deprecated==1.2.15
# pygithub # pygithub
distro==1.9.0 distro==1.9.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django==4.2.20 django==4.2.16
# via # via
# -r /awx_devel/requirements/requirements.in # -r /awx_devel/requirements/requirements.in
# channels # channels
@@ -158,6 +158,10 @@ django-polymorphic==3.1.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django-solo==2.4.0 django-solo==2.4.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
django-split-settings==1.3.2
# via
# -r /awx_devel/requirements/requirements.in
# django-ansible-base
djangorestframework==3.15.2 djangorestframework==3.15.2
# via # via
# -r /awx_devel/requirements/requirements.in # -r /awx_devel/requirements/requirements.in
@@ -166,10 +170,6 @@ djangorestframework-yaml==2.0.0
# via -r /awx_devel/requirements/requirements.in # via -r /awx_devel/requirements/requirements.in
durationpy==0.9 durationpy==0.9
# via kubernetes # via kubernetes
dynaconf==3.2.10
# via
# -r /awx_devel/requirements/requirements.in
# django-ansible-base
enum-compat==0.0.3 enum-compat==0.0.3
# via asn1 # via asn1
filelock==3.16.1 filelock==3.16.1
@@ -336,6 +336,8 @@ packaging==24.2
# ansible-runner # ansible-runner
# opentelemetry-instrumentation # opentelemetry-instrumentation
# setuptools-scm # setuptools-scm
pbr==6.1.0
# via -r /awx_devel/requirements/requirements.in
pexpect==4.7.0 pexpect==4.7.0
# via # via
# -r /awx_devel/requirements/requirements.in # -r /awx_devel/requirements/requirements.in

View File

@@ -1,7 +1,8 @@
build build
coreapi
django-debug-toolbar==3.2.4 django-debug-toolbar==3.2.4
django-test-migrations django-test-migrations
drf-yasg<1.21.10 # introduces new DeprecationWarning that is turned into error drf-yasg
# pprofile - re-add once https://github.com/vpelletier/pprofile/issues/41 is addressed # pprofile - re-add once https://github.com/vpelletier/pprofile/issues/41 is addressed
ipython>=7.31.1 # https://github.com/ansible/awx/security/dependabot/30 ipython>=7.31.1 # https://github.com/ansible/awx/security/dependabot/30
unittest2 unittest2

View File

@@ -1,5 +1,6 @@
git+https://github.com/ansible/system-certifi.git@devel#egg=certifi git+https://github.com/ansible/system-certifi.git@devel#egg=certifi
# Remove pbr from requirements.in when moving ansible-runner to requirements.in
git+https://github.com/ansible/ansible-runner.git@devel#egg=ansible-runner git+https://github.com/ansible/ansible-runner.git@devel#egg=ansible-runner
awx-plugins-core @ git+https://github.com/ansible/awx-plugins.git@devel#egg=awx-plugins-core[credentials-github-app]
django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel#egg=django-ansible-base[rest-filters,jwt_consumer,resource-registry,rbac,feature-flags] django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel#egg=django-ansible-base[rest-filters,jwt_consumer,resource-registry,rbac,feature-flags]
awx-plugins-core @ git+https://github.com/ansible/awx-plugins.git@devel#egg=awx-plugins-core[credentials-github-app]
awx_plugins.interfaces @ git+https://github.com/ansible/awx_plugins.interfaces.git awx_plugins.interfaces @ git+https://github.com/ansible/awx_plugins.interfaces.git

View File

@@ -1,26 +0,0 @@
# Community BugScrub tooling
A small Python script that automatically distributes PRs and issues among a list of `people` and dumps the contents into a spreadsheet.
To be used when distributing the work of reviewing community contributions.
## Usage
Install requirements.
```
pip install -r requirements.txt
```
Get the usage.
```
python generate-sheet.py -h
```
## Adding a GitHub Personal Access Token
The script first looks for a GitHub personal access token so that its API calls are not rate limited; you can create one or use an existing one. The script reads the PAT from the environment variable `GITHUB_ACCESS_TOKEN`.
# For internal spreadsheet usage
AWX engineers will need to import the data generated by the script into a spreadsheet manager. When importing, please do not replace the existing sheets; create a new spreadsheet or add a new sheet inside the existing one.

View File

@@ -1,125 +0,0 @@
import argparse
import os
from typing import OrderedDict
import pyexcel
import requests
import sys
def get_headers():
access_token_env_var = "GITHUB_ACCESS_TOKEN"
if access_token_env_var in os.environ:
access_token = os.environ[access_token_env_var]
return {"Authorization": f"token {access_token}"}
else:
print(f"{access_token_env_var} not present, performing unathenticated calls that might hit rate limits.")
return None
def fetch_items(url, params, headers):
response = requests.get(url, params=params, headers=headers)
if response.status_code == 200:
return response
else:
print(f"Failed to fetch items: {response.status_code}", file=sys.stderr)
print(f"{response.content}", file=sys.stderr)
return None
def extract_next_url(response):
if 'Link' in response.headers:
links = response.headers['Link'].split(',')
for link in links:
if 'rel="next"' in link:
return link.split(';')[0].strip('<> ')
return None
def get_all_items(url, params, limit=None):
items = []
headers = get_headers()
while url:
response = fetch_items(url, params, headers)
if response:
items.extend(response.json())
print(f"Processing {len(items)}", file=sys.stderr)
if limit and len(items) > limit:
break
url = extract_next_url(response)
else:
url = None
return items
def get_open_issues(repo_url, limit):
owner, repo = repo_url.rstrip('/').split('/')[-2:]
url = f"https://api.github.com/repos/{owner}/{repo}/issues"
params = {'state': 'open', 'per_page': 100}
issues = get_all_items(url, params, limit)
open_issues = [issue for issue in issues if 'pull_request' not in issue]
return open_issues
def get_open_pull_requests(repo_url, limit):
owner, repo = repo_url.rstrip('/').split('/')[-2:]
url = f"https://api.github.com/repos/{owner}/{repo}/pulls"
params = {'state': 'open', 'per_page': 100}
pull_requests = get_all_items(url, params, limit)
return pull_requests
def generate_ods(issues, pull_requests, filename, people):
data = OrderedDict()
# Prepare issues data
issues_data = []
for n, issue in enumerate(issues):
issues_data.append(
[
issue['html_url'],
issue['title'],
issue['created_at'],
issue['user']['login'],
issue['assignee']['login'] if issue['assignee'] else 'None',
people[n % len(people)],
]
)
issues_headers = ['url', 'title', 'created_at', 'user', 'assignee', 'action']
issues_data.insert(0, issues_headers)
data.update({"Issues": issues_data})
# Prepare pull requests data
prs_data = []
for n, pr in enumerate(pull_requests):
prs_data.append(
[pr['html_url'], pr['title'], pr['created_at'], pr['user']['login'], pr['assignee']['login'] if pr['assignee'] else 'None', people[n % len(people)]]
)
prs_headers = ['url', 'title', 'created_at', 'user', 'assignee', 'action']
prs_data.insert(0, prs_headers)
data.update({"Pull Requests": prs_data})
# Save to ODS file
pyexcel.save_book_as(bookdict=data, dest_file_name=filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--limit", type=int, help="minimum number of issues/PRs to pull [Pulls all by default]", default=None)
parser.add_argument("--out", type=str, help="output file name [awx_community-triage.ods]", default="awx_community-triage.ods")
parser.add_argument("--repository-url", type=str, help="repository url [https://github.com/ansible/awx]", default="https://github.com/ansible/awx")
parser.add_argument("--people", type=str, help="comma separated list of names to distribute the issues/PRs among [Alice,Bob]", default="Alice,Bob")
args = parser.parse_args()
limit = args.limit
output_file_name = args.out
repo_url = args.repository_url
people = str(args.people).split(",")
open_issues = get_open_issues(repo_url, limit)
open_pull_requests = get_open_pull_requests(repo_url, limit)
print(f"Open issues: {len(open_issues)}")
print(f"Open Pull Requests: {len(open_pull_requests)}")
generate_ods(open_issues, open_pull_requests, output_file_name, people)
print(f"Generated {output_file_name} with open issues and pull requests.")
if __name__ == "__main__":
main()

View File

@@ -1,3 +0,0 @@
requests
pyexcel
pyexcel-ods3

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env python
from django import setup
from awx import prepare_env
prepare_env()
setup()
# Keeping this in test folder allows it to be importable
from awx.main.tests.data.sleep_task import sleep_task
for i in range(634):
sleep_task.delay()