Mirror of https://github.com/ansible/awx.git (synced 2026-02-12 07:04:45 -03:30)

Compare commits (32 commits): priority_t ... fix/redis-
| Author | SHA1 | Date |
|---|---|---|
| | 8033b7cbe9 | |
| | 95289ff28c | |
| | 000f6b0708 | |
| | c799d51ec8 | |
| | db6e8b9bad | |
| | 483417762f | |
| | 49240ca8e8 | |
| | 5ff3d4b2fc | |
| | 3f96ea17d6 | |
| | f59ad4f39c | |
| | c3ee0c2d8a | |
| | 7a3010f0e6 | |
| | 05dc9bad1c | |
| | 38f0f8d45f | |
| | d3ee9a1bfd | |
| | 438aa463d5 | |
| | 51f9160654 | |
| | ac3123a2ac | |
| | c4ee5127c5 | |
| | 9ec7540c4b | |
| | 2389fc691e | |
| | 567f5a2476 | |
| | e837535396 | |
| | 1d57f1c355 | |
| | 7676f14114 | |
| | 182e5cfaa4 | |
| | 99be91e939 | |
| | 9ff163b919 | |
| | 5d0d0404c7 | |
| | 5d53821ce5 | |
| | 39cd09ce19 | |
| | cd0e27446a | |
.github/actions/awx_devel_image/action.yml (vendored, 25 changed lines)

@@ -11,9 +11,7 @@ inputs:
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
    - uses: ./.github/actions/setup-python

    - name: Set lower case owner name
      shell: bash
@@ -26,26 +24,9 @@ runs:
      run: |
        echo "${{ inputs.github-token }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

    - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
      id: generate_key
      shell: bash
      run: |
        if [[ -z "${{ inputs.private-github-key }}" ]]; then
          ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        else
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          echo "${{ inputs.private-github-key }}" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        fi

    - name: Add private GitHub key to SSH agent
      uses: webfactory/ssh-agent@v0.9.0
    - uses: ./.github/actions/setup-ssh-agent
      with:
        ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
        ssh-private-key: ${{ inputs.private-github-key }}

    - name: Pre-pull latest devel image to warm cache
      shell: bash
.github/actions/setup-python/action.yml (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
name: 'Setup Python from Makefile'
description: 'Extract and set up Python version from Makefile'
inputs:
  python-version:
    description: 'Override Python version (optional)'
    required: false
    default: ''
  working-directory:
    description: 'Directory containing the Makefile'
    required: false
    default: '.'
runs:
  using: composite
  steps:
    - name: Get python version from Makefile
      shell: bash
      run: |
        if [ -n "${{ inputs.python-version }}" ]; then
          echo "py_version=${{ inputs.python-version }}" >> $GITHUB_ENV
        else
          cd ${{ inputs.working-directory }}
          echo "py_version=`make PYTHON_VERSION`" >> $GITHUB_ENV
        fi
    - name: Install python
      uses: actions/setup-python@v5
      with:
        python-version: ${{ env.py_version }}
.github/actions/setup-ssh-agent/action.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name: 'Setup SSH for GitHub'
description: 'Configure SSH for private repository access'
inputs:
  ssh-private-key:
    description: 'SSH private key for repository access'
    required: false
    default: ''
runs:
  using: composite
  steps:
    - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
      id: generate_key
      shell: bash
      run: |
        if [[ -z "${{ inputs.ssh-private-key }}" ]]; then
          ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        else
          echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
          echo "${{ inputs.ssh-private-key }}" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
        fi

    - name: Add private GitHub key to SSH agent
      uses: webfactory/ssh-agent@v0.9.0
      with:
        ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
.github/workflows/ci.yml (vendored, 39 changed lines)

@@ -130,7 +130,7 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-python
        with:
          python-version: '3.x'

@@ -161,6 +161,10 @@ jobs:
          show-progress: false
          path: awx

      - uses: ./awx/.github/actions/setup-ssh-agent
        with:
          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

      - name: Checkout awx-operator
        uses: actions/checkout@v4
        with:
@@ -168,39 +172,14 @@ jobs:
          repository: ansible/awx-operator
          path: awx-operator

      - name: Get python version from Makefile
        working-directory: awx
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v4
      - uses: ./awx/.github/actions/setup-python
        with:
          python-version: ${{ env.py_version }}
          working-directory: awx

      - name: Install playbook dependencies
        run: |
          python3 -m pip install docker

      - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
        id: generate_key
        shell: bash
        run: |
          if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
            ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          else
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          fi

      - name: Add private GitHub key to SSH agent
        uses: webfactory/ssh-agent@v0.9.0
        with:
          ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}

      - name: Build AWX image
        working-directory: awx
        run: |
@@ -299,7 +278,7 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-python
        with:
          python-version: '3.x'

@@ -375,7 +354,7 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-python
        with:
          python-version: '3.x'
.github/workflows/devel_images.yml (vendored, 26 changed lines)

@@ -49,14 +49,10 @@ jobs:
        run: |
          echo "DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER,,}" >> $GITHUB_ENV
          echo "COMPOSE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
          echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV
        env:
          OWNER: '${{ github.repository_owner }}'

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
      - uses: ./.github/actions/setup-python

      - name: Log in to registry
        run: |
@@ -73,25 +69,9 @@ jobs:
          make ui
        if: matrix.build-targets.image-name == 'awx'

      - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
        id: generate_key
        shell: bash
        run: |
          if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
            ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          else
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          fi

      - name: Add private GitHub key to SSH agent
        uses: webfactory/ssh-agent@v0.9.0
      - uses: ./.github/actions/setup-ssh-agent
        with:
          ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

      - name: Build and push AWX devel images
        run: |
.github/workflows/docs.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-python
        with:
          python-version: '3.x'
.github/workflows/label_issue.yml (vendored, 4 changed lines)

@@ -34,9 +34,11 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v4
      - uses: ./.github/actions/setup-python

      - name: Install python requests
        run: pip install requests

      - name: Check if user is a member of Ansible org
        uses: jannekem/run-python-script-action@v1
        id: check_user
.github/workflows/label_pr.yml (vendored, 2 changed lines)

@@ -33,7 +33,7 @@ jobs:
        with:
          show-progress: false

      - uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-python
        with:
          python-version: '3.x'
.github/workflows/promote.yml (vendored, 8 changed lines)

@@ -36,13 +36,7 @@ jobs:
        with:
          show-progress: false

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
      - uses: ./.github/actions/setup-python

      - name: Install dependencies
        run: |
.github/workflows/stage.yml (vendored, 9 changed lines)

@@ -64,14 +64,9 @@ jobs:
          repository: ansible/awx-logos
          path: awx-logos

      - name: Get python version from Makefile
        working-directory: awx
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v4
      - uses: ./awx/.github/actions/setup-python
        with:
          python-version: ${{ env.py_version }}
          working-directory: awx

      - name: Install playbook dependencies
        run: |
.github/workflows/upload_schema.yml (vendored, 28 changed lines)

@@ -23,37 +23,15 @@ jobs:
        with:
          show-progress: false

      - name: Get python version from Makefile
        run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

      - name: Install python ${{ env.py_version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.py_version }}
      - uses: ./.github/actions/setup-python

      - name: Log in to registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Generate placeholder SSH private key if SSH auth for private repos is not needed
        id: generate_key
        shell: bash
        run: |
          if [[ -z "${{ secrets.PRIVATE_GITHUB_KEY }}" ]]; then
            ssh-keygen -t ed25519 -C "github-actions" -N "" -f ~/.ssh/id_ed25519
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            cat ~/.ssh/id_ed25519 >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          else
            echo "SSH_PRIVATE_KEY<<EOF" >> $GITHUB_OUTPUT
            echo "${{ secrets.PRIVATE_GITHUB_KEY }}" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
          fi

      - name: Add private GitHub key to SSH agent
        uses: webfactory/ssh-agent@v0.9.0
      - uses: ./.github/actions/setup-ssh-agent
        with:
          ssh-private-key: ${{ steps.generate_key.outputs.SSH_PRIVATE_KEY }}
          ssh-private-key: ${{ secrets.PRIVATE_GITHUB_KEY }}

      - name: Pre-pull image to warm build cache
        run: |
.gitignore (vendored, 2 changed lines)

@@ -150,6 +150,8 @@ use_dev_supervisor.txt

awx/ui/src
awx/ui/build
awx/ui/.ui-built
awx/ui_next

# Docs build stuff
docs/docsite/build/
README.md (11 changed lines)

@@ -3,6 +3,17 @@

<img src="https://raw.githubusercontent.com/ansible/awx-logos/master/awx/ui/client/assets/logo-login.svg?sanitize=true" width=200 alt="AWX" />

> [!CAUTION]
> The last release of this repository was released on Jul 2, 2024.
> **Releases of this project are now paused during a large scale refactoring.**
> For more information, follow [the Forum](https://forum.ansible.com/) and - more specifically - see the various communications on the matter:
>
> * [Blog: Upcoming Changes to the AWX Project](https://www.ansible.com/blog/upcoming-changes-to-the-awx-project/)
> * [Streamlining AWX Releases](https://forum.ansible.com/t/streamlining-awx-releases/6894) Primary update
> * [Refactoring AWX into a Pluggable, Service-Oriented Architecture](https://forum.ansible.com/t/refactoring-awx-into-a-pluggable-service-oriented-architecture/7404)
> * [Upcoming changes to AWX Operator installation methods](https://forum.ansible.com/t/upcoming-changes-to-awx-operator-installation-methods/7598)
> * [AWX UI and credential types transitioning to the new pluggable architecture](https://forum.ansible.com/t/awx-ui-and-credential-types-transitioning-to-the-new-pluggable-architecture/8027)

AWX provides a web-based user interface, REST API, and task engine built on top of [Ansible](https://github.com/ansible/ansible). It is one of the upstream projects for [Red Hat Ansible Automation Platform](https://www.ansible.com/products/automation-platform).

To install AWX, please view the [Install guide](./INSTALL.md).
@@ -6,6 +6,7 @@ import copy
import json
import logging
import re
import yaml
from collections import Counter, OrderedDict
from datetime import timedelta
from uuid import uuid4
@@ -626,15 +627,41 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
        return exclusions

    def validate(self, attrs):
        """
        Apply serializer validation. Called by DRF.

        Can be extended by subclasses. Or consider overwriting
        `validate_with_obj` in subclasses, which provides access to the model
        object and exception handling for field validation.

        :param dict attrs: The names and values of the model form fields.
        :raise rest_framework.exceptions.ValidationError: If the validation
            fails.

            The exception must contain a dict with the names of the form fields
            which failed validation as keys, and a list of error messages as
            values. This ensures that the error messages are rendered near the
            relevant fields.
        :return: The names and values from the model form fields, possibly
            modified by the validations.
        :rtype: dict
        """
        attrs = super(BaseSerializer, self).validate(attrs)
        # Create/update a model instance and run its full_clean() method to
        # do any validation implemented on the model class.
        exclusions = self.get_validation_exclusions(self.instance)
        # Create a new model instance or take the existing one if it exists,
        # and update its attributes with the respective field values from
        # attrs.
        obj = self.instance or self.Meta.model()
        for k, v in attrs.items():
            if k not in exclusions and k != 'canonical_address_port':
                setattr(obj, k, v)
        try:
            # Create/update a model instance and run its full_clean() method to
            # do any validation implemented on the model class.
            exclusions = self.get_validation_exclusions(self.instance)
            obj = self.instance or self.Meta.model()
            for k, v in attrs.items():
                if k not in exclusions and k != 'canonical_address_port':
                    setattr(obj, k, v)
            # Run serializer validators which need the model object for
            # validation.
            self.validate_with_obj(attrs, obj)
            # Apply any validations implemented on the model class.
            obj.full_clean(exclude=exclusions)
            # full_clean may modify values on the instance; copy those changes
            # back to attrs so they are saved.
@@ -663,6 +690,32 @@ class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetacl
            raise ValidationError(d)
        return attrs

    def validate_with_obj(self, attrs, obj):
        """
        Overwrite this if you need the model instance for your validation.

        :param dict attrs: The names and values of the model form fields.
        :param obj: An instance of the class's meta model.

            If the serializer runs on a newly created object, obj contains only
            the attrs from its serializer. If the serializer runs because an
            object has been edited, obj is the existing model instance with all
            attributes and values available.
        :raise django.core.exceptions.ValidationError: Raise this if your
            validation fails.

            To make the error appear at the respective form field, instantiate
            the Exception with a dict containing the field name as key and the
            error message as value.

            Example: ``ValidationError({"password": "Not good enough!"})``

            If the exception contains just a string, the message cannot be
            related to a field and is rendered at the top of the model form.
        :return: None
        """
        return

    def reverse(self, *args, **kwargs):
        kwargs['request'] = self.context.get('request')
        return reverse(*args, **kwargs)
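As a quick illustration of the contract described in the docstrings above, here is a minimal sketch of a subclass that uses the new `validate_with_obj` hook. Only `BaseSerializer` and the hook signature come from the diff; the serializer name, fields, and rule are hypothetical.

```python
# Hypothetical subclass; only the validate_with_obj contract comes from the change above.
from django.core.exceptions import ValidationError as DjangoValidationError


class ExampleSerializer(BaseSerializer):
    def validate_with_obj(self, attrs, obj):
        # obj is a new, unsaved instance on create, or the existing instance on edit.
        if attrs.get('name', '') == getattr(obj, 'description', None):
            # Keying the dict by field name renders the error next to that form field.
            raise DjangoValidationError({'name': 'Name must differ from the description.'})
```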
@@ -984,7 +1037,6 @@ class UserSerializer(BaseSerializer):
        return ret

    def validate_password(self, value):
        django_validate_password(value)
        if not self.instance and value in (None, ''):
            raise serializers.ValidationError(_('Password required for new User.'))

@@ -1007,6 +1059,50 @@ class UserSerializer(BaseSerializer):

        return value

    def validate_with_obj(self, attrs, obj):
        """
        Validate the password with the Django password validators.

        To enable the Django password validators, configure
        `settings.AUTH_PASSWORD_VALIDATORS` as described in the [Django
        docs](https://docs.djangoproject.com/en/5.1/topics/auth/passwords/#enabling-password-validation)

        :param dict attrs: The User form field names and their values as a dict.
            Example::

                {
                    'username': 'TestUsername', 'first_name': 'FirstName',
                    'last_name': 'LastName', 'email': 'First.Last@my.org',
                    'is_superuser': False, 'is_system_auditor': False,
                    'password': 'secret123'
                }

        :param obj: The User model instance.
        :raises django.core.exceptions.ValidationError: Raise this if at least
            one Django password validator fails.

            The exception contains a dict ``{"password": <error-message>}``
            which indicates that the password field has failed validation, and
            the reason for failure.
        :return: None.
        """
        # We must do this here instead of in `validate_password` because some
        # django password validators need access to other model instance fields,
        # e.g. ``username`` for the ``UserAttributeSimilarityValidator``.
        password = attrs.get("password")
        # Skip validation if no password has been entered. This may happen when
        # an existing User is edited.
        if password and password != '$encrypted$':
            # Apply validators from settings.AUTH_PASSWORD_VALIDATORS. This may
            # raise ValidationError.
            #
            # If the validation fails, re-raise the exception with adjusted
            # content to make the error appear near the password field.
            try:
                django_validate_password(password, user=obj)
            except DjangoValidationError as exc:
                raise DjangoValidationError({"password": exc.messages})

    def _update_password(self, obj, new_password):
        if new_password and new_password != '$encrypted$':
            obj.set_password(new_password)
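The docstring above points at Django's `AUTH_PASSWORD_VALIDATORS` setting. For reference, a minimal sketch of such a configuration; the validator classes are standard Django, but the selection and options here are examples, not AWX defaults.

```python
# Example Django settings snippet enabling the validators that
# django_validate_password(password, user=obj) would then apply.
AUTH_PASSWORD_VALIDATORS = [
    # Compares the password against username/email, which is why the
    # validator needs the user object passed in above.
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 12}},
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
```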
@@ -5821,6 +5917,34 @@ class InstanceGroupSerializer(BaseSerializer):
            raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
        return value

    def validate_pod_spec_override(self, value):
        if not value:
            return value

        # value should be empty for non-container groups
        if self.instance and not self.instance.is_container_group:
            raise serializers.ValidationError(_('pod_spec_override is only valid for container groups'))

        pod_spec_override_json = None
        # detect whether the value is YAML or JSON; if it is YAML, convert it to a dict
        try:
            # convert yaml to json
            pod_spec_override_json = yaml.safe_load(value)
        except yaml.YAMLError:
            try:
                pod_spec_override_json = json.loads(value)
            except json.JSONDecodeError:
                raise serializers.ValidationError(_('pod_spec_override must be valid yaml or json'))

        # validate that the spec does not enable automountServiceAccountToken
        spec = pod_spec_override_json.get('spec', {})
        automount_service_account_token = spec.get('automountServiceAccountToken', False)

        if automount_service_account_token:
            raise serializers.ValidationError(_('automountServiceAccountToken is not allowed for security reasons'))

        return value

    def validate(self, attrs):
        attrs = super(InstanceGroupSerializer, self).validate(attrs)
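For illustration, here is the kind of `pod_spec_override` value the new check rejects, because it tries to enable `automountServiceAccountToken`. The override content and image name are made up; only the key being checked comes from the validator above.

```python
import yaml

# Example pod_spec_override payload; the container image is illustrative only.
rejected_override = """
spec:
  automountServiceAccountToken: true
  containers:
    - name: worker
      image: registry.example.com/custom-ee:latest
"""

spec = yaml.safe_load(rejected_override).get('spec', {})
# Mirrors the serializer check: this condition is what triggers the ValidationError.
assert spec.get('automountServiceAccountToken', False) is True
```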
@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
from awx.main.utils.analytics_proxy import OIDCClient
from rest_framework import status

from collections import OrderedDict
@@ -205,7 +205,7 @@ class AnalyticsGenericView(APIView):
        try:
            rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
            client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
            client = OIDCClient(rh_user, rh_password)
            response = client.make_request(
                method,
                url,
@@ -219,8 +219,8 @@ class AnalyticsGenericView(APIView):
                logger.error("Automation Analytics API request failed, trying base auth method")
                response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
        except MissingSettings:
            rh_user = self._get_setting('SUBSCRIPTIONS_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('SUBSCRIPTIONS_PASSWORD', None, ERROR_MISSING_PASSWORD)
            rh_user = self._get_setting('SUBSCRIPTIONS_CLIENT_ID', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('SUBSCRIPTIONS_CLIENT_SECRET', None, ERROR_MISSING_PASSWORD)
            response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
        #
        # Missing or wrong user/pass
@@ -32,6 +32,7 @@ from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ
from awx.main.utils.analytics_proxy import TokenError
from awx.main.utils.licensing import get_licenser

logger = logging.getLogger('awx.api.views.root')
@@ -176,19 +177,21 @@ class ApiV2SubscriptionView(APIView):

    def post(self, request):
        data = request.data.copy()
        if data.get('subscriptions_password') == '$encrypted$':
            data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
        if data.get('subscriptions_client_secret') == '$encrypted$':
            data['subscriptions_client_secret'] = settings.SUBSCRIPTIONS_CLIENT_SECRET
        try:
            user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
            user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
            with set_environ(**settings.AWX_TASK_ENV):
                validated = get_licenser().validate_rh(user, pw)
            if user:
                settings.SUBSCRIPTIONS_USERNAME = data['subscriptions_username']
                settings.SUBSCRIPTIONS_CLIENT_ID = data['subscriptions_client_id']
            if pw:
                settings.SUBSCRIPTIONS_PASSWORD = data['subscriptions_password']
                settings.SUBSCRIPTIONS_CLIENT_SECRET = data['subscriptions_client_secret']
        except Exception as exc:
            msg = _("Invalid Subscription")
            if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
            if isinstance(exc, TokenError) or (
                isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401
            ):
                msg = _("The provided credentials are invalid (HTTP 401).")
            elif isinstance(exc, requests.exceptions.ProxyError):
                msg = _("Unable to connect to proxy server.")
@@ -215,12 +218,12 @@ class ApiV2AttachView(APIView):

    def post(self, request):
        data = request.data.copy()
        pool_id = data.get('pool_id', None)
        if not pool_id:
            return Response({"error": _("No subscription pool ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
        user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
        if pool_id and user and pw:
        subscription_id = data.get('subscription_id', None)
        if not subscription_id:
            return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
        user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
        if subscription_id and user and pw:
            data = request.data.copy()
            try:
                with set_environ(**settings.AWX_TASK_ENV):
@@ -239,7 +242,7 @@ class ApiV2AttachView(APIView):
            logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
            return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
        for sub in validated:
            if sub['pool_id'] == pool_id:
            if sub['subscription_id'] == subscription_id:
                sub['valid_key'] = True
                settings.LICENSE = sub
                return Response(sub)
@@ -207,7 +207,8 @@ class URLField(CharField):
        if self.allow_plain_hostname:
            try:
                url_parts = urlparse.urlsplit(value)
                if url_parts.hostname and '.' not in url_parts.hostname:
                looks_like_ipv6 = bool(url_parts.netloc and url_parts.netloc.startswith('[') and url_parts.netloc.endswith(']'))
                if not looks_like_ipv6 and url_parts.hostname and '.' not in url_parts.hostname:
                    netloc = '{}.local'.format(url_parts.hostname)
                    if url_parts.port:
                        netloc = '{}:{}'.format(netloc, url_parts.port)
@@ -27,5 +27,5 @@ def _migrate_setting(apps, old_key, new_key, encrypted=False):


def prefill_rh_credentials(apps, schema_editor):
    _migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_USERNAME', encrypted=False)
    _migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_PASSWORD', encrypted=True)
    _migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_CLIENT_ID', encrypted=False)
    _migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_CLIENT_SECRET', encrypted=True)
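For context, a data-migration function like `prefill_rh_credentials` is typically wired up through `migrations.RunPython`. The sketch below is illustrative only; the migration dependency name is hypothetical and not the actual AWX migration.

```python
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ('main', '0195_previous_migration'),  # hypothetical dependency
    ]

    operations = [
        # Forward: copy the REDHAT_* settings into the new SUBSCRIPTIONS_CLIENT_* keys.
        migrations.RunPython(prefill_rh_credentials, migrations.RunPython.noop),
    ]
```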
@@ -128,3 +128,41 @@ class TestURLField:
        else:
            with pytest.raises(ValidationError):
                field.run_validators(url)

    @pytest.mark.parametrize(
        "url, expect_error",
        [
            ("https://[1:2:3]", True),
            ("http://[1:2:3]", True),
            ("https://[2001:db8:3333:4444:5555:6666:7777:8888", True),
            ("https://2001:db8:3333:4444:5555:6666:7777:8888", True),
            ("https://[2001:db8:3333:4444:5555:6666:7777:8888]", False),
            ("https://[::1]", False),
            ("https://[::]", False),
            ("https://[2001:db8::1]", False),
            ("https://[2001:db8:0:0:0:0:1:1]", False),
            ("https://[fe80::2%eth0]", True),  # ipv6 scope identifier
            ("https://[fe80:0:0:0:200:f8ff:fe21:67cf]", False),
            ("https://[::ffff:192.168.1.10]", False),
            ("https://[0:0:0:0:0:ffff:c000:0201]", False),
            ("https://[2001:0db8:000a:0001:0000:0000:0000:0000]", False),
            ("https://[2001:db8:a:1::]", False),
            ("https://[ff02::1]", False),
            ("https://[ff02:0:0:0:0:0:0:1]", False),
            ("https://[fc00::1]", False),
            ("https://[fd12:3456:789a:1::1]", False),
            ("https://[2001:db8::abcd:ef12:3456:7890]", False),
            ("https://[2001:db8:0000:abcd:0000:ef12:0000:3456]", False),
            ("https://[::ffff:10.0.0.1]", False),
            ("https://[2001:db8:cafe::]", False),
            ("https://[2001:db8:cafe:0:0:0:0:0]", False),
            ("https://[fe80::210:f3ff:fedf:4567%3]", True),  # ipv6 scope identifier, numerical interface
        ],
    )
    def test_ipv6_urls(self, url, expect_error):
        field = URLField()
        if expect_error:
            with pytest.raises(ValidationError, match="Enter a valid URL"):
                field.run_validators(url)
        else:
            field.run_validators(url)
@@ -2098,7 +2098,7 @@ class WorkflowJobAccess(BaseAccess):
    def filtered_queryset(self):
        return WorkflowJob.objects.filter(
            Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
            | Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
            | Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
        )

    def can_read(self, obj):
@@ -2496,12 +2496,11 @@ class UnifiedJobAccess(BaseAccess):

    def filtered_queryset(self):
        inv_pk_qs = Inventory._accessible_pk_qs(Inventory, self.user, 'read_role')
        org_auditor_qs = Organization.objects.filter(Q(admin_role__members=self.user) | Q(auditor_role__members=self.user))
        qs = self.model.objects.filter(
            Q(unified_job_template_id__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
            | Q(inventoryupdate__inventory_source__inventory__id__in=inv_pk_qs)
            | Q(adhoccommand__inventory__id__in=inv_pk_qs)
            | Q(organization__in=org_auditor_qs)
            | Q(organization__in=Organization.accessible_pk_qs(self.user, 'auditor_role'))
        )
        return qs
@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
from awx.main.models import Job
from awx.main.access import access_registry
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
from awx.main.utils.analytics_proxy import OIDCClient

__all__ = ['register', 'gather', 'ship']

@@ -186,7 +186,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti

    if not (
        settings.AUTOMATION_ANALYTICS_URL
        and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_USERNAME and settings.SUBSCRIPTIONS_PASSWORD))
        and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_CLIENT_ID and settings.SUBSCRIPTIONS_CLIENT_SECRET))
    ):
        logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
        return None
@@ -368,8 +368,20 @@ def ship(path):
        logger.error('AUTOMATION_ANALYTICS_URL is not set')
        return False

    rh_user = getattr(settings, 'REDHAT_USERNAME', None)
    rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
    rh_id = getattr(settings, 'REDHAT_USERNAME', None)
    rh_secret = getattr(settings, 'REDHAT_PASSWORD', None)

    if not (rh_id and rh_secret):
        rh_id = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
        rh_secret = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)

    if not rh_id:
        logger.error('Neither REDHAT_USERNAME nor SUBSCRIPTIONS_CLIENT_ID are set')
        return False

    if not rh_secret:
        logger.error('Neither REDHAT_PASSWORD nor SUBSCRIPTIONS_CLIENT_SECRET are set')
        return False

    with open(path, 'rb') as f:
        files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
@@ -377,25 +389,13 @@ def ship(path):
        s.headers = get_awx_http_client_headers()
        s.headers.pop('Content-Type')
        with set_environ(**settings.AWX_TASK_ENV):
            if rh_user and rh_password:
                try:
                    client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
                    response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
                except requests.RequestException:
                    logger.error("Automation Analytics API request failed, trying base auth method")
                    response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
            elif not rh_user or not rh_password:
                logger.info('REDHAT_USERNAME and REDHAT_PASSWORD are not set, using SUBSCRIPTIONS_USERNAME and SUBSCRIPTIONS_PASSWORD')
                rh_user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
                rh_password = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
                if rh_user and rh_password:
                    response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
                elif not rh_user:
                    logger.error('REDHAT_USERNAME and SUBSCRIPTIONS_USERNAME are not set')
                    return False
                elif not rh_password:
                    logger.error('REDHAT_PASSWORD and SUBSCRIPTIONS_USERNAME are not set')
                    return False
            try:
                client = OIDCClient(rh_id, rh_secret)
                response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
            except requests.RequestException:
                logger.error("Automation Analytics API request failed, trying base auth method")
                response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_id, rh_secret), headers=s.headers, timeout=(31, 31))

    # Accept 2XX status_codes
    if response.status_code >= 300:
        logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
@@ -9,6 +9,7 @@ from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily
from prometheus_client.registry import CollectorRegistry
from django.conf import settings
from django.http import HttpRequest
import redis.exceptions
from rest_framework.request import Request

from awx.main.consumers import emit_channel_notification
@@ -290,8 +291,12 @@ class Metrics(MetricsNamespace):
    def send_metrics(self):
        # more than one thread could be calling this at the same time, so should
        # acquire redis lock before sending metrics
        lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
        if not lock.acquire(blocking=False):
        try:
            lock = self.conn.lock(root_key + '-' + self._namespace + '_lock')
            if not lock.acquire(blocking=False):
                return
        except redis.exceptions.ConnectionError as exc:
            logger.warning(f'Connection error in send_metrics: {exc}')
            return
        try:
            current_time = time.time()
@@ -124,8 +124,8 @@ register(
    allow_blank=True,
    encrypted=False,
    read_only=False,
    label=_('Red Hat customer username'),
    help_text=_('This username is used to send data to Automation Analytics'),
    label=_('Red Hat Client ID for Analytics'),
    help_text=_('Client ID used to send data to Automation Analytics'),
    category=_('System'),
    category_slug='system',
)
@@ -137,34 +137,34 @@ register(
    allow_blank=True,
    encrypted=True,
    read_only=False,
    label=_('Red Hat customer password'),
    help_text=_('This password is used to send data to Automation Analytics'),
    label=_('Red Hat Client Secret for Analytics'),
    help_text=_('Client secret used to send data to Automation Analytics'),
    category=_('System'),
    category_slug='system',
)

register(
    'SUBSCRIPTIONS_USERNAME',
    'SUBSCRIPTIONS_CLIENT_ID',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    encrypted=False,
    read_only=False,
    label=_('Red Hat or Satellite username'),
    help_text=_('This username is used to retrieve subscription and content information'),  # noqa
    label=_('Red Hat Client ID for Subscriptions'),
    help_text=_('Client ID used to retrieve subscription and content information'),  # noqa
    category=_('System'),
    category_slug='system',
)

register(
    'SUBSCRIPTIONS_PASSWORD',
    'SUBSCRIPTIONS_CLIENT_SECRET',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    encrypted=True,
    read_only=False,
    label=_('Red Hat or Satellite password'),
    help_text=_('This password is used to retrieve subscription and content information'),  # noqa
    label=_('Red Hat Client Secret for Subscriptions'),
    help_text=_('Client secret used to retrieve subscription and content information'),  # noqa
    category=_('System'),
    category_slug='system',
)
@@ -88,8 +88,10 @@ class Scheduler:
        # internally times are all referenced relative to startup time, add grace period
        self.global_start = time.time() + 2.0

    def get_and_mark_pending(self):
        relative_time = time.time() - self.global_start
    def get_and_mark_pending(self, reftime=None):
        if reftime is None:
            reftime = time.time()  # mostly for tests
        relative_time = reftime - self.global_start
        to_run = []
        for job in self.jobs:
            if job.due_to_run(relative_time):
@@ -98,8 +100,10 @@ class Scheduler:
                job.mark_run(relative_time)
        return to_run

    def time_until_next_run(self):
        relative_time = time.time() - self.global_start
    def time_until_next_run(self, reftime=None):
        if reftime is None:
            reftime = time.time()  # mostly for tests
        relative_time = reftime - self.global_start
        next_job = min(self.jobs, key=lambda j: j.next_run)
        delta = next_job.next_run - relative_time
        if delta <= 0.1:
@@ -115,10 +119,11 @@ class Scheduler:
    def debug(self, *args, **kwargs):
        data = dict()
        data['title'] = 'Scheduler status'
        reftime = time.time()

        now = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S UTC')
        now = datetime.fromtimestamp(reftime).strftime('%Y-%m-%d %H:%M:%S UTC')
        start_time = datetime.fromtimestamp(self.global_start).strftime('%Y-%m-%d %H:%M:%S UTC')
        relative_time = time.time() - self.global_start
        relative_time = reftime - self.global_start
        data['started_time'] = start_time
        data['current_time'] = now
        data['current_time_relative'] = round(relative_time, 3)
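The new optional `reftime` argument lets callers (and tests) pin "now" instead of having the scheduler read `time.time()` internally. A hedged sketch of how that could be used; the `Scheduler` constructor and job registration are assumptions, only `global_start`, `get_and_mark_pending(reftime=...)`, and `time_until_next_run(reftime=...)` come from the diff above.

```python
# Sketch only: pin a single reference time so results do not drift with test runtime.
scheduler = Scheduler()                     # assumed default constructor, jobs assumed registered
fake_now = scheduler.global_start + 30.0    # pretend 30 seconds have passed since startup

due_now = scheduler.get_and_mark_pending(reftime=fake_now)   # deterministic set of due jobs
wait_s = scheduler.time_until_next_run(reftime=fake_now)     # measured from the same instant
print(due_now, wait_s)
```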
@@ -7,6 +7,7 @@ import time
import traceback
from datetime import datetime
from uuid import uuid4
import json

import collections
from multiprocessing import Process
@@ -25,7 +26,10 @@ from ansible_base.lib.logging.runtime import log_excess_runtime

from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity

# ansible-runner
from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count

if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -307,6 +311,41 @@ class WorkerPool(object):
                logger.exception('could not kill {}'.format(worker.pid))


def get_auto_max_workers():
    """Method we normally rely on to get max_workers

    Uses almost the same logic as Instance.local_health_check.
    The important thing is to be MORE than Instance.capacity
    so that the task-manager does not over-schedule this node.

    Ideally we would just use the capacity from the database plus reserve workers,
    but this poses some bootstrap problems where OCP task containers
    register themselves after startup
    """
    # Get memory from ansible-runner
    total_memory_gb = get_mem_in_bytes()

    # This may replace memory calculation with a user override
    corrected_memory = get_corrected_memory(total_memory_gb)

    # Get same number as max forks based on memory, this function takes memory as bytes
    mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)

    # Follow same process for CPU capacity constraint
    cpu_count = get_cpu_count()
    corrected_cpu = get_corrected_cpu(cpu_count)
    cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)

    # Here is what is different from health checks: take the larger of the two capacities
    auto_max = max(mem_capacity, cpu_capacity)

    # add magic number of extra workers to ensure
    # we have a few extra workers to run the heartbeat
    auto_max += 7

    return auto_max

class AutoscalePool(WorkerPool):
    """
    An extended pool implementation that automatically scales workers up and
@@ -320,19 +359,7 @@ class AutoscalePool(WorkerPool):
        super(AutoscalePool, self).__init__(*args, **kwargs)

        if self.max_workers is None:
            settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
            if settings_absmem is not None:
                # There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
                total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
            else:
                total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up

            # Get same number as max forks based on memory, this function takes memory as bytes
            self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)

            # add magic prime number of extra workers to ensure
            # we have a few extra workers to run the heartbeat
            self.max_workers += 7
            self.max_workers = get_auto_max_workers()

        # max workers can't be less than min_workers
        self.max_workers = max(self.min_workers, self.max_workers)
@@ -346,6 +373,9 @@ class AutoscalePool(WorkerPool):
        self.scale_up_ct = 0
        self.worker_count_max = 0

        # last time we wrote current tasks, to avoid too much log spam
        self.last_task_list_log = time.monotonic()

    def produce_subsystem_metrics(self, metrics_object):
        metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
        metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
@@ -463,6 +493,14 @@ class AutoscalePool(WorkerPool):
            self.worker_count_max = new_worker_ct
        return ret

    @staticmethod
    def fast_task_serialization(current_task):
        try:
            return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
        except Exception:
            # just make sure this does not make things worse
            return str(current_task)

    def write(self, preferred_queue, body):
        if 'guid' in body:
            set_guid(body['guid'])
@@ -484,6 +522,15 @@ class AutoscalePool(WorkerPool):
                if isinstance(body, dict):
                    task_name = body.get('task')
                    logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
                # Once every 10 seconds write out task list for debugging
                if time.monotonic() - self.last_task_list_log >= 10.0:
                    task_counts = {}
                    for worker in self.workers:
                        task_slug = self.fast_task_serialization(worker.current_task)
                        task_counts.setdefault(task_slug, 0)
                        task_counts[task_slug] += 1
                    logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
                    self.last_task_list_log = time.monotonic()
            return super(AutoscalePool, self).write(preferred_queue, body)
        except Exception:
            for conn in connections.all():
@@ -15,6 +15,7 @@ from datetime import timedelta

from django import db
from django.conf import settings
import redis.exceptions

from ansible_base.lib.logging.runtime import log_excess_runtime

@@ -130,10 +131,13 @@ class AWXConsumerBase(object):
    @log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
    def record_statistics(self):
        if time.time() - self.last_stats > 1:  # buffer stat recording to once per second
            save_data = self.pool.debug()
            try:
                self.redis.set(f'awx_{self.name}_statistics', self.pool.debug())
                self.redis.set(f'awx_{self.name}_statistics', save_data)
            except redis.exceptions.ConnectionError as exc:
                logger.warning(f'Redis connection error saving {self.name} status data:\n{exc}\nmissed data:\n{save_data}')
            except Exception:
                logger.exception(f"encountered an error communicating with redis to store {self.name} statistics")
                logger.exception(f"Unknown redis error saving {self.name} status data:\nmissed data:\n{save_data}")
            self.last_stats = time.time()

    def run(self, *args, **kwargs):
@@ -189,7 +193,10 @@ class AWXConsumerPG(AWXConsumerBase):
            current_time = time.time()
            self.pool.produce_subsystem_metrics(self.subsystem_metrics)
            self.subsystem_metrics.set('dispatcher_availability', self.listen_cumulative_time / (current_time - self.last_metrics_gather))
            self.subsystem_metrics.pipe_execute()
            try:
                self.subsystem_metrics.pipe_execute()
            except redis.exceptions.ConnectionError as exc:
                logger.warning(f'Redis connection error saving dispatcher metrics, error:\n{exc}')
            self.listen_cumulative_time = 0.0
            self.last_metrics_gather = current_time

@@ -205,7 +212,11 @@ class AWXConsumerPG(AWXConsumerBase):
        except Exception as exc:
            logger.warning(f'Failed to save dispatcher statistics {exc}')

        for job in self.scheduler.get_and_mark_pending():
        # Everything benchmarks to the same original time, so that skews due to
        # runtime of the actions, themselves, do not mess up scheduling expectations
        reftime = time.time()

        for job in self.scheduler.get_and_mark_pending(reftime=reftime):
            if 'control' in job.data:
                try:
                    job.data['control']()
@@ -222,12 +233,12 @@ class AWXConsumerPG(AWXConsumerBase):

        self.listen_start = time.time()

        return self.scheduler.time_until_next_run()
        return self.scheduler.time_until_next_run(reftime=reftime)

    def run(self, *args, **kwargs):
        super(AWXConsumerPG, self).run(*args, **kwargs)

        logger.info(f"Running worker {self.name} listening to queues {self.queues}")
        logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
        init = False

        while True:
@@ -86,6 +86,7 @@ class CallbackBrokerWorker(BaseWorker):
        return os.getpid()

    def read(self, queue):
        has_redis_error = False
        try:
            res = self.redis.blpop(self.queue_name, timeout=1)
            if res is None:
@@ -95,14 +96,21 @@ class CallbackBrokerWorker(BaseWorker):
            self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
            self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
            return json.loads(res[1])
        except redis.exceptions.ConnectionError as exc:
            # Low noise log, because very common and many workers will write this
            logger.error(f"redis connection error: {exc}")
            has_redis_error = True
            time.sleep(5)
        except redis.exceptions.RedisError:
            logger.exception("encountered an error communicating with redis")
            has_redis_error = True
            time.sleep(1)
        except (json.JSONDecodeError, KeyError):
            logger.exception("failed to decode JSON message from redis")
        finally:
            self.record_statistics()
            self.record_read_metrics()
            if not has_redis_error:
                self.record_statistics()
                self.record_read_metrics()

        return {'event': 'FLUSH'}
@@ -1,10 +1,13 @@
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.

from django.conf import settings
from django.core.management.base import BaseCommand
from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
import redis

from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import redis.exceptions

from awx.main.analytics.subsystem_metrics import CallbackReceiverMetricsServer
from awx.main.dispatch.control import Control
from awx.main.dispatch.worker import AWXConsumerRedis, CallbackBrokerWorker

@@ -27,7 +30,10 @@ class Command(BaseCommand):
            return
        consumer = None

        CallbackReceiverMetricsServer().start()
        try:
            CallbackReceiverMetricsServer().start()
        except redis.exceptions.ConnectionError as exc:
            raise CommandError(f'Callback receiver could not connect to redis, error: {exc}')

        try:
            consumer = AWXConsumerRedis(
@@ -3,8 +3,10 @@
import logging
import yaml

import redis

from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management.base import BaseCommand, CommandError

from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.control import Control
@@ -63,7 +65,10 @@ class Command(BaseCommand):

        consumer = None

        DispatcherMetricsServer().start()
        try:
            DispatcherMetricsServer().start()
        except redis.exceptions.ConnectionError as exc:
            raise CommandError(f'Dispatcher could not connect to redis, error: {exc}')

        try:
            queues = ['tower_broadcast_all', 'tower_settings_change', get_task_queuename()]
@@ -53,8 +53,8 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
    ):
        super(GrafanaBackend, self).__init__(fail_silently=fail_silently)
        self.grafana_key = grafana_key
        self.dashboardId = int(dashboardId) if dashboardId is not None else None
        self.panelId = int(panelId) if panelId is not None else None
        self.dashboardId = int(dashboardId) if dashboardId is not None and panelId != "" else None
        self.panelId = int(panelId) if panelId is not None and panelId != "" else None
        self.annotation_tags = annotation_tags if annotation_tags is not None else []
        self.grafana_no_verify_ssl = grafana_no_verify_ssl
        self.isRegion = isRegion
@@ -97,6 +97,7 @@ class GrafanaBackend(AWXBaseEmailBackend, CustomNotificationBase):
        r = requests.post(
            "{}/api/annotations".format(m.recipients()[0]), json=grafana_data, headers=grafana_headers, verify=(not self.grafana_no_verify_ssl)
        )

        if r.status_code >= 400:
            logger.error(smart_str(_("Error sending notification grafana: {}").format(r.status_code)))
            if not self.fail_silently:
@@ -1,6 +1,9 @@
|
||||
# Copyright (c) 2015 Ansible, Inc.
|
||||
# All Rights Reserved.
|
||||
|
||||
# AIA: Primarily AI, Modified content, Human-initiated, Reviewed, Claude (Anthropic AI) via Cursor
|
||||
# AIA PAI Mc Hin R Claude Cursor - https://aiattribution.github.io/interpret-attribution
|
||||
|
||||
# Python
|
||||
import json
|
||||
import logging
|
||||
@@ -26,7 +29,151 @@ class CallbackQueueDispatcher(object):
    def __init__(self):
        self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
        self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
        self.connection = redis.Redis.from_url(settings.BROKER_URL)
        self._broker_url = settings.BROKER_URL
        self.connection = redis.Redis.from_url(self._broker_url)
        self._connection_failures = 0
        self._max_reconnect_attempts = 3
        self._total_reconnections = 0
        self._events_lost = 0

    def _reconnect(self):
        """
        Attempt to reconnect to Redis after connection failure.

        Returns:
            bool: True if reconnection successful, False otherwise
        """
        try:
            attempt = self._connection_failures + 1
            self.logger.warning(
                f"Redis reconnection attempt {attempt}/{self._max_reconnect_attempts} " f"(total reconnections this session: {self._total_reconnections})"
            )

            # Create new connection
            self.connection = redis.Redis.from_url(self._broker_url)

            # Verify connection works
            self.connection.ping()

            # Success
            self._connection_failures = 0
            self._total_reconnections += 1
            self.logger.info(f"Successfully reconnected to Redis (session reconnections: {self._total_reconnections})")
            return True

        except Exception as e:
            self._connection_failures += 1
            self.logger.error(f"Redis reconnection failed (attempt {self._connection_failures}): {type(e).__name__}: {e}")
            return False

    def dispatch(self, obj):
        self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))
        """
        Dispatch event to Redis queue with automatic reconnection on failure.

        Handles BrokenPipeError and ConnectionError by attempting reconnection.
        If all reconnection attempts fail, logs the event loss but allows job to continue.

        Args:
            obj: Event data to dispatch (dict or serializable object)
        """
        max_attempts = self._max_reconnect_attempts + 1
        last_error = None

        # Extract diagnostic info from event
        event_type = 'unknown'
        job_id = 'unknown'
        if isinstance(obj, dict):
            event_type = obj.get('event', obj.get('event_name', 'unknown'))
            job_id = obj.get('job_id', obj.get('unified_job_id', 'unknown'))

        for attempt in range(max_attempts):
            try:
                # Attempt to push event to Redis
                self.connection.rpush(self.queue, json.dumps(obj, cls=AnsibleJSONEncoder))

                # Success - reset failure counter if this was a recovery
                if self._connection_failures > 0:
                    self.logger.info(f"Redis connection recovered after reconnection. " f"job_id={job_id}, event_type={event_type}")
                    self._connection_failures = 0

                return  # Successfully dispatched

            except (BrokenPipeError, redis.exceptions.ConnectionError) as e:
                last_error = e
                error_type = type(e).__name__

                self.logger.warning(f"Redis connection error during event dispatch " f"(attempt {attempt + 1}/{max_attempts}): {error_type}: {e}")

                # Enhanced diagnostics
                self.logger.warning(
                    f"Failed event details: job_id={job_id}, event_type={event_type}, " f"queue={self.queue}, attempt={attempt + 1}/{max_attempts}"
                )

                if attempt < max_attempts - 1:
                    # Try to reconnect before next attempt
                    reconnected = self._reconnect()
                    if reconnected:
                        self.logger.info("Retrying event dispatch after successful reconnection")
                    else:
                        self.logger.warning(f"Reconnection failed, will retry dispatch anyway " f"(attempt {attempt + 2} coming)")
                    # Continue to next attempt
                    continue
                else:
                    # All attempts exhausted
                    self._events_lost += 1
                    self.logger.error(
                        f"CRITICAL: Failed to dispatch event after {max_attempts} attempts. "
                        f"Event will be lost. Total events lost this session: {self._events_lost}"
                    )
                    self.logger.error(
                        f"DIAGNOSTIC INFO: "
                        f"job_id={job_id}, "
                        f"event_type={event_type}, "
                        f"queue={self.queue}, "
                        f"broker_url={self._broker_url}, "
                        f"last_error={error_type}: {last_error}, "
                        f"session_reconnections={self._total_reconnections}, "
                        f"session_events_lost={self._events_lost}"
                    )

                    # IMPORTANT: Don't raise exception
                    # Allow job to continue even though this event was lost
                    # This prevents losing 17+ minutes of work due to event logging failure
                    break

            except Exception as e:
                # Catch any other unexpected Redis errors
                self.logger.error(f"Unexpected error dispatching event to Redis: {type(e).__name__}: {e}")
                self.logger.error(f"Event context: job_id={job_id}, event_type={event_type}")
                # Don't raise - allow job to continue
                break

    def health_check(self):
        """
        Check Redis connection health.

        Returns:
            bool: True if connection is healthy, False otherwise
        """
        try:
            self.connection.ping()
            return True
        except Exception as e:
            self.logger.warning(f"Redis health check failed: {type(e).__name__}: {e}")
            return False

    def get_connection_stats(self):
        """
        Get Redis connection statistics for monitoring.

        Returns:
            dict: Connection statistics
        """
        return {
            'broker_url': self._broker_url,
            'queue': self.queue,
            'connected': self.health_check(),
            'connection_failures': self._connection_failures,
            'total_reconnections': self._total_reconnections,
            'events_lost': self._events_lost,
        }

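For reviewers, a minimal sketch (not part of this change set) of how the retry path above can be exercised against a stub connection; FlakyRedis is hypothetical and mirrors the FakeRedis test double added later in this compare, and the example assumes Django settings are configured so the dispatcher can be constructed.

from unittest import mock
import redis
from awx.main.queue import CallbackQueueDispatcher  # assumes configured Django settings


class FlakyRedis:
    calls = 0

    @classmethod
    def from_url(cls, *args, **kwargs):
        return cls()

    def rpush(self, queue, payload):
        FlakyRedis.calls += 1
        if FlakyRedis.calls == 1:
            raise redis.exceptions.ConnectionError('simulated broken pipe')

    def ping(self):
        return True


with mock.patch('redis.Redis', new=FlakyRedis):
    dispatcher = CallbackQueueDispatcher()
    dispatcher.dispatch({'event': 'runner_on_ok', 'job_id': 42})  # first push fails, reconnect, second push succeeds
    print(dispatcher.health_check())          # True with the stub connection
    print(dispatcher.get_connection_stats())  # includes total_reconnections and events_lost counters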
@@ -174,6 +174,9 @@ class PodManager(object):
        )
        pod_spec['spec']['containers'][0]['name'] = self.pod_name

        # Prevent mounting of service account token in job pods in order to prevent job pods from accessing the k8s API via in cluster service account auth
        pod_spec['spec']['automountServiceAccountToken'] = False

        return pod_spec

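For context, a hedged sketch of the pod spec this produces for a container-group job; the container name and image below are illustrative only and not taken from the diff.

# Illustrative shape of the rendered pod spec after the change above:
pod_spec = {
    'apiVersion': 'v1',
    'kind': 'Pod',
    'spec': {
        'automountServiceAccountToken': False,  # job pods can no longer reach the k8s API via the in-cluster SA token
        'containers': [{'name': 'automation-job-42', 'image': 'example-ee:latest'}],
    },
}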
@@ -10,6 +10,8 @@ import time
import sys
import signal

import redis

# Django
from django.db import transaction
from django.utils.translation import gettext_lazy as _, gettext_noop
@@ -120,6 +122,8 @@ class TaskBase:
                    self.subsystem_metrics.pipe_execute()
                else:
                    logger.debug(f"skipping recording {self.prefix} metrics, last recorded {time_last_recorded} seconds ago")
            except redis.exceptions.ConnectionError as exc:
                logger.warning(f"Redis connection error saving metrics for {self.prefix}, error: {exc}")
            except Exception:
                logger.exception(f"Error saving metrics for {self.prefix}")

@@ -6,7 +6,6 @@ import logging

# Django
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django.db import OperationalError
@@ -26,6 +25,7 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['written_ct'] = 0
    hosts_cached = list()
    try:
        os.makedirs(destination, mode=0o700)
    except FileExistsError:
@@ -34,17 +34,17 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=No
    if timeout is None:
        timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    last_filepath_written = None
    for host in hosts:
        if (not host.ansible_facts_modified) or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
        hosts_cached.append(host)
        if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
            continue  # facts are expired - do not write them

        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue

        try:
            with codecs.open(filepath, 'w', encoding='utf-8') as f:
                os.chmod(f.name, 0o600)
@@ -54,10 +54,11 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=No
        except IOError:
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
            continue
    # make note of the time we wrote the last file so we can check if any file changed later

    if last_filepath_written:
        return os.path.getmtime(last_filepath_written)
    return None
        return os.path.getmtime(last_filepath_written), hosts_cached

    return None, hosts_cached


def raw_update_hosts(host_list):
@@ -88,17 +89,14 @@ def update_hosts(host_list, max_tries=5):
    msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
    add_log_data=True,
)
def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
def finish_fact_cache(hosts_cached, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
    log_data['inventory_id'] = inventory_id
    log_data['updated_ct'] = 0
    log_data['unmodified_ct'] = 0
    log_data['cleared_ct'] = 0

    if isinstance(hosts, QuerySet):
        hosts = hosts.iterator()

    hosts_to_update = []
    for host in hosts:
    for host in hosts_cached:
        filepath = os.sep.join(map(str, [destination, host.name]))
        if not os.path.realpath(filepath).startswith(destination):
            system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
@@ -130,6 +128,7 @@ def finish_fact_cache(hosts, destination, facts_write_time, log_data, job_id=Non
            log_data['unmodified_ct'] += 1
        else:
            # if the file goes missing, ansible removed it (likely via clear_facts)
            # if the file goes missing, but the host has not started facts, then we should not clear the facts
            host.ansible_facts = {}
            host.ansible_facts_modified = now()
            hosts_to_update.append(host)

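A sketch of the new start/finish contract, assuming `job` is a Job with fact caching enabled and an illustrative fact-cache path; the runtime-logging decorator shown above is assumed to inject `log_data`, which is why the calls below omit it.

import os
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache

destination = os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache')  # hypothetical private_data_dir

# start_fact_cache now returns the write time plus the list of hosts it considered
facts_write_time, hosts_cached = start_fact_cache(
    job.get_hosts_for_fact_cache(), destination, inventory_id=job.inventory_id
)

# ... playbook runs; ansible-runner reads and rewrites the per-host fact files ...

# finish_fact_cache consumes that same host list instead of re-querying the inventory
finish_fact_cache(
    hosts_cached, destination, facts_write_time=facts_write_time, job_id=job.id, inventory_id=job.inventory_id
)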
@@ -45,26 +45,46 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
|
||||
facts_missing_logged = False
|
||||
unhashable_facts_logged = False
|
||||
|
||||
job_event_queries_fqcn = {}
|
||||
for query_k, query_v in job_event_queries.items():
|
||||
if len(parts := query_k.split('.')) != 3:
|
||||
logger.info(f"Skiping malformed query '{query_k}'. Expected to be of the form 'a.b.c'")
|
||||
continue
|
||||
if parts[2] != '*':
|
||||
continue
|
||||
job_event_queries_fqcn['.'.join(parts[0:2])] = query_v
|
||||
|
||||
for event in job.job_events.filter(event_data__isnull=False).iterator():
|
||||
if 'res' not in event.event_data:
|
||||
continue
|
||||
|
||||
if 'resolved_action' not in event.event_data or event.event_data['resolved_action'] not in job_event_queries.keys():
|
||||
if not (resolved_action := event.event_data.get('resolved_action', None)):
|
||||
continue
|
||||
|
||||
resolved_action = event.event_data['resolved_action']
|
||||
if len(resolved_action_parts := resolved_action.split('.')) != 3:
|
||||
logger.debug(f"Malformed invocation module name '{resolved_action}'. Expected to be of the form 'a.b.c'")
|
||||
continue
|
||||
|
||||
# We expect a dict with a 'query' key for the resolved_action
|
||||
if 'query' not in job_event_queries[resolved_action]:
|
||||
resolved_action_fqcn = '.'.join(resolved_action_parts[0:2])
|
||||
|
||||
# Match module invocation to collection queries
|
||||
# First match against fully qualified query names i.e. a.b.c
|
||||
# Then try and match against wildcard queries i.e. a.b.*
|
||||
if not (jq_str_for_event := job_event_queries.get(resolved_action, job_event_queries_fqcn.get(resolved_action_fqcn, {})).get('query')):
|
||||
continue
|
||||
|
||||
# Recall from cache, or process the jq expression, and loop over the jq results
|
||||
jq_str_for_event = job_event_queries[resolved_action]['query']
|
||||
|
||||
if jq_str_for_event not in compiled_jq_expressions:
|
||||
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
|
||||
compiled_jq = compiled_jq_expressions[resolved_action]
|
||||
for data in compiled_jq.input(event.event_data['res']).all():
|
||||
|
||||
try:
|
||||
data_source = compiled_jq.input(event.event_data['res']).all()
|
||||
except Exception as e:
|
||||
logger.warning(f'error for module {resolved_action} and data {event.event_data["res"]}: {e}')
|
||||
continue
|
||||
|
||||
for data in data_source:
|
||||
# From this jq result (specific to a single Ansible module), get index information about this host record
|
||||
if not data.get('canonical_facts'):
|
||||
if not facts_missing_logged:
|
||||
|
||||
@@ -1091,7 +1091,7 @@ class RunJob(SourceControlMixin, BaseTask):
        # where ansible expects to find it
        if self.should_use_fact_cache():
            job.log_lifecycle("start_job_fact_cache")
            self.facts_write_time = start_fact_cache(
            self.facts_write_time, self.hosts_with_facts_cached = start_fact_cache(
                job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
            )

@@ -1110,7 +1110,7 @@ class RunJob(SourceControlMixin, BaseTask):
        if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
            job.log_lifecycle("finish_job_fact_cache")
            finish_fact_cache(
                job.get_hosts_for_fact_cache(),
                self.hosts_with_facts_cached,
                os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
                facts_write_time=self.facts_write_time,
                job_id=job.id,

7
awx/main/tests/data/projects/facts/clear.yml
Normal file
7
awx/main/tests/data/projects/facts/clear.yml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- meta: clear_facts
|
||||
17
awx/main/tests/data/projects/facts/gather.yml
Normal file
17
awx/main/tests/data/projects/facts/gather.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
vars:
|
||||
extra_value: ""
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: set a custom fact
|
||||
set_fact:
|
||||
foo: "bar{{ extra_value }}"
|
||||
bar:
|
||||
a:
|
||||
b:
|
||||
- "c"
|
||||
- "d"
|
||||
cacheable: true
|
||||
9
awx/main/tests/data/projects/facts/no_op.yml
Normal file
9
awx/main/tests/data/projects/facts/no_op.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
connection: local
|
||||
vars:
|
||||
msg: 'hello'
|
||||
tasks:
|
||||
- debug: var=msg
|
||||
17
awx/main/tests/data/sleep_task.py
Normal file
@@ -0,0 +1,17 @@
import time
import logging

from awx.main.dispatch import get_task_queuename
from awx.main.dispatch.publish import task


logger = logging.getLogger(__name__)


@task(queue=get_task_queuename)
def sleep_task(seconds=10, log=False):
    if log:
        logger.info('starting sleep_task')
    time.sleep(seconds)
    if log:
        logger.info('finished sleep_task')
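A hedged usage sketch for the fixture task above; it assumes the dispatcher's `task` decorator exposes `delay()`/`apply_async()` the way other AWX dispatcher tasks are invoked, which is not shown in this diff.

# Hypothetical invocation, e.g. from awx-manage shell_plus:
from awx.main.tests.data.sleep_task import sleep_task

sleep_task.delay(seconds=30, log=True)                       # enqueue on the default task queue
sleep_task.apply_async(kwargs={'seconds': 5, 'log': False})  # explicit kwargs form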
@@ -87,8 +87,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
True,
|
||||
('redhat_user', 'redhat_pass'),
|
||||
@@ -98,8 +98,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': None,
|
||||
'REDHAT_PASSWORD': None,
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -109,8 +109,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -120,8 +120,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
False,
|
||||
None, # No request should be made
|
||||
@@ -131,8 +131,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
False,
|
||||
None, # Invalid, no request should be made
|
||||
|
||||
@@ -97,8 +97,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -109,8 +109,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('subs_user', 'subs_pass'),
|
||||
None,
|
||||
@@ -121,8 +121,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_USER,
|
||||
@@ -133,8 +133,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -145,8 +145,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_PASSWORD,
|
||||
|
||||
@@ -56,6 +56,175 @@ def test_user_create(post, admin):
|
||||
assert not response.data['is_system_auditor']
|
||||
|
||||
|
||||
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
|
||||
@override_settings(
|
||||
LOCAL_PASSWORD_MIN_LENGTH=1,
|
||||
LOCAL_PASSWORD_MIN_DIGITS=0,
|
||||
LOCAL_PASSWORD_MIN_UPPER=0,
|
||||
LOCAL_PASSWORD_MIN_SPECIAL=0,
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test_user_create_with_django_password_validation_basic(post, admin):
|
||||
"""Test if the Django password validators are applied correctly."""
|
||||
with override_settings(
|
||||
AUTH_PASSWORD_VALIDATORS=[
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
|
||||
'OPTIONS': {
|
||||
'min_length': 3,
|
||||
},
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
|
||||
},
|
||||
],
|
||||
):
|
||||
# This user should fail the UserAttrSimilarity, MinLength and CommonPassword validators.
|
||||
user_attrs = (
|
||||
{
|
||||
"password": "Password", # NOSONAR
|
||||
"username": "Password",
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
print(f"Create user with invalid password {user_attrs=}")
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 400
|
||||
# This user should pass all Django validators.
|
||||
user_attrs = {
|
||||
"password": "r$TyKiOCb#ED", # NOSONAR
|
||||
"username": "TestUser",
|
||||
"is_superuser": False,
|
||||
}
|
||||
print(f"Create user with valid password {user_attrs=}")
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 201
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"user_attrs,validators,expected_status_code",
|
||||
[
|
||||
# Test password similarity with username.
|
||||
(
|
||||
{"password": "TestUser1", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "abc", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test password min length criterion.
|
||||
(
|
||||
{"password": "TooShort", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "LongEnough", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test password is too common criterion.
|
||||
(
|
||||
{"password": "Password", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "aEArV$5Vkdw", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
# Test if password is only numeric.
|
||||
(
|
||||
{"password": "1234567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
|
||||
],
|
||||
400,
|
||||
),
|
||||
(
|
||||
{"password": "abc4567890", "username": "TestUser1", "is_superuser": False}, # NOSONAR
|
||||
[
|
||||
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
|
||||
],
|
||||
201,
|
||||
),
|
||||
],
|
||||
)
|
||||
# Disable local password checks to ensure that any ValidationError originates from the Django validators.
|
||||
@override_settings(
|
||||
LOCAL_PASSWORD_MIN_LENGTH=1,
|
||||
LOCAL_PASSWORD_MIN_DIGITS=0,
|
||||
LOCAL_PASSWORD_MIN_UPPER=0,
|
||||
LOCAL_PASSWORD_MIN_SPECIAL=0,
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test_user_create_with_django_password_validation_ext(post, delete, admin, user_attrs, validators, expected_status_code):
|
||||
"""Test the functionality of the single Django password validators."""
|
||||
#
|
||||
default_parameters = {
|
||||
# Default values for input parameters which are None.
|
||||
"user_attrs": {
|
||||
"password": "r$TyKiOCb#ED", # NOSONAR
|
||||
"username": "DefaultUser",
|
||||
"is_superuser": False,
|
||||
},
|
||||
"validators": [
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
|
||||
'OPTIONS': {
|
||||
'min_length': 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
|
||||
},
|
||||
{
|
||||
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
|
||||
},
|
||||
],
|
||||
}
|
||||
user_attrs = user_attrs if user_attrs is not None else default_parameters["user_attrs"]
|
||||
validators = validators if validators is not None else default_parameters["validators"]
|
||||
with override_settings(AUTH_PASSWORD_VALIDATORS=validators):
|
||||
response = post(reverse('api:user_list'), user_attrs, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == expected_status_code
|
||||
# Delete user if it was created succesfully.
|
||||
if response.status_code == 201:
|
||||
response = delete(reverse('api:user_detail', kwargs={'pk': response.data['id']}), admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
assert response.status_code == 204
|
||||
else:
|
||||
# Catch the unexpected behavior that sometimes the user is written
|
||||
# into the database before the validation fails. This actually can
|
||||
# happen if UserSerializer.validate instantiates User(**attrs)!
|
||||
username = user_attrs['username']
|
||||
assert not User.objects.filter(username=username)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_fail_double_create_user(post, admin):
|
||||
response = post(reverse('api:user_list'), EXAMPLE_USER_DATA, admin, middleware=SessionMiddleware(mock.Mock()))
|
||||
@@ -82,6 +251,10 @@ def test_updating_own_password_refreshes_session(patch, admin):
    Updating your own password should refresh the session id.
    '''
    with mock.patch('awx.api.serializers.update_session_auth_hash') as update_session_auth_hash:
        # Attention: If the Django password validator `CommonPasswordValidator`
        # is active, this test case will fail because this validator raises on
        # password 'newpassword'. Consider changing the hard-coded password to
        # something uncommon.
        patch(reverse('api:user_detail', kwargs={'pk': admin.pk}), {'password': 'newpassword'}, admin, middleware=SessionMiddleware(mock.Mock()))
        assert update_session_auth_hash.called

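For reference, a minimal sketch of the Django API these AUTH_PASSWORD_VALIDATORS settings drive; whether the AWX serializer calls it exactly this way is not shown in this diff.

from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import get_password_validators, validate_password
from django.core.exceptions import ValidationError

validators = get_password_validators([
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {'min_length': 9}},
])
user = get_user_model()(username='TestUser1')
try:
    validate_password('TestUser1', user=user, password_validators=validators)  # too similar to the username and too short
except ValidationError as exc:
    print(exc.messages)  # mirrors the 400 responses asserted in the parametrized cases above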
@@ -34,40 +34,18 @@ def test_wrapup_does_send_notifications(mocker):
|
||||
mock.assert_called_once_with('succeeded')
|
||||
|
||||
|
||||
class FakeRedis:
|
||||
def keys(self, *args, **kwargs):
|
||||
return []
|
||||
|
||||
def set(self):
|
||||
pass
|
||||
|
||||
def get(self):
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def from_url(cls, *args, **kwargs):
|
||||
return cls()
|
||||
|
||||
def pipeline(self):
|
||||
return self
|
||||
|
||||
|
||||
class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
@pytest.fixture(autouse=True)
|
||||
def turn_off_websockets(self):
|
||||
def turn_off_websockets_and_redis(self, fake_redis):
|
||||
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
|
||||
yield
|
||||
|
||||
def get_worker(self):
|
||||
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
||||
return CallbackBrokerWorker()
|
||||
|
||||
def event_create_kwargs(self):
|
||||
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
|
||||
return dict(inventory_update=inventory_update, created=inventory_update.created)
|
||||
|
||||
def test_flush_with_valid_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events}
|
||||
worker.flush()
|
||||
@@ -75,7 +53,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
|
||||
|
||||
def test_flush_with_invalid_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [
|
||||
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
|
||||
@@ -90,7 +68,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff == {InventoryUpdateEvent: [events[1]]}
|
||||
|
||||
def test_duplicate_key_not_saved_twice(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
worker.flush()
|
||||
@@ -104,7 +82,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff.get(InventoryUpdateEvent, []) == []
|
||||
|
||||
def test_give_up_on_bad_event(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
|
||||
@@ -117,7 +95,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
|
||||
|
||||
def test_flush_with_empty_buffer(self):
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
worker.buff = {InventoryUpdateEvent: []}
|
||||
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
|
||||
worker.flush()
|
||||
@@ -127,7 +105,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
# In postgres, text fields reject NUL character, 0x00
|
||||
# tests use sqlite3 which will not raise an error
|
||||
# but we can still test that it is sanitized before saving
|
||||
worker = self.get_worker()
|
||||
worker = CallbackBrokerWorker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
|
||||
assert "\x00" in events[0].stdout # sanity
|
||||
|
||||
@@ -63,6 +63,33 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
    return requests


class FakeRedis:
    def keys(self, *args, **kwargs):
        return []

    def set(self):
        pass

    def get(self):
        return None

    @classmethod
    def from_url(cls, *args, **kwargs):
        return cls()

    def pipeline(self):
        return self

    def ping(self):
        return


@pytest.fixture
def fake_redis():
    with mock.patch('redis.Redis', new=FakeRedis):  # turn off redis stuff
        yield


@pytest.fixture
def user():
    def u(name, is_superuser=False):

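A hedged usage sketch of the new shared fixture: any test that builds a Redis-touching object (such as CallbackBrokerWorker in the callback tests above) can request `fake_redis` instead of patching `redis.Redis` locally; the test name and assertion below are illustrative.

def test_worker_constructs_without_real_redis(fake_redis):
    # redis.Redis.from_url returns a FakeRedis instance inside the fixture's patch
    worker = CallbackBrokerWorker()
    assert worker is not None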
@@ -106,6 +106,17 @@ def test_compat_role_naming(setup_managed_roles, job_template, rando, alice):
|
||||
assert rd.created_by is None
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_admin_has_audit(setup_managed_roles):
|
||||
"""This formalizes a behavior change from old to new RBAC system
|
||||
|
||||
Previously, the auditor_role did not list admin_role as a parent
|
||||
this made various queries hard to deal with, requiring adding 2 conditions
|
||||
The new system should explicitly list the auditor permission in org admin role"""
|
||||
rd = RoleDefinition.objects.get(name='Organization Admin')
|
||||
assert 'audit_organization' in rd.permissions.values_list('codename', flat=True)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_organization_level_permissions(organization, inventory, setup_managed_roles):
|
||||
u1 = User.objects.create(username='alice')
|
||||
|
||||
@@ -3,6 +3,10 @@ import pytest
|
||||
# AWX
|
||||
from awx.main.ha import is_ha_environment
|
||||
from awx.main.models.ha import Instance
|
||||
from awx.main.dispatch.pool import get_auto_max_workers
|
||||
|
||||
# Django
|
||||
from django.test.utils import override_settings
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -17,3 +21,25 @@ def test_db_localhost():
|
||||
Instance.objects.create(hostname='foo', node_type='hybrid')
|
||||
Instance.objects.create(hostname='bar', node_type='execution')
|
||||
assert is_ha_environment() is False
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'settings',
|
||||
[
|
||||
dict(SYSTEM_TASK_ABS_MEM='16Gi', SYSTEM_TASK_ABS_CPU='24', SYSTEM_TASK_FORKS_MEM=400, SYSTEM_TASK_FORKS_CPU=4),
|
||||
dict(SYSTEM_TASK_ABS_MEM='124Gi', SYSTEM_TASK_ABS_CPU='2', SYSTEM_TASK_FORKS_MEM=None, SYSTEM_TASK_FORKS_CPU=None),
|
||||
],
|
||||
ids=['cpu_dominated', 'memory_dominated'],
|
||||
)
|
||||
def test_dispatcher_max_workers_reserve(settings, fake_redis):
|
||||
"""This tests that the dispatcher max_workers matches instance capacity
|
||||
|
||||
Assumes capacity_adjustment is 1,
|
||||
plus reserve worker count
|
||||
"""
|
||||
with override_settings(**settings):
|
||||
i = Instance.objects.create(hostname='test-1', node_type='hybrid')
|
||||
i.local_health_check()
|
||||
|
||||
assert get_auto_max_workers() == i.capacity + 7, (i.cpu, i.memory, i.cpu_capacity, i.mem_capacity)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.access import (
|
||||
UnifiedJobAccess,
|
||||
WorkflowJobTemplateAccess,
|
||||
WorkflowJobTemplateNodeAccess,
|
||||
WorkflowJobAccess,
|
||||
@@ -245,6 +246,30 @@ class TestWorkflowJobAccess:
|
||||
inventory.use_role.members.add(rando)
|
||||
assert WorkflowJobAccess(rando).can_start(workflow_job)
|
||||
|
||||
@pytest.mark.parametrize('org_role', ['admin_role', 'auditor_role'])
|
||||
def test_workflow_job_org_audit_access(self, workflow_job_template, rando, org_role):
|
||||
assert workflow_job_template.organization # sanity
|
||||
workflow_job = workflow_job_template.create_unified_job()
|
||||
assert workflow_job.organization # sanity
|
||||
|
||||
assert not UnifiedJobAccess(rando).can_read(workflow_job)
|
||||
assert not WorkflowJobAccess(rando).can_read(workflow_job)
|
||||
assert workflow_job not in WorkflowJobAccess(rando).filtered_queryset()
|
||||
|
||||
org = workflow_job.organization
|
||||
role = getattr(org, org_role)
|
||||
role.members.add(rando)
|
||||
|
||||
assert UnifiedJobAccess(rando).can_read(workflow_job)
|
||||
assert WorkflowJobAccess(rando).can_read(workflow_job)
|
||||
assert workflow_job in WorkflowJobAccess(rando).filtered_queryset()
|
||||
|
||||
# Organization-level permissions should persist after deleting the WFJT
|
||||
workflow_job_template.delete()
|
||||
assert UnifiedJobAccess(rando).can_read(workflow_job)
|
||||
assert WorkflowJobAccess(rando).can_read(workflow_job)
|
||||
assert workflow_job in WorkflowJobAccess(rando).filtered_queryset()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestWFJTCopyAccess:
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import yaml
|
||||
from functools import reduce
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
@@ -20,6 +21,46 @@ from awx.main.models.indirect_managed_node_audit import IndirectManagedNodeAudit
TEST_JQ = "{name: .name, canonical_facts: {host_name: .direct_host_name}, facts: {another_host_name: .direct_host_name}}"


class Query(dict):
    def __init__(self, resolved_action: str, query_jq: dict):
        self._resolved_action = resolved_action.split('.')
        self._collection_ns, self._collection_name, self._module_name = self._resolved_action

        super().__init__({self.resolve_key: {'query': query_jq}})

    def get_fqcn(self):
        return f'{self._collection_ns}.{self._collection_name}'

    @property
    def resolve_value(self):
        return self[self.resolve_key]

    @property
    def resolve_key(self):
        return f'{self.get_fqcn()}.{self._module_name}'

    def resolve(self, module_name=None):
        return {f'{self.get_fqcn()}.{module_name or self._module_name}': self.resolve_value}

    def create_event_query(self, module_name=None):
        if (module_name := module_name or self._module_name) == '*':
            raise ValueError('Invalid module name *')
        return self.create_event_queries([module_name])

    def create_event_queries(self, module_names):
        queries = {}
        for name in module_names:
            queries |= self.resolve(name)
        return EventQuery.objects.create(
            fqcn=self.get_fqcn(),
            collection_version='1.0.1',
            event_query=yaml.dump(queries, default_flow_style=False),
        )

    def create_registered_event(self, job, module_name):
        job.job_events.create(event_data={'resolved_action': f'{self.get_fqcn()}.{module_name}', 'res': {'direct_host_name': 'foo_host', 'name': 'vm-foo'}})

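Illustrative only: how the Query helper above composes a registered EventQuery and a matching job event inside a test (`bare_job` is the fixture defined below in this module).

q = Query('demo.query.example', TEST_JQ)
q.create_event_query()                           # persists an EventQuery row for fqcn 'demo.query'
q.create_registered_event(bare_job, 'example')   # job event whose resolved_action matches the query
assert q.resolve() == {'demo.query.example': {'query': TEST_JQ}}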
@pytest.fixture
|
||||
def bare_job(job_factory):
|
||||
job = job_factory()
|
||||
@@ -39,11 +80,6 @@ def job_with_counted_event(bare_job):
|
||||
return bare_job
|
||||
|
||||
|
||||
def create_event_query(fqcn='demo.query'):
|
||||
module_name = f'{fqcn}.example'
|
||||
return EventQuery.objects.create(fqcn=fqcn, collection_version='1.0.1', event_query=yaml.dump({module_name: {'query': TEST_JQ}}, default_flow_style=False))
|
||||
|
||||
|
||||
def create_audit_record(name, job, organization, created=now()):
|
||||
record = IndirectManagedNodeAudit.objects.create(name=name, job=job, organization=organization)
|
||||
record.created = created
|
||||
@@ -54,7 +90,7 @@ def create_audit_record(name, job, organization, created=now()):
|
||||
@pytest.fixture
|
||||
def event_query():
|
||||
"This is ordinarily created by the artifacts callback"
|
||||
return create_event_query()
|
||||
return Query('demo.query.example', TEST_JQ).create_event_query()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -72,105 +108,211 @@ def new_audit_record(bare_job, organization):
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_build_with_no_results(bare_job):
|
||||
# never filled in events, should do nothing
|
||||
assert build_indirect_host_data(bare_job, {}) == []
|
||||
@pytest.mark.parametrize(
|
||||
'queries,expected_matches',
|
||||
(
|
||||
pytest.param(
|
||||
[],
|
||||
0,
|
||||
id='no_results',
|
||||
),
|
||||
pytest.param(
|
||||
[Query('demo.query.example', TEST_JQ)],
|
||||
1,
|
||||
id='fully_qualified',
|
||||
),
|
||||
pytest.param(
|
||||
[Query('demo.query.*', TEST_JQ)],
|
||||
1,
|
||||
id='wildcard',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
],
|
||||
1,
|
||||
id='wildcard_and_fully_qualified',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
Query('demo.query.example', {}),
|
||||
],
|
||||
0,
|
||||
id='wildcard_and_fully_qualified',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
Query('demo.query.example', {}),
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
],
|
||||
0,
|
||||
id='ordering_should_not_matter',
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_build_indirect_host_data(job_with_counted_event, queries: Query, expected_matches: int):
|
||||
data = build_indirect_host_data(job_with_counted_event, {k: v for d in queries for k, v in d.items()})
|
||||
assert len(data) == expected_matches
|
||||
|
||||
|
||||
@mock.patch('awx.main.tasks.host_indirect.logger.debug')
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'task_name',
|
||||
(
|
||||
pytest.param(
|
||||
'demo.query',
|
||||
id='no_results',
|
||||
),
|
||||
pytest.param(
|
||||
'demo',
|
||||
id='no_results',
|
||||
),
|
||||
pytest.param(
|
||||
'a.b.c.d',
|
||||
id='no_results',
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_build_indirect_host_data_malformed_module_name(mock_logger_debug, bare_job, task_name: str):
|
||||
create_registered_event(bare_job, task_name)
|
||||
assert build_indirect_host_data(bare_job, Query('demo.query.example', TEST_JQ)) == []
|
||||
mock_logger_debug.assert_called_once_with(f"Malformed invocation module name '{task_name}'. Expected to be of the form 'a.b.c'")
|
||||
|
||||
|
||||
@mock.patch('awx.main.tasks.host_indirect.logger.info')
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'query',
|
||||
(
|
||||
pytest.param(
|
||||
'demo.query',
|
||||
id='no_results',
|
||||
),
|
||||
pytest.param(
|
||||
'demo',
|
||||
id='no_results',
|
||||
),
|
||||
pytest.param(
|
||||
'a.b.c.d',
|
||||
id='no_results',
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_build_indirect_host_data_malformed_query(mock_logger_info, job_with_counted_event, query: str):
|
||||
assert build_indirect_host_data(job_with_counted_event, {query: {'query': TEST_JQ}}) == []
|
||||
mock_logger_info.assert_called_once_with(f"Skiping malformed query '{query}'. Expected to be of the form 'a.b.c'")
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_collect_an_event(job_with_counted_event):
|
||||
records = build_indirect_host_data(job_with_counted_event, {'demo.query.example': {'query': TEST_JQ}})
|
||||
assert len(records) == 1
|
||||
@pytest.mark.parametrize(
|
||||
'query',
|
||||
(
|
||||
pytest.param(
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
id='fully_qualified',
|
||||
),
|
||||
pytest.param(
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
id='wildcard',
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_fetch_job_event_query(bare_job, query: Query):
|
||||
query.create_event_query(module_name='example')
|
||||
assert fetch_job_event_query(bare_job) == query.resolve('example')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_fetch_job_event_query(bare_job, event_query):
|
||||
assert fetch_job_event_query(bare_job) == {'demo.query.example': {'query': TEST_JQ}}
|
||||
@pytest.mark.parametrize(
|
||||
'queries',
|
||||
(
|
||||
[
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
Query('demo2.query.example', TEST_JQ),
|
||||
],
|
||||
[
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
Query('demo2.query.example', TEST_JQ),
|
||||
],
|
||||
),
|
||||
)
|
||||
def test_fetch_multiple_job_event_query(bare_job, queries: list[Query]):
|
||||
for q in queries:
|
||||
q.create_event_query(module_name='example')
|
||||
assert fetch_job_event_query(bare_job) == reduce(lambda acc, q: acc | q.resolve('example'), queries, {})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_fetch_multiple_job_event_query(bare_job):
|
||||
create_event_query(fqcn='demo.query')
|
||||
create_event_query(fqcn='demo2.query')
|
||||
assert fetch_job_event_query(bare_job) == {'demo.query.example': {'query': TEST_JQ}, 'demo2.query.example': {'query': TEST_JQ}}
|
||||
@pytest.mark.parametrize(
|
||||
('state',),
|
||||
(
|
||||
pytest.param(
|
||||
[
|
||||
(
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
['example'],
|
||||
),
|
||||
],
|
||||
id='fully_qualified',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
(
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
['example'] * 3,
|
||||
),
|
||||
],
|
||||
id='multiple_events_same_module_same_host',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
(
|
||||
Query('demo.query.example', TEST_JQ),
|
||||
['example'],
|
||||
),
|
||||
(
|
||||
Query('demo2.query.example', TEST_JQ),
|
||||
['example'],
|
||||
),
|
||||
],
|
||||
id='multiple_modules',
|
||||
),
|
||||
pytest.param(
|
||||
[
|
||||
(
|
||||
Query('demo.query.*', TEST_JQ),
|
||||
['example', 'example2'],
|
||||
),
|
||||
],
|
||||
id='multiple_modules_same_collection',
|
||||
),
|
||||
),
|
||||
)
|
||||
def test_save_indirect_host_entries(bare_job, state):
|
||||
all_task_names = []
|
||||
for entry in state:
|
||||
query, module_names = entry
|
||||
all_task_names.extend([f'{query.get_fqcn()}.{module_name}' for module_name in module_names])
|
||||
query.create_event_queries(module_names)
|
||||
[query.create_registered_event(bare_job, n) for n in module_names]
|
||||
|
||||
save_indirect_host_entries(bare_job.id)
|
||||
bare_job.refresh_from_db()
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_save_indirect_host_entries(job_with_counted_event, event_query):
|
||||
assert job_with_counted_event.event_queries_processed is False
|
||||
save_indirect_host_entries(job_with_counted_event.id)
|
||||
job_with_counted_event.refresh_from_db()
|
||||
assert job_with_counted_event.event_queries_processed is True
|
||||
assert IndirectManagedNodeAudit.objects.filter(job=job_with_counted_event).count() == 1
|
||||
host_audit = IndirectManagedNodeAudit.objects.filter(job=job_with_counted_event).first()
|
||||
assert host_audit.count == 1
|
||||
assert bare_job.event_queries_processed is True
|
||||
|
||||
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
|
||||
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
|
||||
|
||||
assert host_audit.count == len(all_task_names)
|
||||
assert host_audit.canonical_facts == {'host_name': 'foo_host'}
|
||||
assert host_audit.facts == {'another_host_name': 'foo_host'}
|
||||
assert host_audit.organization == job_with_counted_event.organization
|
||||
assert host_audit.organization == bare_job.organization
|
||||
assert host_audit.name == 'vm-foo'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_multiple_events_same_module_same_host(bare_job, event_query):
|
||||
"This tests that the count field gives correct answers"
|
||||
create_registered_event(bare_job)
|
||||
create_registered_event(bare_job)
|
||||
create_registered_event(bare_job)
|
||||
|
||||
save_indirect_host_entries(bare_job.id)
|
||||
|
||||
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
|
||||
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
|
||||
|
||||
assert host_audit.count == 3
|
||||
assert host_audit.events == ['demo.query.example']
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_multiple_registered_modules(bare_job):
|
||||
"This tests that the events will list multiple modules if more than 1 module from different collections is registered and used"
|
||||
create_registered_event(bare_job, task_name='demo.query.example')
|
||||
create_registered_event(bare_job, task_name='demo2.query.example')
|
||||
|
||||
# These take the place of using the event_query fixture
|
||||
create_event_query(fqcn='demo.query')
|
||||
create_event_query(fqcn='demo2.query')
|
||||
|
||||
save_indirect_host_entries(bare_job.id)
|
||||
|
||||
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
|
||||
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
|
||||
|
||||
assert host_audit.count == 2
|
||||
assert set(host_audit.events) == {'demo.query.example', 'demo2.query.example'}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_multiple_registered_modules_same_collection(bare_job):
|
||||
"This tests that the events will list multiple modules if more than 1 module in same collection is registered and used"
|
||||
create_registered_event(bare_job, task_name='demo.query.example')
|
||||
create_registered_event(bare_job, task_name='demo.query.example2')
|
||||
|
||||
# Takes place of event_query fixture, doing manually here
|
||||
EventQuery.objects.create(
|
||||
fqcn='demo.query',
|
||||
collection_version='1.0.1',
|
||||
event_query=yaml.dump(
|
||||
{
|
||||
'demo.query.example': {'query': TEST_JQ},
|
||||
'demo.query.example2': {'query': TEST_JQ},
|
||||
},
|
||||
default_flow_style=False,
|
||||
),
|
||||
)
|
||||
|
||||
save_indirect_host_entries(bare_job.id)
|
||||
|
||||
assert IndirectManagedNodeAudit.objects.filter(job=bare_job).count() == 1
|
||||
host_audit = IndirectManagedNodeAudit.objects.filter(job=bare_job).first()
|
||||
|
||||
assert host_audit.count == 2
|
||||
assert set(host_audit.events) == {'demo.query.example', 'demo.query.example2'}
|
||||
assert set(host_audit.events) == set(all_task_names)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -129,7 +129,7 @@ def podman_image_generator():
|
||||
|
||||
@pytest.fixture
|
||||
def run_job_from_playbook(default_org, demo_inv, post, admin):
|
||||
def _rf(test_name, playbook, local_path=None, scm_url=None):
|
||||
def _rf(test_name, playbook, local_path=None, scm_url=None, jt_params=None):
|
||||
project_name = f'{test_name} project'
|
||||
jt_name = f'{test_name} JT: {playbook}'
|
||||
|
||||
@@ -166,9 +166,13 @@ def run_job_from_playbook(default_org, demo_inv, post, admin):
|
||||
assert proj.get_project_path()
|
||||
assert playbook in proj.playbooks
|
||||
|
||||
jt_data = {'name': jt_name, 'project': proj.id, 'playbook': playbook, 'inventory': demo_inv.id}
|
||||
if jt_params:
|
||||
jt_data.update(jt_params)
|
||||
|
||||
result = post(
|
||||
reverse('api:job_template_list'),
|
||||
{'name': jt_name, 'project': proj.id, 'playbook': playbook, 'inventory': demo_inv.id},
|
||||
jt_data,
|
||||
admin,
|
||||
expect=201,
|
||||
)
|
||||
|
||||
64
awx/main/tests/live/tests/test_ansible_facts.py
Normal file
64
awx/main/tests/live/tests/test_ansible_facts.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.tests.live.tests.conftest import wait_for_events
|
||||
|
||||
from awx.main.models import Job, Inventory
|
||||
|
||||
|
||||
def assert_facts_populated(name):
|
||||
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
|
||||
assert job is not None
|
||||
wait_for_events(job)
|
||||
|
||||
inventory = job.inventory
|
||||
assert inventory.hosts.count() > 0 # sanity
|
||||
for host in inventory.hosts.all():
|
||||
assert host.ansible_facts
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def general_facts_test(live_tmp_folder, run_job_from_playbook):
|
||||
def _rf(slug, jt_params):
|
||||
jt_params['use_fact_cache'] = True
|
||||
standard_kwargs = dict(scm_url=f'file://{live_tmp_folder}/facts', jt_params=jt_params)
|
||||
|
||||
# GATHER FACTS
|
||||
name = f'test_gather_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'gather.yml', **standard_kwargs)
|
||||
assert_facts_populated(name)
|
||||
|
||||
# KEEP FACTS
|
||||
name = f'test_clear_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'no_op.yml', **standard_kwargs)
|
||||
assert_facts_populated(name)
|
||||
|
||||
# CLEAR FACTS
|
||||
name = f'test_clear_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'clear.yml', **standard_kwargs)
|
||||
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
|
||||
|
||||
assert job is not None
|
||||
wait_for_events(job)
|
||||
inventory = job.inventory
|
||||
assert inventory.hosts.count() > 0 # sanity
|
||||
for host in inventory.hosts.all():
|
||||
assert not host.ansible_facts
|
||||
|
||||
return _rf
|
||||
|
||||
|
||||
def test_basic_ansible_facts(general_facts_test):
|
||||
general_facts_test('basic', {})
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sliced_inventory():
|
||||
inv, _ = Inventory.objects.get_or_create(name='inventory-to-slice')
|
||||
if not inv.hosts.exists():
|
||||
for i in range(10):
|
||||
inv.hosts.create(name=f'sliced_host_{i}')
|
||||
return inv
|
||||
|
||||
|
||||
def test_slicing_with_facts(general_facts_test, sliced_inventory):
|
||||
general_facts_test('sliced', {'job_slice_count': 3, 'inventory': sliced_inventory.id})
|
||||
@@ -34,7 +34,7 @@ def hosts(ref_time):
|
||||
|
||||
def test_start_job_fact_cache(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
for host in hosts:
|
||||
filepath = os.path.join(fact_cache, host.name)
|
||||
@@ -61,7 +61,7 @@ def test_fact_cache_with_invalid_path_traversal(tmpdir):
|
||||
def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
# the hosts fixture was modified 5s ago, which is more than 2s
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
assert last_modified is None
|
||||
|
||||
for host in hosts:
|
||||
@@ -71,7 +71,7 @@ def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
|
||||
def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
# the hosts fixture was modified 5s ago, which is less than 7s
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=7)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=7)
|
||||
assert last_modified
|
||||
|
||||
for host in hosts:
|
||||
@@ -80,7 +80,7 @@ def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
|
||||
|
||||
def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
@@ -108,7 +108,7 @@ def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_tim
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
@@ -127,7 +127,7 @@ def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
|
||||
|
||||
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
|
||||
162
awx/main/tests/unit/tasks/test_jobs.py
Normal file
162
awx/main/tests/unit/tasks/test_jobs.py
Normal file
@@ -0,0 +1,162 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
from awx.main.models import (
|
||||
Inventory,
|
||||
Host,
|
||||
)
|
||||
|
||||
from django.utils.timezone import now
|
||||
from django.db.models.query import QuerySet
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
Organization,
|
||||
Project,
|
||||
)
|
||||
from awx.main.tasks import jobs
|
||||
|
@pytest.fixture
def private_data_dir():
    private_data = tempfile.mkdtemp(prefix='awx_')
    for subfolder in ('inventory', 'env'):
        runner_subfolder = os.path.join(private_data, subfolder)
        os.makedirs(runner_subfolder, exist_ok=True)
    yield private_data
    shutil.rmtree(private_data, True)


@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
def test_pre_post_run_hook_facts(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
    # creates inventory_object with two hosts
    inventory = Inventory(pk=1)
    mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
    mock_inventory._state = mock.MagicMock()
    qs_hosts = QuerySet()
    hosts = [
        Host(id=1, name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
        Host(id=2, name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
    ]
    qs_hosts._result_cache = hosts
    qs_hosts.only = mock.MagicMock(return_value=hosts)
    mock_inventory.hosts = qs_hosts
    assert mock_inventory.hosts.count() == 2

    # creates job object with fact_cache enabled
    org = Organization(pk=1)
    proj = Project(pk=1, organization=org)
    job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=1)
    job.inventory = mock_inventory
    job.execution_environment = execution_environment
    job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job)  # to run original method
    job.job_env.get = mock.MagicMock(return_value=private_data_dir)

    # creates the task object with job object as instance
    mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False  # defines timeout to false
    task = jobs.RunJob()
    task.instance = job
    task.update_model = mock.Mock(return_value=job)
    task.model.objects.get = mock.Mock(return_value=job)

    # run pre_run_hook
    task.facts_write_time = task.pre_run_hook(job, private_data_dir)

    # updates inventory with one more host
    hosts.append(Host(id=3, name='host3', ansible_facts={"added": True}, ansible_facts_modified=now(), inventory=mock_inventory))
    assert mock_inventory.hosts.count() == 3

    # run post_run_hook
    task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)

    task.post_run_hook(job, "success")
    assert mock_inventory.hosts[2].ansible_facts == {"added": True}


@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
def test_pre_post_run_hook_facts_deleted_sliced(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
    # creates inventory_object with two hosts
    inventory = Inventory(pk=1)
    mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
    mock_inventory._state = mock.MagicMock()
    qs_hosts = QuerySet()
    hosts = [Host(id=num, name=f'host{num}', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory) for num in range(999)]

    qs_hosts._result_cache = hosts
    qs_hosts.only = mock.MagicMock(return_value=hosts)
    mock_inventory.hosts = qs_hosts
    assert mock_inventory.hosts.count() == 999

    # creates job object with fact_cache enabled
    org = Organization(pk=1)
    proj = Project(pk=1, organization=org)
    job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=3)
    job.inventory = mock_inventory
    job.execution_environment = execution_environment
    job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job)  # to run original method
    job.job_env.get = mock.MagicMock(return_value=private_data_dir)

    # creates the task object with job object as instance
    mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False
    task = jobs.RunJob()
    task.instance = job
    task.update_model = mock.Mock(return_value=job)
    task.model.objects.get = mock.Mock(return_value=job)

    # run pre_run_hook
    task.facts_write_time = task.pre_run_hook(job, private_data_dir)

    hosts.pop(1)
    assert mock_inventory.hosts.count() == 998

    # run post_run_hook
    task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)
    task.post_run_hook(job, "success")

    for host in hosts:
        assert host.ansible_facts == {"a": 1, "b": 2}

    failures = []
    for host in hosts:
        try:
            assert host.ansible_facts == {"a": 1, "b": 2, "unexpected_key": "bad"}
        except AssertionError:
            failures.append("Host named {} has facts {}".format(host.name, host.ansible_facts))

    assert len(failures) > 0, f"Failures occurred for the following hosts: {failures}"


@mock.patch('awx.main.tasks.facts.update_hosts')
@mock.patch('awx.main.tasks.facts.settings')
def test_invalid_host_facts(mock_facts_settings, update_hosts, private_data_dir, execution_environment):
    inventory = Inventory(pk=1)
    mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
    mock_inventory._state = mock.MagicMock()

    hosts = [
        Host(id=0, name='host0', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
        Host(id=1, name='host1', ansible_facts={"a": 1, "b": 2, "unexpected_key": "bad"}, ansible_facts_modified=now(), inventory=mock_inventory),
    ]
    mock_inventory.hosts = hosts

    failures = []
    for host in mock_inventory.hosts:
        assert "a" in host.ansible_facts
        if "unexpected_key" in host.ansible_facts:
            failures.append(host.name)

    mock_facts_settings.SOME_SETTING = True
    update_hosts(mock_inventory.hosts)

    with pytest.raises(pytest.fail.Exception):
        if failures:
            pytest.fail(f" {len(failures)} facts cleared failures : {','.join(failures)}")
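The tests above fake an inventory's host queryset without touching a database by pre-populating the queryset's `_result_cache` and stubbing `.only()`. A minimal sketch of that trick, assuming Django settings are already configured (as they are in AWX's unit-test run); the `fake_rows` objects are hypothetical placeholders standing in for `Host` instances:

```python
from unittest import mock

from django.db.models.query import QuerySet

# Hypothetical stand-ins; in the tests above these are Host objects.
fake_rows = [object(), object()]

qs = QuerySet()                # no model bound, never hits the database
qs._result_cache = fake_rows   # count(), len() and iteration now use this list
qs.only = mock.MagicMock(return_value=fake_rows)  # short-circuit .only(...) calls

assert qs.count() == len(fake_rows)
```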
37
awx/main/tests/unit/utils/test_licensing.py
Normal file
@@ -0,0 +1,37 @@
import json
from http import HTTPStatus
from unittest.mock import patch

from requests import Response

from awx.main.utils.licensing import Licenser


def test_rhsm_licensing():
    def mocked_requests_get(*args, **kwargs):
        assert kwargs['verify'] == True
        response = Response()
        subs = json.dumps({'body': []})
        response.status_code = HTTPStatus.OK
        response._content = bytes(subs, 'utf-8')
        return response

    licenser = Licenser()
    with patch('awx.main.utils.analytics_proxy.OIDCClient.make_request', new=mocked_requests_get):
        subs = licenser.get_rhsm_subs('localhost', 'admin', 'admin')
        assert subs == []


def test_satellite_licensing():
    def mocked_requests_get(*args, **kwargs):
        assert kwargs['verify'] == True
        response = Response()
        subs = json.dumps({'results': []})
        response.status_code = HTTPStatus.OK
        response._content = bytes(subs, 'utf-8')
        return response

    licenser = Licenser()
    with patch('requests.get', new=mocked_requests_get):
        subs = licenser.get_satellite_subs('localhost', 'admin', 'admin')
        assert subs == []
@@ -23,7 +23,7 @@ class TokenError(requests.RequestException):
    try:
        client = OIDCClient(...)
        client.make_request(...)
    except TokenGenerationError as e:
    except TokenError as e:
        print(f"Token generation failed due to {e.__cause__}")
    except requests.RequestException:
        print("API request failed")
@@ -102,13 +102,15 @@ class OIDCClient:
        self,
        client_id: str,
        client_secret: str,
        token_url: str,
        scopes: list[str],
        token_url: str = DEFAULT_OIDC_TOKEN_ENDPOINT,
        scopes: list[str] = None,
        base_url: str = '',
    ) -> None:
        self.client_id: str = client_id
        self.client_secret: str = client_secret
        self.token_url: str = token_url
        if scopes is None:
            scopes = ['api.console']
        self.scopes = scopes
        self.base_url: str = base_url
        self.token: Optional[Token] = None

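A minimal sketch of how the reworked constructor is exercised elsewhere in this changeset (the `get_rhsm_subs` rewrite below), assuming the new defaults for `token_url` and `scopes`; the client ID/secret values and the endpoint URL are placeholders, not real credentials or the canonical endpoint:

```python
from awx.main.utils.analytics_proxy import OIDCClient

# token_url falls back to DEFAULT_OIDC_TOKEN_ENDPOINT and scopes to ['api.console']
client = OIDCClient('my-service-account-id', 'my-service-account-secret')

response = client.make_request(
    'GET',
    'https://console.redhat.com/api/rhsm/v2/products',  # placeholder endpoint
    verify=True,
    timeout=(31, 31),
)
response.raise_for_status()
```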
@@ -4,6 +4,7 @@
# Python
import base64
import logging
import logging.handlers
import sys
import traceback
import os
@@ -27,6 +28,9 @@ from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource


__all__ = ['RSysLogHandler', 'SpecialInventoryHandler', 'ColorHandler']


class RSysLogHandler(logging.handlers.SysLogHandler):
    append_nul = False

@@ -109,39 +113,35 @@ class SpecialInventoryHandler(logging.Handler):


if settings.COLOR_LOGS is True:
    try:
        from logutils.colorize import ColorizingStreamHandler
        import colorama
        from logutils.colorize import ColorizingStreamHandler
        import colorama

        colorama.deinit()
        colorama.init(wrap=False, convert=False, strip=False)
        colorama.deinit()
        colorama.init(wrap=False, convert=False, strip=False)

        class ColorHandler(ColorizingStreamHandler):
            def colorize(self, line, record):
                # comment out this method if you don't like the job_lifecycle
                # logs rendered with cyan text
                previous_level_map = self.level_map.copy()
                if record.name == "awx.analytics.job_lifecycle":
                    self.level_map[logging.INFO] = (None, 'cyan', True)
                msg = super(ColorHandler, self).colorize(line, record)
                self.level_map = previous_level_map
                return msg
        class ColorHandler(ColorizingStreamHandler):
            def colorize(self, line, record):
                # comment out this method if you don't like the job_lifecycle
                # logs rendered with cyan text
                previous_level_map = self.level_map.copy()
                if record.name == "awx.analytics.job_lifecycle":
                    self.level_map[logging.INFO] = (None, 'cyan', True)
                msg = super(ColorHandler, self).colorize(line, record)
                self.level_map = previous_level_map
                return msg

            def format(self, record):
                message = logging.StreamHandler.format(self, record)
                return '\n'.join([self.colorize(line, record) for line in message.splitlines()])
            def format(self, record):
                message = logging.StreamHandler.format(self, record)
                return '\n'.join([self.colorize(line, record) for line in message.splitlines()])

            level_map = {
                logging.DEBUG: (None, 'green', True),
                logging.INFO: (None, None, True),
                logging.WARNING: (None, 'yellow', True),
                logging.ERROR: (None, 'red', True),
                logging.CRITICAL: (None, 'red', True),
            }
            level_map = {
                logging.DEBUG: (None, 'green', True),
                logging.INFO: (None, None, True),
                logging.WARNING: (None, 'yellow', True),
                logging.ERROR: (None, 'red', True),
                logging.CRITICAL: (None, 'red', True),
            }

    except ImportError:
        # logutils is only used for colored logs in the dev environment
        pass
else:
    ColorHandler = logging.StreamHandler

@@ -38,6 +38,7 @@ from django.utils.translation import gettext_lazy as _
from awx_plugins.interfaces._temporary_private_licensing_api import detect_server_product_name

from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
from awx.main.utils.analytics_proxy import OIDCClient

MAX_INSTANCES = 9999999

@@ -228,37 +229,38 @@ class Licenser(object):
        host = getattr(settings, 'REDHAT_CANDLEPIN_HOST', None)

        if not user:
            raise ValueError('subscriptions_username is required')
            raise ValueError('subscriptions_client_id is required')

        if not pw:
            raise ValueError('subscriptions_password is required')
            raise ValueError('subscriptions_client_secret is required')

        if host and user and pw:
            if 'subscription.rhsm.redhat.com' in host:
                json = self.get_rhsm_subs(host, user, pw)
                json = self.get_rhsm_subs(settings.SUBSCRIPTIONS_RHSM_URL, user, pw)
            else:
                json = self.get_satellite_subs(host, user, pw)
            return self.generate_license_options_from_entitlements(json)
        return []

    def get_rhsm_subs(self, host, user, pw):
        verify = getattr(settings, 'REDHAT_CANDLEPIN_VERIFY', True)
        json = []
        try:
            subs = requests.get('/'.join([host, 'subscription/users/{}/owners'.format(user)]), verify=verify, auth=(user, pw))
        except requests.exceptions.ConnectionError as error:
            raise error
        except OSError as error:
            raise OSError(
                'Unable to open certificate bundle {}. Check that the service is running on Red Hat Enterprise Linux.'.format(verify)
            ) from error  # noqa
        subs.raise_for_status()
    def get_rhsm_subs(self, host, client_id, client_secret):
        client = OIDCClient(client_id, client_secret)
        subs = client.make_request(
            'GET',
            host,
            verify=True,
            timeout=(31, 31),
        )

        for sub in subs.json():
            resp = requests.get('/'.join([host, 'subscription/owners/{}/pools/?match=*tower*'.format(sub['key'])]), verify=verify, auth=(user, pw))
            resp.raise_for_status()
            json.extend(resp.json())
        return json
        subs.raise_for_status()
        subs_formatted = []
        for sku in subs.json()['body']:
            sku_data = {k: v for k, v in sku.items() if k != 'subscriptions'}
            for sub in sku['subscriptions']:
                sub_data = sku_data.copy()
                sub_data['subscriptions'] = sub
                subs_formatted.append(sub_data)

        return subs_formatted

    def get_satellite_subs(self, host, user, pw):
        port = None
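The new `get_rhsm_subs` flattens the console.redhat.com payload into one entry per subscription, copying the SKU-level fields onto each. A standalone sketch of that reshaping, using a made-up payload that only mimics the `{'body': [...]}` structure the method expects:

```python
# Made-up example payload; real responses carry many more fields per SKU.
payload = {
    'body': [
        {
            'productId': 'MCT3695',
            'name': 'Red Hat Ansible Automation Platform',
            'subscriptions': [
                {'number': '111', 'endDate': '2026-01-01T00:00:00.000Z', 'quantity': '1'},
                {'number': '222', 'endDate': '2027-01-01T00:00:00.000Z', 'quantity': '2'},
            ],
        }
    ]
}

subs_formatted = []
for sku in payload['body']:
    # keep every SKU-level field except the nested subscription list
    sku_data = {k: v for k, v in sku.items() if k != 'subscriptions'}
    for sub in sku['subscriptions']:
        sub_data = sku_data.copy()
        sub_data['subscriptions'] = sub
        subs_formatted.append(sub_data)

# Two flattened entries, each carrying the SKU fields plus one subscription.
assert len(subs_formatted) == 2
assert subs_formatted[0]['subscriptions']['number'] == '111'
```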
@@ -267,7 +269,7 @@ class Licenser(object):
            port = str(self.config.get("server", "port"))
        except Exception as e:
            logger.exception('Unable to read rhsm config to get ca_cert location. {}'.format(str(e)))
        verify = getattr(settings, 'REDHAT_CANDLEPIN_VERIFY', True)
        verify = True
        if port:
            host = ':'.join([host, port])
        json = []
@@ -314,20 +316,11 @@ class Licenser(object):
            return False
        return True

    def is_appropriate_sub(self, sub):
        if sub['activeSubscription'] is False:
            return False
        # Products that contain Ansible Tower
        products = sub.get('providedProducts', [])
        if any(product.get('productId') == '480' for product in products):
            return True
        return False

    def generate_license_options_from_entitlements(self, json):
        from dateutil.parser import parse

        ValidSub = collections.namedtuple(
            'ValidSub', 'sku name support_level end_date trial developer_license quantity pool_id satellite subscription_id account_number usage'
            'ValidSub', 'sku name support_level end_date trial developer_license quantity satellite subscription_id account_number usage'
        )
        valid_subs = []
        for sub in json:
@@ -335,10 +328,14 @@ class Licenser(object):
            if satellite:
                is_valid = self.is_appropriate_sat_sub(sub)
            else:
                is_valid = self.is_appropriate_sub(sub)
                # the list of subs from console.redhat.com are already valid based on the query params we provided
                is_valid = True
            if is_valid:
                try:
                    end_date = parse(sub.get('endDate'))
                    if satellite:
                        end_date = parse(sub.get('endDate'))
                    else:
                        end_date = parse(sub['subscriptions']['endDate'])
                except Exception:
                    continue
                now = datetime.utcnow()
@@ -346,44 +343,50 @@ class Licenser(object):
                if end_date < now:
                    # If the sub has a past end date, skip it
                    continue
                try:
                    quantity = int(sub['quantity'])
                    if quantity == -1:
                        # effectively, unlimited
                        quantity = MAX_INSTANCES
                except Exception:
                    continue

                sku = sub['productId']
                trial = sku.startswith('S')  # i.e. SER/SVC
                developer_license = False
                support_level = ''
                usage = ''
                pool_id = sub['id']
                subscription_id = sub['subscriptionId']
                account_number = sub['accountNumber']
                account_number = ''
                usage = sub.get('usage', '')
                if satellite:
                    try:
                        quantity = int(sub['quantity'])
                    except Exception:
                        continue
                    sku = sub['productId']
                    subscription_id = sub['subscriptionId']
                    sub_name = sub['productName']
                    support_level = sub['support_level']
                    usage = sub['usage']
                    account_number = sub['accountNumber']
                else:
                    for attr in sub.get('productAttributes', []):
                        if attr.get('name') == 'support_level':
                            support_level = attr.get('value')
                        elif attr.get('name') == 'usage':
                            usage = attr.get('value')
                        elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
                            developer_license = True
                    try:
                        if sub['capacity']['name'] == "Nodes":
                            quantity = int(sub['capacity']['quantity']) * int(sub['subscriptions']['quantity'])
                        else:
                            continue
                    except Exception:
                        continue
                    sku = sub['sku']
                    sub_name = sub['name']
                    support_level = sub['serviceLevel']
                    subscription_id = sub['subscriptions']['number']
                    if sub.get('name') == 'RHEL Developer':
                        developer_license = True

                if quantity == -1:
                    # effectively, unlimited
                    quantity = MAX_INSTANCES
                trial = sku.startswith('S')  # i.e. SER/SVC

                valid_subs.append(
                    ValidSub(
                        sku,
                        sub['productName'],
                        sub_name,
                        support_level,
                        end_date,
                        trial,
                        developer_license,
                        quantity,
                        pool_id,
                        satellite,
                        subscription_id,
                        account_number,
@@ -414,7 +417,6 @@ class Licenser(object):
            license._attrs['satellite'] = satellite
            license._attrs['valid_key'] = True
            license.update(license_date=int(sub.end_date.strftime('%s')))
            license.update(pool_id=sub.pool_id)
            license.update(subscription_id=sub.subscription_id)
            license.update(account_number=sub.account_number)
            licenses.append(license._attrs.copy())

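A worked example (with made-up numbers) of the end-date and quantity handling shown above for a non-satellite console.redhat.com entry: the end date comes from the nested subscription record, and a "Nodes" capacity is multiplied by the subscription quantity:

```python
from dateutil.parser import parse

MAX_INSTANCES = 9999999

# Made-up flattened entry in the shape produced by get_rhsm_subs.
sub = {
    'sku': 'MCT3695',
    'capacity': {'name': 'Nodes', 'quantity': '100'},
    'subscriptions': {'number': '111', 'quantity': '2', 'endDate': '2026-01-01T00:00:00.000Z'},
}

end_date = parse(sub['subscriptions']['endDate'])

# The real code skips entries whose capacity is not counted in "Nodes".
if sub['capacity']['name'] == 'Nodes':
    quantity = int(sub['capacity']['quantity']) * int(sub['subscriptions']['quantity'])
if quantity == -1:
    quantity = MAX_INSTANCES  # effectively unlimited

assert quantity == 200
```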
@@ -80,10 +80,6 @@ LANGUAGE_CODE = 'en-us'
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

USE_TZ = True

STATICFILES_DIRS = [
@@ -968,6 +964,9 @@ CLUSTER_HOST_ID = socket.gethostname()
# - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
SUBSCRIPTION_USAGE_MODEL = ''

# Default URL and query params for obtaining valid AAP subscriptions
SUBSCRIPTIONS_RHSM_URL = 'https://console.redhat.com/api/rhsm/v2/products?include=providedProducts&oids=480&status=Active'

# Host metrics cleanup - last time of the task/command run
CLEANUP_HOST_METRICS_LAST_TS = None
# Host metrics cleanup - minimal interval between two cleanups in days

@@ -257,6 +257,8 @@ def main():
    copy_lookup_data = lookup_data
    if organization:
        lookup_data['organization'] = org_id
    if user:
        lookup_data['organization'] = None

    credential = module.get_one('credentials', name_or_id=name, check_exists=(state == 'exists'), **{'data': lookup_data})

@@ -290,8 +292,11 @@ def main():

    if inputs:
        credential_fields['inputs'] = inputs
    if description:
        credential_fields['description'] = description
    if description is not None:
        if description == '':
            credential_fields['description'] = ''
        else:
            credential_fields['description'] = description
    if organization:
        credential_fields['organization'] = org_id


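Both this hunk and the credential_type hunk below replace a truthiness check with an explicit `is not None` test so that an empty string can clear the field. A small standalone illustration of the difference (the helper name and values are hypothetical):

```python
def build_fields(description):
    fields = {}
    if description is not None:  # '' is a real value meaning "clear the description"
        fields['description'] = description
    return fields

assert build_fields(None) == {}                          # parameter omitted: leave untouched
assert build_fields('') == {'description': ''}           # explicit empty string: clear it
assert build_fields('prod creds') == {'description': 'prod creds'}

# With the old `if description:` check, '' fell through and the field could never be cleared.
```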
@@ -116,8 +116,11 @@ def main():
    }
    if kind:
        credential_type_params['kind'] = kind
    if module.params.get('description'):
        credential_type_params['description'] = module.params.get('description')
    if module.params.get('description') is not None:
        if module.params.get('description') == '':
            credential_type_params['description'] = ''
        else:
            credential_type_params['description'] = module.params.get('description')
    if module.params.get('inputs'):
        credential_type_params['inputs'] = module.params.get('inputs')
    if module.params.get('injectors'):

@@ -31,9 +31,9 @@ options:
        unlicensed or trial licensed. When force=true, the license is always applied.
      type: bool
      default: 'False'
    pool_id:
    subscription_id:
      description:
        - Red Hat or Red Hat Satellite pool_id to attach to
        - Red Hat or Red Hat Satellite subscription_id to attach to
      required: False
      type: str
    state:
@@ -57,9 +57,9 @@ EXAMPLES = '''
    username: "my_satellite_username"
    password: "my_satellite_password"

- name: Attach to a pool (requires fetching subscriptions at least once before)
- name: Attach to a subscription (requires fetching subscriptions at least once before)
  license:
    pool_id: 123456
    subscription_id: 123456

- name: Remove license
  license:
@@ -75,14 +75,14 @@ def main():
    module = ControllerAPIModule(
        argument_spec=dict(
            manifest=dict(type='str', required=False),
            pool_id=dict(type='str', required=False),
            subscription_id=dict(type='str', required=False),
            force=dict(type='bool', default=False),
            state=dict(choices=['present', 'absent'], default='present'),
        ),
        required_if=[
            ['state', 'present', ['manifest', 'pool_id'], True],
            ['state', 'present', ['manifest', 'subscription_id'], True],
        ],
        mutually_exclusive=[("manifest", "pool_id")],
        mutually_exclusive=[("manifest", "subscription_id")],
    )

    json_output = {'changed': False}
@@ -124,7 +124,7 @@ def main():
    if module.params.get('manifest', None):
        module.post_endpoint('config', data={'manifest': manifest.decode()})
    else:
        module.post_endpoint('config/attach', data={'pool_id': module.params.get('pool_id')})
        module.post_endpoint('config/attach', data={'subscription_id': module.params.get('subscription_id')})

    module.exit_json(**json_output)


@@ -20,15 +20,15 @@ description:
    - Get subscriptions available to Automation Platform Controller. See
      U(https://www.ansible.com/tower) for an overview.
options:
    username:
    client_id:
      description:
        - Red Hat or Red Hat Satellite username to get available subscriptions.
        - Red Hat service account client ID or Red Hat Satellite username to get available subscriptions.
        - The credentials you use will be stored for future use in retrieving renewal or expanded subscriptions
      required: True
      type: str
    password:
    client_secret:
      description:
        - Red Hat or Red Hat Satellite password to get available subscriptions.
        - Red Hat service account client secret or Red Hat Satellite password to get available subscriptions.
        - The credentials you use will be stored for future use in retrieving renewal or expanded subscriptions
      required: True
      type: str
@@ -53,13 +53,13 @@ subscriptions:
EXAMPLES = '''
- name: Get subscriptions
  subscriptions:
    username: "my_username"
    password: "My Password"
    client_id: "c6bd7594-d776-46e5-8156-6d17af147479"
    client_secret: "MO9QUvoOZ5fc5JQKXoTch1AsTLI7nFsZ"

- name: Get subscriptions with a filter
  subscriptions:
    username: "my_username"
    password: "My Password"
    client_id: "c6bd7594-d776-46e5-8156-6d17af147479"
    client_secret: "MO9QUvoOZ5fc5JQKXoTch1AsTLI7nFsZ"
    filters:
      product_name: "Red Hat Ansible Automation Platform"
      support_level: "Self-Support"
@@ -72,8 +72,8 @@ def main():

    module = ControllerAPIModule(
        argument_spec=dict(
            username=dict(type='str', required=True),
            password=dict(type='str', no_log=True, required=True),
            client_id=dict(type='str', required=True),
            client_secret=dict(type='str', no_log=True, required=True),
            filters=dict(type='dict', required=False, default={}),
        ),
    )
@@ -82,8 +82,8 @@ def main():

    # Check if Tower is already licensed
    post_data = {
        'subscriptions_password': module.params.get('password'),
        'subscriptions_username': module.params.get('username'),
        'subscriptions_client_secret': module.params.get('client_secret'),
        'subscriptions_client_id': module.params.get('client_id'),
    }
    all_subscriptions = module.post_endpoint('config/subscriptions', data=post_data)['json']
    json_output['subscriptions'] = []

@@ -47,6 +47,7 @@ These can be specified via (from highest to lowest precedence):

- direct module parameters
- environment variables (most useful when running against localhost)
- a config file path specified by the `tower_config_file` parameter
- a config file at `./tower_cli.cfg`, i.e. in the current directory
- a config file at `~/.tower_cli.cfg`
- a config file at `/etc/tower/tower_cli.cfg`
@@ -60,6 +61,15 @@ username = foo
password = bar
```

or like this:

```
host: https://localhost:8043
verify_ssl: true
oauth_token: <token>

```

## Release and Upgrade Notes

Notable releases of the `{{ collection_namespace }}.{{ collection_package }}` collection:

16
pytest.ini
@@ -15,10 +15,6 @@ markers =
filterwarnings =
    error

    # NOTE: The following are introduced upgrading python 3.11 to python 3.12
    # FIXME: Upgrade django-polymorphic https://github.com/jazzband/django-polymorphic/pull/541
    once:Deprecated call to `pkg_resources.declare_namespace\('sphinxcontrib'\)`.\nImplementing implicit namespace packages \(as specified in PEP 420\) is preferred to `pkg_resources.declare_namespace`.:DeprecationWarning

    # FIXME: Upgrade protobuf https://github.com/protocolbuffers/protobuf/issues/15077
    once:Type google._upb._message.* uses PyType_Spec with a metaclass that has custom tp_new:DeprecationWarning

@@ -29,9 +25,6 @@ filterwarnings =
    # FIXME: Set `USE_TZ` to `True`.
    once:The default value of USE_TZ will change from False to True in Django 5.0. Set USE_TZ to False in your project settings if you want to keep the current default behavior.:django.utils.deprecation.RemovedInDjango50Warning:django.conf

    # FIXME: Delete this entry once `USE_L10N` use is removed.
    once:The USE_L10N setting is deprecated. Starting with Django 5.0, localized formatting of data will always be enabled. For example Django will display numbers and dates using the format of the current locale.:django.utils.deprecation.RemovedInDjango50Warning:django.conf

    # FIXME: Delete this entry once `pyparsing` is updated.
    once:module 'sre_constants' is deprecated:DeprecationWarning:_pytest.assertion.rewrite

@@ -41,9 +34,6 @@ filterwarnings =
    # FIXME: Delete this entry once `zope` is updated.
    once:Deprecated call to `pkg_resources.declare_namespace.'zope'.`.\nImplementing implicit namespace packages .as specified in PEP 420. is preferred to `pkg_resources.declare_namespace`. See https.//setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages:DeprecationWarning:

    # FIXME: Delete this entry once `coreapi` is updated.
    once:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning:_pytest.assertion.rewrite

    # FIXME: Delete this entry once the use of `distutils` is exterminated from the repo.
    once:The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives:DeprecationWarning:_pytest.assertion.rewrite

@@ -79,12 +69,6 @@ filterwarnings =
    # FIXME: in `awx/main/analytics/collectors.py` and then delete the entry.
    once:distro.linux_distribution.. is deprecated. It should only be used as a compatibility shim with Python's platform.linux_distribution... Please use distro.id.., distro.version.. and distro.name.. instead.:DeprecationWarning:awx.main.analytics.collectors

    # FIXME: Figure this out, fix and then delete the entry.
    once:\nUsing ProtocolTypeRouter without an explicit "http" key is deprecated.\nGiven that you have not passed the "http" you likely should use Django's\nget_asgi_application...:DeprecationWarning:awx.main.routing

    # FIXME: Figure this out, fix and then delete the entry.
    once:Channel's inbuilt http protocol AsgiHandler is deprecated. Use Django's get_asgi_application.. instead.:DeprecationWarning:channels.routing

    # FIXME: Use `codecs.open()` via a context manager
    # FIXME: in `awx/main/utils/ansible.py` to close hanging file descriptors
    # FIXME: and then delete the entry.

@@ -14,7 +14,7 @@ cryptography<42.0.0  # investigation is needed for 42+ to work with OpenSSL v3.0
Cython
daphne
distro
django==4.2.16  # CVE-2024-24680
django==4.2.20  # CVE-2025-26699
django-cors-headers
django-crum
django-extensions
@@ -69,6 +69,3 @@ setuptools_scm[toml]  # see UPGRADE BLOCKERs, xmlsec build dep
setuptools-rust>=0.11.4  # cryptography build dep
pkgconfig>=1.5.1  # xmlsec build dep - needed for offline build
django-flags>=5.0.13
# Temporarily added to use ansible-runner from git branch, to be removed
# when ansible-runner moves from requirements_git.txt to here
pbr

@@ -122,7 +122,7 @@ deprecated==1.2.15
    #   pygithub
distro==1.9.0
    # via -r /awx_devel/requirements/requirements.in
django==4.2.16
django==4.2.20
    # via
    #   -r /awx_devel/requirements/requirements.in
    #   channels
@@ -167,7 +167,9 @@ djangorestframework-yaml==2.0.0
durationpy==0.9
    # via kubernetes
dynaconf==3.2.10
    # via -r /awx_devel/requirements/requirements.in
    # via
    #   -r /awx_devel/requirements/requirements.in
    #   django-ansible-base
enum-compat==0.0.3
    # via asn1
filelock==3.16.1
@@ -334,8 +336,6 @@ packaging==24.2
    #   ansible-runner
    #   opentelemetry-instrumentation
    #   setuptools-scm
pbr==6.1.0
    # via -r /awx_devel/requirements/requirements.in
pexpect==4.7.0
    # via
    #   -r /awx_devel/requirements/requirements.in

@@ -1,5 +1,4 @@
build
coreapi
django-debug-toolbar==3.2.4
django-test-migrations
drf-yasg<1.21.10  # introduces new DeprecationWarning that is turned into error

@@ -1,5 +1,4 @@
git+https://github.com/ansible/system-certifi.git@devel#egg=certifi
# Remove pbr from requirements.in when moving ansible-runner to requirements.in
git+https://github.com/ansible/ansible-runner.git@devel#egg=ansible-runner
awx-plugins-core @ git+https://github.com/ansible/awx-plugins.git@devel#egg=awx-plugins-core[credentials-github-app]
django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel#egg=django-ansible-base[rest-filters,jwt_consumer,resource-registry,rbac,feature-flags]

@@ -16,4 +16,11 @@ Get the usage.

```
python generate-sheet.py -h
```

## Adding a GitHub Personal Access Token

The script first looks for a GitHub personal access token so that its API calls are not rate limited; you can create a new token or reuse an existing one. The script reads the token from the `GITHUB_ACCESS_TOKEN` environment variable.


# For internal spreadsheet usage
AWX engineers will need to import the data generated by the script into a spreadsheet manager. When importing, do not replace the existing sheets; create a new spreadsheet or add a new sheet inside the existing one.
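For illustration only, a minimal sketch of how a token read from `GITHUB_ACCESS_TOKEN` is commonly attached to GitHub API calls with `requests`; the endpoint URL is just an example, and `generate-sheet.py` may differ in its details:

```python
import os

import requests

headers = {}
token = os.environ.get('GITHUB_ACCESS_TOKEN')
if token:
    # Authenticated requests get a much higher GitHub API rate limit.
    headers['Authorization'] = f'token {token}'

# Example call; the real script builds its own queries.
resp = requests.get('https://api.github.com/repos/ansible/awx/pulls', headers=headers)
resp.raise_for_status()
```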
@@ -1,2 +1,3 @@
requests
pyexcel
pyexcel-ods3

@@ -42,7 +42,6 @@ services:
      DJANGO_SUPERUSER_PASSWORD: {{ admin_password }}
      UWSGI_MOUNT_PATH: {{ ingress_path }}
      DJANGO_COLORS: "${DJANGO_COLORS:-}"
      DJANGO_SETTINGS_MODULE: "awx.settings"
      {% if loop.index == 1 %}
      RUN_MIGRATIONS: 1
      {% endif %}

16
tools/scripts/firehose_tasks.py
Executable file
@@ -0,0 +1,16 @@
#!/usr/bin/env python

from django import setup

from awx import prepare_env

prepare_env()

setup()

# Keeping this in test folder allows it to be importable
from awx.main.tests.data.sleep_task import sleep_task


for i in range(634):
    sleep_task.delay()