Mirror of https://github.com/ansible/awx.git, synced 2026-02-07 04:28:23 -03:30

Compare commits: change_cap ... upgrade-sq (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 543d3f940b |  |
|  | ee7edb9179 |  |

.github/PULL_REQUEST_TEMPLATE.md (vendored): 8 lines changed
@@ -4,8 +4,7 @@
<!---
If you are fixing an existing issue, please include "related #nnn" in your
commit message and your description; but you should still explain what
the change does. Also please make sure that if this PR has an attached JIRA, put AAP-<number>
in as the first entry for your PR title.
the change does.
-->

##### ISSUE TYPE
@@ -23,6 +22,11 @@ in as the first entry for your PR title.
- Docs
- Other

##### AWX VERSION
<!--- Paste verbatim output from `make VERSION` between quotes below -->
```

```

##### ADDITIONAL INFORMATION
.gitignore (vendored): 2 lines changed
@@ -150,8 +150,6 @@ use_dev_supervisor.txt

awx/ui/src
awx/ui/build
awx/ui/.ui-built
awx/ui_next

# Docs build stuff
docs/docsite/build/
@@ -7,7 +7,6 @@ import json
import logging
import re
import yaml
import urllib.parse
from collections import Counter, OrderedDict
from datetime import timedelta
from uuid import uuid4
@@ -46,9 +45,6 @@ from ansible_base.lib.utils.models import get_type_for_model
from ansible_base.rbac.models import RoleEvaluation, ObjectRole
from ansible_base.rbac import permission_registry

# django-flags
from flags.state import flag_enabled

# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import ACTIVE_STATES, org_role_to_permission
@@ -120,7 +116,6 @@ from awx.main.utils import (
from awx.main.utils.filters import SmartFilter
from awx.main.utils.plugins import load_combined_inventory_source_options
from awx.main.utils.named_url_graph import reset_counters
from awx.main.utils.inventory_vars import update_group_variables
from awx.main.scheduler.task_manager_models import TaskManagerModels
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.signals import update_inventory_computed_fields
@@ -737,25 +732,7 @@ class EmptySerializer(serializers.Serializer):
    pass


class OpaQueryPathEnabledMixin(serializers.Serializer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if not flag_enabled("FEATURE_POLICY_AS_CODE_ENABLED") and 'opa_query_path' in self.fields:
            self.fields.pop('opa_query_path')

    def validate_opa_query_path(self, value):
        # Decode the URL and re-encode it
        decoded_value = urllib.parse.unquote(value)
        re_encoded_value = urllib.parse.quote(decoded_value, safe='/')

        if value != re_encoded_value:
            raise serializers.ValidationError(_("The URL must be properly encoded."))

        return value


class UnifiedJobTemplateSerializer(BaseSerializer, OpaQueryPathEnabledMixin):
class UnifiedJobTemplateSerializer(BaseSerializer):
    # As a base serializer, the capabilities prefetch is not used directly,
    # instead they are derived from the Workflow Job Template Serializer and the Job Template Serializer, respectively.
    capabilities_prefetch = []
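Editor's note: the hunk above removes the `validate_opa_query_path` check; the round-trip idea it used is small enough to show standalone. The sketch below is not AWX code, just the same urllib.parse normalization in isolation (function name and sample paths are made up).

```python
# Standalone sketch of the encoding check from validate_opa_query_path above.
import urllib.parse


def is_properly_encoded(path: str) -> bool:
    # Decode once, then re-encode keeping '/' literal; a properly encoded
    # value round-trips to itself, an unencoded space does not.
    decoded = urllib.parse.unquote(path)
    return path == urllib.parse.quote(decoded, safe='/')


assert is_properly_encoded("v1/data/aap%20policy")
assert not is_properly_encoded("v1/data/aap policy")
```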
@@ -1188,12 +1165,12 @@ class UserActivityStreamSerializer(UserSerializer):
        fields = ('*', '-is_system_auditor')


class OrganizationSerializer(BaseSerializer, OpaQueryPathEnabledMixin):
class OrganizationSerializer(BaseSerializer):
    show_capabilities = ['edit', 'delete']

    class Meta:
        model = Organization
        fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment', 'opa_query_path')
        fields = ('*', 'max_hosts', 'custom_virtualenv', 'default_environment')
        read_only_fields = ('*', 'custom_virtualenv')

    def get_related(self, obj):
@@ -1547,7 +1524,7 @@ class LabelsListMixin(object):
        return res


class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQueryPathEnabledMixin):
class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables):
    show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
    capabilities_prefetch = ['admin', 'adhoc', {'copy': 'organization.inventory_admin'}]

@@ -1568,7 +1545,6 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery
            'inventory_sources_with_failures',
            'pending_deletion',
            'prevent_instance_group_fallback',
            'opa_query_path',
        )

    def get_related(self, obj):
@@ -1638,68 +1614,8 @@ class InventorySerializer(LabelsListMixin, BaseSerializerWithVariables, OpaQuery

        if kind == 'smart' and not host_filter:
            raise serializers.ValidationError({'host_filter': _('Smart inventories must specify host_filter')})

        return super(InventorySerializer, self).validate(attrs)

    @staticmethod
    def _update_variables(variables, inventory_id):
        """
        Update the inventory variables of the 'all'-group.

        The variables field contains vars from the inventory dialog, hence
        representing the "all"-group variables.

        Since this is not an update from an inventory source, we update the
        variables when the inventory details form is saved.

        A user edit on the inventory variables is considered a reset of the
        variables update history. Particularly if the user removes a variable by
        editing the inventory variables field, the variable is not supposed to
        reappear with a value from a previous inventory source update.

        We achieve this by forcing `reset=True` on such an update.

        As a side-effect, variables which have been set by source updates and
        have survived a user-edit (i.e. they have not been deleted from the
        variables field) will be assumed to originate from the user edit and are
        thus no longer deleted from the inventory when they are removed from
        their original source!

        Note that we use the inventory source id -1 for user-edit updates
        because a regular inventory source cannot have an id of -1 since
        PostgreSQL assigns pk's starting from 1 (if this assumption doesn't hold
        true, we have to assign another special value for invsrc_id).

        :param str variables: The variables as plain text in yaml or json
            format.
        :param int inventory_id: The primary key of the related inventory
            object.
        """
        variables_dict = parse_yaml_or_json(variables, silent_failure=False)
        logger.debug(f"InventorySerializer._update_variables: {inventory_id=} {variables_dict=}, {variables=}")
        update_group_variables(
            group_id=None,  # `None` denotes the 'all' group (which doesn't have a pk).
            newvars=variables_dict,
            dbvars=None,
            invsrc_id=-1,
            inventory_id=inventory_id,
            reset=True,
        )

    def create(self, validated_data):
        """Called when a new inventory has to be created."""
        logger.debug(f"InventorySerializer.create({validated_data=}) >>>>")
        obj = super().create(validated_data)
        self._update_variables(validated_data.get("variables") or "", obj.id)
        return obj

    def update(self, obj, validated_data):
        """Called when an existing inventory is updated."""
        logger.debug(f"InventorySerializer.update({validated_data=}) >>>>")
        obj = super().update(obj, validated_data)
        self._update_variables(validated_data.get("variables") or "", obj.id)
        return obj


class ConstructedFieldMixin(serializers.Field):
    def get_attribute(self, instance):
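Editor's note: the docstring above describes the "user edit resets the variable history" rule (`invsrc_id=-1`, `reset=True`). The toy below is not the AWX implementation of `update_group_variables`; it only illustrates, under that assumption, why a variable removed in a user edit does not reappear from an older source update.

```python
# Toy illustration of the reset semantics described in the docstring above.
history = {"from_source": {"value": 1, "invsrc_id": 42}}


def user_edit(new_vars: dict) -> dict:
    # reset=True: discard prior per-source history, keep only the edited vars,
    # recorded against the pseudo source id -1
    return {key: {"value": val, "invsrc_id": -1} for key, val in new_vars.items()}


history = user_edit({"kept": 2})  # "from_source" is gone and stays gone
assert "from_source" not in history
```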
@@ -1989,12 +1905,10 @@ class GroupSerializer(BaseSerializerWithVariables):
        return res

    def validate(self, attrs):
        # Do not allow the group name to conflict with an existing host name.
        name = force_str(attrs.get('name', self.instance and self.instance.name or ''))
        inventory = attrs.get('inventory', self.instance and self.instance.inventory or '')
        if Host.objects.filter(name=name, inventory=inventory).exists():
            raise serializers.ValidationError(_('A Host with that name already exists.'))
        #
        return super(GroupSerializer, self).validate(attrs)

    def validate_name(self, value):
@@ -3333,7 +3247,6 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
            'webhook_service',
            'webhook_credential',
            'prevent_instance_group_fallback',
            'opa_query_path',
        )
        read_only_fields = ('*', 'custom_virtualenv')
@@ -10,7 +10,7 @@ from awx.api.generics import APIView, Response
from awx.api.permissions import AnalyticsPermission
from awx.api.versioning import reverse
from awx.main.utils import get_awx_version
from awx.main.utils.analytics_proxy import OIDCClient
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT
from rest_framework import status

from collections import OrderedDict
@@ -202,16 +202,10 @@ class AnalyticsGenericView(APIView):
        if method not in ["GET", "POST", "OPTIONS"]:
            return self._error_response(ERROR_UNSUPPORTED_METHOD, method, remote=False, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
        url = self._get_analytics_url(request.path)
        using_subscriptions_credentials = False
        try:
            rh_user = getattr(settings, 'REDHAT_USERNAME', None)
            rh_password = getattr(settings, 'REDHAT_PASSWORD', None)
            if not (rh_user and rh_password):
                rh_user = self._get_setting('SUBSCRIPTIONS_CLIENT_ID', None, ERROR_MISSING_USER)
                rh_password = self._get_setting('SUBSCRIPTIONS_CLIENT_SECRET', None, ERROR_MISSING_PASSWORD)
                using_subscriptions_credentials = True

            client = OIDCClient(rh_user, rh_password)
            rh_user = self._get_setting('REDHAT_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('REDHAT_PASSWORD', None, ERROR_MISSING_PASSWORD)
            client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
            response = client.make_request(
                method,
                url,
@@ -222,17 +216,17 @@ class AnalyticsGenericView(APIView):
                timeout=(31, 31),
            )
        except requests.RequestException:
            # subscriptions credentials are not valid for basic auth, so just return 401
            if using_subscriptions_credentials:
                response = Response(status=status.HTTP_401_UNAUTHORIZED)
            else:
                logger.error("Automation Analytics API request failed, trying base auth method")
                response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
            logger.error("Automation Analytics API request failed, trying base auth method")
            response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
        except MissingSettings:
            rh_user = self._get_setting('SUBSCRIPTIONS_USERNAME', None, ERROR_MISSING_USER)
            rh_password = self._get_setting('SUBSCRIPTIONS_PASSWORD', None, ERROR_MISSING_PASSWORD)
            response = self._base_auth_request(request, method, url, rh_user, rh_password, headers)
        #
        # Missing or wrong user/pass
        #
        if response.status_code == status.HTTP_401_UNAUTHORIZED:
            text = response.get('text', '').rstrip("\n")
            text = (response.text or '').rstrip("\n")
            return self._error_response(ERROR_UNAUTHORIZED, text, remote=True, remote_status_code=response.status_code)
        #
        # Not found, No entitlement or No data in Analytics
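Editor's note: both variants in the hunk above follow the same shape, try the OIDC token client first and fall back to basic auth on a requests failure, with the twist that subscriptions service-account credentials are not valid for basic auth and just become a 401. The sketch below is a generic rendering of that pattern under those assumptions, not the AWX view code; the two callables stand in for `OIDCClient.make_request` and `_base_auth_request`.

```python
# Minimal sketch of the OIDC-then-basic-auth fallback used above.
import requests


def send_with_fallback(oidc_request, basic_auth_request, using_service_account: bool):
    try:
        return oidc_request()
    except requests.RequestException:
        if using_service_account:
            return None  # caller maps this to HTTP 401: no basic-auth retry
        return basic_auth_request()
```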
@@ -32,7 +32,6 @@ from awx.api.versioning import URLPathVersioning, reverse, drf_reverse
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS
from awx.main.models import Project, Organization, Instance, InstanceGroup, JobTemplate
from awx.main.utils import set_environ
from awx.main.utils.analytics_proxy import TokenError
from awx.main.utils.licensing import get_licenser

logger = logging.getLogger('awx.api.views.root')
@@ -177,21 +176,19 @@ class ApiV2SubscriptionView(APIView):

    def post(self, request):
        data = request.data.copy()
        if data.get('subscriptions_client_secret') == '$encrypted$':
            data['subscriptions_client_secret'] = settings.SUBSCRIPTIONS_CLIENT_SECRET
        if data.get('subscriptions_password') == '$encrypted$':
            data['subscriptions_password'] = settings.SUBSCRIPTIONS_PASSWORD
        try:
            user, pw = data.get('subscriptions_client_id'), data.get('subscriptions_client_secret')
            user, pw = data.get('subscriptions_username'), data.get('subscriptions_password')
            with set_environ(**settings.AWX_TASK_ENV):
                validated = get_licenser().validate_rh(user, pw)
            if user:
                settings.SUBSCRIPTIONS_CLIENT_ID = data['subscriptions_client_id']
                settings.SUBSCRIPTIONS_USERNAME = data['subscriptions_username']
            if pw:
                settings.SUBSCRIPTIONS_CLIENT_SECRET = data['subscriptions_client_secret']
                settings.SUBSCRIPTIONS_PASSWORD = data['subscriptions_password']
        except Exception as exc:
            msg = _("Invalid Subscription")
            if isinstance(exc, TokenError) or (
                isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401
            ):
            if isinstance(exc, requests.exceptions.HTTPError) and getattr(getattr(exc, 'response', None), 'status_code', None) == 401:
                msg = _("The provided credentials are invalid (HTTP 401).")
            elif isinstance(exc, requests.exceptions.ProxyError):
                msg = _("Unable to connect to proxy server.")
@@ -218,12 +215,12 @@ class ApiV2AttachView(APIView):

    def post(self, request):
        data = request.data.copy()
        subscription_id = data.get('subscription_id', None)
        if not subscription_id:
            return Response({"error": _("No subscription ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
        user = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)
        if subscription_id and user and pw:
        pool_id = data.get('pool_id', None)
        if not pool_id:
            return Response({"error": _("No subscription pool ID provided.")}, status=status.HTTP_400_BAD_REQUEST)
        user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
        pw = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
        if pool_id and user and pw:
            data = request.data.copy()
            try:
                with set_environ(**settings.AWX_TASK_ENV):
@@ -242,7 +239,7 @@ class ApiV2AttachView(APIView):
            logger.exception(smart_str(u"Invalid subscription submitted."), extra=dict(actor=request.user.username))
            return Response({"error": msg}, status=status.HTTP_400_BAD_REQUEST)
        for sub in validated:
            if sub['subscription_id'] == subscription_id:
            if sub['pool_id'] == pool_id:
                sub['valid_key'] = True
                settings.LICENSE = sub
                return Response(sub)
@@ -10,7 +10,7 @@ from django.core.validators import URLValidator, _lazy_re_compile
from django.utils.translation import gettext_lazy as _

# Django REST Framework
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField, FloatField  # noqa
from rest_framework.fields import BooleanField, CharField, ChoiceField, DictField, DateTimeField, EmailField, IntegerField, ListField  # noqa
from rest_framework.serializers import PrimaryKeyRelatedField  # noqa

# AWX
@@ -207,8 +207,7 @@ class URLField(CharField):
        if self.allow_plain_hostname:
            try:
                url_parts = urlparse.urlsplit(value)
                looks_like_ipv6 = bool(url_parts.netloc and url_parts.netloc.startswith('[') and url_parts.netloc.endswith(']'))
                if not looks_like_ipv6 and url_parts.hostname and '.' not in url_parts.hostname:
                if url_parts.hostname and '.' not in url_parts.hostname:
                    netloc = '{}.local'.format(url_parts.hostname)
                    if url_parts.port:
                        netloc = '{}:{}'.format(netloc, url_parts.port)
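Editor's note: the URLField hunk above adds an IPv6 guard to the plain-hostname handling. The sketch below extracts that netloc logic into a free function so it can be exercised directly; the function name and sample URLs are illustrative, not part of AWX.

```python
# Sketch of the hostname normalization shown in the URLField hunk: single-label
# hostnames get a '.local' suffix so the stock URL validator accepts them, but
# bracketed IPv6 literals must be left alone.
from urllib.parse import urlsplit


def normalized_netloc(value: str) -> str:
    parts = urlsplit(value)
    looks_like_ipv6 = bool(parts.netloc and parts.netloc.startswith('[') and parts.netloc.endswith(']'))
    if not looks_like_ipv6 and parts.hostname and '.' not in parts.hostname:
        netloc = f'{parts.hostname}.local'
        if parts.port:
            netloc = f'{netloc}:{parts.port}'
        return netloc
    return parts.netloc


assert normalized_netloc('https://myhost:8443/api') == 'myhost.local:8443'
assert normalized_netloc('https://[::1]/api') == '[::1]'
```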
@@ -27,5 +27,5 @@ def _migrate_setting(apps, old_key, new_key, encrypted=False):


def prefill_rh_credentials(apps, schema_editor):
    _migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_CLIENT_ID', encrypted=False)
    _migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_CLIENT_SECRET', encrypted=True)
    _migrate_setting(apps, 'REDHAT_USERNAME', 'SUBSCRIPTIONS_USERNAME', encrypted=False)
    _migrate_setting(apps, 'REDHAT_PASSWORD', 'SUBSCRIPTIONS_PASSWORD', encrypted=True)
@@ -128,41 +128,3 @@ class TestURLField:
        else:
            with pytest.raises(ValidationError):
                field.run_validators(url)

    @pytest.mark.parametrize(
        "url, expect_error",
        [
            ("https://[1:2:3]", True),
            ("http://[1:2:3]", True),
            ("https://[2001:db8:3333:4444:5555:6666:7777:8888", True),
            ("https://2001:db8:3333:4444:5555:6666:7777:8888", True),
            ("https://[2001:db8:3333:4444:5555:6666:7777:8888]", False),
            ("https://[::1]", False),
            ("https://[::]", False),
            ("https://[2001:db8::1]", False),
            ("https://[2001:db8:0:0:0:0:1:1]", False),
            ("https://[fe80::2%eth0]", True),  # ipv6 scope identifier
            ("https://[fe80:0:0:0:200:f8ff:fe21:67cf]", False),
            ("https://[::ffff:192.168.1.10]", False),
            ("https://[0:0:0:0:0:ffff:c000:0201]", False),
            ("https://[2001:0db8:000a:0001:0000:0000:0000:0000]", False),
            ("https://[2001:db8:a:1::]", False),
            ("https://[ff02::1]", False),
            ("https://[ff02:0:0:0:0:0:0:1]", False),
            ("https://[fc00::1]", False),
            ("https://[fd12:3456:789a:1::1]", False),
            ("https://[2001:db8::abcd:ef12:3456:7890]", False),
            ("https://[2001:db8:0000:abcd:0000:ef12:0000:3456]", False),
            ("https://[::ffff:10.0.0.1]", False),
            ("https://[2001:db8:cafe::]", False),
            ("https://[2001:db8:cafe:0:0:0:0:0]", False),
            ("https://[fe80::210:f3ff:fedf:4567%3]", True),  # ipv6 scope identifier, numerical interface
        ],
    )
    def test_ipv6_urls(self, url, expect_error):
        field = URLField()
        if expect_error:
            with pytest.raises(ValidationError, match="Enter a valid URL"):
                field.run_validators(url)
        else:
            field.run_validators(url)
@@ -22,7 +22,7 @@ from ansible_base.lib.utils.db import advisory_lock
from awx.main.models import Job
from awx.main.access import access_registry
from awx.main.utils import get_awx_http_client_headers, set_environ, datetime_hook
from awx.main.utils.analytics_proxy import OIDCClient
from awx.main.utils.analytics_proxy import OIDCClient, DEFAULT_OIDC_TOKEN_ENDPOINT

__all__ = ['register', 'gather', 'ship']

@@ -186,7 +186,7 @@ def gather(dest=None, module=None, subset=None, since=None, until=None, collecti

    if not (
        settings.AUTOMATION_ANALYTICS_URL
        and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_CLIENT_ID and settings.SUBSCRIPTIONS_CLIENT_SECRET))
        and ((settings.REDHAT_USERNAME and settings.REDHAT_PASSWORD) or (settings.SUBSCRIPTIONS_USERNAME and settings.SUBSCRIPTIONS_PASSWORD))
    ):
        logger.log(log_level, "Not gathering analytics, configuration is invalid. Use --dry-run to gather locally without sending.")
        return None
@@ -368,20 +368,8 @@ def ship(path):
        logger.error('AUTOMATION_ANALYTICS_URL is not set')
        return False

    rh_id = getattr(settings, 'REDHAT_USERNAME', None)
    rh_secret = getattr(settings, 'REDHAT_PASSWORD', None)

    if not (rh_id and rh_secret):
        rh_id = getattr(settings, 'SUBSCRIPTIONS_CLIENT_ID', None)
        rh_secret = getattr(settings, 'SUBSCRIPTIONS_CLIENT_SECRET', None)

    if not rh_id:
        logger.error('Neither REDHAT_USERNAME nor SUBSCRIPTIONS_CLIENT_ID are set')
        return False

    if not rh_secret:
        logger.error('Neither REDHAT_PASSWORD nor SUBSCRIPTIONS_CLIENT_SECRET are set')
        return False
    rh_user = getattr(settings, 'REDHAT_USERNAME', None)
    rh_password = getattr(settings, 'REDHAT_PASSWORD', None)

    with open(path, 'rb') as f:
        files = {'file': (os.path.basename(path), f, settings.INSIGHTS_AGENT_MIME)}
@@ -389,13 +377,25 @@ def ship(path):
        s.headers = get_awx_http_client_headers()
        s.headers.pop('Content-Type')
        with set_environ(**settings.AWX_TASK_ENV):
            try:
                client = OIDCClient(rh_id, rh_secret)
                response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
            except requests.RequestException:
                logger.error("Automation Analytics API request failed, trying base auth method")
                response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_id, rh_secret), headers=s.headers, timeout=(31, 31))

            if rh_user and rh_password:
                try:
                    client = OIDCClient(rh_user, rh_password, DEFAULT_OIDC_TOKEN_ENDPOINT, ['api.console'])
                    response = client.make_request("POST", url, headers=s.headers, files=files, verify=settings.INSIGHTS_CERT_PATH, timeout=(31, 31))
                except requests.RequestException:
                    logger.error("Automation Analytics API request failed, trying base auth method")
                    response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
            elif not rh_user or not rh_password:
                logger.info('REDHAT_USERNAME and REDHAT_PASSWORD are not set, using SUBSCRIPTIONS_USERNAME and SUBSCRIPTIONS_PASSWORD')
                rh_user = getattr(settings, 'SUBSCRIPTIONS_USERNAME', None)
                rh_password = getattr(settings, 'SUBSCRIPTIONS_PASSWORD', None)
                if rh_user and rh_password:
                    response = s.post(url, files=files, verify=settings.INSIGHTS_CERT_PATH, auth=(rh_user, rh_password), headers=s.headers, timeout=(31, 31))
                elif not rh_user:
                    logger.error('REDHAT_USERNAME and SUBSCRIPTIONS_USERNAME are not set')
                    return False
                elif not rh_password:
                    logger.error('REDHAT_PASSWORD and SUBSCRIPTIONS_USERNAME are not set')
                    return False
    # Accept 2XX status_codes
    if response.status_code >= 300:
        logger.error('Upload failed with status {}, {}'.format(response.status_code, response.text))
awx/main/conf.py: 144 lines changed
@@ -4,7 +4,6 @@ import logging
# Django
from django.core.checks import Error
from django.utils.translation import gettext_lazy as _
from django.conf import settings

# Django REST Framework
from rest_framework import serializers
@@ -13,7 +12,6 @@ from rest_framework import serializers
from awx.conf import fields, register, register_validate
from awx.main.models import ExecutionEnvironment
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
from awx.main.tasks.policy import OPA_AUTH_TYPES

logger = logging.getLogger('awx.main.conf')
@@ -126,8 +124,8 @@ register(
    allow_blank=True,
    encrypted=False,
    read_only=False,
    label=_('Red Hat Client ID for Analytics'),
    help_text=_('Client ID used to send data to Automation Analytics'),
    label=_('Red Hat customer username'),
    help_text=_('This username is used to send data to Automation Analytics'),
    category=_('System'),
    category_slug='system',
)
@@ -139,34 +137,34 @@ register(
    allow_blank=True,
    encrypted=True,
    read_only=False,
    label=_('Red Hat Client Secret for Analytics'),
    help_text=_('Client secret used to send data to Automation Analytics'),
    label=_('Red Hat customer password'),
    help_text=_('This password is used to send data to Automation Analytics'),
    category=_('System'),
    category_slug='system',
)

register(
    'SUBSCRIPTIONS_CLIENT_ID',
    'SUBSCRIPTIONS_USERNAME',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    encrypted=False,
    read_only=False,
    label=_('Red Hat Client ID for Subscriptions'),
    help_text=_('Client ID used to retrieve subscription and content information'),  # noqa
    label=_('Red Hat or Satellite username'),
    help_text=_('This username is used to retrieve subscription and content information'),  # noqa
    category=_('System'),
    category_slug='system',
)

register(
    'SUBSCRIPTIONS_CLIENT_SECRET',
    'SUBSCRIPTIONS_PASSWORD',
    field_class=fields.CharField,
    default='',
    allow_blank=True,
    encrypted=True,
    read_only=False,
    label=_('Red Hat Client Secret for Subscriptions'),
    help_text=_('Client secret used to retrieve subscription and content information'),  # noqa
    label=_('Red Hat or Satellite password'),
    help_text=_('This password is used to retrieve subscription and content information'),  # noqa
    category=_('System'),
    category_slug='system',
)
@@ -982,125 +980,3 @@ def csrf_trusted_origins_validate(serializer, attrs):


register_validate('system', csrf_trusted_origins_validate)


if settings.FEATURE_POLICY_AS_CODE_ENABLED:  # Unable to use flag_enabled due to AppRegistryNotReady error
    register(
        'OPA_HOST',
        field_class=fields.CharField,
        label=_('OPA server hostname'),
        default='',
        help_text=_('The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
        allow_blank=True,
    )

    register(
        'OPA_PORT',
        field_class=fields.IntegerField,
        label=_('OPA server port'),
        default=8181,
        help_text=_('The port used to connect to the OPA server. Defaults to 8181.'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )

    register(
        'OPA_SSL',
        field_class=fields.BooleanField,
        label=_('Use SSL for OPA connection'),
        default=False,
        help_text=_('Enable or disable the use of SSL to connect to the OPA server. Defaults to false.'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )

    register(
        'OPA_AUTH_TYPE',
        field_class=fields.ChoiceField,
        label=_('OPA authentication type'),
        choices=[OPA_AUTH_TYPES.NONE, OPA_AUTH_TYPES.TOKEN, OPA_AUTH_TYPES.CERTIFICATE],
        default=OPA_AUTH_TYPES.NONE,
        help_text=_('The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )

    register(
        'OPA_AUTH_TOKEN',
        field_class=fields.CharField,
        label=_('OPA authentication token'),
        default='',
        help_text=_(
            'The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.'
        ),
        category=('PolicyAsCode'),
        category_slug='policyascode',
        allow_blank=True,
        encrypted=True,
    )

    register(
        'OPA_AUTH_CLIENT_CERT',
        field_class=fields.CharField,
        label=_('OPA client certificate content'),
        default='',
        help_text=_('The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
        allow_blank=True,
    )

    register(
        'OPA_AUTH_CLIENT_KEY',
        field_class=fields.CharField,
        label=_('OPA client key content'),
        default='',
        help_text=_('The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
        allow_blank=True,
        encrypted=True,
    )

    register(
        'OPA_AUTH_CA_CERT',
        field_class=fields.CharField,
        label=_('OPA CA certificate content'),
        default='',
        help_text=_('The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
        allow_blank=True,
    )

    register(
        'OPA_AUTH_CUSTOM_HEADERS',
        field_class=fields.DictField,
        label=_('OPA custom authentication headers'),
        default={},
        help_text=_('Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )

    register(
        'OPA_REQUEST_TIMEOUT',
        field_class=fields.FloatField,
        label=_('OPA request timeout'),
        default=1.5,
        help_text=_('The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )

    register(
        'OPA_REQUEST_RETRIES',
        field_class=fields.IntegerField,
        label=_('OPA request retry count'),
        default=2,
        help_text=_('The number of retry attempts for connecting to the OPA server. Default is 2.'),
        category=('PolicyAsCode'),
        category_slug='policyascode',
    )
@@ -7,7 +7,6 @@ import time
import traceback
from datetime import datetime
from uuid import uuid4
import json

import collections
from multiprocessing import Process
@@ -26,10 +25,7 @@ from ansible_base.lib.logging.runtime import log_excess_runtime

from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper
from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity

# ansible-runner
from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity

if 'run_callback_receiver' in sys.argv:
    logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -311,41 +307,6 @@ class WorkerPool(object):
            logger.exception('could not kill {}'.format(worker.pid))


def get_auto_max_workers():
    """Method we normally rely on to get max_workers

    Uses almost same logic as Instance.local_health_check
    The important thing is to be MORE than Instance.capacity
    so that the task-manager does not over-schedule this node

    Ideally we would just use the capacity from the database plus reserve workers,
    but this poses some bootstrap problems where OCP task containers
    register themselves after startup
    """
    # Get memory from ansible-runner
    total_memory_gb = get_mem_in_bytes()

    # This may replace memory calculation with a user override
    corrected_memory = get_corrected_memory(total_memory_gb)

    # Get same number as max forks based on memory, this function takes memory as bytes
    mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)

    # Follow same process for CPU capacity constraint
    cpu_count = get_cpu_count()
    corrected_cpu = get_corrected_cpu(cpu_count)
    cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)

    # Here is what is different from health checks,
    auto_max = max(mem_capacity, cpu_capacity)

    # add magic number of extra workers to ensure
    # we have a few extra workers to run the heartbeat
    auto_max += 7

    return auto_max


class AutoscalePool(WorkerPool):
    """
    An extended pool implementation that automatically scales workers up and
@@ -359,7 +320,19 @@ class AutoscalePool(WorkerPool):
        super(AutoscalePool, self).__init__(*args, **kwargs)

        if self.max_workers is None:
            self.max_workers = get_auto_max_workers()
            settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
            if settings_absmem is not None:
                # There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
                total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
            else:
                total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up

            # Get same number as max forks based on memory, this function takes memory as bytes
            self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)

            # add magic prime number of extra workers to ensure
            # we have a few extra workers to run the heartbeat
            self.max_workers += 7

        # max workers can't be less than min_workers
        self.max_workers = max(self.min_workers, self.max_workers)
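Editor's note: both sizing strategies shown above end in the same arithmetic, take a capacity estimate and add 7 reserve workers for heartbeat work. The sketch below just illustrates that arithmetic with made-up capacity inputs; it is not AWX's `get_mem_effective_capacity` or health-check code.

```python
# Back-of-the-envelope sketch of the max_workers sizing used above.
def auto_max_workers(mem_capacity: int, cpu_capacity: int, reserve: int = 7) -> int:
    # take the larger capacity estimate so the dispatcher always has at least
    # as many workers as the task manager can schedule onto this node
    return max(mem_capacity, cpu_capacity) + reserve


assert auto_max_workers(mem_capacity=42, cpu_capacity=16) == 49
```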
@@ -373,9 +346,6 @@ class AutoscalePool(WorkerPool):
        self.scale_up_ct = 0
        self.worker_count_max = 0

        # last time we wrote current tasks, to avoid too much log spam
        self.last_task_list_log = time.monotonic()

    def produce_subsystem_metrics(self, metrics_object):
        metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
        metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
@@ -493,14 +463,6 @@ class AutoscalePool(WorkerPool):
        self.worker_count_max = new_worker_ct
        return ret

    @staticmethod
    def fast_task_serialization(current_task):
        try:
            return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
        except Exception:
            # just make sure this does not make things worse
            return str(current_task)

    def write(self, preferred_queue, body):
        if 'guid' in body:
            set_guid(body['guid'])
@@ -522,15 +484,6 @@ class AutoscalePool(WorkerPool):
                if isinstance(body, dict):
                    task_name = body.get('task')
                    logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
                    # Once every 10 seconds write out task list for debugging
                    if time.monotonic() - self.last_task_list_log >= 10.0:
                        task_counts = {}
                        for worker in self.workers:
                            task_slug = self.fast_task_serialization(worker.current_task)
                            task_counts.setdefault(task_slug, 0)
                            task_counts[task_slug] += 1
                        logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
                        self.last_task_list_log = time.monotonic()
            return super(AutoscalePool, self).write(preferred_queue, body)
        except Exception:
            for conn in connections.all():
@@ -238,7 +238,7 @@ class AWXConsumerPG(AWXConsumerBase):
    def run(self, *args, **kwargs):
        super(AWXConsumerPG, self).run(*args, **kwargs)

        logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
        logger.info(f"Running worker {self.name} listening to queues {self.queues}")
        init = False

        while True:

@@ -38,12 +38,5 @@ class PostRunError(Exception):
        super(PostRunError, self).__init__(msg)


class PolicyEvaluationError(Exception):
    def __init__(self, msg, status='failed', tb=''):
        self.status = status
        self.tb = tb
        super(PolicyEvaluationError, self).__init__(msg)


class ReceptorNodeNotFound(RuntimeError):
    pass
@@ -33,7 +33,6 @@ from awx.main.utils.safe_yaml import sanitize_jinja
from awx.main.models.rbac import batch_role_ancestor_rebuilding
from awx.main.utils import ignore_inventory_computed_fields, get_licenser
from awx.main.utils.execution_environments import get_default_execution_environment
from awx.main.utils.inventory_vars import update_group_variables
from awx.main.signals import disable_activity_stream
from awx.main.constants import STANDARD_INVENTORY_UPDATE_ENV

@@ -458,19 +457,19 @@ class Command(BaseCommand):
        """
        Update inventory variables from "all" group.
        """
        # TODO: We disable variable overwrite here in case user-defined inventory variables get
        # mangled. But we still need to figure out a better way of processing multiple inventory
        # update variables mixing with each other.
        # issue for this: https://github.com/ansible/awx/issues/11623

        if self.inventory.kind == 'constructed' and self.inventory_source.overwrite_vars:
            # NOTE: we had to add a exception case to not merge variables
            # to make constructed inventory coherent
            db_variables = self.all_group.variables
        else:
            db_variables = update_group_variables(
                group_id=None,  # `None` denotes the 'all' group (which doesn't have a pk).
                newvars=self.all_group.variables,
                dbvars=self.inventory.variables_dict,
                invsrc_id=self.inventory_source.id,
                inventory_id=self.inventory.id,
                overwrite_vars=self.overwrite_vars,
            )
            db_variables = self.inventory.variables_dict
            db_variables.update(self.all_group.variables)

        if db_variables != self.inventory.variables_dict:
            self.inventory.variables = json.dumps(db_variables)
            self.inventory.save(update_fields=['variables'])
@@ -1,61 +0,0 @@
|
||||
# Generated by Django 4.2.18 on 2025-02-27 20:35
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [('main', '0197_add_opa_query_path')]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='inventorysource',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('vmware_esxi', 'VMware ESXi'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='inventoryupdate',
|
||||
name='source',
|
||||
field=models.CharField(
|
||||
choices=[
|
||||
('file', 'File, Directory or Script'),
|
||||
('constructed', 'Template additional groups and hostvars at runtime'),
|
||||
('scm', 'Sourced from a Project'),
|
||||
('ec2', 'Amazon EC2'),
|
||||
('gce', 'Google Compute Engine'),
|
||||
('azure_rm', 'Microsoft Azure Resource Manager'),
|
||||
('vmware', 'VMware vCenter'),
|
||||
('vmware_esxi', 'VMware ESXi'),
|
||||
('satellite6', 'Red Hat Satellite 6'),
|
||||
('openstack', 'OpenStack'),
|
||||
('rhv', 'Red Hat Virtualization'),
|
||||
('controller', 'Red Hat Ansible Automation Platform'),
|
||||
('insights', 'Red Hat Insights'),
|
||||
('terraform', 'Terraform State'),
|
||||
('openshift_virtualization', 'OpenShift Virtualization'),
|
||||
],
|
||||
default=None,
|
||||
max_length=32,
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -5,7 +5,7 @@ from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [
        ('main', '0200_template_name_constraint'),
        ('main', '0197_add_opa_query_path'),
    ]

    operations = [

@@ -1,32 +0,0 @@
# Generated by Django 4.2.20 on 2025-04-24 09:08

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0198_alter_inventorysource_source_and_more'),
    ]

    operations = [
        migrations.CreateModel(
            name='InventoryGroupVariablesWithHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('variables', models.JSONField()),
                ('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.group')),
                (
                    'inventory',
                    models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inventory_group_variables', to='main.inventory'),
                ),
            ],
        ),
        migrations.AddConstraint(
            model_name='inventorygroupvariableswithhistory',
            constraint=models.UniqueConstraint(
                fields=('inventory', 'group'), name='unique_inventory_group', violation_error_message='Inventory/Group combination must be unique.'
            ),
        ),
    ]

@@ -5,7 +5,7 @@ from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [
        ('main', '0201_delete_profile'),
        ('main', '0198_delete_profile'),
    ]

    operations = [

@@ -6,7 +6,7 @@ from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('main', '0202_remove_sso_app_content'),
        ('main', '0199_remove_sso_app_content'),
    ]

    operations = [
@@ -1,50 +0,0 @@
# Generated by Django 4.2.20 on 2025-04-22 15:54

import logging

from django.db import migrations, models

from awx.main.migrations._db_constraints import _rename_duplicates


logger = logging.getLogger(__name__)


def rename_jts(apps, schema_editor):
    cls = apps.get_model('main', 'JobTemplate')
    _rename_duplicates(cls)


def rename_projects(apps, schema_editor):
    cls = apps.get_model('main', 'Project')
    _rename_duplicates(cls)


def change_inventory_source_org_unique(apps, schema_editor):
    cls = apps.get_model('main', 'InventorySource')
    r = cls.objects.update(org_unique=False)
    logger.info(f'Set database constraint rule for {r} inventory source objects')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0199_inventorygroupvariableswithhistory_and_more'),
    ]

    operations = [
        migrations.RunPython(rename_jts, migrations.RunPython.noop),
        migrations.RunPython(rename_projects, migrations.RunPython.noop),
        migrations.AddField(
            model_name='unifiedjobtemplate',
            name='org_unique',
            field=models.BooleanField(blank=True, default=True, editable=False, help_text='Used internally to selectively enforce database constraint on name'),
        ),
        migrations.RunPython(change_inventory_source_org_unique, migrations.RunPython.noop),
        migrations.AddConstraint(
            model_name='unifiedjobtemplate',
            constraint=models.UniqueConstraint(
                condition=models.Q(('org_unique', True)), fields=('polymorphic_ctype', 'name', 'organization'), name='ujt_hard_name_constraint'
            ),
        ),
    ]

@@ -6,7 +6,7 @@ from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('main', '0203_alter_inventorysource_source_and_more'),
        ('main', '0200_alter_inventorysource_source_and_more'),
    ]

    operations = [

@@ -8,7 +8,7 @@ from awx.main.migrations._create_system_jobs import delete_clear_tokens_sjt

class Migration(migrations.Migration):

    dependencies = [
        ('main', '0204_alter_oauth2application_unique_together_and_more'),
        ('main', '0201_alter_oauth2application_unique_together_and_more'),
    ]

    operations = [

@@ -1,22 +0,0 @@
# Generated by Django 4.2.20 on 2025-05-22 08:57

from decimal import Decimal
import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0205_delete_token_cleanup_job'),
    ]

    operations = [
        migrations.AlterField(
            model_name='instance',
            name='capacity_adjustment',
            field=models.DecimalField(
                decimal_places=2, default=Decimal('0.75'), max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('0'))]
            ),
        ),
    ]
@@ -1,25 +0,0 @@
|
||||
import logging
|
||||
|
||||
from django.db.models import Count
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _rename_duplicates(cls):
|
||||
field = cls._meta.get_field('name')
|
||||
max_len = field.max_length
|
||||
for organization_id in cls.objects.order_by().values_list('organization_id', flat=True).distinct():
|
||||
duplicate_data = cls.objects.values('name').filter(organization_id=organization_id).annotate(count=Count('name')).order_by().filter(count__gt=1)
|
||||
for data in duplicate_data:
|
||||
name = data['name']
|
||||
for idx, ujt in enumerate(cls.objects.filter(name=name, organization_id=organization_id).order_by('created')):
|
||||
if idx > 0:
|
||||
suffix = f'_dup{idx}'
|
||||
max_chars = max_len - len(suffix)
|
||||
if len(ujt.name) >= max_chars:
|
||||
ujt.name = ujt.name[:max_chars] + suffix
|
||||
else:
|
||||
ujt.name = ujt.name + suffix
|
||||
logger.info(f'Renaming duplicate {cls._meta.model_name} to `{ujt.name}` because of duplicate name entry')
|
||||
ujt.save(update_fields=['name'])
|
||||
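Editor's note: a worked example of the suffixing rule in `_rename_duplicates` above. The helper below is a standalone toy (name, signature, and the 512-character limit are illustrative, not the real model field value); it shows how the first object keeps its name and later duplicates get `_dupN`, truncated to fit.

```python
# Worked example of the duplicate-name suffixing shown above.
def dedup_name(name: str, idx: int, max_len: int = 512) -> str:
    if idx == 0:
        return name                       # first (oldest) object keeps its name
    suffix = f'_dup{idx}'
    max_chars = max_len - len(suffix)
    if len(name) >= max_chars:
        return name[:max_chars] + suffix  # truncate so the result still fits
    return name + suffix


assert dedup_name('Demo Job Template', 1) == 'Demo Job Template_dup1'
assert len(dedup_name('x' * 600, 2)) == 512
```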
@@ -33,7 +33,6 @@ from awx.main.models.inventory import ( # noqa
    InventorySource,
    InventoryUpdate,
    SmartInventoryMembership,
    InventoryGroupVariablesWithHistory,
)
from awx.main.models.jobs import ( # noqa
    Job,

@@ -24,7 +24,6 @@ from awx.main.managers import DeferJobCreatedManager
from awx.main.constants import MINIMAL_EVENTS
from awx.main.models.base import CreatedModifiedModel
from awx.main.utils import ignore_inventory_computed_fields, camelcase_to_underscore
from awx.main.utils.db import bulk_update_sorted_by_id

analytics_logger = logging.getLogger('awx.analytics.job_events')

@@ -603,7 +602,7 @@ class JobEvent(BasePlaybookEvent):
                h.last_job_host_summary_id = host_mapping[h.id]
                updated_hosts.add(h)

        bulk_update_sorted_by_id(Host, updated_hosts, ['last_job_id', 'last_job_host_summary_id'])
        Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

        # Create/update Host Metrics
        self._update_host_metrics(updated_hosts_list)

@@ -160,7 +160,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
        default=100,
        editable=False,
    )
    capacity_adjustment = models.DecimalField(default=Decimal(0.75), max_digits=3, decimal_places=2, validators=[MinValueValidator(Decimal(0.0))])
    capacity_adjustment = models.DecimalField(default=Decimal(1.0), max_digits=3, decimal_places=2, validators=[MinValueValidator(Decimal(0.0))])
    enabled = models.BooleanField(default=True)
    managed_by_policy = models.BooleanField(default=True)

@@ -1120,10 +1120,8 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions, CustomVirtualE

    def save(self, *args, **kwargs):
        # if this is a new object, inherit organization from its inventory
        if not self.pk:
            self.org_unique = False  # needed to exclude from unique (name, organization) constraint
            if self.inventory and self.inventory.organization_id and not self.organization_id:
                self.organization_id = self.inventory.organization_id
        if not self.pk and self.inventory and self.inventory.organization_id and not self.organization_id:
            self.organization_id = self.inventory.organization_id

        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.

@@ -1404,38 +1402,3 @@ class CustomInventoryScript(CommonModelNameNotUnique):

    def get_absolute_url(self, request=None):
        return reverse('api:inventory_script_detail', kwargs={'pk': self.pk}, request=request)


class InventoryGroupVariablesWithHistory(models.Model):
    """
    Represents the inventory variables of one inventory group.

    The purpose of this model is to persist the update history of the group
    variables. The update history is maintained in another class
    (`InventoryGroupVariables`), this class here is just a container for the
    database storage.
    """

    class Meta:
        constraints = [
            # Do not allow the same inventory/group combination more than once.
            models.UniqueConstraint(
                fields=["inventory", "group"],
                name="unique_inventory_group",
                violation_error_message=_("Inventory/Group combination must be unique."),
            ),
        ]

    inventory = models.ForeignKey(
        'Inventory',
        related_name='inventory_group_variables',
        null=True,
        on_delete=models.CASCADE,
    )
    group = models.ForeignKey(  # `None` denotes the 'all'-group.
        'Group',
        related_name='inventory_group_variables',
        null=True,
        on_delete=models.CASCADE,
    )
    variables = models.JSONField()  # The group variables, including their history.
@@ -358,6 +358,26 @@ class JobTemplate(
                update_fields.append('organization_id')
        return super(JobTemplate, self).save(*args, **kwargs)

    def validate_unique(self, exclude=None):
        """Custom over-ride for JT specifically
        because organization is inferred from project after full_clean is finished
        thus the organization field is not yet set when validation happens
        """
        errors = []
        for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
            kwargs = {'name': self.name}
            if self.project:
                kwargs['organization'] = self.project.organization_id
            else:
                kwargs['organization'] = None
            qs = JobTemplate.objects.filter(**kwargs)
            if self.pk:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
        if errors:
            raise ValidationError(errors)

    def create_unified_job(self, **kwargs):
        prevent_slicing = kwargs.pop('_prevent_slicing', False)
        slice_ct = self.get_effective_slice_ct(kwargs)
@@ -384,26 +404,6 @@
                WorkflowJobNode.objects.create(**create_kwargs)
        return job

    def validate_unique(self, exclude=None):
        """Custom over-ride for JT specifically
        because organization is inferred from project after full_clean is finished
        thus the organization field is not yet set when validation happens
        """
        errors = []
        for ut in JobTemplate.SOFT_UNIQUE_TOGETHER:
            kwargs = {'name': self.name}
            if self.project:
                kwargs['organization'] = self.project.organization_id
            else:
                kwargs['organization'] = None
            qs = JobTemplate.objects.filter(**kwargs)
            if self.pk:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.append('%s with this (%s) combination already exists.' % (JobTemplate.__name__, ', '.join(set(ut) - {'polymorphic_ctype'})))
        if errors:
            raise ValidationError(errors)

    def get_absolute_url(self, request=None):
        return reverse('api:job_template_detail', kwargs={'pk': self.pk}, request=request)
@@ -18,7 +18,6 @@ from collections import OrderedDict
# Django
from django.conf import settings
from django.db import models, connection, transaction
from django.db.models.constraints import UniqueConstraint
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now
@@ -112,10 +111,7 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
        ordering = ('name',)
        # unique_together here is intentionally commented out. Please make sure sub-classes of this model
        # contain at least this uniqueness restriction: SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name')]
        # Unique name constraint - note that inventory source model is excluded from this constraint entirely
        constraints = [
            UniqueConstraint(fields=['polymorphic_ctype', 'name', 'organization'], condition=models.Q(org_unique=True), name='ujt_hard_name_constraint')
        ]
        # unique_together = [('polymorphic_ctype', 'name', 'organization')]

    old_pk = models.PositiveIntegerField(
        null=True,
@@ -184,9 +180,6 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
    )
    labels = models.ManyToManyField("Label", blank=True, related_name='%(class)s_labels')
    instance_groups = OrderedManyToManyField('InstanceGroup', blank=True, through='UnifiedJobTemplateInstanceGroupMembership')
    org_unique = models.BooleanField(
        blank=True, default=True, editable=False, help_text=_('Used internally to selectively enforce database constraint on name')
    )

    def get_absolute_url(self, request=None):
        real_instance = self.get_real_instance()
@@ -8,13 +8,13 @@ import logging
from django.conf import settings
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django.db import OperationalError

# django-ansible-base
from ansible_base.lib.logging.runtime import log_excess_runtime

# AWX
from awx.main.utils.db import bulk_update_sorted_by_id
from awx.main.models import Host
from awx.main.models.inventory import Host


logger = logging.getLogger('awx.main.tasks.facts')
@@ -22,29 +22,27 @@ system_tracking_logger = logging.getLogger('awx.analytics.system_tracking')
|
||||
|
||||
|
||||
@log_excess_runtime(logger, debug_cutoff=0.01, msg='Inventory {inventory_id} host facts prepared for {written_ct} hosts, took {delta:.3f} s', add_log_data=True)
|
||||
def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_data=None):
|
||||
log_data = log_data or {}
|
||||
def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None):
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['written_ct'] = 0
|
||||
hosts_cached = []
|
||||
|
||||
# Create the fact_cache directory inside artifacts_dir
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
os.makedirs(fact_cache_dir, mode=0o700, exist_ok=True)
|
||||
hosts_cached = list()
|
||||
try:
|
||||
os.makedirs(destination, mode=0o700)
|
||||
except FileExistsError:
|
||||
pass
|
||||
|
||||
if timeout is None:
|
||||
timeout = settings.ANSIBLE_FACT_CACHE_TIMEOUT
|
||||
|
||||
last_write_time = None
|
||||
|
||||
last_filepath_written = None
|
||||
for host in hosts:
|
||||
hosts_cached.append(host.name)
|
||||
hosts_cached.append(host)
|
||||
if not host.ansible_facts_modified or (timeout and host.ansible_facts_modified < now() - datetime.timedelta(seconds=timeout)):
|
||||
continue # facts are expired - do not write them
|
||||
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
|
||||
try:
|
||||
@@ -52,21 +50,37 @@ def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_
|
||||
os.chmod(f.name, 0o600)
|
||||
json.dump(host.ansible_facts, f)
|
||||
log_data['written_ct'] += 1
|
||||
last_write_time = os.path.getmtime(filepath)
|
||||
last_filepath_written = filepath
|
||||
except IOError:
|
||||
logger.error(f'facts for host {smart_str(host.name)} could not be cached')
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
|
||||
# Write summary file directly to the artifacts_dir
|
||||
if inventory_id is not None:
|
||||
summary_file = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
||||
summary_data = {
|
||||
'last_write_time': last_write_time,
|
||||
'hosts_cached': hosts_cached,
|
||||
'written_ct': log_data['written_ct'],
|
||||
}
|
||||
with open(summary_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(summary_data, f, indent=2)
|
||||
if last_filepath_written:
|
||||
return os.path.getmtime(last_filepath_written), hosts_cached
|
||||
|
||||
return None, hosts_cached
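# Hedged illustration (not part of this diff) of the host_cache_summary.json layout
# written by the summary branch above; the host names and timestamp are invented
# example values.
example_summary = {
    'last_write_time': 1714070400.0,  # mtime of the last facts file written
    'hosts_cached': ['web01.example.com', 'db01.example.com'],
    'written_ct': 2,
}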
|
||||
|
||||
|
||||
def raw_update_hosts(host_list):
|
||||
Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def update_hosts(host_list, max_tries=5):
|
||||
if not host_list:
|
||||
return
|
||||
for i in range(max_tries):
|
||||
try:
|
||||
raw_update_hosts(host_list)
|
||||
except OperationalError as exc:
|
||||
# Deadlocks can happen if this runs at the same time as another large query
|
||||
# inventory updates and updating last_job_host_summary are candidates for conflict
|
||||
# but these would resolve easily on a retry
|
||||
if i + 1 < max_tries:
|
||||
logger.info(f'OperationalError (suspected deadlock) saving host facts retry {i}, message: {exc}')
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
break
|
||||
|
||||
|
||||
@log_excess_runtime(
|
||||
@@ -75,54 +89,32 @@ def start_fact_cache(hosts, artifacts_dir, timeout=None, inventory_id=None, log_
|
||||
msg='Inventory {inventory_id} host facts: updated {updated_ct}, cleared {cleared_ct}, unchanged {unmodified_ct}, took {delta:.3f} s',
|
||||
add_log_data=True,
|
||||
)
|
||||
def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=None):
|
||||
log_data = log_data or {}
|
||||
def finish_fact_cache(hosts_cached, destination, facts_write_time, log_data, job_id=None, inventory_id=None):
|
||||
log_data['inventory_id'] = inventory_id
|
||||
log_data['updated_ct'] = 0
|
||||
log_data['unmodified_ct'] = 0
|
||||
log_data['cleared_ct'] = 0
|
||||
# The summary file is directly inside the artifacts dir
|
||||
summary_path = os.path.join(artifacts_dir, 'host_cache_summary.json')
|
||||
if not os.path.exists(summary_path):
|
||||
logger.error(f'Missing summary file at {summary_path}')
|
||||
return
|
||||
|
||||
try:
|
||||
with open(summary_path, 'r', encoding='utf-8') as f:
|
||||
summary = json.load(f)
|
||||
facts_write_time = os.path.getmtime(summary_path) # After successful read
|
||||
except (json.JSONDecodeError, OSError) as e:
|
||||
logger.error(f'Error reading summary file at {summary_path}: {e}')
|
||||
return
|
||||
|
||||
host_names = summary.get('hosts_cached', [])
|
||||
hosts_cached = Host.objects.filter(name__in=host_names).order_by('id').iterator()
|
||||
# Path where individual fact files were written
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
hosts_to_update = []
|
||||
|
||||
for host in hosts_cached:
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
if not os.path.realpath(filepath).startswith(fact_cache_dir):
|
||||
logger.error(f'Invalid path for facts file: {filepath}')
|
||||
filepath = os.sep.join(map(str, [destination, host.name]))
|
||||
if not os.path.realpath(filepath).startswith(destination):
|
||||
system_tracking_logger.error('facts for host {} could not be cached'.format(smart_str(host.name)))
|
||||
continue
|
||||
|
||||
if os.path.exists(filepath):
|
||||
# If the file changed since we wrote the last facts file, pre-playbook run...
|
||||
modified = os.path.getmtime(filepath)
|
||||
if not facts_write_time or modified >= facts_write_time:
|
||||
try:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
if (not facts_write_time) or modified > facts_write_time:
|
||||
with codecs.open(filepath, 'r', encoding='utf-8') as f:
|
||||
try:
|
||||
ansible_facts = json.load(f)
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
if ansible_facts != host.ansible_facts:
|
||||
except ValueError:
|
||||
continue
|
||||
host.ansible_facts = ansible_facts
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
logger.info(
|
||||
f'New fact for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}',
|
||||
system_tracking_logger.info(
|
||||
'New fact for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)),
|
||||
extra=dict(
|
||||
inventory_id=host.inventory.id,
|
||||
host_name=host.name,
|
||||
@@ -132,8 +124,6 @@ def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=No
|
||||
),
|
||||
)
|
||||
log_data['updated_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
log_data['unmodified_ct'] += 1
|
||||
else:
|
||||
@@ -142,11 +132,9 @@ def finish_fact_cache(artifacts_dir, job_id=None, inventory_id=None, log_data=No
|
||||
host.ansible_facts = {}
|
||||
host.ansible_facts_modified = now()
|
||||
hosts_to_update.append(host)
|
||||
logger.info(f'Facts cleared for inventory {smart_str(host.inventory.name)} host {smart_str(host.name)}')
|
||||
system_tracking_logger.info('Facts cleared for inventory {} host {}'.format(smart_str(host.inventory.name), smart_str(host.name)))
|
||||
log_data['cleared_ct'] += 1
|
||||
|
||||
if len(hosts_to_update) >= 100:
|
||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
if len(hosts_to_update) > 100:
|
||||
update_hosts(hosts_to_update)
|
||||
hosts_to_update = []
|
||||
|
||||
bulk_update_sorted_by_id(Host, hosts_to_update, fields=['ansible_facts', 'ansible_facts_modified'])
|
||||
update_hosts(hosts_to_update)
|
||||
|
||||
@@ -77,14 +77,7 @@ def build_indirect_host_data(job: Job, job_event_queries: dict[str, dict[str, st
|
||||
if jq_str_for_event not in compiled_jq_expressions:
|
||||
compiled_jq_expressions[resolved_action] = jq.compile(jq_str_for_event)
|
||||
compiled_jq = compiled_jq_expressions[resolved_action]
|
||||
|
||||
try:
|
||||
data_source = compiled_jq.input(event.event_data['res']).all()
|
||||
except Exception as e:
|
||||
logger.warning(f'error for module {resolved_action} and data {event.event_data["res"]}: {e}')
|
||||
continue
|
||||
|
||||
for data in data_source:
|
||||
for data in compiled_jq.input(event.event_data['res']).all():
|
||||
# From this jq result (specific to a single Ansible module), get index information about this host record
|
||||
if not data.get('canonical_facts'):
|
||||
if not facts_missing_logged:
|
||||
|
||||
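# Hedged sketch (not part of this diff): compiling a per-module jq expression once
# and applying it to an event's module result, mirroring the compiled_jq usage
# above; the expression and payload are invented examples.
import jq

example_query = '{canonical_facts: {host_name: .instance.name}}'
compiled = jq.compile(example_query)
for record in compiled.input({'instance': {'name': 'vm-example-01'}}).all():
    print(record['canonical_facts']['host_name'])  # -> vm-example-01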
@@ -12,7 +12,6 @@ from awx.main.models.inventory import HostMetric, HostMetricSummaryMonthly
|
||||
from awx.main.tasks.helpers import is_run_threshold_reached
|
||||
from awx.conf.license import get_license
|
||||
from ansible_base.lib.utils.db import advisory_lock
|
||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.main.tasks.host_metrics')
|
||||
@@ -147,9 +146,8 @@ class HostMetricSummaryMonthlyTask:
|
||||
month = month + relativedelta(months=1)
|
||||
|
||||
# Create/Update stats
|
||||
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create)
|
||||
|
||||
bulk_update_sorted_by_id(HostMetricSummaryMonthly, self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'])
|
||||
HostMetricSummaryMonthly.objects.bulk_create(self.records_to_create, batch_size=1000)
|
||||
HostMetricSummaryMonthly.objects.bulk_update(self.records_to_update, ['license_consumed', 'hosts_added', 'hosts_deleted'], batch_size=1000)
|
||||
|
||||
# Set timestamp of last run
|
||||
settings.HOST_METRIC_SUMMARY_TASK_LAST_TS = now()
|
||||
|
||||
@@ -21,6 +21,7 @@ from django.conf import settings
|
||||
# Shared code for the AWX platform
|
||||
from awx_plugins.interfaces._temporary_private_container_api import CONTAINER_ROOT, get_incontainer_path
|
||||
|
||||
|
||||
# Runner
|
||||
import ansible_runner
|
||||
|
||||
@@ -28,6 +29,7 @@ import ansible_runner
|
||||
import git
|
||||
from gitdb.exc import BadName as BadGitName
|
||||
|
||||
|
||||
# AWX
|
||||
from awx.main.dispatch.publish import task
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
@@ -63,12 +65,11 @@ from awx.main.tasks.callback import (
|
||||
RunnerCallbackForProjectUpdate,
|
||||
RunnerCallbackForSystemJob,
|
||||
)
|
||||
from awx.main.tasks.policy import evaluate_policy
|
||||
from awx.main.tasks.signals import with_signal_handling, signal_callback
|
||||
from awx.main.tasks.receptor import AWXReceptorJob
|
||||
from awx.main.tasks.facts import start_fact_cache, finish_fact_cache
|
||||
from awx.main.tasks.system import update_smart_memberships_for_inventory, update_inventory_computed_fields, events_processed_hook
|
||||
from awx.main.exceptions import AwxTaskError, PolicyEvaluationError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.exceptions import AwxTaskError, PostRunError, ReceptorNodeNotFound
|
||||
from awx.main.utils.ansible import read_ansible_config
|
||||
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
|
||||
from awx.main.utils.common import (
|
||||
@@ -487,7 +488,6 @@ class BaseTask(object):
|
||||
self.instance.send_notification_templates("running")
|
||||
private_data_dir = self.build_private_data_dir(self.instance)
|
||||
self.pre_run_hook(self.instance, private_data_dir)
|
||||
evaluate_policy(self.instance)
|
||||
self.build_project_dir(self.instance, private_data_dir)
|
||||
self.instance.log_lifecycle("preparing_playbook")
|
||||
if self.instance.cancel_flag or signal_callback():
|
||||
@@ -619,8 +619,6 @@ class BaseTask(object):
|
||||
elif cancel_flag_value is False:
|
||||
self.runner_callback.delay_update(skip_if_already_set=True, job_explanation="The running ansible process received a shutdown signal.")
|
||||
status = 'failed'
|
||||
except PolicyEvaluationError as exc:
|
||||
self.runner_callback.delay_update(job_explanation=str(exc), result_traceback=str(exc))
|
||||
except ReceptorNodeNotFound as exc:
|
||||
self.runner_callback.delay_update(job_explanation=str(exc))
|
||||
except Exception:
|
||||
@@ -1093,8 +1091,8 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
# where ansible expects to find it
|
||||
if self.should_use_fact_cache():
|
||||
job.log_lifecycle("start_job_fact_cache")
|
||||
self.hosts_with_facts_cached = start_fact_cache(
|
||||
job.get_hosts_for_fact_cache(), artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)), inventory_id=job.inventory_id
|
||||
self.facts_write_time, self.hosts_with_facts_cached = start_fact_cache(
|
||||
job.get_hosts_for_fact_cache(), os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), inventory_id=job.inventory_id
|
||||
)
|
||||
|
||||
def build_project_dir(self, job, private_data_dir):
|
||||
@@ -1104,7 +1102,7 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
super(RunJob, self).post_run_hook(job, status)
|
||||
job.refresh_from_db(fields=['job_env'])
|
||||
private_data_dir = job.job_env.get('AWX_PRIVATE_DATA_DIR')
|
||||
if not private_data_dir:
|
||||
if (not private_data_dir) or (not hasattr(self, 'facts_write_time')):
|
||||
# If there's no private data dir, that means we didn't get into the
|
||||
# actual `run()` call; this _usually_ means something failed in
|
||||
# the pre_run_hook method
|
||||
@@ -1112,7 +1110,9 @@ class RunJob(SourceControlMixin, BaseTask):
|
||||
if self.should_use_fact_cache() and self.runner_callback.artifacts_processed:
|
||||
job.log_lifecycle("finish_job_fact_cache")
|
||||
finish_fact_cache(
|
||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(job.id)),
|
||||
self.hosts_with_facts_cached,
|
||||
os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'),
|
||||
facts_write_time=self.facts_write_time,
|
||||
job_id=job.id,
|
||||
inventory_id=job.inventory_id,
|
||||
)
|
||||
@@ -1578,7 +1578,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
|
||||
# Include any facts from input inventories so they can be used in filters
|
||||
start_fact_cache(
|
||||
input_inventory.hosts.only(*HOST_FACTS_FIELDS),
|
||||
artifacts_dir=os.path.join(private_data_dir, 'artifacts', str(inventory_update.id)),
|
||||
os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),
|
||||
inventory_id=input_inventory.id,
|
||||
)
|
||||
|
||||
|
||||
@@ -1,462 +0,0 @@
|
||||
import json
|
||||
import tempfile
|
||||
import contextlib
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
from django.conf import settings
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from flags.state import flag_enabled
|
||||
from opa_client import OpaClient
|
||||
from opa_client.base import BaseClient
|
||||
from requests import HTTPError
|
||||
from rest_framework import serializers
|
||||
from rest_framework import fields
|
||||
|
||||
from awx.main import models
|
||||
from awx.main.exceptions import PolicyEvaluationError
|
||||
|
||||
|
||||
# Monkey patching opa_client.base.BaseClient to fix retries and timeout settings
|
||||
_original_opa_base_client_init = BaseClient.__init__
|
||||
|
||||
|
||||
def _opa_base_client_init_fix(
|
||||
self,
|
||||
host: str = "localhost",
|
||||
port: int = 8181,
|
||||
version: str = "v1",
|
||||
ssl: bool = False,
|
||||
cert: Optional[Union[str, tuple]] = None,
|
||||
headers: Optional[dict] = None,
|
||||
retries: int = 2,
|
||||
timeout: float = 1.5,
|
||||
):
|
||||
_original_opa_base_client_init(self, host, port, version, ssl, cert, headers)
|
||||
self.retries = retries
|
||||
self.timeout = timeout
|
||||
|
||||
|
||||
BaseClient.__init__ = _opa_base_client_init_fix
|
||||
|
||||
|
||||
class _TeamSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Team
|
||||
fields = ('id', 'name')
|
||||
|
||||
|
||||
class _UserSerializer(serializers.ModelSerializer):
|
||||
teams = serializers.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = models.User
|
||||
fields = ('id', 'username', 'is_superuser', 'teams')
|
||||
|
||||
def get_teams(self, user: models.User):
|
||||
teams = models.Team.access_qs(user, 'member')
|
||||
return _TeamSerializer(many=True).to_representation(teams)
|
||||
|
||||
|
||||
class _ExecutionEnvironmentSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.ExecutionEnvironment
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'image',
|
||||
'pull',
|
||||
)
|
||||
|
||||
|
||||
class _InstanceGroupSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.InstanceGroup
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'capacity',
|
||||
'jobs_running',
|
||||
'jobs_total',
|
||||
'max_concurrent_jobs',
|
||||
'max_forks',
|
||||
)
|
||||
|
||||
|
||||
class _InventorySourceSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.InventorySource
|
||||
fields = ('id', 'name', 'source', 'status')
|
||||
|
||||
|
||||
class _InventorySerializer(serializers.ModelSerializer):
|
||||
inventory_sources = _InventorySourceSerializer(many=True)
|
||||
|
||||
class Meta:
|
||||
model = models.Inventory
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'description',
|
||||
'kind',
|
||||
'total_hosts',
|
||||
'total_groups',
|
||||
'has_inventory_sources',
|
||||
'total_inventory_sources',
|
||||
'has_active_failures',
|
||||
'hosts_with_active_failures',
|
||||
'inventory_sources',
|
||||
)
|
||||
|
||||
|
||||
class _JobTemplateSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.JobTemplate
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'job_type',
|
||||
)
|
||||
|
||||
|
||||
class _WorkflowJobTemplateSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.WorkflowJobTemplate
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'job_type',
|
||||
)
|
||||
|
||||
|
||||
class _WorkflowJobSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.WorkflowJob
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
)
|
||||
|
||||
|
||||
class _OrganizationSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Organization
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
)
|
||||
|
||||
|
||||
class _ProjectSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = models.Project
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'status',
|
||||
'scm_type',
|
||||
'scm_url',
|
||||
'scm_branch',
|
||||
'scm_refspec',
|
||||
'scm_clean',
|
||||
'scm_track_submodules',
|
||||
'scm_delete_on_update',
|
||||
)
|
||||
|
||||
|
||||
class _CredentialSerializer(serializers.ModelSerializer):
|
||||
organization = _OrganizationSerializer()
|
||||
|
||||
class Meta:
|
||||
model = models.Credential
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'description',
|
||||
'organization',
|
||||
'credential_type',
|
||||
'managed',
|
||||
'kind',
|
||||
'cloud',
|
||||
'kubernetes',
|
||||
)
|
||||
|
||||
|
||||
class _LabelSerializer(serializers.ModelSerializer):
|
||||
organization = _OrganizationSerializer()
|
||||
|
||||
class Meta:
|
||||
model = models.Label
|
||||
fields = ('id', 'name', 'organization')
|
||||
|
||||
|
||||
class JobSerializer(serializers.ModelSerializer):
|
||||
created_by = _UserSerializer()
|
||||
credentials = _CredentialSerializer(many=True)
|
||||
execution_environment = _ExecutionEnvironmentSerializer()
|
||||
instance_group = _InstanceGroupSerializer()
|
||||
inventory = _InventorySerializer()
|
||||
job_template = _JobTemplateSerializer()
|
||||
labels = _LabelSerializer(many=True)
|
||||
organization = _OrganizationSerializer()
|
||||
project = _ProjectSerializer()
|
||||
extra_vars = fields.SerializerMethodField()
|
||||
hosts_count = fields.SerializerMethodField()
|
||||
workflow_job = fields.SerializerMethodField()
|
||||
workflow_job_template = fields.SerializerMethodField()
|
||||
|
||||
class Meta:
|
||||
model = models.Job
|
||||
fields = (
|
||||
'id',
|
||||
'name',
|
||||
'created',
|
||||
'created_by',
|
||||
'credentials',
|
||||
'execution_environment',
|
||||
'extra_vars',
|
||||
'forks',
|
||||
'hosts_count',
|
||||
'instance_group',
|
||||
'inventory',
|
||||
'job_template',
|
||||
'job_type',
|
||||
'job_type_name',
|
||||
'labels',
|
||||
'launch_type',
|
||||
'limit',
|
||||
'launched_by',
|
||||
'organization',
|
||||
'playbook',
|
||||
'project',
|
||||
'scm_branch',
|
||||
'scm_revision',
|
||||
'workflow_job',
|
||||
'workflow_job_template',
|
||||
)
|
||||
|
||||
def get_extra_vars(self, obj: models.Job):
|
||||
return json.loads(obj.display_extra_vars())
|
||||
|
||||
def get_hosts_count(self, obj: models.Job):
|
||||
return obj.hosts.count()
|
||||
|
||||
def get_workflow_job(self, obj: models.Job):
|
||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
||||
if workflow_job is None:
|
||||
return None
|
||||
return _WorkflowJobSerializer().to_representation(workflow_job)
|
||||
|
||||
def get_workflow_job_template(self, obj: models.Job):
|
||||
workflow_job: models.WorkflowJob = obj.get_workflow_job()
|
||||
if workflow_job is None:
|
||||
return None
|
||||
|
||||
workflow_job_template: models.WorkflowJobTemplate = workflow_job.workflow_job_template
|
||||
if workflow_job_template is None:
|
||||
return None
|
||||
|
||||
return _WorkflowJobTemplateSerializer().to_representation(workflow_job_template)
|
||||
|
||||
|
||||
class OPAResultSerializer(serializers.Serializer):
|
||||
allowed = fields.BooleanField(required=True)
|
||||
violations = fields.ListField(child=fields.CharField())
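# Hedged illustration (not part of this diff): the result shape the serializer above
# accepts from an OPA query; the violation message is an invented example.
example_opa_result = {
    'allowed': False,
    'violations': ['job may not run outside the maintenance window'],
}
assert OPAResultSerializer(data=example_opa_result).is_valid()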
|
||||
|
||||
|
||||
class OPA_AUTH_TYPES:
|
||||
NONE = 'None'
|
||||
TOKEN = 'Token'
|
||||
CERTIFICATE = 'Certificate'
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def opa_cert_file():
|
||||
"""
|
||||
Context manager that creates temporary certificate files for OPA authentication.
|
||||
|
||||
For mTLS (mutual TLS), we need:
|
||||
- Client certificate and key for client authentication
|
||||
- CA certificate (optional) for server verification
|
||||
|
||||
Returns:
|
||||
tuple: (client_cert_path, verify_path)
|
||||
- client_cert_path: Path to client cert file or None if not using client cert
|
||||
- verify_path: Path to CA cert file, True to use system CA store, or False for no verification
|
||||
"""
|
||||
client_cert_temp = None
|
||||
ca_temp = None
|
||||
|
||||
try:
|
||||
# Case 1: Full mTLS with client cert and optional CA cert
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
||||
# Create client certificate file (required for mTLS)
|
||||
client_cert_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_CERT)
|
||||
client_cert_temp.write("\n")
|
||||
client_cert_temp.write(settings.OPA_AUTH_CLIENT_KEY)
|
||||
client_cert_temp.write("\n")
|
||||
client_cert_temp.flush()
|
||||
|
||||
# If CA cert is provided, use it for server verification
|
||||
# Otherwise, use system CA store (True)
|
||||
if settings.OPA_AUTH_CA_CERT:
|
||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
||||
ca_temp.write("\n")
|
||||
ca_temp.flush()
|
||||
verify_path = ca_temp.name
|
||||
else:
|
||||
verify_path = True # Use system CA store
|
||||
|
||||
yield (client_cert_temp.name, verify_path)
|
||||
|
||||
# Case 2: TLS with only server verification (no client cert)
|
||||
elif settings.OPA_SSL:
|
||||
# If CA cert is provided, use it for server verification
|
||||
# Otherwise, use system CA store (True)
|
||||
if settings.OPA_AUTH_CA_CERT:
|
||||
ca_temp = tempfile.NamedTemporaryFile(delete=True, mode='w', suffix=".pem")
|
||||
ca_temp.write(settings.OPA_AUTH_CA_CERT)
|
||||
ca_temp.write("\n")
|
||||
ca_temp.flush()
|
||||
verify_path = ca_temp.name
|
||||
else:
|
||||
verify_path = True # Use system CA store
|
||||
|
||||
yield (None, verify_path)
|
||||
|
||||
# Case 3: No TLS
|
||||
else:
|
||||
yield (None, False)
|
||||
|
||||
finally:
|
||||
# Clean up temporary files
|
||||
if client_cert_temp:
|
||||
client_cert_temp.close()
|
||||
if ca_temp:
|
||||
ca_temp.close()
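# Hedged usage sketch (not part of this diff): feeding the (cert, verify) pair
# yielded above into a plain requests call; the health-check URL is an assumed
# example, not something this module defines.
import requests

def _example_opa_health_check():
    with opa_cert_file() as (client_cert_path, verify_path):
        return requests.get(
            'https://opa.example.internal:8181/health',
            cert=client_cert_path,  # None when no client certificate is configured
            verify=verify_path,  # CA bundle path, True (system store), or False
            timeout=5,
        )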
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def opa_client(headers=None):
|
||||
with opa_cert_file() as cert_files:
|
||||
cert, verify = cert_files
|
||||
|
||||
with OpaClient(
|
||||
host=settings.OPA_HOST,
|
||||
port=settings.OPA_PORT,
|
||||
headers=headers,
|
||||
ssl=settings.OPA_SSL,
|
||||
cert=cert,
|
||||
timeout=settings.OPA_REQUEST_TIMEOUT,
|
||||
retries=settings.OPA_REQUEST_RETRIES,
|
||||
) as client:
|
||||
# Workaround for https://github.com/Turall/OPA-python-client/issues/32
|
||||
# by directly setting cert and verify on requests.session
|
||||
client._session.cert = cert
|
||||
client._session.verify = verify
|
||||
|
||||
yield client
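# Hedged usage sketch (not part of this diff): querying a policy package through the
# context manager above; the header and package path are invented examples.
def _example_policy_query(job_payload):
    with opa_client(headers={'X-Request-Source': 'awx-example'}) as client:
        response = client.query_rule(input_data=job_payload, package_path='aap_policy_examples/allowed_false')
        return response.get('result')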
|
||||
|
||||
|
||||
def evaluate_policy(instance):
|
||||
# Policy evaluation for Policy as Code feature
|
||||
if not flag_enabled("FEATURE_POLICY_AS_CODE_ENABLED"):
|
||||
return
|
||||
|
||||
if not settings.OPA_HOST:
|
||||
return
|
||||
|
||||
if not isinstance(instance, models.Job):
|
||||
return
|
||||
|
||||
instance.log_lifecycle("evaluate_policy")
|
||||
|
||||
input_data = JobSerializer(instance=instance).data
|
||||
|
||||
headers = settings.OPA_AUTH_CUSTOM_HEADERS
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.TOKEN:
|
||||
headers.update({'Authorization': 'Bearer {}'.format(settings.OPA_AUTH_TOKEN)})
|
||||
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE and not settings.OPA_SSL:
|
||||
raise PolicyEvaluationError(_('OPA_AUTH_TYPE=Certificate requires OPA_SSL to be enabled.'))
|
||||
|
||||
cert_settings_missing = []
|
||||
|
||||
if settings.OPA_AUTH_TYPE == OPA_AUTH_TYPES.CERTIFICATE:
|
||||
if not settings.OPA_AUTH_CLIENT_CERT:
|
||||
cert_settings_missing += ['OPA_AUTH_CLIENT_CERT']
|
||||
if not settings.OPA_AUTH_CLIENT_KEY:
|
||||
cert_settings_missing += ['OPA_AUTH_CLIENT_KEY']
|
||||
if not settings.OPA_AUTH_CA_CERT:
|
||||
cert_settings_missing += ['OPA_AUTH_CA_CERT']
|
||||
|
||||
if cert_settings_missing:
|
||||
raise PolicyEvaluationError(_('Following certificate settings are missing for OPA_AUTH_TYPE=Certificate: {}').format(cert_settings_missing))
|
||||
|
||||
query_paths = [
|
||||
('Organization', instance.organization.opa_query_path),
|
||||
('Inventory', instance.inventory.opa_query_path),
|
||||
('Job template', instance.job_template.opa_query_path),
|
||||
]
|
||||
violations = dict()
|
||||
errors = dict()
|
||||
|
||||
try:
|
||||
with opa_client(headers=headers) as client:
|
||||
for path_type, query_path in query_paths:
|
||||
response = dict()
|
||||
try:
|
||||
if not query_path:
|
||||
continue
|
||||
|
||||
response = client.query_rule(input_data=input_data, package_path=query_path)
|
||||
|
||||
except HTTPError as e:
|
||||
message = _('Call to OPA failed. Exception: {}').format(e)
|
||||
try:
|
||||
error_data = e.response.json()
|
||||
except ValueError:
|
||||
errors[path_type] = message
|
||||
continue
|
||||
|
||||
error_code = error_data.get("code")
|
||||
error_message = error_data.get("message")
|
||||
if error_code or error_message:
|
||||
message = _('Call to OPA failed. Code: {}, Message: {}').format(error_code, error_message)
|
||||
errors[path_type] = message
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
errors[path_type] = _('Call to OPA failed. Exception: {}').format(e)
|
||||
continue
|
||||
|
||||
result = response.get('result')
|
||||
if result is None:
|
||||
errors[path_type] = _('Call to OPA did not return a "result" property. The path refers to an undefined document.')
|
||||
continue
|
||||
|
||||
result_serializer = OPAResultSerializer(data=result)
|
||||
if not result_serializer.is_valid():
|
||||
errors[path_type] = _('OPA policy returned invalid result.')
|
||||
continue
|
||||
|
||||
result_data = result_serializer.validated_data
|
||||
if not result_data.get("allowed") and (result_violations := result_data.get("violations")):
|
||||
violations[path_type] = result_violations
|
||||
|
||||
format_results = dict()
|
||||
if any(errors[e] for e in errors):
|
||||
format_results["Errors"] = errors
|
||||
|
||||
if any(violations[v] for v in violations):
|
||||
format_results["Violations"] = violations
|
||||
|
||||
if violations or errors:
|
||||
raise PolicyEvaluationError(pformat(format_results, width=80))
|
||||
|
||||
except Exception as e:
|
||||
raise PolicyEvaluationError(_('This job cannot be executed due to a policy violation or error. See the following details:\n{}').format(e))
|
||||
@@ -1,3 +0,0 @@
|
||||
[all:vars]
|
||||
a=value_a
|
||||
b=value_b
|
||||
@@ -1,17 +0,0 @@
|
||||
import time
|
||||
import logging
|
||||
|
||||
from awx.main.dispatch import get_task_queuename
|
||||
from awx.main.dispatch.publish import task
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@task(queue=get_task_queuename)
|
||||
def sleep_task(seconds=10, log=False):
|
||||
if log:
|
||||
logger.info('starting sleep_task')
|
||||
time.sleep(seconds)
|
||||
if log:
|
||||
logger.info('finished sleep_task')
|
||||
@@ -87,8 +87,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
True,
|
||||
('redhat_user', 'redhat_pass'),
|
||||
@@ -98,8 +98,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': None,
|
||||
'REDHAT_PASSWORD': None,
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -109,8 +109,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
},
|
||||
True,
|
||||
('subs_user', 'subs_pass'),
|
||||
@@ -120,8 +120,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
False,
|
||||
None, # No request should be made
|
||||
@@ -131,8 +131,8 @@ def mock_analytic_post():
|
||||
{
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
False,
|
||||
None, # Invalid, no request should be made
|
||||
|
||||
@@ -97,8 +97,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -109,8 +109,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('subs_user', 'subs_pass'),
|
||||
None,
|
||||
@@ -121,8 +121,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': '',
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_USER,
|
||||
@@ -133,8 +133,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user',
|
||||
'SUBSCRIPTIONS_PASSWORD': 'subs_pass', # NOSONAR
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
None,
|
||||
@@ -145,8 +145,8 @@ class TestAnalyticsGenericView:
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
'SUBSCRIPTIONS_USERNAME': 'subs_user', # NOSONAR
|
||||
'SUBSCRIPTIONS_PASSWORD': '',
|
||||
},
|
||||
None,
|
||||
ERROR_MISSING_PASSWORD,
|
||||
@@ -155,36 +155,26 @@ class TestAnalyticsGenericView:
|
||||
)
|
||||
@pytest.mark.django_db
|
||||
def test__send_to_analytics_credentials(self, settings_map, expected_auth, expected_error_keyword):
|
||||
"""
|
||||
Test _send_to_analytics with various combinations of credentials.
|
||||
"""
|
||||
with override_settings(**settings_map):
|
||||
request = RequestFactory().post('/some/path')
|
||||
view = AnalyticsGenericView()
|
||||
|
||||
if expected_auth:
|
||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client:
|
||||
# Configure the mock OIDCClient instance and its make_request method
|
||||
mock_client_instance = mock.Mock()
|
||||
mock_oidc_client.return_value = mock_client_instance
|
||||
mock_client_instance.make_request.return_value = mock.Mock(status_code=200)
|
||||
with mock.patch('requests.request') as mock_request:
|
||||
mock_request.return_value = mock.Mock(status_code=200)
|
||||
|
||||
analytic_url = view._get_analytics_url(request.path)
|
||||
response = view._send_to_analytics(request, 'POST')
|
||||
|
||||
# Assertions
|
||||
# Assert OIDCClient instantiation
|
||||
expected_client_id, expected_client_secret = expected_auth
|
||||
mock_oidc_client.assert_called_once_with(expected_client_id, expected_client_secret)
|
||||
|
||||
# Assert make_request call
|
||||
mock_client_instance.make_request.assert_called_once_with(
|
||||
mock_request.assert_called_once_with(
|
||||
'POST',
|
||||
analytic_url,
|
||||
headers=mock.ANY,
|
||||
auth=expected_auth,
|
||||
verify=mock.ANY,
|
||||
params=mock.ANY,
|
||||
headers=mock.ANY,
|
||||
json=mock.ANY,
|
||||
params=mock.ANY,
|
||||
timeout=mock.ANY,
|
||||
)
|
||||
assert response.status_code == 200
|
||||
@@ -196,64 +186,3 @@ class TestAnalyticsGenericView:
|
||||
# mock_error_response.assert_called_once_with(expected_error_keyword, remote=False)
|
||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||
assert response.data['error']['keyword'] == expected_error_keyword
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"settings_map, expected_auth",
|
||||
[
|
||||
# Test case 1: Username and password should be used for basic auth
|
||||
(
|
||||
{
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': 'redhat_user',
|
||||
'REDHAT_PASSWORD': 'redhat_pass', # NOSONAR
|
||||
'SUBSCRIPTIONS_CLIENT_ID': '',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': '',
|
||||
},
|
||||
('redhat_user', 'redhat_pass'),
|
||||
),
|
||||
# Test case 2: Client ID and secret should be used for basic auth
|
||||
(
|
||||
{
|
||||
'INSIGHTS_TRACKING_STATE': True,
|
||||
'REDHAT_USERNAME': '',
|
||||
'REDHAT_PASSWORD': '',
|
||||
'SUBSCRIPTIONS_CLIENT_ID': 'subs_user',
|
||||
'SUBSCRIPTIONS_CLIENT_SECRET': 'subs_pass', # NOSONAR
|
||||
},
|
||||
None,
|
||||
),
|
||||
],
|
||||
)
|
||||
def test__send_to_analytics_fallback_to_basic_auth(self, settings_map, expected_auth):
|
||||
"""
|
||||
Test _send_to_analytics with basic auth fallback.
|
||||
"""
|
||||
with override_settings(**settings_map):
|
||||
request = RequestFactory().post('/some/path')
|
||||
view = AnalyticsGenericView()
|
||||
|
||||
with mock.patch('awx.api.views.analytics.OIDCClient') as mock_oidc_client, mock.patch(
|
||||
'awx.api.views.analytics.AnalyticsGenericView._base_auth_request'
|
||||
) as mock_base_auth_request:
|
||||
# Configure the mock OIDCClient instance and its make_request method
|
||||
mock_client_instance = mock.Mock()
|
||||
mock_oidc_client.return_value = mock_client_instance
|
||||
mock_client_instance.make_request.side_effect = requests.RequestException("Incorrect credentials")
|
||||
|
||||
analytic_url = view._get_analytics_url(request.path)
|
||||
view._send_to_analytics(request, 'POST')
|
||||
|
||||
if expected_auth:
|
||||
# assert mock_base_auth_request called with expected_auth
|
||||
mock_base_auth_request.assert_called_once_with(
|
||||
request,
|
||||
'POST',
|
||||
analytic_url,
|
||||
expected_auth[0],
|
||||
expected_auth[1],
|
||||
mock.ANY,
|
||||
)
|
||||
else:
|
||||
# assert mock_base_auth_request not called
|
||||
mock_base_auth_request.assert_not_called()
|
||||
|
||||
@@ -8,7 +8,6 @@ from django.core.exceptions import ValidationError
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
from awx.main.models import InventorySource, Inventory, ActivityStream
|
||||
from awx.main.utils.inventory_vars import update_group_variables
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
@@ -691,241 +690,3 @@ class TestConstructedInventory:
|
||||
assert inv_r.data['url'] != const_r.data['url']
|
||||
assert inv_r.data['related']['constructed_url'] == url_const
|
||||
assert const_r.data['related']['constructed_url'] == url_const
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestInventoryAllVariables:
|
||||
|
||||
@staticmethod
|
||||
def simulate_update_from_source(inv_src, variables_dict, overwrite_vars=True):
|
||||
"""
|
||||
Update the inventory of `inv_src` with the variables `variables_dict`.
|
||||
"""
|
||||
# Perform an update from source the same way it is done in
|
||||
# `inventory_import.Command._update_inventory`.
|
||||
new_vars = update_group_variables(
|
||||
group_id=None, # `None` denotes the 'all' group (which doesn't have a pk).
|
||||
newvars=variables_dict,
|
||||
dbvars=inv_src.inventory.variables_dict,
|
||||
invsrc_id=inv_src.id,
|
||||
inventory_id=inv_src.inventory.id,
|
||||
overwrite_vars=overwrite_vars,
|
||||
)
|
||||
inv_src.inventory.variables = json.dumps(new_vars)
|
||||
inv_src.inventory.save(update_fields=["variables"])
|
||||
return new_vars
|
||||
|
||||
def update_and_verify(self, inv_src, new_vars, expect=None, overwrite_vars=True, teststep=None):
|
||||
"""
|
||||
Helper: Update from source and verify the new inventory variables.
|
||||
|
||||
:param inv_src: An inventory source object with its inventory property
|
||||
set to the inventory fixture of the caller.
|
||||
:param dict new_vars: The variables of the inventory source `inv_src`.
|
||||
:param dict expect: (optional) The expected variables state of the
|
||||
inventory after the update. If not set or None, expect `new_vars`.
|
||||
:param bool overwrite_vars: The status of the inventory source option
|
||||
'overwrite variables'. Default is `True`.
|
||||
:raise AssertionError: If the inventory does not contain the expected
|
||||
variables after the update.
|
||||
"""
|
||||
self.simulate_update_from_source(inv_src, new_vars, overwrite_vars=overwrite_vars)
|
||||
if teststep is not None:
|
||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars), f"Test step {teststep}"
|
||||
else:
|
||||
assert inv_src.inventory.variables_dict == (expect if expect is not None else new_vars)
|
||||
|
||||
def test_set_variables_through_inventory_details_update(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Set an inventory variable by changing the inventory details, simulating
|
||||
a user edit.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x"}
|
||||
|
||||
def test_variables_set_by_user_persist_update_from_src(self, inventory, inventory_source, patch, admin_user):
|
||||
"""
|
||||
Verify the special behavior that a variable which originates from a user
|
||||
edit (instead of a source update) is not removed from the inventory
|
||||
when a source update with overwrite_vars=True does not contain that
|
||||
variable. This behavior is considered special because a variable which
|
||||
originates from a source would actually be deleted.
|
||||
|
||||
In addition, verify that an existing variable which was set by a user
|
||||
edit can be overwritten by a source update.
|
||||
"""
|
||||
# Set two variables via user edit.
|
||||
patch(
|
||||
url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}),
|
||||
data={'variables': '{"a": "a_from_user", "b": "b_from_user"}'},
|
||||
user=admin_user,
|
||||
expect=200,
|
||||
)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {'a': 'a_from_user', 'b': 'b_from_user'}
|
||||
# Update from a source which contains only one of the two variables from
|
||||
# the previous update.
|
||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_user'}
|
||||
|
||||
def test_variables_set_through_src_get_removed_on_update_from_same_src(self, inventory, inventory_source, patch, admin_user):
|
||||
"""
|
||||
Verify that a variable which originates from a source update is removed
|
||||
from the inventory when a source update with overwrite_vars=True does
|
||||
not contain that variable.
|
||||
|
||||
In addition, verify that an existing variable which was set by a user
|
||||
edit can be overwritten by a source update.
|
||||
"""
|
||||
# Set two variables via update from source.
|
||||
self.simulate_update_from_source(inventory_source, {'a': 'a_from_source', 'b': 'b_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'a': 'a_from_source', 'b': 'b_from_source'}
|
||||
# Update from the same source which now contains only one of the two
|
||||
# variables from the previous update.
|
||||
self.simulate_update_from_source(inventory_source, {'b': 'b_from_source'})
|
||||
# Verify the variable has been deleted from the inventory.
|
||||
assert inventory.variables_dict == {'b': 'b_from_source'}
|
||||
|
||||
def test_overwrite_variables_through_inventory_details_update(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Set and update the inventory variables multiple times by changing the
|
||||
inventory details via the API, simulating user edits.
|
||||
|
||||
Any variables update by means of an inventory details update shall
|
||||
overwrite all existing inventory variables.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x"}
|
||||
# a: x2
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x2'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"a": "x2"}
|
||||
# b: y
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"b": "y"}
|
||||
|
||||
def test_inventory_group_variables_internal_data(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Basic verification of how variable updates are stored internally.
|
||||
|
||||
.. Warning::
|
||||
|
||||
This test verifies a specific implementation of the inventory
|
||||
variables update business logic. It may deliver false negatives if
|
||||
the implementation changes.
|
||||
"""
|
||||
# a: x
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'a: x'}, user=admin_user, expect=200)
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'a': [[-1, 'x']]}
|
||||
# b: y
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'b: y'}, user=admin_user, expect=200)
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'b': [[-1, 'y']]}
|
||||
|
||||
def test_update_then_user_change(self, inventory, patch, admin_user, inventory_source):
|
||||
"""
|
||||
1. Update inventory vars by means of an inventory source update.
|
||||
2. Update inventory vars by editing the inventory details (aka a 'user
|
||||
update'), thereby changing variable values and deleting variables
|
||||
from the inventory.
|
||||
|
||||
.. Warning::
|
||||
|
||||
This test partly relies on a specific implementation of the
|
||||
inventory variables update business logic. It may deliver false
|
||||
negatives if the implementation changes.
|
||||
"""
|
||||
assert inventory_source.inventory_id == inventory.pk # sanity
|
||||
# ---- Test step 1: Set variables by updating from an inventory source.
|
||||
self.simulate_update_from_source(inventory_source, {'foo': 'foo_from_source', 'bar': 'bar_from_source'})
|
||||
# Verify inventory variables.
|
||||
assert inventory.variables_dict == {'foo': 'foo_from_source', 'bar': 'bar_from_source'}
|
||||
# Verify internal storage of variables data. Note that this is
|
||||
# implementation specific
|
||||
assert inventory.inventory_group_variables.count() == 1
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'foo': [[inventory_source.id, 'foo_from_source']], 'bar': [[inventory_source.id, 'bar_from_source']]}
|
||||
# ---- Test step 2: Change the variables by editing the inventory details.
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'foo: foo_from_user'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
# Verify that variable `foo` contains the new value, and that variable
|
||||
# `bar` has been deleted from the inventory.
|
||||
assert inventory.variables_dict == {"foo": "foo_from_user"}
|
||||
# Verify internal storage of variables data. Note that this is
|
||||
# implementation specific
|
||||
assert inventory.inventory_group_variables.count() == 1
|
||||
igv = inventory.inventory_group_variables.first()
|
||||
assert igv.variables == {'foo': [[-1, 'foo_from_user']]}
|
||||
|
||||
def test_monotonic_deletions(self, inventory, patch, admin_user):
|
||||
"""
|
||||
Verify the variables history logic for monotonic deletions.
|
||||
|
||||
Monotonic in this context means that the variables are deleted in the
|
||||
reverse order of their creation.
|
||||
|
||||
1. Set inventory variable x: 0, expect INV={x: 0}
|
||||
|
||||
(The following steps use overwrite_variables=False)
|
||||
|
||||
2. Update from source A={x: 1}, expect INV={x: 1}
|
||||
3. Update from source B={x: 2}, expect INV={x: 2}
|
||||
4. Update from source B={}, expect INV={x: 1}
|
||||
5. Update from source A={}, expect INV={x: 0}
|
||||
"""
|
||||
inv_src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
|
||||
inv_src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
|
||||
# Test step 1:
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'x: 0'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"x": 0}
|
||||
# Test step 2: Source A overwrites value of var x
|
||||
self.update_and_verify(inv_src_a, {"x": 1}, teststep=2)
|
||||
# Test step 3: Source B overwrites value of var x
|
||||
self.update_and_verify(inv_src_b, {"x": 2}, teststep=3)
|
||||
# Test step 4: Value of var x from source A reappears
|
||||
self.update_and_verify(inv_src_b, {}, expect={"x": 1}, teststep=4)
|
||||
# Test step 5: Value of var x from initial user edit reappears
|
||||
self.update_and_verify(inv_src_a, {}, expect={"x": 0}, teststep=5)
|
||||
|
||||
def test_interleaved_deletions(self, inventory, patch, admin_user, inventory_source):
|
||||
"""
|
||||
Verify the variables history logic for interleaved deletions.
|
||||
|
||||
Interleaved in this context means that the variables are deleted in a
|
||||
different order than the sequence of their creation.
|
||||
|
||||
1. Set inventory variable x: 0, expect INV={x: 0}
|
||||
2. Update from source A={x: 1}, expect INV={x: 1}
|
||||
3. Update from source B={x: 2}, expect INV={x: 2}
|
||||
4. Update from source C={x: 3}, expect INV={x: 3}
|
||||
5. Update from source B={}, expect INV={x: 3}
|
||||
6. Update from source C={}, expect INV={x: 1}
|
||||
"""
|
||||
inv_src_a = InventorySource.objects.create(name="inv-src-A", inventory=inventory, source="ec2")
|
||||
inv_src_b = InventorySource.objects.create(name="inv-src-B", inventory=inventory, source="ec2")
|
||||
inv_src_c = InventorySource.objects.create(name="inv-src-C", inventory=inventory, source="ec2")
|
||||
# Test step 1. Set inventory variable x: 0
|
||||
patch(url=reverse('api:inventory_detail', kwargs={'pk': inventory.pk}), data={'variables': 'x: 0'}, user=admin_user, expect=200)
|
||||
inventory.refresh_from_db()
|
||||
assert inventory.variables_dict == {"x": 0}
|
||||
# Test step 2: Source A overwrites value of var x
|
||||
self.update_and_verify(inv_src_a, {"x": 1}, teststep=2)
|
||||
# Test step 3: Source B overwrites value of var x
|
||||
self.update_and_verify(inv_src_b, {"x": 2}, teststep=3)
|
||||
# Test step 4: Source C overwrites value of var x
|
||||
self.update_and_verify(inv_src_c, {"x": 3}, teststep=4)
|
||||
# Test step 5: Value of var x from source C remains unchanged
|
||||
self.update_and_verify(inv_src_b, {}, expect={"x": 3}, teststep=5)
|
||||
# Test step 6: Value of var x from source A reappears, because the
|
||||
# latest update from source B did not contain var x.
|
||||
self.update_and_verify(inv_src_c, {}, expect={"x": 1}, teststep=6)
|
||||
|
||||
@@ -34,18 +34,40 @@ def test_wrapup_does_send_notifications(mocker):
|
||||
mock.assert_called_once_with('succeeded')
|
||||
|
||||
|
||||
class FakeRedis:
|
||||
def keys(self, *args, **kwargs):
|
||||
return []
|
||||
|
||||
def set(self):
|
||||
pass
|
||||
|
||||
def get(self):
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def from_url(cls, *args, **kwargs):
|
||||
return cls()
|
||||
|
||||
def pipeline(self):
|
||||
return self
|
||||
|
||||
|
||||
class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
@pytest.fixture(autouse=True)
|
||||
def turn_off_websockets_and_redis(self, fake_redis):
|
||||
def turn_off_websockets(self):
|
||||
with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
|
||||
yield
|
||||
|
||||
def get_worker(self):
|
||||
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
||||
return CallbackBrokerWorker()
|
||||
|
||||
def event_create_kwargs(self):
|
||||
inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
|
||||
return dict(inventory_update=inventory_update, created=inventory_update.created)
|
||||
|
||||
def test_flush_with_valid_event(self):
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events}
|
||||
worker.flush()
|
||||
@@ -53,7 +75,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1
|
||||
|
||||
def test_flush_with_invalid_event(self):
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [
|
||||
InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
|
||||
@@ -68,7 +90,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff == {InventoryUpdateEvent: [events[1]]}
|
||||
|
||||
def test_duplicate_key_not_saved_twice(self):
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
worker.flush()
|
||||
@@ -82,7 +104,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert worker.buff.get(InventoryUpdateEvent, []) == []
|
||||
|
||||
def test_give_up_on_bad_event(self):
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
|
||||
worker.buff = {InventoryUpdateEvent: events.copy()}
|
||||
|
||||
@@ -95,7 +117,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0 # sanity
|
||||
|
||||
def test_flush_with_empty_buffer(self):
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
worker.buff = {InventoryUpdateEvent: []}
|
||||
with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
|
||||
worker.flush()
|
||||
@@ -105,7 +127,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
|
||||
# In postgres, text fields reject NUL character, 0x00
|
||||
# tests use sqlite3 which will not raise an error
|
||||
# but we can still test that it is sanitized before saving
|
||||
worker = CallbackBrokerWorker()
|
||||
worker = self.get_worker()
|
||||
kwargs = self.event_create_kwargs()
|
||||
events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
|
||||
assert "\x00" in events[0].stdout # sanity
|
||||
|
||||
@@ -63,33 +63,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
|
||||
return requests
|
||||
|
||||
|
||||
class FakeRedis:
|
||||
def keys(self, *args, **kwargs):
|
||||
return []
|
||||
|
||||
def set(self):
|
||||
pass
|
||||
|
||||
def get(self):
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def from_url(cls, *args, **kwargs):
|
||||
return cls()
|
||||
|
||||
def pipeline(self):
|
||||
return self
|
||||
|
||||
def ping(self):
|
||||
return
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fake_redis():
|
||||
with mock.patch('redis.Redis', new=FakeRedis): # turn off redis stuff
|
||||
yield
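# Hedged usage sketch (not part of this diff): a test opting into the fake_redis
# fixture above so that nothing touches a real Redis server; the test body is an
# invented example.
def test_example_with_fake_redis(fake_redis):
    import redis

    connection = redis.Redis.from_url('redis://ignored')  # patched to FakeRedis
    assert connection.keys('*') == []
    assert connection.ping() is None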
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def user():
|
||||
def u(name, is_superuser=False):
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.migrations._db_constraints import _rename_duplicates
|
||||
from awx.main.models import JobTemplate
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_rename_job_template_duplicates(organization, project):
|
||||
ids = []
|
||||
for i in range(5):
|
||||
jt = JobTemplate.objects.create(name=f'jt-{i}', organization=organization, project=project)
|
||||
ids.append(jt.id) # saved in order of creation
|
||||
|
||||
# Hack to first allow duplicate names of JT to test migration
|
||||
JobTemplate.objects.filter(id__in=ids).update(org_unique=False)
|
||||
|
||||
# Set all JTs to the same name
|
||||
JobTemplate.objects.filter(id__in=ids).update(name='same_name_for_test')
|
||||
|
||||
_rename_duplicates(JobTemplate)
|
||||
|
||||
first_jt = JobTemplate.objects.get(id=ids[0])
|
||||
assert first_jt.name == 'same_name_for_test'
|
||||
|
||||
for i, pk in enumerate(ids):
|
||||
if i == 0:
|
||||
continue
|
||||
jt = JobTemplate.objects.get(id=pk)
|
||||
# Name should be set based on creation order
|
||||
assert jt.name == f'same_name_for_test_dup{i}'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_rename_job_template_name_too_long(organization, project):
|
||||
ids = []
|
||||
for i in range(3):
|
||||
jt = JobTemplate.objects.create(name=f'jt-{i}', organization=organization, project=project)
|
||||
ids.append(jt.id) # saved in order of creation
|
||||
|
||||
JobTemplate.objects.filter(id__in=ids).update(org_unique=False)
|
||||
|
||||
chars = 512
|
||||
# Set all JTs to the same reaaaaaaly long name
|
||||
JobTemplate.objects.filter(id__in=ids).update(name='A' * chars)
|
||||
|
||||
_rename_duplicates(JobTemplate)
|
||||
|
||||
first_jt = JobTemplate.objects.get(id=ids[0])
|
||||
assert first_jt.name == 'A' * chars
|
||||
|
||||
for i, pk in enumerate(ids):
|
||||
if i == 0:
|
||||
continue
|
||||
jt = JobTemplate.objects.get(id=pk)
|
||||
assert jt.name.endswith(f'dup{i}')
|
||||
assert len(jt.name) <= 512
|
||||
@@ -3,10 +3,6 @@ import pytest
|
||||
# AWX
|
||||
from awx.main.ha import is_ha_environment
|
||||
from awx.main.models.ha import Instance
|
||||
from awx.main.dispatch.pool import get_auto_max_workers
|
||||
|
||||
# Django
|
||||
from django.test.utils import override_settings
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -21,25 +17,3 @@ def test_db_localhost():
|
||||
Instance.objects.create(hostname='foo', node_type='hybrid')
|
||||
Instance.objects.create(hostname='bar', node_type='execution')
|
||||
assert is_ha_environment() is False
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
'settings',
|
||||
[
|
||||
dict(SYSTEM_TASK_ABS_MEM='16Gi', SYSTEM_TASK_ABS_CPU='24', SYSTEM_TASK_FORKS_MEM=400, SYSTEM_TASK_FORKS_CPU=4),
|
||||
dict(SYSTEM_TASK_ABS_MEM='124Gi', SYSTEM_TASK_ABS_CPU='2', SYSTEM_TASK_FORKS_MEM=None, SYSTEM_TASK_FORKS_CPU=None),
|
||||
],
|
||||
ids=['cpu_dominated', 'memory_dominated'],
|
||||
)
|
||||
def test_dispatcher_max_workers_reserve(settings, fake_redis):
|
||||
"""This tests that the dispatcher max_workers matches instance capacity
|
||||
|
||||
Assumes capacity_adjustment is 1,
|
||||
plus reserve worker count
|
||||
"""
|
||||
with override_settings(**settings):
|
||||
i = Instance.objects.create(hostname='test-1', node_type='hybrid', capacity_adjustment=1.0)
|
||||
i.local_health_check()
|
||||
|
||||
assert get_auto_max_workers() == i.capacity + 7, (i.cpu, i.memory, i.cpu_capacity, i.mem_capacity)
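# Illustrative sketch only (not AWX source): the assertion above assumes the
# dispatcher sizes its worker pool as instance capacity plus a fixed reserve of
# workers for internal tasks. The helper name below and the reserve value of 7
# are assumptions taken from the test expectation, not from the library.
def _sketch_auto_max_workers(capacity: int, reserve: int = 7) -> int:
    # total pool size = job-running slots plus reserved slots
    return capacity + reserve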
|
||||
|
||||
@@ -393,7 +393,7 @@ def test_dependency_isolation(organization):
|
||||
this should keep dependencies isolated"""
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
updating_projects = [
|
||||
Project.objects.create(name=f'iso-proj{i}', organization=organization, scm_url='https://foo.invalid', scm_type='git', scm_update_on_launch=True)
|
||||
Project.objects.create(name='iso-proj', organization=organization, scm_url='https://foo.invalid', scm_type='git', scm_update_on_launch=True)
|
||||
for i in range(2)
|
||||
]
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ def test_job_template_copy(
|
||||
c.save()
|
||||
assert get(reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), alice, expect=200).data['can_copy'] is True
|
||||
jt_copy_pk_alice = post(
|
||||
reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), {'name': 'new jt name alice'}, alice, expect=201
|
||||
reverse('api:job_template_copy', kwargs={'pk': job_template_with_survey_passwords.pk}), {'name': 'new jt name'}, alice, expect=201
|
||||
).data['id']
|
||||
|
||||
jt_copy_admin = type(job_template_with_survey_passwords).objects.get(pk=jt_copy_pk)
|
||||
@@ -53,7 +53,7 @@ def test_job_template_copy(
|
||||
assert jt_copy_alice.created_by == alice
|
||||
|
||||
for jt_copy in (jt_copy_admin, jt_copy_alice):
|
||||
assert jt_copy.name.startswith('new jt name')
|
||||
assert jt_copy.name == 'new jt name'
|
||||
assert jt_copy.project == project
|
||||
assert jt_copy.inventory == inventory
|
||||
assert jt_copy.playbook == job_template_with_survey_passwords.playbook
|
||||
|
||||
@@ -19,7 +19,7 @@ from awx.main.models import (
|
||||
ExecutionEnvironment,
|
||||
)
|
||||
from awx.main.tasks.system import cluster_node_heartbeat
|
||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
||||
from awx.main.tasks.facts import update_hosts
|
||||
|
||||
from django.db import OperationalError
|
||||
from django.test.utils import override_settings
|
||||
@@ -39,13 +39,13 @@ def test_orphan_unified_job_creation(instance, inventory):
|
||||
@pytest.mark.django_db
|
||||
@mock.patch('awx.main.tasks.system.inspect_execution_and_hop_nodes', lambda *args, **kwargs: None)
|
||||
@mock.patch('awx.main.models.ha.get_cpu_effective_capacity', lambda cpu, is_control_node: 8)
|
||||
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 64)
|
||||
@mock.patch('awx.main.models.ha.get_mem_effective_capacity', lambda mem, is_control_node: 62)
|
||||
def test_job_capacity_and_with_inactive_node():
|
||||
i = Instance.objects.create(hostname='test-1')
|
||||
i.save_health_data('18.0.1', 2, 8000)
|
||||
assert i.enabled is True
|
||||
assert i.capacity_adjustment == 0.75
|
||||
assert i.capacity == 50
|
||||
assert i.capacity_adjustment == 1.0
|
||||
assert i.capacity == 62
|
||||
i.enabled = False
|
||||
i.save()
|
||||
with override_settings(CLUSTER_HOST_ID=i.hostname):
|
||||
@@ -128,7 +128,7 @@ class TestAnsibleFactsSave:
|
||||
assert inventory.hosts.count() == 3
|
||||
Host.objects.get(pk=last_pk).delete()
|
||||
assert inventory.hosts.count() == 2
|
||||
bulk_update_sorted_by_id(Host, hosts, fields=['ansible_facts'])
|
||||
update_hosts(hosts)
|
||||
assert inventory.hosts.count() == 2
|
||||
for host in inventory.hosts.all():
|
||||
host.refresh_from_db()
|
||||
@@ -141,7 +141,7 @@ class TestAnsibleFactsSave:
|
||||
db_mock = mocker.patch('awx.main.tasks.facts.Host.objects.bulk_update')
|
||||
db_mock.side_effect = OperationalError('deadlock detected')
|
||||
with pytest.raises(OperationalError):
|
||||
bulk_update_sorted_by_id(Host, hosts, fields=['ansible_facts'])
|
||||
update_hosts(hosts)
|
||||
|
||||
def fake_bulk_update(self, host_list):
|
||||
if self.current_call > 2:
|
||||
@@ -149,28 +149,16 @@ class TestAnsibleFactsSave:
|
||||
self.current_call += 1
|
||||
raise OperationalError('deadlock detected')
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_update_hosts_resolved_deadlock(inventory, mocker):
|
||||
|
||||
hosts = [Host.objects.create(inventory=inventory, name=f'foo{i}') for i in range(3)]
|
||||
|
||||
# Set ansible_facts for each host
|
||||
for host in hosts:
|
||||
host.ansible_facts = {'foo': 'bar'}
|
||||
|
||||
bulk_update_sorted_by_id(Host, hosts, fields=['ansible_facts'])
|
||||
|
||||
# Save changes and refresh from DB to ensure the updated facts are saved
|
||||
for host in hosts:
|
||||
host.save() # Ensure changes are persisted in the DB
|
||||
host.refresh_from_db() # Refresh from DB to get latest data
|
||||
|
||||
# Assert that the ansible_facts were updated correctly
|
||||
for host in inventory.hosts.all():
|
||||
assert host.ansible_facts == {'foo': 'bar'}
|
||||
|
||||
bulk_update_sorted_by_id(Host, hosts, fields=['ansible_facts'])
|
||||
def test_update_hosts_resolved_deadlock(self, inventory, mocker):
|
||||
hosts = [Host.objects.create(inventory=inventory, name=f'foo{i}') for i in range(3)]
|
||||
for host in hosts:
|
||||
host.ansible_facts = {'foo': 'bar'}
|
||||
self.current_call = 0
|
||||
mocker.patch('awx.main.tasks.facts.raw_update_hosts', new=self.fake_bulk_update)
|
||||
update_hosts(hosts)
|
||||
for host in inventory.hosts.all():
|
||||
host.refresh_from_db()
|
||||
assert host.ansible_facts == {'foo': 'bar'}
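# Illustrative sketch only (not AWX source): the tests above exercise retrying a
# bulk update after a transient "deadlock detected" OperationalError. A minimal
# version of that idea is sketched below; sorting by primary key gives
# concurrent writers a consistent lock order, and the retry budget of 3 is an
# assumed example value.
from django.db import OperationalError


def _sketch_bulk_update_with_retry(model, objs, fields, attempts=3):
    objs = sorted(objs, key=lambda obj: obj.pk)  # consistent lock ordering
    for attempt in range(attempts):
        try:
            return model.objects.bulk_update(objs, fields)
        except OperationalError:
            # give up and re-raise once the assumed retry budget is exhausted
            if attempt == attempts - 1:
                raise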
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -106,37 +106,3 @@ class TestMigrationSmoke:
|
||||
)
|
||||
DABPermission = new_state.apps.get_model('dab_rbac', 'DABPermission')
|
||||
assert not DABPermission.objects.filter(codename='view_executionenvironment').exists()
|
||||
|
||||
# Test creating a Project with a duplicate name
|
||||
Organization = new_state.apps.get_model('main', 'Organization')
|
||||
Project = new_state.apps.get_model('main', 'Project')
|
||||
org = Organization.objects.create(name='duplicate-obj-organization', created=now(), modified=now())
|
||||
proj_ids = []
|
||||
for i in range(3):
|
||||
proj = Project.objects.create(name='duplicate-project-name', organization=org, created=now(), modified=now())
|
||||
proj_ids.append(proj.id)
|
||||
|
||||
# The uniqueness rules will not apply to InventorySource
|
||||
Inventory = new_state.apps.get_model('main', 'Inventory')
|
||||
InventorySource = new_state.apps.get_model('main', 'InventorySource')
|
||||
inv = Inventory.objects.create(name='migration-test-inv', organization=org, created=now(), modified=now())
|
||||
InventorySource.objects.create(name='migration-test-src', source='file', inventory=inv, organization=org, created=now(), modified=now())
|
||||
|
||||
new_state = migrator.apply_tested_migration(
|
||||
('main', '0200_template_name_constraint'),
|
||||
)
|
||||
for i, proj_id in enumerate(proj_ids):
|
||||
proj = Project.objects.get(id=proj_id)
|
||||
if i == 0:
|
||||
assert proj.name == 'duplicate-project-name'
|
||||
else:
|
||||
assert proj.name != 'duplicate-project-name'
|
||||
assert proj.name.startswith('duplicate-project-name')
|
||||
|
||||
# The inventory source had this field set to avoid the constraints
|
||||
InventorySource = new_state.apps.get_model('main', 'InventorySource')
|
||||
inv_src = InventorySource.objects.get(name='migration-test-src')
|
||||
assert inv_src.org_unique is False
|
||||
Project = new_state.apps.get_model('main', 'Project')
|
||||
for proj in Project.objects.all():
|
||||
assert proj.org_unique is True
@@ -1,633 +0,0 @@
|
||||
import json
|
||||
import os
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
import requests.exceptions
|
||||
from django.test import override_settings
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
Inventory,
|
||||
Project,
|
||||
Organization,
|
||||
JobTemplate,
|
||||
Credential,
|
||||
CredentialType,
|
||||
User,
|
||||
Team,
|
||||
Label,
|
||||
WorkflowJob,
|
||||
WorkflowJobNode,
|
||||
InventorySource,
|
||||
)
|
||||
from awx.main.exceptions import PolicyEvaluationError
|
||||
from awx.main.tasks import policy
|
||||
from awx.main.tasks.policy import JobSerializer, OPA_AUTH_TYPES
|
||||
|
||||
|
||||
def _parse_exception_message(exception: PolicyEvaluationError):
|
||||
pe_plain = str(exception.value)
|
||||
|
||||
assert "This job cannot be executed due to a policy violation or error. See the following details:" in pe_plain
|
||||
|
||||
violation_message = "This job cannot be executed due to a policy violation or error. See the following details:"
|
||||
return eval(pe_plain.split(violation_message)[1].strip())
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def enable_flag():
|
||||
with override_settings(
|
||||
OPA_HOST='opa.example.com',
|
||||
FLAGS={"FEATURE_POLICY_AS_CODE_ENABLED": [("boolean", True)]},
|
||||
FLAG_SOURCES=('flags.sources.SettingsFlagsSource',),
|
||||
):
|
||||
yield
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def opa_client():
|
||||
cls_mock = mock.MagicMock(name='OpaClient')
|
||||
instance_mock = cls_mock.return_value
|
||||
instance_mock.__enter__.return_value = instance_mock
|
||||
|
||||
with mock.patch('awx.main.tasks.policy.OpaClient', cls_mock):
|
||||
yield instance_mock
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def job():
|
||||
project: Project = Project.objects.create(name='proj1', scm_type='git', scm_branch='main', scm_url='https://git.example.com/proj1')
|
||||
inventory: Inventory = Inventory.objects.create(name='inv1', opa_query_path="inventory/response")
|
||||
org: Organization = Organization.objects.create(name="org1", opa_query_path="organization/response")
|
||||
jt: JobTemplate = JobTemplate.objects.create(name="jt1", opa_query_path="job_template/response")
|
||||
job: Job = Job.objects.create(name='job1', extra_vars="{}", inventory=inventory, project=project, organization=org, job_template=jt)
|
||||
return job
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_job_serializer():
|
||||
user: User = User.objects.create(username='user1')
|
||||
org: Organization = Organization.objects.create(name='org1')
|
||||
|
||||
team: Team = Team.objects.create(name='team1', organization=org)
|
||||
team.admin_role.members.add(user)
|
||||
|
||||
project: Project = Project.objects.create(name='proj1', scm_type='git', scm_branch='main', scm_url='https://git.example.com/proj1')
|
||||
inventory: Inventory = Inventory.objects.create(name='inv1', description='Demo inventory')
|
||||
inventory_source: InventorySource = InventorySource.objects.create(name='inv-src1', source='file', inventory=inventory)
|
||||
extra_vars = {"FOO": "value1", "BAR": "value2"}
|
||||
|
||||
CredentialType.setup_tower_managed_defaults()
|
||||
cred_type_ssh: CredentialType = CredentialType.objects.get(kind='ssh')
|
||||
cred: Credential = Credential.objects.create(name="cred1", description='Demo credential', credential_type=cred_type_ssh, organization=org)
|
||||
|
||||
label: Label = Label.objects.create(name='label1', organization=org)
|
||||
|
||||
job: Job = Job.objects.create(
|
||||
name='job1', extra_vars=json.dumps(extra_vars), inventory=inventory, project=project, organization=org, created_by=user, launch_type='workflow'
|
||||
)
|
||||
# job.unified_job_node.workflow_job = workflow_job
|
||||
job.credentials.add(cred)
|
||||
job.labels.add(label)
|
||||
|
||||
workflow_job: WorkflowJob = WorkflowJob.objects.create(name='wf-job1')
|
||||
WorkflowJobNode.objects.create(job=job, workflow_job=workflow_job)
|
||||
|
||||
serializer = JobSerializer(instance=job)
|
||||
|
||||
assert serializer.data == {
|
||||
'id': job.id,
|
||||
'name': 'job1',
|
||||
'created': job.created.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
|
||||
'created_by': {
|
||||
'id': user.id,
|
||||
'username': 'user1',
|
||||
'is_superuser': False,
|
||||
'teams': [
|
||||
{'id': team.id, 'name': 'team1'},
|
||||
],
|
||||
},
|
||||
'credentials': [
|
||||
{
|
||||
'id': cred.id,
|
||||
'name': 'cred1',
|
||||
'description': 'Demo credential',
|
||||
'organization': {
|
||||
'id': org.id,
|
||||
'name': 'org1',
|
||||
},
|
||||
'credential_type': cred_type_ssh.id,
|
||||
'kind': 'ssh',
|
||||
'managed': False,
|
||||
'kubernetes': False,
|
||||
'cloud': False,
|
||||
},
|
||||
],
|
||||
'execution_environment': None,
|
||||
'extra_vars': extra_vars,
|
||||
'forks': 0,
|
||||
'hosts_count': 0,
|
||||
'instance_group': None,
|
||||
'inventory': {
|
||||
'id': inventory.id,
|
||||
'name': 'inv1',
|
||||
'description': 'Demo inventory',
|
||||
'kind': '',
|
||||
'total_hosts': 0,
|
||||
'total_groups': 0,
|
||||
'has_inventory_sources': False,
|
||||
'total_inventory_sources': 0,
|
||||
'has_active_failures': False,
|
||||
'hosts_with_active_failures': 0,
|
||||
'inventory_sources': [
|
||||
{
|
||||
'id': inventory_source.id,
|
||||
'name': 'inv-src1',
|
||||
'source': 'file',
|
||||
'status': 'never updated',
|
||||
}
|
||||
],
|
||||
},
|
||||
'job_template': None,
|
||||
'job_type': 'run',
|
||||
'job_type_name': 'job',
|
||||
'labels': [
|
||||
{
|
||||
'id': label.id,
|
||||
'name': 'label1',
|
||||
'organization': {
|
||||
'id': org.id,
|
||||
'name': 'org1',
|
||||
},
|
||||
},
|
||||
],
|
||||
'launch_type': 'workflow',
|
||||
'limit': '',
|
||||
'launched_by': {},
|
||||
'organization': {
|
||||
'id': org.id,
|
||||
'name': 'org1',
|
||||
},
|
||||
'playbook': '',
|
||||
'project': {
|
||||
'id': project.id,
|
||||
'name': 'proj1',
|
||||
'status': 'pending',
|
||||
'scm_type': 'git',
|
||||
'scm_url': 'https://git.example.com/proj1',
|
||||
'scm_branch': 'main',
|
||||
'scm_refspec': '',
|
||||
'scm_clean': False,
|
||||
'scm_track_submodules': False,
|
||||
'scm_delete_on_update': False,
|
||||
},
|
||||
'scm_branch': '',
|
||||
'scm_revision': '',
|
||||
'workflow_job': {
|
||||
'id': workflow_job.id,
|
||||
'name': 'wf-job1',
|
||||
},
|
||||
'workflow_job_template': None,
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_missing_opa_query_path_field(opa_client):
|
||||
project: Project = Project.objects.create(name='proj1', scm_type='git', scm_branch='main', scm_url='https://git.example.com/proj1')
|
||||
inventory: Inventory = Inventory.objects.create(name='inv1')
|
||||
org: Organization = Organization.objects.create(name="org1")
|
||||
jt: JobTemplate = JobTemplate.objects.create(name="jt1")
|
||||
job: Job = Job.objects.create(name='job1', extra_vars="{}", inventory=inventory, project=project, organization=org, job_template=jt)
|
||||
|
||||
response = {
|
||||
"result": {
|
||||
"allowed": True,
|
||||
"violations": [],
|
||||
}
|
||||
}
|
||||
opa_client.query_rule.return_value = response
|
||||
try:
|
||||
policy.evaluate_policy(job)
|
||||
except PolicyEvaluationError as e:
|
||||
pytest.fail(f"Must not raise PolicyEvaluationError: {e}")
|
||||
|
||||
assert opa_client.query_rule.call_count == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy(opa_client, job):
|
||||
response = {
|
||||
"result": {
|
||||
"allowed": True,
|
||||
"violations": [],
|
||||
}
|
||||
}
|
||||
opa_client.query_rule.return_value = response
|
||||
try:
|
||||
policy.evaluate_policy(job)
|
||||
except PolicyEvaluationError as e:
|
||||
pytest.fail(f"Must not raise PolicyEvaluationError: {e}")
|
||||
|
||||
opa_client.query_rule.assert_has_calls(
|
||||
[
|
||||
mock.call(input_data=mock.ANY, package_path='organization/response'),
|
||||
mock.call(input_data=mock.ANY, package_path='inventory/response'),
|
||||
mock.call(input_data=mock.ANY, package_path='job_template/response'),
|
||||
],
|
||||
any_order=False,
|
||||
)
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_allowed(opa_client, job):
|
||||
response = {
|
||||
"result": {
|
||||
"allowed": True,
|
||||
"violations": [],
|
||||
}
|
||||
}
|
||||
opa_client.query_rule.return_value = response
|
||||
try:
|
||||
policy.evaluate_policy(job)
|
||||
except PolicyEvaluationError as e:
|
||||
pytest.fail(f"Must not raise PolicyEvaluationError: {e}")
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_not_allowed(opa_client, job):
|
||||
response = {
|
||||
"result": {
|
||||
"allowed": False,
|
||||
"violations": ["Access not allowed."],
|
||||
}
|
||||
}
|
||||
opa_client.query_rule.return_value = response
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
pe_plain = str(pe.value)
|
||||
assert "Errors:" not in pe_plain
|
||||
|
||||
exception = _parse_exception_message(pe)
|
||||
|
||||
assert exception["Violations"]["Organization"] == ["Access not allowed."]
|
||||
assert exception["Violations"]["Inventory"] == ["Access not allowed."]
|
||||
assert exception["Violations"]["Job template"] == ["Access not allowed."]
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_not_found(opa_client, job):
|
||||
response = {}
|
||||
opa_client.query_rule.return_value = response
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
missing_result_property = 'Call to OPA did not return a "result" property. The path refers to an undefined document.'
|
||||
|
||||
exception = _parse_exception_message(pe)
|
||||
assert exception["Errors"]["Organization"] == missing_result_property
|
||||
assert exception["Errors"]["Inventory"] == missing_result_property
|
||||
assert exception["Errors"]["Job template"] == missing_result_property
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_server_error(opa_client, job):
|
||||
http_error_msg = '500 Server Error: Internal Server Error for url: https://opa.example.com:8181/v1/data/job_template/response/invalid'
|
||||
error_response = {
|
||||
'code': 'internal_error',
|
||||
'message': (
|
||||
'1 error occurred: 1:1: rego_type_error: undefined ref: data.job_template.response.invalid\n\t'
|
||||
'data.job_template.response.invalid\n\t'
|
||||
' ^\n\t'
|
||||
' have: "invalid"\n\t'
|
||||
' want (one of): ["allowed" "violations"]'
|
||||
),
|
||||
}
|
||||
response = mock.Mock()
|
||||
response.status_code = requests.codes.internal_server_error
|
||||
response.json.return_value = error_response
|
||||
|
||||
opa_client.query_rule.side_effect = requests.exceptions.HTTPError(http_error_msg, response=response)
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
exception = _parse_exception_message(pe)
|
||||
assert exception["Errors"]["Organization"] == f'Call to OPA failed. Code: internal_error, Message: {error_response["message"]}'
|
||||
assert exception["Errors"]["Inventory"] == f'Call to OPA failed. Code: internal_error, Message: {error_response["message"]}'
|
||||
assert exception["Errors"]["Job template"] == f'Call to OPA failed. Code: internal_error, Message: {error_response["message"]}'
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_invalid_result(opa_client, job):
|
||||
response = {
|
||||
"result": {
|
||||
"absolutely": "no!",
|
||||
}
|
||||
}
|
||||
opa_client.query_rule.return_value = response
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
invalid_result = 'OPA policy returned invalid result.'
|
||||
|
||||
exception = _parse_exception_message(pe)
|
||||
assert exception["Errors"]["Organization"] == invalid_result
|
||||
assert exception["Errors"]["Inventory"] == invalid_result
|
||||
assert exception["Errors"]["Job template"] == invalid_result
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_evaluate_policy_failed_exception(opa_client, job):
|
||||
error_response = {}
|
||||
response = mock.Mock()
|
||||
response.status_code = requests.codes.internal_server_error
|
||||
response.json.return_value = error_response
|
||||
|
||||
opa_client.query_rule.side_effect = ValueError("Invalid JSON")
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
opa_failed_exception = 'Call to OPA failed. Exception: Invalid JSON'
|
||||
|
||||
exception = _parse_exception_message(pe)
|
||||
assert exception["Errors"]["Organization"] == opa_failed_exception
|
||||
assert exception["Errors"]["Inventory"] == opa_failed_exception
|
||||
assert exception["Errors"]["Job template"] == opa_failed_exception
|
||||
|
||||
assert opa_client.query_rule.call_count == 3
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"settings_kwargs, expected_client_cert, expected_verify, verify_content",
|
||||
[
|
||||
# Case 1: Certificate-based authentication (mTLS)
|
||||
(
|
||||
{
|
||||
"OPA_HOST": "opa.example.com",
|
||||
"OPA_SSL": True,
|
||||
"OPA_AUTH_TYPE": OPA_AUTH_TYPES.CERTIFICATE,
|
||||
"OPA_AUTH_CLIENT_CERT": "-----BEGIN CERTIFICATE-----\nMIICert\n-----END CERTIFICATE-----",
|
||||
"OPA_AUTH_CLIENT_KEY": "-----BEGIN PRIVATE KEY-----\nMIIKey\n-----END PRIVATE KEY-----",
|
||||
"OPA_AUTH_CA_CERT": "-----BEGIN CERTIFICATE-----\nMIICACert\n-----END CERTIFICATE-----",
|
||||
},
|
||||
True, # Client cert should be created
|
||||
"file", # Verify path should be a file
|
||||
"-----BEGIN CERTIFICATE-----", # Expected content in verify file
|
||||
),
|
||||
# Case 2: SSL with server verification only
|
||||
(
|
||||
{
|
||||
"OPA_HOST": "opa.example.com",
|
||||
"OPA_SSL": True,
|
||||
"OPA_AUTH_TYPE": OPA_AUTH_TYPES.NONE,
|
||||
"OPA_AUTH_CA_CERT": "-----BEGIN CERTIFICATE-----\nMIICACert\n-----END CERTIFICATE-----",
|
||||
},
|
||||
False, # No client cert should be created
|
||||
"file", # Verify path should be a file
|
||||
"-----BEGIN CERTIFICATE-----", # Expected content in verify file
|
||||
),
|
||||
# Case 3: SSL with system CA store
|
||||
(
|
||||
{
|
||||
"OPA_HOST": "opa.example.com",
|
||||
"OPA_SSL": True,
|
||||
"OPA_AUTH_TYPE": OPA_AUTH_TYPES.NONE,
|
||||
"OPA_AUTH_CA_CERT": "", # No custom CA cert
|
||||
},
|
||||
False, # No client cert should be created
|
||||
True, # Verify path should be True (system CA store)
|
||||
None, # No file to check content
|
||||
),
|
||||
# Case 4: No SSL
|
||||
(
|
||||
{
|
||||
"OPA_HOST": "opa.example.com",
|
||||
"OPA_SSL": False,
|
||||
"OPA_AUTH_TYPE": OPA_AUTH_TYPES.NONE,
|
||||
},
|
||||
False, # No client cert should be created
|
||||
False, # Verify path should be False (no verification)
|
||||
None, # No file to check content
|
||||
),
|
||||
],
|
||||
ids=[
|
||||
"certificate_auth",
|
||||
"ssl_server_verification",
|
||||
"ssl_system_ca_store",
|
||||
"no_ssl",
|
||||
],
|
||||
)
|
||||
def test_opa_cert_file(settings_kwargs, expected_client_cert, expected_verify, verify_content):
|
||||
"""Parameterized test for the opa_cert_file context manager.
|
||||
|
||||
Tests different configurations:
|
||||
- Certificate-based authentication (mTLS)
|
||||
- SSL with server verification only
|
||||
- SSL with system CA store
|
||||
- No SSL
|
||||
"""
|
||||
with override_settings(**settings_kwargs):
|
||||
client_cert_path = None
|
||||
verify_path = None
|
||||
|
||||
with policy.opa_cert_file() as cert_files:
|
||||
client_cert_path, verify_path = cert_files
|
||||
|
||||
# Check client cert based on expected_client_cert
|
||||
if expected_client_cert:
|
||||
assert client_cert_path is not None
|
||||
with open(client_cert_path, 'r') as f:
|
||||
content = f.read()
|
||||
assert "-----BEGIN CERTIFICATE-----" in content
|
||||
assert "-----BEGIN PRIVATE KEY-----" in content
|
||||
else:
|
||||
assert client_cert_path is None
|
||||
|
||||
# Check verify path based on expected_verify
|
||||
if expected_verify == "file":
|
||||
assert verify_path is not None
|
||||
assert os.path.isfile(verify_path)
|
||||
with open(verify_path, 'r') as f:
|
||||
content = f.read()
|
||||
assert verify_content in content
|
||||
else:
|
||||
assert verify_path is expected_verify
|
||||
|
||||
# Verify files are deleted after context manager exits
|
||||
if expected_client_cert:
|
||||
assert not os.path.exists(client_cert_path), "Client cert file was not deleted"
|
||||
|
||||
if expected_verify == "file":
|
||||
assert not os.path.exists(verify_path), "CA cert file was not deleted"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@override_settings(
|
||||
OPA_HOST='opa.example.com',
|
||||
OPA_SSL=False, # SSL disabled
|
||||
OPA_AUTH_TYPE=OPA_AUTH_TYPES.CERTIFICATE, # But cert auth enabled
|
||||
OPA_AUTH_CLIENT_CERT="-----BEGIN CERTIFICATE-----\nMIICert\n-----END CERTIFICATE-----",
|
||||
OPA_AUTH_CLIENT_KEY="-----BEGIN PRIVATE KEY-----\nMIIKey\n-----END PRIVATE KEY-----",
|
||||
)
|
||||
def test_evaluate_policy_cert_auth_requires_ssl():
|
||||
"""Test that policy evaluation raises an error when certificate auth is used without SSL."""
|
||||
project = Project.objects.create(name='proj1')
|
||||
inventory = Inventory.objects.create(name='inv1', opa_query_path="inventory/response")
|
||||
org = Organization.objects.create(name="org1", opa_query_path="organization/response")
|
||||
jt = JobTemplate.objects.create(name="jt1", opa_query_path="job_template/response")
|
||||
job = Job.objects.create(name='job1', extra_vars="{}", inventory=inventory, project=project, organization=org, job_template=jt)
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
assert "OPA_AUTH_TYPE=Certificate requires OPA_SSL to be enabled" in str(pe.value)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@override_settings(
|
||||
OPA_HOST='opa.example.com',
|
||||
OPA_SSL=True,
|
||||
OPA_AUTH_TYPE=OPA_AUTH_TYPES.CERTIFICATE,
|
||||
OPA_AUTH_CLIENT_CERT="", # Missing client cert
|
||||
OPA_AUTH_CLIENT_KEY="", # Missing client key
|
||||
OPA_AUTH_CA_CERT="", # Missing CA cert
|
||||
)
|
||||
def test_evaluate_policy_missing_cert_settings():
|
||||
"""Test that policy evaluation raises an error when certificate settings are missing."""
|
||||
project = Project.objects.create(name='proj1')
|
||||
inventory = Inventory.objects.create(name='inv1', opa_query_path="inventory/response")
|
||||
org = Organization.objects.create(name="org1", opa_query_path="organization/response")
|
||||
jt = JobTemplate.objects.create(name="jt1", opa_query_path="job_template/response")
|
||||
job = Job.objects.create(name='job1', extra_vars="{}", inventory=inventory, project=project, organization=org, job_template=jt)
|
||||
|
||||
with pytest.raises(PolicyEvaluationError) as pe:
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
error_msg = str(pe.value)
|
||||
assert "Following certificate settings are missing for OPA_AUTH_TYPE=Certificate:" in error_msg
|
||||
assert "OPA_AUTH_CLIENT_CERT" in error_msg
|
||||
assert "OPA_AUTH_CLIENT_KEY" in error_msg
|
||||
assert "OPA_AUTH_CA_CERT" in error_msg
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@override_settings(
|
||||
OPA_HOST='opa.example.com',
|
||||
OPA_PORT=8181,
|
||||
OPA_SSL=True,
|
||||
OPA_AUTH_TYPE=OPA_AUTH_TYPES.CERTIFICATE,
|
||||
OPA_AUTH_CLIENT_CERT="-----BEGIN CERTIFICATE-----\nMIICert\n-----END CERTIFICATE-----",
|
||||
OPA_AUTH_CLIENT_KEY="-----BEGIN PRIVATE KEY-----\nMIIKey\n-----END PRIVATE KEY-----",
|
||||
OPA_AUTH_CA_CERT="-----BEGIN CERTIFICATE-----\nMIICACert\n-----END CERTIFICATE-----",
|
||||
OPA_REQUEST_TIMEOUT=2.5,
|
||||
OPA_REQUEST_RETRIES=3,
|
||||
)
|
||||
def test_opa_client_context_manager_mtls():
|
||||
"""Test that opa_client context manager correctly initializes the OPA client."""
|
||||
# Mock the OpaClient class
|
||||
with mock.patch('awx.main.tasks.policy.OpaClient') as mock_opa_client:
|
||||
# Setup the mock
|
||||
mock_instance = mock_opa_client.return_value
|
||||
mock_instance.__enter__.return_value = mock_instance
|
||||
mock_instance._session = mock.MagicMock()
|
||||
|
||||
# Use the context manager
|
||||
with policy.opa_client(headers={'Custom-Header': 'Value'}) as client:
|
||||
# Verify the client was initialized with the correct parameters
|
||||
mock_opa_client.assert_called_once_with(
|
||||
host='opa.example.com',
|
||||
port=8181,
|
||||
headers={'Custom-Header': 'Value'},
|
||||
ssl=True,
|
||||
cert=mock.ANY, # We can't check the exact value as it's a temporary file
|
||||
timeout=2.5,
|
||||
retries=3,
|
||||
)
|
||||
|
||||
# Verify the session properties were set correctly
|
||||
assert client._session.cert is not None
|
||||
assert client._session.verify is not None
|
||||
|
||||
# Check the content of the cert file
|
||||
cert_file_path = client._session.cert
|
||||
assert os.path.isfile(cert_file_path)
|
||||
with open(cert_file_path, 'r') as f:
|
||||
cert_content = f.read()
|
||||
assert "-----BEGIN CERTIFICATE-----" in cert_content
|
||||
assert "MIICert" in cert_content
|
||||
assert "-----BEGIN PRIVATE KEY-----" in cert_content
|
||||
assert "MIIKey" in cert_content
|
||||
|
||||
# Check the content of the verify file
|
||||
verify_file_path = client._session.verify
|
||||
assert os.path.isfile(verify_file_path)
|
||||
with open(verify_file_path, 'r') as f:
|
||||
verify_content = f.read()
|
||||
assert "-----BEGIN CERTIFICATE-----" in verify_content
|
||||
assert "MIICACert" in verify_content
|
||||
|
||||
# Verify the client is the mocked instance
|
||||
assert client is mock_instance
|
||||
|
||||
# Store file paths for checking after context exit
|
||||
cert_path = client._session.cert
|
||||
verify_path = client._session.verify
|
||||
|
||||
# Verify files are deleted after context manager exits
|
||||
assert not os.path.exists(cert_path), "Client cert file was not deleted"
|
||||
assert not os.path.exists(verify_path), "CA cert file was not deleted"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@override_settings(
|
||||
OPA_HOST='opa.example.com',
|
||||
OPA_SSL=True,
|
||||
OPA_AUTH_TYPE=OPA_AUTH_TYPES.TOKEN,
|
||||
OPA_AUTH_TOKEN='secret-token',
|
||||
OPA_AUTH_CUSTOM_HEADERS={'X-Custom': 'Header'},
|
||||
)
|
||||
def test_opa_client_token_auth():
|
||||
"""Test that token authentication correctly adds the Authorization header."""
|
||||
# Create a job for testing
|
||||
project = Project.objects.create(name='proj1')
|
||||
inventory = Inventory.objects.create(name='inv1', opa_query_path="inventory/response")
|
||||
org = Organization.objects.create(name="org1", opa_query_path="organization/response")
|
||||
jt = JobTemplate.objects.create(name="jt1", opa_query_path="job_template/response")
|
||||
job = Job.objects.create(name='job1', extra_vars="{}", inventory=inventory, project=project, organization=org, job_template=jt)
|
||||
|
||||
# Mock the OpaClient class
|
||||
with mock.patch('awx.main.tasks.policy.opa_client') as mock_opa_client_cm:
|
||||
# Setup the mock
|
||||
mock_client = mock.MagicMock()
|
||||
mock_opa_client_cm.return_value.__enter__.return_value = mock_client
|
||||
mock_client.query_rule.return_value = {
|
||||
"result": {
|
||||
"allowed": True,
|
||||
"violations": [],
|
||||
}
|
||||
}
|
||||
|
||||
# Call evaluate_policy
|
||||
policy.evaluate_policy(job)
|
||||
|
||||
# Verify opa_client was called with the correct headers
|
||||
expected_headers = {'X-Custom': 'Header', 'Authorization': 'Bearer secret-token'}
|
||||
mock_opa_client_cm.assert_called_once_with(headers=expected_headers)
|
||||
@@ -436,22 +436,21 @@ def test_project_list_ordering_by_name(get, order_by, expected_names, organizati
|
||||
|
||||
@pytest.mark.parametrize('order_by', ('name', '-name'))
|
||||
@pytest.mark.django_db
|
||||
def test_project_list_ordering_with_duplicate_names(get, order_by, admin):
|
||||
def test_project_list_ordering_with_duplicate_names(get, order_by, organization_factory):
|
||||
# why? because the names are all '1', so they are identical and you can't sort on name,
|
||||
# meaning you have to fall back on the default sort order, which in this case, is ID
|
||||
'ensure sorted order of project list is maintained correctly when the project names are the same'
|
||||
from awx.main.models import Organization
|
||||
|
||||
projects = []
|
||||
for i in range(5):
|
||||
projects.append(Project.objects.create(name='1', organization=Organization.objects.create(name=f'org{i}')))
|
||||
objects = organization_factory(
|
||||
'org1',
|
||||
projects=['1', '1', '1', '1', '1'],
|
||||
superusers=['admin'],
|
||||
)
|
||||
project_ids = {}
|
||||
for x in range(3):
|
||||
results = get(reverse('api:project_list'), user=admin, QUERY_STRING='order_by=%s' % order_by).data['results']
|
||||
results = get(reverse('api:project_list'), objects.superusers.admin, QUERY_STRING='order_by=%s' % order_by).data['results']
|
||||
project_ids[x] = [proj['id'] for proj in results]
|
||||
assert project_ids[0] == project_ids[1] == project_ids[2]
|
||||
assert project_ids[0] == sorted(project_ids[0])
|
||||
assert set(project_ids[0]) == set([proj.id for proj in projects])
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
|
||||
@@ -36,7 +36,7 @@ def test_bootstrap_consistent():
|
||||
assert not different_requirements
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason="This test needs some love")
|
||||
@pytest.mark.skip(reason="This test needs some love")
|
||||
def test_env_matches_requirements_txt():
|
||||
from pip.operations import freeze
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ class TestWebsocketEventConsumer:
|
||||
connected, _ = await server.connect()
|
||||
assert connected is False, "Anonymous user should NOT be allowed to login."
|
||||
|
||||
@pytest.mark.xfail(reason="Ran out of coding time.")
|
||||
@pytest.mark.skip(reason="Ran out of coding time.")
|
||||
async def test_authorized(self, websocket_server_generator, application, admin):
|
||||
server = websocket_server_generator('/websocket/')
|
||||
|
||||
|
||||
@@ -1,77 +0,0 @@
|
||||
import multiprocessing
|
||||
import json
|
||||
|
||||
import pytest
|
||||
|
||||
import requests
|
||||
from requests.auth import HTTPBasicAuth
|
||||
|
||||
from django.db import connection
|
||||
|
||||
from awx.main.models import User, JobTemplate
|
||||
|
||||
|
||||
def create_in_subprocess(project_id, ready_event, continue_event, admin_auth):
|
||||
connection.connect()
|
||||
|
||||
print('setting ready event')
|
||||
ready_event.set()
|
||||
print('waiting for continue event')
|
||||
continue_event.wait()
|
||||
|
||||
if JobTemplate.objects.filter(name='test_jt_duplicate_name').exists():
|
||||
for jt in JobTemplate.objects.filter(name='test_jt_duplicate_name'):
|
||||
jt.delete()
|
||||
assert JobTemplate.objects.filter(name='test_jt_duplicate_name').count() == 0
|
||||
|
||||
jt_data = {'name': 'test_jt_duplicate_name', 'project': project_id, 'playbook': 'hello_world.yml', 'ask_inventory_on_launch': True}
|
||||
response = requests.post('http://localhost:8013/api/v2/job_templates/', json=jt_data, auth=admin_auth)
|
||||
# should either hit a name conflict (400) or create the template (201)
|
||||
assert response.status_code in (400, 201)
|
||||
print(f'Subprocess got {response.status_code}')
|
||||
if response.status_code == 400:
|
||||
print(json.dumps(response.json(), indent=2))
|
||||
return response.status_code
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def admin_for_test():
|
||||
user, created = User.objects.get_or_create(username='admin_for_test', defaults={'is_superuser': True})
|
||||
if created:
|
||||
user.set_password('for_test_123!')
|
||||
user.save()
|
||||
print(f'Created user {user.username}')
|
||||
return user
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def admin_auth(admin_for_test):
|
||||
return HTTPBasicAuth(admin_for_test.username, 'for_test_123!')
|
||||
|
||||
|
||||
def test_jt_duplicate_name(admin_auth, demo_proj):
|
||||
N_processes = 5
|
||||
ready_events = [multiprocessing.Event() for _ in range(N_processes)]
|
||||
continue_event = multiprocessing.Event()
|
||||
|
||||
processes = []
|
||||
for i in range(N_processes):
|
||||
p = multiprocessing.Process(target=create_in_subprocess, args=(demo_proj.id, ready_events[i], continue_event, admin_auth))
|
||||
processes.append(p)
|
||||
p.start()
|
||||
|
||||
# Ensure all subprocesses are connected and ready before continuing
|
||||
for e in ready_events:
|
||||
print('waiting on subprocess ready event')
|
||||
e.wait()
|
||||
|
||||
# Let all subprocesses attempt the job template creation at the same time
|
||||
print('setting the continue event for the workers')
|
||||
continue_event.set()
|
||||
|
||||
# if a deadlock happens it will probably be surfaced by the result here
|
||||
print('waiting on the workers to finish the creation')
|
||||
for p in processes:
|
||||
p.join()
|
||||
|
||||
assert JobTemplate.objects.filter(name='test_jt_duplicate_name').count() == 1
|
||||
@@ -3,7 +3,6 @@ import time
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import logging
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -20,9 +19,6 @@ from awx.main.tests import data
|
||||
from awx.main.models import Project, JobTemplate, Organization, Inventory
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
PROJ_DATA = os.path.join(os.path.dirname(data.__file__), 'projects')
|
||||
|
||||
|
||||
@@ -114,12 +110,6 @@ def demo_inv(default_org):
|
||||
return inventory
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
def demo_proj(default_org):
|
||||
proj, _ = Project.objects.get_or_create(name='Demo Project', defaults={'organization': default_org})
|
||||
return proj
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def podman_image_generator():
|
||||
"""
|
||||
@@ -138,29 +128,30 @@ def podman_image_generator():
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def project_factory(post, default_org, admin):
|
||||
def _rf(scm_url=None, local_path=None):
|
||||
proj_kwargs = {}
|
||||
def run_job_from_playbook(default_org, demo_inv, post, admin):
|
||||
def _rf(test_name, playbook, local_path=None, scm_url=None, jt_params=None):
|
||||
project_name = f'{test_name} project'
|
||||
jt_name = f'{test_name} JT: {playbook}'
|
||||
|
||||
old_proj = Project.objects.filter(name=project_name).first()
|
||||
if old_proj:
|
||||
old_proj.delete()
|
||||
|
||||
old_jt = JobTemplate.objects.filter(name=jt_name).first()
|
||||
if old_jt:
|
||||
old_jt.delete()
|
||||
|
||||
proj_kwargs = {'name': project_name, 'organization': default_org.id}
|
||||
if local_path:
|
||||
# manual path
|
||||
project_name = f'Manual project {local_path}'
|
||||
proj_kwargs['scm_type'] = ''
|
||||
proj_kwargs['local_path'] = local_path
|
||||
elif scm_url:
|
||||
project_name = f'Project {scm_url}'
|
||||
proj_kwargs['scm_type'] = 'git'
|
||||
proj_kwargs['scm_url'] = scm_url
|
||||
else:
|
||||
raise RuntimeError('Need to provide scm_url or local_path')
|
||||
|
||||
proj_kwargs['name'] = project_name
|
||||
proj_kwargs['organization'] = default_org.id
|
||||
|
||||
old_proj = Project.objects.filter(name=project_name).first()
|
||||
if old_proj:
|
||||
logger.info(f'Deleting existing project {project_name}')
|
||||
old_proj.delete()
|
||||
|
||||
result = post(
|
||||
reverse('api:project_list'),
|
||||
proj_kwargs,
|
||||
@@ -168,23 +159,6 @@ def project_factory(post, default_org, admin):
|
||||
expect=201,
|
||||
)
|
||||
proj = Project.objects.get(id=result.data['id'])
|
||||
return proj
|
||||
|
||||
return _rf
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def run_job_from_playbook(demo_inv, post, admin, project_factory):
|
||||
def _rf(test_name, playbook, local_path=None, scm_url=None, jt_params=None, proj=None):
|
||||
jt_name = f'{test_name} JT: {playbook}'
|
||||
|
||||
if not proj:
|
||||
proj = project_factory(scm_url=scm_url, local_path=local_path)
|
||||
|
||||
old_jt = JobTemplate.objects.filter(name=jt_name).first()
|
||||
if old_jt:
|
||||
logger.info(f'Deleting existing JT {jt_name}')
|
||||
old_jt.delete()
|
||||
|
||||
if proj.current_job:
|
||||
wait_for_job(proj.current_job)
|
||||
@@ -209,6 +183,4 @@ def run_job_from_playbook(demo_inv, post, admin, project_factory):
|
||||
wait_for_job(job)
|
||||
assert job.status == 'successful'
|
||||
|
||||
return {'job': job, 'job_template': jt, 'project': proj}
|
||||
|
||||
return _rf
|
||||
|
||||
@@ -1,20 +1,14 @@
|
||||
import pytest
|
||||
|
||||
from awx.main.tests.live.tests.conftest import wait_for_events, wait_for_job
|
||||
from awx.main.tests.live.tests.conftest import wait_for_events
|
||||
|
||||
from awx.main.models import Job, Inventory
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def facts_project(live_tmp_folder, project_factory):
|
||||
return project_factory(scm_url=f'file://{live_tmp_folder}/facts')
|
||||
|
||||
|
||||
def assert_facts_populated(name):
|
||||
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
|
||||
assert job is not None
|
||||
wait_for_events(job)
|
||||
wait_for_job(job)
|
||||
|
||||
inventory = job.inventory
|
||||
assert inventory.hosts.count() > 0 # sanity
|
||||
@@ -23,24 +17,24 @@ def assert_facts_populated(name):
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def general_facts_test(facts_project, run_job_from_playbook):
|
||||
def general_facts_test(live_tmp_folder, run_job_from_playbook):
|
||||
def _rf(slug, jt_params):
|
||||
jt_params['use_fact_cache'] = True
|
||||
standard_kwargs = dict(jt_params=jt_params)
|
||||
standard_kwargs = dict(scm_url=f'file://{live_tmp_folder}/facts', jt_params=jt_params)
|
||||
|
||||
# GATHER FACTS
|
||||
name = f'test_gather_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'gather.yml', proj=facts_project, **standard_kwargs)
|
||||
run_job_from_playbook(name, 'gather.yml', **standard_kwargs)
|
||||
assert_facts_populated(name)
|
||||
|
||||
# KEEP FACTS
|
||||
name = f'test_clear_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'no_op.yml', proj=facts_project, **standard_kwargs)
|
||||
run_job_from_playbook(name, 'no_op.yml', **standard_kwargs)
|
||||
assert_facts_populated(name)
|
||||
|
||||
# CLEAR FACTS
|
||||
name = f'test_clear_ansible_facts_{slug}'
|
||||
run_job_from_playbook(name, 'clear.yml', proj=facts_project, **standard_kwargs)
|
||||
run_job_from_playbook(name, 'clear.yml', **standard_kwargs)
|
||||
job = Job.objects.filter(name__icontains=name).order_by('-created').first()
|
||||
|
||||
assert job is not None
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
import multiprocessing
|
||||
import random
|
||||
|
||||
from django.db import connection
|
||||
from django.utils.timezone import now
|
||||
|
||||
from awx.main.models import Inventory, Host
|
||||
from awx.main.utils.db import bulk_update_sorted_by_id
|
||||
|
||||
|
||||
def worker_delete_target(ready_event, continue_event, field_name):
|
||||
"""Runs the bulk update, will be called in duplicate, in parallel"""
|
||||
inv = Inventory.objects.get(organization__name='Default', name='test_host_update_contention')
|
||||
host_list = list(inv.hosts.all())
|
||||
# Using random.shuffle for non-security-critical shuffling in a test
|
||||
random.shuffle(host_list) # NOSONAR
|
||||
for i, host in enumerate(host_list):
|
||||
setattr(host, field_name, f'my_var: {i}')
|
||||
|
||||
# ready to do the bulk_update
|
||||
print('worker has loaded all the hosts needed')
|
||||
ready_event.set()
|
||||
# wait for the coordination message
|
||||
continue_event.wait()
|
||||
|
||||
# NOTE: did not reproduce the bug without batch_size
|
||||
bulk_update_sorted_by_id(Host, host_list, fields=[field_name], batch_size=100)
|
||||
print('finished doing the bulk update in worker')
|
||||
|
||||
|
||||
def test_host_update_contention(default_org):
|
||||
inv_kwargs = dict(organization=default_org, name='test_host_update_contention')
|
||||
|
||||
if Inventory.objects.filter(**inv_kwargs).exists():
|
||||
inv = Inventory.objects.get(**inv_kwargs).delete()
|
||||
|
||||
inv = Inventory.objects.create(**inv_kwargs)
|
||||
right_now = now()
|
||||
hosts = [Host(inventory=inv, name=f'host-{i}', created=right_now, modified=right_now) for i in range(1000)]
|
||||
print('bulk creating hosts')
|
||||
Host.objects.bulk_create(hosts)
|
||||
|
||||
# sanity check
|
||||
for host in hosts:
|
||||
assert not host.variables
|
||||
|
||||
# Force our worker pool to make their own connection
|
||||
connection.close()
|
||||
|
||||
ready_events = [multiprocessing.Event() for _ in range(2)]
|
||||
continue_event = multiprocessing.Event()
|
||||
|
||||
print('spawning processes for concurrent bulk updates')
|
||||
processes = []
|
||||
fields = ['variables', 'ansible_facts']
|
||||
for i in range(2):
|
||||
p = multiprocessing.Process(target=worker_delete_target, args=(ready_events[i], continue_event, fields[i]))
|
||||
processes.append(p)
|
||||
p.start()
|
||||
|
||||
# Ensure both processes are connected and have loaded their host lists
|
||||
for e in ready_events:
|
||||
print('waiting on subprocess ready event')
|
||||
e.wait()
|
||||
|
||||
# Begin the bulk_update queries
|
||||
print('setting the continue event for the workers')
|
||||
continue_event.set()
|
||||
|
||||
# if a deadlock happens it will probably be surfaced by the result here
|
||||
print('waiting on the workers to finish the bulk_update')
|
||||
for p in processes:
|
||||
p.join()
|
||||
|
||||
print('checking workers have variables set')
|
||||
for host in inv.hosts.all():
|
||||
assert host.variables.startswith('my_var:')
|
||||
assert host.ansible_facts.startswith('my_var:')
|
||||
@@ -1,224 +0,0 @@
|
||||
import subprocess
|
||||
import time
|
||||
import os.path
|
||||
from urllib.parse import urlsplit
|
||||
|
||||
import pytest
|
||||
from unittest import mock
|
||||
|
||||
from awx.main.models.projects import Project
|
||||
from awx.main.models.organization import Organization
|
||||
from awx.main.models.inventory import Inventory, InventorySource
|
||||
from awx.main.tests.live.tests.conftest import wait_for_job
|
||||
|
||||
|
||||
NAME_PREFIX = "test-ivu"
|
||||
GIT_REPO_FOLDER = "inventory_vars"
|
||||
|
||||
|
||||
def create_new_by_name(model, **kwargs):
|
||||
"""
|
||||
Create a new model instance. Delete an existing instance first.
|
||||
|
||||
:param model: The Django model.
|
||||
:param dict kwargs: The keyword arguments required to create a model
|
||||
instance. Must contain at least `name`.
|
||||
:return: The model instance.
|
||||
"""
|
||||
name = kwargs["name"]
|
||||
try:
|
||||
instance = model.objects.get(name=name)
|
||||
except model.DoesNotExist:
|
||||
pass
|
||||
else:
|
||||
print(f"FORCE DELETE {name}")
|
||||
instance.delete()
|
||||
finally:
|
||||
instance = model.objects.create(**kwargs)
|
||||
return instance
|
||||
|
||||
|
||||
def wait_for_update(instance, timeout=3.0):
|
||||
"""Wait until the last update of *instance* is finished."""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
if instance.current_job or instance.last_job or instance.last_job_run:
|
||||
break
|
||||
time.sleep(0.2)
|
||||
assert instance.current_job or instance.last_job or instance.last_job_run, f'Instance never updated id={instance.id}'
|
||||
update = instance.current_job or instance.last_job
|
||||
if update:
|
||||
wait_for_job(update)
|
||||
|
||||
|
||||
def change_source_vars_and_update(invsrc, group_vars):
|
||||
"""
|
||||
Change the variables content of an inventory source and update its
|
||||
inventory.
|
||||
|
||||
Does not return before the inventory update is finished.
|
||||
|
||||
:param invsrc: The inventory source instance.
|
||||
:param dict group_vars: The variables for various groups. Format::
|
||||
|
||||
{
|
||||
<group>: {<variable>: <value>, <variable>: <value>, ..}, <group>:
|
||||
{<variable>: <value>, <variable>: <value>, ..}, ..
|
||||
}
|
||||
|
||||
:return: None
|
||||
"""
|
||||
project = invsrc.source_project
|
||||
repo_path = urlsplit(project.scm_url).path
|
||||
filepath = os.path.join(repo_path, invsrc.source_path)
|
||||
# print(f"change_source_vars_and_update: {project=} {repo_path=} {filepath=}")
|
||||
with open(filepath, "w") as fp:
|
||||
for group, variables in group_vars.items():
|
||||
fp.write(f"[{group}:vars]\n")
|
||||
for name, value in variables.items():
|
||||
fp.write(f"{name}={value}\n")
|
||||
subprocess.run('git add .; git commit -m "Update variables in invsrc.source_path"', cwd=repo_path, shell=True)
|
||||
# Update the project to sync the changed repo contents.
|
||||
project.update()
|
||||
wait_for_update(project)
|
||||
# Update the inventory from the changed source.
|
||||
invsrc.update()
|
||||
wait_for_update(invsrc)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def organization():
|
||||
name = f"{NAME_PREFIX}-org"
|
||||
instance = create_new_by_name(Organization, name=name, description=f"Description for {name}")
|
||||
yield instance
|
||||
instance.delete()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def project(organization, live_tmp_folder):
|
||||
name = f"{NAME_PREFIX}-project"
|
||||
instance = create_new_by_name(
|
||||
Project,
|
||||
name=name,
|
||||
description=f"Description for {name}",
|
||||
organization=organization,
|
||||
scm_url=f"file://{live_tmp_folder}/{GIT_REPO_FOLDER}",
|
||||
scm_type="git",
|
||||
)
|
||||
yield instance
|
||||
instance.delete()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inventory(organization):
|
||||
name = f"{NAME_PREFIX}-inventory"
|
||||
instance = create_new_by_name(
|
||||
Inventory,
|
||||
name=name,
|
||||
description=f"Description for {name}",
|
||||
organization=organization,
|
||||
)
|
||||
yield instance
|
||||
instance.delete()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inventory_source(inventory, project):
|
||||
name = f"{NAME_PREFIX}-invsrc"
|
||||
inv_src = InventorySource(
|
||||
name=name,
|
||||
source_project=project,
|
||||
source="scm",
|
||||
source_path="inventory_var_deleted_in_source.ini",
|
||||
inventory=inventory,
|
||||
overwrite_vars=True,
|
||||
)
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
inv_src.save()
|
||||
yield inv_src
|
||||
inv_src.delete()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def inventory_source_factory(inventory, project):
|
||||
"""
|
||||
Use this fixture if you want to use multiple inventory sources for the same
|
||||
inventory in your test.
|
||||
"""
|
||||
# https://docs.pytest.org/en/stable/how-to/fixtures.html#factories-as-fixtures
|
||||
|
||||
created = []
|
||||
# repo_path = f"{live_tmp_folder}/{GIT_REPO_FOLDER}"
|
||||
|
||||
def _factory(inventory_file, name):
|
||||
# Make sure the inventory file exists before the inventory source
|
||||
# instance is created.
|
||||
#
|
||||
# Note: The current implementation of the inventory source object allows
|
||||
# to create an instance even when the inventory source file does not
|
||||
# exist. If this behaviour changes, uncomment the following code block
|
||||
# and add the fixture `live_tmp_folder` to the factory function
|
||||
# signature.
|
||||
#
|
||||
# inventory_file_path = os.path.join(repo_path, inventory_file) if not
|
||||
# os.path.isfile(inventory_file_path): with open(inventory_file_path,
|
||||
# "w") as fp: pass subprocess.run(f'git add .; git commit -m "Create
|
||||
# {inventory_file_path}"', cwd=repo_path, shell=True)
|
||||
#
|
||||
# Create the inventory source instance.
|
||||
name = f"{NAME_PREFIX}-invsrc-{name}"
|
||||
inv_src = InventorySource(
|
||||
name=name,
|
||||
source_project=project,
|
||||
source="scm",
|
||||
source_path=inventory_file,
|
||||
inventory=inventory,
|
||||
overwrite_vars=True,
|
||||
)
|
||||
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
|
||||
inv_src.save()
|
||||
return inv_src
|
||||
|
||||
yield _factory
|
||||
for instance in created:
|
||||
instance.delete()
|
||||
|
||||
|
||||
def test_inventory_var_deleted_in_source(inventory, inventory_source):
|
||||
"""
|
||||
Verify that a variable which is deleted from its (git-)source between two
|
||||
updates is also deleted from the inventory.
|
||||
|
||||
Verifies https://issues.redhat.com/browse/AAP-17690
|
||||
"""
|
||||
inventory_source.update()
|
||||
wait_for_update(inventory_source)
|
||||
assert {"a": "value_a", "b": "value_b"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
# Remove variable `a` from source and verify that it is also removed from
|
||||
# the inventory variables.
|
||||
change_source_vars_and_update(inventory_source, {"all": {"b": "value_b"}})
|
||||
assert {"b": "value_b"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
|
||||
|
||||
def test_inventory_vars_with_multiple_sources(inventory, inventory_source_factory):
|
||||
"""
|
||||
Verify a sequence of updates from various sources with changing content.
|
||||
"""
|
||||
invsrc_a = inventory_source_factory("invsrc_a.ini", "A")
|
||||
invsrc_b = inventory_source_factory("invsrc_b.ini", "B")
|
||||
invsrc_c = inventory_source_factory("invsrc_c.ini", "C")
|
||||
|
||||
change_source_vars_and_update(invsrc_a, {"all": {"x": "x_from_a", "y": "y_from_a"}})
|
||||
assert {"x": "x_from_a", "y": "y_from_a"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_b, {"all": {"x": "x_from_b", "y": "y_from_b", "z": "z_from_b"}})
|
||||
assert {"x": "x_from_b", "y": "y_from_b", "z": "z_from_b"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_c, {"all": {"x": "x_from_c", "z": "z_from_c"}})
|
||||
assert {"x": "x_from_c", "y": "y_from_b", "z": "z_from_c"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_b, {"all": {}})
|
||||
assert {"x": "x_from_c", "y": "y_from_a", "z": "z_from_c"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_c, {"all": {"z": "z_from_c"}})
|
||||
assert {"x": "x_from_a", "y": "y_from_a", "z": "z_from_c"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_a, {"all": {}})
|
||||
assert {"z": "z_from_c"} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
change_source_vars_and_update(invsrc_c, {"all": {}})
|
||||
assert {} == Inventory.objects.get(name=inventory.name).variables_dict
|
||||
@@ -1,6 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from awx.main.models import (
|
||||
@@ -13,8 +15,6 @@ from django.utils.timezone import now
|
||||
|
||||
from datetime import timedelta
|
||||
|
||||
import time
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ref_time():
|
||||
@@ -33,23 +33,15 @@ def hosts(ref_time):
|
||||
|
||||
|
||||
def test_start_job_fact_cache(hosts, tmpdir):
|
||||
# Create artifacts dir inside tmpdir
|
||||
artifacts_dir = tmpdir.mkdir("artifacts")
|
||||
|
||||
# Assign a mock inventory ID
|
||||
inventory_id = 42
|
||||
|
||||
# Call the function WITHOUT log_data — the decorator handles it
|
||||
start_fact_cache(hosts, artifacts_dir=str(artifacts_dir), timeout=0, inventory_id=inventory_id)
|
||||
|
||||
# Fact files are written into artifacts_dir/fact_cache/
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
for host in hosts:
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
filepath = os.path.join(fact_cache, host.name)
|
||||
assert os.path.exists(filepath)
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
assert json.load(f) == host.ansible_facts
|
||||
with open(filepath, 'r') as f:
|
||||
assert f.read() == json.dumps(host.ansible_facts)
|
||||
assert os.path.getmtime(filepath) <= last_modified
|
||||
|
||||
|
||||
def test_fact_cache_with_invalid_path_traversal(tmpdir):
|
||||
@@ -59,84 +51,64 @@ def test_fact_cache_with_invalid_path_traversal(tmpdir):
|
||||
ansible_facts={"a": 1, "b": 2},
|
||||
),
|
||||
]
|
||||
artifacts_dir = tmpdir.mkdir("artifacts")
|
||||
inventory_id = 42
|
||||
|
||||
start_fact_cache(hosts, artifacts_dir=str(artifacts_dir), timeout=0, inventory_id=inventory_id)
|
||||
|
||||
# Fact cache directory (safe location)
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
|
||||
# The bad host name should not produce a file
|
||||
assert not os.path.exists(os.path.join(fact_cache_dir, '../foo'))
|
||||
|
||||
# Make sure the fact_cache dir exists and is still empty
|
||||
assert os.listdir(fact_cache_dir) == []
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
# a file called "foo" should _not_ be written outside the facts dir
|
||||
assert os.listdir(os.path.join(fact_cache, '..')) == ['facts']
|
||||
|
||||
|
||||
def test_start_job_fact_cache_past_timeout(hosts, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
# the hosts fixture was modified 5s ago, which is more than 2s
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
assert last_modified is None
|
||||
|
||||
for host in hosts:
|
||||
assert not os.path.exists(os.path.join(fact_cache, host.name))
|
||||
ret = start_fact_cache(hosts, fact_cache, timeout=2)
|
||||
assert ret is None
|
||||
|
||||
|
||||
def test_start_job_fact_cache_within_timeout(hosts, tmpdir):
|
||||
artifacts_dir = tmpdir.mkdir("artifacts")
|
||||
|
||||
# The hosts fixture was modified 5s ago, which is less than 7s
|
||||
start_fact_cache(hosts, str(artifacts_dir), timeout=7)
|
||||
|
||||
fact_cache_dir = os.path.join(artifacts_dir, 'fact_cache')
|
||||
for host in hosts:
|
||||
filepath = os.path.join(fact_cache_dir, host.name)
|
||||
assert os.path.exists(filepath)
|
||||
with open(filepath, 'r') as f:
|
||||
assert json.load(f) == host.ansible_facts
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
# the hosts fixture was modified 5s ago, which is less than 7s
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=7)
|
||||
assert last_modified
|
||||
|
||||
bulk_update = mocker.patch('awx.main.tasks.facts.bulk_update_sorted_by_id')
|
||||
for host in hosts:
|
||||
assert os.path.exists(os.path.join(fact_cache, host.name))
|
||||
|
||||
# Mock the os.path.exists behavior for host deletion
|
||||
# Let's assume the fact file for hosts[1] is missing.
|
||||
mocker.patch('os.path.exists', side_effect=lambda path: hosts[1].name not in path)
|
||||
|
||||
# Simulate one host's fact file getting deleted manually
|
||||
host_to_delete_filepath = os.path.join(fact_cache, hosts[1].name)
|
||||
def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
# Simulate the file being removed by checking existence first, to avoid FileNotFoundError
|
||||
if os.path.exists(host_to_delete_filepath):
|
||||
os.remove(host_to_delete_filepath)
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
finish_fact_cache(fact_cache)
|
||||
ansible_facts_new = {"foo": "bar"}
|
||||
filepath = os.path.join(fact_cache, hosts[1].name)
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(json.dumps(ansible_facts_new))
|
||||
f.flush()
|
||||
# I feel kind of gross about calling `os.utime` by hand, but I noticed
|
||||
# that in our container-based dev environment, the resolution for
|
||||
# `os.stat()` after a file write was over a second, and I don't want to put
|
||||
# a sleep() in this test
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
# Simulate side effects that would normally be applied during bulk update
|
||||
hosts[1].ansible_facts = {}
|
||||
hosts[1].ansible_facts_modified = now()
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
# Verify facts are preserved for hosts with valid cache files
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified == ref_time
|
||||
|
||||
# Verify facts were cleared for host with deleted cache file
|
||||
assert hosts[1].ansible_facts == {}
|
||||
assert hosts[1].ansible_facts == ansible_facts_new
|
||||
assert hosts[1].ansible_facts_modified > ref_time
|
||||
|
||||
# Current implementation skips the call entirely if hosts_to_update == []
|
||||
bulk_update.assert_not_called()
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
@@ -148,6 +120,23 @@ def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
|
||||
new_modification_time = time.time() + 3600
|
||||
os.utime(filepath, (new_modification_time, new_modification_time))
|
||||
|
||||
finish_fact_cache(fact_cache)
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
bulk_update.assert_not_called()
|
||||
|
||||
|
||||
def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
|
||||
fact_cache = os.path.join(tmpdir, 'facts')
|
||||
last_modified, _ = start_fact_cache(hosts, fact_cache, timeout=0)
|
||||
|
||||
bulk_update = mocker.patch('django.db.models.query.QuerySet.bulk_update')
|
||||
|
||||
os.remove(os.path.join(fact_cache, hosts[1].name))
|
||||
finish_fact_cache(hosts, fact_cache, last_modified)
|
||||
|
||||
for host in (hosts[0], hosts[2], hosts[3]):
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
assert host.ansible_facts_modified == ref_time
|
||||
assert hosts[1].ansible_facts == {}
|
||||
assert hosts[1].ansible_facts_modified > ref_time
|
||||
bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
|
||||
|
||||
@@ -561,7 +561,7 @@ class TestBFSNodesToRun:
|
||||
assert set([nodes[1], nodes[2]]) == set(g.bfs_nodes_to_run())
|
||||
|
||||
|
||||
@pytest.mark.xfail(reason="Run manually to re-generate doc images")
|
||||
@pytest.mark.skip(reason="Run manually to re-generate doc images")
|
||||
class TestDocsExample:
|
||||
@pytest.fixture
|
||||
def complex_dag(self, wf_node_generator):
|
||||
|
||||
@@ -32,140 +32,112 @@ def private_data_dir():
|
||||
shutil.rmtree(private_data, True)
|
||||
|
||||
|
||||
@mock.patch('awx.main.tasks.facts.update_hosts')
|
||||
@mock.patch('awx.main.tasks.facts.settings')
|
||||
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
|
||||
def test_pre_post_run_hook_facts(mock_create_partition, mock_facts_settings, private_data_dir, execution_environment):
|
||||
# Create mocked inventory and host queryset
|
||||
inventory = mock.MagicMock(spec=Inventory, pk=1)
|
||||
host1 = mock.MagicMock(spec=Host, id=1, name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=inventory)
|
||||
host2 = mock.MagicMock(spec=Host, id=2, name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=inventory)
|
||||
|
||||
# Mock hosts queryset
|
||||
hosts = [host1, host2]
|
||||
qs_hosts = mock.MagicMock(spec=QuerySet)
|
||||
def test_pre_post_run_hook_facts(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
|
||||
# creates inventory_object with two hosts
|
||||
inventory = Inventory(pk=1)
|
||||
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
|
||||
mock_inventory._state = mock.MagicMock()
|
||||
qs_hosts = QuerySet()
|
||||
hosts = [
|
||||
Host(id=1, name='host1', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
|
||||
Host(id=2, name='host2', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory),
|
||||
]
|
||||
qs_hosts._result_cache = hosts
|
||||
qs_hosts.only.return_value = hosts
|
||||
qs_hosts.count.side_effect = lambda: len(qs_hosts._result_cache)
|
||||
inventory.hosts = qs_hosts
|
||||
qs_hosts.only = mock.MagicMock(return_value=hosts)
|
||||
mock_inventory.hosts = qs_hosts
|
||||
assert mock_inventory.hosts.count() == 2
|
||||
|
||||
# Create mocked job object
|
||||
org = mock.MagicMock(spec=Organization, pk=1)
|
||||
proj = mock.MagicMock(spec=Project, pk=1, organization=org)
|
||||
job = mock.MagicMock(
|
||||
spec=Job,
|
||||
use_fact_cache=True,
|
||||
project=proj,
|
||||
organization=org,
|
||||
job_slice_number=1,
|
||||
job_slice_count=1,
|
||||
inventory=inventory,
|
||||
execution_environment=execution_environment,
|
||||
)
|
||||
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job)
|
||||
# creates job object with fact_cache enabled
|
||||
org = Organization(pk=1)
|
||||
proj = Project(pk=1, organization=org)
|
||||
job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=1)
|
||||
job.inventory = mock_inventory
|
||||
job.execution_environment = execution_environment
|
||||
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job) # to run original method
|
||||
job.job_env.get = mock.MagicMock(return_value=private_data_dir)
|
||||
|
||||
# Mock RunJob task
|
||||
mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False
|
||||
# creates the task object with job object as instance
|
||||
mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False # defines timeout to false
|
||||
task = jobs.RunJob()
|
||||
task.instance = job
|
||||
task.update_model = mock.Mock(return_value=job)
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
|
||||
# Run pre_run_hook
|
||||
# run pre_run_hook
|
||||
task.facts_write_time = task.pre_run_hook(job, private_data_dir)
|
||||
|
||||
# Add a third mocked host
|
||||
host3 = mock.MagicMock(spec=Host, id=3, name='host3', ansible_facts={"added": True}, ansible_facts_modified=now(), inventory=inventory)
|
||||
qs_hosts._result_cache.append(host3)
|
||||
assert inventory.hosts.count() == 3
|
||||
# updates inventory with one more host
|
||||
hosts.append(Host(id=3, name='host3', ansible_facts={"added": True}, ansible_facts_modified=now(), inventory=mock_inventory))
|
||||
assert mock_inventory.hosts.count() == 3
|
||||
|
||||
# Run post_run_hook
|
||||
# run post_run_hook
|
||||
task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)
|
||||
|
||||
task.post_run_hook(job, "success")
|
||||
|
||||
# Verify final host facts
|
||||
assert qs_hosts._result_cache[2].ansible_facts == {"added": True}
|
||||
assert mock_inventory.hosts[2].ansible_facts == {"added": True}
|
||||
|
||||
|
||||
@mock.patch('awx.main.tasks.facts.bulk_update_sorted_by_id')
|
||||
@mock.patch('awx.main.tasks.facts.update_hosts')
|
||||
@mock.patch('awx.main.tasks.facts.settings')
|
||||
@mock.patch('awx.main.tasks.jobs.create_partition', return_value=True)
|
||||
def test_pre_post_run_hook_facts_deleted_sliced(mock_create_partition, mock_facts_settings, private_data_dir, execution_environment):
|
||||
# Fully mocked inventory
|
||||
mock_inventory = mock.MagicMock(spec=Inventory)
|
||||
def test_pre_post_run_hook_facts_deleted_sliced(mock_create_partition, mock_facts_settings, update_hosts, private_data_dir, execution_environment):
|
||||
# creates inventory_object with two hosts
|
||||
inventory = Inventory(pk=1)
|
||||
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
|
||||
mock_inventory._state = mock.MagicMock()
|
||||
qs_hosts = QuerySet()
|
||||
hosts = [Host(id=num, name=f'host{num}', ansible_facts={"a": 1, "b": 2}, ansible_facts_modified=now(), inventory=mock_inventory) for num in range(999)]
|
||||
|
||||
# Create 999 mocked Host instances
|
||||
hosts = []
|
||||
for i in range(999):
|
||||
host = mock.MagicMock(spec=Host)
|
||||
host.id = i
|
||||
host.name = f'host{i}'
|
||||
host.ansible_facts = {"a": 1, "b": 2}
|
||||
host.ansible_facts_modified = now()
|
||||
host.inventory = mock_inventory
|
||||
hosts.append(host)
|
||||
qs_hosts._result_cache = hosts
|
||||
qs_hosts.only = mock.MagicMock(return_value=hosts)
|
||||
mock_inventory.hosts = qs_hosts
|
||||
assert mock_inventory.hosts.count() == 999
|
||||
|
||||
# Mock inventory.hosts behavior
|
||||
mock_qs_hosts = mock.MagicMock()
|
||||
mock_qs_hosts.only.return_value = hosts
|
||||
mock_qs_hosts.count.return_value = 999
|
||||
mock_inventory.hosts = mock_qs_hosts
|
||||
|
||||
# Mock Organization and Project
|
||||
org = mock.MagicMock(spec=Organization)
|
||||
proj = mock.MagicMock(spec=Project)
|
||||
proj.organization = org
|
||||
|
||||
# Mock job object
|
||||
job = mock.MagicMock(spec=Job)
|
||||
job.use_fact_cache = True
|
||||
job.project = proj
|
||||
job.organization = org
|
||||
job.job_slice_number = 1
|
||||
job.job_slice_count = 3
|
||||
job.execution_environment = execution_environment
|
||||
# creates job object with fact_cache enabled
|
||||
org = Organization(pk=1)
|
||||
proj = Project(pk=1, organization=org)
|
||||
job = mock.MagicMock(spec=Job, use_fact_cache=True, project=proj, organization=org, job_slice_number=1, job_slice_count=3)
|
||||
job.inventory = mock_inventory
|
||||
job.job_env.get.return_value = private_data_dir
|
||||
job.execution_environment = execution_environment
|
||||
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job) # to run original method
|
||||
job.job_env.get = mock.MagicMock(return_value=private_data_dir)
|
||||
|
||||
# Bind actual method for host filtering
|
||||
job.get_hosts_for_fact_cache = Job.get_hosts_for_fact_cache.__get__(job)
|
||||
|
||||
# Mock task instance
|
||||
# creates the task object with job object as instance
|
||||
mock_facts_settings.ANSIBLE_FACT_CACHE_TIMEOUT = False
|
||||
task = jobs.RunJob()
|
||||
task.instance = job
|
||||
task.update_model = mock.Mock(return_value=job)
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
|
||||
# Call pre_run_hook
|
||||
# run pre_run_hook
|
||||
task.facts_write_time = task.pre_run_hook(job, private_data_dir)
|
||||
|
||||
# Simulate one host deletion
|
||||
hosts.pop(1)
|
||||
mock_qs_hosts.count.return_value = 998
|
||||
assert mock_inventory.hosts.count() == 998
|
||||
|
||||
# Call post_run_hook
|
||||
# run post_run_hook
|
||||
task.runner_callback.artifacts_processed = mock.MagicMock(return_value=True)
|
||||
task.post_run_hook(job, "success")
|
||||
|
||||
# Assert that ansible_facts were preserved
|
||||
for host in hosts:
|
||||
assert host.ansible_facts == {"a": 1, "b": 2}
|
||||
|
||||
# Add expected failure cases
|
||||
failures = []
|
||||
for host in hosts:
|
||||
try:
|
||||
assert host.ansible_facts == {"a": 1, "b": 2, "unexpected_key": "bad"}
|
||||
except AssertionError:
|
||||
failures.append(f"Host named {host.name} has facts {host.ansible_facts}")
|
||||
failures.append("Host named {} has facts {}".format(host.name, host.ansible_facts))
|
||||
|
||||
assert len(failures) > 0, f"Failures occurred for the following hosts: {failures}"
|
||||
|
||||
|
||||
@mock.patch('awx.main.tasks.facts.bulk_update_sorted_by_id')
|
||||
@mock.patch('awx.main.tasks.facts.update_hosts')
|
||||
@mock.patch('awx.main.tasks.facts.settings')
|
||||
def test_invalid_host_facts(mock_facts_settings, bulk_update_sorted_by_id, private_data_dir, execution_environment):
|
||||
def test_invalid_host_facts(mock_facts_settings, update_hosts, private_data_dir, execution_environment):
|
||||
inventory = Inventory(pk=1)
|
||||
mock_inventory = mock.MagicMock(spec=Inventory, wraps=inventory)
|
||||
mock_inventory._state = mock.MagicMock()
|
||||
@@ -183,7 +155,7 @@ def test_invalid_host_facts(mock_facts_settings, bulk_update_sorted_by_id, priva
|
||||
failures.append(host.name)
|
||||
|
||||
mock_facts_settings.SOME_SETTING = True
|
||||
bulk_update_sorted_by_id(Host, mock_inventory.hosts, fields=['ansible_facts'])
|
||||
update_hosts(mock_inventory.hosts)
|
||||
|
||||
with pytest.raises(pytest.fail.Exception):
|
||||
if failures:
|
||||
|
||||
@@ -472,7 +472,7 @@ class TestGenericRun:
|
||||
task.model.objects.get = mock.Mock(return_value=job)
|
||||
task.build_private_data_files = mock.Mock(side_effect=OSError())
|
||||
|
||||
with mock.patch('awx.main.tasks.jobs.shutil.copytree'), mock.patch('awx.main.tasks.jobs.evaluate_policy'):
|
||||
with mock.patch('awx.main.tasks.jobs.shutil.copytree'):
|
||||
with pytest.raises(Exception):
|
||||
task.run(1)
|
||||
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
"""
|
||||
Test utility functions and classes for inventory variable handling.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from awx.main.utils.inventory_vars import InventoryVariable
|
||||
from awx.main.utils.inventory_vars import InventoryGroupVariables
|
||||
|
||||
|
||||
def test_inventory_variable_update_basic():
|
||||
"""Test basic functionality of an inventory variable."""
|
||||
x = InventoryVariable("x")
|
||||
assert x.has_no_source
|
||||
x.update(1, 101)
|
||||
assert str(x) == "1"
|
||||
x.update(2, 102)
|
||||
assert str(x) == "2"
|
||||
x.update(3, 103)
|
||||
assert str(x) == "3"
|
||||
x.delete(102)
|
||||
assert str(x) == "3"
|
||||
x.delete(103)
|
||||
assert str(x) == "1"
|
||||
x.delete(101)
|
||||
assert x.value is None
|
||||
assert x.has_no_source
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"updates", # (<source_id>, <value>, <expected_value>)
|
||||
[
|
||||
((101, 1, 1),),
|
||||
((101, 1, 1), (101, None, None)),
|
||||
((101, 1, 1), (102, 2, 2), (102, None, 1)),
|
||||
((101, 1, 1), (102, 2, 2), (101, None, 2), (102, None, None)),
|
||||
(
|
||||
(101, 0, 0),
|
||||
(101, 1, 1),
|
||||
(102, 2, 2),
|
||||
(103, 3, 3),
|
||||
(102, None, 3),
|
||||
(103, None, 1),
|
||||
(101, None, None),
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_inventory_variable_update(updates: tuple[tuple[int, int | None, int | None], ...]):
|
||||
"""
|
||||
Test if the variable value is set correctly on a sequence of updates.
|
||||
|
||||
For this test, the value `None` implies the deletion of the source.
|
||||
"""
|
||||
x = InventoryVariable("x")
|
||||
for src_id, value, expected_value in updates:
|
||||
if value is None:
|
||||
x.delete(src_id)
|
||||
else:
|
||||
x.update(value, src_id)
|
||||
assert x.value == expected_value
|
||||
|
||||
|
||||
def test_inventory_group_variables_update_basic():
|
||||
"""Test basic functionality of an inventory variables update."""
|
||||
vars = InventoryGroupVariables(1)
|
||||
vars.update_from_src({"x": 1, "y": 2}, 101)
|
||||
assert vars == {"x": 1, "y": 2}
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"updates", # (<source_id>, <vars>: dict, <expected_vars>: dict)
|
||||
[
|
||||
((101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {}, {"x": 1, "y": 1}),
|
||||
),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {"x": 2}, {"x": 2, "y": 1}),
|
||||
),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {"x": 2, "y": 2}, {"x": 2, "y": 2}),
|
||||
),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {"x": 2, "z": 2}, {"x": 2, "y": 1, "z": 2}),
|
||||
),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {"x": 2, "z": 2}, {"x": 2, "y": 1, "z": 2}),
|
||||
(102, {}, {"x": 1, "y": 1}),
|
||||
),
|
||||
(
|
||||
(101, {"x": 1, "y": 1}, {"x": 1, "y": 1}),
|
||||
(102, {"x": 2, "z": 2}, {"x": 2, "y": 1, "z": 2}),
|
||||
(103, {"x": 3}, {"x": 3, "y": 1, "z": 2}),
|
||||
(101, {}, {"x": 3, "z": 2}),
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_inventory_group_variables_update(updates: tuple[tuple[int, dict, dict], ...]):
|
||||
"""
|
||||
Test if the group vars are set correctly on various update sequences.
|
||||
"""
|
||||
groupvars = InventoryGroupVariables(2)
|
||||
for src_id, vars, expected_vars in updates:
|
||||
groupvars.update_from_src(vars, src_id)
|
||||
assert groupvars == expected_vars
|
||||
@@ -1,37 +0,0 @@
|
||||
import json
|
||||
from http import HTTPStatus
|
||||
from unittest.mock import patch
|
||||
|
||||
from requests import Response
|
||||
|
||||
from awx.main.utils.licensing import Licenser
|
||||
|
||||
|
||||
def test_rhsm_licensing():
|
||||
def mocked_requests_get(*args, **kwargs):
|
||||
assert kwargs['verify'] == True
|
||||
response = Response()
|
||||
subs = json.dumps({'body': []})
|
||||
response.status_code = HTTPStatus.OK
|
||||
response._content = bytes(subs, 'utf-8')
|
||||
return response
|
||||
|
||||
licenser = Licenser()
|
||||
with patch('awx.main.utils.analytics_proxy.OIDCClient.make_request', new=mocked_requests_get):
|
||||
subs = licenser.get_rhsm_subs('localhost', 'admin', 'admin')
|
||||
assert subs == []
|
||||
|
||||
|
||||
def test_satellite_licensing():
|
||||
def mocked_requests_get(*args, **kwargs):
|
||||
assert kwargs['verify'] == True
|
||||
response = Response()
|
||||
subs = json.dumps({'results': []})
|
||||
response.status_code = HTTPStatus.OK
|
||||
response._content = bytes(subs, 'utf-8')
|
||||
return response
|
||||
|
||||
licenser = Licenser()
|
||||
with patch('requests.get', new=mocked_requests_get):
|
||||
subs = licenser.get_satellite_subs('localhost', 'admin', 'admin')
|
||||
assert subs == []
|
||||
@@ -23,7 +23,7 @@ class TokenError(requests.RequestException):
|
||||
try:
|
||||
client = OIDCClient(...)
|
||||
client.make_request(...)
|
||||
except TokenError as e:
|
||||
except TokenGenerationError as e:
|
||||
print(f"Token generation failed due to {e.__cause__}")
|
||||
except requests.RequestException:
|
||||
print("API request failed)
|
||||
@@ -102,15 +102,13 @@ class OIDCClient:
|
||||
self,
|
||||
client_id: str,
|
||||
client_secret: str,
|
||||
token_url: str = DEFAULT_OIDC_TOKEN_ENDPOINT,
|
||||
scopes: list[str] = None,
|
||||
token_url: str,
|
||||
scopes: list[str],
|
||||
base_url: str = '',
|
||||
) -> None:
|
||||
self.client_id: str = client_id
|
||||
self.client_secret: str = client_secret
|
||||
self.token_url: str = token_url
|
||||
if scopes is None:
|
||||
scopes = ['api.console']
|
||||
self.scopes = scopes
|
||||
self.base_url: str = base_url
|
||||
self.token: Optional[Token] = None
|
||||
|
||||
@@ -8,27 +8,3 @@ from django.conf import settings
|
||||
|
||||
def set_connection_name(function):
|
||||
set_application_name(settings.DATABASES, settings.CLUSTER_HOST_ID, function=function)
|
||||
|
||||
|
||||
def bulk_update_sorted_by_id(model, objects, fields, batch_size=1000):
|
||||
"""
|
||||
Perform a sorted bulk update on model instances to avoid database deadlocks.
|
||||
|
||||
This function was introduced to prevent deadlocks observed in the AWX Controller
|
||||
when concurrent jobs attempt to update different fields on the same `main_hosts` table.
|
||||
Specifically, deadlocks occurred when one process updated `last_job_id` while another
|
||||
simultaneously updated `ansible_facts`.
|
||||
|
||||
By sorting updates by ID, we ensure a consistent update order,
|
||||
which helps avoid the row-level locking contention that can lead to deadlocks
|
||||
in PostgreSQL when multiple processes are involved.
|
||||
|
||||
Returns:
|
||||
int: The number of rows affected by the update.
|
||||
"""
|
||||
objects = [obj for obj in objects if obj.id is not None]
|
||||
if not objects:
|
||||
return 0 # Return 0 when nothing is updated
|
||||
|
||||
sorted_objects = sorted(objects, key=lambda obj: obj.id)
|
||||
return model.objects.bulk_update(sorted_objects, fields, batch_size=batch_size)
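A minimal usage sketch for the helper above (the model, filter and field names are illustrative, and the import path is assumed):

    from awx.main.models import Host
    from awx.main.utils.db import bulk_update_sorted_by_id  # import path assumed

    hosts = list(Host.objects.filter(inventory_id=1).only('id', 'ansible_facts'))
    for host in hosts:
        host.ansible_facts = {}
    # Sorting by id inside the helper keeps concurrent writers locking rows in
    # the same order, which is what avoids the deadlock described above.
    updated = bulk_update_sorted_by_id(Host, hosts, fields=['ansible_facts'])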
|
||||
|
||||
@@ -1,277 +0,0 @@
|
||||
import logging
|
||||
from typing import TypeAlias, Any
|
||||
|
||||
from awx.main.models import InventoryGroupVariablesWithHistory
|
||||
|
||||
|
||||
var_value: TypeAlias = Any
|
||||
update_queue: TypeAlias = list[tuple[int, var_value]]
|
||||
|
||||
|
||||
logger = logging.getLogger('awx.api.inventory_import')
|
||||
|
||||
|
||||
class InventoryVariable:
|
||||
"""
|
||||
Represents an inventory variable.
|
||||
|
||||
This class keeps track of the variable updates from different inventory
|
||||
sources.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str) -> None:
|
||||
"""
|
||||
:param str name: The variable's name.
|
||||
:return: None
|
||||
"""
|
||||
self.name = name
|
||||
self._update_queue: update_queue = []
|
||||
"""
|
||||
A queue representing updates from inventory sources in the sequence of
|
||||
occurrence.
|
||||
|
||||
The queue is realized as a list of two-tuples containing variable values
|
||||
and their originating inventory source. The last item of the list is
|
||||
considered the top of the queue, and holds the current value of the
|
||||
variable.
|
||||
"""
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset the variable by deleting its history."""
|
||||
self._update_queue = []
|
||||
|
||||
def load(self, updates: update_queue) -> "InventoryVariable":
|
||||
"""Load internal state from a list."""
|
||||
self._update_queue = updates
|
||||
return self
|
||||
|
||||
def dump(self) -> update_queue:
|
||||
"""Save internal state to a list."""
|
||||
return self._update_queue
|
||||
|
||||
def update(self, value: var_value, invsrc_id: int) -> None:
|
||||
"""
|
||||
Update the variable with a new value from an inventory source.
|
||||
|
||||
Updating means that this source is moved to the top of the queue
|
||||
and `value` becomes the new current value.
|
||||
|
||||
:param value: The new value of the variable.
|
||||
:param int invsrc_id: The inventory source of the new variable value.
|
||||
:return: None
|
||||
"""
|
||||
logger.debug(f"InventoryVariable().update({value}, {invsrc_id}):")
|
||||
# Move this source to the front of the queue by first deleting a
|
||||
# possibly existing entry, and then add the new entry to the front.
|
||||
self.delete(invsrc_id)
|
||||
self._update_queue.append((invsrc_id, value))
|
||||
|
||||
def delete(self, invsrc_id: int) -> None:
|
||||
"""
|
||||
Delete an inventory source from the variable.
|
||||
|
||||
:param int invsrc_id: The inventory source id.
|
||||
:return: None
|
||||
"""
|
||||
data_index = self._get_invsrc_index(invsrc_id)
|
||||
# Remove last update from this source, if there was any.
|
||||
if data_index is not None:
|
||||
value = self._update_queue.pop(data_index)[1]
|
||||
logger.debug(f"InventoryVariable().delete({invsrc_id}): {data_index=} {value=}")
|
||||
|
||||
def _get_invsrc_index(self, invsrc_id: int) -> int | None:
|
||||
"""Return the inventory source's position in the queue, or `None`."""
|
||||
for i, entry in enumerate(self._update_queue):
|
||||
if entry[0] == invsrc_id:
|
||||
return i
|
||||
return None
|
||||
|
||||
def _get_current_value(self) -> var_value:
|
||||
"""
|
||||
Return the current value of the variable, or None if the variable has no
|
||||
history.
|
||||
"""
|
||||
return self._update_queue[-1][1] if self._update_queue else None
|
||||
|
||||
@property
|
||||
def value(self) -> var_value:
|
||||
"""Read the current value of the variable."""
|
||||
return self._get_current_value()
|
||||
|
||||
@property
|
||||
def has_no_source(self) -> bool:
|
||||
"""True, if the variable is orphan, i.e. no source contains this var anymore."""
|
||||
return not self._update_queue
|
||||
|
||||
def __str__(self):
|
||||
"""Return the string representation of the current value."""
|
||||
return str(self.value or "")
|
||||
|
||||
|
||||
class InventoryGroupVariables(dict):
|
||||
"""
|
||||
Represent all inventory variables from one group.
|
||||
|
||||
This dict contains all variables of an inventory group and their current
|
||||
value under consideration of the inventory source update history.
|
||||
|
||||
Note that variable values cannot be `None`; use the empty string to
|
||||
indicate that a variable holds no value. See also `InventoryVariable`.
|
||||
"""
|
||||
|
||||
def __init__(self, id: int) -> None:
|
||||
"""
|
||||
:param int id: The id of the group object.
|
||||
:return: None
|
||||
"""
|
||||
super().__init__()
|
||||
self.id = id
|
||||
# In _vars we keep all sources for a given variable. This enables us to
|
||||
# find the current value for a variable, which is the value from the
|
||||
# latest update which defined this variable.
|
||||
self._vars: dict[str, InventoryVariable] = {}
|
||||
|
||||
def _sync_vars(self) -> None:
|
||||
"""
|
||||
Copy the current values of all variables into the internal dict.
|
||||
|
||||
Call this every time the `_vars` structure has been modified.
|
||||
"""
|
||||
for name, inv_var in self._vars.items():
|
||||
self[name] = inv_var.value
|
||||
|
||||
def load_state(self, state: dict[str, update_queue]) -> "InventoryGroupVariables":
|
||||
"""Load internal state from a dict."""
|
||||
for name, updates in state.items():
|
||||
self._vars[name] = InventoryVariable(name).load(updates)
|
||||
self._sync_vars()
|
||||
return self
|
||||
|
||||
def save_state(self) -> dict[str, update_queue]:
|
||||
"""Return internal state as a dict."""
|
||||
state = {}
|
||||
for name, inv_var in self._vars.items():
|
||||
state[name] = inv_var.dump()
|
||||
return state
|
||||
|
||||
def update_from_src(
|
||||
self,
|
||||
new_vars: dict[str, var_value],
|
||||
source_id: int,
|
||||
overwrite_vars: bool = True,
|
||||
reset: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Update with variables from an inventory source.
|
||||
|
||||
Delete all variables for this source which are not in the update vars.
|
||||
|
||||
:param dict new_vars: The variables from the inventory source.
|
||||
:param int source_id: The id of the inventory source for this update.
|
||||
:param bool overwrite_vars: If `True`, delete this source's history
|
||||
entry for variables which are not in this update. If `False`, keep
|
||||
the old updates in the history for such variables. Default is
|
||||
`True`.
|
||||
:param bool reset: If `True`, delete the update history for all existing
|
||||
variables before applying the new vars, so that this update overwrites
all history. Default is `False`.
|
||||
:return: None
|
||||
"""
|
||||
logger.debug(f"InventoryGroupVariables({self.id}).update_from_src({new_vars=}, {source_id=}, {overwrite_vars=}, {reset=}): {self=}")
|
||||
# Create variables which are newly introduced by this source.
|
||||
for name in new_vars:
|
||||
if name not in self._vars:
|
||||
self._vars[name] = InventoryVariable(name)
|
||||
# Combine the names of the existing vars and the new vars from this update.
|
||||
all_var_names = list(set(list(self.keys()) + list(new_vars.keys())))
|
||||
# In reset-mode, delete all existing vars and their history before
|
||||
# updating.
|
||||
if reset:
|
||||
for name in all_var_names:
|
||||
self._vars[name].reset()
|
||||
# Go through all variables (the existing ones, and the ones added by
|
||||
# this update), delete this source from variables which are not in this
|
||||
# update, and update the value of variables which are part of this
|
||||
# update.
|
||||
for name in all_var_names:
|
||||
# Update or delete source from var (if name not in vars).
|
||||
if name in new_vars:
|
||||
self._vars[name].update(new_vars[name], source_id)
|
||||
elif overwrite_vars:
|
||||
self._vars[name].delete(source_id)
|
||||
# Delete vars which have no source anymore.
|
||||
if self._vars[name].has_no_source:
|
||||
del self._vars[name]
|
||||
del self[name]
|
||||
# After the update, refresh the internal dict with the possibly changed
|
||||
# current values.
|
||||
self._sync_vars()
|
||||
logger.debug(f"InventoryGroupVariables({self.id}).update_from_src(): {self=}")
|
||||
|
||||
|
||||
def update_group_variables(
|
||||
group_id: int | None,
|
||||
newvars: dict,
|
||||
dbvars: dict | None,
|
||||
invsrc_id: int,
|
||||
inventory_id: int,
|
||||
overwrite_vars: bool = True,
|
||||
reset: bool = False,
|
||||
) -> dict[str, var_value]:
|
||||
"""
|
||||
Update the inventory variables of one group.
|
||||
|
||||
Merge the new variables into the existing group variables.
|
||||
|
||||
The update can be triggered either by an inventory update via API, or via a
|
||||
manual edit of the variables field in the awx inventory form.
|
||||
|
||||
TODO: Can we get rid of the dbvars? This is only needed because the new
|
||||
update-var mechanism needs to be properly initialized if the db already
|
||||
contains some variables.
|
||||
|
||||
:param int group_id: The inventory group id (pk). For the 'all'-group use
|
||||
`None`, because this group is not an actual `Group` object in the
|
||||
database.
|
||||
:param dict newvars: The variables contained in this update.
|
||||
:param dict dbvars: The variables which are already stored in the database
|
||||
for this inventory and this group. Can be `None`.
|
||||
:param int invsrc_id: The id of the inventory source. Usually this is the
|
||||
database primary key of the inventory source object, but there is one
|
||||
special id -1 which is used for the initial update from the database and
|
||||
for manual updates via the GUI.
|
||||
:param int inventory_id: The id of the inventory on which this update is
|
||||
applied.
|
||||
:param bool overwrite_vars: If `True`, delete variables which were merged
|
||||
from the same source in a previous update, but are no longer contained
|
||||
in that source. If `False`, such variables would not be removed from the
|
||||
group. Default is `True`.
|
||||
:param bool reset: If `True`, delete all variables from previous updates,
so that this update overwrites all history. Default is `False`.
|
||||
:return: The variables and their current values as a dict.
|
||||
:rtype: dict
|
||||
"""
|
||||
inv_group_vars = InventoryGroupVariables(group_id)
|
||||
# Restore the existing variables state.
|
||||
try:
|
||||
# Get the object for this group from the database.
|
||||
model = InventoryGroupVariablesWithHistory.objects.get(inventory_id=inventory_id, group_id=group_id)
|
||||
except InventoryGroupVariablesWithHistory.DoesNotExist:
|
||||
# If no previous state exists, create a new database object, and
|
||||
# initialize it with the current group variables.
|
||||
model = InventoryGroupVariablesWithHistory(inventory_id=inventory_id, group_id=group_id)
|
||||
if dbvars:
|
||||
inv_group_vars.update_from_src(dbvars, -1) # Assume -1 as inv_source_id for existing vars.
|
||||
else:
|
||||
# Load the group variables state from the database object.
|
||||
inv_group_vars.load_state(model.variables)
|
||||
#
|
||||
logger.debug(f"update_group_variables: before update_from_src {model.variables=}")
|
||||
# Apply the new inventory update onto the group variables.
|
||||
inv_group_vars.update_from_src(newvars, invsrc_id, overwrite_vars, reset)
|
||||
# Save the new variables state.
|
||||
model.variables = inv_group_vars.save_state()
|
||||
model.save()
|
||||
logger.debug(f"update_group_variables: after update_from_src {model.variables=}")
|
||||
logger.debug(f"update_group_variables({group_id=}, {newvars}): {inv_group_vars}")
|
||||
return inv_group_vars
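A small in-memory sketch of the merge semantics above, using `InventoryGroupVariables` directly (values mirror the parametrized test cases; no database access involved):

    gv = InventoryGroupVariables(1)
    gv.update_from_src({"x": 1, "y": 1}, 101)  # first source defines x and y
    gv.update_from_src({"x": 2, "z": 2}, 102)  # second source overrides x, adds z
    assert gv == {"x": 2, "y": 1, "z": 2}
    gv.update_from_src({}, 102)                # source 102 no longer defines anything
    assert gv == {"x": 1, "y": 1}              # x falls back to the value from source 101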
|
||||
@@ -38,7 +38,6 @@ from django.utils.translation import gettext_lazy as _
|
||||
from awx_plugins.interfaces._temporary_private_licensing_api import detect_server_product_name
|
||||
|
||||
from awx.main.constants import SUBSCRIPTION_USAGE_MODEL_UNIQUE_HOSTS
|
||||
from awx.main.utils.analytics_proxy import OIDCClient
|
||||
|
||||
MAX_INSTANCES = 9999999
|
||||
|
||||
@@ -229,38 +228,37 @@ class Licenser(object):
|
||||
host = getattr(settings, 'REDHAT_CANDLEPIN_HOST', None)
|
||||
|
||||
if not user:
|
||||
raise ValueError('subscriptions_client_id is required')
|
||||
raise ValueError('subscriptions_username is required')
|
||||
|
||||
if not pw:
|
||||
raise ValueError('subscriptions_client_secret is required')
|
||||
raise ValueError('subscriptions_password is required')
|
||||
|
||||
if host and user and pw:
|
||||
if 'subscription.rhsm.redhat.com' in host:
|
||||
json = self.get_rhsm_subs(settings.SUBSCRIPTIONS_RHSM_URL, user, pw)
|
||||
json = self.get_rhsm_subs(host, user, pw)
|
||||
else:
|
||||
json = self.get_satellite_subs(host, user, pw)
|
||||
return self.generate_license_options_from_entitlements(json)
|
||||
return []
|
||||
|
||||
def get_rhsm_subs(self, host, client_id, client_secret):
|
||||
client = OIDCClient(client_id, client_secret)
|
||||
subs = client.make_request(
|
||||
'GET',
|
||||
host,
|
||||
verify=True,
|
||||
timeout=(31, 31),
|
||||
)
|
||||
|
||||
def get_rhsm_subs(self, host, user, pw):
|
||||
verify = getattr(settings, 'REDHAT_CANDLEPIN_VERIFY', True)
|
||||
json = []
|
||||
try:
|
||||
subs = requests.get('/'.join([host, 'subscription/users/{}/owners'.format(user)]), verify=verify, auth=(user, pw))
|
||||
except requests.exceptions.ConnectionError as error:
|
||||
raise error
|
||||
except OSError as error:
|
||||
raise OSError(
|
||||
'Unable to open certificate bundle {}. Check that the service is running on Red Hat Enterprise Linux.'.format(verify)
|
||||
) from error # noqa
|
||||
subs.raise_for_status()
|
||||
subs_formatted = []
|
||||
for sku in subs.json()['body']:
|
||||
sku_data = {k: v for k, v in sku.items() if k != 'subscriptions'}
|
||||
for sub in sku['subscriptions']:
|
||||
sub_data = sku_data.copy()
|
||||
sub_data['subscriptions'] = sub
|
||||
subs_formatted.append(sub_data)
|
||||
|
||||
return subs_formatted
|
||||
for sub in subs.json():
|
||||
resp = requests.get('/'.join([host, 'subscription/owners/{}/pools/?match=*tower*'.format(sub['key'])]), verify=verify, auth=(user, pw))
|
||||
resp.raise_for_status()
|
||||
json.extend(resp.json())
|
||||
return json
|
||||
|
||||
def get_satellite_subs(self, host, user, pw):
|
||||
port = None
|
||||
@@ -269,7 +267,7 @@ class Licenser(object):
|
||||
port = str(self.config.get("server", "port"))
|
||||
except Exception as e:
|
||||
logger.exception('Unable to read rhsm config to get ca_cert location. {}'.format(str(e)))
|
||||
verify = True
|
||||
verify = getattr(settings, 'REDHAT_CANDLEPIN_VERIFY', True)
|
||||
if port:
|
||||
host = ':'.join([host, port])
|
||||
json = []
|
||||
@@ -316,11 +314,20 @@ class Licenser(object):
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_appropriate_sub(self, sub):
|
||||
if sub['activeSubscription'] is False:
|
||||
return False
|
||||
# Products that contain Ansible Tower
|
||||
products = sub.get('providedProducts', [])
|
||||
if any(product.get('productId') == '480' for product in products):
|
||||
return True
|
||||
return False
|
||||
|
||||
def generate_license_options_from_entitlements(self, json):
|
||||
from dateutil.parser import parse
|
||||
|
||||
ValidSub = collections.namedtuple(
|
||||
'ValidSub', 'sku name support_level end_date trial developer_license quantity satellite subscription_id account_number usage'
|
||||
'ValidSub', 'sku name support_level end_date trial developer_license quantity pool_id satellite subscription_id account_number usage'
|
||||
)
|
||||
valid_subs = []
|
||||
for sub in json:
|
||||
@@ -328,14 +335,10 @@ class Licenser(object):
|
||||
if satellite:
|
||||
is_valid = self.is_appropriate_sat_sub(sub)
|
||||
else:
|
||||
# the list of subs from console.redhat.com are already valid based on the query params we provided
|
||||
is_valid = True
|
||||
is_valid = self.is_appropriate_sub(sub)
|
||||
if is_valid:
|
||||
try:
|
||||
if satellite:
|
||||
end_date = parse(sub.get('endDate'))
|
||||
else:
|
||||
end_date = parse(sub['subscriptions']['endDate'])
|
||||
end_date = parse(sub.get('endDate'))
|
||||
except Exception:
|
||||
continue
|
||||
now = datetime.utcnow()
|
||||
@@ -343,55 +346,44 @@ class Licenser(object):
|
||||
if end_date < now:
|
||||
# If the sub has a past end date, skip it
|
||||
continue
|
||||
try:
|
||||
quantity = int(sub['quantity'])
|
||||
if quantity == -1:
|
||||
# effectively, unlimited
|
||||
quantity = MAX_INSTANCES
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
sku = sub['productId']
|
||||
trial = sku.startswith('S')  # i.e., SER/SVC
|
||||
developer_license = False
|
||||
support_level = ''
|
||||
account_number = ''
|
||||
usage = sub.get('usage', '')
|
||||
usage = ''
|
||||
pool_id = sub['id']
|
||||
subscription_id = sub['subscriptionId']
|
||||
account_number = sub['accountNumber']
|
||||
if satellite:
|
||||
try:
|
||||
quantity = int(sub['quantity'])
|
||||
except Exception:
|
||||
continue
|
||||
sku = sub['productId']
|
||||
subscription_id = sub['subscriptionId']
|
||||
sub_name = sub['productName']
|
||||
support_level = sub['support_level']
|
||||
account_number = sub['accountNumber']
|
||||
usage = sub['usage']
|
||||
else:
|
||||
try:
|
||||
# Determine total quantity based on capacity name
|
||||
# if capacity name is Nodes, capacity quantity x subscription quantity
|
||||
# if capacity name is Sockets, capacity quantity / 2 (minimum of 1) x subscription quantity
|
||||
if sub['capacity']['name'] == "Nodes":
|
||||
quantity = int(sub['capacity']['quantity']) * int(sub['subscriptions']['quantity'])
|
||||
elif sub['capacity']['name'] == "Sockets":
|
||||
quantity = max(int(sub['capacity']['quantity']) / 2, 1) * int(sub['subscriptions']['quantity'])
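# Worked example (hypothetical numbers): a 2-socket capacity with a
# subscription quantity of 10 yields max(2 / 2, 1) * 10 = 10 managed nodes.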
|
||||
else:
|
||||
continue
|
||||
except Exception:
|
||||
continue
|
||||
sku = sub['sku']
|
||||
sub_name = sub['name']
|
||||
support_level = sub['serviceLevel']
|
||||
subscription_id = sub['subscriptions']['number']
|
||||
if sub.get('name') == 'RHEL Developer':
|
||||
developer_license = True
|
||||
|
||||
if quantity == -1:
|
||||
# effectively, unlimited
|
||||
quantity = MAX_INSTANCES
|
||||
trial = sku.startswith('S')  # i.e., SER/SVC
|
||||
for attr in sub.get('productAttributes', []):
|
||||
if attr.get('name') == 'support_level':
|
||||
support_level = attr.get('value')
|
||||
elif attr.get('name') == 'usage':
|
||||
usage = attr.get('value')
|
||||
elif attr.get('name') == 'ph_product_name' and attr.get('value') == 'RHEL Developer':
|
||||
developer_license = True
|
||||
|
||||
valid_subs.append(
|
||||
ValidSub(
|
||||
sku,
|
||||
sub_name,
|
||||
sub['productName'],
|
||||
support_level,
|
||||
end_date,
|
||||
trial,
|
||||
developer_license,
|
||||
quantity,
|
||||
pool_id,
|
||||
satellite,
|
||||
subscription_id,
|
||||
account_number,
|
||||
@@ -422,6 +414,7 @@ class Licenser(object):
|
||||
license._attrs['satellite'] = satellite
|
||||
license._attrs['valid_key'] = True
|
||||
license.update(license_date=int(sub.end_date.strftime('%s')))
|
||||
license.update(pool_id=sub.pool_id)
|
||||
license.update(subscription_id=sub.subscription_id)
|
||||
license.update(account_number=sub.account_number)
|
||||
licenses.append(license._attrs.copy())
|
||||
|
||||
@@ -964,9 +964,6 @@ CLUSTER_HOST_ID = socket.gethostname()
|
||||
# - 'unique_managed_hosts': Compliant = automated - deleted hosts (using /api/v2/host_metrics/)
|
||||
SUBSCRIPTION_USAGE_MODEL = ''
|
||||
|
||||
# Default URL and query params for obtaining valid AAP subscriptions
|
||||
SUBSCRIPTIONS_RHSM_URL = 'https://console.redhat.com/api/rhsm/v2/products?include=providedProducts&oids=480&status=Active'
|
||||
|
||||
# Host metrics cleanup - last time of the task/command run
|
||||
CLEANUP_HOST_METRICS_LAST_TS = None
|
||||
# Host metrics cleanup - minimal interval between two cleanups in days
|
||||
@@ -1078,25 +1075,7 @@ INDIRECT_HOST_QUERY_FALLBACK_GIVEUP_DAYS = 3
|
||||
INDIRECT_HOST_AUDIT_RECORD_MAX_AGE_DAYS = 7
|
||||
|
||||
|
||||
# setting for Policy as Code feature
|
||||
FEATURE_POLICY_AS_CODE_ENABLED = False
|
||||
|
||||
OPA_HOST = '' # The hostname used to connect to the OPA server. If empty, policy enforcement will be disabled.
|
||||
OPA_PORT = 8181 # The port used to connect to the OPA server. Defaults to 8181.
|
||||
OPA_SSL = False # Enable or disable the use of SSL to connect to the OPA server. Defaults to false.
|
||||
|
||||
OPA_AUTH_TYPE = 'None' # The authentication type that will be used to connect to the OPA server: "None", "Token", or "Certificate".
|
||||
OPA_AUTH_TOKEN = '' # The token for authentication to the OPA server. Required when OPA_AUTH_TYPE is "Token". If an authorization header is defined in OPA_AUTH_CUSTOM_HEADERS, it will be overridden by OPA_AUTH_TOKEN.
|
||||
OPA_AUTH_CLIENT_CERT = '' # The content of the client certificate file for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".
|
||||
OPA_AUTH_CLIENT_KEY = '' # The content of the client key for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".
|
||||
OPA_AUTH_CA_CERT = '' # The content of the CA certificate for mTLS authentication to the OPA server. Required when OPA_AUTH_TYPE is "Certificate".
|
||||
OPA_AUTH_CUSTOM_HEADERS = {} # Optional custom headers included in requests to the OPA server. Defaults to empty dictionary ({}).
|
||||
OPA_REQUEST_TIMEOUT = 1.5 # The number of seconds after which the connection to the OPA server will time out. Defaults to 1.5 seconds.
|
||||
OPA_REQUEST_RETRIES = 2 # The number of retry attempts for connecting to the OPA server. Default is 2.
|
||||
|
||||
# feature flags
|
||||
FLAGS = {'FEATURE_INDIRECT_NODE_COUNTING_ENABLED': [{'condition': 'boolean', 'value': False}]}
|
||||
|
||||
FLAG_SOURCES = ('flags.sources.SettingsFlagsSource',)
|
||||
FLAGS = {
|
||||
'FEATURE_INDIRECT_NODE_COUNTING_ENABLED': [{'condition': 'boolean', 'value': False}],
|
||||
'FEATURE_POLICY_AS_CODE_ENABLED': [{'condition': 'boolean', 'value': False}],
|
||||
}
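For reference, a flag declared in `FLAGS` can be checked at runtime via django-flags; a minimal sketch:

    from flags.state import flag_enabled

    if flag_enabled('FEATURE_INDIRECT_NODE_COUNTING_ENABLED'):
        ...  # feature-gated code path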
|
||||
|
||||
@@ -31,9 +31,9 @@ options:
|
||||
unlicensed or trial licensed. When force=true, the license is always applied.
|
||||
type: bool
|
||||
default: 'False'
|
||||
subscription_id:
|
||||
pool_id:
|
||||
description:
|
||||
- Red Hat or Red Hat Satellite subscription_id to attach to
|
||||
- Red Hat or Red Hat Satellite pool_id to attach to
|
||||
required: False
|
||||
type: str
|
||||
state:
|
||||
@@ -57,9 +57,9 @@ EXAMPLES = '''
|
||||
username: "my_satellite_username"
|
||||
password: "my_satellite_password"
|
||||
|
||||
- name: Attach to a subscription (requires fetching subscriptions at least once before)
|
||||
- name: Attach to a pool (requires fetching subscriptions at least once before)
|
||||
license:
|
||||
subscription_id: 123456
|
||||
pool_id: 123456
|
||||
|
||||
- name: Remove license
|
||||
license:
|
||||
@@ -75,14 +75,14 @@ def main():
|
||||
module = ControllerAPIModule(
|
||||
argument_spec=dict(
|
||||
manifest=dict(type='str', required=False),
|
||||
subscription_id=dict(type='str', required=False),
|
||||
pool_id=dict(type='str', required=False),
|
||||
force=dict(type='bool', default=False),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
),
|
||||
required_if=[
|
||||
['state', 'present', ['manifest', 'subscription_id'], True],
|
||||
['state', 'present', ['manifest', 'pool_id'], True],
|
||||
],
|
||||
mutually_exclusive=[("manifest", "subscription_id")],
|
||||
mutually_exclusive=[("manifest", "pool_id")],
|
||||
)
|
||||
|
||||
json_output = {'changed': False}
|
||||
@@ -124,7 +124,7 @@ def main():
|
||||
if module.params.get('manifest', None):
|
||||
module.post_endpoint('config', data={'manifest': manifest.decode()})
|
||||
else:
|
||||
module.post_endpoint('config/attach', data={'subscription_id': module.params.get('subscription_id')})
|
||||
module.post_endpoint('config/attach', data={'pool_id': module.params.get('pool_id')})
|
||||
|
||||
module.exit_json(**json_output)
|
||||
|
||||
|
||||
@@ -20,15 +20,15 @@ description:
|
||||
- Get subscriptions available to Automation Platform Controller. See
|
||||
U(https://www.ansible.com/tower) for an overview.
|
||||
options:
|
||||
client_id:
|
||||
username:
|
||||
description:
|
||||
- Red Hat service account client ID or Red Hat Satellite username to get available subscriptions.
|
||||
- Red Hat or Red Hat Satellite username to get available subscriptions.
|
||||
- The credentials you use will be stored for future use in retrieving renewal or expanded subscriptions
|
||||
required: True
|
||||
type: str
|
||||
client_secret:
|
||||
password:
|
||||
description:
|
||||
- Red Hat service account client secret or Red Hat Satellite password to get available subscriptions.
|
||||
- Red Hat or Red Hat Satellite password to get available subscriptions.
|
||||
- The credentials you use will be stored for future use in retrieving renewal or expanded subscriptions
|
||||
required: True
|
||||
type: str
|
||||
@@ -53,13 +53,13 @@ subscriptions:
|
||||
EXAMPLES = '''
|
||||
- name: Get subscriptions
|
||||
subscriptions:
|
||||
client_id: "c6bd7594-d776-46e5-8156-6d17af147479"
|
||||
client_secret: "MO9QUvoOZ5fc5JQKXoTch1AsTLI7nFsZ"
|
||||
username: "my_username"
|
||||
password: "My Password"
|
||||
|
||||
- name: Get subscriptions with a filter
|
||||
subscriptions:
|
||||
client_id: "c6bd7594-d776-46e5-8156-6d17af147479"
|
||||
client_secret: "MO9QUvoOZ5fc5JQKXoTch1AsTLI7nFsZ"
|
||||
username: "my_username"
|
||||
password: "My Password"
|
||||
filters:
|
||||
product_name: "Red Hat Ansible Automation Platform"
|
||||
support_level: "Self-Support"
|
||||
@@ -72,8 +72,8 @@ def main():
|
||||
|
||||
module = ControllerAPIModule(
|
||||
argument_spec=dict(
|
||||
client_id=dict(type='str', required=True),
|
||||
client_secret=dict(type='str', no_log=True, required=True),
|
||||
username=dict(type='str', required=True),
|
||||
password=dict(type='str', no_log=True, required=True),
|
||||
filters=dict(type='dict', required=False, default={}),
|
||||
),
|
||||
)
|
||||
@@ -82,8 +82,8 @@ def main():
|
||||
|
||||
# Check if Tower is already licensed
|
||||
post_data = {
|
||||
'subscriptions_client_secret': module.params.get('client_secret'),
|
||||
'subscriptions_client_id': module.params.get('client_id'),
|
||||
'subscriptions_password': module.params.get('password'),
|
||||
'subscriptions_username': module.params.get('username'),
|
||||
}
|
||||
all_subscriptions = module.post_endpoint('config/subscriptions', data=post_data)['json']
|
||||
json_output['subscriptions'] = []
|
||||
|
||||
@@ -68,7 +68,3 @@
|
||||
job_template:
|
||||
name: "{{ bulk_job_name }}"
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
|
||||
@@ -452,14 +452,11 @@
|
||||
credential: "{{ cred3 }}"
|
||||
job_type: run
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "del_res is changed"
|
||||
- "result is changed"
|
||||
|
||||
- name: Delete the Demo Project
|
||||
project:
|
||||
@@ -468,6 +465,7 @@
|
||||
state: absent
|
||||
scm_type: git
|
||||
scm_url: https://github.com/ansible/ansible-tower-samples.git
|
||||
register: result
|
||||
|
||||
- name: Delete Credential1
|
||||
credential:
|
||||
@@ -519,3 +517,4 @@
|
||||
organization:
|
||||
name: "{{ org_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
@@ -156,17 +156,9 @@
|
||||
project: "{{ proj_name }}"
|
||||
inventory: "Demo Inventory"
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
|
||||
- name: Delete the project
|
||||
project:
|
||||
name: "{{ proj_name }}"
|
||||
organization: Default
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
|
||||
@@ -279,28 +279,24 @@
|
||||
with_items:
|
||||
- jt1
|
||||
- jt2
|
||||
register: result
|
||||
|
||||
- name: Delete the project
|
||||
project:
|
||||
name: "{{ project_name }}"
|
||||
organization: Default
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
register: result
|
||||
|
||||
- name: Delete the 2nd project
|
||||
project:
|
||||
name: "{{ project_name }}"
|
||||
organization: "{{ org2_name }}"
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
register: result
|
||||
|
||||
- name: Delete the 2nd organization
|
||||
organization:
|
||||
name: "{{ org2_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
@@ -353,10 +353,7 @@
|
||||
project: "{{ proj1 }}"
|
||||
playbook: hello_world.yml
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
ignore_errors: True
|
||||
|
||||
- name: Delete the jt2
|
||||
job_template:
|
||||
@@ -364,10 +361,7 @@
|
||||
project: "{{ proj2 }}"
|
||||
playbook: hello_world.yml
|
||||
state: absent
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
ignore_errors: True
|
||||
|
||||
- name: Delete the Project2
|
||||
project:
|
||||
@@ -376,10 +370,7 @@
|
||||
state: absent
|
||||
scm_type: git
|
||||
scm_url: https://github.com/ansible/ansible-tower-samples.git
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
ignore_errors: True
|
||||
|
||||
- name: Delete the Project1
|
||||
project:
|
||||
@@ -388,10 +379,7 @@
|
||||
state: absent
|
||||
scm_type: git
|
||||
scm_url: https://github.com/ansible/ansible-tower-samples.git
|
||||
register: del_res
|
||||
until: del_res is succeeded
|
||||
retries: 5
|
||||
delay: 3
|
||||
ignore_errors: True
|
||||
|
||||
- name: Delete Credential1
|
||||
credential:
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Tural Muradov Mohubbet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@@ -1,19 +0,0 @@
Copyright (C) 2014 by Saúl Ibarra Corretgé

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -1,19 +0,0 @@
Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -1,11 +0,0 @@
Copyright 2022 Rick van Hattem

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Binary file not shown.
BIN licenses/psycopg-3.2.6.tar.gz Normal file
Binary file not shown.
@@ -1,19 +0,0 @@
Copyright (C) 2012 by Saúl Ibarra Corretgé

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Binary file not shown.
BIN licenses/uwsgi-2.0.29.tar.gz Normal file
Binary file not shown.
@@ -36,7 +36,6 @@ Markdown # used for formatting API help
maturin # pydantic-core build dep
msgpack
msrestazure
OPA-python-client==2.0.2 # Code contain monkey patch targeted to 2.0.2 to fix https://github.com/Turall/OPA-python-client/issues/29
openshift
opentelemetry-api~=1.24 # new y streams can be drastically different, in a good way
opentelemetry-sdk~=1.24
@@ -55,7 +54,7 @@ python-tss-sdk>=1.2.1
pyyaml>=6.0.2 # require packing fix for cython 3 or higher
pyzstd # otel collector log file compression library
receptorctl
sqlparse>=0.4.4 # Required by django https://github.com/ansible/awx/security/dependabot/96
sqlparse>=0.5.2
redis[hiredis]
requests
slack-sdk

@@ -1,18 +1,13 @@
adal==1.2.7
# via msrestazure
aiodns==3.2.0
aiohappyeyeballs==2.6.1
# via aiohttp
aiofiles==24.1.0
# via opa-python-client
aiohappyeyeballs==2.4.4
# via aiohttp
aiohttp[speedups]==3.11.11
aiohttp==3.11.16
# via
# -r /awx_devel/requirements/requirements.in
# aiohttp-retry
# opa-python-client
# twilio
aiohttp-retry==2.8.3
aiohttp-retry==2.9.1
# via twilio
aiosignal==1.3.2
# via aiohttp
@@ -30,9 +25,9 @@ asgiref==3.8.1
# django
# django-ansible-base
# django-cors-headers
asn1==2.7.1
asn1==3.0.0
# via -r /awx_devel/requirements/requirements.in
attrs==24.3.0
attrs==25.3.0
# via
# aiohttp
# jsonschema
@@ -51,14 +46,14 @@ awx-plugins.interfaces @ git+https://github.com/ansible/awx_plugins.interfaces.g
# via
# -r /awx_devel/requirements/requirements_git.txt
# awx-plugins-core
azure-core==1.32.0
azure-core==1.33.0
# via
# azure-identity
# azure-keyvault-certificates
# azure-keyvault-keys
# azure-keyvault-secrets
# msrest
azure-identity==1.19.0
azure-identity==1.21.0
# via -r /awx_devel/requirements/requirements.in
azure-keyvault==4.2.0
# via -r /awx_devel/requirements/requirements.in
@@ -70,16 +65,14 @@ azure-keyvault-secrets==4.9.0
# via azure-keyvault
backports-tarfile==1.2.0
# via jaraco-context
boto3==1.35.96
boto3==1.37.34
# via -r /awx_devel/requirements/requirements.in
botocore==1.35.96
botocore==1.37.34
# via
# -r /awx_devel/requirements/requirements.in
# boto3
# s3transfer
brotli==1.1.0
# via aiohttp
cachetools==5.5.0
cachetools==5.5.2
# via google-auth
# git+https://github.com/ansible/system-certifi.git@devel # git requirements installed separately
# via
@@ -90,9 +83,8 @@ cachetools==5.5.0
cffi==1.17.1
# via
# cryptography
# pycares
# pynacl
channels==4.2.0
channels==4.2.2
# via
# -r /awx_devel/requirements/requirements.in
# channels-redis
@@ -117,11 +109,11 @@ cryptography==41.0.7
# pyjwt
# pyopenssl
# service-identity
cython==3.0.11
cython==3.0.12
# via -r /awx_devel/requirements/requirements.in
daphne==4.1.2
# via -r /awx_devel/requirements/requirements.in
deprecated==1.2.15
deprecated==1.2.18
# via
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-grpc
@@ -146,19 +138,19 @@ django==4.2.20
# djangorestframework
# django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel # git requirements installed separately
# via -r /awx_devel/requirements/requirements_git.txt
django-cors-headers==4.6.0
django-cors-headers==4.7.0
# via -r /awx_devel/requirements/requirements.in
django-crum==0.7.9
# via
# -r /awx_devel/requirements/requirements.in
# django-ansible-base
django-extensions==3.2.3
django-extensions==4.1
# via -r /awx_devel/requirements/requirements.in
django-flags==5.0.13
# via
# -r /awx_devel/requirements/requirements.in
# django-ansible-base
django-guid==3.5.0
django-guid==3.5.1
# via -r /awx_devel/requirements/requirements.in
django-oauth-toolkit==1.7.1
# via -r /awx_devel/requirements/requirements.in
@@ -166,7 +158,7 @@ django-polymorphic==3.1.0
# via -r /awx_devel/requirements/requirements.in
django-solo==2.4.0
# via -r /awx_devel/requirements/requirements.in
djangorestframework==3.15.2
djangorestframework==3.16.0
# via
# -r /awx_devel/requirements/requirements.in
# django-ansible-base
@@ -180,7 +172,7 @@ dynaconf==3.2.10
# django-ansible-base
enum-compat==0.0.3
# via asn1
filelock==3.16.1
filelock==3.18.0
# via -r /awx_devel/requirements/requirements.in
frozenlist==1.5.0
# via
@@ -190,13 +182,13 @@ gitdb==4.0.12
# via gitpython
gitpython==3.1.44
# via -r /awx_devel/requirements/requirements.in
google-auth==2.37.0
google-auth==2.39.0
# via kubernetes
googleapis-common-protos==1.66.0
googleapis-common-protos==1.70.0
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
grpcio==1.69.0
grpcio==1.71.0
# via
# -r /awx_devel/requirements/requirements.in
# opentelemetry-exporter-otlp-proto-grpc
@@ -212,7 +204,7 @@ idna==3.10
# requests
# twisted
# yarl
importlib-metadata==8.5.0
importlib-metadata==8.6.1
# via opentelemetry-api
importlib-resources==6.5.2
# via irc
@@ -245,7 +237,7 @@ jaraco-text==4.0.0
# via
# irc
# jaraco-collections
jinja2==3.1.5
jinja2==3.1.6
# via -r /awx_devel/requirements/requirements.in
jmespath==1.0.1
# via
@@ -253,7 +245,7 @@ jmespath==1.0.1
# botocore
jq==1.8.0
# via -r /awx_devel/requirements/requirements.in
json-log-formatter==1.1
json-log-formatter==1.1.1
# via -r /awx_devel/requirements/requirements.in
jsonschema==4.23.0
# via -r /awx_devel/requirements/requirements.in
@@ -261,27 +253,27 @@ jsonschema-specifications==2024.10.1
# via jsonschema
jwcrypto==1.5.6
# via django-oauth-toolkit
kubernetes==31.0.0
kubernetes==32.0.1
# via openshift
lockfile==0.12.2
# via python-daemon
markdown==3.7
markdown==3.8
# via -r /awx_devel/requirements/requirements.in
markupsafe==3.0.2
# via jinja2
maturin==1.8.1
maturin==1.8.3
# via -r /awx_devel/requirements/requirements.in
more-itertools==10.5.0
more-itertools==10.6.0
# via
# irc
# jaraco-functools
# jaraco-stream
# jaraco-text
msal==1.31.1
msal==1.32.0
# via
# azure-identity
# msal-extensions
msal-extensions==1.2.0
msal-extensions==1.3.1
# via azure-identity
msgpack==1.1.0
# via
@@ -291,7 +283,7 @@ msrest==0.7.1
# via msrestazure
msrestazure==0.6.4.post1
# via -r /awx_devel/requirements/requirements.in
multidict==6.1.0
multidict==6.4.3
# via
# aiohttp
# yarl
@@ -300,11 +292,9 @@ oauthlib==3.2.2
# django-oauth-toolkit
# kubernetes
# requests-oauthlib
opa-python-client==2.0.2
# via -r /awx_devel/requirements/requirements.in
openshift==0.13.2
# via -r /awx_devel/requirements/requirements.in
opentelemetry-api==1.29.0
opentelemetry-api==1.32.0
# via
# -r /awx_devel/requirements/requirements.in
# opentelemetry-exporter-otlp-proto-grpc
@@ -313,31 +303,31 @@ opentelemetry-api==1.29.0
# opentelemetry-instrumentation-logging
# opentelemetry-sdk
# opentelemetry-semantic-conventions
opentelemetry-exporter-otlp==1.29.0
opentelemetry-exporter-otlp==1.32.0
# via -r /awx_devel/requirements/requirements.in
opentelemetry-exporter-otlp-proto-common==1.29.0
opentelemetry-exporter-otlp-proto-common==1.32.0
# via
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-grpc==1.29.0
opentelemetry-exporter-otlp-proto-grpc==1.32.0
# via opentelemetry-exporter-otlp
opentelemetry-exporter-otlp-proto-http==1.29.0
opentelemetry-exporter-otlp-proto-http==1.32.0
# via opentelemetry-exporter-otlp
opentelemetry-instrumentation==0.50b0
opentelemetry-instrumentation==0.53b0
# via opentelemetry-instrumentation-logging
opentelemetry-instrumentation-logging==0.50b0
opentelemetry-instrumentation-logging==0.53b0
# via -r /awx_devel/requirements/requirements.in
opentelemetry-proto==1.29.0
opentelemetry-proto==1.32.0
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.29.0
opentelemetry-sdk==1.32.0
# via
# -r /awx_devel/requirements/requirements.in
# opentelemetry-exporter-otlp-proto-grpc
# opentelemetry-exporter-otlp-proto-http
opentelemetry-semantic-conventions==0.50b0
opentelemetry-semantic-conventions==0.53b0
# via
# opentelemetry-instrumentation
# opentelemetry-sdk
@@ -352,21 +342,19 @@ pexpect==4.7.0
# ansible-runner
pkgconfig==1.5.5
# via -r /awx_devel/requirements/requirements.in
portalocker==2.10.1
# via msal-extensions
prometheus-client==0.21.1
# via -r /awx_devel/requirements/requirements.in
propcache==0.2.1
propcache==0.3.1
# via
# aiohttp
# yarl
protobuf==5.29.3
protobuf==5.29.4
# via
# googleapis-common-protos
# opentelemetry-proto
psutil==6.1.1
psutil==7.0.0
# via -r /awx_devel/requirements/requirements.in
psycopg==3.2.3
psycopg==3.2.6
# via -r /awx_devel/requirements/requirements.in
ptyprocess==0.7.0
# via pexpect
@@ -375,12 +363,10 @@ pyasn1==0.6.1
# pyasn1-modules
# rsa
# service-identity
pyasn1-modules==0.4.1
pyasn1-modules==0.4.2
# via
# google-auth
# service-identity
pycares==4.5.0
# via aiodns
pycparser==2.22
# via cffi
pygerduty==0.38.3
@@ -396,7 +382,7 @@ pyjwt[crypto]==2.10.1
# twilio
pynacl==1.5.0
# via pygithub
pyopenssl==24.3.0
pyopenssl==25.0.0
# via
# -r /awx_devel/requirements/requirements.in
# twisted
@@ -419,7 +405,7 @@ python-string-utils==1.0.0
# via openshift
python-tss-sdk==1.2.3
# via -r /awx_devel/requirements/requirements.in
pytz==2024.2
pytz==2025.2
# via irc
pyyaml==6.0.2
# via
@@ -430,13 +416,13 @@ pyyaml==6.0.2
# receptorctl
pyzstd==0.16.2
# via -r /awx_devel/requirements/requirements.in
receptorctl==1.5.2
receptorctl==1.5.4
# via -r /awx_devel/requirements/requirements.in
redis[hiredis]==5.2.1
# via
# -r /awx_devel/requirements/requirements.in
# channels-redis
referencing==0.35.1
referencing==0.36.2
# via
# jsonschema
# jsonschema-specifications
@@ -450,7 +436,6 @@ requests==2.32.3
# kubernetes
# msal
# msrest
# opa-python-client
# opentelemetry-exporter-otlp-proto-http
# pygithub
# python-dsv-sdk
@@ -461,21 +446,21 @@ requests-oauthlib==2.0.0
# via
# kubernetes
# msrest
rpds-py==0.22.3
rpds-py==0.24.0
# via
# jsonschema
# referencing
rsa==4.9
# via google-auth
s3transfer==0.10.4
s3transfer==0.11.4
# via boto3
semantic-version==2.10.0
# via setuptools-rust
service-identity==24.2.0
# via twisted
setuptools-rust==1.10.2
setuptools-rust==1.11.1
# via -r /awx_devel/requirements/requirements.in
setuptools-scm[toml]==8.1.0
setuptools-scm[toml]==8.2.0
# via -r /awx_devel/requirements/requirements.in
six==1.17.0
# via
@@ -485,7 +470,7 @@ six==1.17.0
# openshift
# pygerduty
# python-dateutil
slack-sdk==3.34.0
slack-sdk==3.35.0
# via -r /awx_devel/requirements/requirements.in
smmap==5.0.2
# via gitdb
@@ -498,7 +483,7 @@ tempora==5.8.0
# via
# irc
# jaraco-logging
twilio==9.4.2
twilio==9.5.2
# via -r /awx_devel/requirements/requirements.in
twisted[tls]==24.11.0
# via
@@ -506,7 +491,7 @@ twisted[tls]==24.11.0
# daphne
txaio==23.1.1
# via autobahn
typing-extensions==4.12.2
typing-extensions==4.13.2
# via
# azure-core
# azure-identity
@@ -517,15 +502,17 @@ typing-extensions==4.12.2
# opentelemetry-sdk
# psycopg
# pygithub
# pyopenssl
# referencing
# twisted
urllib3==2.3.0
urllib3==2.4.0
# via
# botocore
# django-ansible-base
# kubernetes
# pygithub
# requests
uwsgi==2.0.28
uwsgi==2.0.29
# via -r /awx_devel/requirements/requirements.in
uwsgitop==0.12
# via -r /awx_devel/requirements/requirements.in
@@ -533,11 +520,11 @@ websocket-client==1.8.0
# via kubernetes
wheel==0.45.1
# via -r /awx_devel/requirements/requirements.in
wrapt==1.17.0
wrapt==1.17.2
# via
# deprecated
# opentelemetry-instrumentation
yarl==1.18.3
yarl==1.19.0
# via aiohttp
zipp==3.21.0
# via importlib-metadata

@@ -1,16 +0,0 @@
#!/usr/bin/env python

from django import setup

from awx import prepare_env

prepare_env()

setup()

# Keeping this in test folder allows it to be importable
from awx.main.tests.data.sleep_task import sleep_task


for i in range(634):
    sleep_task.delay()