Mirror of https://github.com/ansible/awx.git, synced 2026-02-08 21:14:47 -03:30

Compare commits (2 commits): devel_bug_… ... upgrade-sq…

| Author | SHA1 | Date |
|---|---|---|
| | 543d3f940b | |
| | ee7edb9179 | |
.gitignore (vendored, 2 lines changed)

```diff
@@ -150,8 +150,6 @@ use_dev_supervisor.txt
 awx/ui/src
 awx/ui/build
 awx/ui/.ui-built
 awx/ui_next

 # Docs build stuff
 docs/docsite/build/
```
```diff
@@ -7,7 +7,6 @@ import time
 import traceback
 from datetime import datetime
 from uuid import uuid4
-import json

 import collections
 from multiprocessing import Process
@@ -26,10 +25,7 @@ from ansible_base.lib.logging.runtime import log_excess_runtime

 from awx.main.models import UnifiedJob
 from awx.main.dispatch import reaper
-from awx.main.utils.common import get_mem_effective_capacity, get_corrected_memory, get_corrected_cpu, get_cpu_effective_capacity
-
-# ansible-runner
-from ansible_runner.utils.capacity import get_mem_in_bytes, get_cpu_count
+from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity

 if 'run_callback_receiver' in sys.argv:
     logger = logging.getLogger('awx.main.commands.run_callback_receiver')
```
```diff
@@ -311,41 +307,6 @@ class WorkerPool(object):
                 logger.exception('could not kill {}'.format(worker.pid))


-def get_auto_max_workers():
-    """Method we normally rely on to get max_workers
-
-    Uses almost same logic as Instance.local_health_check
-    The important thing is to be MORE than Instance.capacity
-    so that the task-manager does not over-schedule this node
-
-    Ideally we would just use the capacity from the database plus reserve workers,
-    but this poses some bootstrap problems where OCP task containers
-    register themselves after startup
-    """
-    # Get memory from ansible-runner
-    total_memory_gb = get_mem_in_bytes()
-
-    # This may replace memory calculation with a user override
-    corrected_memory = get_corrected_memory(total_memory_gb)
-
-    # Get same number as max forks based on memory, this function takes memory as bytes
-    mem_capacity = get_mem_effective_capacity(corrected_memory, is_control_node=True)
-
-    # Follow same process for CPU capacity constraint
-    cpu_count = get_cpu_count()
-    corrected_cpu = get_corrected_cpu(cpu_count)
-    cpu_capacity = get_cpu_effective_capacity(corrected_cpu, is_control_node=True)
-
-    # Here is what is different from health checks,
-    auto_max = max(mem_capacity, cpu_capacity)
-
-    # add magic number of extra workers to ensure
-    # we have a few extra workers to run the heartbeat
-    auto_max += 7
-
-    return auto_max
-
-
 class AutoscalePool(WorkerPool):
     """
     An extended pool implementation that automatically scales workers up and
```
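A note on the helper being removed here: it sizes the pool from the *larger* of the memory and CPU capacity estimates (Instance.capacity is derived from the same two numbers), so the dispatcher pool always meets or exceeds what the task manager can schedule on the node. A minimal sketch of the arithmetic, with stand-in formulas; the real ones live in `awx.main.utils.common` and honor the `SYSTEM_TASK_FORKS_*` settings, and the per-fork constants below are illustrative assumptions only:

```python
def sketch_auto_max_workers(total_mem_bytes, cpu_count,
                            mem_per_fork_mb=100, forks_per_cpu=4):
    # Stand-ins for get_mem_effective_capacity / get_cpu_effective_capacity;
    # the per-fork constants are assumptions for illustration only.
    mem_capacity = max(1, (total_mem_bytes // 2**20) // mem_per_fork_mb)
    cpu_capacity = cpu_count * forks_per_cpu
    # Take the max of the two estimates, then reserve 7 extra workers
    # so heartbeats and other housekeeping always have a slot.
    return max(mem_capacity, cpu_capacity) + 7

print(sketch_auto_max_workers(16 * 2**30, 8))  # max(163, 32) + 7 = 170
```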
```diff
@@ -359,7 +320,19 @@ class AutoscalePool(WorkerPool):
         super(AutoscalePool, self).__init__(*args, **kwargs)

         if self.max_workers is None:
-            self.max_workers = get_auto_max_workers()
+            settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
+            if settings_absmem is not None:
+                # There are 1073741824 bytes in a gigabyte. Convert bytes to gigabytes by dividing by 2**30
+                total_memory_gb = convert_mem_str_to_bytes(settings_absmem) // 2**30
+            else:
+                total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up
+
+            # Get same number as max forks based on memory, this function takes memory as bytes
+            self.max_workers = get_mem_effective_capacity(total_memory_gb * 2**30)
+
+            # add magic prime number of extra workers to ensure
+            # we have a few extra workers to run the heartbeat
+            self.max_workers += 7

         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)
```
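Both branches of the restored block end in whole gigabytes: `convert_mem_str_to_bytes` parses a Kubernetes-style string such as `'16Gi'`, `// 2**30` floors it, while `>> 30` plus one rounds the detected total *up*, so a "16 GB" box that reports slightly less does not lose a gigabyte. A quick self-contained check of that arithmetic; the byte count is a made-up example of what psutil might report:

```python
GIB = 2**30  # 1073741824 bytes in a gibibyte

detected = 16_519_104_512           # assumed psutil reading on a "16 GB" box
assert detected // GIB == 15        # plain floor division drops the partial GiB
assert (detected >> 30) + 1 == 16   # >> 30 floors the same way; +1 rounds up
```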
```diff
@@ -373,9 +346,6 @@ class AutoscalePool(WorkerPool):
         self.scale_up_ct = 0
         self.worker_count_max = 0

-        # last time we wrote current tasks, to avoid too much log spam
-        self.last_task_list_log = time.monotonic()
-
     def produce_subsystem_metrics(self, metrics_object):
         metrics_object.set('dispatcher_pool_scale_up_events', self.scale_up_ct)
         metrics_object.set('dispatcher_pool_active_task_count', sum(len(w.managed_tasks) for w in self.workers))
```
```diff
@@ -493,14 +463,6 @@ class AutoscalePool(WorkerPool):
             self.worker_count_max = new_worker_ct
         return ret

-    @staticmethod
-    def fast_task_serialization(current_task):
-        try:
-            return str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
-        except Exception:
-            # just make sure this does not make things worse
-            return str(current_task)
-
     def write(self, preferred_queue, body):
         if 'guid' in body:
             set_guid(body['guid'])
```
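For reference, the helper removed just above builds a stable slug per task: args and kwarg names are sorted, so equivalent invocations collapse to one key even if their ordering differs. A hedged example with an assumed task body (the task name and arguments are illustrative, not taken from this diff):

```python
current_task = {'task': 'awx.main.tasks.jobs.RunJob', 'args': [42], 'kwargs': {'b': 1, 'a': 2}}

# str(sorted(...)) over a dict sorts and stringifies the *keys* only
slug = str(current_task.get('task')) + ' - ' + str(sorted(current_task.get('args', []))) + ' - ' + str(sorted(current_task.get('kwargs', {})))
print(slug)  # awx.main.tasks.jobs.RunJob - [42] - ['a', 'b']
```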
```diff
@@ -522,15 +484,6 @@ class AutoscalePool(WorkerPool):
                 if isinstance(body, dict):
                     task_name = body.get('task')
                     logger.warning(f'Workers maxed, queuing {task_name}, load: {sum(len(w.managed_tasks) for w in self.workers)} / {len(self.workers)}')
-                # Once every 10 seconds write out task list for debugging
-                if time.monotonic() - self.last_task_list_log >= 10.0:
-                    task_counts = {}
-                    for worker in self.workers:
-                        task_slug = self.fast_task_serialization(worker.current_task)
-                        task_counts.setdefault(task_slug, 0)
-                        task_counts[task_slug] += 1
-                    logger.info(f'Running tasks by count:\n{json.dumps(task_counts, indent=2)}')
-                    self.last_task_list_log = time.monotonic()
             return super(AutoscalePool, self).write(preferred_queue, body)
         except Exception:
             for conn in connections.all():
```
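Two small patterns disappear with this block: a monotonic-clock throttle (`time.monotonic()` is immune to wall-clock adjustments, unlike `time.time()`), and a hand-rolled tally that `collections.Counter` expresses directly. A self-contained sketch of both, not tied to the dispatcher classes:

```python
import time
from collections import Counter


class LogThrottle:
    """Allow an expensive log line through at most once per interval."""

    def __init__(self, interval=10.0):
        self.interval = interval
        self.last = time.monotonic()  # monotonic clocks never jump backwards

    def ready(self):
        now = time.monotonic()
        if now - self.last >= self.interval:
            self.last = now
            return True
        return False


# equivalent of the removed setdefault/+= tally loop:
task_counts = Counter(['awx.main.tasks.jobs.RunJob'] * 3)
print(dict(task_counts))  # {'awx.main.tasks.jobs.RunJob': 3}
```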
```diff
@@ -238,7 +238,7 @@ class AWXConsumerPG(AWXConsumerBase):
     def run(self, *args, **kwargs):
         super(AWXConsumerPG, self).run(*args, **kwargs)

-        logger.info(f"Running {self.name}, workers min={self.pool.min_workers} max={self.pool.max_workers}, listening to queues {self.queues}")
+        logger.info(f"Running worker {self.name} listening to queues {self.queues}")
         init = False

         while True:
```
```diff
@@ -602,7 +602,7 @@ class JobEvent(BasePlaybookEvent):
             h.last_job_host_summary_id = host_mapping[h.id]
             updated_hosts.add(h)

-        Host.objects.bulk_update(sorted(updated_hosts, key=lambda host: host.id), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)
+        Host.objects.bulk_update(list(updated_hosts), ['last_job_id', 'last_job_host_summary_id'], batch_size=100)

         # Create/update Host Metrics
         self._update_host_metrics(updated_hosts_list)
```
```diff
@@ -62,8 +62,7 @@ def start_fact_cache(hosts, destination, log_data, timeout=None, inventory_id=None


 def raw_update_hosts(host_list):
-    host_list = sorted(host_list, key=lambda host: host.id)
-    Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'], batch_size=100)
+    Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'])


 def update_hosts(host_list, max_tries=5):
```
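This hunk (together with the events.py one above) is the crux of the devel_bug_ branch: two transactions that update the same rows in different orders can each hold a row lock the other needs. An annotated copy of the lines being removed, sketching the reasoning (matching the "presumed fix" comment in the contention test further down):

```python
# Process A (shuffled order): UPDATE host 2 ...; then UPDATE host 1 ...
# Process B:                  UPDATE host 1 ...; then UPDATE host 2 ...
# A holds row 2's lock while waiting on row 1; B holds row 1's lock while
# waiting on row 2. That cycle is a deadlock, which postgres breaks by
# aborting one transaction. Sorting by primary key first gives every
# writer the same global lock-acquisition order, so no cycle can form.
host_list = sorted(host_list, key=lambda host: host.id)
Host.objects.bulk_update(host_list, ['ansible_facts', 'ansible_facts_modified'], batch_size=100)
```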
```diff
@@ -1,17 +0,0 @@
-import time
-import logging
-
-from awx.main.dispatch import get_task_queuename
-from awx.main.dispatch.publish import task
-
-
-logger = logging.getLogger(__name__)
-
-
-@task(queue=get_task_queuename)
-def sleep_task(seconds=10, log=False):
-    if log:
-        logger.info('starting sleep_task')
-    time.sleep(seconds)
-    if log:
-        logger.info('finished sleep_task')
```
```diff
@@ -34,18 +34,40 @@ def test_wrapup_does_send_notifications(mocker):
     mock.assert_called_once_with('succeeded')


+class FakeRedis:
+    def keys(self, *args, **kwargs):
+        return []
+
+    def set(self):
+        pass
+
+    def get(self):
+        return None
+
+    @classmethod
+    def from_url(cls, *args, **kwargs):
+        return cls()
+
+    def pipeline(self):
+        return self
+
+
 class TestCallbackBrokerWorker(TransactionTestCase):
     @pytest.fixture(autouse=True)
-    def turn_off_websockets_and_redis(self, fake_redis):
+    def turn_off_websockets(self):
         with mock.patch('awx.main.dispatch.worker.callback.emit_event_detail', lambda *a, **kw: None):
             yield

+    def get_worker(self):
+        with mock.patch('redis.Redis', new=FakeRedis):  # turn off redis stuff
+            return CallbackBrokerWorker()
+
     def event_create_kwargs(self):
         inventory_update = InventoryUpdate.objects.create(source='file', inventory_source=InventorySource.objects.create(source='file'))
         return dict(inventory_update=inventory_update, created=inventory_update.created)

     def test_flush_with_valid_event(self):
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
         worker.buff = {InventoryUpdateEvent: events}
         worker.flush()
```
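FakeRedis is a duck-typed stub: it only implements the handful of methods the worker actually touches, and `pipeline()` returning `self` lets chained pipeline calls resolve against the same stub. A sketch of the patching pattern `get_worker` relies on (the same idea as the conftest fixture removed further down):

```python
from unittest import mock

# Constructing the worker inside the patch means any redis.Redis() the
# callback worker creates (directly or via from_url) is a FakeRedis,
# so the unit test needs no live Redis server.
with mock.patch('redis.Redis', new=FakeRedis):
    worker = CallbackBrokerWorker()
```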
```diff
@@ -53,7 +75,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
         assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 1

     def test_flush_with_invalid_event(self):
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         kwargs = self.event_create_kwargs()
         events = [
             InventoryUpdateEvent(uuid=str(uuid4()), stdout='good1', **kwargs),
@@ -68,7 +90,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
         assert worker.buff == {InventoryUpdateEvent: [events[1]]}

     def test_duplicate_key_not_saved_twice(self):
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         events = [InventoryUpdateEvent(uuid=str(uuid4()), **self.event_create_kwargs())]
         worker.buff = {InventoryUpdateEvent: events.copy()}
         worker.flush()
@@ -82,7 +104,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
         assert worker.buff.get(InventoryUpdateEvent, []) == []

     def test_give_up_on_bad_event(self):
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         events = [InventoryUpdateEvent(uuid=str(uuid4()), counter=-2, **self.event_create_kwargs())]
         worker.buff = {InventoryUpdateEvent: events.copy()}

@@ -95,7 +117,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
         assert InventoryUpdateEvent.objects.filter(uuid=events[0].uuid).count() == 0  # sanity

     def test_flush_with_empty_buffer(self):
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         worker.buff = {InventoryUpdateEvent: []}
         with mock.patch.object(InventoryUpdateEvent.objects, 'bulk_create') as flush_mock:
             worker.flush()
@@ -105,7 +127,7 @@ class TestCallbackBrokerWorker(TransactionTestCase):
         # In postgres, text fields reject NUL character, 0x00
         # tests use sqlite3 which will not raise an error
         # but we can still test that it is sanitized before saving
-        worker = CallbackBrokerWorker()
+        worker = self.get_worker()
         kwargs = self.event_create_kwargs()
         events = [InventoryUpdateEvent(uuid=str(uuid4()), stdout="\x00", **kwargs)]
         assert "\x00" in events[0].stdout  # sanity
```
```diff
@@ -63,33 +63,6 @@ def swagger_autogen(requests=__SWAGGER_REQUESTS__):
     return requests


-class FakeRedis:
-    def keys(self, *args, **kwargs):
-        return []
-
-    def set(self):
-        pass
-
-    def get(self):
-        return None
-
-    @classmethod
-    def from_url(cls, *args, **kwargs):
-        return cls()
-
-    def pipeline(self):
-        return self
-
-    def ping(self):
-        return
-
-
-@pytest.fixture
-def fake_redis():
-    with mock.patch('redis.Redis', new=FakeRedis):  # turn off redis stuff
-        yield
-
-
 @pytest.fixture
 def user():
     def u(name, is_superuser=False):
```
```diff
@@ -3,10 +3,6 @@ import pytest

 # AWX
 from awx.main.ha import is_ha_environment
 from awx.main.models.ha import Instance
-from awx.main.dispatch.pool import get_auto_max_workers
-
-# Django
-from django.test.utils import override_settings


 @pytest.mark.django_db
```
```diff
@@ -21,25 +17,3 @@ def test_db_localhost():
     Instance.objects.create(hostname='foo', node_type='hybrid')
     Instance.objects.create(hostname='bar', node_type='execution')
     assert is_ha_environment() is False
-
-
-@pytest.mark.django_db
-@pytest.mark.parametrize(
-    'settings',
-    [
-        dict(SYSTEM_TASK_ABS_MEM='16Gi', SYSTEM_TASK_ABS_CPU='24', SYSTEM_TASK_FORKS_MEM=400, SYSTEM_TASK_FORKS_CPU=4),
-        dict(SYSTEM_TASK_ABS_MEM='124Gi', SYSTEM_TASK_ABS_CPU='2', SYSTEM_TASK_FORKS_MEM=None, SYSTEM_TASK_FORKS_CPU=None),
-    ],
-    ids=['cpu_dominated', 'memory_dominated'],
-)
-def test_dispatcher_max_workers_reserve(settings, fake_redis):
-    """This tests that the dispatcher max_workers matches instance capacity
-
-    Assumes capacity_adjustment is 1,
-    plus reserve worker count
-    """
-    with override_settings(**settings):
-        i = Instance.objects.create(hostname='test-1', node_type='hybrid')
-        i.local_health_check()
-
-        assert get_auto_max_workers() == i.capacity + 7, (i.cpu, i.memory, i.cpu_capacity, i.mem_capacity)
```
```diff
@@ -1,79 +0,0 @@
-import multiprocessing
-import random
-
-from django.db import connection
-from django.utils.timezone import now
-
-from awx.main.models import Inventory, Host
-
-
-def worker_delete_target(ready_event, continue_event, field_name):
-    """Runs the bulk update; will be called in duplicate, in parallel"""
-    inv = Inventory.objects.get(organization__name='Default', name='test_host_update_contention')
-    host_list = list(inv.hosts.all())
-    random.shuffle(host_list)
-    for i, host in enumerate(host_list):
-        setattr(host, field_name, f'my_var: {i}')
-
-    # ready to do the bulk_update
-    print('worker has loaded all the hosts needed')
-    ready_event.set()
-    # wait for the coordination message
-    continue_event.wait()
-
-    # # presumed fix
-    # host_list = sorted(host_list, key=lambda host: host.id)
-
-    # NOTE: did not reproduce the bug without batch_size
-    Host.objects.bulk_update(host_list, [field_name], batch_size=100)
-    print('finished doing the bulk update in worker')
-
-
-def test_host_update_contention(default_org):
-    inv_kwargs = dict(organization=default_org, name='test_host_update_contention')
-
-    if Inventory.objects.filter(**inv_kwargs).exists():
-        inv = Inventory.objects.get(**inv_kwargs).delete()
-
-    inv = Inventory.objects.create(**inv_kwargs)
-    right_now = now()
-    hosts = [Host(inventory=inv, name=f'host-{i}', created=right_now, modified=right_now) for i in range(1000)]
-    print('bulk creating hosts')
-    Host.objects.bulk_create(hosts)
-
-    # sanity check
-    for host in hosts:
-        assert not host.variables
-
-    # Force our worker pool to make their own connection
-    connection.close()
-
-    ready_events = [multiprocessing.Event() for _ in range(2)]
-    continue_event = multiprocessing.Event()
-
-    print('spawning processes for concurrent bulk updates')
-    processes = []
-    fields = ['variables', 'ansible_facts']
-    for i in range(2):
-        p = multiprocessing.Process(target=worker_delete_target, args=(ready_events[i], continue_event, fields[i]))
-        processes.append(p)
-        p.start()
-
-    # Assure both processes are connected and have loaded their host list
-    for e in ready_events:
-        print('waiting on subprocess ready event')
-        e.wait()
-
-    # Begin the bulk_update queries
-    print('setting the continue event for the workers')
-    continue_event.set()
-
-    # if a deadlock happens it will probably be surfaced by the result here
-    print('waiting on the workers to finish the bulk_update')
-    for p in processes:
-        p.join()
-
-    print('checking workers have variables set')
-    for host in inv.hosts.all():
-        assert host.variables.startswith('my_var:')
-        assert host.ansible_facts.startswith('my_var:')
```
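The removed test coordinates its race with a two-Event handshake: each worker signals "ready" after loading its rows, then blocks until the coordinator releases both at once so the `bulk_update` calls overlap as tightly as possible. A minimal self-contained sketch of that coordination pattern, with no Django involved:

```python
import multiprocessing


def worker(ready, go):
    # do per-process setup here (load rows, open a connection, ...)
    ready.set()   # signal: setup finished
    go.wait()     # block until the coordinator fires the race
    # the contended operation goes here


if __name__ == '__main__':
    go = multiprocessing.Event()
    ready_events, workers = [], []
    for _ in range(2):
        ready = multiprocessing.Event()
        p = multiprocessing.Process(target=worker, args=(ready, go))
        p.start()
        ready_events.append(ready)
        workers.append(p)

    for ready in ready_events:
        ready.wait()  # both workers have finished setup
    go.set()          # release both at once to maximize overlap
    for p in workers:
        p.join()
```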
```diff
@@ -103,7 +103,7 @@ def test_finish_job_fact_cache_with_existing_data(hosts, mocker, tmpdir, ref_time
     assert host.ansible_facts_modified == ref_time
     assert hosts[1].ansible_facts == ansible_facts_new
     assert hosts[1].ansible_facts_modified > ref_time
-    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'], batch_size=100)
+    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])


 def test_finish_job_fact_cache_with_bad_data(hosts, mocker, tmpdir):
@@ -139,4 +139,4 @@ def test_finish_job_fact_cache_clear(hosts, mocker, ref_time, tmpdir):
     assert host.ansible_facts_modified == ref_time
     assert hosts[1].ansible_facts == {}
     assert hosts[1].ansible_facts_modified > ref_time
-    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'], batch_size=100)
+    bulk_update.assert_called_once_with([hosts[1]], ['ansible_facts', 'ansible_facts_modified'])
```
```diff
@@ -1,11 +0,0 @@
-Copyright 2022 Rick van Hattem
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
licenses/psycopg-3.2.6.tar.gz (BIN, new file; binary content not shown)
licenses/uwsgi-2.0.29.tar.gz (BIN, new file; binary content not shown)
```diff
@@ -54,7 +54,7 @@ python-tss-sdk>=1.2.1
 pyyaml>=6.0.2  # require packing fix for cython 3 or higher
 pyzstd  # otel collector log file compression library
 receptorctl
-sqlparse>=0.4.4  # Required by django https://github.com/ansible/awx/security/dependabot/96
+sqlparse>=0.5.2
 redis[hiredis]
 requests
 slack-sdk
```
```diff
@@ -1,13 +1,13 @@
 adal==1.2.7
     # via msrestazure
-aiohappyeyeballs==2.4.4
+aiohappyeyeballs==2.6.1
     # via aiohttp
-aiohttp==3.11.11
+aiohttp==3.11.16
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   aiohttp-retry
     #   twilio
-aiohttp-retry==2.8.3
+aiohttp-retry==2.9.1
     # via twilio
 aiosignal==1.3.2
     # via aiohttp
@@ -25,9 +25,9 @@ asgiref==3.8.1
     #   django
     #   django-ansible-base
     #   django-cors-headers
-asn1==2.7.1
+asn1==3.0.0
     # via -r /awx_devel/requirements/requirements.in
-attrs==24.3.0
+attrs==25.3.0
     # via
     #   aiohttp
     #   jsonschema
@@ -46,14 +46,14 @@ awx-plugins.interfaces @ git+https://github.com/ansible/awx_plugins.interfaces.git
     # via
     #   -r /awx_devel/requirements/requirements_git.txt
     #   awx-plugins-core
-azure-core==1.32.0
+azure-core==1.33.0
     # via
     #   azure-identity
     #   azure-keyvault-certificates
     #   azure-keyvault-keys
     #   azure-keyvault-secrets
     #   msrest
-azure-identity==1.19.0
+azure-identity==1.21.0
     # via -r /awx_devel/requirements/requirements.in
 azure-keyvault==4.2.0
     # via -r /awx_devel/requirements/requirements.in
@@ -65,14 +65,14 @@ azure-keyvault-secrets==4.9.0
     # via azure-keyvault
 backports-tarfile==1.2.0
     # via jaraco-context
-boto3==1.35.96
+boto3==1.37.34
     # via -r /awx_devel/requirements/requirements.in
-botocore==1.35.96
+botocore==1.37.34
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   boto3
     #   s3transfer
-cachetools==5.5.0
+cachetools==5.5.2
     # via google-auth
 # git+https://github.com/ansible/system-certifi.git@devel  # git requirements installed separately
     # via
@@ -84,7 +84,7 @@ cffi==1.17.1
     # via
     #   cryptography
     #   pynacl
-channels==4.2.0
+channels==4.2.2
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   channels-redis
@@ -109,11 +109,11 @@ cryptography==41.0.7
     #   pyjwt
     #   pyopenssl
     #   service-identity
-cython==3.0.11
+cython==3.0.12
     # via -r /awx_devel/requirements/requirements.in
 daphne==4.1.2
     # via -r /awx_devel/requirements/requirements.in
-deprecated==1.2.15
+deprecated==1.2.18
     # via
     #   opentelemetry-api
     #   opentelemetry-exporter-otlp-proto-grpc
@@ -138,19 +138,19 @@ django==4.2.20
     #   djangorestframework
 django-ansible-base @ git+https://github.com/ansible/django-ansible-base@devel  # git requirements installed separately
     # via -r /awx_devel/requirements/requirements_git.txt
-django-cors-headers==4.6.0
+django-cors-headers==4.7.0
     # via -r /awx_devel/requirements/requirements.in
 django-crum==0.7.9
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   django-ansible-base
-django-extensions==3.2.3
+django-extensions==4.1
     # via -r /awx_devel/requirements/requirements.in
 django-flags==5.0.13
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   django-ansible-base
-django-guid==3.5.0
+django-guid==3.5.1
     # via -r /awx_devel/requirements/requirements.in
 django-oauth-toolkit==1.7.1
     # via -r /awx_devel/requirements/requirements.in
@@ -158,7 +158,7 @@ django-polymorphic==3.1.0
     # via -r /awx_devel/requirements/requirements.in
 django-solo==2.4.0
     # via -r /awx_devel/requirements/requirements.in
-djangorestframework==3.15.2
+djangorestframework==3.16.0
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   django-ansible-base
@@ -172,7 +172,7 @@ dynaconf==3.2.10
     #   django-ansible-base
 enum-compat==0.0.3
     # via asn1
-filelock==3.16.1
+filelock==3.18.0
     # via -r /awx_devel/requirements/requirements.in
 frozenlist==1.5.0
     # via
@@ -182,13 +182,13 @@ gitdb==4.0.12
     # via gitpython
 gitpython==3.1.44
     # via -r /awx_devel/requirements/requirements.in
-google-auth==2.37.0
+google-auth==2.39.0
     # via kubernetes
-googleapis-common-protos==1.66.0
+googleapis-common-protos==1.70.0
     # via
     #   opentelemetry-exporter-otlp-proto-grpc
     #   opentelemetry-exporter-otlp-proto-http
-grpcio==1.69.0
+grpcio==1.71.0
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   opentelemetry-exporter-otlp-proto-grpc
@@ -204,7 +204,7 @@ idna==3.10
     #   requests
     #   twisted
     #   yarl
-importlib-metadata==8.5.0
+importlib-metadata==8.6.1
     # via opentelemetry-api
 importlib-resources==6.5.2
     # via irc
@@ -237,7 +237,7 @@ jaraco-text==4.0.0
     # via
     #   irc
     #   jaraco-collections
-jinja2==3.1.5
+jinja2==3.1.6
     # via -r /awx_devel/requirements/requirements.in
 jmespath==1.0.1
     # via
@@ -245,7 +245,7 @@ jmespath==1.0.1
     #   botocore
 jq==1.8.0
     # via -r /awx_devel/requirements/requirements.in
-json-log-formatter==1.1
+json-log-formatter==1.1.1
     # via -r /awx_devel/requirements/requirements.in
 jsonschema==4.23.0
     # via -r /awx_devel/requirements/requirements.in
@@ -253,27 +253,27 @@ jsonschema-specifications==2024.10.1
     # via jsonschema
 jwcrypto==1.5.6
     # via django-oauth-toolkit
-kubernetes==31.0.0
+kubernetes==32.0.1
     # via openshift
 lockfile==0.12.2
     # via python-daemon
-markdown==3.7
+markdown==3.8
     # via -r /awx_devel/requirements/requirements.in
 markupsafe==3.0.2
     # via jinja2
-maturin==1.8.1
+maturin==1.8.3
     # via -r /awx_devel/requirements/requirements.in
-more-itertools==10.5.0
+more-itertools==10.6.0
     # via
     #   irc
     #   jaraco-functools
     #   jaraco-stream
     #   jaraco-text
-msal==1.31.1
+msal==1.32.0
     # via
     #   azure-identity
     #   msal-extensions
-msal-extensions==1.2.0
+msal-extensions==1.3.1
     # via azure-identity
 msgpack==1.1.0
     # via
@@ -283,7 +283,7 @@ msrest==0.7.1
     # via msrestazure
 msrestazure==0.6.4.post1
     # via -r /awx_devel/requirements/requirements.in
-multidict==6.1.0
+multidict==6.4.3
     # via
     #   aiohttp
     #   yarl
@@ -294,7 +294,7 @@ oauthlib==3.2.2
     #   requests-oauthlib
 openshift==0.13.2
     # via -r /awx_devel/requirements/requirements.in
-opentelemetry-api==1.29.0
+opentelemetry-api==1.32.0
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   opentelemetry-exporter-otlp-proto-grpc
@@ -303,31 +303,31 @@ opentelemetry-api==1.29.0
     #   opentelemetry-instrumentation-logging
     #   opentelemetry-sdk
     #   opentelemetry-semantic-conventions
-opentelemetry-exporter-otlp==1.29.0
+opentelemetry-exporter-otlp==1.32.0
     # via -r /awx_devel/requirements/requirements.in
-opentelemetry-exporter-otlp-proto-common==1.29.0
+opentelemetry-exporter-otlp-proto-common==1.32.0
     # via
     #   opentelemetry-exporter-otlp-proto-grpc
     #   opentelemetry-exporter-otlp-proto-http
-opentelemetry-exporter-otlp-proto-grpc==1.29.0
+opentelemetry-exporter-otlp-proto-grpc==1.32.0
     # via opentelemetry-exporter-otlp
-opentelemetry-exporter-otlp-proto-http==1.29.0
+opentelemetry-exporter-otlp-proto-http==1.32.0
     # via opentelemetry-exporter-otlp
-opentelemetry-instrumentation==0.50b0
+opentelemetry-instrumentation==0.53b0
     # via opentelemetry-instrumentation-logging
-opentelemetry-instrumentation-logging==0.50b0
+opentelemetry-instrumentation-logging==0.53b0
     # via -r /awx_devel/requirements/requirements.in
-opentelemetry-proto==1.29.0
+opentelemetry-proto==1.32.0
     # via
     #   opentelemetry-exporter-otlp-proto-common
     #   opentelemetry-exporter-otlp-proto-grpc
     #   opentelemetry-exporter-otlp-proto-http
-opentelemetry-sdk==1.29.0
+opentelemetry-sdk==1.32.0
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   opentelemetry-exporter-otlp-proto-grpc
     #   opentelemetry-exporter-otlp-proto-http
-opentelemetry-semantic-conventions==0.50b0
+opentelemetry-semantic-conventions==0.53b0
     # via
     #   opentelemetry-instrumentation
     #   opentelemetry-sdk
@@ -342,21 +342,19 @@ pexpect==4.7.0
     #   ansible-runner
 pkgconfig==1.5.5
     # via -r /awx_devel/requirements/requirements.in
-portalocker==2.10.1
-    # via msal-extensions
 prometheus-client==0.21.1
     # via -r /awx_devel/requirements/requirements.in
-propcache==0.2.1
+propcache==0.3.1
     # via
     #   aiohttp
     #   yarl
-protobuf==5.29.3
+protobuf==5.29.4
     # via
     #   googleapis-common-protos
     #   opentelemetry-proto
-psutil==6.1.1
+psutil==7.0.0
     # via -r /awx_devel/requirements/requirements.in
-psycopg==3.2.3
+psycopg==3.2.6
     # via -r /awx_devel/requirements/requirements.in
 ptyprocess==0.7.0
     # via pexpect
@@ -365,7 +363,7 @@ pyasn1==0.6.1
     #   pyasn1-modules
     #   rsa
     #   service-identity
-pyasn1-modules==0.4.1
+pyasn1-modules==0.4.2
     # via
     #   google-auth
     #   service-identity
@@ -384,7 +382,7 @@ pyjwt[crypto]==2.10.1
     #   twilio
 pynacl==1.5.0
     # via pygithub
-pyopenssl==24.3.0
+pyopenssl==25.0.0
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   twisted
@@ -407,7 +405,7 @@ python-string-utils==1.0.0
     # via openshift
 python-tss-sdk==1.2.3
     # via -r /awx_devel/requirements/requirements.in
-pytz==2024.2
+pytz==2025.2
     # via irc
 pyyaml==6.0.2
     # via
@@ -418,13 +416,13 @@ pyyaml==6.0.2
     #   receptorctl
 pyzstd==0.16.2
     # via -r /awx_devel/requirements/requirements.in
-receptorctl==1.5.2
+receptorctl==1.5.4
     # via -r /awx_devel/requirements/requirements.in
 redis[hiredis]==5.2.1
     # via
     #   -r /awx_devel/requirements/requirements.in
     #   channels-redis
-referencing==0.35.1
+referencing==0.36.2
     # via
     #   jsonschema
     #   jsonschema-specifications
@@ -448,21 +446,21 @@ requests-oauthlib==2.0.0
     # via
     #   kubernetes
     #   msrest
-rpds-py==0.22.3
+rpds-py==0.24.0
     # via
     #   jsonschema
     #   referencing
 rsa==4.9
     # via google-auth
-s3transfer==0.10.4
+s3transfer==0.11.4
     # via boto3
 semantic-version==2.10.0
     # via setuptools-rust
 service-identity==24.2.0
     # via twisted
-setuptools-rust==1.10.2
+setuptools-rust==1.11.1
     # via -r /awx_devel/requirements/requirements.in
-setuptools-scm[toml]==8.1.0
+setuptools-scm[toml]==8.2.0
     # via -r /awx_devel/requirements/requirements.in
 six==1.17.0
     # via
@@ -472,7 +470,7 @@ six==1.17.0
     #   openshift
     #   pygerduty
     #   python-dateutil
-slack-sdk==3.34.0
+slack-sdk==3.35.0
     # via -r /awx_devel/requirements/requirements.in
 smmap==5.0.2
     # via gitdb
@@ -485,7 +483,7 @@ tempora==5.8.0
     # via
     #   irc
     #   jaraco-logging
-twilio==9.4.2
+twilio==9.5.2
     # via -r /awx_devel/requirements/requirements.in
 twisted[tls]==24.11.0
     # via
@@ -493,7 +491,7 @@ twisted[tls]==24.11.0
     #   daphne
 txaio==23.1.1
     # via autobahn
-typing-extensions==4.12.2
+typing-extensions==4.13.2
     # via
     #   azure-core
     #   azure-identity
@@ -504,15 +502,17 @@ typing-extensions==4.12.2
     #   opentelemetry-sdk
     #   psycopg
     #   pygithub
     #   pyopenssl
+    #   referencing
     #   twisted
-urllib3==2.3.0
+urllib3==2.4.0
     # via
     #   botocore
     #   django-ansible-base
     #   kubernetes
     #   pygithub
     #   requests
-uwsgi==2.0.28
+uwsgi==2.0.29
     # via -r /awx_devel/requirements/requirements.in
 uwsgitop==0.12
     # via -r /awx_devel/requirements/requirements.in
@@ -520,11 +520,11 @@ websocket-client==1.8.0
     # via kubernetes
 wheel==0.45.1
     # via -r /awx_devel/requirements/requirements.in
-wrapt==1.17.0
+wrapt==1.17.2
     # via
     #   deprecated
     #   opentelemetry-instrumentation
-yarl==1.18.3
+yarl==1.19.0
     # via aiohttp
 zipp==3.21.0
     # via importlib-metadata
```
```diff
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-
-from django import setup
-
-from awx import prepare_env
-
-prepare_env()
-
-setup()
-
-# Keeping this in test folder allows it to be importable
-from awx.main.tests.data.sleep_task import sleep_task
-
-
-for i in range(634):
-    sleep_task.delay()
```