mirror of https://github.com/ansible/awx.git
move code linting to a stricter pep8-esque auto-formatting tool, black
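For reference, below is a minimal sketch of the kind of rewrite this commit applies, driven through black's Python API (black is normally run as a command-line tool; the API is used here only to keep the example self-contained). It assumes black is installed and uses the default FileMode(); the line-length and string-quote settings this repository actually adopts are not visible in this diff.

    # Minimal sketch: reformatting a snippet with black's Python API.
    # Assumes `black` is installed; the default FileMode() is used because the
    # project's real configuration (e.g. line length) is not shown in this diff.
    import black

    source = (
        "conn = psycopg2.connect(dbname=conf['NAME'],\n"
        "                        host=conf['HOST'],\n"
        "                        user=conf['USER'])\n"
    )

    # format_str() returns the source reformatted to black's style; the
    # hand-wrapped call above comes back collapsed onto one line when it fits.
    print(black.format_str(source, mode=black.FileMode()))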
@@ -47,16 +47,9 @@ class PubSub(object):
 @contextmanager
 def pg_bus_conn():
     conf = settings.DATABASES['default']
-    conn = psycopg2.connect(dbname=conf['NAME'],
-                            host=conf['HOST'],
-                            user=conf['USER'],
-                            password=conf['PASSWORD'],
-                            port=conf['PORT'],
-                            **conf.get("OPTIONS", {}))
+    conn = psycopg2.connect(dbname=conf['NAME'], host=conf['HOST'], user=conf['USER'], password=conf['PASSWORD'], port=conf['PORT'], **conf.get("OPTIONS", {}))
     # Django connection.cursor().connection doesn't have autocommit=True on
     conn.set_session(autocommit=True)
     pubsub = PubSub(conn)
     yield pubsub
     conn.close()
@@ -48,8 +48,7 @@ class Control(object):
         with pg_bus_conn() as conn:
             conn.listen(reply_queue)
-            conn.notify(self.queuename,
-                        json.dumps({'control': command, 'reply_to': reply_queue}))
+            conn.notify(self.queuename, json.dumps({'control': command, 'reply_to': reply_queue}))

             for reply in conn.events(select_timeout=timeout, yield_timeouts=True):
                 if reply is None:
@@ -14,12 +14,8 @@ logger = logging.getLogger('awx.main.dispatch.periodic')
 class Scheduler(Scheduler):
-
     def run_continuously(self):
-        idle_seconds = max(
-            1,
-            min(self.jobs).period.total_seconds() / 2
-        )
+        idle_seconds = max(1, min(self.jobs).period.total_seconds() / 2)

         def run():
             ppid = os.getppid()
@@ -39,9 +35,7 @@ class Scheduler(Scheduler):
                     GuidMiddleware.set_guid(GuidMiddleware._generate_guid())
                     self.run_pending()
                 except Exception:
-                    logger.exception(
-                        'encountered an error while scheduling periodic tasks'
-                    )
+                    logger.exception('encountered an error while scheduling periodic tasks')
                 time.sleep(idle_seconds)

         process = Process(target=run)
@@ -30,13 +30,12 @@ else:
 class NoOpResultQueue(object):
-
     def put(self, item):
         pass


 class PoolWorker(object):
-    '''
+    """
     Used to track a worker child process and its pending and finished messages.

     This class makes use of two distinct multiprocessing.Queues to track state:
@@ -62,7 +61,7 @@ class PoolWorker(object):
     A worker is "busy" when it has at least one message in self.managed_tasks.
     It is "idle" when self.managed_tasks is empty.
-    '''
+    """

     track_managed_tasks = False
@@ -91,10 +90,10 @@ class PoolWorker(object):
         self.calculate_managed_tasks()

     def quit(self):
-        '''
+        """
         Send a special control message to the worker that tells it to exit
         gracefully.
-        '''
+        """
         self.queue.put('QUIT')

     @property
@@ -112,9 +111,7 @@ class PoolWorker(object):
     @property
     def mb(self):
         if self.alive:
-            return '{:0.3f}'.format(
-                psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
-            )
+            return '{:0.3f}'.format(psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0)
         return '0'

     @property
@@ -179,11 +176,7 @@ class PoolWorker(object):
             except QueueEmpty:
                 break  # qsize is not always _totally_ up to date
         if len(orphaned):
-            logger.error(
-                'requeuing {} messages from gone worker pid:{}'.format(
-                    len(orphaned), self.pid
-                )
-            )
+            logger.error('requeuing {} messages from gone worker pid:{}'.format(len(orphaned), self.pid))
         return orphaned

     @property
@@ -202,7 +195,7 @@ class StatefulPoolWorker(PoolWorker):
 class WorkerPool(object):
-    '''
+    """
     Creates a pool of forked PoolWorkers.

     As WorkerPool.write(...) is called (generally, by a kombu consumer
@@ -220,7 +213,7 @@ class WorkerPool(object):
         0,  # preferred worker 0
         'Hello, World!'
     )
-    '''
+    """

     pool_cls = PoolWorker
     debug_meta = ''
@@ -284,13 +277,10 @@ class WorkerPool(object):
             '{% endfor %}'
         )
         now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')
-        return tmpl.render(
-            pool=self, workers=self.workers, meta=self.debug_meta,
-            dt=now
-        )
+        return tmpl.render(pool=self, workers=self.workers, meta=self.debug_meta, dt=now)

     def write(self, preferred_queue, body):
-        queue_order = sorted(range(len(self.workers)), key=lambda x: -1 if x==preferred_queue else x)
+        queue_order = sorted(range(len(self.workers)), key=lambda x: -1 if x == preferred_queue else x)
         write_attempt_order = []
         for queue_actual in queue_order:
             try:
@@ -315,10 +305,10 @@ class WorkerPool(object):
 class AutoscalePool(WorkerPool):
-    '''
+    """
     An extended pool implementation that automatically scales workers up and
     down based on demand
-    '''
+    """

     pool_cls = StatefulPoolWorker
@@ -333,7 +323,7 @@ class AutoscalePool(WorkerPool):
         else:
             total_memory_gb = (psutil.virtual_memory().total >> 30) + 1  # noqa: round up
             # 5 workers per GB of total memory
-            self.max_workers = (total_memory_gb * 5)
+            self.max_workers = total_memory_gb * 5

         # max workers can't be less than min_workers
         self.max_workers = max(self.min_workers, self.max_workers)
@@ -410,15 +400,11 @@ class AutoscalePool(WorkerPool):
            if current_task and isinstance(current_task, dict):
                if current_task.get('task', '').endswith('tasks.run_task_manager'):
                    if 'started' not in current_task:
-                       w.managed_tasks[
-                           current_task['uuid']
-                       ]['started'] = time.time()
+                       w.managed_tasks[current_task['uuid']]['started'] = time.time()
                    age = time.time() - current_task['started']
                    w.managed_tasks[current_task['uuid']]['age'] = age
                    if age > (60 * 5):
-                       logger.error(
-                           f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}'
-                       )  # noqa
+                       logger.error(f'run_task_manager has held the advisory lock for >5m, sending SIGTERM to {w.pid}')  # noqa
                        os.kill(w.pid, signal.SIGTERM)

        for m in orphaned:
@@ -70,20 +70,12 @@ class task:
         task_id = uuid or str(uuid4())
         args = args or []
         kwargs = kwargs or {}
-        queue = (
-            queue or
-            getattr(cls.queue, 'im_func', cls.queue)
-        )
+        queue = queue or getattr(cls.queue, 'im_func', cls.queue)
         if not queue:
             msg = f'{cls.name}: Queue value required and may not be None'
             logger.error(msg)
             raise ValueError(msg)
-        obj = {
-            'uuid': task_id,
-            'args': args,
-            'kwargs': kwargs,
-            'task': cls.name
-        }
+        obj = {'uuid': task_id, 'args': args, 'kwargs': kwargs, 'task': cls.name}
         guid = GuidMiddleware.get_guid()
         if guid:
             obj['guid'] = guid
@@ -105,11 +97,7 @@ class task:
         if inspect.isclass(fn):
             bases = list(fn.__bases__)
             ns.update(fn.__dict__)
-        cls = type(
-            fn.__name__,
-            tuple(bases + [PublisherMixin]),
-            ns
-        )
+        cls = type(fn.__name__, tuple(bases + [PublisherMixin]), ns)
         if inspect.isclass(fn):
             return cls
@@ -16,23 +16,23 @@ def reap_job(j, status):
         return
     j.status = status
     j.start_args = ''  # blank field to remove encrypted passwords
-    j.job_explanation += ' '.join((
-        'Task was marked as running in Tower but was not present in',
-        'the job queue, so it has been marked as failed.',
-    ))
+    j.job_explanation += ' '.join(
+        (
+            'Task was marked as running in Tower but was not present in',
+            'the job queue, so it has been marked as failed.',
+        )
+    )
     j.save(update_fields=['status', 'start_args', 'job_explanation'])
     if hasattr(j, 'send_notification_templates'):
         j.send_notification_templates('failed')
     j.websocket_emit_status(status)
-    logger.error(
-        '{} is no longer running; reaping'.format(j.log_format)
-    )
+    logger.error('{} is no longer running; reaping'.format(j.log_format))


 def reap(instance=None, status='failed', excluded_uuids=[]):
-    '''
+    """
     Reap all jobs in waiting|running for this instance.
-    '''
+    """
     me = instance
     if me is None:
         (changed, me) = Instance.objects.get_or_register()
@@ -41,13 +41,9 @@ def reap(instance=None, status='failed', excluded_uuids=[]):
     now = tz_now()
     workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
     jobs = UnifiedJob.objects.filter(
-        (
-            Q(status='running') |
-            Q(status='waiting', modified__lte=now - timedelta(seconds=60))
-        ) & (
-            Q(execution_node=me.hostname) |
-            Q(controller_node=me.hostname)
-        ) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
+        (Q(status='running') | Q(status='waiting', modified__lte=now - timedelta(seconds=60)))
+        & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
+        & ~Q(polymorphic_ctype_id=workflow_ctype_id)
     ).exclude(celery_task_id__in=excluded_uuids)
     for j in jobs:
         reap_job(j, status)
@@ -25,14 +25,10 @@ else:
 def signame(sig):
-    return dict(
-        (k, v) for v, k in signal.__dict__.items()
-        if v.startswith('SIG') and not v.startswith('SIG_')
-    )[sig]
+    return dict((k, v) for v, k in signal.__dict__.items() if v.startswith('SIG') and not v.startswith('SIG_'))[sig]


 class WorkerSignalHandler:
-
     def __init__(self):
         self.kill_now = False
         signal.signal(signal.SIGTERM, signal.SIG_DFL)
@@ -162,7 +158,6 @@ class AWXConsumerPG(AWXConsumerBase):
 class BaseWorker(object):
-
     def read(self, queue):
         return queue.get(block=True, timeout=1)
@@ -16,9 +16,7 @@ import psutil
 import redis

 from awx.main.consumers import emit_channel_notification
-from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
-                             InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
-                             Job)
+from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
 from awx.main.tasks import handle_success_and_failure_notifications
 from awx.main.models.events import emit_event_detail
 from awx.main.utils.profiling import AWXProfiler
@@ -29,13 +27,13 @@ logger = logging.getLogger('awx.main.commands.run_callback_receiver')
 class CallbackBrokerWorker(BaseWorker):
-    '''
+    """
     A worker implementation that deserializes callback event data and persists
     it into the database.

     The code that *generates* these types of messages is found in the
     ansible-runner display callback plugin.
-    '''
+    """

     MAX_RETRIES = 2
     last_stats = time.time()
@@ -83,9 +81,7 @@ class CallbackBrokerWorker(BaseWorker):
     @property
     def mb(self):
-        return '{:0.3f}'.format(
-            psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
-        )
+        return '{:0.3f}'.format(psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0)

     def toggle_profiling(self, *args):
         if not self.prof.is_started():
@@ -102,11 +98,7 @@ class CallbackBrokerWorker(BaseWorker):
     def flush(self, force=False):
         now = tz_now()
-        if (
-            force or
-            (time.time() - self.last_flush) > settings.JOB_EVENT_BUFFER_SECONDS or
-            any([len(events) >= 1000 for events in self.buff.values()])
-        ):
+        if force or (time.time() - self.last_flush) > settings.JOB_EVENT_BUFFER_SECONDS or any([len(events) >= 1000 for events in self.buff.values()]):
             for cls, events in self.buff.items():
                 logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
                 for e in events:
@@ -161,10 +153,7 @@ class CallbackBrokerWorker(BaseWorker):
                 # closed. don't actually persist them to the database; we
                 # just use them to report `summary` websocket events as an
                 # approximation for when a job is "done"
-                emit_channel_notification(
-                    'jobs-summary',
-                    dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
-                )
+                emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter))
                 # Additionally, when we've processed all events, we should
                 # have all the data we need to send out success/failure
                 # notification templates
@@ -196,10 +185,7 @@ class CallbackBrokerWorker(BaseWorker):
                     logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                     return
                 delay = 60 * retries
-                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
-                    i=retries + 1,
-                    delay=delay
-                ))
+                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
                 django_connection.close()
                 time.sleep(delay)
                 retries += 1
@@ -17,22 +17,22 @@ logger = logging.getLogger('awx.main.dispatch')
 class TaskWorker(BaseWorker):
-    '''
+    """
     A worker implementation that deserializes task messages and runs native
     Python code.

     The code that *builds* these types of messages is found in
     `awx.main.dispatch.publish`.
-    '''
+    """

     @classmethod
     def resolve_callable(cls, task):
-        '''
+        """
         Transform a dotted notation task into an imported, callable function, e.g.,

         awx.main.tasks.delete_inventory
         awx.main.tasks.RunProjectUpdate
-        '''
+        """
         if not task.startswith('awx.'):
             raise ValueError('{} is not a valid awx task'.format(task))
         module, target = task.rsplit('.', 1)
@@ -40,17 +40,15 @@ class TaskWorker(BaseWorker):
         _call = None
         if hasattr(module, target):
             _call = getattr(module, target, None)
-        if not (
-            hasattr(_call, 'apply_async') and hasattr(_call, 'delay')
-        ):
+        if not (hasattr(_call, 'apply_async') and hasattr(_call, 'delay')):
             raise ValueError('{} is not decorated with @task()'.format(task))

         return _call

     def run_callable(self, body):
-        '''
+        """
         Given some AMQP message, import the correct Python code and run it.
-        '''
+        """
         task = body['task']
         uuid = body.get('uuid', '<unknown>')
         args = body.get('args', [])
@@ -67,7 +65,7 @@ class TaskWorker(BaseWorker):
         return _call(*args, **kwargs)

     def perform_work(self, body):
-        '''
+        """
        Import and run code for a task e.g.,

        body = {
@@ -85,7 +83,7 @@ class TaskWorker(BaseWorker):
            'kwargs': {},
            'task': u'awx.main.tasks.RunProjectUpdate'
        }
-        '''
+        """
        settings.__clean_on_fork__()
        result = None
        try:
@@ -101,9 +99,7 @@ class TaskWorker(BaseWorker):
             task = body['task']
             args = body.get('args', [])
             kwargs = body.get('kwargs', {})
-            logger.exception('Worker failed to run task {}(*{}, **{}'.format(
-                task, args, kwargs
-            ))
+            logger.exception('Worker failed to run task {}(*{}, **{}'.format(task, args, kwargs))
         except Exception:
             # It's fairly critical that this code _not_ raise exceptions on logging
             # If you configure external logging in a way that _it_ fails, there's