Use runtime log utility moved to DAB (#15675)

* Use runtime log utility moved to DAB
This commit is contained in:
Alan Rominger
2024-12-11 10:38:24 -05:00
committed by GitHub
parent efbe729c42
commit f377b5fdde
4 changed files with 11 additions and 38 deletions

View File

@@ -21,9 +21,11 @@ from django_guid import set_guid
from jinja2 import Template from jinja2 import Template
import psutil import psutil
from ansible_base.lib.logging.runtime import log_excess_runtime
from awx.main.models import UnifiedJob from awx.main.models import UnifiedJob
from awx.main.dispatch import reaper from awx.main.dispatch import reaper
from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity, log_excess_runtime from awx.main.utils.common import convert_mem_str_to_bytes, get_mem_effective_capacity
if 'run_callback_receiver' in sys.argv: if 'run_callback_receiver' in sys.argv:
logger = logging.getLogger('awx.main.commands.run_callback_receiver') logger = logging.getLogger('awx.main.commands.run_callback_receiver')
@@ -366,7 +368,7 @@ class AutoscalePool(WorkerPool):
def debug_meta(self): def debug_meta(self):
return 'min={} max={}'.format(self.min_workers, self.max_workers) return 'min={} max={}'.format(self.min_workers, self.max_workers)
@log_excess_runtime(logger) @log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
def cleanup(self): def cleanup(self):
""" """
Perform some internal account and cleanup. This is run on Perform some internal account and cleanup. This is run on

View File

@@ -16,10 +16,11 @@ from datetime import timedelta
from django import db from django import db
from django.conf import settings from django.conf import settings
from ansible_base.lib.logging.runtime import log_excess_runtime
from awx.main.dispatch.pool import WorkerPool from awx.main.dispatch.pool import WorkerPool
from awx.main.dispatch.periodic import Scheduler from awx.main.dispatch.periodic import Scheduler
from awx.main.dispatch import pg_bus_conn from awx.main.dispatch import pg_bus_conn
from awx.main.utils.common import log_excess_runtime
from awx.main.utils.db import set_connection_name from awx.main.utils.db import set_connection_name
import awx.main.analytics.subsystem_metrics as s_metrics import awx.main.analytics.subsystem_metrics as s_metrics
@@ -126,7 +127,7 @@ class AWXConsumerBase(object):
return return
self.dispatch_task(body) self.dispatch_task(body)
@log_excess_runtime(logger) @log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
def record_statistics(self): def record_statistics(self):
if time.time() - self.last_stats > 1: # buffer stat recording to once per second if time.time() - self.last_stats > 1: # buffer stat recording to once per second
try: try:
@@ -183,6 +184,7 @@ class AWXConsumerPG(AWXConsumerBase):
schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)} schedule['metrics_gather'] = {'control': self.record_metrics, 'schedule': timedelta(seconds=20)}
self.scheduler = Scheduler(schedule) self.scheduler = Scheduler(schedule)
@log_excess_runtime(logger, debug_cutoff=0.05, cutoff=0.2)
def record_metrics(self): def record_metrics(self):
current_time = time.time() current_time = time.time()
self.pool.produce_subsystem_metrics(self.subsystem_metrics) self.pool.produce_subsystem_metrics(self.subsystem_metrics)

View File

@@ -11,8 +11,10 @@ from django.utils.encoding import smart_str
from django.utils.timezone import now from django.utils.timezone import now
from django.db import OperationalError from django.db import OperationalError
# django-ansible-base
from ansible_base.lib.logging.runtime import log_excess_runtime
# AWX # AWX
from awx.main.utils.common import log_excess_runtime
from awx.main.models.inventory import Host from awx.main.models.inventory import Host

View File

@@ -6,7 +6,6 @@ from datetime import timedelta
import json import json
import yaml import yaml
import logging import logging
import time
import psycopg import psycopg
import os import os
import subprocess import subprocess
@@ -89,7 +88,6 @@ __all__ = [
'deepmerge', 'deepmerge',
'get_event_partition_epoch', 'get_event_partition_epoch',
'cleanup_new_process', 'cleanup_new_process',
'log_excess_runtime',
'unified_job_class_to_event_table_name', 'unified_job_class_to_event_table_name',
] ]
@@ -1215,36 +1213,5 @@ def cleanup_new_process(func):
return wrapper_cleanup_new_process return wrapper_cleanup_new_process
def log_excess_runtime(func_logger, cutoff=5.0, debug_cutoff=5.0, msg=None, add_log_data=False):
    """Decorator factory that logs a message when the wrapped callable runs too long.

    :param func_logger: logger used to emit the runtime message
    :param cutoff: seconds above which the message is logged at INFO level
    :param debug_cutoff: seconds above which the message is logged at DEBUG level;
        only checked when the INFO cutoff was not exceeded.  NOTE: with the
        defaults ``cutoff == debug_cutoff``, so the DEBUG branch can never fire —
        callers that want DEBUG output must pass an explicitly lower debug_cutoff.
    :param msg: optional message template, formatted with the collected log_data
        fields; defaults to 'Running {name} took {delta:.2f}s'
    :param add_log_data: if True, the mutable log_data dict is passed to the
        wrapped callable as a ``log_data=`` keyword so it can add extra fields
        for the message template
    """

    def log_excess_runtime_decorator(func):
        @functools.wraps(func)
        def _new_func(*args, **kwargs):
            # Use the monotonic clock for elapsed-time measurement: unlike
            # time.time(), it cannot go backwards or jump on NTP adjustments.
            start_time = time.monotonic()
            log_data = {'name': repr(func.__name__)}
            if add_log_data:
                return_value = func(*args, log_data=log_data, **kwargs)
            else:
                return_value = func(*args, **kwargs)
            log_data['delta'] = time.monotonic() - start_time
            # A dict return value contributes additional fields for the template
            # (and may intentionally override 'name'/'delta').
            if isinstance(return_value, dict):
                log_data.update(return_value)
            if msg is None:
                record_msg = 'Running {name} took {delta:.2f}s'
            else:
                record_msg = msg
            if log_data['delta'] > cutoff:
                func_logger.info(record_msg.format(**log_data))
            elif log_data['delta'] > debug_cutoff:
                func_logger.debug(record_msg.format(**log_data))
            return return_value

        return _new_func

    return log_excess_runtime_decorator
def unified_job_class_to_event_table_name(job_class):
    return f'main_{job_class().event_class.__name__.lower()}'