Cache task_impact

task_impact is now a field on the database model.
It is calculated and set during create_unified_job.

Set task_impact on .save() for ad hoc commands.
Seth Foster
2022-07-19 14:02:45 -04:00
parent d06a3f060d
commit e6f8852b05
16 changed files with 279 additions and 248 deletions
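
In broad strokes, the commit caches a value that was previously computed on demand. A minimal sketch of that pattern under Django model conventions; the field options, the _get_task_impact() body, and the AdHocCommand override below are illustrative stand-ins, not the exact AWX code:

# Sketch only: cache a computed scheduling cost on the job row so the
# schedulers can read it without recomputing it every cycle.
from django.db import models


class UnifiedJob(models.Model):
    # Written once, during create_unified_job, per the commit message.
    task_impact = models.PositiveIntegerField(default=0, editable=False)

    class Meta:
        abstract = True  # sketch; the real model is concrete

    def _get_task_impact(self):
        return 1  # stand-in for the real per-job-type calculation


class AdHocCommand(UnifiedJob):
    def save(self, *args, **kwargs):
        # Ad hoc commands set task_impact on save, per the commit message.
        if not self.task_impact:
            self.task_impact = self._get_task_impact()
        super().save(*args, **kwargs)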

awx/main/scheduler/task_manager.py

@@ -37,10 +37,9 @@ from awx.main.utils.pglock import advisory_lock
 from awx.main.utils import (
     get_type_for_model,
     ScheduleTaskManager,
-    ScheduleDependencyManager,
     ScheduleWorkflowManager,
 )
-from awx.main.utils.common import create_partition
+from awx.main.utils.common import create_partition, task_manager_bulk_reschedule
 from awx.main.signals import disable_activity_stream
 from awx.main.constants import ACTIVE_STATES
 from awx.main.scheduler.dependency_graph import DependencyGraph
@@ -121,23 +120,22 @@ class TaskBase:
     def schedule(self):
         # Lock
-        with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
-            with transaction.atomic():
-                if acquired is False:
-                    logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
-                    return
-                logger.debug(f"Starting {self.prefix} Scheduler")
-                with self.schedule_manager.task_manager_bulk_reschedule():
+        with task_manager_bulk_reschedule():
+            with advisory_lock(f"{self.prefix}_lock", wait=False) as acquired:
+                with transaction.atomic():
+                    if acquired is False:
+                        logger.debug(f"Not running {self.prefix} scheduler, another task holds lock")
+                        return
+                    logger.debug(f"Starting {self.prefix} Scheduler")
                     # if sigterm due to timeout, still record metrics
                     signal.signal(signal.SIGTERM, self.record_aggregate_metrics_and_exit)
                     self._schedule()
                     self.record_aggregate_metrics()
-                logger.debug(f"Finishing {self.prefix} Scheduler")
+                    logger.debug(f"Finishing {self.prefix} Scheduler")


 class WorkflowManager(TaskBase):
     def __init__(self):
-        self.schedule_manager = ScheduleWorkflowManager()
         super().__init__(prefix="workflow_manager")

     @timeit
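
In the hunk above, the per-manager self.schedule_manager.task_manager_bulk_reschedule() is replaced by the module-level task_manager_bulk_reschedule() from awx.main.utils.common, and it now wraps the advisory lock rather than sitting inside it, so reschedule requests raised anywhere under the lock are flushed once on exit. A minimal sketch of that batching pattern; _pending, _bulk_active, and dispatch_scheduler() are hypothetical stand-ins for AWX's internals:

import contextlib

_pending = set()
_bulk_active = False


def dispatch_scheduler(name):
    # Hypothetical stand-in for actually submitting a scheduler run.
    print(f"run {name} scheduler")


@contextlib.contextmanager
def task_manager_bulk_reschedule():
    # While active, schedule() calls are only recorded, not dispatched.
    global _bulk_active
    _bulk_active = True
    try:
        yield
    finally:
        _bulk_active = False
        while _pending:
            dispatch_scheduler(_pending.pop())  # flush once, after the block


def schedule(name):
    if _bulk_active:
        _pending.add(name)  # repeated requests coalesce into one run
    else:
        dispatch_scheduler(name)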
@@ -146,7 +144,7 @@ class WorkflowManager(TaskBase):
         for workflow_job in self.all_tasks:
             if self.timed_out():
                 logger.warning("Workflow manager has reached time out while processing running workflows, exiting loop early")
-                self.schedule_manager.schedule()
+                ScheduleWorkflowManager().schedule()
                 # Do not process any more workflow jobs. Stop here.
                 # Maybe we should schedule another WorkflowManager run
                 break
@@ -263,7 +261,6 @@ class WorkflowManager(TaskBase):
 class DependencyManager(TaskBase):
     def __init__(self):
-        self.schedule_manager = ScheduleDependencyManager()
         super().__init__(prefix="dependency_manager")

     def create_project_update(self, task, project_id=None):
@@ -432,8 +429,9 @@ class DependencyManager(TaskBase):
         return created_dependencies

     def process_tasks(self):
-        self.generate_dependencies(self.all_tasks)
-        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks))
+        deps = self.generate_dependencies(self.all_tasks)
+        self.generate_dependencies(deps)
+        self.subsystem_metrics.inc(f"{self.prefix}_pending_processed", len(self.all_tasks) + len(deps))

     @timeit
     def _schedule(self):
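
process_tasks now makes a second generate_dependencies pass over whatever the first pass created, because a dependency (say, a project update spawned for a job) can itself require dependencies; the pending_processed metric counts both passes. A condensed sketch of the shape, with hypothetical bodies:

def generate_dependencies(tasks):
    created = []
    for task in tasks:
        created.extend(task.build_dependencies())  # hypothetical helper
    return created


def process_tasks(all_tasks):
    deps = generate_dependencies(all_tasks)  # e.g. project/inventory updates
    generate_dependencies(deps)              # dependencies of dependencies
    return len(all_tasks) + len(deps)        # what the metric now reports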
@@ -462,7 +460,6 @@ class TaskManager(TaskBase):
         # 5 minutes to start pending jobs. If this limit is reached, pending jobs
         # will no longer be started and will be started on the next task manager cycle.
         self.time_delta_job_explanation = timedelta(seconds=30)
-        self.schedule_manager = ScheduleTaskManager()
         super().__init__(prefix="task_manager")

     def after_lock_init(self):
@@ -575,6 +572,8 @@ class TaskManager(TaskBase):
     @timeit
     def process_running_tasks(self, running_tasks):
         for task in running_tasks:
+            if type(task) is WorkflowJob:
+                ScheduleWorkflowManager().schedule()
             self.dependency_graph.add_job(task)

     @timeit
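
The last hunk has the task manager request a WorkflowManager run whenever it encounters a running WorkflowJob, since workflow nodes are advanced by the workflow manager rather than by the task manager itself. The Schedule*Manager objects make this safe to call in a loop; a sketch of the debounce idea (the flag mechanism here is an assumption, not AWX's implementation):

class ScheduleWorkflowManager:
    _queued = False  # shared flag: at most one run is queued per cycle

    def schedule(self):
        if not ScheduleWorkflowManager._queued:
            ScheduleWorkflowManager._queued = True
            self._submit()

    def _submit(self):
        print("workflow manager run queued")  # stand-in for task submission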