mirror of
https://github.com/ansible/awx.git
synced 2026-05-12 20:07:37 -02:30
Add job lifecycle logging
Various points in the job lifecycle (e.g. created, running, processing events) are structured into JSON format and output to /var/log/tower/job_lifecycle.log. As part of this work, the DependencyGraph is reworked to return the job object that is doing the blocking, rather than a boolean.
This commit is contained in:
@@ -1,5 +1,3 @@
|
||||
from django.utils.timezone import now as tz_now
|
||||
|
||||
from awx.main.models import (
|
||||
Job,
|
||||
ProjectUpdate,
|
||||
@@ -20,119 +18,110 @@ class DependencyGraph(object):
|
||||
INVENTORY_SOURCE_UPDATES = 'inventory_source_updates'
|
||||
WORKFLOW_JOB_TEMPLATES_JOBS = 'workflow_job_template_jobs'
|
||||
|
||||
LATEST_PROJECT_UPDATES = 'latest_project_updates'
|
||||
LATEST_INVENTORY_UPDATES = 'latest_inventory_updates'
|
||||
|
||||
INVENTORY_SOURCES = 'inventory_source_ids'
|
||||
|
||||
def __init__(self, queue):
|
||||
self.queue = queue
|
||||
def __init__(self):
|
||||
self.data = {}
|
||||
# project_id -> True / False
|
||||
self.data[self.PROJECT_UPDATES] = {}
|
||||
# inventory_id -> True / False
|
||||
# The reason for tracking both inventory and inventory sources:
|
||||
# Consider InvA, which has two sources, InvSource1, InvSource2.
|
||||
# JobB might depend on InvA, which launches two updates, one for each source.
|
||||
# To determine if JobB can run, we can just check InvA, which is marked in
|
||||
# INVENTORY_UPDATES, instead of having to check for both entries in
|
||||
# INVENTORY_SOURCE_UPDATES.
|
||||
self.data[self.INVENTORY_UPDATES] = {}
|
||||
# job_template_id -> True / False
|
||||
self.data[self.JOB_TEMPLATE_JOBS] = {}
|
||||
|
||||
'''
|
||||
Track runnable job related project and inventory to ensure updates
|
||||
don't run while a job needing those resources is running.
|
||||
'''
|
||||
|
||||
# inventory_source_id -> True / False
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES] = {}
|
||||
# True / False
|
||||
self.data[self.SYSTEM_JOB] = True
|
||||
# workflow_job_template_id -> True / False
|
||||
self.data[self.JOB_TEMPLATE_JOBS] = {}
|
||||
self.data[self.SYSTEM_JOB] = {}
|
||||
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS] = {}
|
||||
|
||||
# project_id -> latest ProjectUpdateLatestDict'
|
||||
self.data[self.LATEST_PROJECT_UPDATES] = {}
|
||||
# inventory_source_id -> latest InventoryUpdateLatestDict
|
||||
self.data[self.LATEST_INVENTORY_UPDATES] = {}
|
||||
def mark_if_no_key(self, job_type, id, job):
|
||||
# only mark first occurrence of a task. If 10 of JobA are launched
|
||||
# (concurrent disabled), the dependency graph should return that jobs
|
||||
# 2 through 10 are blocked by job1
|
||||
if id not in self.data[job_type]:
|
||||
self.data[job_type][id] = job
|
||||
|
||||
# inventory_id -> [inventory_source_ids]
|
||||
self.data[self.INVENTORY_SOURCES] = {}
|
||||
def get_item(self, job_type, id):
|
||||
return self.data[job_type].get(id, None)
|
||||
|
||||
def add_latest_project_update(self, job):
|
||||
self.data[self.LATEST_PROJECT_UPDATES][job.project_id] = job
|
||||
|
||||
def get_now(self):
|
||||
return tz_now()
|
||||
|
||||
def mark_system_job(self):
|
||||
self.data[self.SYSTEM_JOB] = False
|
||||
def mark_system_job(self, job):
|
||||
# Don't track different types of system jobs, so that only one can run
|
||||
# at a time. Therefore id in this case is just 'system_job'.
|
||||
self.mark_if_no_key(self.SYSTEM_JOB, 'system_job', job)
|
||||
|
||||
def mark_project_update(self, job):
|
||||
self.data[self.PROJECT_UPDATES][job.project_id] = False
|
||||
self.mark_if_no_key(self.PROJECT_UPDATES, job.project_id, job)
|
||||
|
||||
def mark_inventory_update(self, inventory_id):
|
||||
self.data[self.INVENTORY_UPDATES][inventory_id] = False
|
||||
def mark_inventory_update(self, job):
|
||||
if type(job) is AdHocCommand:
|
||||
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_id, job)
|
||||
else:
|
||||
self.mark_if_no_key(self.INVENTORY_UPDATES, job.inventory_source.inventory_id, job)
|
||||
|
||||
def mark_inventory_source_update(self, inventory_source_id):
|
||||
self.data[self.INVENTORY_SOURCE_UPDATES][inventory_source_id] = False
|
||||
def mark_inventory_source_update(self, job):
|
||||
self.mark_if_no_key(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id, job)
|
||||
|
||||
def mark_job_template_job(self, job):
|
||||
self.data[self.JOB_TEMPLATE_JOBS][job.job_template_id] = False
|
||||
self.mark_if_no_key(self.JOB_TEMPLATE_JOBS, job.job_template_id, job)
|
||||
|
||||
def mark_workflow_job(self, job):
|
||||
self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS][job.workflow_job_template_id] = False
|
||||
self.mark_if_no_key(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id, job)
|
||||
|
||||
def can_project_update_run(self, job):
|
||||
return self.data[self.PROJECT_UPDATES].get(job.project_id, True)
|
||||
def project_update_blocked_by(self, job):
|
||||
return self.get_item(self.PROJECT_UPDATES, job.project_id)
|
||||
|
||||
def can_inventory_update_run(self, job):
|
||||
return self.data[self.INVENTORY_SOURCE_UPDATES].get(job.inventory_source_id, True)
|
||||
def inventory_update_blocked_by(self, job):
|
||||
return self.get_item(self.INVENTORY_SOURCE_UPDATES, job.inventory_source_id)
|
||||
|
||||
def can_job_run(self, job):
|
||||
if self.data[self.PROJECT_UPDATES].get(job.project_id, True) is True and \
|
||||
self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True) is True:
|
||||
if job.allow_simultaneous is False:
|
||||
return self.data[self.JOB_TEMPLATE_JOBS].get(job.job_template_id, True)
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
def job_blocked_by(self, job):
|
||||
project_block = self.get_item(self.PROJECT_UPDATES, job.project_id)
|
||||
inventory_block = self.get_item(self.INVENTORY_UPDATES, job.inventory_id)
|
||||
if job.allow_simultaneous is False:
|
||||
job_block = self.get_item(self.JOB_TEMPLATE_JOBS, job.job_template_id)
|
||||
else:
|
||||
job_block = None
|
||||
return project_block or inventory_block or job_block
|
||||
|
||||
def can_workflow_job_run(self, job):
|
||||
if job.allow_simultaneous:
|
||||
return True
|
||||
return self.data[self.WORKFLOW_JOB_TEMPLATES_JOBS].get(job.workflow_job_template_id, True)
|
||||
def workflow_job_blocked_by(self, job):
|
||||
if job.allow_simultaneous is False:
|
||||
return self.get_item(self.WORKFLOW_JOB_TEMPLATES_JOBS, job.workflow_job_template_id)
|
||||
return None
|
||||
|
||||
def can_system_job_run(self):
|
||||
return self.data[self.SYSTEM_JOB]
|
||||
def system_job_blocked_by(self, job):
|
||||
return self.get_item(self.SYSTEM_JOB, 'system_job')
|
||||
|
||||
def can_ad_hoc_command_run(self, job):
|
||||
return self.data[self.INVENTORY_UPDATES].get(job.inventory_id, True)
|
||||
def ad_hoc_command_blocked_by(self, job):
|
||||
return self.get_item(self.INVENTORY_UPDATES, job.inventory_id)
|
||||
|
||||
def is_job_blocked(self, job):
|
||||
def task_blocked_by(self, job):
|
||||
if type(job) is ProjectUpdate:
|
||||
return not self.can_project_update_run(job)
|
||||
return self.project_update_blocked_by(job)
|
||||
elif type(job) is InventoryUpdate:
|
||||
return not self.can_inventory_update_run(job)
|
||||
return self.inventory_update_blocked_by(job)
|
||||
elif type(job) is Job:
|
||||
return not self.can_job_run(job)
|
||||
return self.job_blocked_by(job)
|
||||
elif type(job) is SystemJob:
|
||||
return not self.can_system_job_run()
|
||||
return self.system_job_blocked_by(job)
|
||||
elif type(job) is AdHocCommand:
|
||||
return not self.can_ad_hoc_command_run(job)
|
||||
return self.ad_hoc_command_blocked_by(job)
|
||||
elif type(job) is WorkflowJob:
|
||||
return not self.can_workflow_job_run(job)
|
||||
return self.workflow_job_blocked_by(job)
|
||||
|
||||
def add_job(self, job):
|
||||
if type(job) is ProjectUpdate:
|
||||
self.mark_project_update(job)
|
||||
elif type(job) is InventoryUpdate:
|
||||
self.mark_inventory_update(job.inventory_source.inventory_id)
|
||||
self.mark_inventory_source_update(job.inventory_source_id)
|
||||
self.mark_inventory_update(job)
|
||||
self.mark_inventory_source_update(job)
|
||||
elif type(job) is Job:
|
||||
self.mark_job_template_job(job)
|
||||
elif type(job) is WorkflowJob:
|
||||
self.mark_workflow_job(job)
|
||||
elif type(job) is SystemJob:
|
||||
self.mark_system_job()
|
||||
self.mark_system_job(job)
|
||||
elif type(job) is AdHocCommand:
|
||||
self.mark_inventory_update(job.inventory_id)
|
||||
self.mark_inventory_update(job)
|
||||
|
||||
def add_jobs(self, jobs):
|
||||
for j in jobs:
|
||||
|
||||
@@ -64,6 +64,8 @@ class TaskManager():
|
||||
# will no longer be started and will be started on the next task manager cycle.
|
||||
self.start_task_limit = settings.START_TASK_LIMIT
|
||||
|
||||
self.time_delta_job_explanation = timedelta(seconds=30)
|
||||
|
||||
def after_lock_init(self):
|
||||
'''
|
||||
Init AFTER we know this instance of the task manager will run because the lock is acquired.
|
||||
@@ -80,7 +82,7 @@ class TaskManager():
|
||||
instances_by_hostname = {i.hostname: i for i in instances_partial}
|
||||
|
||||
for rampart_group in InstanceGroup.objects.prefetch_related('instances'):
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(rampart_group.name),
|
||||
self.graph[rampart_group.name] = dict(graph=DependencyGraph(),
|
||||
capacity_total=rampart_group.capacity,
|
||||
consumed_capacity=0,
|
||||
instances=[])
|
||||
@@ -88,18 +90,21 @@ class TaskManager():
|
||||
if instance.hostname in instances_by_hostname:
|
||||
self.graph[rampart_group.name]['instances'].append(instances_by_hostname[instance.hostname])
|
||||
|
||||
def is_job_blocked(self, task):
|
||||
def job_blocked_by(self, task):
|
||||
# TODO: I'm not happy with this, I think blocking behavior should be decided outside of the dependency graph
|
||||
# in the old task manager this was handled as a method on each task object outside of the graph and
|
||||
# probably has the side effect of cutting down *a lot* of the logic from this task manager class
|
||||
for g in self.graph:
|
||||
if self.graph[g]['graph'].is_job_blocked(task):
|
||||
return True
|
||||
blocked_by = self.graph[g]['graph'].task_blocked_by(task)
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
if not task.dependent_jobs_finished():
|
||||
return True
|
||||
blocked_by = task.dependent_jobs.first()
|
||||
if blocked_by:
|
||||
return blocked_by
|
||||
|
||||
return False
|
||||
return None
|
||||
|
||||
def get_tasks(self, status_list=('pending', 'waiting', 'running')):
|
||||
jobs = [j for j in Job.objects.filter(status__in=status_list).prefetch_related('instance_group')]
|
||||
@@ -312,6 +317,7 @@ class TaskManager():
|
||||
with disable_activity_stream():
|
||||
task.celery_task_id = str(uuid.uuid4())
|
||||
task.save()
|
||||
task.log_lifecycle("waiting")
|
||||
|
||||
if rampart_group is not None:
|
||||
self.consume_capacity(task, rampart_group.name)
|
||||
@@ -450,6 +456,7 @@ class TaskManager():
|
||||
def generate_dependencies(self, undeped_tasks):
|
||||
created_dependencies = []
|
||||
for task in undeped_tasks:
|
||||
task.log_lifecycle("acknowledged")
|
||||
dependencies = []
|
||||
if not type(task) is Job:
|
||||
continue
|
||||
@@ -489,11 +496,18 @@ class TaskManager():
|
||||
|
||||
def process_pending_tasks(self, pending_tasks):
|
||||
running_workflow_templates = set([wf.unified_job_template_id for wf in self.get_running_workflow_jobs()])
|
||||
tasks_to_update_job_explanation = []
|
||||
for task in pending_tasks:
|
||||
if self.start_task_limit <= 0:
|
||||
break
|
||||
if self.is_job_blocked(task):
|
||||
logger.debug("{} is blocked from running".format(task.log_format))
|
||||
blocked_by = self.job_blocked_by(task)
|
||||
if blocked_by:
|
||||
task.log_lifecycle("blocked", blocked_by=blocked_by)
|
||||
job_explanation = gettext_noop(f"waiting for {blocked_by._meta.model_name}-{blocked_by.id} to finish")
|
||||
if task.job_explanation != job_explanation:
|
||||
if task.created < (tz_now() - self.time_delta_job_explanation):
|
||||
task.job_explanation = job_explanation
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
continue
|
||||
preferred_instance_groups = task.preferred_instance_groups
|
||||
found_acceptable_queue = False
|
||||
@@ -539,7 +553,17 @@ class TaskManager():
|
||||
logger.debug("No instance available in group {} to run job {} w/ capacity requirement {}".format(
|
||||
rampart_group.name, task.log_format, task.task_impact))
|
||||
if not found_acceptable_queue:
|
||||
task.log_lifecycle("needs_capacity")
|
||||
job_explanation = gettext_noop("This job is not ready to start because there is not enough available capacity.")
|
||||
if task.job_explanation != job_explanation:
|
||||
if task.created < (tz_now() - self.time_delta_job_explanation):
|
||||
# Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.
|
||||
# Therefore we should only update the job_explanation after some time has elapsed to
|
||||
# prevent excessive task saves.
|
||||
task.job_explanation = job_explanation
|
||||
tasks_to_update_job_explanation.append(task)
|
||||
logger.debug("{} couldn't be scheduled on graph, waiting for next cycle".format(task.log_format))
|
||||
UnifiedJob.objects.bulk_update(tasks_to_update_job_explanation, ['job_explanation'])
|
||||
|
||||
def timeout_approval_node(self):
|
||||
workflow_approvals = WorkflowApproval.objects.filter(status='pending')
|
||||
|
||||
Reference in New Issue
Block a user