HAify job schedules and more task_manager renaming

This commit is contained in:
Chris Meyers
2016-11-01 10:53:14 -05:00
parent 87dd91e849
commit 13c89ab78c
6 changed files with 40 additions and 28 deletions

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``TowerState`` model (main.0046).

    TowerState stores cluster-wide scheduler bookkeeping in the database —
    per this commit, the timestamp of the last periodic-schedule run —
    replacing the per-node file previously read/written via
    SCHEDULE_METADATA_LOCATION, so the value is shared across HA nodes.
    """
    # Must apply after the job-event stdout migration it builds on.
    dependencies = [
        ('main', '0045_v310_job_event_stdout'),
    ]
    operations = [
        migrations.CreateModel(
            name='TowerState',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # auto_now_add: set once at row creation; updated explicitly
                # by the periodic scheduler task thereafter.
                ('schedule_last_run', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # NOTE(review): model subclasses solo's SingletonModel (an
                # abstract base) elsewhere in this commit, hence the
                # explicit 'abstract': False here.
                'abstract': False,
            },
        ),
    ]

View File

@@ -5,13 +5,15 @@ from django.db import models
 from django.db.models.signals import post_save
 from django.dispatch import receiver
+from solo.models import SingletonModel
 from awx.main.managers import InstanceManager
 from awx.main.models.inventory import InventoryUpdate
 from awx.main.models.jobs import Job
 from awx.main.models.projects import ProjectUpdate
 from awx.main.models.unified_jobs import UnifiedJob
-__all__ = ('Instance', 'JobOrigin')
+__all__ = ('Instance', 'JobOrigin', 'TowerState',)
 class Instance(models.Model):
@@ -33,6 +35,8 @@ class Instance(models.Model):
         # NOTE: TODO: Likely to repurpose this once standalone ramparts are a thing
         return "tower"
+class TowerState(SingletonModel):
+    schedule_last_run = models.DateTimeField(auto_now_add=True)
 class JobOrigin(models.Model):
     """A model representing the relationship between a unified job and

View File

@@ -28,7 +28,7 @@ def run_job_complete(job_id):
     TaskManager().schedule()
 @task
-def run_scheduler():
+def run_task_manager():
     TaskManager().schedule()
 @task

View File

@@ -21,7 +21,6 @@ import traceback
 import urlparse
 import uuid
 from distutils.version import LooseVersion as Version
-import dateutil.parser
 import yaml
 try:
     import psutil
@@ -137,30 +136,12 @@ def cluster_node_heartbeat(self):
 @task(bind=True, queue='default')
 def tower_periodic_scheduler(self):
-    def get_last_run():
-        if not os.path.exists(settings.SCHEDULE_METADATA_LOCATION):
-            return None
-        fd = open(settings.SCHEDULE_METADATA_LOCATION)
-        try:
-            last_run = dateutil.parser.parse(fd.read())
-            return last_run
-        except Exception as exc:
-            logger.error("get_last_run failed: {}".format(exc))
-            return None
-    def write_last_run(last_run):
-        fd = open(settings.SCHEDULE_METADATA_LOCATION, 'w')
-        fd.write(last_run.isoformat())
-        fd.close()
     run_now = now()
-    last_run = get_last_run()
-    if not last_run:
-        logger.debug("First run time")
-        write_last_run(run_now)
-        return
+    state = TowerState.get_solo()
+    last_run = state.schedule_last_run
     logger.debug("Last run was: %s", last_run)
-    write_last_run(run_now)
+    state.schedule_last_run = run_now
+    state.save()
     old_schedules = Schedule.objects.enabled().before(last_run)
     for schedule in old_schedules:
@@ -180,6 +161,7 @@ def tower_periodic_scheduler(self):
             new_unified_job.save(update_fields=['status', 'job_explanation'])
             new_unified_job.websocket_emit_status("failed")
         emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
+    state.save()
 def _send_notification_templates(instance, status_str):
     if status_str not in ['succeeded', 'failed']:

View File

@@ -201,6 +201,7 @@ INSTALLED_APPS = (
     'awx.ui',
     'awx.fact',
     'awx.sso',
+    'solo',
 )
 INTERNAL_IPS = ('127.0.0.1',)
@@ -392,9 +393,9 @@ CELERYBEAT_SCHEDULE = {
         'task': 'awx.main.tasks.cluster_node_heartbeat',
         'schedule': timedelta(seconds=60)
     },
-    'task_scheduler': {
-        'task': 'awx.main.scheduler.tasks.run_scheduler',
-        'schedule': timedelta(seconds=10)
+    'task_manager': {
+        'task': 'awx.main.scheduler.tasks.run_task_manager',
+        'schedule': timedelta(seconds=20)
     },
     'task_fail_inconsistent_running_jobs': {
         'task': 'awx.main.scheduler.tasks.run_fail_inconsistent_running_jobs',

View File

@@ -24,6 +24,7 @@ django-polymorphic==0.7.2
 django-radius==1.0.0
 djangorestframework==3.3.2
 djangorestframework-yaml==1.0.2
+django-solo==1.1.2
 django-split-settings==0.1.1
 django-transaction-hooks==0.2
 django-taggit==0.17.6