mirror of
https://github.com/ansible/awx.git
synced 2026-04-21 18:00:23 -02:30
I have a hunch that our usage of a daemon thread is causing import lock contention related to https://github.com/ansible/awx/issues/5617. We've encountered similar issues before with threads across dispatcher processes at fork time, and cpython has had bugs like this in recent history: https://bugs.python.org/issue38884. My gut tells me this might be related. The prior implementation - based on celerybeat - ran its code in a process (not a thread), and the timing of that merge matches the period of time we started noticing issues. We are currently testing this change to see if it resolves some of the issues we're seeing.
57 lines
1.8 KiB
Python
57 lines
1.8 KiB
Python
import logging
|
|
import os
|
|
import time
|
|
from multiprocessing import Process
|
|
|
|
from django.conf import settings
|
|
from django.db import connections
|
|
from schedule import Scheduler
|
|
|
|
from awx.main.dispatch.worker import TaskWorker
|
|
|
|
logger = logging.getLogger('awx.main.dispatch.periodic')
|
|
|
|
|
|
class Scheduler(Scheduler):
    """A ``schedule.Scheduler`` that runs its beat loop in a forked child
    *process* rather than a daemon thread.

    Using a process (instead of a thread) is deliberate: it avoids
    import-lock contention across dispatcher processes at fork time
    (see https://github.com/ansible/awx/issues/5617 and
    https://bugs.python.org/issue38884, referenced in the commit message).
    """

    def run_continuously(self):
        """Spawn a daemonized child process that periodically runs any
        pending scheduled jobs until the parent process goes away.
        """
        # Wake up at half the shortest job period (so no job is late by
        # more than ~50% of its interval), but never busier than once/sec.
        idle_seconds = max(
            1,
            min(self.jobs).period.total_seconds() / 2
        )

        def run():
            ppid = os.getppid()
            # logger.warn is deprecated; use logger.warning
            logger.warning('periodic beat started')
            while True:
                if os.getppid() != ppid:
                    # if the parent PID changes, this process has been orphaned
                    # via e.g., segfault or sigkill, we should exit too
                    pid = os.getpid()
                    logger.warning('periodic beat exiting gracefully pid:%s', pid)
                    raise SystemExit()
                try:
                    for conn in connections.all():
                        # If the database connection has a hiccup, re-establish a new
                        # connection
                        conn.close_if_unusable_or_obsolete()
                    self.run_pending()
                except Exception:
                    # Broad catch is intentional: the beat loop must survive
                    # any individual scheduling failure.
                    logger.exception(
                        'encountered an error while scheduling periodic tasks'
                    )
                time.sleep(idle_seconds)

        process = Process(target=run)
        # daemonize so the child is reaped if the parent exits normally
        process.daemon = True
        process.start()
|
|
|
|
|
|
def run_continuously():
    """Build a beat scheduler from ``settings.CELERYBEAT_SCHEDULE`` and start it.

    Every entry in the schedule has its 'task' name resolved to a callable
    whose ``apply_async`` is invoked on the entry's 'schedule' interval.
    """
    beat = Scheduler()
    for entry in settings.CELERYBEAT_SCHEDULE.values():
        interval = entry['schedule'].total_seconds()
        dispatch = TaskWorker.resolve_callable(entry['task']).apply_async
        beat.every(interval).seconds.do(dispatch)
    beat.run_continuously()