mirror of
https://github.com/ansible/awx.git
synced 2026-03-21 10:57:36 -02:30
Include local versions of third-party dependencies, particularly those unavailable or outdated as OS packages.
This commit is contained in:
59
awx/lib/site-packages/celery/task/__init__.py
Normal file
59
awx/lib/site-packages/celery/task/__init__.py
Normal file
@@ -0,0 +1,59 @@
|
||||
# -*- coding: utf-8 -*-
"""
    celery.task
    ~~~~~~~~~~~

    This is the old task module, it should not be used anymore,
    import from the main 'celery' module instead.
    If you're looking for the decorator implementation then that's in
    ``celery.app.base.Celery.task``.

"""
from __future__ import absolute_import

from celery._state import current_app, current_task as current
from celery.__compat__ import MagicModule, recreate_module
from celery.local import Proxy

__all__ = [
    'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task',
    'group', 'chord', 'subtask', 'TaskSet',
]


STATICA_HACK = True
# 'kcah_acitats'[::-1].upper() == 'STATICA_HACK', so this line resets the
# flag to False at runtime without static analyzers noticing: the imports
# below are therefore seen by IDEs/linters but never executed.
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
    # This is never executed, but tricks static analyzers (PyDev, PyCharm,
    # pylint, etc.) into knowing the types of these symbols, and what
    # they contain.
    from celery.canvas import group, chord, subtask
    from .base import BaseTask, Task, PeriodicTask, task, periodic_task
    from .sets import TaskSet


class module(MagicModule):
    # Module subclass so that `celery.task(...)` keeps working as a
    # decorator call on the module object itself.

    def __call__(self, *args, **kwargs):
        return self.task(*args, **kwargs)


# Replace this module in sys.modules with a lazy MagicModule instance:
# the names listed in `by_module` are imported on first attribute access.
old_module, new_module = recreate_module(  # pragma: no cover
    __name__,
    by_module={
        'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask',
                             'task', 'periodic_task'],
        'celery.canvas': ['group', 'chord', 'subtask'],
        'celery.task.sets': ['TaskSet'],
    },
    base=module,
    __package__='celery.task',
    __file__=__file__,
    __path__=__path__,
    __doc__=__doc__,
    current=current,
    # lazy proxies so the current app is resolved at call time, not import time
    discard_all=Proxy(lambda: current_app.control.purge),
    backend_cleanup=Proxy(
        lambda: current_app.tasks['celery.backend_cleanup']
    ),
)
|
||||
228
awx/lib/site-packages/celery/task/base.py
Normal file
228
awx/lib/site-packages/celery/task/base.py
Normal file
@@ -0,0 +1,228 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.task.base
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The task implementation has been moved to :mod:`celery.app.task`.
|
||||
|
||||
This contains the backward compatible Task class used in the old API,
|
||||
and shouldn't be used in new applications.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from kombu import Exchange
|
||||
|
||||
from celery import current_app
|
||||
from celery.__compat__ import class_property, reclassmethod
|
||||
from celery.app.task import Context, TaskType, Task as BaseTask # noqa
|
||||
from celery.schedules import maybe_schedule
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
#: list of methods that must be classmethods in the old API.
_COMPAT_CLASSMETHODS = (
    'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request',
    'AsyncResult', 'subtask', '_get_request',
)


class Task(BaseTask):
    """Deprecated Task base class.

    Modern applications should use :class:`celery.Task` instead.

    """
    abstract = True
    __bound__ = False
    __v2_compat__ = True

    #- Deprecated compat. attributes -:

    queue = None
    routing_key = None
    exchange = None
    exchange_type = None
    delivery_mode = None
    mandatory = False  # XXX deprecated
    immediate = False  # XXX deprecated
    priority = None
    type = 'regular'
    error_whitelist = ()
    disable_error_emails = False
    accept_magic_kwargs = False

    # extra (attribute, setting-name) pairs resolved from the app
    # configuration, extending the pairs declared on BaseTask.
    from_config = BaseTask.from_config + (
        ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
        ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'),
        ('error_whitelist', 'CELERY_TASK_ERROR_WHITELIST'),
    )

    # In old Celery the @task decorator didn't exist, so one would create
    # classes instead and use them directly (e.g. MyTask.apply_async()).
    # the use of classmethods was a hack so that it was not necessary
    # to instantiate the class before using it, but it has only
    # given us pain (like all magic).
    # Writing into locals() inside a class body injects each wrapped
    # method into the class namespace under its original name.
    for name in _COMPAT_CLASSMETHODS:
        locals()[name] = reclassmethod(getattr(BaseTask, name))

    @class_property
    @classmethod
    def request(cls):
        # expose the current request as a class-level property,
        # matching the old class-based API.
        return cls._get_request()

    @classmethod
    def get_logger(self, **kwargs):
        # NOTE: the first parameter is named `self` but, being a
        # classmethod, it actually receives the class; kept as-is for
        # backwards compatibility.
        return get_task_logger(self.name)

    @classmethod
    def establish_connection(self, connect_timeout=None):
        """Deprecated method used to get a broker connection.

        Should be replaced with :meth:`@Celery.connection`
        instead, or by acquiring connections from the connection pool:

        .. code-block:: python

            # using the connection pool
            with celery.pool.acquire(block=True) as conn:
                ...

            # establish fresh connection
            with celery.connection() as conn:
                ...
        """
        return self._get_app().connection(
            connect_timeout=connect_timeout)

    def get_publisher(self, connection=None, exchange=None,
                      connect_timeout=None, exchange_type=None, **options):
        """Deprecated method to get the task publisher (now called producer).

        Should be replaced with :class:`@amqp.TaskProducer`:

        .. code-block:: python

            with celery.connection() as conn:
                with celery.amqp.TaskProducer(conn) as prod:
                    my_task.apply_async(producer=prod)

        """
        # fall back to the class-level routing attributes when the
        # caller does not override them.
        exchange = self.exchange if exchange is None else exchange
        if exchange_type is None:
            exchange_type = self.exchange_type
        connection = connection or self.establish_connection(connect_timeout)
        return self._get_app().amqp.TaskProducer(
            connection,
            # `exchange and ...` keeps a falsy exchange (None/'') as-is
            exchange=exchange and Exchange(exchange, exchange_type),
            routing_key=self.routing_key, **options
        )

    @classmethod
    def get_consumer(self, connection=None, queues=None, **kwargs):
        """Deprecated method used to get consumer for the queue
        this task is sent to.

        Should be replaced with :class:`@amqp.TaskConsumer` instead:

        """
        Q = self._get_app().amqp
        connection = connection or self.establish_connection()
        if queues is None:
            # consume from this task's queue if declared, otherwise the
            # app's default queue.
            queues = Q.queues[self.queue] if self.queue else Q.default_queue
        return Q.TaskConsumer(connection, queues, **kwargs)
|
||||
|
||||
|
||||
class PeriodicTask(Task):
    """A periodic task is a task that adds itself to the
    :setting:`CELERYBEAT_SCHEDULE` setting."""
    abstract = True
    ignore_result = True
    relative = False
    options = None
    compat = True

    def __init__(self):
        # subclasses must define a `run_every` class attribute
        # (seconds, timedelta or schedule instance).
        if not hasattr(self, 'run_every'):
            raise NotImplementedError(
                'Periodic tasks must have a run_every attribute')
        # normalize run_every into a schedule object.
        self.run_every = maybe_schedule(self.run_every, self.relative)
        super(PeriodicTask, self).__init__()

    @classmethod
    def on_bound(cls, app):
        # Called when the task class is bound to an app: register this
        # task in the app's beat schedule so celerybeat dispatches it.
        app.conf.CELERYBEAT_SCHEDULE[cls.name] = {
            'task': cls.name,
            'schedule': cls.run_every,
            'args': (),
            'kwargs': {},
            'options': cls.options or {},
            'relative': cls.relative,
        }
|
||||
|
||||
|
||||
def task(*args, **kwargs):
    """Decorator to create a task class out of any callable.

    The resulting task uses the deprecated :class:`Task` base class and
    has magic keyword arguments disabled (both may be overridden through
    keyword arguments).

    **Examples**

    .. code-block:: python

        @task()
        def refresh_feed(url):
            return Feed.objects.get(url=url).refresh()

        @task(max_retries=10)
        def refresh_feed(url):
            try:
                return Feed.objects.get(url=url).refresh()
            except socket.error, exc:
                refresh_feed.retry(exc=exc)

    Calling the resulting task:

        >>> refresh_feed('http://example.com/rss')        # Regular
        <Feed: http://example.com/rss>
        >>> refresh_feed.delay('http://example.com/rss')  # Async
        <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
    """
    # defaults for the old API; caller-supplied kwargs take precedence
    options = {'accept_magic_kwargs': False, 'base': Task}
    options.update(kwargs)
    return current_app.task(*args, **options)
|
||||
|
||||
|
||||
def periodic_task(*args, **options):
    """Decorator to create a periodic task class out of any callable.

    Works like :func:`task` but uses :class:`PeriodicTask` as the base
    class (unless overridden with a ``base`` option), so the task
    registers itself in the beat schedule.

    Note: positional arguments are accepted but not used.

    .. code-block:: python

        @periodic_task(run_every=30)
        def refresh_feed(url):
            return Feed.objects.get(url=url).refresh()

    Calling the resulting task:

        >>> refresh_feed('http://example.com/rss')        # Regular
        <Feed: http://example.com/rss>
        >>> refresh_feed.delay('http://example.com/rss')  # Async
        <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
    """
    opts = dict(options)
    opts.setdefault('base', PeriodicTask)
    return task(**opts)
|
||||
216
awx/lib/site-packages/celery/task/http.py
Normal file
216
awx/lib/site-packages/celery/task/http.py
Normal file
@@ -0,0 +1,216 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.task.http
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Webhook task implementation.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import anyjson
|
||||
import sys
|
||||
import urllib2
|
||||
|
||||
from urllib import urlencode
|
||||
from urlparse import urlparse
|
||||
try:
|
||||
from urlparse import parse_qsl
|
||||
except ImportError: # pragma: no cover
|
||||
from cgi import parse_qsl # noqa
|
||||
|
||||
from celery import __version__ as celery_version
|
||||
from celery.utils.log import get_task_logger
|
||||
from .base import Task as BaseTask
|
||||
|
||||
GET_METHODS = frozenset(['GET', 'HEAD'])
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
# Exception hierarchy for webhook dispatch (see extract_response):
#   InvalidResponseError   — response was empty or not valid JSON
#   └── UnknownStatusError — JSON parsed but 'status' value unrecognized
#   RemoteExecuteError     — remote side reported 'status': 'failure'
class InvalidResponseError(Exception):
    """The remote server gave an invalid response."""


class RemoteExecuteError(Exception):
    """The remote task gave a custom error."""


class UnknownStatusError(InvalidResponseError):
    """The remote server gave an unknown status."""
|
||||
|
||||
|
||||
def maybe_utf8(value):
    """Encode to utf-8, only if the value is Unicode."""
    if not isinstance(value, unicode):
        return value
    return value.encode('utf-8')
|
||||
|
||||
|
||||
if sys.version_info[0] == 3:  # pragma: no cover

    def utf8dict(tup):
        """Return *tup* as a dict (no byte-encoding needed on Python 3)."""
        return tup if isinstance(tup, dict) else dict(tup)
else:

    def utf8dict(tup):  # noqa
        """With a dict's items() tuple return a new dict with any utf-8
        keys/values encoded."""
        encoded = ((key.encode('utf-8'), maybe_utf8(value))
                   for key, value in tup)
        return dict(encoded)
|
||||
|
||||
|
||||
def extract_response(raw_response, loads=anyjson.loads):
    """Extract the response text from a raw JSON response.

    The payload is expected to be a mapping with a 'status' key of
    'success' (return 'retval') or 'failure' (raise RemoteExecuteError
    with 'reason'); anything else raises UnknownStatusError.
    """
    if not raw_response:
        raise InvalidResponseError('Empty response')
    try:
        payload = loads(raw_response)
    except ValueError, exc:
        # Python 2 three-argument raise: re-raise as
        # InvalidResponseError while preserving the original traceback.
        raise InvalidResponseError, InvalidResponseError(
            str(exc)), sys.exc_info()[2]

    status = payload['status']
    if status == 'success':
        return payload['retval']
    elif status == 'failure':
        raise RemoteExecuteError(payload.get('reason'))
    else:
        raise UnknownStatusError(str(status))
|
||||
|
||||
|
||||
class MutableURL(object):
    """Object wrapping a Uniform Resource Locator.

    Supports editing the query parameter list.
    You can convert the object back to a string, the query will be
    properly urlencoded.

    Examples

        >>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo')
        >>> url.query
        {'x': '3', 'y': '4'}
        >>> str(url)
        'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
        >>> url.query['x'] = 10
        >>> url.query.update({'George': 'Costanza'})
        >>> str(url)
        'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo'

    """
    def __init__(self, url):
        # parts is the 6-tuple from urlparse(); the query component
        # (index 4) is additionally exposed as an editable dict.
        self.parts = urlparse(url)
        self.query = dict(parse_qsl(self.parts[4]))

    def __str__(self):
        scheme, netloc, path, params, query, fragment = self.parts
        # re-encode the (possibly modified) query dict; the original
        # query string from `parts` is deliberately ignored here.
        query = urlencode(utf8dict(self.query.items()))
        components = [scheme + '://', netloc, path or '/',
                      ';%s' % params if params else '',
                      '?%s' % query if query else '',
                      '#%s' % fragment if fragment else '']
        return ''.join(c for c in components if c)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, str(self))
|
||||
|
||||
|
||||
class HttpDispatch(object):
    """Make task HTTP request and collect the task result.

    :param url: The URL to request.
    :param method: HTTP method used. Currently supported methods are `GET`
        and `POST`.
    :param task_kwargs: Task keyword arguments.
    :param logger: Logger used for user/system feedback.

    """
    user_agent = 'celery/%s' % celery_version
    # NOTE(review): `timeout` is declared but never passed to
    # urllib2.urlopen below — confirm whether it is used by subclasses.
    timeout = 5

    def __init__(self, url, method, task_kwargs, **kwargs):
        self.url = url
        self.method = method
        self.task_kwargs = task_kwargs
        self.logger = kwargs.get("logger") or logger

    def make_request(self, url, method, params):
        """Makes an HTTP request and returns the response."""
        # NOTE: `method` is unused here — urllib2 performs a POST when
        # request data (`params`) is supplied and a GET otherwise.
        request = urllib2.Request(url, params)
        for key, val in self.http_headers.items():
            request.add_header(key, val)
        response = urllib2.urlopen(request)  # user catches errors.
        return response.read()

    def dispatch(self):
        """Dispatch callback and return result."""
        url = MutableURL(self.url)
        params = None
        if self.method in GET_METHODS:
            # GET/HEAD: task kwargs go into the query string
            url.query.update(self.task_kwargs)
        else:
            # other methods: task kwargs become the form-encoded body
            params = urlencode(utf8dict(self.task_kwargs.items()))
        raw_response = self.make_request(str(url), self.method, params)
        return extract_response(raw_response)

    @property
    def http_headers(self):
        headers = {'User-Agent': self.user_agent}
        return headers
|
||||
|
||||
|
||||
class HttpDispatchTask(BaseTask):
    """Task dispatching to an URL.

    :keyword url: The URL location of the HTTP callback task.
    :keyword method: Method to use when dispatching the callback. Usually
        `GET` or `POST`.
    :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.

    .. attribute:: url

        If this is set, this is used as the default URL for requests.
        Default is to require the user of the task to supply the url as an
        argument, as this attribute is intended for subclasses.

    .. attribute:: method

        If this is set, this is the default method used for requests.
        Default is to require the user of the task to supply the method as an
        argument, as this attribute is intended for subclasses.

    """

    url = None
    method = None
    accept_magic_kwargs = False

    def run(self, url=None, method=None, **kwargs):
        """Perform the HTTP request and return the extracted result.

        Falls back to the class-level ``url``/``method`` attributes when
        the arguments are omitted; the method defaults to GET.
        """
        url = url or self.url
        # BUGFIX: the parameter previously defaulted to method='GET',
        # which made `method or self.method` dead code — the class-level
        # `method` attribute documented above was never consulted.
        # Defaulting to None honours a subclass-defined method while an
        # omitted argument still falls back to GET as before.
        method = method or self.method or 'GET'
        return HttpDispatch(url, method, kwargs).dispatch()
|
||||
|
||||
|
||||
class URL(MutableURL):
    """HTTP Callback URL

    Supports requesting an URL asynchronously.

    :param url: URL to request.
    :keyword dispatcher: Class used to dispatch the request.
        By default this is :class:`HttpDispatchTask`.

    """
    # task class used to perform the actual request
    dispatcher = HttpDispatchTask

    def __init__(self, url, dispatcher=None):
        super(URL, self).__init__(url)
        # fall back to the class-level dispatcher when none is given
        self.dispatcher = dispatcher if dispatcher else self.dispatcher

    def get_async(self, **kwargs):
        """Request this URL asynchronously with HTTP GET."""
        return self.dispatcher.delay(str(self), 'GET', **kwargs)

    def post_async(self, **kwargs):
        """Request this URL asynchronously with HTTP POST."""
        return self.dispatcher.delay(str(self), 'POST', **kwargs)
|
||||
77
awx/lib/site-packages/celery/task/sets.py
Normal file
77
awx/lib/site-packages/celery/task/sets.py
Normal file
@@ -0,0 +1,77 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.task.sets
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Old ``group`` implementation, this module should
|
||||
not be used anymore use :func:`celery.group` instead.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
from celery._state import get_current_worker_task
|
||||
from celery.app import app_or_default
|
||||
from celery.canvas import subtask, maybe_subtask # noqa
|
||||
from celery.utils import uuid
|
||||
|
||||
|
||||
class TaskSet(list):
    """A task containing several subtasks, making it possible
    to track how many, or when all of the tasks have been completed.

    :param tasks: A list of :class:`subtask` instances.

    Example::

        >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss')
        >>> s = TaskSet(refresh_feed.s(url) for url in urls)
        >>> taskset_result = s.apply_async()
        >>> list_of_return_values = taskset_result.join()  # *expensive*

    """
    # default app; resolved through app_or_default() in __init__
    app = None

    def __init__(self, tasks=None, app=None, Publisher=None):
        # normalize every entry into a subtask signature
        super(TaskSet, self).__init__(maybe_subtask(t) for t in tasks or [])
        self.app = app_or_default(app or self.app)
        self.Publisher = Publisher or self.app.amqp.TaskProducer
        self.total = len(self)  # XXX compat

    def apply_async(self, connection=None, connect_timeout=None,
                    publisher=None, taskset_id=None):
        """Apply TaskSet."""
        app = self.app

        # honour eager mode: run everything locally and synchronously
        if app.conf.CELERY_ALWAYS_EAGER:
            return self.apply(taskset_id=taskset_id)

        with app.connection_or_acquire(connection, connect_timeout) as conn:
            setid = taskset_id or uuid()
            # all subtasks are published over one shared producer
            pub = publisher or self.Publisher(conn)
            results = self._async_results(setid, pub)

            result = app.TaskSetResult(setid, results)
            # if we are running inside a worker task, record the group
            # result as a child of the spawning task
            parent = get_current_worker_task()
            if parent:
                parent.request.children.append(result)
            return result

    def _async_results(self, taskset_id, publisher):
        # publish every subtask, collecting the AsyncResults
        return [task.apply_async(taskset_id=taskset_id, publisher=publisher)
                for task in self]

    def apply(self, taskset_id=None):
        """Applies the TaskSet locally by blocking until all tasks return."""
        setid = taskset_id or uuid()
        return self.app.TaskSetResult(setid, self._sync_results(setid))

    def _sync_results(self, taskset_id):
        return [task.apply(taskset_id=taskset_id) for task in self]

    def _get_tasks(self):
        return self

    def _set_tasks(self, tasks):
        self[:] = tasks
    # the task list *is* this object; `tasks` kept as a compat alias
    tasks = property(_get_tasks, _set_tasks)
|
||||
418
awx/lib/site-packages/celery/task/trace.py
Normal file
418
awx/lib/site-packages/celery/task/trace.py
Normal file
@@ -0,0 +1,418 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.task.trace
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module defines how the task execution is traced:
|
||||
errors are recorded, handlers are applied and so on.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
# ## ---
|
||||
# This is the heart of the worker, the inner loop so to speak.
|
||||
# It used to be split up into nice little classes and methods,
|
||||
# but in the end it only resulted in bad performance and horrible tracebacks,
|
||||
# so instead we now use one closure per task class.
|
||||
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from warnings import warn
|
||||
|
||||
from kombu.utils import kwdict
|
||||
|
||||
from celery import current_app
|
||||
from celery import states, signals
|
||||
from celery._state import _task_stack
|
||||
from celery.app import set_default_app
|
||||
from celery.app.task import Task as BaseTask, Context
|
||||
from celery.datastructures import ExceptionInfo
|
||||
from celery.exceptions import Ignore, RetryTaskError
|
||||
from celery.utils.serialization import get_pickleable_exception
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
_logger = get_logger(__name__)

# Pre-resolved signal senders/receiver lists: module-level aliases avoid
# repeated attribute lookups on the task-execution hot path.
send_prerun = signals.task_prerun.send
prerun_receivers = signals.task_prerun.receivers
send_postrun = signals.task_postrun.send
postrun_receivers = signals.task_postrun.receivers
send_success = signals.task_success.send
success_receivers = signals.task_success.receivers

# Local aliases of the task state constants (same motivation as above).
STARTED = states.STARTED
SUCCESS = states.SUCCESS
IGNORED = states.IGNORED
RETRY = states.RETRY
FAILURE = states.FAILURE
EXCEPTION_STATES = states.EXCEPTION_STATES
# states whose results must not be stored/propagated by the POST phase
IGNORE_STATES = frozenset([IGNORED, RETRY])

#: set by :func:`setup_worker_optimizations`
_tasks = None
# original attributes saved by _install_stack_protection so they can be
# restored by reset_worker_optimizations
_patched = {}
|
||||
|
||||
|
||||
def mro_lookup(cls, attr, stop=(), monkey_patched=()):
    """Returns the first node by MRO order that defines an attribute.

    :param cls: The class whose MRO is searched.
    :param attr: Name of the attribute to look for.
    :keyword stop: A list of types that if reached will stop the search.
    :keyword monkey_patched: Use one of the stop classes if the attr's
        module origin is not in this list, this to detect monkey patched
        attributes.

    :returns None: if the attribute was not found.

    """
    # NOTE: the default for `monkey_patched` used to be the mutable `[]`
    # (shared across calls); it is only ever tested with `in`, so an
    # immutable tuple is a safe, equivalent default.
    for node in cls.mro():
        if node in stop:
            # Reached a stop class: report it only when the attribute it
            # defines does not originate from a monkey-patched module.
            # Use a separate local instead of rebinding `attr`, which the
            # original did (fragile shadowing of the parameter).
            try:
                value = node.__dict__[attr]
                module_origin = value.__module__
            except (AttributeError, KeyError):
                pass
            else:
                if module_origin not in monkey_patched:
                    return node
            # the search never continues past a stop class
            return
        if attr in node.__dict__:
            return node
|
||||
|
||||
|
||||
def task_has_custom(task, attr):
    """Returns true if the task or one of its bases
    defines ``attr`` (excluding the one in BaseTask)."""
    # a truthy return value is the class that defines the attribute
    defining_class = mro_lookup(
        task.__class__, attr,
        stop=(BaseTask, object),
        monkey_patched=['celery.app.task'],
    )
    return defining_class
|
||||
|
||||
|
||||
class TraceInfo(object):
    """Outcome of a traced task invocation: a state constant plus the
    exception (if any) that produced it."""
    __slots__ = ('state', 'retval')

    def __init__(self, state, retval=None):
        # state: one of the task state constants (RETRY, FAILURE, ...)
        # retval: the exception instance associated with that state
        self.state = state
        self.retval = retval

    def handle_error_state(self, task, eager=False):
        # Errors are stored in the result backend only for non-eager
        # calls, unless the task explicitly opts in through
        # store_errors_even_if_ignored.
        store_errors = not eager
        if task.ignore_result:
            store_errors = task.store_errors_even_if_ignored

        # dispatch to the handler matching the recorded state
        return {
            RETRY: self.handle_retry,
            FAILURE: self.handle_failure,
        }[self.state](task, store_errors=store_errors)

    def handle_retry(self, task, store_errors=True):
        """Handle retry exception."""
        # the exception raised is the RetryTaskError semi-predicate,
        # and it's exc' attribute is the original exception raised (if any).
        req = task.request
        type_, _, tb = sys.exc_info()
        try:
            reason = self.retval
            einfo = ExceptionInfo((type_, reason, tb))
            if store_errors:
                task.backend.mark_as_retry(req.id, reason.exc, einfo.traceback)
            task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
            signals.task_retry.send(sender=task, request=req,
                                    reason=reason, einfo=einfo)
            return einfo
        finally:
            # break the traceback reference cycle (frame -> tb -> frame)
            del(tb)

    def handle_failure(self, task, store_errors=True):
        """Handle exception."""
        req = task.request
        type_, _, tb = sys.exc_info()
        try:
            exc = self.retval
            # ensure the exception survives pickling to the backend
            einfo = ExceptionInfo((type_, get_pickleable_exception(exc), tb))
            if store_errors:
                task.backend.mark_as_failure(req.id, exc, einfo.traceback)
            task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
            signals.task_failure.send(sender=task, task_id=req.id,
                                      exception=exc, args=req.args,
                                      kwargs=req.kwargs,
                                      traceback=tb,
                                      einfo=einfo)
            return einfo
        finally:
            # break the traceback reference cycle (frame -> tb -> frame)
            del(tb)
|
||||
|
||||
|
||||
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 Info=TraceInfo, eager=False, propagate=False,
                 IGNORE_STATES=IGNORE_STATES):
    """Builds a function that traces the task's execution; catches all
    exceptions, and saves the state and result of the task execution
    to the result backend.

    If the call was successful, it saves the result to the task result
    backend, and sets the task status to `"SUCCESS"`.

    If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts
    the original exception, uses that as the result and sets the task status
    to `"RETRY"`.

    If the call results in an exception, it saves the exception as the task
    result, and sets the task status to `"FAILURE"`.

    Returns a function that takes the following arguments:

        :param uuid: The unique id of the task.
        :param args: List of positional args to pass on to the function.
        :param kwargs: Keyword arguments mapping to pass on to the function.
        :keyword request: Request dict.

    """
    # If the task doesn't define a custom __call__ method
    # we optimize it away by simply calling the run method directly,
    # saving the extra method call and a line less in the stack trace.
    fun = task if task_has_custom(task, '__call__') else task.run

    loader = loader or current_app.loader
    backend = task.backend
    ignore_result = task.ignore_result
    track_started = task.track_started
    # NOTE: the assignment above is immediately overridden here; kept
    # byte-for-byte for fidelity with the vendored source.
    track_started = not eager and (task.track_started and not ignore_result)
    publish_result = not eager and not ignore_result
    hostname = hostname or socket.gethostname()

    # Hoist every attribute/method used in the inner loop into locals:
    # LOAD_FAST lookups inside the closure instead of attribute access.
    loader_task_init = loader.on_task_init
    loader_cleanup = loader.on_process_cleanup

    task_on_success = None
    task_after_return = None
    if task_has_custom(task, 'on_success'):
        task_on_success = task.on_success
    if task_has_custom(task, 'after_return'):
        task_after_return = task.after_return

    store_result = backend.store_result
    backend_cleanup = backend.process_cleanup

    pid = os.getpid()

    request_stack = task.request_stack
    push_request = request_stack.push
    pop_request = request_stack.pop
    push_task = _task_stack.push
    pop_task = _task_stack.pop
    on_chord_part_return = backend.on_chord_part_return

    # imported here to avoid a circular import at module load time
    from celery import canvas
    subtask = canvas.subtask

    def trace_task(uuid, args, kwargs, request=None):
        # R: value returned to the caller; I: TraceInfo on error
        R = I = None
        kwargs = kwdict(kwargs)
        try:
            push_task(task)
            task_request = Context(request or {}, args=args,
                                   called_directly=False, kwargs=kwargs)
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task, task_id=uuid, task=task,
                                args=args, kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    store_result(uuid, {'pid': pid,
                                        'hostname': hostname}, STARTED)

                # -*- TRACE -*-
                try:
                    R = retval = fun(*args, **kwargs)
                    state = SUCCESS
                except Ignore, exc:
                    # task asked for its result to be ignored
                    I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                except RetryTaskError, exc:
                    I = Info(RETRY, exc)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                except Exception, exc:
                    if propagate:
                        raise
                    I = Info(FAILURE, exc)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                    # fire any error callbacks linked to this request
                    [subtask(errback).apply_async((uuid, ))
                        for errback in task_request.errbacks or []]
                except BaseException, exc:
                    # SystemExit/KeyboardInterrupt etc. always propagate
                    raise
                except:  # pragma: no cover
                    # For Python2.5 where raising strings are still allowed
                    # (but deprecated)
                    if propagate:
                        raise
                    I = Info(FAILURE, None)
                    state, retval = I.state, I.retval
                    R = I.handle_error_state(task, eager=eager)
                    [subtask(errback).apply_async((uuid, ))
                        for errback in task_request.errbacks or []]
                else:
                    # callback tasks must be applied before the result is
                    # stored, so that result.children is populated.
                    [subtask(callback).apply_async((retval, ))
                        for callback in task_request.callbacks or []]
                    if publish_result:
                        store_result(uuid, retval, SUCCESS)
                    if task_on_success:
                        task_on_success(retval, uuid, args, kwargs)
                    if success_receivers:
                        send_success(sender=task, result=retval)

                # -* POST *-
                if state not in IGNORE_STATES:
                    if task_request.chord:
                        on_chord_part_return(task)
                    if task_after_return:
                        task_after_return(
                            state, retval, uuid, args, kwargs, None,
                        )
                    if postrun_receivers:
                        send_postrun(sender=task, task_id=uuid, task=task,
                                     args=args, kwargs=kwargs,
                                     retval=retval, state=state)
            finally:
                pop_task()
                pop_request()
                if not eager:
                    try:
                        backend_cleanup()
                        loader_cleanup()
                    except (KeyboardInterrupt, SystemExit, MemoryError):
                        raise
                    except Exception, exc:
                        _logger.error('Process cleanup failed: %r', exc,
                                      exc_info=True)
        except Exception, exc:
            # an error in the tracing machinery itself (not the task body)
            if eager:
                raise
            R = report_internal_error(task, exc)
        return R, I

    return trace_task
|
||||
|
||||
|
||||
def trace_task(task, uuid, args, kwargs, request={}, **opts):
    """Trace a single execution of *task*, building and caching its
    tracer on first use.  Returns only the result part (``R``) of the
    tracer's ``(R, I)`` pair.

    NOTE(review): the mutable default ``request={}`` is shared between
    calls; it is only read downstream, but worth confirming.
    """
    try:
        if task.__trace__ is None:
            # lazily build the tracer and cache it on the task
            task.__trace__ = build_tracer(task.name, task, **opts)
        return task.__trace__(uuid, args, kwargs, request)[0]
    except Exception, exc:
        # never let tracing machinery errors escape to the worker
        return report_internal_error(task, exc)
|
||||
|
||||
|
||||
def _trace_task_ret(name, uuid, args, kwargs, request={}, **opts):
    # resolve the task by name in the current app's registry, then trace
    task = current_app.tasks[name]
    return trace_task(task, uuid, args, kwargs, request, **opts)
trace_task_ret = _trace_task_ret
|
||||
|
||||
|
||||
def _fast_trace_task(task, uuid, args, kwargs, request={}):
    # setup_worker_optimizations will point trace_task_ret to here,
    # so this is the function used in the worker.
    tracer = _tasks[task].__trace__
    return tracer(uuid, args, kwargs, request)[0]
|
||||
|
||||
|
||||
def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
    # trace eagerly unless the caller explicitly overrides `eager`
    opts.setdefault('eager', True)
    tracer = build_tracer(task.name, task, **opts)
    return tracer(uuid, args, kwargs, request)
|
||||
|
||||
|
||||
def report_internal_error(task, exc):
    """Warn about an exception raised outside of the task body and
    return it wrapped in an :class:`ExceptionInfo`."""
    _type, _value, _tb = sys.exc_info()
    try:
        # let the result backend sanitize the exception so it can be
        # serialized safely
        _value = task.backend.prepare_exception(exc)
        exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
        warn(RuntimeWarning(
            'Exception raised outside body: %r:\n%s' % (
                exc, exc_info.traceback)))
        return exc_info
    finally:
        # break the frame/traceback reference cycle
        del(_tb)
|
||||
|
||||
|
||||
def setup_worker_optimizations(app):
    """Install worker-only fast paths; called at worker start-up."""
    global _tasks
    global trace_task_ret

    # make sure custom Task.__call__ methods that calls super
    # will not mess up the request/task stack.
    _install_stack_protection()

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    app.set_current()
    set_default_app(app)

    # evaluate all task classes by finalizing the app.
    app.finalize()

    # set fast shortcut to task registry
    _tasks = app._tasks

    # swap in the registry-shortcut tracer
    trace_task_ret = _fast_trace_task
    try:
        job = sys.modules['celery.worker.job']
    except KeyError:
        pass
    else:
        # worker.job caches its own reference to trace_task_ret,
        # so rebind it there too and let it re-run its optimizations.
        job.trace_task_ret = _fast_trace_task
        job.__optimize__()
|
||||
|
||||
|
||||
def reset_worker_optimizations():
    """Undo the changes made by :func:`setup_worker_optimizations`."""
    global trace_task_ret
    trace_task_ret = _trace_task_ret
    try:
        # remove the marker so _install_stack_protection() can patch
        # again on a later setup call
        delattr(BaseTask, '_stackprotected')
    except AttributeError:
        pass
    try:
        # restore the original, unpatched BaseTask.__call__
        BaseTask.__call__ = _patched.pop('BaseTask.__call__')
    except KeyError:
        pass
    try:
        # also restore the cached reference held by the worker.job module
        sys.modules['celery.worker.job'].trace_task_ret = _trace_task_ret
    except KeyError:
        pass
|
||||
|
||||
|
||||
def _install_stack_protection():
    """Patch :meth:`BaseTask.__call__` so the request/task stacks stay
    balanced even when a custom ``__call__`` calls ``super()``."""
    # Patches BaseTask.__call__ in the worker to handle the edge case
    # where people override it and also call super.
    #
    # - The worker optimizes away BaseTask.__call__ and instead
    #   calls task.run directly.
    # - so with the addition of current_task and the request stack
    #   BaseTask.__call__ now pushes to those stacks so that
    #   they work when tasks are called directly.
    #
    # The worker only optimizes away __call__ in the case
    # where it has not been overridden, so the request/task stack
    # will blow if a custom task class defines __call__ and also
    # calls super().
    if not getattr(BaseTask, '_stackprotected', False):
        # save the original so reset_worker_optimizations can restore it
        _patched['BaseTask.__call__'] = orig = BaseTask.__call__

        def __protected_call__(self, *args, **kwargs):
            stack = self.request_stack
            req = stack.top
            if req and not req._protected and \
                    len(stack) == 1 and not req.called_directly:
                # the worker already pushed this request: mark it and run
                # the body directly, skipping the original __call__'s
                # duplicate stack bookkeeping.
                req._protected = 1
                return self.run(*args, **kwargs)
            return orig(self, *args, **kwargs)
        BaseTask.__call__ = __protected_call__
        BaseTask._stackprotected = True
|
||||
Reference in New Issue
Block a user