Mirror of https://github.com/ansible/awx.git (synced 2026-02-17 03:00:04 -03:30)
Include local versions of third-party dependencies, particularly those unavailable or outdated as OS packages.
awx/lib/site-packages/ANSI.py (new file, 334 lines)
@@ -0,0 +1,334 @@
"""This implements an ANSI terminal emulator as a subclass of screen.

$Id: ANSI.py 491 2007-12-16 20:04:57Z noah $
"""
# references:
#     http://www.retards.org/terminals/vt102.html
#     http://vt100.net/docs/vt102-ug/contents.html
#     http://vt100.net/docs/vt220-rm/
#     http://www.termsys.demon.co.uk/vtansi.htm

import screen
import FSM
import copy
import string

def Emit (fsm):

    screen = fsm.memory[0]
    screen.write_ch(fsm.input_symbol)

def StartNumber (fsm):

    fsm.memory.append (fsm.input_symbol)

def BuildNumber (fsm):

    ns = fsm.memory.pop()
    ns = ns + fsm.input_symbol
    fsm.memory.append (ns)

def DoBackOne (fsm):

    screen = fsm.memory[0]
    screen.cursor_back ()

def DoBack (fsm):

    count = int(fsm.memory.pop())
    screen = fsm.memory[0]
    screen.cursor_back (count)

def DoDownOne (fsm):

    screen = fsm.memory[0]
    screen.cursor_down ()

def DoDown (fsm):

    count = int(fsm.memory.pop())
    screen = fsm.memory[0]
    screen.cursor_down (count)

def DoForwardOne (fsm):

    screen = fsm.memory[0]
    screen.cursor_forward ()

def DoForward (fsm):

    count = int(fsm.memory.pop())
    screen = fsm.memory[0]
    screen.cursor_forward (count)

def DoUpReverse (fsm):

    screen = fsm.memory[0]
    screen.cursor_up_reverse()

def DoUpOne (fsm):

    screen = fsm.memory[0]
    screen.cursor_up ()

def DoUp (fsm):

    count = int(fsm.memory.pop())
    screen = fsm.memory[0]
    screen.cursor_up (count)

def DoHome (fsm):

    c = int(fsm.memory.pop())
    r = int(fsm.memory.pop())
    screen = fsm.memory[0]
    screen.cursor_home (r,c)

def DoHomeOrigin (fsm):

    c = 1
    r = 1
    screen = fsm.memory[0]
    screen.cursor_home (r,c)

def DoEraseDown (fsm):

    screen = fsm.memory[0]
    screen.erase_down()

def DoErase (fsm):

    arg = int(fsm.memory.pop())
    screen = fsm.memory[0]
    if arg == 0:
        screen.erase_down()
    elif arg == 1:
        screen.erase_up()
    elif arg == 2:
        screen.erase_screen()

def DoEraseEndOfLine (fsm):

    screen = fsm.memory[0]
    screen.erase_end_of_line()

def DoEraseLine (fsm):

    # The original source never read the numeric argument here, leaving 'arg'
    # undefined and calling non-existent screen methods; pop the argument as
    # DoErase does and call the matching erase_* methods on the screen object.
    arg = int(fsm.memory.pop())
    screen = fsm.memory[0]
    if arg == 0:
        screen.erase_end_of_line()
    elif arg == 1:
        screen.erase_start_of_line()
    elif arg == 2:
        screen.erase_line()

def DoEnableScroll (fsm):

    screen = fsm.memory[0]
    screen.scroll_screen()

def DoCursorSave (fsm):

    screen = fsm.memory[0]
    screen.cursor_save_attrs()

def DoCursorRestore (fsm):

    screen = fsm.memory[0]
    screen.cursor_restore_attrs()

def DoScrollRegion (fsm):

    screen = fsm.memory[0]
    r2 = int(fsm.memory.pop())
    r1 = int(fsm.memory.pop())
    screen.scroll_screen_rows (r1,r2)

def DoMode (fsm):

    screen = fsm.memory[0]
    mode = fsm.memory.pop() # Should be 4
    # screen.setReplaceMode ()

def Log (fsm):

    screen = fsm.memory[0]
    fsm.memory = [screen]
    fout = open ('log', 'a')
    fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
    fout.close()

class term (screen.screen):
    """This is a placeholder.
    In theory I might want to add other terminal types.
    """
    def __init__ (self, r=24, c=80):
        screen.screen.__init__(self, r,c)

class ANSI (term):

    """This class encapsulates a generic terminal. It filters a stream and
    maintains the state of a screen object. """

    def __init__ (self, r=24,c=80):

        term.__init__(self,r,c)

        #self.screen = screen (24,80)
        self.state = FSM.FSM ('INIT',[self])
        self.state.set_default_transition (Log, 'INIT')
        self.state.add_transition_any ('INIT', Emit, 'INIT')
        self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
        self.state.add_transition_any ('ESC', Log, 'INIT')
        self.state.add_transition ('(', 'ESC', None, 'G0SCS')
        self.state.add_transition (')', 'ESC', None, 'G1SCS')
        self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
        self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
        self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
        self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
        self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
        self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
        self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
        self.state.add_transition ('[', 'ESC', None, 'ELB')
        # ELB means Escape Left Bracket. That is ^[[
        self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
        self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
        self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
        self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
        self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
        self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
        self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
        self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
        self.state.add_transition ('m', 'ELB', None, 'INIT')
        self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
        self.state.add_transition_list (string.digits, 'ELB', StartNumber, 'NUMBER_1')
        self.state.add_transition_list (string.digits, 'NUMBER_1', BuildNumber, 'NUMBER_1')
        self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
        self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
        self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
        self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
        self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
        self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
        self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
        ### It gets worse... the 'm' code can have an infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_1', None, 'INIT')
        ### LED control. Same problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_1', None, 'INIT')

        # \E[?47h appears to be "switch to alternate screen"
        # \E[?47l restores alternate screen... I think.
        self.state.add_transition_list (string.digits, 'MODECRAP', StartNumber, 'MODECRAP_NUM')
        self.state.add_transition_list (string.digits, 'MODECRAP_NUM', BuildNumber, 'MODECRAP_NUM')
        self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT')
        self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT')

        #RM   Reset Mode   Esc [ Ps l   none
        self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
        self.state.add_transition_any ('SEMICOLON', Log, 'INIT')
        self.state.add_transition_list (string.digits, 'SEMICOLON', StartNumber, 'NUMBER_2')
        self.state.add_transition_list (string.digits, 'NUMBER_2', BuildNumber, 'NUMBER_2')
        self.state.add_transition_any ('NUMBER_2', Log, 'INIT')
        self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
        ### It gets worse... the 'm' code can have an infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_2', None, 'INIT')
        ### LED control. Same problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_2', None, 'INIT')

    def process (self, c):

        self.state.process(c)

    def process_list (self, l):

        self.write(l)

    def write (self, s):

        for c in s:
            self.process(c)

    def flush (self):

        pass

    def write_ch (self, ch):

        """This puts a character at the current cursor position. The cursor
        position is moved forward with wrap-around, but no scrolling is done if
        the cursor hits the lower-right corner of the screen. """

        #\r and \n both produce a call to crlf().
        ch = ch[0]

        if ch == '\r':
            # self.crlf()
            return
        if ch == '\n':
            self.crlf()
            return
        if ch == chr(screen.BS):
            self.cursor_back()
            self.put_abs(self.cur_r, self.cur_c, ' ')
            return

        if ch not in string.printable:
            fout = open ('log', 'a')
            fout.write ('Nonprint: ' + str(ord(ch)) + '\n')
            fout.close()
            return
        self.put_abs(self.cur_r, self.cur_c, ch)
        old_r = self.cur_r
        old_c = self.cur_c
        self.cursor_forward()
        if old_c == self.cur_c:
            self.cursor_down()
            if old_r != self.cur_r:
                self.cursor_home (self.cur_r, 1)
            else:
                self.scroll_up ()
                self.cursor_home (self.cur_r, 1)
                self.erase_line()

#    def test (self):
#
#        import sys
#        write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
#        '(He\'s got a ferret sticking up his nose.)\n' + \
#        'How it got there I can\'t tell\n' + \
#        'But now it\'s there it hurts like hell\n' + \
#        'And what is more it radically affects my sense of smell.\n' + \
#        '(His sense of smell.)\n' + \
#        'I can see a bare-bottomed mandril.\n' + \
#        '(Slyly eyeing his other nostril.)\n' + \
#        'If it jumps inside there too I really don\'t know what to do\n' + \
#        'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \
#        '(A nasal zoo.)\n' + \
#        'I\'ve got a ferret sticking up my nose.\n' + \
#        '(And what is worst of all it constantly explodes.)\n' + \
#        '"Ferrets don\'t explode," you say\n' + \
#        'But it happened nine times yesterday\n' + \
#        'And I should know for each time I was standing in the way.\n' + \
#        'I\'ve got a ferret sticking up my nose.\n' + \
#        '(He\'s got a ferret sticking up his nose.)\n' + \
#        'How it got there I can\'t tell\n' + \
#        'But now it\'s there it hurts like hell\n' + \
#        'And what is more it radically affects my sense of smell.\n' + \
#        '(His sense of smell.)'
#        self.fill('.')
#        self.cursor_home()
#        for c in write_text:
#            self.write_ch (c)
#        print str(self)
#
#if __name__ == '__main__':
#    t = ANSI(6,65)
#    t.test()
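The ANSI class above is driven entirely through write(): each character is pushed through the FSM, which either emits it to the screen buffer or treats it as part of an escape sequence. A minimal usage sketch, assuming screen.py and FSM.py from this same directory are importable (as in the commented-out test() above):

    import ANSI

    # A small 5x20 virtual terminal.
    vt = ANSI.ANSI(r=5, c=20)

    # Plain text is emitted at the cursor; "\x1b[2J" erases the screen and
    # "\x1b[H" homes the cursor, exactly as wired up in ANSI.__init__ above.
    vt.write('hello\r\nworld')
    vt.write('\x1b[2J\x1b[H')
    vt.write('fresh screen')

    # The screen base class renders the buffer as text, as the commented-out
    # test() method does with str(self).
    print str(vt)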
awx/lib/site-packages/FSM.py (new file, 331 lines)
@@ -0,0 +1,331 @@
#!/usr/bin/env python

"""This module implements a Finite State Machine (FSM). In addition to state
this FSM also maintains a user defined "memory". So this FSM can be used as a
Push-down Automata (PDA) since a PDA is a FSM + memory.

The following describes how the FSM works, but you will probably also need to
see the example function to understand how the FSM is used in practice.

You define an FSM by building tables of transitions. For a given input symbol
the process() method uses these tables to decide what action to call and what
the next state will be. The FSM has a table of transitions that associate:

        (input_symbol, current_state) --> (action, next_state)

Where "action" is a function you define. The symbols and states can be any
objects. You use the add_transition() and add_transition_list() methods to add
to the transition table. The FSM also has a table of transitions that
associate:

        (current_state) --> (action, next_state)

You use the add_transition_any() method to add to this transition table. The
FSM also has one default transition that is not associated with any specific
input_symbol or state. You use the set_default_transition() method to set the
default transition.

When an action function is called it is passed a reference to the FSM. The
action function may then access attributes of the FSM such as input_symbol,
current_state, or "memory". The "memory" attribute can be any object that you
want to pass along to the action functions. It is not used by the FSM itself.
For parsing you would typically pass a list to be used as a stack.

The processing sequence is as follows. The process() method is given an
input_symbol to process. The FSM will search the table of transitions that
associate:

        (input_symbol, current_state) --> (action, next_state)

If the pair (input_symbol, current_state) is found then process() will call the
associated action function and then set the current state to the next_state.

If the FSM cannot find a match for (input_symbol, current_state) it will then
search the table of transitions that associate:

        (current_state) --> (action, next_state)

If the current_state is found then the process() method will call the
associated action function and then set the current state to the next_state.
Notice that this table lacks an input_symbol. It lets you define transitions
for a current_state and ANY input_symbol. Hence, it is called the "any" table.
Remember, it is always checked after first searching the table for a specific
(input_symbol, current_state).

For the case where the FSM did not match either of the previous two cases the
FSM will try to use the default transition. If the default transition is
defined then the process() method will call the associated action function and
then set the current state to the next_state. This lets you define a default
transition as a catch-all case. You can think of it as an exception handler.
There can be only one default transition.

Finally, if none of the previous cases are defined for an input_symbol and
current_state then the FSM will raise an exception. This may be desirable, but
you can always prevent this just by defining a default transition.

Noah Spurrier 20020822
"""

class ExceptionFSM(Exception):

    """This is the FSM Exception class."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return `self.value`

class FSM:

    """This is a Finite State Machine (FSM).
    """

    def __init__(self, initial_state, memory=None):

        """This creates the FSM. You set the initial state here. The "memory"
        attribute is any object that you want to pass along to the action
        functions. It is not used by the FSM. For parsing you would typically
        pass a list to be used as a stack. """

        # Map (input_symbol, current_state) --> (action, next_state).
        self.state_transitions = {}
        # Map (current_state) --> (action, next_state).
        self.state_transitions_any = {}
        self.default_transition = None

        self.input_symbol = None
        self.initial_state = initial_state
        self.current_state = self.initial_state
        self.next_state = None
        self.action = None
        self.memory = memory

    def reset (self):

        """This sets the current_state to the initial_state and sets
        input_symbol to None. The initial state was set by the constructor
        __init__(). """

        self.current_state = self.initial_state
        self.input_symbol = None

    def add_transition (self, input_symbol, state, action=None, next_state=None):

        """This adds a transition that associates:

                (input_symbol, current_state) --> (action, next_state)

        The action may be set to None in which case the process() method will
        ignore the action and only set the next_state. The next_state may be
        set to None in which case the current state will be unchanged.

        You can also set transitions for a list of symbols by using
        add_transition_list(). """

        if next_state is None:
            next_state = state
        self.state_transitions[(input_symbol, state)] = (action, next_state)

    def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):

        """This adds the same transition for a list of input symbols.
        You can pass a list or a string. Note that it is handy to use
        string.digits, string.whitespace, string.letters, etc. to add
        transitions that match character classes.

        The action may be set to None in which case the process() method will
        ignore the action and only set the next_state. The next_state may be
        set to None in which case the current state will be unchanged. """

        if next_state is None:
            next_state = state
        for input_symbol in list_input_symbols:
            self.add_transition (input_symbol, state, action, next_state)

    def add_transition_any (self, state, action=None, next_state=None):

        """This adds a transition that associates:

                (current_state) --> (action, next_state)

        That is, any input symbol will match the current state.
        The process() method checks the "any" state associations after it first
        checks for an exact match of (input_symbol, current_state).

        The action may be set to None in which case the process() method will
        ignore the action and only set the next_state. The next_state may be
        set to None in which case the current state will be unchanged. """

        if next_state is None:
            next_state = state
        self.state_transitions_any [state] = (action, next_state)

    def set_default_transition (self, action, next_state):

        """This sets the default transition. This defines an action and
        next_state if the FSM cannot find the input symbol and the current
        state in the transition list and if the FSM cannot find the
        current_state in the transition_any list. This is useful as a final
        fall-through state for catching errors and undefined states.

        The default transition can be removed by setting the attribute
        default_transition to None. """

        self.default_transition = (action, next_state)

    def get_transition (self, input_symbol, state):

        """This returns (action, next state) given an input_symbol and state.
        This does not modify the FSM state, so calling this method has no side
        effects. Normally you do not call this method directly. It is called by
        process().

        The sequence of steps to check for a defined transition goes from the
        most specific to the least specific.

        1. Check state_transitions[] that match exactly the tuple,
            (input_symbol, state)

        2. Check state_transitions_any[] that match (state)
            In other words, match a specific state and ANY input_symbol.

        3. Check if the default_transition is defined.
            This catches any input_symbol and any state.
            This is a handler for errors, undefined states, or defaults.

        4. No transition was defined. If we get here then raise an exception.
        """

        if self.state_transitions.has_key((input_symbol, state)):
            return self.state_transitions[(input_symbol, state)]
        elif self.state_transitions_any.has_key (state):
            return self.state_transitions_any[state]
        elif self.default_transition is not None:
            return self.default_transition
        else:
            raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
                (str(input_symbol), str(state)) )

    def process (self, input_symbol):

        """This is the main method that you call to process input. This may
        cause the FSM to change state and call an action. This method calls
        get_transition() to find the action and next_state associated with the
        input_symbol and current_state. If the action is None then the action
        is not called and only the current state is changed. This method
        processes one complete input symbol. You can process a list of symbols
        (or a string) by calling process_list(). """

        self.input_symbol = input_symbol
        (self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
        if self.action is not None:
            self.action (self)
        self.current_state = self.next_state
        self.next_state = None

    def process_list (self, input_symbols):

        """This takes a list and sends each element to process(). The list may
        be a string or any iterable object. """

        for s in input_symbols:
            self.process (s)

##############################################################################
# The following is an example that demonstrates the use of the FSM class to
# process an RPN expression. Run this module from the command line. You will
# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
# Operators are * / + - Use the = sign to evaluate and print the expression.
# For example:
#
#    167 3 2 2 * * * 1 - =
#
# will print:
#
#    2003
##############################################################################

import sys, os, traceback, optparse, time, string

#
# These define the actions.
# Note that "memory" is a list being used as a stack.
#

def BeginBuildNumber (fsm):
    fsm.memory.append (fsm.input_symbol)

def BuildNumber (fsm):
    s = fsm.memory.pop ()
    s = s + fsm.input_symbol
    fsm.memory.append (s)

def EndBuildNumber (fsm):
    s = fsm.memory.pop ()
    fsm.memory.append (int(s))

def DoOperator (fsm):
    ar = fsm.memory.pop()
    al = fsm.memory.pop()
    if fsm.input_symbol == '+':
        fsm.memory.append (al + ar)
    elif fsm.input_symbol == '-':
        fsm.memory.append (al - ar)
    elif fsm.input_symbol == '*':
        fsm.memory.append (al * ar)
    elif fsm.input_symbol == '/':
        fsm.memory.append (al / ar)

def DoEqual (fsm):
    print str(fsm.memory.pop())

def Error (fsm):
    print 'That does not compute.'
    print str(fsm.input_symbol)

def main():

    """This is where the example starts and the FSM state transitions are
    defined. Note that states are strings (such as 'INIT'). This is not
    necessary, but it makes the example easier to read. """

    f = FSM ('INIT', []) # "memory" will be used as a stack.
    f.set_default_transition (Error, 'INIT')
    f.add_transition_any ('INIT', None, 'INIT')
    f.add_transition ('=', 'INIT', DoEqual, 'INIT')
    f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
    f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
    f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
    f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')

    print
    print 'Enter an RPN Expression.'
    print 'Numbers may be integers. Operators are * / + -'
    print 'Use the = sign to evaluate and print the expression.'
    print 'For example: '
    print '    167 3 2 2 * * * 1 - ='
    inputstr = raw_input ('> ')
    f.process_list(inputstr)

if __name__ == '__main__':
    try:
        start_time = time.time()
        parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id: FSM.py 490 2007-12-07 15:46:24Z noah $')
        parser.add_option ('-v', '--verbose', action='store_true', default=False, help='verbose output')
        (options, args) = parser.parse_args()
        if options.verbose: print time.asctime()
        main()
        if options.verbose: print time.asctime()
        if options.verbose: print 'TOTAL TIME IN MINUTES:',
        if options.verbose: print (time.time() - start_time) / 60.0
        sys.exit(0)
    except KeyboardInterrupt, e: # Ctrl-C
        raise e
    except SystemExit, e: # sys.exit()
        raise e
    except Exception, e:
        print 'ERROR, UNEXPECTED EXCEPTION'
        print str(e)
        traceback.print_exc()
        os._exit(1)
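The lookup order spelled out in the module docstring (exact (symbol, state) pair first, then the "any" table, then the default transition) is easiest to see with a throwaway machine. A small sketch, separate from the RPN example bundled above and assuming FSM.py is importable from this directory:

    import FSM

    def OnDigit (fsm):
        print 'digit %s in state %s' % (fsm.input_symbol, fsm.current_state)

    def OnAnything (fsm):
        print 'fell through to the default transition'

    f = FSM.FSM ('START', memory=[])
    f.add_transition_list ('0123456789', 'START', OnDigit, 'START')  # exact matches
    f.add_transition_any ('START', None, 'START')                    # "any" table: ignore other input
    f.set_default_transition (OnAnything, 'START')                   # catch-all (never reached here)

    f.process_list ('a1b2')   # 'a'/'b' hit the any-table, '1'/'2' call OnDigit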
awx/lib/site-packages/README (new file, 22 lines)
@@ -0,0 +1,22 @@
Local versions of third-party packages required by AWX. Package names and
versions are listed below, along with notes on which files are included.

amqp-1.0.11 (amqp/*)
anyjson-0.3.3 (anyjson/*)
billiard-2.7.3.28 (billiard/*, funtests/*, excluded _billiard.so)
celery-3.0.19 (celery/*, excluded bin/celery* and bin/camqadm)
django-celery-3.0.17 (djcelery/*, excluded bin/djcelerymon)
django-extensions-1.1.1 (django_extensions/*)
django-jsonfield-0.9.10 (jsonfield/*)
django-taggit-0.10a1 (taggit/*)
djangorestframework-2.3.5 (rest_framework/*)
importlib-1.0.2 (importlib/*, needed for Python 2.6 support)
kombu-2.5.10 (kombu/*)
Markdown-2.3.1 (markdown/*, excluded bin/markdown_py)
ordereddict-1.1 (ordereddict.py, needed for Python 2.6 support)
pexpect-2.4 (pexpect.py, pxssh.py, fdpexpect.py, FSM.py, screen.py, ANSI.py)
python-dateutil-2.1 (dateutil/*)
pytz-2013b (pytz/*)
requests-1.2.3 (requests/*)
six-1.3.0 (six.py)
South-0.8.1 (south/*)
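The commit itself does not show how this directory is wired into the interpreter. A common pattern for a vendored site-packages directory like this one is to prepend it to sys.path before the bundled packages are imported; the path handling below is a minimal sketch for illustration only, not AWX's actual startup code:

    import os
    import sys

    # Hypothetical: resolve awx/lib/site-packages relative to the awx package
    # and put it ahead of system-wide packages so these pinned versions win.
    vendor_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'lib', 'site-packages')
    if vendor_dir not in sys.path:
        sys.path.insert(0, vendor_dir)

    import amqp      # now resolves to the bundled amqp-1.0.11
    import requests  # bundled requests-1.2.3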
awx/lib/site-packages/amqp/__init__.py (new file, 50 lines)
@@ -0,0 +1,50 @@
"""Low-level AMQP client for Python (fork of amqplib)"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
from __future__ import absolute_import

VERSION = (1, 0, 11)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Barry Pederson'
__maintainer__ = 'Ask Solem'
__contact__ = 'pyamqp@celeryproject.org'
__homepage__ = 'http://github.com/celery/py-amqp'
__docformat__ = 'restructuredtext'

# -eof meta-

#
# Pull in the public items from the various sub-modules
#
from .basic_message import Message
from .channel import Channel
from .connection import Connection
from .exceptions import (
    AMQPError,
    ConnectionError,
    ChannelError,
    ConsumerCancel,
)

__all__ = [
    'Connection',
    'Channel',
    'Message',
    'AMQPError',
    'ConnectionError',
    'ChannelError',
    'ConsumerCancel',
]
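The names re-exported here are all that calling code needs to talk to a broker. A minimal publish sketch against the bundled py-amqp 1.0 API; the broker address and queue name are made up for illustration, and queue_declare()/basic_publish() live in channel.py, whose diff is suppressed below:

    from amqp import Connection, Message

    # Hypothetical broker location and credentials.
    conn = Connection(host='localhost:5672', userid='guest', password='guest',
                      virtual_host='/')
    channel = conn.channel()

    # Declare a durable queue and publish one persistent message to it.
    channel.queue_declare(queue='awx_demo', durable=True, auto_delete=False)
    channel.basic_publish(Message('hello', delivery_mode=2),
                          exchange='', routing_key='awx_demo')

    channel.close()
    conn.close()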
awx/lib/site-packages/amqp/abstract_channel.py (new file, 94 lines)
@@ -0,0 +1,94 @@
"""Code common to Connection and Channel objects."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
from __future__ import absolute_import

from .exceptions import AMQPError
from .serialization import AMQPWriter

try:
    bytes
except NameError:
    # Python 2.5 and lower
    bytes = str

__all__ = ['AbstractChannel']


class AbstractChannel(object):
    """Superclass for both the Connection, which is treated
    as channel 0, and other user-created Channel objects.

    The subclasses must have a _METHOD_MAP class property, mapping
    between AMQP method signatures and Python methods.

    """
    def __init__(self, connection, channel_id):
        self.connection = connection
        self.channel_id = channel_id
        connection.channels[channel_id] = self
        self.method_queue = []  # Higher level queue for methods
        self.auto_decode = False

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def _send_method(self, method_sig, args=bytes(), content=None):
        """Send a method for our channel."""
        if isinstance(args, AMQPWriter):
            args = args.getvalue()

        self.connection.method_writer.write_method(
            self.channel_id, method_sig, args, content,
        )

    def close(self):
        """Close this Channel or Connection"""
        raise NotImplementedError('Must be overridden in subclass')

    def wait(self, allowed_methods=None):
        """Wait for a method that matches our allowed_methods parameter (the
        default value of None means match any method), and dispatch to it."""
        method_sig, args, content = self.connection._wait_method(
            self.channel_id, allowed_methods)

        return self.dispatch_method(method_sig, args, content)

    def dispatch_method(self, method_sig, args, content):
        if content and \
                self.auto_decode and \
                hasattr(content, 'content_encoding'):
            try:
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass

        try:
            amqp_method = self._METHOD_MAP[method_sig]
        except KeyError:
            raise AMQPError('Unknown AMQP method %r' % (method_sig, ))

        if content is None:
            return amqp_method(self, args)
        else:
            return amqp_method(self, args, content)

    #: Placeholder, the concrete implementations will have to
    #: supply their own versions of _METHOD_MAP
    _METHOD_MAP = {}
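The dispatch model above is the whole contract: each incoming frame's (class_id, method_id) pair is looked up in the subclass's _METHOD_MAP and the plain function stored there is called with the channel and the decoded arguments. A toy sketch of a subclass, only to make that contract concrete; the (99, 1) signature and the handler are invented for illustration:

    from amqp.abstract_channel import AbstractChannel

    class DemoChannel(AbstractChannel):
        """Minimal subclass showing how _METHOD_MAP feeds dispatch_method()."""

        def _on_demo_ok(self, args):
            # 'args' is the decoded argument reader for the method frame.
            print 'demo.ok received'

        # (class_id, method_id) --> handler; (99, 1) is a made-up signature.
        _METHOD_MAP = {
            (99, 1): _on_demo_ok,
        }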
awx/lib/site-packages/amqp/basic_message.py (new file, 123 lines)
@@ -0,0 +1,123 @@
"""Messages for AMQP"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
from __future__ import absolute_import

from .serialization import GenericContent

__all__ = ['Message']


class Message(GenericContent):
    """A Message for use with the Channel.basic_* methods."""

    #: Instances of this class have these attributes, which
    #: are passed back and forth as message properties between
    #: client and server
    PROPERTIES = [
        ('content_type', 'shortstr'),
        ('content_encoding', 'shortstr'),
        ('application_headers', 'table'),
        ('delivery_mode', 'octet'),
        ('priority', 'octet'),
        ('correlation_id', 'shortstr'),
        ('reply_to', 'shortstr'),
        ('expiration', 'shortstr'),
        ('message_id', 'shortstr'),
        ('timestamp', 'timestamp'),
        ('type', 'shortstr'),
        ('user_id', 'shortstr'),
        ('app_id', 'shortstr'),
        ('cluster_id', 'shortstr')
    ]

    def __init__(self, body='', children=None, **properties):
        """Expected arg types

            body: string
            children: (not supported)

        Keyword properties may include:

            content_type: shortstr
                MIME content type

            content_encoding: shortstr
                MIME content encoding

            application_headers: table
                Message header field table, a dict with string keys,
                and string | int | Decimal | datetime | dict values.

            delivery_mode: octet
                Non-persistent (1) or persistent (2)

            priority: octet
                The message priority, 0 to 9

            correlation_id: shortstr
                The application correlation identifier

            reply_to: shortstr
                The destination to reply to

            expiration: shortstr
                Message expiration specification

            message_id: shortstr
                The application message identifier

            timestamp: datetime.datetime
                The message timestamp

            type: shortstr
                The message type name

            user_id: shortstr
                The creating user id

            app_id: shortstr
                The creating application id

            cluster_id: shortstr
                Intra-cluster routing identifier

        Unicode bodies are encoded according to the 'content_encoding'
        argument. If that's None, it's set to 'UTF-8' automatically.

        example::

            msg = Message('hello world',
                          content_type='text/plain',
                          application_headers={'foo': 7})

        """
        super(Message, self).__init__(**properties)
        self.body = body

    def __eq__(self, other):
        """Check if the properties and bodies of this Message and another
        Message are the same.

        Received messages may contain a 'delivery_info' attribute,
        which isn't compared.

        """
        try:
            return (super(Message, self).__eq__(other) and
                    self.body == other.body)
        except AttributeError:
            return NotImplemented
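Building on the docstring example above, a message meant to survive a broker restart just sets delivery_mode=2 and, if the body is JSON, says so in content_type; queue durability itself is declared on the channel, not here. A small sketch with an illustrative payload:

    import json

    from amqp import Message

    payload = json.dumps({'job_id': 42, 'status': 'pending'})  # illustrative body
    msg = Message(payload,
                  content_type='application/json',
                  delivery_mode=2)  # 2 == persistent, per the PROPERTIES notes above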
awx/lib/site-packages/amqp/channel.py (new file, 2506 lines)
File diff suppressed because it is too large
awx/lib/site-packages/amqp/connection.py (new file, 926 lines)
@@ -0,0 +1,926 @@
"""AMQP Connections"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
from __future__ import absolute_import

import logging
import socket

from array import array
try:
    from ssl import SSLError
except ImportError:
    class SSLError(Exception):  # noqa
        pass

from . import __version__
from .abstract_channel import AbstractChannel
from .channel import Channel
from .exceptions import ChannelError, ConnectionError
from .method_framing import MethodReader, MethodWriter
from .serialization import AMQPWriter
from .transport import create_transport

HAS_MSG_PEEK = hasattr(socket, 'MSG_PEEK')

START_DEBUG_FMT = """
Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s
""".strip()

__all__ = ['Connection']

#
# Client property info that gets sent to the server on connection startup
#
LIBRARY_PROPERTIES = {
    'product': 'py-amqp',
    'product_version': __version__,
    'capabilities': {},
}

AMQP_LOGGER = logging.getLogger('amqp')


class Connection(AbstractChannel):
|
||||
"""The connection class provides methods for a client to establish a
|
||||
network connection to a server, and for both peers to operate the
|
||||
connection thereafter.
|
||||
|
||||
GRAMMAR::
|
||||
|
||||
connection = open-connection *use-connection close-connection
|
||||
open-connection = C:protocol-header
|
||||
S:START C:START-OK
|
||||
*challenge
|
||||
S:TUNE C:TUNE-OK
|
||||
C:OPEN S:OPEN-OK
|
||||
challenge = S:SECURE C:SECURE-OK
|
||||
use-connection = *channel
|
||||
close-connection = C:CLOSE S:CLOSE-OK
|
||||
/ S:CLOSE C:CLOSE-OK
|
||||
|
||||
"""
|
||||
Channel = Channel
|
||||
|
||||
prev_sent = None
|
||||
prev_recv = None
|
||||
missed_heartbeats = 0
|
||||
|
||||
def __init__(self, host='localhost', userid='guest', password='guest',
|
||||
login_method='AMQPLAIN', login_response=None,
|
||||
virtual_host='/', locale='en_US', client_properties=None,
|
||||
ssl=False, connect_timeout=None, channel_max=None,
|
||||
frame_max=None, heartbeat=0, **kwargs):
|
||||
"""Create a connection to the specified host, which should be
|
||||
a 'host[:port]', such as 'localhost', or '1.2.3.4:5672'
|
||||
(defaults to 'localhost', if a port is not specified then
|
||||
5672 is used)
|
||||
|
||||
If login_response is not specified, one is built up for you from
|
||||
userid and password if they are present.
|
||||
|
||||
The 'ssl' parameter may be simply True/False, or for Python >= 2.6
|
||||
a dictionary of options to pass to ssl.wrap_socket() such as
|
||||
requiring certain certificates.
|
||||
|
||||
"""
|
||||
channel_max = channel_max or 65535
|
||||
frame_max = frame_max or 131072
|
||||
if (login_response is None) \
|
||||
and (userid is not None) \
|
||||
and (password is not None):
|
||||
login_response = AMQPWriter()
|
||||
login_response.write_table({'LOGIN': userid, 'PASSWORD': password})
|
||||
login_response = login_response.getvalue()[4:] # Skip the length
|
||||
# at the beginning
|
||||
|
||||
d = dict(LIBRARY_PROPERTIES, **client_properties or {})
|
||||
self._method_override = {(60, 50): self._dispatch_basic_return}
|
||||
|
||||
self.channels = {}
|
||||
# The connection object itself is treated as channel 0
|
||||
super(Connection, self).__init__(self, 0)
|
||||
|
||||
self.transport = None
|
||||
|
||||
# Properties set in the Tune method
|
||||
self.channel_max = channel_max
|
||||
self.frame_max = frame_max
|
||||
self.heartbeat = heartbeat
|
||||
|
||||
self._avail_channel_ids = array('H', range(self.channel_max, 0, -1))
|
||||
|
||||
# Properties set in the Start method
|
||||
self.version_major = 0
|
||||
self.version_minor = 0
|
||||
self.server_properties = {}
|
||||
self.mechanisms = []
|
||||
self.locales = []
|
||||
|
||||
# Let the transport.py module setup the actual
|
||||
# socket connection to the broker.
|
||||
#
|
||||
self.transport = create_transport(host, connect_timeout, ssl)
|
||||
|
||||
self.method_reader = MethodReader(self.transport)
|
||||
self.method_writer = MethodWriter(self.transport, self.frame_max)
|
||||
|
||||
self.wait(allowed_methods=[
|
||||
(10, 10), # start
|
||||
])
|
||||
|
||||
self._x_start_ok(d, login_method, login_response, locale)
|
||||
|
||||
self._wait_tune_ok = True
|
||||
while self._wait_tune_ok:
|
||||
self.wait(allowed_methods=[
|
||||
(10, 20), # secure
|
||||
(10, 30), # tune
|
||||
])
|
||||
|
||||
return self._x_open(virtual_host)
|
||||
|
||||
def _do_close(self):
|
||||
try:
|
||||
self.transport.close()
|
||||
|
||||
temp_list = [x for x in self.channels.values() if x is not self]
|
||||
for ch in temp_list:
|
||||
ch._do_close()
|
||||
except socket.error:
|
||||
pass # connection already closed on the other end
|
||||
finally:
|
||||
self.transport = self.connection = self.channels = None
|
||||
|
||||
def _get_free_channel_id(self):
|
||||
try:
|
||||
return self._avail_channel_ids.pop()
|
||||
except IndexError:
|
||||
raise ConnectionError(
|
||||
'No free channel ids, current=%d, channel_max=%d' % (
|
||||
len(self.channels), self.channel_max), (20, 10))
|
||||
|
||||
def _claim_channel_id(self, channel_id):
|
||||
try:
|
||||
return self._avail_channel_ids.remove(channel_id)
|
||||
except ValueError:
|
||||
raise ConnectionError(
|
||||
'Channel %r already open' % (channel_id, ))
|
||||
|
||||
def _wait_method(self, channel_id, allowed_methods):
|
||||
"""Wait for a method from the server destined for
|
||||
a particular channel."""
|
||||
#
|
||||
# Check the channel's deferred methods
|
||||
#
|
||||
method_queue = self.channels[channel_id].method_queue
|
||||
|
||||
for queued_method in method_queue:
|
||||
method_sig = queued_method[0]
|
||||
if (allowed_methods is None) \
|
||||
or (method_sig in allowed_methods) \
|
||||
or (method_sig == (20, 40)):
|
||||
method_queue.remove(queued_method)
|
||||
return queued_method
|
||||
|
||||
#
|
||||
# Nothing queued, need to wait for a method from the peer
|
||||
#
|
||||
while 1:
|
||||
channel, method_sig, args, content = \
|
||||
self.method_reader.read_method()
|
||||
|
||||
if channel == channel_id and (
|
||||
allowed_methods is None or
|
||||
method_sig in allowed_methods or
|
||||
method_sig == (20, 40)):
|
||||
return method_sig, args, content
|
||||
|
||||
#
|
||||
# Certain methods like basic_return should be dispatched
|
||||
# immediately rather than being queued, even if they're not
|
||||
# one of the 'allowed_methods' we're looking for.
|
||||
#
|
||||
if channel and method_sig in self.Channel._IMMEDIATE_METHODS:
|
||||
self.channels[channel].dispatch_method(
|
||||
method_sig, args, content,
|
||||
)
|
||||
continue
|
||||
|
||||
#
|
||||
# Not the channel and/or method we were looking for. Queue
|
||||
# this method for later
|
||||
#
|
||||
self.channels[channel].method_queue.append(
|
||||
(method_sig, args, content)
|
||||
)
|
||||
|
||||
#
|
||||
# If we just queued up a method for channel 0 (the Connection
|
||||
# itself) it's probably a close method in reaction to some
|
||||
# error, so deal with it right away.
|
||||
#
|
||||
if not channel:
|
||||
self.wait()
|
||||
|
||||
def channel(self, channel_id=None):
|
||||
"""Fetch a Channel object identified by the numeric channel_id, or
|
||||
create that object if it doesn't already exist."""
|
||||
try:
|
||||
return self.channels[channel_id]
|
||||
except KeyError:
|
||||
return self.Channel(self, channel_id)
|
||||
|
||||
def is_alive(self):
|
||||
if HAS_MSG_PEEK:
|
||||
sock = self.sock
|
||||
prev = sock.gettimeout()
|
||||
sock.settimeout(0.0001)
|
||||
try:
|
||||
sock.recv(1, socket.MSG_PEEK)
|
||||
except socket.timeout:
|
||||
pass
|
||||
except socket.error:
|
||||
return False
|
||||
finally:
|
||||
sock.settimeout(prev)
|
||||
return True
|
||||
|
||||
def drain_events(self, timeout=None):
|
||||
"""Wait for an event on a channel."""
|
||||
chanmap = self.channels
|
||||
chanid, method_sig, args, content = self._wait_multiple(
|
||||
chanmap, None, timeout=timeout,
|
||||
)
|
||||
|
||||
channel = chanmap[chanid]
|
||||
|
||||
if (content and
|
||||
channel.auto_decode and
|
||||
hasattr(content, 'content_encoding')):
|
||||
try:
|
||||
content.body = content.body.decode(content.content_encoding)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
amqp_method = (self._method_override.get(method_sig) or
|
||||
channel._METHOD_MAP.get(method_sig, None))
|
||||
|
||||
if amqp_method is None:
|
||||
raise Exception('Unknown AMQP method %r' % (method_sig, ))
|
||||
|
||||
if content is None:
|
||||
return amqp_method(channel, args)
|
||||
else:
|
||||
return amqp_method(channel, args, content)
|
||||
|
||||
def read_timeout(self, timeout=None):
|
||||
if timeout is None:
|
||||
return self.method_reader.read_method()
|
||||
sock = self.sock
|
||||
prev = sock.gettimeout()
|
||||
if prev != timeout:
|
||||
sock.settimeout(timeout)
|
||||
try:
|
||||
try:
|
||||
return self.method_reader.read_method()
|
||||
except SSLError, exc:
|
||||
# http://bugs.python.org/issue10272
|
||||
if 'timed out' in str(exc):
|
||||
raise socket.timeout()
|
||||
# Non-blocking SSL sockets can throw SSLError
|
||||
if 'The operation did not complete' in str(exc):
|
||||
raise socket.timeout()
|
||||
raise
|
||||
finally:
|
||||
if prev != timeout:
|
||||
sock.settimeout(prev)
|
||||
|
||||
def _wait_multiple(self, channels, allowed_methods, timeout=None):
|
||||
for channel_id, channel in channels.iteritems():
|
||||
method_queue = channel.method_queue
|
||||
for queued_method in method_queue:
|
||||
method_sig = queued_method[0]
|
||||
if (allowed_methods is None or
|
||||
method_sig in allowed_methods or
|
||||
method_sig == (20, 40)):
|
||||
method_queue.remove(queued_method)
|
||||
method_sig, args, content = queued_method
|
||||
return channel_id, method_sig, args, content
|
||||
|
||||
# Nothing queued, need to wait for a method from the peer
|
||||
read_timeout = self.read_timeout
|
||||
wait = self.wait
|
||||
while 1:
|
||||
channel, method_sig, args, content = read_timeout(timeout)
|
||||
|
||||
if channel in channels and (
|
||||
allowed_methods is None or
|
||||
method_sig in allowed_methods or
|
||||
method_sig == (20, 40)):
|
||||
return channel, method_sig, args, content
|
||||
|
||||
# Not the channel and/or method we were looking for. Queue
|
||||
# this method for later
|
||||
channels[channel].method_queue.append((method_sig, args, content))
|
||||
|
||||
#
|
||||
# If we just queued up a method for channel 0 (the Connection
|
||||
# itself) it's probably a close method in reaction to some
|
||||
# error, so deal with it right away.
|
||||
#
|
||||
if channel == 0:
|
||||
wait()
|
||||
|
||||
def _dispatch_basic_return(self, channel, args, msg):
|
||||
reply_code = args.read_short()
|
||||
reply_text = args.read_shortstr()
|
||||
exchange = args.read_shortstr()
|
||||
routing_key = args.read_shortstr()
|
||||
|
||||
exc = ChannelError('basic.return', reply_code, reply_text, (50, 60))
|
||||
handlers = channel.events.get('basic_return')
|
||||
if not handlers:
|
||||
raise exc
|
||||
for callback in handlers:
|
||||
callback(exc, exchange, routing_key, msg)
|
||||
|
||||
def close(self, reply_code=0, reply_text='', method_sig=(0, 0)):
|
||||
"""Request a connection close
|
||||
|
||||
This method indicates that the sender wants to close the
|
||||
connection. This may be due to internal conditions (e.g. a
|
||||
forced shut-down) or due to an error handling a specific
|
||||
method, i.e. an exception. When a close is due to an
|
||||
exception, the sender provides the class and method id of the
|
||||
method which caused the exception.
|
||||
|
||||
RULE:
|
||||
|
||||
After sending this method any received method except the
|
||||
Close-OK method MUST be discarded.
|
||||
|
||||
RULE:
|
||||
|
||||
The peer sending this method MAY use a counter or timeout
|
||||
to detect failure of the other peer to respond correctly
|
||||
with the Close-OK method.
|
||||
|
||||
RULE:
|
||||
|
||||
When a server receives the Close method from a client it
|
||||
MUST delete all server-side resources associated with the
|
||||
client's context. A client CANNOT reconnect to a context
|
||||
after sending or receiving a Close method.
|
||||
|
||||
PARAMETERS:
|
||||
reply_code: short
|
||||
|
||||
The reply code. The AMQ reply codes are defined in AMQ
|
||||
RFC 011.
|
||||
|
||||
reply_text: shortstr
|
||||
|
||||
The localised reply text. This text can be logged as an
|
||||
aid to resolving issues.
|
||||
|
||||
class_id: short
|
||||
|
||||
failing method class
|
||||
|
||||
When the close is provoked by a method exception, this
|
||||
is the class of the method.
|
||||
|
||||
method_id: short
|
||||
|
||||
failing method ID
|
||||
|
||||
When the close is provoked by a method exception, this
|
||||
is the ID of the method.
|
||||
|
||||
"""
|
||||
if self.transport is None:
|
||||
# already closed
|
||||
return
|
||||
|
||||
args = AMQPWriter()
|
||||
args.write_short(reply_code)
|
||||
args.write_shortstr(reply_text)
|
||||
args.write_short(method_sig[0]) # class_id
|
||||
args.write_short(method_sig[1]) # method_id
|
||||
self._send_method((10, 50), args)
|
||||
return self.wait(allowed_methods=[
|
||||
(10, 50), # Connection.close
|
||||
(10, 51), # Connection.close_ok
|
||||
])
|
||||
|
||||
def _close(self, args):
|
||||
"""Request a connection close
|
||||
|
||||
This method indicates that the sender wants to close the
|
||||
connection. This may be due to internal conditions (e.g. a
|
||||
forced shut-down) or due to an error handling a specific
|
||||
method, i.e. an exception. When a close is due to an
|
||||
exception, the sender provides the class and method id of the
|
||||
method which caused the exception.
|
||||
|
||||
RULE:
|
||||
|
||||
After sending this method any received method except the
|
||||
Close-OK method MUST be discarded.
|
||||
|
||||
RULE:
|
||||
|
||||
The peer sending this method MAY use a counter or timeout
|
||||
to detect failure of the other peer to respond correctly
|
||||
with the Close-OK method.
|
||||
|
||||
RULE:
|
||||
|
||||
When a server receives the Close method from a client it
|
||||
MUST delete all server-side resources associated with the
|
||||
client's context. A client CANNOT reconnect to a context
|
||||
after sending or receiving a Close method.
|
||||
|
||||
PARAMETERS:
|
||||
reply_code: short
|
||||
|
||||
The reply code. The AMQ reply codes are defined in AMQ
|
||||
RFC 011.
|
||||
|
||||
reply_text: shortstr
|
||||
|
||||
The localised reply text. This text can be logged as an
|
||||
aid to resolving issues.
|
||||
|
||||
class_id: short
|
||||
|
||||
failing method class
|
||||
|
||||
When the close is provoked by a method exception, this
|
||||
is the class of the method.
|
||||
|
||||
method_id: short
|
||||
|
||||
failing method ID
|
||||
|
||||
When the close is provoked by a method exception, this
|
||||
is the ID of the method.
|
||||
|
||||
"""
|
||||
reply_code = args.read_short()
|
||||
reply_text = args.read_shortstr()
|
||||
class_id = args.read_short()
|
||||
method_id = args.read_short()
|
||||
|
||||
self._x_close_ok()
|
||||
|
||||
raise ConnectionError(reply_code, reply_text, (class_id, method_id))
|
||||
|
||||
def _x_close_ok(self):
|
||||
"""Confirm a connection close
|
||||
|
||||
This method confirms a Connection.Close method and tells the
|
||||
recipient that it is safe to release resources for the
|
||||
connection and close the socket.
|
||||
|
||||
RULE:
|
||||
|
||||
A peer that detects a socket closure without having
|
||||
received a Close-Ok handshake method SHOULD log the error.
|
||||
|
||||
"""
|
||||
self._send_method((10, 51))
|
||||
self._do_close()
|
||||
|
||||
def _close_ok(self, args):
|
||||
"""Confirm a connection close
|
||||
|
||||
This method confirms a Connection.Close method and tells the
|
||||
recipient that it is safe to release resources for the
|
||||
connection and close the socket.
|
||||
|
||||
RULE:
|
||||
|
||||
A peer that detects a socket closure without having
|
||||
received a Close-Ok handshake method SHOULD log the error.
|
||||
|
||||
"""
|
||||
self._do_close()
|
||||
|
||||
def _x_open(self, virtual_host, capabilities=''):
|
||||
"""Open connection to virtual host
|
||||
|
||||
This method opens a connection to a virtual host, which is a
|
||||
collection of resources, and acts to separate multiple
|
||||
application domains within a server.
|
||||
|
||||
RULE:
|
||||
|
||||
The client MUST open the context before doing any work on
|
||||
the connection.
|
||||
|
||||
PARAMETERS:
|
||||
virtual_host: shortstr
|
||||
|
||||
virtual host name
|
||||
|
||||
The name of the virtual host to work with.
|
||||
|
||||
RULE:
|
||||
|
||||
If the server supports multiple virtual hosts, it
|
||||
MUST enforce a full separation of exchanges,
|
||||
queues, and all associated entities per virtual
|
||||
host. An application, connected to a specific
|
||||
virtual host, MUST NOT be able to access resources
|
||||
of another virtual host.
|
||||
|
||||
RULE:
|
||||
|
||||
The server SHOULD verify that the client has
|
||||
permission to access the specified virtual host.
|
||||
|
||||
RULE:
|
||||
|
||||
The server MAY configure arbitrary limits per
|
||||
virtual host, such as the number of each type of
|
||||
entity that may be used, per connection and/or in
|
||||
total.
|
||||
|
||||
capabilities: shortstr
|
||||
|
||||
required capabilities
|
||||
|
||||
The client may specify a number of capability names,
|
||||
delimited by spaces. The server can use this string
|
||||
to how to process the client's connection request.
|
||||
|
||||
"""
|
||||
args = AMQPWriter()
|
||||
args.write_shortstr(virtual_host)
|
||||
args.write_shortstr(capabilities)
|
||||
args.write_bit(False)
|
||||
self._send_method((10, 40), args)
|
||||
return self.wait(allowed_methods=[
|
||||
(10, 41), # Connection.open_ok
|
||||
])
|
||||
|
||||
def _open_ok(self, args):
|
||||
"""Signal that the connection is ready
|
||||
|
||||
This method signals to the client that the connection is ready
|
||||
for use.
|
||||
|
||||
PARAMETERS:
|
||||
known_hosts: shortstr (deprecated)
|
||||
|
||||
"""
|
||||
AMQP_LOGGER.debug('Open OK!')
|
||||
|
||||
def _secure(self, args):
|
||||
"""Security mechanism challenge
|
||||
|
||||
The SASL protocol works by exchanging challenges and responses
|
||||
until both peers have received sufficient information to
|
||||
authenticate each other. This method challenges the client to
|
||||
provide more information.
|
||||
|
||||
PARAMETERS:
|
||||
challenge: longstr
|
||||
|
||||
security challenge data
|
||||
|
||||
Challenge information, a block of opaque binary data
|
||||
passed to the security mechanism.
|
||||
|
||||
"""
|
||||
challenge = args.read_longstr() # noqa
|
||||
|
||||
def _x_secure_ok(self, response):
|
||||
"""Security mechanism response
|
||||
|
||||
This method attempts to authenticate, passing a block of SASL
|
||||
data for the security mechanism at the server side.
|
||||
|
||||
PARAMETERS:
|
||||
response: longstr
|
||||
|
||||
security response data
|
||||
|
||||
A block of opaque data passed to the security
|
||||
mechanism. The contents of this data are defined by
|
||||
the SASL security mechanism.
|
||||
|
||||
"""
|
||||
args = AMQPWriter()
|
||||
args.write_longstr(response)
|
||||
self._send_method((10, 21), args)
|
||||
|
||||
def _start(self, args):
|
||||
"""Start connection negotiation
|
||||
|
||||
This method starts the connection negotiation process by
|
||||
telling the client the protocol version that the server
|
||||
proposes, along with a list of security mechanisms which the
|
||||
client can use for authentication.
|
||||
|
||||
RULE:
|
||||
|
||||
If the client cannot handle the protocol version suggested
|
||||
by the server it MUST close the socket connection.
|
||||
|
||||
RULE:
|
||||
|
||||
The server MUST provide a protocol version that is lower
|
||||
than or equal to that requested by the client in the
|
||||
protocol header. If the server cannot support the
|
||||
specified protocol it MUST NOT send this method, but MUST
|
||||
close the socket connection.
|
||||
|
||||
PARAMETERS:
|
||||
version_major: octet
|
||||
|
||||
protocol major version
|
||||
|
||||
The protocol major version that the server agrees to
|
||||
use, which cannot be higher than the client's major
|
||||
version.
|
||||
|
||||
version_minor: octet
|
||||
|
||||
protocol minor version
|
||||
|
||||
The protocol minor version that the server agrees to
|
||||
use, which cannot be higher than the client's minor
|
||||
version.
|
||||
|
||||
server_properties: table
|
||||
|
||||
server properties
|
||||
|
||||
mechanisms: longstr
|
||||
|
||||
available security mechanisms
|
||||
|
||||
A list of the security mechanisms that the server
|
||||
supports, delimited by spaces. Currently AMQP supports
|
||||
these mechanisms: PLAIN.
|
||||
|
||||
locales: longstr
|
||||
|
||||
available message locales
|
||||
|
||||
A list of the message locales that the server
|
||||
supports, delimited by spaces. The locale defines the
|
||||
language in which the server will send reply texts.
|
||||
|
||||
RULE:
|
||||
|
||||
All servers MUST support at least the en_US
|
||||
locale.
|
||||
|
||||
"""
|
||||
self.version_major = args.read_octet()
|
||||
self.version_minor = args.read_octet()
|
||||
self.server_properties = args.read_table()
|
||||
self.mechanisms = args.read_longstr().split(' ')
|
||||
self.locales = args.read_longstr().split(' ')
|
||||
|
||||
AMQP_LOGGER.debug(
|
||||
START_DEBUG_FMT,
|
||||
self.version_major, self.version_minor,
|
||||
self.server_properties, self.mechanisms, self.locales,
|
||||
)
|
||||
|
||||
def _x_start_ok(self, client_properties, mechanism, response, locale):
|
||||
"""Select security mechanism and locale
|
||||
|
||||
This method selects a SASL security mechanism. AMQP uses SASL
|
||||
(RFC2222) to negotiate authentication and encryption.
|
||||
|
||||
PARAMETERS:
|
||||
client_properties: table
|
||||
|
||||
client properties
|
||||
|
||||
mechanism: shortstr
|
||||
|
||||
selected security mechanism
|
||||
|
||||
A single security mechanisms selected by the client,
|
||||
which must be one of those specified by the server.
|
||||
|
||||
RULE:
|
||||
|
||||
The client SHOULD authenticate using the highest-
|
||||
level security profile it can handle from the list
|
||||
provided by the server.
|
||||
|
||||
RULE:
|
||||
|
||||
The mechanism field MUST contain one of the
|
||||
security mechanisms proposed by the server in the
|
||||
Start method. If it doesn't, the server MUST close
|
||||
the socket.
|
||||
|
||||
response: longstr
|
||||
|
||||
security response data
|
||||
|
||||
A block of opaque data passed to the security
|
||||
mechanism. The contents of this data are defined by
|
||||
the SASL security mechanism. For the PLAIN security
|
||||
mechanism this is defined as a field table holding two
|
||||
fields, LOGIN and PASSWORD.
|
||||
|
||||
locale: shortstr
|
||||
|
||||
selected message locale
|
||||
|
||||
A single message locale selected by the client, which
|
||||
must be one of those specified by the server.
|
||||
|
||||
"""
|
||||
if self.server_capabilities.get('consumer_cancel_notify'):
|
||||
if 'capabilities' not in client_properties:
|
||||
client_properties['capabilities'] = {}
|
||||
client_properties['capabilities']['consumer_cancel_notify'] = True
|
||||
args = AMQPWriter()
|
||||
args.write_table(client_properties)
|
||||
args.write_shortstr(mechanism)
|
||||
args.write_longstr(response)
|
||||
args.write_shortstr(locale)
|
||||
self._send_method((10, 11), args)
|
||||
|
||||
def _tune(self, args):
|
||||
"""Propose connection tuning parameters
|
||||
|
||||
This method proposes a set of connection configuration values
|
||||
to the client. The client can accept and/or adjust these.
|
||||
|
||||
PARAMETERS:
|
||||
channel_max: short
|
||||
|
||||
proposed maximum channels
|
||||
|
||||
The maximum total number of channels that the server
|
||||
allows per connection. Zero means that the server does
|
||||
not impose a fixed limit, but the number of allowed
|
||||
channels may be limited by available server resources.
|
||||
|
||||
frame_max: long
|
||||
|
||||
proposed maximum frame size
|
||||
|
||||
The largest frame size that the server proposes for
|
||||
the connection. The client can negotiate a lower
|
||||
value. Zero means that the server does not impose any
|
||||
specific limit but may reject very large frames if it
|
||||
cannot allocate resources for them.
|
||||
|
||||
RULE:
|
||||
|
||||
Until the frame-max has been negotiated, both
|
||||
peers MUST accept frames of up to 4096 octets
|
||||
large. The minimum non-zero value for the frame-
|
||||
max field is 4096.
|
||||
|
||||
heartbeat: short
|
||||
|
||||
desired heartbeat delay
|
||||
|
||||
The delay, in seconds, of the connection heartbeat
|
||||
that the server wants. Zero means the server does not
|
||||
want a heartbeat.
|
||||
|
||||
"""
|
||||
self.channel_max = args.read_short() or self.channel_max
|
||||
self.frame_max = args.read_long() or self.frame_max
|
||||
self.method_writer.frame_max = self.frame_max
|
||||
heartbeat = args.read_short() # noqa
|
||||
|
||||
self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat)
|
||||
|
||||
def send_heartbeat(self):
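# Frame type 8 is a heartbeat frame; it is always written on channel 0
# with an empty payload.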
|
||||
self.transport.write_frame(8, 0, bytes())
|
||||
|
||||
def heartbeat_tick(self, rate=2):
|
||||
"""Verify that hartbeats are sent and received.
|
||||
|
||||
:keyword rate: Rate is how often the tick is called
|
||||
compared to the actual heartbeat value. E.g. if
|
||||
the heartbeat is set to 3 seconds, and the tick
|
||||
is called every 3 / 2 seconds, then the rate is 2.
|
||||
|
||||
"""
|
||||
sent_now = self.method_writer.bytes_sent
|
||||
recv_now = self.method_reader.bytes_recv
|
||||
|
||||
if self.prev_sent is not None and self.prev_sent == sent_now:
|
||||
self.send_heartbeat()
|
||||
|
||||
if self.prev_recv is not None and self.prev_recv == recv_now:
|
||||
self.missed_heartbeats += 1
|
||||
else:
|
||||
self.missed_heartbeats = 0
|
||||
|
||||
self.prev_sent, self.prev_recv = sent_now, recv_now
|
||||
|
||||
if self.missed_heartbeats >= rate:
|
||||
raise ConnectionError('Too many heartbeats missed')
|
||||
|
||||
def _x_tune_ok(self, channel_max, frame_max, heartbeat):
|
||||
"""Negotiate connection tuning parameters
|
||||
|
||||
This method sends the client's connection tuning parameters to
|
||||
the server. Certain fields are negotiated, others provide
|
||||
capability information.
|
||||
|
||||
PARAMETERS:
|
||||
channel_max: short
|
||||
|
||||
negotiated maximum channels
|
||||
|
||||
The maximum total number of channels that the client
|
||||
will use per connection. May not be higher than the
|
||||
value specified by the server.
|
||||
|
||||
RULE:
|
||||
|
||||
The server MAY ignore the channel-max value or MAY
|
||||
use it for tuning its resource allocation.
|
||||
|
||||
frame_max: long
|
||||
|
||||
negotiated maximum frame size
|
||||
|
||||
The largest frame size that the client and server will
|
||||
use for the connection. Zero means that the client
|
||||
does not impose any specific limit but may reject very
|
||||
large frames if it cannot allocate resources for them.
|
||||
Note that the frame-max limit applies principally to
|
||||
content frames, where large contents can be broken
|
||||
into frames of arbitrary size.
|
||||
|
||||
RULE:
|
||||
|
||||
Until the frame-max has been negotiated, both
|
||||
peers must accept frames of up to 4096 octets
|
||||
large. The minimum non-zero value for the frame-
|
||||
max field is 4096.
|
||||
|
||||
heartbeat: short
|
||||
|
||||
desired heartbeat delay
|
||||
|
||||
The delay, in seconds, of the connection heartbeat
|
||||
that the client wants. Zero means the client does not
|
||||
want a heartbeat.
|
||||
|
||||
"""
|
||||
args = AMQPWriter()
|
||||
args.write_short(channel_max)
|
||||
args.write_long(frame_max)
|
||||
args.write_short(heartbeat or 0)
|
||||
self._send_method((10, 31), args)
|
||||
self._wait_tune_ok = False
|
||||
|
||||
@property
|
||||
def sock(self):
|
||||
return self.transport.sock
|
||||
|
||||
@property
|
||||
def server_capabilities(self):
|
||||
return self.server_properties.get('capabilities') or {}
|
||||
|
||||
_METHOD_MAP = {
|
||||
(10, 10): _start,
|
||||
(10, 20): _secure,
|
||||
(10, 30): _tune,
|
||||
(10, 41): _open_ok,
|
||||
(10, 50): _close,
|
||||
(10, 51): _close_ok,
|
||||
}
|
||||
|
||||
_IMMEDIATE_METHODS = []
|
||||
connection_errors = (
|
||||
ConnectionError,
|
||||
socket.error,
|
||||
IOError,
|
||||
OSError,
|
||||
)
|
||||
channel_errors = (ChannelError, )
|
||||
125
awx/lib/site-packages/amqp/exceptions.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""Exceptions used by amqp"""
|
||||
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
|
||||
#
|
||||
# This library is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Lesser General Public
|
||||
# License as published by the Free Software Foundation; either
|
||||
# version 2.1 of the License, or (at your option) any later version.
|
||||
#
|
||||
# This library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public
|
||||
# License along with this library; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
|
||||
from __future__ import absolute_import
|
||||
|
||||
from struct import pack, unpack
|
||||
|
||||
__all__ = ['AMQPError', 'ConnectionError', 'ChannelError']
|
||||
|
||||
|
||||
class AMQPError(Exception):
|
||||
|
||||
def __init__(self, msg, reply_code=None, reply_text=None,
|
||||
method_sig=None, method_name=None):
|
||||
self.message = msg
|
||||
self.amqp_reply_code = reply_code
|
||||
self.amqp_reply_text = reply_text
|
||||
self.amqp_method_sig = method_sig
|
||||
self.method_name = method_name or ''
|
||||
if method_sig and not self.method_name:
|
||||
self.method_name = METHOD_NAME_MAP.get(method_sig, '')
|
||||
Exception.__init__(self, msg, reply_code,
|
||||
reply_text, method_sig, self.method_name)
|
||||
|
||||
def __str__(self):
|
||||
if self.amqp_reply_code:
|
||||
return '%s: (%s, %s, %s)' % (
|
||||
self.message, self.amqp_reply_code, self.amqp_reply_text,
|
||||
self.amqp_method_sig)
|
||||
return self.message
|
||||
|
||||
|
||||
class ConnectionError(AMQPError):
|
||||
pass
|
||||
|
||||
|
||||
class ChannelError(AMQPError):
|
||||
pass
|
||||
|
||||
|
||||
class ConsumerCancel(ChannelError):
|
||||
pass
|
||||
|
||||
|
||||
METHOD_NAME_MAP = {
|
||||
(10, 10): 'Connection.start',
|
||||
(10, 11): 'Connection.start_ok',
|
||||
(10, 20): 'Connection.secure',
|
||||
(10, 21): 'Connection.secure_ok',
|
||||
(10, 30): 'Connection.tune',
|
||||
(10, 31): 'Connection.tune_ok',
|
||||
(10, 40): 'Connection.open',
|
||||
(10, 41): 'Connection.open_ok',
|
||||
(10, 50): 'Connection.close',
|
||||
(10, 51): 'Connection.close_ok',
|
||||
(20, 10): 'Channel.open',
|
||||
(20, 11): 'Channel.open_ok',
|
||||
(20, 20): 'Channel.flow',
|
||||
(20, 21): 'Channel.flow_ok',
|
||||
(20, 40): 'Channel.close',
|
||||
(20, 41): 'Channel.close_ok',
|
||||
(30, 10): 'Access.request',
|
||||
(30, 11): 'Access.request_ok',
|
||||
(40, 10): 'Exchange.declare',
|
||||
(40, 11): 'Exchange.declare_ok',
|
||||
(40, 20): 'Exchange.delete',
|
||||
(40, 21): 'Exchange.delete_ok',
|
||||
(40, 30): 'Exchange.bind',
|
||||
(40, 31): 'Exchange.bind_ok',
|
||||
(40, 40): 'Exchange.unbind',
|
||||
(40, 41): 'Exchange.unbind_ok',
|
||||
(50, 10): 'Queue.declare',
|
||||
(50, 11): 'Queue.declare_ok',
|
||||
(50, 20): 'Queue.bind',
|
||||
(50, 21): 'Queue.bind_ok',
|
||||
(50, 30): 'Queue.purge',
|
||||
(50, 31): 'Queue.purge_ok',
|
||||
(50, 40): 'Queue.delete',
|
||||
(50, 41): 'Queue.delete_ok',
|
||||
(50, 50): 'Queue.unbind',
|
||||
(50, 51): 'Queue.unbind_ok',
|
||||
(60, 10): 'Basic.qos',
|
||||
(60, 11): 'Basic.qos_ok',
|
||||
(60, 20): 'Basic.consume',
|
||||
(60, 21): 'Basic.consume_ok',
|
||||
(60, 30): 'Basic.cancel',
|
||||
(60, 31): 'Basic.cancel_ok',
|
||||
(60, 40): 'Basic.publish',
|
||||
(60, 50): 'Basic.return',
|
||||
(60, 60): 'Basic.deliver',
|
||||
(60, 70): 'Basic.get',
|
||||
(60, 71): 'Basic.get_ok',
|
||||
(60, 72): 'Basic.get_empty',
|
||||
(60, 80): 'Basic.ack',
|
||||
(60, 90): 'Basic.reject',
|
||||
(60, 100): 'Basic.recover_async',
|
||||
(60, 110): 'Basic.recover',
|
||||
(60, 111): 'Basic.recover_ok',
|
||||
(60, 120): 'Basic.nack',
|
||||
(90, 10): 'Tx.select',
|
||||
(90, 11): 'Tx.select_ok',
|
||||
(90, 20): 'Tx.commit',
|
||||
(90, 21): 'Tx.commit_ok',
|
||||
(90, 30): 'Tx.rollback',
|
||||
(90, 31): 'Tx.rollback_ok',
|
||||
(85, 10): 'Confirm.select',
|
||||
(85, 11): 'Confirm.select_ok',
|
||||
}
|
||||
|
||||
|
||||
for _method_id, _method_name in list(METHOD_NAME_MAP.items()):
|
||||
METHOD_NAME_MAP[unpack('>I', pack('>HH', *_method_id))[0]] = _method_name
|
||||
229
awx/lib/site-packages/amqp/method_framing.py
Normal file
@@ -0,0 +1,229 @@
|
||||
"""Convert between frames and higher-level AMQP methods"""
|
||||
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
|
||||
#
|
||||
# This library is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Lesser General Public
|
||||
# License as published by the Free Software Foundation; either
|
||||
# version 2.1 of the License, or (at your option) any later version.
|
||||
#
|
||||
# This library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public
|
||||
# License along with this library; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import defaultdict
|
||||
from struct import pack, unpack
|
||||
from Queue import Queue
|
||||
|
||||
try:
|
||||
bytes
|
||||
except NameError:
|
||||
# Python 2.5 and lower
|
||||
bytes = str
|
||||
|
||||
from .basic_message import Message
|
||||
from .exceptions import AMQPError
|
||||
from .serialization import AMQPReader
|
||||
|
||||
__all__ = ['MethodReader']
|
||||
|
||||
#
|
||||
# MethodReader needs to know which methods are supposed
|
||||
# to be followed by content headers and bodies.
|
||||
#
|
||||
_CONTENT_METHODS = [
|
||||
(60, 50), # Basic.return
|
||||
(60, 60), # Basic.deliver
|
||||
(60, 71), # Basic.get_ok
|
||||
]
|
||||
|
||||
|
||||
class _PartialMessage(object):
|
||||
"""Helper class to build up a multi-frame method."""
|
||||
|
||||
def __init__(self, method_sig, args):
|
||||
self.method_sig = method_sig
|
||||
self.args = args
|
||||
self.msg = Message()
|
||||
self.body_parts = []
|
||||
self.body_received = 0
|
||||
self.body_size = None
|
||||
self.complete = False
|
||||
|
||||
def add_header(self, payload):
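# The content header carries the 16-bit class id, 16-bit weight and
# 64-bit body size (all big-endian), followed by the property flags
# and the property list.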
|
||||
class_id, weight, self.body_size = unpack('>HHQ', payload[:12])
|
||||
self.msg._load_properties(payload[12:])
|
||||
self.complete = (self.body_size == 0)
|
||||
|
||||
def add_payload(self, payload):
|
||||
self.body_parts.append(payload)
|
||||
self.body_received += len(payload)
|
||||
|
||||
if self.body_received == self.body_size:
|
||||
self.msg.body = bytes().join(self.body_parts)
|
||||
self.complete = True
|
||||
|
||||
|
||||
class MethodReader(object):
|
||||
"""Helper class to receive frames from the broker, combine them if
|
||||
necessary, with content headers and content bodies into complete methods.
|
||||
|
||||
Normally a method is represented as a tuple containing
|
||||
(channel, method_sig, args, content).
|
||||
|
||||
In the case of a framing error, a :exc:`ConnectionError` is placed
|
||||
in the queue.
|
||||
|
||||
In the case of unexpected frames, a tuple made up of
|
||||
``(channel, ChannelError)`` is placed in the queue.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, source):
|
||||
self.source = source
|
||||
self.queue = Queue()
|
||||
self.running = False
|
||||
self.partial_messages = {}
|
||||
self.heartbeats = 0
|
||||
# For each channel, which type is expected next
|
||||
self.expected_types = defaultdict(lambda: 1)
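# (frame types: 1=method, 2=content header, 3=content body, 8=heartbeat)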
|
||||
# not an actual byte count, just incremented whenever we receive
|
||||
self.bytes_recv = 0
|
||||
|
||||
def _next_method(self):
|
||||
"""Read the next method from the source, once one complete method has
|
||||
been assembled it is placed in the internal queue."""
|
||||
empty = self.queue.empty
|
||||
read_frame = self.source.read_frame
|
||||
while empty():
|
||||
try:
|
||||
frame_type, channel, payload = read_frame()
|
||||
except Exception, e:
|
||||
#
|
||||
# Connection was closed? Framing Error?
|
||||
#
|
||||
self.queue.put(e)
|
||||
break
|
||||
|
||||
self.bytes_recv += 1
|
||||
|
||||
if frame_type not in (self.expected_types[channel], 8):
|
||||
self.queue.put((
|
||||
channel,
|
||||
AMQPError(
|
||||
'Received frame type %s while expecting type: %s' % (
|
||||
frame_type, self.expected_types[channel])
|
||||
),
|
||||
))
|
||||
elif frame_type == 1:
|
||||
self._process_method_frame(channel, payload)
|
||||
elif frame_type == 2:
|
||||
self._process_content_header(channel, payload)
|
||||
elif frame_type == 3:
|
||||
self._process_content_body(channel, payload)
|
||||
elif frame_type == 8:
|
||||
self._process_heartbeat(channel, payload)
|
||||
|
||||
def _process_heartbeat(self, channel, payload):
|
||||
self.heartbeats += 1
|
||||
|
||||
def _process_method_frame(self, channel, payload):
|
||||
"""Process Method frames"""
|
||||
method_sig = unpack('>HH', payload[:4])
|
||||
args = AMQPReader(payload[4:])
|
||||
|
||||
if method_sig in _CONTENT_METHODS:
|
||||
#
|
||||
# Save what we've got so far and wait for the content-header
|
||||
#
|
||||
self.partial_messages[channel] = _PartialMessage(method_sig, args)
|
||||
self.expected_types[channel] = 2
|
||||
else:
|
||||
self.queue.put((channel, method_sig, args, None))
|
||||
|
||||
def _process_content_header(self, channel, payload):
|
||||
"""Process Content Header frames"""
|
||||
partial = self.partial_messages[channel]
|
||||
partial.add_header(payload)
|
||||
|
||||
if partial.complete:
|
||||
#
|
||||
# a bodyless message, we're done
|
||||
#
|
||||
self.queue.put((channel, partial.method_sig,
|
||||
partial.args, partial.msg))
|
||||
self.partial_messages.pop(channel, None)
|
||||
self.expected_types[channel] = 1
|
||||
else:
|
||||
#
|
||||
# wait for the content-body
|
||||
#
|
||||
self.expected_types[channel] = 3
|
||||
|
||||
def _process_content_body(self, channel, payload):
|
||||
"""Process Content Body frames"""
|
||||
partial = self.partial_messages[channel]
|
||||
partial.add_payload(payload)
|
||||
if partial.complete:
|
||||
#
|
||||
# Stick the message in the queue and go back to
|
||||
# waiting for method frames
|
||||
#
|
||||
self.queue.put((channel, partial.method_sig,
|
||||
partial.args, partial.msg))
|
||||
self.partial_messages.pop(channel, None)
|
||||
self.expected_types[channel] = 1
|
||||
|
||||
def read_method(self):
|
||||
"""Read a method from the peer."""
|
||||
self._next_method()
|
||||
m = self.queue.get()
|
||||
if isinstance(m, Exception):
|
||||
raise m
|
||||
if isinstance(m, tuple) and isinstance(m[1], AMQPError):
|
||||
raise m[1]
|
||||
return m
|
||||
|
||||
|
||||
class MethodWriter(object):
|
||||
"""Convert AMQP methods into AMQP frames and send them out
|
||||
to the peer."""
|
||||
|
||||
def __init__(self, dest, frame_max):
|
||||
self.dest = dest
|
||||
self.frame_max = frame_max
|
||||
self.bytes_sent = 0
|
||||
|
||||
def write_method(self, channel, method_sig, args, content=None):
|
||||
write_frame = self.dest.write_frame
|
||||
payload = pack('>HH', method_sig[0], method_sig[1]) + args
|
||||
|
||||
if content:
|
||||
# do this early, so we can raise an exception if there's a
|
||||
# problem with the content properties, before sending the
|
||||
# first frame
|
||||
body = content.body
|
||||
if isinstance(body, unicode):
|
||||
coding = content.properties.get('content_encoding', None)
|
||||
if coding is None:
|
||||
coding = content.properties['content_encoding'] = 'UTF-8'
|
||||
|
||||
body = body.encode(coding)
|
||||
properties = content._serialize_properties()
|
||||
|
||||
write_frame(1, channel, payload)
|
||||
|
||||
if content:
|
||||
payload = pack('>HHQ', method_sig[0], 0, len(body)) + properties
|
||||
|
||||
write_frame(2, channel, payload)
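# frame_max covers the whole frame, so subtract the 7-byte header and
# the 1-byte end-of-frame octet when chunking the body.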
|
||||
|
||||
chunk_size = self.frame_max - 8
|
||||
for i in xrange(0, len(body), chunk_size):
|
||||
write_frame(3, channel, body[i:i + chunk_size])
|
||||
self.bytes_sent += 1
|
||||
465
awx/lib/site-packages/amqp/serialization.py
Normal file
@@ -0,0 +1,465 @@
|
||||
"""
|
||||
Convert between bytestreams and higher-level AMQP types.
|
||||
|
||||
2007-11-05 Barry Pederson <bp@barryp.org>
|
||||
|
||||
"""
|
||||
# Copyright (C) 2007 Barry Pederson <bp@barryp.org>
|
||||
#
|
||||
# This library is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Lesser General Public
|
||||
# License as published by the Free Software Foundation; either
|
||||
# version 2.1 of the License, or (at your option) any later version.
|
||||
#
|
||||
# This library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public
|
||||
# License along with this library; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from datetime import datetime
|
||||
from decimal import Decimal
|
||||
from struct import pack, unpack
|
||||
from time import mktime
|
||||
|
||||
IS_PY3K = sys.version_info[0] >= 3
|
||||
|
||||
if IS_PY3K:
|
||||
def byte(n):
|
||||
return bytes([n])
|
||||
else:
|
||||
byte = chr
|
||||
|
||||
try:
|
||||
from io import BytesIO
|
||||
except ImportError: # Py2.5
|
||||
try:
|
||||
from cStringIO import StringIO as BytesIO # noqa
|
||||
except ImportError:
|
||||
from StringIO import StringIO as BytesIO # noqa
|
||||
|
||||
try:
|
||||
bytes
|
||||
except NameError:
|
||||
# Python 2.5 and lower
|
||||
bytes = str
|
||||
|
||||
|
||||
class AMQPReader(object):
|
||||
"""Read higher-level AMQP types from a bytestream."""
|
||||
def __init__(self, source):
|
||||
"""Source should be either a file-like object with a read() method, or
|
||||
a plain (non-unicode) string."""
|
||||
if isinstance(source, bytes):
|
||||
self.input = BytesIO(source)
|
||||
elif hasattr(source, 'read'):
|
||||
self.input = source
|
||||
else:
|
||||
raise ValueError(
|
||||
'AMQPReader needs a file-like object or plain string')
|
||||
|
||||
self.bitcount = self.bits = 0
|
||||
|
||||
def close(self):
|
||||
self.input.close()
|
||||
|
||||
def read(self, n):
|
||||
"""Read n bytes."""
|
||||
self.bitcount = self.bits = 0
|
||||
return self.input.read(n)
|
||||
|
||||
def read_bit(self):
|
||||
"""Read a single boolean value."""
|
||||
if not self.bitcount:
|
||||
self.bits = ord(self.input.read(1))
|
||||
self.bitcount = 8
|
||||
result = (self.bits & 1) == 1
|
||||
self.bits >>= 1
|
||||
self.bitcount -= 1
|
||||
return result
|
||||
|
||||
def read_octet(self):
|
||||
"""Read one byte, return as an integer"""
|
||||
self.bitcount = self.bits = 0
|
||||
return unpack('B', self.input.read(1))[0]
|
||||
|
||||
def read_short(self):
|
||||
"""Read an unsigned 16-bit integer"""
|
||||
self.bitcount = self.bits = 0
|
||||
return unpack('>H', self.input.read(2))[0]
|
||||
|
||||
def read_long(self):
|
||||
"""Read an unsigned 32-bit integer"""
|
||||
self.bitcount = self.bits = 0
|
||||
return unpack('>I', self.input.read(4))[0]
|
||||
|
||||
def read_longlong(self):
|
||||
"""Read an unsigned 64-bit integer"""
|
||||
self.bitcount = self.bits = 0
|
||||
return unpack('>Q', self.input.read(8))[0]
|
||||
|
||||
def read_float(self):
|
||||
"""Read float value."""
|
||||
self.bitcount = self.bits = 0
|
||||
return unpack('>d', self.input.read(8))[0]
|
||||
|
||||
def read_shortstr(self):
|
||||
"""Read a short string that's stored in up to 255 bytes.
|
||||
|
||||
The encoding isn't specified in the AMQP spec, so
|
||||
assume it's utf-8
|
||||
|
||||
"""
|
||||
self.bitcount = self.bits = 0
|
||||
slen = unpack('B', self.input.read(1))[0]
|
||||
return self.input.read(slen).decode('utf-8')
|
||||
|
||||
def read_longstr(self):
|
||||
"""Read a string that's up to 2**32 bytes.
|
||||
|
||||
The encoding isn't specified in the AMQP spec, so
|
||||
assume it's utf-8
|
||||
|
||||
"""
|
||||
self.bitcount = self.bits = 0
|
||||
slen = unpack('>I', self.input.read(4))[0]
|
||||
return self.input.read(slen).decode('utf-8')
|
||||
|
||||
def read_table(self):
|
||||
"""Read an AMQP table, and return as a Python dictionary."""
|
||||
self.bitcount = self.bits = 0
|
||||
tlen = unpack('>I', self.input.read(4))[0]
|
||||
table_data = AMQPReader(self.input.read(tlen))
|
||||
result = {}
|
||||
while table_data.input.tell() < tlen:
|
||||
name = table_data.read_shortstr()
|
||||
val = table_data.read_item()
|
||||
result[name] = val
|
||||
return result
|
||||
|
||||
def read_item(self):
|
||||
ftype = ord(self.input.read(1))
|
||||
if ftype == 83: # 'S'
|
||||
val = self.read_longstr()
|
||||
elif ftype == 73: # 'I'
|
||||
val = unpack('>i', self.input.read(4))[0]
|
||||
elif ftype == 68: # 'D'
|
||||
d = self.read_octet()
|
||||
n = unpack('>i', self.input.read(4))[0]
|
||||
val = Decimal(n) / Decimal(10 ** d)
|
||||
elif ftype == 84: # 'T'
|
||||
val = self.read_timestamp()
|
||||
elif ftype == 70: # 'F'
|
||||
val = self.read_table() # recurse
|
||||
elif ftype == 65: # 'A'
|
||||
val = self.read_array()
|
||||
elif ftype == 116:
|
||||
val = self.read_bit()
|
||||
elif ftype == 100:
|
||||
val = self.read_float()
|
||||
else:
|
||||
raise ValueError(
|
||||
'Unknown value in table: %r (%r)' % (
|
||||
ftype, type(ftype)))
|
||||
return val
|
||||
|
||||
def read_array(self):
|
||||
array_length = unpack('>I', self.input.read(4))[0]
|
||||
array_data = AMQPReader(self.input.read(array_length))
|
||||
result = []
|
||||
while array_data.input.tell() < array_length:
|
||||
val = array_data.read_item()
|
||||
result.append(val)
|
||||
return result
|
||||
|
||||
def read_timestamp(self):
|
||||
"""Read and AMQP timestamp, which is a 64-bit integer representing
|
||||
seconds since the Unix epoch in 1-second resolution.
|
||||
|
||||
Return as a Python datetime.datetime object,
|
||||
expressed as localtime.
|
||||
|
||||
"""
|
||||
return datetime.fromtimestamp(self.read_longlong())
|
||||
|
||||
|
||||
class AMQPWriter(object):
|
||||
"""Convert higher-level AMQP types to bytestreams."""
|
||||
|
||||
def __init__(self, dest=None):
|
||||
"""dest may be a file-type object (with a write() method). If None
|
||||
then a BytesIO is created, and the contents can be accessed with
|
||||
this class's getvalue() method."""
|
||||
self.out = BytesIO() if dest is None else dest
|
||||
self.bits = []
|
||||
self.bitcount = 0
|
||||
|
||||
def _flushbits(self):
|
||||
if self.bits:
|
||||
out = self.out
|
||||
for b in self.bits:
|
||||
out.write(pack('B', b))
|
||||
self.bits = []
|
||||
self.bitcount = 0
|
||||
|
||||
def close(self):
|
||||
"""Pass through if possible to any file-like destinations."""
|
||||
try:
|
||||
self.out.close()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def flush(self):
|
||||
"""Pass through if possible to any file-like destinations."""
|
||||
try:
|
||||
self.out.flush()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def getvalue(self):
|
||||
"""Get what's been encoded so far if we're working with a BytesIO."""
|
||||
self._flushbits()
|
||||
return self.out.getvalue()
|
||||
|
||||
def write(self, s):
|
||||
"""Write a plain Python string with no special encoding in Python 2.x,
|
||||
or bytes in Python 3.x"""
|
||||
self._flushbits()
|
||||
self.out.write(s)
|
||||
|
||||
def write_bit(self, b):
|
||||
"""Write a boolean value."""
|
||||
b = 1 if b else 0
|
||||
shift = self.bitcount % 8
|
||||
if shift == 0:
|
||||
self.bits.append(0)
|
||||
self.bits[-1] |= (b << shift)
|
||||
self.bitcount += 1
|
||||
|
||||
def write_octet(self, n):
|
||||
"""Write an integer as an unsigned 8-bit value."""
|
||||
if n < 0 or n > 255:
|
||||
raise ValueError('Octet %r out of range 0..255' % (n, ))
|
||||
self._flushbits()
|
||||
self.out.write(pack('B', n))
|
||||
|
||||
def write_short(self, n):
|
||||
"""Write an integer as an unsigned 16-bit value."""
|
||||
if n < 0 or n > 65535:
|
||||
raise ValueError('Short %r out of range 0..65535' % (n, ))
|
||||
self._flushbits()
|
||||
self.out.write(pack('>H', int(n)))
|
||||
|
||||
def write_long(self, n):
|
||||
"""Write an integer as an unsigned2 32-bit value."""
|
||||
if n < 0 or n >= 4294967296:
|
||||
raise ValueError('Long %r out of range 0..2**32-1' % (n, ))
|
||||
self._flushbits()
|
||||
self.out.write(pack('>I', n))
|
||||
|
||||
def write_longlong(self, n):
|
||||
"""Write an integer as an unsigned 64-bit value."""
|
||||
if n < 0 or n >= 18446744073709551616:
|
||||
raise ValueError('Longlong %r out of range 0..2**64-1' % (n, ))
|
||||
self._flushbits()
|
||||
self.out.write(pack('>Q', n))
|
||||
|
||||
def write_shortstr(self, s):
|
||||
"""Write a string up to 255 bytes long (after any encoding).
|
||||
|
||||
If passed a unicode string, encode with UTF-8.
|
||||
|
||||
"""
|
||||
self._flushbits()
|
||||
if isinstance(s, unicode):
|
||||
s = s.encode('utf-8')
|
||||
if len(s) > 255:
|
||||
raise ValueError('String too long (%r)' % (len(s), ))
|
||||
self.write_octet(len(s))
|
||||
self.out.write(s)
|
||||
|
||||
def write_longstr(self, s):
|
||||
"""Write a string up to 2**32 bytes long after encoding.
|
||||
|
||||
If passed a unicode string, encode as UTF-8.
|
||||
|
||||
"""
|
||||
self._flushbits()
|
||||
if isinstance(s, unicode):
|
||||
s = s.encode('utf-8')
|
||||
self.write_long(len(s))
|
||||
self.out.write(s)
|
||||
|
||||
def write_table(self, d):
|
||||
"""Write out a Python dictionary made of up string keys, and values
|
||||
that are strings, signed integers, Decimal, datetime.datetime, or
|
||||
sub-dictionaries following the same constraints."""
|
||||
self._flushbits()
|
||||
table_data = AMQPWriter()
|
||||
for k, v in d.iteritems():
|
||||
table_data.write_shortstr(k)
|
||||
table_data.write_item(v)
|
||||
table_data = table_data.getvalue()
|
||||
self.write_long(len(table_data))
|
||||
self.out.write(table_data)
|
||||
|
||||
def write_item(self, v):
|
||||
if isinstance(v, basestring):
|
||||
if isinstance(v, unicode):
|
||||
v = v.encode('utf-8')
|
||||
self.write(byte(83)) # 'S'
|
||||
self.write_longstr(v)
|
||||
elif isinstance(v, bool):
|
||||
self.write(pack('>cB', byte(116), int(v))) # 't'
|
||||
elif isinstance(v, float):
|
||||
self.write(pack('>cd', byte(100), v)) # 'd'
|
||||
elif isinstance(v, (int, long)):
|
||||
self.write(pack('>ci', byte(73), v)) # 'I'
|
||||
elif isinstance(v, Decimal):
|
||||
self.write(byte(68)) # 'D'
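# AMQP decimal encoding: one octet holding the scale (number of
# decimal places) followed by the value as a signed 32-bit integer.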
|
||||
sign, digits, exponent = v.as_tuple()
|
||||
v = 0
|
||||
for d in digits:
|
||||
v = (v * 10) + d
|
||||
if sign:
|
||||
v = -v
|
||||
self.write_octet(-exponent)
|
||||
self.write(pack('>i', v))
|
||||
elif isinstance(v, datetime):
|
||||
self.write(byte(84)) # 'T'
|
||||
self.write_timestamp(v)
|
||||
## FIXME: timezone ?
|
||||
elif isinstance(v, dict):
|
||||
self.write(byte(70)) # 'F'
|
||||
self.write_table(v)
|
||||
elif isinstance(v, (list, tuple)):
|
||||
self.write(byte(65)) # 'A'
|
||||
self.write_array(v)
|
||||
else:
|
||||
raise ValueError(
|
||||
'Table type %r not handled by amqp: %r' % (
|
||||
type(v), v))
|
||||
|
||||
def write_array(self, a):
|
||||
array_data = AMQPWriter()
|
||||
for v in a:
|
||||
array_data.write_item(v)
|
||||
array_data = array_data.getvalue()
|
||||
self.write_long(len(array_data))
|
||||
self.out.write(array_data)
|
||||
|
||||
def write_timestamp(self, v):
|
||||
"""Write out a Python datetime.datetime object as a 64-bit integer
|
||||
representing seconds since the Unix epoch."""
|
||||
self.out.write(pack('>q', long(mktime(v.timetuple()))))
|
||||
|
||||
|
||||
class GenericContent(object):
|
||||
"""Abstract base class for AMQP content.
|
||||
|
||||
Subclasses should override the PROPERTIES attribute.
|
||||
|
||||
"""
|
||||
PROPERTIES = [('dummy', 'shortstr')]
|
||||
|
||||
def __init__(self, **props):
|
||||
"""Save the properties appropriate to this AMQP content type
|
||||
in a 'properties' dictionary."""
|
||||
d = {}
|
||||
for propname, _ in self.PROPERTIES:
|
||||
if propname in props:
|
||||
d[propname] = props[propname]
|
||||
# FIXME: should we ignore unknown properties?
|
||||
|
||||
self.properties = d
|
||||
|
||||
def __eq__(self, other):
|
||||
"""Check if this object has the same properties as another
|
||||
content object."""
|
||||
try:
|
||||
return self.properties == other.properties
|
||||
except AttributeError:
|
||||
return NotImplemented
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""Look for additional properties in the 'properties'
|
||||
dictionary, and if present - the 'delivery_info'
|
||||
dictionary."""
|
||||
if name == '__setstate__':
|
||||
# Allows pickling/unpickling to work
|
||||
raise AttributeError('__setstate__')
|
||||
|
||||
if name in self.properties:
|
||||
return self.properties[name]
|
||||
|
||||
if 'delivery_info' in self.__dict__ \
|
||||
and name in self.delivery_info:
|
||||
return self.delivery_info[name]
|
||||
|
||||
raise AttributeError(name)
|
||||
|
||||
def _load_properties(self, raw_bytes):
|
||||
"""Given the raw bytes containing the property-flags and property-list
|
||||
from a content-frame-header, parse and insert into a dictionary
|
||||
stored in this object as an attribute named 'properties'."""
|
||||
r = AMQPReader(raw_bytes)
|
||||
|
||||
#
|
||||
# Read 16-bit shorts until we get one with a low bit set to zero
|
||||
#
|
||||
flags = []
|
||||
while 1:
|
||||
flag_bits = r.read_short()
|
||||
flags.append(flag_bits)
|
||||
if flag_bits & 1 == 0:
|
||||
break
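# Each flag short carries 15 property bits (MSB first); the low bit
# says whether another flag short follows. Walk PROPERTIES in order
# and read a value for every bit that is set.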
|
||||
|
||||
shift = 0
|
||||
d = {}
|
||||
for key, proptype in self.PROPERTIES:
|
||||
if shift == 0:
|
||||
if not flags:
|
||||
break
|
||||
flag_bits, flags = flags[0], flags[1:]
|
||||
shift = 15
|
||||
if flag_bits & (1 << shift):
|
||||
d[key] = getattr(r, 'read_' + proptype)()
|
||||
shift -= 1
|
||||
|
||||
self.properties = d
|
||||
|
||||
def _serialize_properties(self):
|
||||
"""serialize the 'properties' attribute (a dictionary) into
|
||||
the raw bytes making up a set of property flags and a
|
||||
property list, suitable for putting into a content frame header."""
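# One flag bit per property, assigned MSB-first (bit 15 downwards); a
# set bit means the property is present, and non-bit properties then
# have their value appended to the property list in its declared type.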
|
||||
shift = 15
|
||||
flag_bits = 0
|
||||
flags = []
|
||||
raw_bytes = AMQPWriter()
|
||||
for key, proptype in self.PROPERTIES:
|
||||
val = self.properties.get(key, None)
|
||||
if val is not None:
|
||||
if shift == 0:
|
||||
flags.append(flag_bits)
|
||||
flag_bits = 0
|
||||
shift = 15
|
||||
|
||||
flag_bits |= (1 << shift)
|
||||
if proptype != 'bit':
|
||||
getattr(raw_bytes, 'write_' + proptype)(val)
|
||||
|
||||
shift -= 1
|
||||
|
||||
flags.append(flag_bits)
|
||||
result = AMQPWriter()
|
||||
for flag_bits in flags:
|
||||
result.write_short(flag_bits)
|
||||
result.write(raw_bytes.getvalue())
|
||||
|
||||
return result.getvalue()
|
||||
252
awx/lib/site-packages/amqp/transport.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
Read/Write AMQP frames over network transports.
|
||||
|
||||
2009-01-14 Barry Pederson <bp@barryp.org>
|
||||
|
||||
"""
|
||||
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
|
||||
#
|
||||
# This library is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU Lesser General Public
|
||||
# License as published by the Free Software Foundation; either
|
||||
# version 2.1 of the License, or (at your option) any later version.
|
||||
#
|
||||
# This library is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# Lesser General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Lesser General Public
|
||||
# License along with this library; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
|
||||
from __future__ import absolute_import
|
||||
|
||||
import errno
|
||||
import re
|
||||
import socket
|
||||
|
||||
# Jython does not have this attribute
|
||||
try:
|
||||
from socket import SOL_TCP
|
||||
except ImportError: # pragma: no cover
|
||||
from socket import IPPROTO_TCP as SOL_TCP # noqa
|
||||
|
||||
#
|
||||
# See if Python 2.6+ SSL support is available
|
||||
#
|
||||
try:
|
||||
import ssl
|
||||
HAVE_PY26_SSL = True
|
||||
except:
|
||||
HAVE_PY26_SSL = False
|
||||
|
||||
try:
|
||||
bytes
|
||||
except:
|
||||
# Python 2.5 and lower
|
||||
bytes = str
|
||||
|
||||
from struct import pack, unpack
|
||||
|
||||
from .exceptions import AMQPError
|
||||
|
||||
AMQP_PORT = 5672
|
||||
|
||||
# Yes, Advanced Message Queuing Protocol Protocol is redundant
|
||||
AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x00\x09'.encode('latin_1')
|
||||
|
||||
# Match things like: [fe80::1]:5432, from RFC 2732
|
||||
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')
|
||||
|
||||
|
||||
class _AbstractTransport(object):
|
||||
"""Common superclass for TCP and SSL transports"""
|
||||
|
||||
def __init__(self, host, connect_timeout):
|
||||
msg = 'socket.getaddrinfo() for %s returned an empty list' % host
|
||||
port = AMQP_PORT
|
||||
|
||||
m = IPV6_LITERAL.match(host)
|
||||
if m:
|
||||
host = m.group(1)
|
||||
if m.group(2):
|
||||
port = int(m.group(2))
|
||||
else:
|
||||
if ':' in host:
|
||||
host, port = host.rsplit(':', 1)
|
||||
port = int(port)
|
||||
|
||||
self.sock = None
|
||||
last_err = None
|
||||
for res in socket.getaddrinfo(host, port, 0,
|
||||
socket.SOCK_STREAM, SOL_TCP):
|
||||
af, socktype, proto, canonname, sa = res
|
||||
try:
|
||||
self.sock = socket.socket(af, socktype, proto)
|
||||
self.sock.settimeout(connect_timeout)
|
||||
self.sock.connect(sa)
|
||||
except socket.error, msg:
|
||||
self.sock.close()
|
||||
self.sock = None
|
||||
last_err = msg
|
||||
continue
|
||||
break
|
||||
|
||||
if not self.sock:
|
||||
# Didn't connect, return the most recent error message
|
||||
raise socket.error(last_err)
|
||||
|
||||
self.sock.settimeout(None)
|
||||
self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
|
||||
self._setup_transport()
|
||||
|
||||
self._write(AMQP_PROTOCOL_HEADER)
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
self.close()
|
||||
except socket.error:
|
||||
pass
|
||||
finally:
|
||||
self.sock = None
|
||||
|
||||
def _read(self, n, initial=False):
|
||||
"""Read exactly n bytes from the peer"""
|
||||
raise NotImplementedError('Must be overridden in subclass')
|
||||
|
||||
def _setup_transport(self):
|
||||
"""Do any additional initialization of the class (used
|
||||
by the subclasses)."""
|
||||
pass
|
||||
|
||||
def _shutdown_transport(self):
|
||||
"""Do any preliminary work in shutting down the connection."""
|
||||
pass
|
||||
|
||||
def _write(self, s):
|
||||
"""Completely write a string to the peer."""
|
||||
raise NotImplementedError('Must be overridden in subclass')
|
||||
|
||||
def close(self):
|
||||
if self.sock is not None:
|
||||
self._shutdown_transport()
|
||||
# Call shutdown first to make sure that pending messages
|
||||
# reach the AMQP broker if the program exits after
|
||||
# calling this method.
|
||||
self.sock.shutdown(socket.SHUT_RDWR)
|
||||
self.sock.close()
|
||||
self.sock = None
|
||||
|
||||
def read_frame(self):
|
||||
"""Read an AMQP frame."""
|
||||
frame_type, channel, size = unpack('>BHI', self._read(7, True))
|
||||
payload = self._read(size)
|
||||
ch = ord(self._read(1))
|
||||
if ch == 206: # '\xce'
|
||||
return frame_type, channel, payload
|
||||
else:
|
||||
raise AMQPError(
|
||||
'Framing Error, received 0x%02x while expecting 0xce' % ch)
|
||||
|
||||
def write_frame(self, frame_type, channel, payload):
|
||||
"""Write out an AMQP frame."""
|
||||
size = len(payload)
|
||||
self._write(
|
||||
pack('>BHI%dsB' % size, frame_type, channel, size, payload, 0xce),
|
||||
)
|
||||
|
||||
|
||||
class SSLTransport(_AbstractTransport):
|
||||
"""Transport that works over SSL"""
|
||||
|
||||
def __init__(self, host, connect_timeout, ssl):
|
||||
if isinstance(ssl, dict):
|
||||
self.sslopts = ssl
|
||||
self.sslobj = None
|
||||
super(SSLTransport, self).__init__(host, connect_timeout)
|
||||
|
||||
def _setup_transport(self):
|
||||
"""Wrap the socket in an SSL object, either the
|
||||
new Python 2.6 version, or the older Python 2.5 and
|
||||
lower version."""
|
||||
if HAVE_PY26_SSL:
|
||||
if hasattr(self, 'sslopts'):
|
||||
self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
|
||||
else:
|
||||
self.sslobj = ssl.wrap_socket(self.sock)
|
||||
self.sslobj.do_handshake()
|
||||
else:
|
||||
self.sslobj = socket.ssl(self.sock)
|
||||
|
||||
def _shutdown_transport(self):
|
||||
"""Unwrap a Python 2.6 SSL socket, so we can call shutdown()"""
|
||||
if HAVE_PY26_SSL and (self.sslobj is not None):
|
||||
self.sock = self.sslobj.unwrap()
|
||||
self.sslobj = None
|
||||
|
||||
def _read(self, n, initial=False):
|
||||
"""It seems that SSL Objects read() method may not supply as much
|
||||
as you're asking for, at least with extremely large messages.
|
||||
somewhere > 16K - found this in the test_channel.py test_large
|
||||
unittest."""
|
||||
result = ''
|
||||
|
||||
while len(result) < n:
|
||||
try:
|
||||
s = self.sslobj.read(n - len(result))
|
||||
except socket.error, exc:
|
||||
if not initial and exc.errno in (errno.EAGAIN, errno.EINTR):
|
||||
continue
|
||||
raise
|
||||
if not s:
|
||||
raise IOError('Socket closed')
|
||||
result += s
|
||||
|
||||
return result
|
||||
|
||||
def _write(self, s):
|
||||
"""Write a string out to the SSL socket fully."""
|
||||
while s:
|
||||
n = self.sslobj.write(s)
|
||||
if not n:
|
||||
raise IOError('Socket closed')
|
||||
s = s[n:]
|
||||
|
||||
|
||||
class TCPTransport(_AbstractTransport):
|
||||
"""Transport that deals directly with TCP socket."""
|
||||
|
||||
def _setup_transport(self):
|
||||
"""Setup to _write() directly to the socket, and
|
||||
do our own buffered reads."""
|
||||
self._write = self.sock.sendall
|
||||
self._read_buffer = bytes()
|
||||
|
||||
def _read(self, n, initial=False):
|
||||
"""Read exactly n bytes from the socket"""
|
||||
while len(self._read_buffer) < n:
|
||||
try:
|
||||
s = self.sock.recv(65536)
|
||||
except socket.error, exc:
|
||||
if not initial and exc.errno in (errno.EAGAIN, errno.EINTR):
|
||||
continue
|
||||
raise
|
||||
if not s:
|
||||
raise IOError('Socket closed')
|
||||
self._read_buffer += s
|
||||
|
||||
result = self._read_buffer[:n]
|
||||
self._read_buffer = self._read_buffer[n:]
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def create_transport(host, connect_timeout, ssl=False):
|
||||
"""Given a few parameters from the Connection constructor,
|
||||
select and create a subclass of _AbstractTransport."""
|
||||
if ssl:
|
||||
return SSLTransport(host, connect_timeout, ssl)
|
||||
else:
|
||||
return TCPTransport(host, connect_timeout)
|
||||
142
awx/lib/site-packages/anyjson/__init__.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""Wraps the best available JSON implementation available in a common
|
||||
interface"""
|
||||
|
||||
import sys
|
||||
|
||||
VERSION = (0, 3, 3)
|
||||
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
|
||||
__author__ = "Rune Halvorsen"
|
||||
__contact__ = "runefh@gmail.com"
|
||||
__homepage__ = "http://bitbucket.org/runeh/anyjson/"
|
||||
__docformat__ = "restructuredtext"
|
||||
|
||||
# -eof meta-
|
||||
|
||||
#: The json implementation object. This is probably not useful to you,
|
||||
#: except to get the name of the implementation in use. The name is
|
||||
#: available through ``implementation.name``.
|
||||
implementation = None
|
||||
|
||||
# json.loads does not support buffer() objects,
|
||||
# so we use load() with a StringIO wrapper instead, which avoids a copy.
|
||||
if sys.version_info[0] == 3:
|
||||
from io import StringIO
|
||||
else:
|
||||
try:
|
||||
from cStringIO import StringIO # noqa
|
||||
except ImportError:
|
||||
from StringIO import StringIO # noqa
|
||||
|
||||
#: List of known json modules, and the names of their loads/dumps
|
||||
#: methods, as well as the exceptions they throw. Exception can be either
|
||||
#: an exception class or a string.
|
||||
_modules = [("yajl", "dumps", TypeError, "loads", ValueError, "load"),
|
||||
("jsonlib2", "write", "WriteError", "read", "ReadError", None),
|
||||
("jsonlib", "write", "WriteError", "read", "ReadError", None),
|
||||
("simplejson", "dumps", TypeError, "loads", ValueError, "load"),
|
||||
("json", "dumps", TypeError, "loads", ValueError, "load"),
|
||||
("django.utils.simplejson", "dumps", TypeError, "loads", ValueError, "load"),
|
||||
("cjson", "encode", "EncodeError", "decode", "DecodeError", None)
|
||||
]
|
||||
|
||||
_fields = ("modname", "encoder", "encerror",
|
||||
"decoder", "decerror", "filedecoder")
|
||||
|
||||
|
||||
class _JsonImplementation(object):
|
||||
"""Incapsulates a JSON implementation"""
|
||||
|
||||
def __init__(self, modspec):
|
||||
modinfo = dict(zip(_fields, modspec))
|
||||
|
||||
if modinfo["modname"] == "cjson":
|
||||
import warnings
|
||||
warnings.warn("cjson is deprecated! See http://pypi.python.org/pypi/python-cjson/1.0.5", DeprecationWarning)
|
||||
|
||||
# No try block. We want ImportError to propagate to the caller
|
||||
module = self._attempt_load(modinfo["modname"])
|
||||
|
||||
self.implementation = modinfo["modname"]
|
||||
self._encode = getattr(module, modinfo["encoder"])
|
||||
self._decode = getattr(module, modinfo["decoder"])
|
||||
fdec = modinfo["filedecoder"]
|
||||
self._filedecode = fdec and getattr(module, fdec)
|
||||
self._encode_error = modinfo["encerror"]
|
||||
self._decode_error = modinfo["decerror"]
|
||||
|
||||
if isinstance(modinfo["encerror"], basestring):
|
||||
self._encode_error = getattr(module, modinfo["encerror"])
|
||||
if isinstance(modinfo["decerror"], basestring):
|
||||
self._decode_error = getattr(module, modinfo["decerror"])
|
||||
|
||||
self.name = modinfo["modname"]
|
||||
|
||||
def __repr__(self):
|
||||
return "<_JsonImplementation instance using %s>" % self.name
|
||||
|
||||
def _attempt_load(self, modname):
|
||||
"""Attempt to load module name modname, returning it on success,
|
||||
raising ImportError if the module couldn't be imported"""
|
||||
__import__(modname)
|
||||
return sys.modules[modname]
|
||||
|
||||
def dumps(self, data):
|
||||
"""Serialize the datastructure to json. Returns a string. Raises
|
||||
TypeError if the object could not be serialized."""
|
||||
try:
|
||||
return self._encode(data)
|
||||
except self._encode_error, exc:
|
||||
raise TypeError, TypeError(*exc.args), sys.exc_info()[2]
|
||||
serialize = dumps
|
||||
|
||||
def loads(self, s):
|
||||
"""deserialize the string to python data types. Raises
|
||||
ValueError if the string could not be parsed."""
|
||||
# uses StringIO to support buffer objects.
|
||||
try:
|
||||
if self._filedecode and not isinstance(s, basestring):
|
||||
return self._filedecode(StringIO(s))
|
||||
return self._decode(s)
|
||||
except self._decode_error, exc:
|
||||
raise ValueError, ValueError(*exc.args), sys.exc_info()[2]
|
||||
deserialize = loads
|
||||
|
||||
|
||||
def force_implementation(modname):
|
||||
"""Forces anyjson to use a specific json module if it's available"""
|
||||
global implementation
|
||||
for name, spec in [(e[0], e) for e in _modules]:
|
||||
if name == modname:
|
||||
implementation = _JsonImplementation(spec)
|
||||
return
|
||||
raise ImportError("No module named: %s" % modname)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# If run as a script, we do nothing but print an error message.
|
||||
# We do NOT try to load a compatible module because that may throw an
|
||||
# exception, which renders the package uninstallable with easy_install
|
||||
# (It tries to execfile the script when installing, to make sure it works)
|
||||
print "Running anyjson as a stand alone script is not supported"
|
||||
sys.exit(1)
|
||||
else:
|
||||
for modspec in _modules:
|
||||
try:
|
||||
implementation = _JsonImplementation(modspec)
|
||||
break
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
raise ImportError("No supported JSON module found")
|
||||
|
||||
|
||||
def loads(value):
|
||||
"""Serialize the object to JSON."""
|
||||
return implementation.loads(value)
|
||||
deserialize = loads # compat
|
||||
|
||||
|
||||
def dumps(value):
|
||||
"""Deserialize JSON-encoded object to a Python object."""
|
||||
return implementation.dumps(value)
|
||||
serialize = dumps
|
||||
323
awx/lib/site-packages/billiard/__init__.py
Normal file
@@ -0,0 +1,323 @@
|
||||
"""Python multiprocessing fork with improvements and bugfixes"""
|
||||
#
|
||||
# Package analogous to 'threading.py' but using processes
|
||||
#
|
||||
# multiprocessing/__init__.py
|
||||
#
|
||||
# This package is intended to duplicate the functionality (and much of
|
||||
# the API) of threading.py but uses processes instead of threads. A
|
||||
# subpackage 'multiprocessing.dummy' has the same API but is a simple
|
||||
# wrapper for 'threading'.
|
||||
#
|
||||
# Try calling `multiprocessing.doc.main()` to read the html
|
||||
# documentation in a webbrowser.
|
||||
#
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
VERSION = (2, 7, 3, 28)
|
||||
__version__ = ".".join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
|
||||
__author__ = 'R Oudkerk / Python Software Foundation'
|
||||
__author_email__ = 'python-dev@python.org'
|
||||
__maintainer__ = 'Ask Solem'
|
||||
__contact__ = "ask@celeryproject.org"
|
||||
__homepage__ = "http://github.com/celery/billiard"
|
||||
__docformat__ = "restructuredtext"
|
||||
|
||||
# -eof meta-
|
||||
|
||||
__all__ = [
|
||||
'Process', 'current_process', 'active_children', 'freeze_support',
|
||||
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
|
||||
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
|
||||
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
|
||||
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
|
||||
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable',
|
||||
'forking_enable', 'forking_is_enabled'
|
||||
]
|
||||
|
||||
#
|
||||
# Imports
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from .exceptions import ( # noqa
|
||||
ProcessError,
|
||||
BufferTooShort,
|
||||
TimeoutError,
|
||||
AuthenticationError,
|
||||
TimeLimitExceeded,
|
||||
SoftTimeLimitExceeded,
|
||||
WorkerLostError,
|
||||
)
|
||||
from .process import Process, current_process, active_children
|
||||
from .util import SUBDEBUG, SUBWARNING
|
||||
|
||||
|
||||
def ensure_multiprocessing():
|
||||
from ._ext import ensure_multiprocessing
|
||||
return ensure_multiprocessing()
|
||||
|
||||
|
||||
W_NO_EXECV = """\
|
||||
force_execv is not supported as the billiard C extension \
|
||||
is not installed\
|
||||
"""
|
||||
|
||||
#
|
||||
# Definitions not depending on native semaphores
|
||||
#
|
||||
|
||||
|
||||
def Manager():
|
||||
'''
|
||||
Returns a manager associated with a running server process
|
||||
|
||||
The manager's methods such as `Lock()`, `Condition()` and `Queue()`
|
||||
can be used to create shared objects.
|
||||
'''
|
||||
from .managers import SyncManager
|
||||
m = SyncManager()
|
||||
m.start()
|
||||
return m
|
||||
|
||||
|
||||
def Pipe(duplex=True):
|
||||
'''
|
||||
Returns two connection objects connected by a pipe
|
||||
'''
|
||||
if sys.version_info[0] == 3:
|
||||
from multiprocessing.connection import Pipe
|
||||
else:
|
||||
from billiard._connection import Pipe
|
||||
return Pipe(duplex)
|
||||
|
||||
|
||||
def cpu_count():
|
||||
'''
|
||||
Returns the number of CPUs in the system
|
||||
'''
|
||||
if sys.platform == 'win32':
|
||||
try:
|
||||
num = int(os.environ['NUMBER_OF_PROCESSORS'])
|
||||
except (ValueError, KeyError):
|
||||
num = 0
|
||||
elif 'bsd' in sys.platform or sys.platform == 'darwin':
|
||||
comm = '/sbin/sysctl -n hw.ncpu'
|
||||
if sys.platform == 'darwin':
|
||||
comm = '/usr' + comm
|
||||
try:
|
||||
with os.popen(comm) as p:
|
||||
num = int(p.read())
|
||||
except ValueError:
|
||||
num = 0
|
||||
else:
|
||||
try:
|
||||
num = os.sysconf('SC_NPROCESSORS_ONLN')
|
||||
except (ValueError, OSError, AttributeError):
|
||||
num = 0
|
||||
|
||||
if num >= 1:
|
||||
return num
|
||||
else:
|
||||
raise NotImplementedError('cannot determine number of cpus')
|
||||
|
||||
|
||||
def freeze_support():
|
||||
'''
|
||||
Check whether this is a fake forked process in a frozen executable.
|
||||
If so then run code specified by commandline and exit.
|
||||
'''
|
||||
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
|
||||
from .forking import freeze_support
|
||||
freeze_support()
|
||||
|
||||
|
||||
def get_logger():
|
||||
'''
|
||||
Return package logger -- if it does not already exist then it is created
|
||||
'''
|
||||
from .util import get_logger
|
||||
return get_logger()
|
||||
|
||||
|
||||
def log_to_stderr(level=None):
|
||||
'''
|
||||
Turn on logging and add a handler which prints to stderr
|
||||
'''
|
||||
from .util import log_to_stderr
|
||||
return log_to_stderr(level)
|
||||
|
||||
|
||||
def allow_connection_pickling():
|
||||
'''
|
||||
Install support for sending connections and sockets between processes
|
||||
'''
|
||||
from . import reduction # noqa
|
||||
|
||||
#
|
||||
# Definitions depending on native semaphores
|
||||
#
|
||||
|
||||
|
||||
def Lock():
|
||||
'''
|
||||
Returns a non-recursive lock object
|
||||
'''
|
||||
from .synchronize import Lock
|
||||
return Lock()
|
||||
|
||||
|
||||
def RLock():
|
||||
'''
|
||||
Returns a recursive lock object
|
||||
'''
|
||||
from .synchronize import RLock
|
||||
return RLock()
|
||||
|
||||
|
||||
def Condition(lock=None):
|
||||
'''
|
||||
Returns a condition object
|
||||
'''
|
||||
from .synchronize import Condition
|
||||
return Condition(lock)
|
||||
|
||||
|
||||
def Semaphore(value=1):
|
||||
'''
|
||||
Returns a semaphore object
|
||||
'''
|
||||
from .synchronize import Semaphore
|
||||
return Semaphore(value)
|
||||
|
||||
|
||||
def BoundedSemaphore(value=1):
|
||||
'''
|
||||
Returns a bounded semaphore object
|
||||
'''
|
||||
from .synchronize import BoundedSemaphore
|
||||
return BoundedSemaphore(value)
|
||||
|
||||
|
||||
def Event():
|
||||
'''
|
||||
Returns an event object
|
||||
'''
|
||||
from .synchronize import Event
|
||||
return Event()
|
||||
|
||||
|
||||
def Queue(maxsize=0):
|
||||
'''
|
||||
Returns a queue object
|
||||
'''
|
||||
from .queues import Queue
|
||||
return Queue(maxsize)
|
||||
|
||||
|
||||
def JoinableQueue(maxsize=0):
|
||||
'''
|
||||
Returns a queue object
|
||||
'''
|
||||
from .queues import JoinableQueue
|
||||
return JoinableQueue(maxsize)
|
||||
|
||||
|
||||
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
|
||||
'''
|
||||
Returns a process pool object
|
||||
'''
|
||||
from .pool import Pool
|
||||
return Pool(processes, initializer, initargs, maxtasksperchild)
|
||||
|
||||
|
||||
def RawValue(typecode_or_type, *args):
|
||||
'''
|
||||
Returns a shared object
|
||||
'''
|
||||
from .sharedctypes import RawValue
|
||||
return RawValue(typecode_or_type, *args)
|
||||
|
||||
|
||||
def RawArray(typecode_or_type, size_or_initializer):
|
||||
'''
|
||||
Returns a shared array
|
||||
'''
|
||||
from .sharedctypes import RawArray
|
||||
return RawArray(typecode_or_type, size_or_initializer)
|
||||
|
||||
|
||||
def Value(typecode_or_type, *args, **kwds):
|
||||
'''
|
||||
Returns a synchronized shared object
|
||||
'''
|
||||
from .sharedctypes import Value
|
||||
return Value(typecode_or_type, *args, **kwds)
|
||||
|
||||
|
||||
def Array(typecode_or_type, size_or_initializer, **kwds):
|
||||
'''
|
||||
Returns a synchronized shared array
|
||||
'''
|
||||
from .sharedctypes import Array
|
||||
return Array(typecode_or_type, size_or_initializer, **kwds)
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
def set_executable(executable):
|
||||
'''
|
||||
Sets the path to a python.exe or pythonw.exe binary used to run
|
||||
child processes on Windows instead of sys.executable.
|
||||
Useful for people embedding Python.
|
||||
'''
|
||||
from .forking import set_executable
|
||||
set_executable(executable)
|
||||
|
||||
|
||||
def forking_is_enabled():
|
||||
'''
|
||||
Returns a boolean value indicating whether billiard is
|
||||
currently set to create child processes by forking the current
|
||||
python process rather than by starting a new instance of python.
|
||||
|
||||
On Windows this always returns `False`. On Unix it returns `True` by
|
||||
default.
|
||||
'''
|
||||
from . import forking
|
||||
return forking._forking_is_enabled
|
||||
|
||||
|
||||
def forking_enable(value):
|
||||
'''
|
||||
Enable/disable creation of child process by forking the current process.
|
||||
|
||||
`value` should be a boolean value. If `value` is true then
|
||||
forking is enabled. If `value` is false then forking is disabled.
|
||||
On systems with `os.fork()` forking is enabled by default, and on
|
||||
other systems it is always disabled.
|
||||
'''
|
||||
if not value:
|
||||
from ._ext import supports_exec
|
||||
if supports_exec:
|
||||
from . import forking
|
||||
if value and not hasattr(os, 'fork'):
|
||||
raise ValueError('os.fork() not found')
|
||||
forking._forking_is_enabled = bool(value)
|
||||
if not value:
|
||||
os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
|
||||
else:
|
||||
warnings.warn(RuntimeWarning(W_NO_EXECV))
|
||||
if os.environ.get("MULTIPROCESSING_FORKING_DISABLE"):
|
||||
forking_enable(False)
|
||||
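Taken together, the module above re-exports a multiprocessing-compatible surface under the billiard name. A minimal, hypothetical usage sketch follows; the square() helper and the pool size of 2 are illustrative and not part of the vendored code.

import billiard


def square(x):
    # Any picklable callable works as a task function.
    return x * x


if __name__ == '__main__':
    billiard.freeze_support()          # no-op unless running from a frozen Windows exe
    pool = billiard.Pool(processes=2)  # delegates to billiard.pool.Pool
    try:
        print(pool.map(square, range(10)))
    finally:
        pool.close()
        pool.join()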
473
awx/lib/site-packages/billiard/_connection.py
Normal file
@@ -0,0 +1,473 @@
|
||||
#
|
||||
# A higher level module for using sockets (or Windows named pipes)
|
||||
#
|
||||
# multiprocessing/connection.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
__all__ = ['Client', 'Listener', 'Pipe']
|
||||
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import errno
|
||||
import time
|
||||
import tempfile
|
||||
import itertools
|
||||
|
||||
from . import AuthenticationError
|
||||
from ._ext import _billiard, win32
|
||||
from .compat import get_errno
|
||||
from .util import get_temp_dir, Finalize, sub_debug, debug
|
||||
from .forking import duplicate, close
|
||||
from .compat import bytes
|
||||
|
||||
try:
|
||||
WindowsError = WindowsError # noqa
|
||||
except NameError:
|
||||
WindowsError = None # noqa
|
||||
|
||||
|
||||
# global set later
|
||||
xmlrpclib = None
|
||||
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
BUFSIZE = 8192
|
||||
# A very generous timeout when it comes to local connections...
|
||||
CONNECTION_TIMEOUT = 20.
|
||||
|
||||
_mmap_counter = itertools.count()
|
||||
|
||||
default_family = 'AF_INET'
|
||||
families = ['AF_INET']
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
default_family = 'AF_UNIX'
|
||||
families += ['AF_UNIX']
|
||||
|
||||
if sys.platform == 'win32':
|
||||
default_family = 'AF_PIPE'
|
||||
families += ['AF_PIPE']
|
||||
|
||||
|
||||
def _init_timeout(timeout=CONNECTION_TIMEOUT):
|
||||
return time.time() + timeout
|
||||
|
||||
|
||||
def _check_timeout(t):
|
||||
return time.time() > t
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
def arbitrary_address(family):
|
||||
'''
|
||||
Return an arbitrary free address for the given family
|
||||
'''
|
||||
if family == 'AF_INET':
|
||||
return ('localhost', 0)
|
||||
elif family == 'AF_UNIX':
|
||||
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
|
||||
elif family == 'AF_PIPE':
|
||||
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
|
||||
(os.getpid(), _mmap_counter.next()))
|
||||
else:
|
||||
raise ValueError('unrecognized family')
|
||||
|
||||
|
||||
def address_type(address):
|
||||
'''
|
||||
Return the type of the address
|
||||
|
||||
This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
|
||||
'''
|
||||
if type(address) == tuple:
|
||||
return 'AF_INET'
|
||||
elif type(address) is str and address.startswith('\\\\'):
|
||||
return 'AF_PIPE'
|
||||
elif type(address) is str:
|
||||
return 'AF_UNIX'
|
||||
else:
|
||||
raise ValueError('address type of %r unrecognized' % address)
|
||||
|
||||
#
|
||||
# Public functions
|
||||
#
|
||||
|
||||
|
||||
class Listener(object):
|
||||
'''
|
||||
Returns a listener object.
|
||||
|
||||
This is a wrapper for a bound socket which is 'listening' for
|
||||
connections, or for a Windows named pipe.
|
||||
'''
|
||||
def __init__(self, address=None, family=None, backlog=1, authkey=None):
|
||||
family = (family or
|
||||
(address and address_type(address)) or
|
||||
default_family)
|
||||
address = address or arbitrary_address(family)
|
||||
|
||||
if family == 'AF_PIPE':
|
||||
self._listener = PipeListener(address, backlog)
|
||||
else:
|
||||
self._listener = SocketListener(address, family, backlog)
|
||||
|
||||
if authkey is not None and not isinstance(authkey, bytes):
|
||||
raise TypeError('authkey should be a byte string')
|
||||
|
||||
self._authkey = authkey
|
||||
|
||||
def accept(self):
|
||||
'''
|
||||
Accept a connection on the bound socket or named pipe of `self`.
|
||||
|
||||
Returns a `Connection` object.
|
||||
'''
|
||||
if self._listener is None:
|
||||
raise IOError('listener is closed')
|
||||
c = self._listener.accept()
|
||||
if self._authkey:
|
||||
deliver_challenge(c, self._authkey)
|
||||
answer_challenge(c, self._authkey)
|
||||
return c
|
||||
|
||||
def close(self):
|
||||
'''
|
||||
Close the bound socket or named pipe of `self`.
|
||||
'''
|
||||
if self._listener is not None:
|
||||
self._listener.close()
|
||||
self._listener = None
|
||||
|
||||
address = property(lambda self: self._listener._address)
|
||||
last_accepted = property(lambda self: self._listener._last_accepted)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_args):
|
||||
self.close()
|
||||
|
||||
|
||||
def Client(address, family=None, authkey=None):
|
||||
'''
|
||||
Returns a connection to the address of a `Listener`
|
||||
'''
|
||||
family = family or address_type(address)
|
||||
if family == 'AF_PIPE':
|
||||
c = PipeClient(address)
|
||||
else:
|
||||
c = SocketClient(address)
|
||||
|
||||
if authkey is not None and not isinstance(authkey, bytes):
|
||||
raise TypeError('authkey should be a byte string')
|
||||
|
||||
if authkey is not None:
|
||||
answer_challenge(c, authkey)
|
||||
deliver_challenge(c, authkey)
|
||||
|
||||
return c
|
||||
|
||||
|
||||
if sys.platform != 'win32':
|
||||
|
||||
def Pipe(duplex=True):
|
||||
'''
|
||||
Returns pair of connection objects at either end of a pipe
|
||||
'''
|
||||
if duplex:
|
||||
s1, s2 = socket.socketpair()
|
||||
c1 = _billiard.Connection(os.dup(s1.fileno()))
|
||||
c2 = _billiard.Connection(os.dup(s2.fileno()))
|
||||
s1.close()
|
||||
s2.close()
|
||||
else:
|
||||
fd1, fd2 = os.pipe()
|
||||
c1 = _billiard.Connection(fd1, writable=False)
|
||||
c2 = _billiard.Connection(fd2, readable=False)
|
||||
|
||||
return c1, c2
|
||||
|
||||
else:
|
||||
|
||||
def Pipe(duplex=True): # noqa
|
||||
'''
|
||||
Returns pair of connection objects at either end of a pipe
|
||||
'''
|
||||
address = arbitrary_address('AF_PIPE')
|
||||
if duplex:
|
||||
openmode = win32.PIPE_ACCESS_DUPLEX
|
||||
access = win32.GENERIC_READ | win32.GENERIC_WRITE
|
||||
obsize, ibsize = BUFSIZE, BUFSIZE
|
||||
else:
|
||||
openmode = win32.PIPE_ACCESS_INBOUND
|
||||
access = win32.GENERIC_WRITE
|
||||
obsize, ibsize = 0, BUFSIZE
|
||||
|
||||
h1 = win32.CreateNamedPipe(
|
||||
address, openmode,
|
||||
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
|
||||
win32.PIPE_WAIT,
|
||||
1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
|
||||
)
|
||||
h2 = win32.CreateFile(
|
||||
address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
|
||||
)
|
||||
win32.SetNamedPipeHandleState(
|
||||
h2, win32.PIPE_READMODE_MESSAGE, None, None
|
||||
)
|
||||
|
||||
try:
|
||||
win32.ConnectNamedPipe(h1, win32.NULL)
|
||||
except WindowsError, e:
|
||||
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
|
||||
raise
|
||||
|
||||
c1 = _billiard.PipeConnection(h1, writable=duplex)
|
||||
c2 = _billiard.PipeConnection(h2, readable=duplex)
|
||||
|
||||
return c1, c2
|
||||
|
||||
#
|
||||
# Definitions for connections based on sockets
|
||||
#
|
||||
|
||||
|
||||
class SocketListener(object):
|
||||
'''
|
||||
Representation of a socket which is bound to an address and listening
|
||||
'''
|
||||
def __init__(self, address, family, backlog=1):
|
||||
self._socket = socket.socket(getattr(socket, family))
|
||||
try:
|
||||
# SO_REUSEADDR has different semantics on Windows (Issue #2550).
|
||||
if os.name == 'posix':
|
||||
self._socket.setsockopt(socket.SOL_SOCKET,
|
||||
socket.SO_REUSEADDR, 1)
|
||||
self._socket.bind(address)
|
||||
self._socket.listen(backlog)
|
||||
self._address = self._socket.getsockname()
|
||||
except OSError:
|
||||
self._socket.close()
|
||||
raise
|
||||
self._family = family
|
||||
self._last_accepted = None
|
||||
|
||||
if family == 'AF_UNIX':
|
||||
self._unlink = Finalize(
|
||||
self, os.unlink, args=(address,), exitpriority=0
|
||||
)
|
||||
else:
|
||||
self._unlink = None
|
||||
|
||||
def accept(self):
|
||||
s, self._last_accepted = self._socket.accept()
|
||||
fd = duplicate(s.fileno())
|
||||
conn = _billiard.Connection(fd)
|
||||
s.close()
|
||||
return conn
|
||||
|
||||
def close(self):
|
||||
self._socket.close()
|
||||
if self._unlink is not None:
|
||||
self._unlink()
|
||||
|
||||
|
||||
def SocketClient(address):
|
||||
'''
|
||||
Return a connection object connected to the socket given by `address`
|
||||
'''
|
||||
family = address_type(address)
|
||||
s = socket.socket(getattr(socket, family))
|
||||
t = _init_timeout()
|
||||
|
||||
while 1:
|
||||
try:
|
||||
s.connect(address)
|
||||
except socket.error, exc:
|
||||
if get_errno(exc) != errno.ECONNREFUSED or _check_timeout(t):
|
||||
debug('failed to connect to address %s', address)
|
||||
raise
|
||||
time.sleep(0.01)
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise
|
||||
|
||||
fd = duplicate(s.fileno())
|
||||
conn = _billiard.Connection(fd)
|
||||
s.close()
|
||||
return conn
|
||||
|
||||
#
|
||||
# Definitions for connections based on named pipes
|
||||
#
|
||||
|
||||
if sys.platform == 'win32':
|
||||
|
||||
class PipeListener(object):
|
||||
'''
|
||||
Representation of a named pipe
|
||||
'''
|
||||
def __init__(self, address, backlog=None):
|
||||
self._address = address
|
||||
handle = win32.CreateNamedPipe(
|
||||
address, win32.PIPE_ACCESS_DUPLEX,
|
||||
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
|
||||
win32.PIPE_WAIT,
|
||||
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
|
||||
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
|
||||
)
|
||||
self._handle_queue = [handle]
|
||||
self._last_accepted = None
|
||||
|
||||
sub_debug('listener created with address=%r', self._address)
|
||||
|
||||
self.close = Finalize(
|
||||
self, PipeListener._finalize_pipe_listener,
|
||||
args=(self._handle_queue, self._address), exitpriority=0
|
||||
)
|
||||
|
||||
def accept(self):
|
||||
newhandle = win32.CreateNamedPipe(
|
||||
self._address, win32.PIPE_ACCESS_DUPLEX,
|
||||
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
|
||||
win32.PIPE_WAIT,
|
||||
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
|
||||
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
|
||||
)
|
||||
self._handle_queue.append(newhandle)
|
||||
handle = self._handle_queue.pop(0)
|
||||
try:
|
||||
win32.ConnectNamedPipe(handle, win32.NULL)
|
||||
except WindowsError, e:
|
||||
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
|
||||
raise
|
||||
return _billiard.PipeConnection(handle)
|
||||
|
||||
@staticmethod
|
||||
def _finalize_pipe_listener(queue, address):
|
||||
sub_debug('closing listener with address=%r', address)
|
||||
for handle in queue:
|
||||
close(handle)
|
||||
|
||||
def PipeClient(address):
|
||||
'''
|
||||
Return a connection object connected to the pipe given by `address`
|
||||
'''
|
||||
t = _init_timeout()
|
||||
while 1:
|
||||
try:
|
||||
win32.WaitNamedPipe(address, 1000)
|
||||
h = win32.CreateFile(
|
||||
address, win32.GENERIC_READ | win32.GENERIC_WRITE,
|
||||
0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL,
|
||||
)
|
||||
except WindowsError, e:
|
||||
if e.args[0] not in (
|
||||
win32.ERROR_SEM_TIMEOUT,
|
||||
win32.ERROR_PIPE_BUSY) or _check_timeout(t):
|
||||
raise
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise
|
||||
|
||||
win32.SetNamedPipeHandleState(
|
||||
h, win32.PIPE_READMODE_MESSAGE, None, None
|
||||
)
|
||||
return _billiard.PipeConnection(h)
|
||||
|
||||
#
|
||||
# Authentication stuff
|
||||
#
|
||||
|
||||
MESSAGE_LENGTH = 20
|
||||
|
||||
CHALLENGE = bytes('#CHALLENGE#', 'ascii')
|
||||
WELCOME = bytes('#WELCOME#', 'ascii')
|
||||
FAILURE = bytes('#FAILURE#', 'ascii')
|
||||
|
||||
|
||||
def deliver_challenge(connection, authkey):
|
||||
import hmac
|
||||
assert isinstance(authkey, bytes)
|
||||
message = os.urandom(MESSAGE_LENGTH)
|
||||
connection.send_bytes(CHALLENGE + message)
|
||||
digest = hmac.new(authkey, message).digest()
|
||||
response = connection.recv_bytes(256) # reject large message
|
||||
if response == digest:
|
||||
connection.send_bytes(WELCOME)
|
||||
else:
|
||||
connection.send_bytes(FAILURE)
|
||||
raise AuthenticationError('digest received was wrong')
|
||||
|
||||
|
||||
def answer_challenge(connection, authkey):
|
||||
import hmac
|
||||
assert isinstance(authkey, bytes)
|
||||
message = connection.recv_bytes(256) # reject large message
|
||||
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
|
||||
message = message[len(CHALLENGE):]
|
||||
digest = hmac.new(authkey, message).digest()
|
||||
connection.send_bytes(digest)
|
||||
response = connection.recv_bytes(256) # reject large message
|
||||
if response != WELCOME:
|
||||
raise AuthenticationError('digest sent was rejected')
|
||||
|
||||
#
|
||||
# Support for using xmlrpclib for serialization
|
||||
#
|
||||
|
||||
|
||||
class ConnectionWrapper(object):
|
||||
def __init__(self, conn, dumps, loads):
|
||||
self._conn = conn
|
||||
self._dumps = dumps
|
||||
self._loads = loads
|
||||
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
|
||||
obj = getattr(conn, attr)
|
||||
setattr(self, attr, obj)
|
||||
|
||||
def send(self, obj):
|
||||
s = self._dumps(obj)
|
||||
self._conn.send_bytes(s)
|
||||
|
||||
def recv(self):
|
||||
s = self._conn.recv_bytes()
|
||||
return self._loads(s)
|
||||
|
||||
|
||||
def _xml_dumps(obj):
|
||||
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')
|
||||
|
||||
|
||||
def _xml_loads(s):
|
||||
(obj,), method = xmlrpclib.loads(s.decode('utf8'))
|
||||
return obj
|
||||
|
||||
|
||||
class XmlListener(Listener):
|
||||
def accept(self):
|
||||
global xmlrpclib
|
||||
import xmlrpclib # noqa
|
||||
obj = Listener.accept(self)
|
||||
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
|
||||
|
||||
|
||||
def XmlClient(*args, **kwds):
|
||||
global xmlrpclib
|
||||
import xmlrpclib # noqa
|
||||
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
|
||||
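The Listener/Client pair above is the socket transport billiard uses on Python 2. Below is a hypothetical round-trip sketch run in a single process with a helper thread; the address, port and authkey value are illustrative. Note that the isinstance() check above compares the authkey against billiard.compat.bytes, so the key is built with that type.

import threading

from billiard._connection import Client, Listener
from billiard.compat import bytes as bytes_type

ADDRESS = ('localhost', 6001)         # illustrative host/port
AUTHKEY = bytes_type('not-a-secret')  # the authkey check above expects compat bytes


def serve(listener):
    conn = listener.accept()          # runs the HMAC challenge because authkey is set
    conn.send('pong')
    conn.close()


listener = Listener(ADDRESS, authkey=AUTHKEY)
server = threading.Thread(target=serve, args=(listener,))
server.start()

client = Client(ADDRESS, authkey=AUTHKEY)
print(client.recv())                  # -> 'pong'
client.close()
server.join()
listener.close()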
39
awx/lib/site-packages/billiard/_ext.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
supports_exec = True
|
||||
|
||||
if sys.platform.startswith("java"):
|
||||
_billiard = None
|
||||
else:
|
||||
try:
|
||||
import _billiard # noqa
|
||||
except ImportError:
|
||||
import _multiprocessing as _billiard # noqa
|
||||
supports_exec = False
|
||||
try:
|
||||
Connection = _billiard.Connection
|
||||
except AttributeError: # Py3
|
||||
from multiprocessing.connection import Connection # noqa
|
||||
|
||||
PipeConnection = getattr(_billiard, "PipeConnection", None)
|
||||
win32 = getattr(_billiard, "win32", None)
|
||||
|
||||
|
||||
def ensure_multiprocessing():
|
||||
if _billiard is None:
|
||||
raise NotImplementedError("multiprocessing not supported")
|
||||
|
||||
|
||||
def ensure_SemLock():
|
||||
try:
|
||||
from _billiard import SemLock # noqa
|
||||
except ImportError:
|
||||
try:
|
||||
from _multiprocessing import SemLock # noqa
|
||||
except ImportError:
|
||||
raise ImportError("""\
|
||||
This platform lacks a functioning sem_open implementation, therefore,
|
||||
the required synchronization primitives will not function,
|
||||
see issue 3770.""")
|
||||
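A hypothetical guard built on the two helpers above; require_process_support() is an illustrative name, not part of the commit.

from billiard import _ext


def require_process_support():
    # Raises NotImplementedError when no C extension is available (e.g. Jython).
    _ext.ensure_multiprocessing()
    # Raises ImportError when the platform lacks a working sem_open().
    _ext.ensure_SemLock()


if __name__ == '__main__':
    require_process_support()
    print('native multiprocessing primitives are available')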
116
awx/lib/site-packages/billiard/_win.py
Normal file
@@ -0,0 +1,116 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
billiard._win
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Windows utilities to terminate process groups.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
|
||||
# psutil is painfully slow in win32. So to avoid adding big
|
||||
# dependencies like pywin32 a ctypes based solution is preferred
|
||||
|
||||
# Code based on the winappdbg project http://winappdbg.sourceforge.net/
|
||||
# (BSD License)
|
||||
from ctypes import (
|
||||
byref, sizeof, windll,
|
||||
Structure, WinError, POINTER,
|
||||
c_size_t, c_char, c_void_p,
|
||||
)
|
||||
from ctypes.wintypes import DWORD, LONG
|
||||
|
||||
ERROR_NO_MORE_FILES = 18
|
||||
INVALID_HANDLE_VALUE = c_void_p(-1).value
|
||||
|
||||
|
||||
class PROCESSENTRY32(Structure):
|
||||
_fields_ = [
|
||||
('dwSize', DWORD),
|
||||
('cntUsage', DWORD),
|
||||
('th32ProcessID', DWORD),
|
||||
('th32DefaultHeapID', c_size_t),
|
||||
('th32ModuleID', DWORD),
|
||||
('cntThreads', DWORD),
|
||||
('th32ParentProcessID', DWORD),
|
||||
('pcPriClassBase', LONG),
|
||||
('dwFlags', DWORD),
|
||||
('szExeFile', c_char * 260),
|
||||
]
|
||||
LPPROCESSENTRY32 = POINTER(PROCESSENTRY32)
|
||||
|
||||
|
||||
def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0):
|
||||
hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags,
|
||||
th32ProcessID)
|
||||
if hSnapshot == INVALID_HANDLE_VALUE:
|
||||
raise WinError()
|
||||
return hSnapshot
|
||||
|
||||
|
||||
def Process32First(hSnapshot, pe=None):
|
||||
return _Process32n(windll.kernel32.Process32First, hSnapshot, pe)
|
||||
|
||||
|
||||
def Process32Next(hSnapshot, pe=None):
|
||||
return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe)
|
||||
|
||||
|
||||
def _Process32n(fun, hSnapshot, pe=None):
|
||||
if pe is None:
|
||||
pe = PROCESSENTRY32()
|
||||
pe.dwSize = sizeof(PROCESSENTRY32)
|
||||
success = fun(hSnapshot, byref(pe))
|
||||
if not success:
|
||||
if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES:
|
||||
return
|
||||
raise WinError()
|
||||
return pe
|
||||
|
||||
|
||||
def get_all_processes_pids():
|
||||
"""Return a dictionary with all processes pids as keys and their
|
||||
parents as values. Ignore processes with no parents.
|
||||
"""
|
||||
h = CreateToolhelp32Snapshot()
|
||||
parents = {}
|
||||
pe = Process32First(h)
|
||||
while pe:
|
||||
if pe.th32ParentProcessID:
|
||||
parents[pe.th32ProcessID] = pe.th32ParentProcessID
|
||||
pe = Process32Next(h, pe)
|
||||
|
||||
return parents
|
||||
|
||||
|
||||
def get_processtree_pids(pid, include_parent=True):
|
||||
"""Return a list with all the pids of a process tree"""
|
||||
parents = get_all_processes_pids()
|
||||
all_pids = parents.keys()
|
||||
pids = set([pid])
|
||||
while 1:
|
||||
pids_new = pids.copy()
|
||||
|
||||
for _pid in all_pids:
|
||||
if parents[_pid] in pids:
|
||||
pids_new.add(_pid)
|
||||
|
||||
if pids_new == pids:
|
||||
break
|
||||
|
||||
pids = pids_new.copy()
|
||||
|
||||
if not include_parent:
|
||||
pids.remove(pid)
|
||||
|
||||
return list(pids)
|
||||
|
||||
|
||||
def kill_processtree(pid, signum):
|
||||
"""Kill a process and all its descendants"""
|
||||
family_pids = get_processtree_pids(pid)
|
||||
|
||||
for _pid in family_pids:
|
||||
os.kill(_pid, signum)
|
||||
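A hypothetical, Windows-only sketch of the helpers above; the sleeping child command is illustrative.

import signal
import subprocess
import sys

if sys.platform == 'win32':
    from billiard._win import get_processtree_pids, kill_processtree

    child = subprocess.Popen(
        [sys.executable, '-c', 'import time; time.sleep(60)'])
    # The returned list includes child.pid itself plus any grandchildren.
    print(get_processtree_pids(child.pid))
    kill_processtree(child.pid, signal.SIGTERM)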
73
awx/lib/site-packages/billiard/common.py
Normal file
@@ -0,0 +1,73 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import signal
|
||||
import sys
|
||||
|
||||
from time import time
|
||||
|
||||
from .exceptions import RestartFreqExceeded
|
||||
|
||||
TERMSIGS = (
|
||||
'SIGHUP',
|
||||
'SIGQUIT',
|
||||
'SIGILL',
|
||||
'SIGTRAP',
|
||||
'SIGABRT',
|
||||
'SIGEMT',
|
||||
'SIGFPE',
|
||||
'SIGBUS',
|
||||
'SIGSEGV',
|
||||
'SIGSYS',
|
||||
'SIGPIPE',
|
||||
'SIGALRM',
|
||||
'SIGTERM',
|
||||
'SIGXCPU',
|
||||
'SIGXFSZ',
|
||||
'SIGVTALRM',
|
||||
'SIGPROF',
|
||||
'SIGUSR1',
|
||||
'SIGUSR2',
|
||||
)
|
||||
|
||||
|
||||
def _shutdown_cleanup(signum, frame):
|
||||
sys.exit(-(256 - signum))
|
||||
|
||||
|
||||
def reset_signals(handler=_shutdown_cleanup):
|
||||
for sig in TERMSIGS:
|
||||
try:
|
||||
signum = getattr(signal, sig)
|
||||
current = signal.getsignal(signum)
|
||||
if current is not None and current != signal.SIG_IGN:
|
||||
signal.signal(signum, handler)
|
||||
except (OSError, AttributeError, ValueError, RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
class restart_state(object):
|
||||
RestartFreqExceeded = RestartFreqExceeded
|
||||
|
||||
def __init__(self, maxR, maxT):
|
||||
self.maxR, self.maxT = maxR, maxT
|
||||
self.R, self.T = 0, None
|
||||
|
||||
def step(self, now=None):
|
||||
now = time() if now is None else now
|
||||
R = self.R
|
||||
if self.T and now - self.T >= self.maxT:
|
||||
# maxT passed, reset counter and time passed.
|
||||
self.T, self.R = now, 0
|
||||
elif self.maxR and self.R >= self.maxR:
|
||||
# verify that R has a value as the result handler
|
||||
# resets this when a job is accepted. If a job is accepted
|
||||
# the startup probably went fine (startup restart burst
|
||||
# protection)
|
||||
if self.R: # pragma: no cover
|
||||
pass
|
||||
self.R = 0 # reset in case someone catches the error
|
||||
raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT))
|
||||
# first run sets T
|
||||
if self.T is None:
|
||||
self.T = now
|
||||
self.R += 1
|
||||
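A hypothetical sketch of restart_state above: allow at most three restarts inside a ten second window, then give up. The limits and loop are illustrative.

from billiard.common import restart_state
from billiard.exceptions import RestartFreqExceeded

state = restart_state(maxR=3, maxT=10)

for attempt in range(5):
    try:
        state.step()   # raises once the restart frequency is exceeded
    except RestartFreqExceeded as exc:
        print('giving up: %s' % (exc, ))
        break
    print('restarting worker (attempt %d)' % (attempt + 1, ))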
48
awx/lib/site-packages/billiard/compat.py
Normal file
@@ -0,0 +1,48 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
import __builtin__
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
bytes = bytes
|
||||
else:
|
||||
try:
|
||||
_bytes = __builtin__.bytes
|
||||
except AttributeError:
|
||||
_bytes = str
|
||||
|
||||
class bytes(_bytes): # noqa
|
||||
|
||||
def __new__(cls, *args):
|
||||
if len(args) > 1:
|
||||
return _bytes(args[0]).encode(*args[1:])
|
||||
return _bytes(*args)
|
||||
|
||||
try:
|
||||
closerange = os.closerange
|
||||
except AttributeError:
|
||||
|
||||
def closerange(fd_low, fd_high): # noqa
|
||||
for fd in reversed(xrange(fd_low, fd_high)):
|
||||
try:
|
||||
os.close(fd)
|
||||
except OSError, exc:
|
||||
if exc.errno != errno.EBADF:
|
||||
raise
|
||||
|
||||
|
||||
def get_errno(exc):
|
||||
""":exc:`socket.error` and :exc:`IOError` first got
|
||||
the ``.errno`` attribute in Py2.7"""
|
||||
try:
|
||||
return exc.errno
|
||||
except AttributeError:
|
||||
try:
|
||||
# e.args = (errno, reason)
|
||||
if isinstance(exc.args, tuple) and len(exc.args) == 2:
|
||||
return exc.args[0]
|
||||
except AttributeError:
|
||||
pass
|
||||
return 0
|
||||
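A hypothetical Python 2 sketch of get_errno() above, which normalises the errno of older socket.error/IOError instances; the unreachable address is illustrative.

import errno
import socket

from billiard.compat import get_errno

try:
    socket.socket().connect(('127.0.0.1', 1))   # usually nothing listens here
except socket.error as exc:
    if get_errno(exc) == errno.ECONNREFUSED:
        print('connection refused, as expected')
    else:
        raise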
11
awx/lib/site-packages/billiard/connection.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
from multiprocessing import connection
|
||||
else:
|
||||
from billiard import _connection as connection # noqa
|
||||
|
||||
sys.modules[__name__] = connection
|
||||
167
awx/lib/site-packages/billiard/dummy/__init__.py
Normal file
@@ -0,0 +1,167 @@
|
||||
#
|
||||
# Support for the API of the multiprocessing package using threads
|
||||
#
|
||||
# multiprocessing/dummy/__init__.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions
|
||||
# are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# 2. Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
# 3. Neither the name of author nor the names of any contributors may be
|
||||
# used to endorse or promote products derived from this software
|
||||
# without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
# SUCH DAMAGE.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
__all__ = [
|
||||
'Process', 'current_process', 'active_children', 'freeze_support',
|
||||
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
|
||||
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
|
||||
]
|
||||
|
||||
#
|
||||
# Imports
|
||||
#
|
||||
|
||||
import threading
|
||||
import sys
|
||||
import weakref
|
||||
import array
|
||||
|
||||
from threading import Lock, RLock, Semaphore, BoundedSemaphore
|
||||
from threading import Event
|
||||
from Queue import Queue
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
from multiprocessing.connection import Pipe
|
||||
else:
|
||||
from billiard._connection import Pipe
|
||||
|
||||
|
||||
class DummyProcess(threading.Thread):
|
||||
|
||||
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
|
||||
threading.Thread.__init__(self, group, target, name, args, kwargs)
|
||||
self._pid = None
|
||||
self._children = weakref.WeakKeyDictionary()
|
||||
self._start_called = False
|
||||
self._parent = current_process()
|
||||
|
||||
def start(self):
|
||||
assert self._parent is current_process()
|
||||
self._start_called = True
|
||||
self._parent._children[self] = None
|
||||
threading.Thread.start(self)
|
||||
|
||||
@property
|
||||
def exitcode(self):
|
||||
if self._start_called and not self.is_alive():
|
||||
return 0
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
try:
|
||||
_Condition = threading._Condition
|
||||
except AttributeError: # Py3
|
||||
_Condition = threading.Condition # noqa
|
||||
|
||||
|
||||
class Condition(_Condition):
|
||||
if sys.version_info[0] == 3:
|
||||
notify_all = _Condition.notifyAll
|
||||
else:
|
||||
notify_all = _Condition.notifyAll.im_func
|
||||
|
||||
|
||||
Process = DummyProcess
|
||||
current_process = threading.currentThread
|
||||
current_process()._children = weakref.WeakKeyDictionary()
|
||||
|
||||
|
||||
def active_children():
|
||||
children = current_process()._children
|
||||
for p in list(children):
|
||||
if not p.is_alive():
|
||||
children.pop(p, None)
|
||||
return list(children)
|
||||
|
||||
|
||||
def freeze_support():
|
||||
pass
|
||||
|
||||
|
||||
class Namespace(object):
|
||||
|
||||
def __init__(self, **kwds):
|
||||
self.__dict__.update(kwds)
|
||||
|
||||
def __repr__(self):
|
||||
items = self.__dict__.items()
|
||||
temp = []
|
||||
for name, value in items:
|
||||
if not name.startswith('_'):
|
||||
temp.append('%s=%r' % (name, value))
|
||||
temp.sort()
|
||||
return 'Namespace(%s)' % str.join(', ', temp)
|
||||
|
||||
|
||||
dict = dict
|
||||
list = list
|
||||
|
||||
|
||||
def Array(typecode, sequence, lock=True):
|
||||
return array.array(typecode, sequence)
|
||||
|
||||
|
||||
class Value(object):
|
||||
|
||||
def __init__(self, typecode, value, lock=True):
|
||||
self._typecode = typecode
|
||||
self._value = value
|
||||
|
||||
def _get(self):
|
||||
return self._value
|
||||
|
||||
def _set(self, value):
|
||||
self._value = value
|
||||
value = property(_get, _set)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%r(%r, %r)>' % (type(self).__name__,
|
||||
self._typecode, self._value)
|
||||
|
||||
|
||||
def Manager():
|
||||
return sys.modules[__name__]
|
||||
|
||||
|
||||
def shutdown():
|
||||
pass
|
||||
|
||||
|
||||
def Pool(processes=None, initializer=None, initargs=()):
|
||||
from billiard.pool import ThreadPool
|
||||
return ThreadPool(processes, initializer, initargs)
|
||||
|
||||
JoinableQueue = Queue
|
||||
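billiard.dummy above keeps the process API but backs it with threads. A hypothetical sketch; the fetch() helper and its arguments are illustrative.

from billiard import dummy


def fetch(url):
    # Stand-in for I/O-bound work; no real network access happens here.
    return 'fetched %s' % (url, )


pool = dummy.Pool(4)   # actually a billiard.pool.ThreadPool
try:
    print(pool.map(fetch, ['a://one', 'a://two', 'a://three']))
finally:
    pool.close()
    pool.join()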
94
awx/lib/site-packages/billiard/dummy/connection.py
Normal file
@@ -0,0 +1,94 @@
|
||||
#
|
||||
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
|
||||
#
|
||||
# multiprocessing/dummy/connection.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions
|
||||
# are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# 2. Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
# 3. Neither the name of author nor the names of any contributors may be
|
||||
# used to endorse or promote products derived from this software
|
||||
# without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
# SUCH DAMAGE.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
__all__ = ['Client', 'Listener', 'Pipe']
|
||||
|
||||
from Queue import Queue
|
||||
|
||||
|
||||
families = [None]
|
||||
|
||||
|
||||
class Listener(object):
|
||||
|
||||
def __init__(self, address=None, family=None, backlog=1):
|
||||
self._backlog_queue = Queue(backlog)
|
||||
|
||||
def accept(self):
|
||||
return Connection(*self._backlog_queue.get())
|
||||
|
||||
def close(self):
|
||||
self._backlog_queue = None
|
||||
|
||||
address = property(lambda self: self._backlog_queue)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
self.close()
|
||||
|
||||
|
||||
def Client(address):
|
||||
_in, _out = Queue(), Queue()
|
||||
address.put((_out, _in))
|
||||
return Connection(_in, _out)
|
||||
|
||||
|
||||
def Pipe(duplex=True):
|
||||
a, b = Queue(), Queue()
|
||||
return Connection(a, b), Connection(b, a)
|
||||
|
||||
|
||||
class Connection(object):
|
||||
|
||||
def __init__(self, _in, _out):
|
||||
self._out = _out
|
||||
self._in = _in
|
||||
self.send = self.send_bytes = _out.put
|
||||
self.recv = self.recv_bytes = _in.get
|
||||
|
||||
def poll(self, timeout=0.0):
|
||||
if self._in.qsize() > 0:
|
||||
return True
|
||||
if timeout <= 0.0:
|
||||
return False
|
||||
self._in.not_empty.acquire()
|
||||
self._in.not_empty.wait(timeout)
|
||||
self._in.not_empty.release()
|
||||
return self._in.qsize() > 0
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
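A hypothetical sketch of the queue-backed Pipe above, which only connects endpoints inside one process.

from billiard.dummy.connection import Pipe

parent, child = Pipe()
child.send('ping')        # send/recv are plain Queue.put/Queue.get
print(parent.recv())      # -> 'ping'
print(parent.poll(0.1))   # False: nothing else is queued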
112
awx/lib/site-packages/billiard/einfo.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
class _Code(object):
|
||||
|
||||
def __init__(self, code):
|
||||
self.co_filename = code.co_filename
|
||||
self.co_name = code.co_name
|
||||
|
||||
|
||||
class _Frame(object):
|
||||
Code = _Code
|
||||
|
||||
def __init__(self, frame):
|
||||
self.f_globals = {
|
||||
"__file__": frame.f_globals.get("__file__", "__main__"),
|
||||
"__name__": frame.f_globals.get("__name__"),
|
||||
"__loader__": None,
|
||||
}
|
||||
self.f_locals = fl = {}
|
||||
try:
|
||||
fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"]
|
||||
except KeyError:
|
||||
pass
|
||||
self.f_code = self.Code(frame.f_code)
|
||||
self.f_lineno = frame.f_lineno
|
||||
|
||||
|
||||
class _Object(object):
|
||||
|
||||
def __init__(self, **kw):
|
||||
[setattr(self, k, v) for k, v in kw.iteritems()]
|
||||
|
||||
|
||||
class _Truncated(object):
|
||||
|
||||
def __init__(self):
|
||||
self.tb_lineno = -1
|
||||
self.tb_frame = _Object(
|
||||
f_globals={"__file__": "",
|
||||
"__name__": "",
|
||||
"__loader__": None},
|
||||
f_fileno=None,
|
||||
f_code=_Object(co_filename="...",
|
||||
co_name="[rest of traceback truncated]"),
|
||||
)
|
||||
self.tb_next = None
|
||||
|
||||
|
||||
class Traceback(object):
|
||||
Frame = _Frame
|
||||
|
||||
tb_frame = tb_lineno = tb_next = None
|
||||
max_frames = sys.getrecursionlimit() // 8
|
||||
|
||||
def __init__(self, tb, max_frames=None, depth=0):
|
||||
limit = self.max_frames = max_frames or self.max_frames
|
||||
self.tb_frame = self.Frame(tb.tb_frame)
|
||||
self.tb_lineno = tb.tb_lineno
|
||||
if tb.tb_next is not None:
|
||||
if depth <= limit:
|
||||
self.tb_next = Traceback(tb.tb_next, limit, depth + 1)
|
||||
else:
|
||||
self.tb_next = _Truncated()
|
||||
|
||||
|
||||
class ExceptionInfo(object):
|
||||
"""Exception wrapping an exception and its traceback.
|
||||
|
||||
:param exc_info: The exception info tuple as returned by
|
||||
:func:`sys.exc_info`.
|
||||
|
||||
"""
|
||||
|
||||
#: Exception type.
|
||||
type = None
|
||||
|
||||
#: Exception instance.
|
||||
exception = None
|
||||
|
||||
#: Pickleable traceback instance for use with :mod:`traceback`
|
||||
tb = None
|
||||
|
||||
#: String representation of the traceback.
|
||||
traceback = None
|
||||
|
||||
#: Set to true if this is an internal error.
|
||||
internal = False
|
||||
|
||||
def __init__(self, exc_info=None, internal=False):
|
||||
self.type, self.exception, tb = exc_info or sys.exc_info()
|
||||
try:
|
||||
self.tb = Traceback(tb)
|
||||
self.traceback = ''.join(
|
||||
traceback.format_exception(self.type, self.exception, tb),
|
||||
)
|
||||
self.internal = internal
|
||||
finally:
|
||||
del(tb)
|
||||
|
||||
def __str__(self):
|
||||
return self.traceback
|
||||
|
||||
def __repr__(self):
|
||||
return "<ExceptionInfo: %r>" % (self.exception, )
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
return self.type, self.exception, self.tb
|
||||
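A hypothetical sketch of ExceptionInfo above: capture the active exception as a pickle-friendly snapshot, much as a pool worker might before handing a failure back to its parent.

import pickle

from billiard.einfo import ExceptionInfo


def capture():
    try:
        1 / 0
    except Exception:
        return ExceptionInfo()   # wraps sys.exc_info() by default


einfo = capture()
print(einfo.type.__name__)       # -> 'ZeroDivisionError'
print(pickle.loads(pickle.dumps(einfo)).traceback == einfo.traceback)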
54
awx/lib/site-packages/billiard/exceptions.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
try:
|
||||
from multiprocessing import (
|
||||
ProcessError,
|
||||
BufferTooShort,
|
||||
TimeoutError,
|
||||
AuthenticationError,
|
||||
)
|
||||
except ImportError:
|
||||
class ProcessError(Exception): # noqa
|
||||
pass
|
||||
|
||||
class BufferTooShort(Exception): # noqa
|
||||
pass
|
||||
|
||||
class TimeoutError(Exception): # noqa
|
||||
pass
|
||||
|
||||
class AuthenticationError(Exception): # noqa
|
||||
pass
|
||||
|
||||
|
||||
class TimeLimitExceeded(Exception):
|
||||
"""The time limit has been exceeded and the job has been terminated."""
|
||||
|
||||
def __str__(self):
|
||||
return "TimeLimitExceeded%s" % (self.args, )
|
||||
|
||||
|
||||
class SoftTimeLimitExceeded(Exception):
|
||||
"""The soft time limit has been exceeded. This exception is raised
|
||||
to give the task a chance to clean up."""
|
||||
|
||||
def __str__(self):
|
||||
return "SoftTimeLimitExceeded%s" % (self.args, )
|
||||
|
||||
|
||||
class WorkerLostError(Exception):
|
||||
"""The worker processing a job has exited prematurely."""
|
||||
|
||||
|
||||
class Terminated(Exception):
|
||||
"""The worker processing a job has been terminated by user request."""
|
||||
|
||||
|
||||
class RestartFreqExceeded(Exception):
|
||||
"""Restarts too fast."""
|
||||
|
||||
|
||||
class CoroStop(Exception):
|
||||
"""Coroutine exit, as opposed to StopIteration which may
|
||||
mean it should be restarted."""
|
||||
pass
|
||||
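A hypothetical sketch of how task code might use the exceptions above; guarded() and the literal 42 are illustrative.

from billiard.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded


def guarded(work):
    try:
        return work()
    except SoftTimeLimitExceeded:
        # Soft limit: there is still time to clean up before the hard limit.
        return 'cleaned up after soft time limit'


print(guarded(lambda: 42))          # -> 42
print(str(TimeLimitExceeded(30)))   # -> 'TimeLimitExceeded(30,)'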
667
awx/lib/site-packages/billiard/forking.py
Normal file
@@ -0,0 +1,667 @@
|
||||
#
|
||||
# Module for starting a process object using os.fork() or CreateProcess()
|
||||
#
|
||||
# multiprocessing/forking.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import warnings
|
||||
|
||||
from ._ext import Connection, PipeConnection, win32
|
||||
from pickle import load, HIGHEST_PROTOCOL
|
||||
from billiard import util, process
|
||||
|
||||
__all__ = ['Popen', 'assert_spawning', 'exit',
|
||||
'duplicate', 'close', 'ForkingPickler']
|
||||
|
||||
try:
|
||||
WindowsError = WindowsError # noqa
|
||||
except NameError:
|
||||
class WindowsError(Exception): # noqa
|
||||
pass
|
||||
|
||||
W_OLD_DJANGO_LAYOUT = """\
|
||||
Will add directory %r to path! This is necessary to accommodate \
|
||||
pre-Django 1.4 layouts using setup_environ.
|
||||
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
|
||||
environment variable.
|
||||
"""
|
||||
|
||||
#
|
||||
# Choose whether to do a fork or spawn (fork+exec) on Unix.
|
||||
# This affects how some shared resources should be created.
|
||||
#
|
||||
|
||||
_forking_is_enabled = sys.platform != 'win32'
|
||||
|
||||
#
|
||||
# Check that the current thread is spawning a child process
|
||||
#
|
||||
|
||||
|
||||
def assert_spawning(self):
|
||||
if not Popen.thread_is_spawning():
|
||||
raise RuntimeError(
|
||||
'%s objects should only be shared between processes'
|
||||
' through inheritance' % type(self).__name__
|
||||
)
|
||||
|
||||
#
|
||||
# Try making some callable types picklable
|
||||
#
|
||||
from pickle import Pickler
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
from copyreg import dispatch_table
|
||||
|
||||
class ForkingPickler(Pickler):
|
||||
_extra_reducers = {}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
Pickler.__init__(self, *args, **kwargs)
|
||||
self.dispatch_table = dispatch_table.copy()
|
||||
self.dispatch_table.update(self._extra_reducers)
|
||||
|
||||
@classmethod
|
||||
def register(cls, type, reduce):
|
||||
cls._extra_reducers[type] = reduce
|
||||
|
||||
def _reduce_method(m):
|
||||
if m.__self__ is None:
|
||||
return getattr, (m.__class__, m.__func__.__name__)
|
||||
else:
|
||||
return getattr, (m.__self__, m.__func__.__name__)
|
||||
|
||||
class _C:
|
||||
def f(self):
|
||||
pass
|
||||
ForkingPickler.register(type(_C().f), _reduce_method)
|
||||
|
||||
else:
|
||||
|
||||
class ForkingPickler(Pickler): # noqa
|
||||
dispatch = Pickler.dispatch.copy()
|
||||
|
||||
@classmethod
|
||||
def register(cls, type, reduce):
|
||||
def dispatcher(self, obj):
|
||||
rv = reduce(obj)
|
||||
self.save_reduce(obj=obj, *rv)
|
||||
cls.dispatch[type] = dispatcher
|
||||
|
||||
def _reduce_method(m): # noqa
|
||||
if m.im_self is None:
|
||||
return getattr, (m.im_class, m.im_func.func_name)
|
||||
else:
|
||||
return getattr, (m.im_self, m.im_func.func_name)
|
||||
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
|
||||
|
||||
|
||||
def _reduce_method_descriptor(m):
|
||||
return getattr, (m.__objclass__, m.__name__)
|
||||
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
|
||||
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
|
||||
|
||||
try:
|
||||
from functools import partial
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
|
||||
def _reduce_partial(p):
|
||||
return _rebuild_partial, (p.func, p.args, p.keywords or {})
|
||||
|
||||
def _rebuild_partial(func, args, keywords):
|
||||
return partial(func, *args, **keywords)
|
||||
ForkingPickler.register(partial, _reduce_partial)
|
||||
|
||||
|
||||
def dump(obj, file, protocol=None):
|
||||
ForkingPickler(file, protocol).dump(obj)
|
||||
|
||||
#
|
||||
# Make (Pipe)Connection picklable
|
||||
#
|
||||
|
||||
|
||||
def reduce_connection(conn):
|
||||
# XXX check not necessary since only registered with ForkingPickler
|
||||
if not Popen.thread_is_spawning():
|
||||
raise RuntimeError(
|
||||
'By default %s objects can only be shared between processes\n'
|
||||
'using inheritance' % type(conn).__name__
|
||||
)
|
||||
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
|
||||
conn.readable, conn.writable)
|
||||
|
||||
ForkingPickler.register(Connection, reduce_connection)
|
||||
if PipeConnection:
|
||||
ForkingPickler.register(PipeConnection, reduce_connection)
|
||||
|
||||
|
||||
#
|
||||
# Unix
|
||||
#
|
||||
|
||||
if sys.platform != 'win32':
|
||||
import thread
|
||||
import select
|
||||
|
||||
WINEXE = False
|
||||
WINSERVICE = False
|
||||
|
||||
exit = os._exit
|
||||
duplicate = os.dup
|
||||
close = os.close
|
||||
_select = util._eintr_retry(select.select)
|
||||
|
||||
#
|
||||
# We define a Popen class similar to the one from subprocess, but
|
||||
# whose constructor takes a process object as its argument.
|
||||
#
|
||||
|
||||
class Popen(object):
|
||||
|
||||
_tls = thread._local()
|
||||
|
||||
def __init__(self, process_obj):
|
||||
_Django_old_layout_hack__save()
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
self.returncode = None
|
||||
r, w = os.pipe()
|
||||
self.sentinel = r
|
||||
|
||||
if _forking_is_enabled:
|
||||
self.pid = os.fork()
|
||||
if self.pid == 0:
|
||||
os.close(r)
|
||||
if 'random' in sys.modules:
|
||||
import random
|
||||
random.seed()
|
||||
code = process_obj._bootstrap()
|
||||
os._exit(code)
|
||||
else:
|
||||
from_parent_fd, to_child_fd = os.pipe()
|
||||
cmd = get_command_line() + [str(from_parent_fd)]
|
||||
|
||||
self.pid = os.fork()
|
||||
if self.pid == 0:
|
||||
os.close(r)
|
||||
os.close(to_child_fd)
|
||||
os.execv(sys.executable, cmd)
|
||||
|
||||
# send information to child
|
||||
prep_data = get_preparation_data(process_obj._name)
|
||||
os.close(from_parent_fd)
|
||||
to_child = os.fdopen(to_child_fd, 'wb')
|
||||
Popen._tls.process_handle = self.pid
|
||||
try:
|
||||
dump(prep_data, to_child, HIGHEST_PROTOCOL)
|
||||
dump(process_obj, to_child, HIGHEST_PROTOCOL)
|
||||
finally:
|
||||
del(Popen._tls.process_handle)
|
||||
to_child.close()
|
||||
|
||||
# `w` will be closed when the child exits, at which point `r`
|
||||
# will become ready for reading (using e.g. select()).
|
||||
os.close(w)
|
||||
util.Finalize(self, os.close, (r,))
|
||||
|
||||
def poll(self, flag=os.WNOHANG):
|
||||
if self.returncode is None:
|
||||
try:
|
||||
pid, sts = os.waitpid(self.pid, flag)
|
||||
except os.error:
|
||||
# Child process not yet created. See #1731717
|
||||
# e.errno == errno.ECHILD == 10
|
||||
return None
|
||||
if pid == self.pid:
|
||||
if os.WIFSIGNALED(sts):
|
||||
self.returncode = -os.WTERMSIG(sts)
|
||||
else:
|
||||
assert os.WIFEXITED(sts)
|
||||
self.returncode = os.WEXITSTATUS(sts)
|
||||
return self.returncode
|
||||
|
||||
def wait(self, timeout=None):
|
||||
if self.returncode is None:
|
||||
if timeout is not None:
|
||||
r = _select([self.sentinel], [], [], timeout)[0]
|
||||
if not r:
|
||||
return None
|
||||
# This shouldn't block if select() returned successfully.
|
||||
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
|
||||
return self.returncode
|
||||
|
||||
def terminate(self):
|
||||
if self.returncode is None:
|
||||
try:
|
||||
os.kill(self.pid, signal.SIGTERM)
|
||||
except OSError:
|
||||
if self.wait(timeout=0.1) is None:
|
||||
raise
|
||||
|
||||
@staticmethod
|
||||
def thread_is_spawning():
|
||||
if _forking_is_enabled:
|
||||
return False
|
||||
else:
|
||||
return getattr(Popen._tls, 'process_handle', None) is not None
|
||||
|
||||
@staticmethod
|
||||
def duplicate_for_child(handle):
|
||||
return handle
|
||||
|
||||
#
|
||||
# Windows
|
||||
#
|
||||
|
||||
else:
|
||||
import thread
|
||||
import msvcrt
|
||||
import _subprocess
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
TERMINATE = 0x10000
|
||||
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
|
||||
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
|
||||
|
||||
exit = win32.ExitProcess
|
||||
close = win32.CloseHandle
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
def duplicate(handle, target_process=None, inheritable=False):
|
||||
if target_process is None:
|
||||
target_process = _subprocess.GetCurrentProcess()
|
||||
return _subprocess.DuplicateHandle(
|
||||
_subprocess.GetCurrentProcess(), handle, target_process,
|
||||
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
|
||||
).Detach()
|
||||
|
||||
#
|
||||
# We define a Popen class similar to the one from subprocess, but
|
||||
# whose constructor takes a process object as its argument.
|
||||
#
|
||||
|
||||
class Popen(object):
|
||||
'''
|
||||
Start a subprocess to run the code of a process object
|
||||
'''
|
||||
_tls = thread._local()
|
||||
|
||||
def __init__(self, process_obj):
|
||||
_Django_old_layout_hack__save()
|
||||
# create pipe for communication with child
|
||||
rfd, wfd = os.pipe()
|
||||
|
||||
# get handle for read end of the pipe and make it inheritable
|
||||
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
|
||||
os.close(rfd)
|
||||
|
||||
# start process
|
||||
cmd = get_command_line() + [rhandle]
|
||||
cmd = ' '.join('"%s"' % x for x in cmd)
|
||||
hp, ht, pid, tid = _subprocess.CreateProcess(
|
||||
_python_exe, cmd, None, None, 1, 0, None, None, None
|
||||
)
|
||||
ht.Close()
|
||||
close(rhandle)
|
||||
|
||||
# set attributes of self
|
||||
self.pid = pid
|
||||
self.returncode = None
|
||||
self._handle = hp
|
||||
self.sentinel = int(hp)
|
||||
|
||||
# send information to child
|
||||
prep_data = get_preparation_data(process_obj._name)
|
||||
to_child = os.fdopen(wfd, 'wb')
|
||||
Popen._tls.process_handle = int(hp)
|
||||
try:
|
||||
dump(prep_data, to_child, HIGHEST_PROTOCOL)
|
||||
dump(process_obj, to_child, HIGHEST_PROTOCOL)
|
||||
finally:
|
||||
del Popen._tls.process_handle
|
||||
to_child.close()
|
||||
|
||||
@staticmethod
|
||||
def thread_is_spawning():
|
||||
return getattr(Popen._tls, 'process_handle', None) is not None
|
||||
|
||||
@staticmethod
|
||||
def duplicate_for_child(handle):
|
||||
return duplicate(handle, Popen._tls.process_handle)
|
||||
|
||||
def wait(self, timeout=None):
|
||||
if self.returncode is None:
|
||||
if timeout is None:
|
||||
msecs = _subprocess.INFINITE
|
||||
else:
|
||||
msecs = max(0, int(timeout * 1000 + 0.5))
|
||||
|
||||
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
|
||||
if res == _subprocess.WAIT_OBJECT_0:
|
||||
code = _subprocess.GetExitCodeProcess(self._handle)
|
||||
if code == TERMINATE:
|
||||
code = -signal.SIGTERM
|
||||
self.returncode = code
|
||||
|
||||
return self.returncode
|
||||
|
||||
def poll(self):
|
||||
return self.wait(timeout=0)
|
||||
|
||||
def terminate(self):
|
||||
if self.returncode is None:
|
||||
try:
|
||||
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
|
||||
except WindowsError:
|
||||
if self.wait(timeout=0.1) is None:
|
||||
raise
|
||||
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
if WINSERVICE:
|
||||
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
|
||||
else:
|
||||
_python_exe = sys.executable
|
||||
|
||||
|
||||
def set_executable(exe):
|
||||
global _python_exe
|
||||
_python_exe = exe
|
||||
|
||||
|
||||
def is_forking(argv):
|
||||
'''
|
||||
Return whether commandline indicates we are forking
|
||||
'''
|
||||
if len(argv) >= 2 and argv[1] == '--billiard-fork':
|
||||
assert len(argv) == 3
|
||||
os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def freeze_support():
|
||||
'''
|
||||
Run code for process object if this is not the main process
|
||||
'''
|
||||
if is_forking(sys.argv):
|
||||
main()
|
||||
sys.exit()
|
||||
|
||||
|
||||
def get_command_line():
|
||||
'''
|
||||
Returns prefix of command line used for spawning a child process
|
||||
'''
|
||||
if process.current_process()._identity == () and is_forking(sys.argv):
|
||||
raise RuntimeError('''
|
||||
Attempt to start a new process before the current process
|
||||
has finished its bootstrapping phase.
|
||||
|
||||
This probably means that you have forgotten to use the proper
|
||||
idiom in the main module:
|
||||
|
||||
if __name__ == '__main__':
|
||||
freeze_support()
|
||||
...
|
||||
|
||||
The "freeze_support()" line can be omitted if the program
|
||||
is not going to be frozen to produce a Windows executable.''')
|
||||
|
||||
if getattr(sys, 'frozen', False):
|
||||
return [sys.executable, '--billiard-fork']
|
||||
else:
|
||||
prog = 'from billiard.forking import main; main()'
|
||||
return [_python_exe, '-c', prog, '--billiard-fork']
|
||||
|
||||
|
||||
def _Django_old_layout_hack__save():
|
||||
if 'DJANGO_PROJECT_DIR' not in os.environ:
|
||||
try:
|
||||
settings_name = os.environ['DJANGO_SETTINGS_MODULE']
|
||||
except KeyError:
|
||||
return # not using Django.
|
||||
|
||||
conf_settings = sys.modules.get('django.conf.settings')
|
||||
configured = conf_settings and conf_settings.configured
|
||||
try:
|
||||
project_name, _ = settings_name.split('.', 1)
|
||||
except ValueError:
|
||||
return # not modified by setup_environ
|
||||
|
||||
project = __import__(project_name)
|
||||
try:
|
||||
project_dir = os.path.normpath(_module_parent_dir(project))
|
||||
except AttributeError:
|
||||
return # dynamically generated module (no __file__)
|
||||
if configured:
|
||||
warnings.warn(UserWarning(
|
||||
W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
|
||||
))
|
||||
os.environ['DJANGO_PROJECT_DIR'] = project_dir
|
||||
|
||||
|
||||
def _Django_old_layout_hack__load():
|
||||
try:
|
||||
sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def _module_parent_dir(mod):
|
||||
dir, filename = os.path.split(_module_dir(mod))
|
||||
if dir == os.curdir or not dir:
|
||||
dir = os.getcwd()
|
||||
return dir
|
||||
|
||||
|
||||
def _module_dir(mod):
|
||||
if '__init__.py' in mod.__file__:
|
||||
return os.path.dirname(mod.__file__)
|
||||
return mod.__file__
|
||||
|
||||
|
||||
def main():
|
||||
'''
|
||||
Run code specified by data received over pipe
|
||||
'''
|
||||
global _forking_is_enabled
|
||||
_Django_old_layout_hack__load()
|
||||
|
||||
assert is_forking(sys.argv)
|
||||
_forking_is_enabled = False
|
||||
|
||||
handle = int(sys.argv[-1])
|
||||
if sys.platform == 'win32':
|
||||
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
|
||||
else:
|
||||
fd = handle
|
||||
from_parent = os.fdopen(fd, 'rb')
|
||||
|
||||
process.current_process()._inheriting = True
|
||||
preparation_data = load(from_parent)
|
||||
prepare(preparation_data)
|
||||
# Huge hack to make logging before Process.run work.
|
||||
try:
|
||||
os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
|
||||
except KeyError:
|
||||
pass
|
||||
loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
|
||||
logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
|
||||
format = os.environ.get("_MP_FORK_LOGFORMAT_")
|
||||
if loglevel:
|
||||
from billiard import util
|
||||
import logging
|
||||
logger = util.get_logger()
|
||||
logger.setLevel(int(loglevel))
|
||||
if not logger.handlers:
|
||||
logger._rudimentary_setup = True
|
||||
logfile = logfile or sys.__stderr__
|
||||
if hasattr(logfile, "write"):
|
||||
handler = logging.StreamHandler(logfile)
|
||||
else:
|
||||
handler = logging.FileHandler(logfile)
|
||||
formatter = logging.Formatter(
|
||||
format or util.DEFAULT_LOGGING_FORMAT,
|
||||
)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
self = load(from_parent)
|
||||
process.current_process()._inheriting = False
|
||||
|
||||
from_parent.close()
|
||||
|
||||
exitcode = self._bootstrap()
|
||||
exit(exitcode)
|
||||
|
||||
|
||||
def get_preparation_data(name):
|
||||
'''
|
||||
Return info about parent needed by child to unpickle process object
|
||||
'''
|
||||
from billiard.util import _logger, _log_to_stderr
|
||||
|
||||
d = dict(
|
||||
name=name,
|
||||
sys_path=sys.path,
|
||||
sys_argv=sys.argv,
|
||||
log_to_stderr=_log_to_stderr,
|
||||
orig_dir=process.ORIGINAL_DIR,
|
||||
authkey=process.current_process().authkey,
|
||||
)
|
||||
|
||||
if _logger is not None:
|
||||
d['log_level'] = _logger.getEffectiveLevel()
|
||||
|
||||
if not WINEXE and not WINSERVICE:
|
||||
main_path = getattr(sys.modules['__main__'], '__file__', None)
|
||||
if not main_path and sys.argv[0] not in ('', '-c'):
|
||||
main_path = sys.argv[0]
|
||||
if main_path is not None:
|
||||
if (not os.path.isabs(main_path) and
|
||||
process.ORIGINAL_DIR is not None):
|
||||
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
|
||||
d['main_path'] = os.path.normpath(main_path)
|
||||
|
||||
return d
|
||||
|
||||
#
|
||||
# Make (Pipe)Connection picklable
|
||||
#
|
||||
|
||||
def reduce_connection(conn):
|
||||
if not Popen.thread_is_spawning():
|
||||
raise RuntimeError(
|
||||
'By default %s objects can only be shared between processes\n'
|
||||
'using inheritance' % type(conn).__name__
|
||||
)
|
||||
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
|
||||
conn.readable, conn.writable)
|
||||
|
||||
ForkingPickler.register(Connection, reduce_connection)
|
||||
ForkingPickler.register(PipeConnection, reduce_connection)
|
||||
|
||||
#
|
||||
# Prepare current process
|
||||
#
|
||||
|
||||
old_main_modules = []
|
||||
|
||||
|
||||
def prepare(data):
|
||||
'''
|
||||
Try to get current process ready to unpickle process object
|
||||
'''
|
||||
old_main_modules.append(sys.modules['__main__'])
|
||||
|
||||
if 'name' in data:
|
||||
process.current_process().name = data['name']
|
||||
|
||||
if 'authkey' in data:
|
||||
process.current_process()._authkey = data['authkey']
|
||||
|
||||
if 'log_to_stderr' in data and data['log_to_stderr']:
|
||||
util.log_to_stderr()
|
||||
|
||||
if 'log_level' in data:
|
||||
util.get_logger().setLevel(data['log_level'])
|
||||
|
||||
if 'sys_path' in data:
|
||||
sys.path = data['sys_path']
|
||||
|
||||
if 'sys_argv' in data:
|
||||
sys.argv = data['sys_argv']
|
||||
|
||||
if 'dir' in data:
|
||||
os.chdir(data['dir'])
|
||||
|
||||
if 'orig_dir' in data:
|
||||
process.ORIGINAL_DIR = data['orig_dir']
|
||||
|
||||
if 'main_path' in data:
|
||||
main_path = data['main_path']
|
||||
main_name = os.path.splitext(os.path.basename(main_path))[0]
|
||||
if main_name == '__init__':
|
||||
main_name = os.path.basename(os.path.dirname(main_path))
|
||||
|
||||
if main_name == '__main__':
|
||||
main_module = sys.modules['__main__']
|
||||
main_module.__file__ = main_path
|
||||
elif main_name != 'ipython':
|
||||
# Main modules not actually called __main__.py may
|
||||
# contain additional code that should still be executed
|
||||
import imp
|
||||
|
||||
if main_path is None:
|
||||
dirs = None
|
||||
elif os.path.basename(main_path).startswith('__init__.py'):
|
||||
dirs = [os.path.dirname(os.path.dirname(main_path))]
|
||||
else:
|
||||
dirs = [os.path.dirname(main_path)]
|
||||
|
||||
assert main_name not in sys.modules, main_name
|
||||
file, path_name, etc = imp.find_module(main_name, dirs)
|
||||
try:
|
||||
# We would like to do "imp.load_module('__main__', ...)"
|
||||
# here. However, that would cause 'if __name__ ==
|
||||
# "__main__"' clauses to be executed.
|
||||
main_module = imp.load_module(
|
||||
'__parents_main__', file, path_name, etc
|
||||
)
|
||||
finally:
|
||||
if file:
|
||||
file.close()
|
||||
|
||||
sys.modules['__main__'] = main_module
|
||||
main_module.__name__ = '__main__'
|
||||
|
||||
# Try to make the potentially picklable objects in
|
||||
# sys.modules['__main__'] realize they are in the main
|
||||
# module -- somewhat ugly.
|
||||
for obj in main_module.__dict__.values():
|
||||
try:
|
||||
if obj.__module__ == '__parents_main__':
|
||||
obj.__module__ = '__main__'
|
||||
except Exception:
|
||||
pass
|
||||
254
awx/lib/site-packages/billiard/heap.py
Normal file
@@ -0,0 +1,254 @@
|
||||
#
|
||||
# Module which supports allocation of memory from an mmap
|
||||
#
|
||||
# multiprocessing/heap.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
import bisect
|
||||
import mmap
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import itertools
|
||||
|
||||
from ._ext import _billiard, win32
|
||||
from .util import Finalize, info, get_temp_dir
|
||||
from .forking import assert_spawning, ForkingPickler
|
||||
|
||||
__all__ = ['BufferWrapper']
|
||||
|
||||
try:
|
||||
maxsize = sys.maxsize
|
||||
except AttributeError:
|
||||
maxsize = sys.maxint
|
||||
|
||||
#
|
||||
# Inheritable class which wraps an mmap, and from which blocks can be allocated
|
||||
#
|
||||
|
||||
if sys.platform == 'win32':
|
||||
|
||||
class Arena(object):
|
||||
|
||||
_counter = itertools.count()
|
||||
|
||||
def __init__(self, size):
|
||||
self.size = size
|
||||
self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next())
|
||||
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
|
||||
assert win32.GetLastError() == 0, 'tagname already in use'
|
||||
self._state = (self.size, self.name)
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
return self._state
|
||||
|
||||
def __setstate__(self, state):
|
||||
self.size, self.name = self._state = state
|
||||
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
|
||||
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
|
||||
|
||||
else:
|
||||
|
||||
class Arena(object):
|
||||
|
||||
_counter = itertools.count()
|
||||
|
||||
def __init__(self, size, fileno=-1):
|
||||
from .forking import _forking_is_enabled
|
||||
self.size = size
|
||||
self.fileno = fileno
|
||||
if fileno == -1 and not _forking_is_enabled:
|
||||
name = os.path.join(
|
||||
get_temp_dir(),
|
||||
'pym-%d-%d' % (os.getpid(), self._counter.next()))
|
||||
self.fileno = os.open(
|
||||
name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0600)
|
||||
os.unlink(name)
|
||||
os.ftruncate(self.fileno, size)
|
||||
self.buffer = mmap.mmap(self.fileno, self.size)
|
||||
|
||||
def reduce_arena(a):
|
||||
if a.fileno == -1:
|
||||
            raise ValueError('Arena is unpicklable because '
|
||||
'forking was enabled when it was created')
|
||||
return Arena, (a.size, a.fileno)
|
||||
|
||||
ForkingPickler.register(Arena, reduce_arena)
|
||||
|
||||
#
|
||||
# Class allowing allocation of chunks of memory from arenas
|
||||
#
|
||||
|
||||
|
||||
class Heap(object):
|
||||
|
||||
_alignment = 8
|
||||
|
||||
def __init__(self, size=mmap.PAGESIZE):
|
||||
self._lastpid = os.getpid()
|
||||
self._lock = threading.Lock()
|
||||
self._size = size
|
||||
self._lengths = []
|
||||
self._len_to_seq = {}
|
||||
self._start_to_block = {}
|
||||
self._stop_to_block = {}
|
||||
self._allocated_blocks = set()
|
||||
self._arenas = []
|
||||
# list of pending blocks to free - see free() comment below
|
||||
self._pending_free_blocks = []
|
||||
|
||||
@staticmethod
|
||||
def _roundup(n, alignment):
|
||||
# alignment must be a power of 2
|
||||
mask = alignment - 1
|
||||
return (n + mask) & ~mask
|
||||
|
||||
def _malloc(self, size):
|
||||
# returns a large enough block -- it might be much larger
|
||||
i = bisect.bisect_left(self._lengths, size)
|
||||
if i == len(self._lengths):
|
||||
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
|
||||
self._size *= 2
|
||||
info('allocating a new mmap of length %d', length)
|
||||
arena = Arena(length)
|
||||
self._arenas.append(arena)
|
||||
return (arena, 0, length)
|
||||
else:
|
||||
length = self._lengths[i]
|
||||
seq = self._len_to_seq[length]
|
||||
block = seq.pop()
|
||||
if not seq:
|
||||
del self._len_to_seq[length], self._lengths[i]
|
||||
|
||||
(arena, start, stop) = block
|
||||
del self._start_to_block[(arena, start)]
|
||||
del self._stop_to_block[(arena, stop)]
|
||||
return block
|
||||
|
||||
def _free(self, block):
|
||||
# free location and try to merge with neighbours
|
||||
(arena, start, stop) = block
|
||||
|
||||
try:
|
||||
prev_block = self._stop_to_block[(arena, start)]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
start, _ = self._absorb(prev_block)
|
||||
|
||||
try:
|
||||
next_block = self._start_to_block[(arena, stop)]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
_, stop = self._absorb(next_block)
|
||||
|
||||
block = (arena, start, stop)
|
||||
length = stop - start
|
||||
|
||||
try:
|
||||
self._len_to_seq[length].append(block)
|
||||
except KeyError:
|
||||
self._len_to_seq[length] = [block]
|
||||
bisect.insort(self._lengths, length)
|
||||
|
||||
self._start_to_block[(arena, start)] = block
|
||||
self._stop_to_block[(arena, stop)] = block
|
||||
|
||||
def _absorb(self, block):
|
||||
# deregister this block so it can be merged with a neighbour
|
||||
(arena, start, stop) = block
|
||||
del self._start_to_block[(arena, start)]
|
||||
del self._stop_to_block[(arena, stop)]
|
||||
|
||||
length = stop - start
|
||||
seq = self._len_to_seq[length]
|
||||
seq.remove(block)
|
||||
if not seq:
|
||||
del self._len_to_seq[length]
|
||||
self._lengths.remove(length)
|
||||
|
||||
return start, stop
|
||||
|
||||
def _free_pending_blocks(self):
|
||||
# Free all the blocks in the pending list - called with the lock held
|
||||
while 1:
|
||||
try:
|
||||
block = self._pending_free_blocks.pop()
|
||||
except IndexError:
|
||||
break
|
||||
self._allocated_blocks.remove(block)
|
||||
self._free(block)
|
||||
|
||||
def free(self, block):
|
||||
# free a block returned by malloc()
|
||||
# Since free() can be called asynchronously by the GC, it could happen
|
||||
# that it's called while self._lock is held: in that case,
|
||||
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
|
||||
# trylock is used instead, and if the lock can't be acquired
|
||||
# immediately, the block is added to a list of blocks to be freed
|
||||
# synchronously sometimes later from malloc() or free(), by calling
|
||||
# _free_pending_blocks() (appending and retrieving from a list is not
|
||||
        # strictly thread-safe but under CPython it's atomic thanks
|
||||
# to the GIL).
|
||||
assert os.getpid() == self._lastpid
|
||||
if not self._lock.acquire(False):
|
||||
            # can't acquire the lock right now, add the block to the list of
|
||||
# pending blocks to free
|
||||
self._pending_free_blocks.append(block)
|
||||
else:
|
||||
# we hold the lock
|
||||
try:
|
||||
self._free_pending_blocks()
|
||||
self._allocated_blocks.remove(block)
|
||||
self._free(block)
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
def malloc(self, size):
|
||||
# return a block of right size (possibly rounded up)
|
||||
assert 0 <= size < maxsize
|
||||
if os.getpid() != self._lastpid:
|
||||
self.__init__() # reinitialize after fork
|
||||
self._lock.acquire()
|
||||
self._free_pending_blocks()
|
||||
try:
|
||||
size = self._roundup(max(size, 1), self._alignment)
|
||||
(arena, start, stop) = self._malloc(size)
|
||||
new_stop = start + size
|
||||
if new_stop < stop:
|
||||
self._free((arena, new_stop, stop))
|
||||
block = (arena, start, new_stop)
|
||||
self._allocated_blocks.add(block)
|
||||
return block
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
#
|
||||
# Class representing a chunk of an mmap -- can be inherited
|
||||
#
|
||||
|
||||
|
||||
class BufferWrapper(object):
|
||||
|
||||
_heap = Heap()
|
||||
|
||||
def __init__(self, size):
|
||||
assert 0 <= size < maxsize
|
||||
block = BufferWrapper._heap.malloc(size)
|
||||
self._state = (block, size)
|
||||
Finalize(self, BufferWrapper._heap.free, args=(block,))
|
||||
|
||||
def get_address(self):
|
||||
(arena, start, stop), size = self._state
|
||||
address, length = _billiard.address_of_buffer(arena.buffer)
|
||||
assert size <= length
|
||||
return address + start
|
||||
|
||||
def get_size(self):
|
||||
return self._state[1]
|
||||
1170
awx/lib/site-packages/billiard/managers.py
Normal file
File diff suppressed because it is too large
1670
awx/lib/site-packages/billiard/pool.py
Normal file
File diff suppressed because it is too large
330
awx/lib/site-packages/billiard/process.py
Normal file
@@ -0,0 +1,330 @@
|
||||
#
|
||||
# Module providing the `Process` class which emulates `threading.Thread`
|
||||
#
|
||||
# multiprocessing/process.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
__all__ = ['Process', 'current_process', 'active_children']
|
||||
|
||||
#
|
||||
# Imports
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import itertools
|
||||
import binascii
|
||||
|
||||
from .compat import bytes
|
||||
try:
|
||||
from _weakrefset import WeakSet
|
||||
except ImportError:
|
||||
WeakSet = None # noqa
|
||||
|
||||
try:
|
||||
ORIGINAL_DIR = os.path.abspath(os.getcwd())
|
||||
except OSError:
|
||||
ORIGINAL_DIR = None
|
||||
|
||||
#
|
||||
# Public functions
|
||||
#
|
||||
|
||||
|
||||
def current_process():
|
||||
'''
|
||||
Return process object representing the current process
|
||||
'''
|
||||
return _current_process
|
||||
|
||||
|
||||
def _cleanup():
|
||||
# check for processes which have finished
|
||||
for p in list(_current_process._children):
|
||||
if p._popen.poll() is not None:
|
||||
_current_process._children.discard(p)
|
||||
|
||||
|
||||
def active_children(_cleanup=_cleanup):
|
||||
'''
|
||||
Return list of process objects corresponding to live child processes
|
||||
'''
|
||||
try:
|
||||
_cleanup()
|
||||
except TypeError:
|
||||
# called after gc collect so _cleanup does not exist anymore
|
||||
return []
|
||||
return list(_current_process._children)
|
||||
|
||||
|
||||
class Process(object):
|
||||
'''
|
||||
Process objects represent activity that is run in a separate process
|
||||
|
||||
    The class is analogous to `threading.Thread`
|
||||
'''
|
||||
_Popen = None
|
||||
|
||||
def __init__(self, group=None, target=None, name=None,
|
||||
args=(), kwargs={}, daemon=None, **_kw):
|
||||
assert group is None, 'group argument must be None for now'
|
||||
count = _current_process._counter.next()
|
||||
self._identity = _current_process._identity + (count,)
|
||||
self._authkey = _current_process._authkey
|
||||
if daemon is not None:
|
||||
self._daemonic = daemon
|
||||
else:
|
||||
self._daemonic = _current_process._daemonic
|
||||
self._tempdir = _current_process._tempdir
|
||||
self._semprefix = _current_process._semprefix
|
||||
self._unlinkfd = _current_process._unlinkfd
|
||||
self._parent_pid = os.getpid()
|
||||
self._popen = None
|
||||
self._target = target
|
||||
self._args = tuple(args)
|
||||
self._kwargs = dict(kwargs)
|
||||
self._name = (
|
||||
name or type(self).__name__ + '-' +
|
||||
':'.join(str(i) for i in self._identity)
|
||||
)
|
||||
if _dangling is not None:
|
||||
_dangling.add(self)
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Method to be run in sub-process; can be overridden in sub-class
|
||||
'''
|
||||
if self._target:
|
||||
self._target(*self._args, **self._kwargs)
|
||||
|
||||
def start(self):
|
||||
'''
|
||||
Start child process
|
||||
'''
|
||||
assert self._popen is None, 'cannot start a process twice'
|
||||
assert self._parent_pid == os.getpid(), \
|
||||
'can only start a process object created by current process'
|
||||
assert not _current_process._daemonic, \
|
||||
'daemonic processes are not allowed to have children'
|
||||
_cleanup()
|
||||
if self._Popen is not None:
|
||||
Popen = self._Popen
|
||||
else:
|
||||
from .forking import Popen
|
||||
self._popen = Popen(self)
|
||||
self._sentinel = self._popen.sentinel
|
||||
_current_process._children.add(self)
|
||||
|
||||
def terminate(self):
|
||||
'''
|
||||
Terminate process; sends SIGTERM signal or uses TerminateProcess()
|
||||
'''
|
||||
self._popen.terminate()
|
||||
|
||||
def join(self, timeout=None):
|
||||
'''
|
||||
Wait until child process terminates
|
||||
'''
|
||||
assert self._parent_pid == os.getpid(), 'can only join a child process'
|
||||
assert self._popen is not None, 'can only join a started process'
|
||||
res = self._popen.wait(timeout)
|
||||
if res is not None:
|
||||
_current_process._children.discard(self)
|
||||
|
||||
def is_alive(self):
|
||||
'''
|
||||
Return whether process is alive
|
||||
'''
|
||||
if self is _current_process:
|
||||
return True
|
||||
assert self._parent_pid == os.getpid(), 'can only test a child process'
|
||||
if self._popen is None:
|
||||
return False
|
||||
self._popen.poll()
|
||||
return self._popen.returncode is None
|
||||
|
||||
def _get_name(self):
|
||||
return self._name
|
||||
|
||||
def _set_name(self, value):
|
||||
        assert isinstance(value, basestring), 'name must be a string'
|
||||
self._name = value
|
||||
name = property(_get_name, _set_name)
|
||||
|
||||
def _get_daemon(self):
|
||||
return self._daemonic
|
||||
|
||||
def _set_daemon(self, daemonic):
|
||||
assert self._popen is None, 'process has already started'
|
||||
self._daemonic = daemonic
|
||||
daemon = property(_get_daemon, _set_daemon)
|
||||
|
||||
def _get_authkey(self):
|
||||
return self._authkey
|
||||
|
||||
def _set_authkey(self, authkey):
|
||||
self._authkey = AuthenticationString(authkey)
|
||||
authkey = property(_get_authkey, _set_authkey)
|
||||
|
||||
@property
|
||||
def exitcode(self):
|
||||
'''
|
||||
Return exit code of process or `None` if it has yet to stop
|
||||
'''
|
||||
if self._popen is None:
|
||||
return self._popen
|
||||
return self._popen.poll()
|
||||
|
||||
@property
|
||||
def ident(self):
|
||||
'''
|
||||
Return identifier (PID) of process or `None` if it has yet to start
|
||||
'''
|
||||
if self is _current_process:
|
||||
return os.getpid()
|
||||
else:
|
||||
return self._popen and self._popen.pid
|
||||
|
||||
pid = ident
|
||||
|
||||
@property
|
||||
def sentinel(self):
|
||||
'''
|
||||
Return a file descriptor (Unix) or handle (Windows) suitable for
|
||||
waiting for process termination.
|
||||
'''
|
||||
try:
|
||||
return self._sentinel
|
||||
except AttributeError:
|
||||
raise ValueError("process not started")
|
||||
|
||||
def __repr__(self):
|
||||
if self is _current_process:
|
||||
status = 'started'
|
||||
elif self._parent_pid != os.getpid():
|
||||
status = 'unknown'
|
||||
elif self._popen is None:
|
||||
status = 'initial'
|
||||
else:
|
||||
if self._popen.poll() is not None:
|
||||
status = self.exitcode
|
||||
else:
|
||||
status = 'started'
|
||||
|
||||
if type(status) is int:
|
||||
if status == 0:
|
||||
status = 'stopped'
|
||||
else:
|
||||
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
|
||||
|
||||
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
|
||||
status, self._daemonic and ' daemon' or '')
|
||||
|
||||
##
|
||||
|
||||
def _bootstrap(self):
|
||||
from . import util
|
||||
global _current_process
|
||||
|
||||
try:
|
||||
self._children = set()
|
||||
self._counter = itertools.count(1)
|
||||
if sys.stdin is not None:
|
||||
try:
|
||||
sys.stdin.close()
|
||||
sys.stdin = open(os.devnull)
|
||||
except (OSError, ValueError):
|
||||
pass
|
||||
old_process = _current_process
|
||||
_current_process = self
|
||||
try:
|
||||
util._finalizer_registry.clear()
|
||||
util._run_after_forkers()
|
||||
finally:
|
||||
# delay finalization of the old process object until after
|
||||
# _run_after_forkers() is executed
|
||||
del old_process
|
||||
util.info('child process %s calling self.run()', self.pid)
|
||||
try:
|
||||
self.run()
|
||||
exitcode = 0
|
||||
finally:
|
||||
util._exit_function()
|
||||
except SystemExit, e:
|
||||
if not e.args:
|
||||
exitcode = 1
|
||||
elif isinstance(e.args[0], int):
|
||||
exitcode = e.args[0]
|
||||
else:
|
||||
sys.stderr.write(str(e.args[0]) + '\n')
|
||||
sys.stderr.flush()
|
||||
exitcode = 0 if isinstance(e.args[0], str) else 1
|
||||
except:
|
||||
exitcode = 1
|
||||
if not util.error('Process %s', self.name, exc_info=True):
|
||||
import traceback
|
||||
sys.stderr.write('Process %s:\n' % self.name)
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
util.info('process %s exiting with exitcode %d',
|
||||
self.pid, exitcode)
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
return exitcode
|
||||
|
||||
#
|
||||
# We subclass bytes to avoid accidental transmission of auth keys over network
|
||||
#
|
||||
|
||||
|
||||
class AuthenticationString(bytes):
|
||||
|
||||
def __reduce__(self):
|
||||
from .forking import Popen
|
||||
|
||||
if not Popen.thread_is_spawning():
|
||||
raise TypeError(
|
||||
'Pickling an AuthenticationString object is '
|
||||
'disallowed for security reasons')
|
||||
return AuthenticationString, (bytes(self),)
|
||||
|
||||
#
|
||||
# Create object representing the main process
|
||||
#
|
||||
|
||||
|
||||
class _MainProcess(Process):
|
||||
|
||||
def __init__(self):
|
||||
self._identity = ()
|
||||
self._daemonic = False
|
||||
self._name = 'MainProcess'
|
||||
self._parent_pid = None
|
||||
self._popen = None
|
||||
self._counter = itertools.count(1)
|
||||
self._children = set()
|
||||
self._authkey = AuthenticationString(os.urandom(32))
|
||||
self._tempdir = None
|
||||
self._semprefix = 'mp-' + binascii.hexlify(
|
||||
os.urandom(4)).decode('ascii')
|
||||
self._unlinkfd = None
|
||||
|
||||
_current_process = _MainProcess()
|
||||
del _MainProcess
|
||||
|
||||
#
|
||||
# Give names to some return codes
|
||||
#
|
||||
|
||||
_exitcode_to_name = {}
|
||||
|
||||
for name, signum in signal.__dict__.items():
|
||||
if name[:3] == 'SIG' and '_' not in name:
|
||||
_exitcode_to_name[-signum] = name
|
||||
|
||||
_dangling = WeakSet() if WeakSet is not None else None
|
||||
354
awx/lib/site-packages/billiard/queues.py
Normal file
@@ -0,0 +1,354 @@
|
||||
#
|
||||
# Module implementing queues
|
||||
#
|
||||
# multiprocessing/queues.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
|
||||
|
||||
import sys
|
||||
import os
|
||||
import threading
|
||||
import collections
|
||||
import time
|
||||
import weakref
|
||||
import errno
|
||||
|
||||
from Queue import Empty, Full
|
||||
|
||||
from . import Pipe
|
||||
from ._ext import _billiard
|
||||
from .compat import get_errno
|
||||
from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition
|
||||
from .util import debug, error, info, Finalize, register_after_fork
|
||||
from .forking import assert_spawning
|
||||
|
||||
|
||||
class Queue(object):
|
||||
'''
|
||||
Queue type using a pipe, buffer and thread
|
||||
'''
|
||||
def __init__(self, maxsize=0):
|
||||
if maxsize <= 0:
|
||||
maxsize = _billiard.SemLock.SEM_VALUE_MAX
|
||||
self._maxsize = maxsize
|
||||
self._reader, self._writer = Pipe(duplex=False)
|
||||
self._rlock = Lock()
|
||||
self._opid = os.getpid()
|
||||
if sys.platform == 'win32':
|
||||
self._wlock = None
|
||||
else:
|
||||
self._wlock = Lock()
|
||||
self._sem = BoundedSemaphore(maxsize)
|
||||
# For use by concurrent.futures
|
||||
self._ignore_epipe = False
|
||||
|
||||
self._after_fork()
|
||||
|
||||
if sys.platform != 'win32':
|
||||
register_after_fork(self, Queue._after_fork)
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
|
||||
self._rlock, self._wlock, self._sem, self._opid)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
|
||||
self._rlock, self._wlock, self._sem, self._opid) = state
|
||||
self._after_fork()
|
||||
|
||||
def _after_fork(self):
|
||||
debug('Queue._after_fork()')
|
||||
self._notempty = threading.Condition(threading.Lock())
|
||||
self._buffer = collections.deque()
|
||||
self._thread = None
|
||||
self._jointhread = None
|
||||
self._joincancelled = False
|
||||
self._closed = False
|
||||
self._close = None
|
||||
self._send = self._writer.send
|
||||
self._recv = self._reader.recv
|
||||
self._poll = self._reader.poll
|
||||
|
||||
def put(self, obj, block=True, timeout=None):
|
||||
assert not self._closed
|
||||
if not self._sem.acquire(block, timeout):
|
||||
raise Full
|
||||
|
||||
with self._notempty:
|
||||
if self._thread is None:
|
||||
self._start_thread()
|
||||
self._buffer.append(obj)
|
||||
self._notempty.notify()
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
if block and timeout is None:
|
||||
with self._rlock:
|
||||
res = self._recv()
|
||||
self._sem.release()
|
||||
return res
|
||||
|
||||
else:
|
||||
if block:
|
||||
deadline = time.time() + timeout
|
||||
if not self._rlock.acquire(block, timeout):
|
||||
raise Empty
|
||||
try:
|
||||
if block:
|
||||
timeout = deadline - time.time()
|
||||
if timeout < 0 or not self._poll(timeout):
|
||||
raise Empty
|
||||
elif not self._poll():
|
||||
raise Empty
|
||||
res = self._recv()
|
||||
self._sem.release()
|
||||
return res
|
||||
finally:
|
||||
self._rlock.release()
|
||||
|
||||
def qsize(self):
|
||||
# Raises NotImplementedError on Mac OSX because
|
||||
# of broken sem_getvalue()
|
||||
return self._maxsize - self._sem._semlock._get_value()
|
||||
|
||||
def empty(self):
|
||||
return not self._poll()
|
||||
|
||||
def full(self):
|
||||
return self._sem._semlock._is_zero()
|
||||
|
||||
def get_nowait(self):
|
||||
return self.get(False)
|
||||
|
||||
def put_nowait(self, obj):
|
||||
return self.put(obj, False)
|
||||
|
||||
def close(self):
|
||||
self._closed = True
|
||||
self._reader.close()
|
||||
if self._close:
|
||||
self._close()
|
||||
|
||||
def join_thread(self):
|
||||
debug('Queue.join_thread()')
|
||||
assert self._closed
|
||||
if self._jointhread:
|
||||
self._jointhread()
|
||||
|
||||
def cancel_join_thread(self):
|
||||
debug('Queue.cancel_join_thread()')
|
||||
self._joincancelled = True
|
||||
try:
|
||||
self._jointhread.cancel()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def _start_thread(self):
|
||||
debug('Queue._start_thread()')
|
||||
|
||||
# Start thread which transfers data from buffer to pipe
|
||||
self._buffer.clear()
|
||||
self._thread = threading.Thread(
|
||||
target=Queue._feed,
|
||||
args=(self._buffer, self._notempty, self._send,
|
||||
self._wlock, self._writer.close, self._ignore_epipe),
|
||||
name='QueueFeederThread'
|
||||
)
|
||||
self._thread.daemon = True
|
||||
|
||||
debug('doing self._thread.start()')
|
||||
self._thread.start()
|
||||
debug('... done self._thread.start()')
|
||||
|
||||
# On process exit we will wait for data to be flushed to pipe.
|
||||
#
|
||||
# However, if this process created the queue then all
|
||||
# processes which use the queue will be descendants of this
|
||||
# process. Therefore waiting for the queue to be flushed
|
||||
# is pointless once all the child processes have been joined.
|
||||
created_by_this_process = (self._opid == os.getpid())
|
||||
if not self._joincancelled and not created_by_this_process:
|
||||
self._jointhread = Finalize(
|
||||
self._thread, Queue._finalize_join,
|
||||
[weakref.ref(self._thread)],
|
||||
exitpriority=-5
|
||||
)
|
||||
|
||||
# Send sentinel to the thread queue object when garbage collected
|
||||
self._close = Finalize(
|
||||
self, Queue._finalize_close,
|
||||
[self._buffer, self._notempty],
|
||||
exitpriority=10
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _finalize_join(twr):
|
||||
debug('joining queue thread')
|
||||
thread = twr()
|
||||
if thread is not None:
|
||||
thread.join()
|
||||
debug('... queue thread joined')
|
||||
else:
|
||||
debug('... queue thread already dead')
|
||||
|
||||
@staticmethod
|
||||
def _finalize_close(buffer, notempty):
|
||||
debug('telling queue thread to quit')
|
||||
with notempty:
|
||||
buffer.append(_sentinel)
|
||||
notempty.notify()
|
||||
|
||||
@staticmethod
|
||||
def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
|
||||
debug('starting thread to feed data to pipe')
|
||||
from .util import is_exiting
|
||||
|
||||
ncond = notempty
|
||||
nwait = notempty.wait
|
||||
bpopleft = buffer.popleft
|
||||
sentinel = _sentinel
|
||||
if sys.platform != 'win32':
|
||||
wlock = writelock
|
||||
else:
|
||||
wlock = None
|
||||
|
||||
try:
|
||||
while 1:
|
||||
with ncond:
|
||||
if not buffer:
|
||||
nwait()
|
||||
try:
|
||||
while 1:
|
||||
obj = bpopleft()
|
||||
if obj is sentinel:
|
||||
debug('feeder thread got sentinel -- exiting')
|
||||
close()
|
||||
return
|
||||
|
||||
if wlock is None:
|
||||
send(obj)
|
||||
else:
|
||||
with wlock:
|
||||
send(obj)
|
||||
except IndexError:
|
||||
pass
|
||||
except Exception, exc:
|
||||
if ignore_epipe and get_errno(exc) == errno.EPIPE:
|
||||
return
|
||||
# Since this runs in a daemon thread the resources it uses
|
||||
            # may become unusable while the process is cleaning up.
|
||||
# We ignore errors which happen after the process has
|
||||
            # started to clean up.
|
||||
try:
|
||||
if is_exiting():
|
||||
info('error in queue thread: %r', exc, exc_info=True)
|
||||
else:
|
||||
if not error('error in queue thread: %r', exc,
|
||||
exc_info=True):
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
_sentinel = object()
|
||||
|
||||
|
||||
class JoinableQueue(Queue):
|
||||
'''
|
||||
A queue type which also supports join() and task_done() methods
|
||||
|
||||
Note that if you do not call task_done() for each finished task then
|
||||
eventually the counter's semaphore may overflow causing Bad Things
|
||||
to happen.
|
||||
'''
|
||||
|
||||
def __init__(self, maxsize=0):
|
||||
Queue.__init__(self, maxsize)
|
||||
self._unfinished_tasks = Semaphore(0)
|
||||
self._cond = Condition()
|
||||
|
||||
def __getstate__(self):
|
||||
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
|
||||
|
||||
def __setstate__(self, state):
|
||||
Queue.__setstate__(self, state[:-2])
|
||||
self._cond, self._unfinished_tasks = state[-2:]
|
||||
|
||||
def put(self, obj, block=True, timeout=None):
|
||||
assert not self._closed
|
||||
if not self._sem.acquire(block, timeout):
|
||||
raise Full
|
||||
|
||||
with self._notempty:
|
||||
with self._cond:
|
||||
if self._thread is None:
|
||||
self._start_thread()
|
||||
self._buffer.append(obj)
|
||||
self._unfinished_tasks.release()
|
||||
self._notempty.notify()
|
||||
|
||||
def task_done(self):
|
||||
with self._cond:
|
||||
if not self._unfinished_tasks.acquire(False):
|
||||
raise ValueError('task_done() called too many times')
|
||||
if self._unfinished_tasks._semlock._is_zero():
|
||||
self._cond.notify_all()
|
||||
|
||||
def join(self):
|
||||
with self._cond:
|
||||
if not self._unfinished_tasks._semlock._is_zero():
|
||||
self._cond.wait()
|
||||
|
||||
|
||||
class SimpleQueue(object):
|
||||
'''
|
||||
Simplified Queue type -- really just a locked pipe
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
self._reader, self._writer = Pipe(duplex=False)
|
||||
self._rlock = Lock()
|
||||
self._poll = self._reader.poll
|
||||
if sys.platform == 'win32':
|
||||
self._wlock = None
|
||||
else:
|
||||
self._wlock = Lock()
|
||||
self._make_methods()
|
||||
|
||||
def empty(self):
|
||||
return not self._poll()
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
return (self._reader, self._writer, self._rlock, self._wlock)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(self._reader, self._writer, self._rlock, self._wlock) = state
|
||||
self._make_methods()
|
||||
|
||||
def _make_methods(self):
|
||||
recv = self._reader.recv
|
||||
rlock = self._rlock
|
||||
|
||||
def get():
|
||||
with rlock:
|
||||
return recv()
|
||||
self.get = get
|
||||
|
||||
if self._wlock is None:
|
||||
# writes to a message oriented win32 pipe are atomic
|
||||
self.put = self._writer.send
|
||||
else:
|
||||
send = self._writer.send
|
||||
wlock = self._wlock
|
||||
|
||||
def put(obj):
|
||||
with wlock:
|
||||
return send(obj)
|
||||
self.put = put
|
||||
200
awx/lib/site-packages/billiard/reduction.py
Normal file
@@ -0,0 +1,200 @@
|
||||
#
|
||||
# Module to allow connection and socket objects to be transferred
|
||||
# between processes
|
||||
#
|
||||
# multiprocessing/reduction.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
__all__ = []
|
||||
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import threading
|
||||
|
||||
if sys.version_info[0] == 3:
|
||||
from multiprocessing.connection import Client, Listener
|
||||
else:
|
||||
from billiard._connection import Client, Listener # noqa
|
||||
|
||||
from . import current_process
|
||||
from ._ext import _billiard, win32
|
||||
from .forking import Popen, duplicate, close, ForkingPickler
|
||||
from .util import register_after_fork, debug, sub_debug
|
||||
|
||||
if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')):
|
||||
raise ImportError('pickling of connections not supported')
|
||||
|
||||
|
||||
# globals set later
|
||||
_listener = None
|
||||
_lock = None
|
||||
_cache = set()
|
||||
|
||||
#
|
||||
# Platform specific definitions
|
||||
#
|
||||
|
||||
if sys.platform == 'win32':
|
||||
# XXX Should this subprocess import be here?
|
||||
import _subprocess # noqa
|
||||
|
||||
def send_handle(conn, handle, destination_pid):
|
||||
process_handle = win32.OpenProcess(
|
||||
win32.PROCESS_ALL_ACCESS, False, destination_pid
|
||||
)
|
||||
try:
|
||||
new_handle = duplicate(handle, process_handle)
|
||||
conn.send(new_handle)
|
||||
finally:
|
||||
close(process_handle)
|
||||
|
||||
def recv_handle(conn):
|
||||
return conn.recv()
|
||||
|
||||
else:
|
||||
def send_handle(conn, handle, destination_pid): # noqa
|
||||
_billiard.sendfd(conn.fileno(), handle)
|
||||
|
||||
def recv_handle(conn): # noqa
|
||||
return _billiard.recvfd(conn.fileno())
|
||||
|
||||
#
|
||||
# Support for a per-process server thread which caches pickled handles
|
||||
#
|
||||
|
||||
|
||||
def _reset(obj):
|
||||
global _lock, _listener, _cache
|
||||
for h in _cache:
|
||||
close(h)
|
||||
_cache.clear()
|
||||
_lock = threading.Lock()
|
||||
_listener = None
|
||||
|
||||
_reset(None)
|
||||
register_after_fork(_reset, _reset)
|
||||
|
||||
|
||||
def _get_listener():
|
||||
global _listener
|
||||
|
||||
if _listener is None:
|
||||
_lock.acquire()
|
||||
try:
|
||||
if _listener is None:
|
||||
debug('starting listener and thread for sending handles')
|
||||
_listener = Listener(authkey=current_process().authkey)
|
||||
t = threading.Thread(target=_serve)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
finally:
|
||||
_lock.release()
|
||||
|
||||
return _listener
|
||||
|
||||
|
||||
def _serve():
|
||||
from .util import is_exiting, sub_warning
|
||||
|
||||
while 1:
|
||||
try:
|
||||
conn = _listener.accept()
|
||||
handle_wanted, destination_pid = conn.recv()
|
||||
_cache.remove(handle_wanted)
|
||||
send_handle(conn, handle_wanted, destination_pid)
|
||||
close(handle_wanted)
|
||||
conn.close()
|
||||
except:
|
||||
if not is_exiting():
|
||||
sub_warning('thread for sharing handles raised exception',
|
||||
exc_info=True)
|
||||
|
||||
#
|
||||
# Functions to be used for pickling/unpickling objects with handles
|
||||
#
|
||||
|
||||
|
||||
def reduce_handle(handle):
|
||||
if Popen.thread_is_spawning():
|
||||
return (None, Popen.duplicate_for_child(handle), True)
|
||||
dup_handle = duplicate(handle)
|
||||
_cache.add(dup_handle)
|
||||
sub_debug('reducing handle %d', handle)
|
||||
return (_get_listener().address, dup_handle, False)
|
||||
|
||||
|
||||
def rebuild_handle(pickled_data):
|
||||
address, handle, inherited = pickled_data
|
||||
if inherited:
|
||||
return handle
|
||||
sub_debug('rebuilding handle %d', handle)
|
||||
conn = Client(address, authkey=current_process().authkey)
|
||||
conn.send((handle, os.getpid()))
|
||||
new_handle = recv_handle(conn)
|
||||
conn.close()
|
||||
return new_handle
|
||||
|
||||
#
|
||||
# Register `_billiard.Connection` with `ForkingPickler`
|
||||
#
|
||||
|
||||
|
||||
def reduce_connection(conn):
|
||||
rh = reduce_handle(conn.fileno())
|
||||
return rebuild_connection, (rh, conn.readable, conn.writable)
|
||||
|
||||
|
||||
def rebuild_connection(reduced_handle, readable, writable):
|
||||
handle = rebuild_handle(reduced_handle)
|
||||
return _billiard.Connection(
|
||||
handle, readable=readable, writable=writable
|
||||
)
|
||||
|
||||
ForkingPickler.register(_billiard.Connection, reduce_connection)
|
||||
|
||||
#
|
||||
# Register `socket.socket` with `ForkingPickler`
|
||||
#
|
||||
|
||||
|
||||
def fromfd(fd, family, type_, proto=0):
|
||||
s = socket.fromfd(fd, family, type_, proto)
|
||||
if s.__class__ is not socket.socket:
|
||||
s = socket.socket(_sock=s)
|
||||
return s
|
||||
|
||||
|
||||
def reduce_socket(s):
|
||||
reduced_handle = reduce_handle(s.fileno())
|
||||
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
|
||||
|
||||
|
||||
def rebuild_socket(reduced_handle, family, type_, proto):
|
||||
fd = rebuild_handle(reduced_handle)
|
||||
_sock = fromfd(fd, family, type_, proto)
|
||||
close(fd)
|
||||
return _sock
|
||||
ForkingPickler.register(socket.socket, reduce_socket)
|
||||
|
||||
#
|
||||
# Register `_billiard.PipeConnection` with `ForkingPickler`
|
||||
#
|
||||
|
||||
if sys.platform == 'win32':
|
||||
|
||||
def reduce_pipe_connection(conn):
|
||||
rh = reduce_handle(conn.fileno())
|
||||
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
|
||||
|
||||
def rebuild_pipe_connection(reduced_handle, readable, writable):
|
||||
handle = rebuild_handle(reduced_handle)
|
||||
return _billiard.PipeConnection(
|
||||
handle, readable=readable, writable=writable
|
||||
)
|
||||
ForkingPickler.register(_billiard.PipeConnection, reduce_pipe_connection)
|
||||
244
awx/lib/site-packages/billiard/sharedctypes.py
Normal file
@@ -0,0 +1,244 @@
|
||||
#
|
||||
# Module which supports allocation of ctypes objects from shared memory
|
||||
#
|
||||
# multiprocessing/sharedctypes.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
import ctypes
|
||||
import weakref
|
||||
|
||||
from . import heap, RLock
|
||||
from .forking import assert_spawning, ForkingPickler
|
||||
|
||||
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
|
||||
|
||||
typecode_to_type = {
|
||||
'c': ctypes.c_char, 'u': ctypes.c_wchar,
|
||||
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
|
||||
'h': ctypes.c_short, 'H': ctypes.c_ushort,
|
||||
'i': ctypes.c_int, 'I': ctypes.c_uint,
|
||||
'l': ctypes.c_long, 'L': ctypes.c_ulong,
|
||||
'f': ctypes.c_float, 'd': ctypes.c_double
|
||||
}
|
||||
|
||||
|
||||
def _new_value(type_):
|
||||
size = ctypes.sizeof(type_)
|
||||
wrapper = heap.BufferWrapper(size)
|
||||
return rebuild_ctype(type_, wrapper, None)
|
||||
|
||||
|
||||
def RawValue(typecode_or_type, *args):
|
||||
'''
|
||||
Returns a ctypes object allocated from shared memory
|
||||
'''
|
||||
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
|
||||
obj = _new_value(type_)
|
||||
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
|
||||
obj.__init__(*args)
|
||||
return obj
|
||||
|
||||
|
||||
def RawArray(typecode_or_type, size_or_initializer):
|
||||
'''
|
||||
Returns a ctypes array allocated from shared memory
|
||||
'''
|
||||
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
|
||||
if isinstance(size_or_initializer, (int, long)):
|
||||
type_ = type_ * size_or_initializer
|
||||
obj = _new_value(type_)
|
||||
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
|
||||
return obj
|
||||
else:
|
||||
type_ = type_ * len(size_or_initializer)
|
||||
result = _new_value(type_)
|
||||
result.__init__(*size_or_initializer)
|
||||
return result
|
||||
|
||||
|
||||
def Value(typecode_or_type, *args, **kwds):
|
||||
'''
|
||||
Return a synchronization wrapper for a Value
|
||||
'''
|
||||
lock = kwds.pop('lock', None)
|
||||
if kwds:
|
||||
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
|
||||
obj = RawValue(typecode_or_type, *args)
|
||||
if lock is False:
|
||||
return obj
|
||||
if lock in (True, None):
|
||||
lock = RLock()
|
||||
if not hasattr(lock, 'acquire'):
|
||||
raise AttributeError("'%r' has no method 'acquire'" % lock)
|
||||
return synchronized(obj, lock)
|
||||
|
||||
|
||||
def Array(typecode_or_type, size_or_initializer, **kwds):
|
||||
'''
|
||||
Return a synchronization wrapper for a RawArray
|
||||
'''
|
||||
lock = kwds.pop('lock', None)
|
||||
if kwds:
|
||||
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
|
||||
obj = RawArray(typecode_or_type, size_or_initializer)
|
||||
if lock is False:
|
||||
return obj
|
||||
if lock in (True, None):
|
||||
lock = RLock()
|
||||
if not hasattr(lock, 'acquire'):
|
||||
raise AttributeError("'%r' has no method 'acquire'" % lock)
|
||||
return synchronized(obj, lock)
|
||||
|
||||
|
||||
def copy(obj):
|
||||
new_obj = _new_value(type(obj))
|
||||
ctypes.pointer(new_obj)[0] = obj
|
||||
return new_obj
|
||||
|
||||
|
||||
def synchronized(obj, lock=None):
|
||||
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
|
||||
|
||||
if isinstance(obj, ctypes._SimpleCData):
|
||||
return Synchronized(obj, lock)
|
||||
elif isinstance(obj, ctypes.Array):
|
||||
if obj._type_ is ctypes.c_char:
|
||||
return SynchronizedString(obj, lock)
|
||||
return SynchronizedArray(obj, lock)
|
||||
else:
|
||||
cls = type(obj)
|
||||
try:
|
||||
scls = class_cache[cls]
|
||||
except KeyError:
|
||||
names = [field[0] for field in cls._fields_]
|
||||
d = dict((name, make_property(name)) for name in names)
|
||||
classname = 'Synchronized' + cls.__name__
|
||||
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
|
||||
return scls(obj, lock)
|
||||
|
||||
#
|
||||
# Functions for pickling/unpickling
|
||||
#
|
||||
|
||||
|
||||
def reduce_ctype(obj):
|
||||
assert_spawning(obj)
|
||||
if isinstance(obj, ctypes.Array):
|
||||
return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
|
||||
else:
|
||||
return rebuild_ctype, (type(obj), obj._wrapper, None)
|
||||
|
||||
|
||||
def rebuild_ctype(type_, wrapper, length):
|
||||
if length is not None:
|
||||
type_ = type_ * length
|
||||
ForkingPickler.register(type_, reduce_ctype)
|
||||
obj = type_.from_address(wrapper.get_address())
|
||||
obj._wrapper = wrapper
|
||||
return obj
|
||||
|
||||
#
|
||||
# Function to create properties
|
||||
#
|
||||
|
||||
|
||||
def make_property(name):
|
||||
try:
|
||||
return prop_cache[name]
|
||||
except KeyError:
|
||||
d = {}
|
||||
exec(template % ((name, ) * 7), d)
|
||||
prop_cache[name] = d[name]
|
||||
return d[name]
|
||||
|
||||
template = '''
|
||||
def get%s(self):
|
||||
self.acquire()
|
||||
try:
|
||||
return self._obj.%s
|
||||
finally:
|
||||
self.release()
|
||||
def set%s(self, value):
|
||||
self.acquire()
|
||||
try:
|
||||
self._obj.%s = value
|
||||
finally:
|
||||
self.release()
|
||||
%s = property(get%s, set%s)
|
||||
'''
|
||||
|
||||
prop_cache = {}
|
||||
class_cache = weakref.WeakKeyDictionary()
|
||||
|
||||
#
|
||||
# Synchronized wrappers
|
||||
#
|
||||
|
||||
|
||||
class SynchronizedBase(object):
|
||||
|
||||
def __init__(self, obj, lock=None):
|
||||
self._obj = obj
|
||||
self._lock = lock or RLock()
|
||||
self.acquire = self._lock.acquire
|
||||
self.release = self._lock.release
|
||||
|
||||
def __reduce__(self):
|
||||
assert_spawning(self)
|
||||
return synchronized, (self._obj, self._lock)
|
||||
|
||||
def get_obj(self):
|
||||
return self._obj
|
||||
|
||||
def get_lock(self):
|
||||
return self._lock
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
|
||||
|
||||
|
||||
class Synchronized(SynchronizedBase):
|
||||
value = make_property('value')
|
||||
|
||||
|
||||
class SynchronizedArray(SynchronizedBase):
|
||||
|
||||
def __len__(self):
|
||||
return len(self._obj)
|
||||
|
||||
def __getitem__(self, i):
|
||||
self.acquire()
|
||||
try:
|
||||
return self._obj[i]
|
||||
finally:
|
||||
self.release()
|
||||
|
||||
def __setitem__(self, i, value):
|
||||
self.acquire()
|
||||
try:
|
||||
self._obj[i] = value
|
||||
finally:
|
||||
self.release()
|
||||
|
||||
def __getslice__(self, start, stop):
|
||||
self.acquire()
|
||||
try:
|
||||
return self._obj[start:stop]
|
||||
finally:
|
||||
self.release()
|
||||
|
||||
def __setslice__(self, start, stop, values):
|
||||
self.acquire()
|
||||
try:
|
||||
self._obj[start:stop] = values
|
||||
finally:
|
||||
self.release()
|
||||
|
||||
|
||||
class SynchronizedString(SynchronizedArray):
|
||||
value = make_property('value')
|
||||
raw = make_property('raw')
|
||||
446
awx/lib/site-packages/billiard/synchronize.py
Normal file
@@ -0,0 +1,446 @@
|
||||
#
|
||||
# Module implementing synchronization primitives
|
||||
#
|
||||
# multiprocessing/synchronize.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
__all__ = [
|
||||
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event',
|
||||
]
|
||||
|
||||
import itertools
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
|
||||
|
||||
from time import time as _time
|
||||
|
||||
from ._ext import _billiard, ensure_SemLock
|
||||
from .process import current_process
|
||||
from .util import Finalize, register_after_fork, debug
|
||||
from .forking import assert_spawning, Popen
|
||||
from .compat import bytes, closerange
|
||||
|
||||
# Try to import the mp.synchronize module cleanly; if it fails,
|
||||
# raise ImportError for platforms lacking a working sem_open implementation.
|
||||
# See issue 3770
|
||||
ensure_SemLock()
|
||||
|
||||
#
|
||||
# Constants
|
||||
#
|
||||
|
||||
RECURSIVE_MUTEX, SEMAPHORE = range(2)
|
||||
SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX
|
||||
|
||||
try:
|
||||
sem_unlink = _billiard.SemLock.sem_unlink
|
||||
except AttributeError:
|
||||
sem_unlink = None
|
||||
|
||||
#
|
||||
# Base class for semaphores and mutexes; wraps `_billiard.SemLock`
|
||||
#
|
||||
|
||||
|
||||
def _semname(sl):
|
||||
try:
|
||||
return sl.name
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
class SemLock(object):
|
||||
_counter = itertools.count()
|
||||
|
||||
def __init__(self, kind, value, maxvalue):
|
||||
from .forking import _forking_is_enabled
|
||||
unlink_immediately = _forking_is_enabled or sys.platform == 'win32'
|
||||
if sem_unlink:
|
||||
sl = self._semlock = _billiard.SemLock(
|
||||
kind, value, maxvalue, self._make_name(), unlink_immediately)
|
||||
else:
|
||||
sl = self._semlock = _billiard.SemLock(kind, value, maxvalue)
|
||||
|
||||
debug('created semlock with handle %s', sl.handle)
|
||||
self._make_methods()
|
||||
|
||||
if sem_unlink:
|
||||
|
||||
if sys.platform != 'win32':
|
||||
def _after_fork(obj):
|
||||
obj._semlock._after_fork()
|
||||
register_after_fork(self, _after_fork)
|
||||
|
||||
if _semname(self._semlock) is not None:
|
||||
# We only get here if we are on Unix with forking
|
||||
# disabled. When the object is garbage collected or the
|
||||
# process shuts down we unlink the semaphore name
|
||||
Finalize(self, sem_unlink, (self._semlock.name,),
|
||||
exitpriority=0)
|
||||
# In case of abnormal termination unlink semaphore name
|
||||
_cleanup_semaphore_if_leaked(self._semlock.name)
|
||||
|
||||
def _make_methods(self):
|
||||
self.acquire = self._semlock.acquire
|
||||
self.release = self._semlock.release
|
||||
|
||||
def __enter__(self):
|
||||
return self._semlock.__enter__()
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self._semlock.__exit__(*args)
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
sl = self._semlock
|
||||
state = (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)
|
||||
try:
|
||||
state += (sl.name, )
|
||||
except AttributeError:
|
||||
pass
|
||||
return state
|
||||
|
||||
def __setstate__(self, state):
|
||||
self._semlock = _billiard.SemLock._rebuild(*state)
|
||||
debug('recreated blocker with handle %r', state[0])
|
||||
self._make_methods()
|
||||
|
||||
@staticmethod
|
||||
def _make_name():
|
||||
return '/%s-%s-%s' % (current_process()._semprefix,
|
||||
os.getpid(), SemLock._counter.next())
|
||||
|
||||
|
||||
class Semaphore(SemLock):
|
||||
|
||||
def __init__(self, value=1):
|
||||
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
|
||||
|
||||
def get_value(self):
|
||||
return self._semlock._get_value()
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
value = self._semlock._get_value()
|
||||
except Exception:
|
||||
value = 'unknown'
|
||||
return '<Semaphore(value=%s)>' % value
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
|
||||
def __init__(self, value=1):
|
||||
SemLock.__init__(self, SEMAPHORE, value, value)
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
value = self._semlock._get_value()
|
||||
except Exception:
|
||||
value = 'unknown'
|
||||
return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
|
||||
(value, self._semlock.maxvalue)
|
||||
|
||||
|
||||
class Lock(SemLock):
|
||||
'''
|
||||
Non-recursive lock.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
SemLock.__init__(self, SEMAPHORE, 1, 1)
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
if self._semlock._is_mine():
|
||||
name = current_process().name
|
||||
if threading.currentThread().name != 'MainThread':
|
||||
name += '|' + threading.currentThread().name
|
||||
elif self._semlock._get_value() == 1:
|
||||
name = 'None'
|
||||
elif self._semlock._count() > 0:
|
||||
name = 'SomeOtherThread'
|
||||
else:
|
||||
name = 'SomeOtherProcess'
|
||||
except Exception:
|
||||
name = 'unknown'
|
||||
return '<Lock(owner=%s)>' % name
|
||||
|
||||
|
||||
class RLock(SemLock):
|
||||
'''
|
||||
Recursive lock
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
if self._semlock._is_mine():
|
||||
name = current_process().name
|
||||
if threading.currentThread().name != 'MainThread':
|
||||
name += '|' + threading.currentThread().name
|
||||
count = self._semlock._count()
|
||||
elif self._semlock._get_value() == 1:
|
||||
name, count = 'None', 0
|
||||
elif self._semlock._count() > 0:
|
||||
name, count = 'SomeOtherThread', 'nonzero'
|
||||
else:
|
||||
name, count = 'SomeOtherProcess', 'nonzero'
|
||||
except Exception:
|
||||
name, count = 'unknown', 'unknown'
|
||||
return '<RLock(%s, %s)>' % (name, count)
|
||||
|
||||
|
||||
class Condition(object):
|
||||
'''
|
||||
Condition variable
|
||||
'''
|
||||
|
||||
def __init__(self, lock=None):
|
||||
self._lock = lock or RLock()
|
||||
self._sleeping_count = Semaphore(0)
|
||||
self._woken_count = Semaphore(0)
|
||||
self._wait_semaphore = Semaphore(0)
|
||||
self._make_methods()
|
||||
|
||||
def __getstate__(self):
|
||||
assert_spawning(self)
|
||||
return (self._lock, self._sleeping_count,
|
||||
self._woken_count, self._wait_semaphore)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(self._lock, self._sleeping_count,
|
||||
self._woken_count, self._wait_semaphore) = state
|
||||
self._make_methods()
|
||||
|
||||
def __enter__(self):
|
||||
return self._lock.__enter__()
|
||||
|
||||
def __exit__(self, *args):
|
||||
return self._lock.__exit__(*args)
|
||||
|
||||
def _make_methods(self):
|
||||
self.acquire = self._lock.acquire
|
||||
self.release = self._lock.release
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
num_waiters = (self._sleeping_count._semlock._get_value() -
|
||||
self._woken_count._semlock._get_value())
|
||||
except Exception:
|
||||
            num_waiters = 'unknown'
|
||||
return '<Condition(%s, %s)>' % (self._lock, num_waiters)
|
||||
|
||||
def wait(self, timeout=None):
|
||||
assert self._lock._semlock._is_mine(), \
|
||||
'must acquire() condition before using wait()'
|
||||
|
||||
# indicate that this thread is going to sleep
|
||||
self._sleeping_count.release()
|
||||
|
||||
# release lock
|
||||
count = self._lock._semlock._count()
|
||||
for i in xrange(count):
|
||||
self._lock.release()
|
||||
|
||||
try:
|
||||
# wait for notification or timeout
|
||||
ret = self._wait_semaphore.acquire(True, timeout)
|
||||
finally:
|
||||
# indicate that this thread has woken
|
||||
self._woken_count.release()
|
||||
|
||||
# reacquire lock
|
||||
for i in xrange(count):
|
||||
self._lock.acquire()
|
||||
return ret
|
||||
|
||||
def notify(self):
|
||||
assert self._lock._semlock._is_mine(), 'lock is not owned'
|
||||
assert not self._wait_semaphore.acquire(False)
|
||||
|
||||
# to take account of timeouts since last notify() we subtract
|
||||
# woken_count from sleeping_count and rezero woken_count
|
||||
while self._woken_count.acquire(False):
|
||||
res = self._sleeping_count.acquire(False)
|
||||
assert res
|
||||
|
||||
if self._sleeping_count.acquire(False): # try grabbing a sleeper
|
||||
self._wait_semaphore.release() # wake up one sleeper
|
||||
self._woken_count.acquire() # wait for sleeper to wake
|
||||
|
||||
# rezero _wait_semaphore in case a timeout just happened
|
||||
self._wait_semaphore.acquire(False)
|
||||
|
||||
def notify_all(self):
|
||||
assert self._lock._semlock._is_mine(), 'lock is not owned'
|
||||
assert not self._wait_semaphore.acquire(False)
|
||||
|
||||
# to take account of timeouts since last notify*() we subtract
|
||||
# woken_count from sleeping_count and rezero woken_count
|
||||
while self._woken_count.acquire(False):
|
||||
res = self._sleeping_count.acquire(False)
|
||||
assert res
|
||||
|
||||
sleepers = 0
|
||||
while self._sleeping_count.acquire(False):
|
||||
self._wait_semaphore.release() # wake up one sleeper
|
||||
sleepers += 1
|
||||
|
||||
if sleepers:
|
||||
for i in xrange(sleepers):
|
||||
self._woken_count.acquire() # wait for a sleeper to wake
|
||||
|
||||
# rezero wait_semaphore in case some timeouts just happened
|
||||
while self._wait_semaphore.acquire(False):
|
||||
pass
|
||||
|
||||
def wait_for(self, predicate, timeout=None):
|
||||
result = predicate()
|
||||
if result:
|
||||
return result
|
||||
if timeout is not None:
|
||||
endtime = _time() + timeout
|
||||
else:
|
||||
endtime = None
|
||||
waittime = None
|
||||
while not result:
|
||||
if endtime is not None:
|
||||
waittime = endtime - _time()
|
||||
if waittime <= 0:
|
||||
break
|
||||
self.wait(waittime)
|
||||
result = predicate()
|
||||
return result
|
||||
|
||||
|
||||
class Event(object):
|
||||
|
||||
def __init__(self):
|
||||
self._cond = Condition(Lock())
|
||||
self._flag = Semaphore(0)
|
||||
|
||||
def is_set(self):
|
||||
self._cond.acquire()
|
||||
try:
|
||||
if self._flag.acquire(False):
|
||||
self._flag.release()
|
||||
return True
|
||||
return False
|
||||
finally:
|
||||
self._cond.release()
|
||||
|
||||
def set(self):
|
||||
self._cond.acquire()
|
||||
try:
|
||||
self._flag.acquire(False)
|
||||
self._flag.release()
|
||||
self._cond.notify_all()
|
||||
finally:
|
||||
self._cond.release()
|
||||
|
||||
def clear(self):
|
||||
self._cond.acquire()
|
||||
try:
|
||||
self._flag.acquire(False)
|
||||
finally:
|
||||
self._cond.release()
|
||||
|
||||
def wait(self, timeout=None):
|
||||
self._cond.acquire()
|
||||
try:
|
||||
if self._flag.acquire(False):
|
||||
self._flag.release()
|
||||
else:
|
||||
self._cond.wait(timeout)
|
||||
|
||||
if self._flag.acquire(False):
|
||||
self._flag.release()
|
||||
return True
|
||||
return False
|
||||
finally:
|
||||
self._cond.release()
|
||||
|
||||
|
||||
if sys.platform != 'win32':
|
||||
#
|
||||
# Protection against unlinked semaphores if the program ends abnormally
|
||||
# and forking has been disabled.
|
||||
#
|
||||
|
||||
def _cleanup_semaphore_if_leaked(name):
|
||||
name = name.encode('ascii') + bytes('\0', 'ascii')
|
||||
if len(name) > 512:
|
||||
# posix guarantees that writes to a pipe of less than PIPE_BUF
|
||||
# bytes are atomic, and that PIPE_BUF >= 512
|
||||
raise ValueError('name too long')
|
||||
fd = _get_unlinkfd()
|
||||
bits = os.write(fd, name)
|
||||
assert bits == len(name)
|
||||
|
||||
def _get_unlinkfd():
|
||||
cp = current_process()
|
||||
if cp._unlinkfd is None:
|
||||
r, w = os.pipe()
|
||||
pid = os.fork()
|
||||
if pid == 0:
|
||||
try:
|
||||
from setproctitle import setproctitle
|
||||
setproctitle("[sem_cleanup for %r]" % cp.pid)
|
||||
except:
|
||||
pass
|
||||
|
||||
# Fork a process which will survive until all other processes
|
||||
# which have a copy of the write end of the pipe have exited.
|
||||
# The forked process just collects names of semaphores until
|
||||
# EOF is indicated. Then it tries unlinking all the names it
|
||||
# has collected.
|
||||
_collect_names_then_unlink(r)
|
||||
os._exit(0)
|
||||
os.close(r)
|
||||
cp._unlinkfd = w
|
||||
return cp._unlinkfd
|
||||
|
||||
def _collect_names_then_unlink(r):
|
||||
# protect the process from ^C and "killall python" etc
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
|
||||
# close all fds except r
|
||||
try:
|
||||
MAXFD = os.sysconf("SC_OPEN_MAX")
|
||||
except:
|
||||
MAXFD = 256
|
||||
closerange(0, r)
|
||||
closerange(r + 1, MAXFD)
|
||||
|
||||
# collect data written to pipe
|
||||
data = []
|
||||
while 1:
|
||||
try:
|
||||
s = os.read(r, 512)
|
||||
except:
|
||||
# XXX IO lock might be held at fork, so don't try
|
||||
# printing unexpected exception - see issue 6721
|
||||
pass
|
||||
else:
|
||||
if not s:
|
||||
break
|
||||
data.append(s)
|
||||
|
||||
# attempt to unlink each collected name
|
||||
for name in bytes('', 'ascii').join(data).split(bytes('\0', 'ascii')):
|
||||
try:
|
||||
sem_unlink(name.decode('ascii'))
|
||||
except:
|
||||
# XXX IO lock might be held at fork, so don't try
|
||||
# printing unexpected exception - see issue 6721
|
||||
pass
|
||||
18
awx/lib/site-packages/billiard/tests/__init__.py
Normal file
@@ -0,0 +1,18 @@
from __future__ import absolute_import

import atexit


def teardown():
    # Workaround for multiprocessing bug where logging
    # is attempted after global already collected at shutdown.
    cancelled = set()
    try:
        import multiprocessing.util
        cancelled.add(multiprocessing.util._exit_function)
    except (AttributeError, ImportError):
        pass

    atexit._exithandlers[:] = [
        e for e in atexit._exithandlers if e[0] not in cancelled
    ]
85
awx/lib/site-packages/billiard/tests/compat.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
class WarningMessage(object):
|
||||
|
||||
"""Holds the result of a single showwarning() call."""
|
||||
|
||||
_WARNING_DETAILS = ('message', 'category', 'filename', 'lineno', 'file',
|
||||
'line')
|
||||
|
||||
def __init__(self, message, category, filename, lineno, file=None,
|
||||
line=None):
|
||||
local_values = locals()
|
||||
for attr in self._WARNING_DETAILS:
|
||||
setattr(self, attr, local_values[attr])
|
||||
|
||||
self._category_name = category and category.__name__ or None
|
||||
|
||||
def __str__(self):
|
||||
return ('{message : %r, category : %r, filename : %r, lineno : %s, '
|
||||
'line : %r}' % (self.message, self._category_name,
|
||||
self.filename, self.lineno, self.line))
|
||||
|
||||
|
||||
class catch_warnings(object):
|
||||
|
||||
"""A context manager that copies and restores the warnings filter upon
|
||||
exiting the context.
|
||||
|
||||
The 'record' argument specifies whether warnings should be captured by a
|
||||
custom implementation of warnings.showwarning() and be appended to a list
|
||||
returned by the context manager. Otherwise None is returned by the context
|
||||
manager. The objects appended to the list are arguments whose attributes
|
||||
mirror the arguments to showwarning().
|
||||
|
||||
The 'module' argument is to specify an alternative module to the module
|
||||
named 'warnings' and imported under that name. This argument is only
|
||||
useful when testing the warnings module itself.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, record=False, module=None):
|
||||
"""Specify whether to record warnings and if an alternative module
|
||||
should be used other than sys.modules['warnings'].
|
||||
|
||||
For compatibility with Python 3.0, please consider all arguments to be
|
||||
keyword-only.
|
||||
|
||||
"""
|
||||
self._record = record
|
||||
self._module = module is None and sys.modules['warnings'] or module
|
||||
self._entered = False
|
||||
|
||||
def __repr__(self):
|
||||
args = []
|
||||
if self._record:
|
||||
args.append('record=True')
|
||||
if self._module is not sys.modules['warnings']:
|
||||
args.append('module=%r' % self._module)
|
||||
name = type(self).__name__
|
||||
return '%s(%s)' % (name, ', '.join(args))
|
||||
|
||||
def __enter__(self):
|
||||
if self._entered:
|
||||
raise RuntimeError('Cannot enter %r twice' % self)
|
||||
self._entered = True
|
||||
self._filters = self._module.filters
|
||||
self._module.filters = self._filters[:]
|
||||
self._showwarning = self._module.showwarning
|
||||
if self._record:
|
||||
log = []
|
||||
|
||||
def showwarning(*args, **kwargs):
|
||||
log.append(WarningMessage(*args, **kwargs))
|
||||
|
||||
self._module.showwarning = showwarning
|
||||
return log
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
if not self._entered:
|
||||
raise RuntimeError('Cannot exit %r without entering first' % self)
|
||||
self._module.filters = self._filters
|
||||
self._module.showwarning = self._showwarning
|
||||
98
awx/lib/site-packages/billiard/tests/test_common.py
Normal file
@@ -0,0 +1,98 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
import signal
|
||||
|
||||
from contextlib import contextmanager
|
||||
from mock import call, patch, Mock
|
||||
from time import time
|
||||
|
||||
from billiard.common import (
|
||||
_shutdown_cleanup,
|
||||
reset_signals,
|
||||
restart_state,
|
||||
)
|
||||
|
||||
from .utils import Case
|
||||
|
||||
|
||||
def signo(name):
|
||||
return getattr(signal, name)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def termsigs(*sigs):
|
||||
from billiard import common
|
||||
prev, common.TERMSIGS = common.TERMSIGS, sigs
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
common.TERMSIGS = prev
|
||||
|
||||
|
||||
class test_reset_signals(Case):
|
||||
|
||||
def test_shutdown_handler(self):
|
||||
with patch('sys.exit') as exit:
|
||||
_shutdown_cleanup(15, Mock())
|
||||
self.assertTrue(exit.called)
|
||||
self.assertEqual(os.WTERMSIG(exit.call_args[0][0]), 15)
|
||||
|
||||
def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']):
|
||||
with self.assert_context(sigs, signal.SIG_IGN) as (_, SET):
|
||||
self.assertFalse(SET.called)
|
||||
|
||||
def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']):
|
||||
with self.assert_context(sigs, None) as (_, SET):
|
||||
self.assertFalse(SET.called)
|
||||
|
||||
def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
|
||||
with self.assert_context(sigs, signal.SIG_DFL) as (_, SET):
|
||||
SET.assert_has_calls([
|
||||
call(signo(sig), _shutdown_cleanup) for sig in sigs
|
||||
])
|
||||
|
||||
def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
|
||||
with self.assert_context(sigs, object()) as (_, SET):
|
||||
SET.assert_has_calls([
|
||||
call(signo(sig), _shutdown_cleanup) for sig in sigs
|
||||
])
|
||||
|
||||
def test_handles_errors(self, sigs=['SIGTERM']):
|
||||
for exc in (OSError(), AttributeError(),
|
||||
ValueError(), RuntimeError()):
|
||||
with self.assert_context(sigs, signal.SIG_DFL, exc) as (_, SET):
|
||||
self.assertTrue(SET.called)
|
||||
|
||||
@contextmanager
|
||||
def assert_context(self, sigs, get_returns=None, set_effect=None):
|
||||
with termsigs(*sigs):
|
||||
with patch('signal.getsignal') as GET:
|
||||
with patch('signal.signal') as SET:
|
||||
GET.return_value = get_returns
|
||||
SET.side_effect = set_effect
|
||||
reset_signals()
|
||||
GET.assert_has_calls([
|
||||
call(signo(sig)) for sig in sigs
|
||||
])
|
||||
yield GET, SET
|
||||
|
||||
|
||||
class test_restart_state(Case):
|
||||
|
||||
def test_raises(self):
|
||||
s = restart_state(100, 1) # max 100 restarts in 1 second.
|
||||
s.R = 99
|
||||
s.step()
|
||||
with self.assertRaises(s.RestartFreqExceeded):
|
||||
s.step()
|
||||
|
||||
def test_time_passed_resets_counter(self):
|
||||
s = restart_state(100, 10)
|
||||
s.R, s.T = 100, time()
|
||||
with self.assertRaises(s.RestartFreqExceeded):
|
||||
s.step()
|
||||
s.R, s.T = 100, time()
|
||||
s.step(time() + 20)
|
||||
self.assertEqual(s.R, 1)
|
||||
12
awx/lib/site-packages/billiard/tests/test_package.py
Normal file
@@ -0,0 +1,12 @@
from __future__ import absolute_import

import billiard

from .utils import Case


class test_billiard(Case):

    def test_has_version(self):
        self.assertTrue(billiard.__version__)
        self.assertIsInstance(billiard.__version__, str)
144
awx/lib/site-packages/billiard/tests/utils.py
Normal file
@@ -0,0 +1,144 @@
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
try:
|
||||
import unittest # noqa
|
||||
unittest.skip
|
||||
from unittest.util import safe_repr, unorderable_list_difference
|
||||
except AttributeError:
|
||||
import unittest2 as unittest # noqa
|
||||
from unittest2.util import safe_repr, unorderable_list_difference # noqa
|
||||
|
||||
from .compat import catch_warnings
|
||||
|
||||
# -- adds assertWarns from recent unittest2, not in Python 2.7.
|
||||
|
||||
|
||||
class _AssertRaisesBaseContext(object):
|
||||
|
||||
def __init__(self, expected, test_case, callable_obj=None,
|
||||
expected_regex=None):
|
||||
self.expected = expected
|
||||
self.failureException = test_case.failureException
|
||||
self.obj_name = None
|
||||
if isinstance(expected_regex, basestring):
|
||||
expected_regex = re.compile(expected_regex)
|
||||
self.expected_regex = expected_regex
|
||||
|
||||
|
||||
class _AssertWarnsContext(_AssertRaisesBaseContext):
|
||||
"""A context manager used to implement TestCase.assertWarns* methods."""
|
||||
|
||||
def __enter__(self):
|
||||
# The __warningregistry__'s need to be in a pristine state for tests
|
||||
# to work properly.
|
||||
warnings.resetwarnings()
|
||||
for v in sys.modules.values():
|
||||
if getattr(v, '__warningregistry__', None):
|
||||
v.__warningregistry__ = {}
|
||||
self.warnings_manager = catch_warnings(record=True)
|
||||
self.warnings = self.warnings_manager.__enter__()
|
||||
warnings.simplefilter('always', self.expected)
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
self.warnings_manager.__exit__(exc_type, exc_value, tb)
|
||||
if exc_type is not None:
|
||||
# let unexpected exceptions pass through
|
||||
return
|
||||
try:
|
||||
exc_name = self.expected.__name__
|
||||
except AttributeError:
|
||||
exc_name = str(self.expected)
|
||||
first_matching = None
|
||||
for m in self.warnings:
|
||||
w = m.message
|
||||
if not isinstance(w, self.expected):
|
||||
continue
|
||||
if first_matching is None:
|
||||
first_matching = w
|
||||
if (self.expected_regex is not None and
|
||||
not self.expected_regex.search(str(w))):
|
||||
continue
|
||||
# store warning for later retrieval
|
||||
self.warning = w
|
||||
self.filename = m.filename
|
||||
self.lineno = m.lineno
|
||||
return
|
||||
# Now we simply try to choose a helpful failure message
|
||||
if first_matching is not None:
|
||||
raise self.failureException(
|
||||
'%r does not match %r' % (
|
||||
self.expected_regex.pattern, str(first_matching)))
|
||||
if self.obj_name:
|
||||
raise self.failureException(
|
||||
'%s not triggered by %s' % (exc_name, self.obj_name))
|
||||
else:
|
||||
raise self.failureException('%s not triggered' % exc_name)
|
||||
|
||||
|
||||
class Case(unittest.TestCase):
|
||||
|
||||
def assertWarns(self, expected_warning):
|
||||
return _AssertWarnsContext(expected_warning, self, None)
|
||||
|
||||
def assertWarnsRegex(self, expected_warning, expected_regex):
|
||||
return _AssertWarnsContext(expected_warning, self,
|
||||
None, expected_regex)
|
||||
|
||||
def assertDictContainsSubset(self, expected, actual, msg=None):
|
||||
missing, mismatched = [], []
|
||||
|
||||
for key, value in expected.iteritems():
|
||||
if key not in actual:
|
||||
missing.append(key)
|
||||
elif value != actual[key]:
|
||||
mismatched.append('%s, expected: %s, actual: %s' % (
|
||||
safe_repr(key), safe_repr(value),
|
||||
safe_repr(actual[key])))
|
||||
|
||||
if not (missing or mismatched):
|
||||
return
|
||||
|
||||
standard_msg = ''
|
||||
if missing:
|
||||
standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing))
|
||||
|
||||
if mismatched:
|
||||
if standard_msg:
|
||||
standard_msg += '; '
|
||||
standard_msg += 'Mismatched values: %s' % (
|
||||
','.join(mismatched))
|
||||
|
||||
self.fail(self._formatMessage(msg, standard_msg))
|
||||
|
||||
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
|
||||
missing = unexpected = None
|
||||
try:
|
||||
expected = sorted(expected_seq)
|
||||
actual = sorted(actual_seq)
|
||||
except TypeError:
|
||||
# Unsortable items (example: set(), complex(), ...)
|
||||
expected = list(expected_seq)
|
||||
actual = list(actual_seq)
|
||||
missing, unexpected = unorderable_list_difference(
|
||||
expected, actual)
|
||||
else:
|
||||
return self.assertSequenceEqual(expected, actual, msg=msg)
|
||||
|
||||
errors = []
|
||||
if missing:
|
||||
errors.append(
|
||||
'Expected, but missing:\n %s' % (safe_repr(missing), ),
|
||||
)
|
||||
if unexpected:
|
||||
errors.append(
|
||||
'Unexpected, but present:\n %s' % (safe_repr(unexpected), ),
|
||||
)
|
||||
if errors:
|
||||
standardMsg = '\n'.join(errors)
|
||||
self.fail(self._formatMessage(msg, standardMsg))
|
||||
339
awx/lib/site-packages/billiard/util.py
Normal file
@@ -0,0 +1,339 @@
|
||||
#
|
||||
# Module providing various facilities to other parts of the package
|
||||
#
|
||||
# billiard/util.py
|
||||
#
|
||||
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
#
|
||||
from __future__ import absolute_import
|
||||
|
||||
import errno
|
||||
import functools
|
||||
import itertools
|
||||
import weakref
|
||||
import atexit
|
||||
import shutil
|
||||
import tempfile
|
||||
import threading # we want threading to install its
|
||||
# cleanup function before multiprocessing does
|
||||
|
||||
from .compat import get_errno
|
||||
from .process import current_process, active_children
|
||||
|
||||
__all__ = [
|
||||
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
|
||||
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
|
||||
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
|
||||
'SUBDEBUG', 'SUBWARNING',
|
||||
]
|
||||
|
||||
#
|
||||
# Logging
|
||||
#
|
||||
|
||||
NOTSET = 0
|
||||
SUBDEBUG = 5
|
||||
DEBUG = 10
|
||||
INFO = 20
|
||||
SUBWARNING = 25
|
||||
ERROR = 40
|
||||
|
||||
LOGGER_NAME = 'multiprocessing'
|
||||
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
|
||||
|
||||
_logger = None
|
||||
_log_to_stderr = False
|
||||
|
||||
#: Support for reinitialization of objects when bootstrapping a child process
|
||||
_afterfork_registry = weakref.WeakValueDictionary()
|
||||
_afterfork_counter = itertools.count()
|
||||
|
||||
#: Finalization using weakrefs
|
||||
_finalizer_registry = {}
|
||||
_finalizer_counter = itertools.count()
|
||||
|
||||
#: set to true if the process is shutting down.
|
||||
_exiting = False
|
||||
|
||||
|
||||
def sub_debug(msg, *args, **kwargs):
|
||||
if _logger:
|
||||
_logger.log(SUBDEBUG, msg, *args, **kwargs)
|
||||
|
||||
|
||||
def debug(msg, *args, **kwargs):
|
||||
if _logger:
|
||||
_logger.log(DEBUG, msg, *args, **kwargs)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def info(msg, *args, **kwargs):
|
||||
if _logger:
|
||||
_logger.log(INFO, msg, *args, **kwargs)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def sub_warning(msg, *args, **kwargs):
|
||||
if _logger:
|
||||
_logger.log(SUBWARNING, msg, *args, **kwargs)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def error(msg, *args, **kwargs):
|
||||
if _logger:
|
||||
_logger.log(ERROR, msg, *args, **kwargs)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def get_logger():
|
||||
'''
|
||||
Returns logger used by multiprocessing
|
||||
'''
|
||||
global _logger
|
||||
import logging
|
||||
|
||||
logging._acquireLock()
|
||||
try:
|
||||
if not _logger:
|
||||
|
||||
_logger = logging.getLogger(LOGGER_NAME)
|
||||
_logger.propagate = 0
|
||||
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
|
||||
logging.addLevelName(SUBWARNING, 'SUBWARNING')
|
||||
|
||||
# XXX multiprocessing should cleanup before logging
|
||||
if hasattr(atexit, 'unregister'):
|
||||
atexit.unregister(_exit_function)
|
||||
atexit.register(_exit_function)
|
||||
else:
|
||||
atexit._exithandlers.remove((_exit_function, (), {}))
|
||||
atexit._exithandlers.append((_exit_function, (), {}))
|
||||
finally:
|
||||
logging._releaseLock()
|
||||
|
||||
return _logger
|
||||
|
||||
|
||||
def log_to_stderr(level=None):
|
||||
'''
|
||||
Turn on logging and add a handler which prints to stderr
|
||||
'''
|
||||
global _log_to_stderr
|
||||
import logging
|
||||
|
||||
logger = get_logger()
|
||||
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
if level:
|
||||
logger.setLevel(level)
|
||||
_log_to_stderr = True
|
||||
return _logger
|
||||
|
||||
|
||||
def get_temp_dir():
|
||||
'''
|
||||
Function returning a temp directory which will be removed on exit
|
||||
'''
|
||||
# get name of a temp directory which will be automatically cleaned up
|
||||
if current_process()._tempdir is None:
|
||||
tempdir = tempfile.mkdtemp(prefix='pymp-')
|
||||
info('created temp directory %s', tempdir)
|
||||
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
|
||||
current_process()._tempdir = tempdir
|
||||
return current_process()._tempdir
|
||||
|
||||
|
||||
def _run_after_forkers():
|
||||
items = list(_afterfork_registry.items())
|
||||
items.sort()
|
||||
for (index, ident, func), obj in items:
|
||||
try:
|
||||
func(obj)
|
||||
except Exception, e:
|
||||
info('after forker raised exception %s', e)
|
||||
|
||||
|
||||
def register_after_fork(obj, func):
|
||||
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
|
||||
|
||||
|
||||
class Finalize(object):
|
||||
'''
|
||||
Class which supports object finalization using weakrefs
|
||||
'''
|
||||
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
|
||||
assert exitpriority is None or type(exitpriority) is int
|
||||
|
||||
if obj is not None:
|
||||
self._weakref = weakref.ref(obj, self)
|
||||
else:
|
||||
assert exitpriority is not None
|
||||
|
||||
self._callback = callback
|
||||
self._args = args
|
||||
self._kwargs = kwargs or {}
|
||||
self._key = (exitpriority, _finalizer_counter.next())
|
||||
|
||||
_finalizer_registry[self._key] = self
|
||||
|
||||
def __call__(self, wr=None,
|
||||
# Need to bind these locally because the globals
|
||||
# could've been cleared at shutdown
|
||||
_finalizer_registry=_finalizer_registry,
|
||||
sub_debug=sub_debug):
|
||||
'''
|
||||
Run the callback unless it has already been called or cancelled
|
||||
'''
|
||||
try:
|
||||
del _finalizer_registry[self._key]
|
||||
except KeyError:
|
||||
sub_debug('finalizer no longer registered')
|
||||
else:
|
||||
sub_debug(
|
||||
'finalizer calling %s with args %s and kwargs %s',
|
||||
self._callback, self._args, self._kwargs,
|
||||
)
|
||||
res = self._callback(*self._args, **self._kwargs)
|
||||
self._weakref = self._callback = self._args = \
|
||||
self._kwargs = self._key = None
|
||||
return res
|
||||
|
||||
def cancel(self):
|
||||
'''
|
||||
Cancel finalization of the object
|
||||
'''
|
||||
try:
|
||||
del _finalizer_registry[self._key]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
self._weakref = self._callback = self._args = \
|
||||
self._kwargs = self._key = None
|
||||
|
||||
def still_active(self):
|
||||
'''
|
||||
Return whether this finalizer is still waiting to invoke callback
|
||||
'''
|
||||
return self._key in _finalizer_registry
|
||||
|
||||
def __repr__(self):
|
||||
try:
|
||||
obj = self._weakref()
|
||||
except (AttributeError, TypeError):
|
||||
obj = None
|
||||
|
||||
if obj is None:
|
||||
return '<Finalize object, dead>'
|
||||
|
||||
x = '<Finalize object, callback=%s' % \
|
||||
getattr(self._callback, '__name__', self._callback)
|
||||
if self._args:
|
||||
x += ', args=' + str(self._args)
|
||||
if self._kwargs:
|
||||
x += ', kwargs=' + str(self._kwargs)
|
||||
if self._key[0] is not None:
|
||||
x += ', exitprority=' + str(self._key[0])
|
||||
return x + '>'
|
||||
|
||||
|
||||
def _run_finalizers(minpriority=None):
|
||||
'''
|
||||
Run all finalizers whose exit priority is not None and at least minpriority
|
||||
|
||||
Finalizers with highest priority are called first; finalizers with
|
||||
the same priority will be called in reverse order of creation.
|
||||
'''
|
||||
if minpriority is None:
|
||||
f = lambda p: p[0][0] is not None
|
||||
else:
|
||||
f = lambda p: p[0][0] is not None and p[0][0] >= minpriority
|
||||
|
||||
items = [x for x in _finalizer_registry.items() if f(x)]
|
||||
items.sort(reverse=True)
|
||||
|
||||
for key, finalizer in items:
|
||||
sub_debug('calling %s', finalizer)
|
||||
try:
|
||||
finalizer()
|
||||
except Exception:
|
||||
if not error("Error calling finalizer %r", finalizer,
|
||||
exc_info=True):
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
if minpriority is None:
|
||||
_finalizer_registry.clear()
|
||||
|
||||
|
||||
def is_exiting():
|
||||
'''
|
||||
Returns true if the process is shutting down
|
||||
'''
|
||||
return _exiting or _exiting is None
|
||||
|
||||
|
||||
def _exit_function():
|
||||
'''
|
||||
Clean up on exit
|
||||
'''
|
||||
|
||||
global _exiting
|
||||
|
||||
info('process shutting down')
|
||||
debug('running all "atexit" finalizers with priority >= 0')
|
||||
_run_finalizers(0)
|
||||
|
||||
for p in active_children():
|
||||
if p._daemonic:
|
||||
info('calling terminate() for daemon %s', p.name)
|
||||
p._popen.terminate()
|
||||
|
||||
for p in active_children():
|
||||
info('calling join() for process %s', p.name)
|
||||
p.join()
|
||||
|
||||
debug('running the remaining "atexit" finalizers')
|
||||
_run_finalizers()
|
||||
atexit.register(_exit_function)
|
||||
|
||||
|
||||
class ForkAwareThreadLock(object):
|
||||
|
||||
def __init__(self):
|
||||
self._lock = threading.Lock()
|
||||
self.acquire = self._lock.acquire
|
||||
self.release = self._lock.release
|
||||
register_after_fork(self, ForkAwareThreadLock.__init__)
|
||||
|
||||
|
||||
class ForkAwareLocal(threading.local):
|
||||
|
||||
def __init__(self):
|
||||
register_after_fork(self, lambda obj: obj.__dict__.clear())
|
||||
|
||||
def __reduce__(self):
|
||||
return type(self), ()
|
||||
|
||||
|
||||
def _eintr_retry(func):
|
||||
'''
|
||||
Automatic retry after EINTR.
|
||||
'''
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapped(*args, **kwargs):
|
||||
while 1:
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except OSError, exc:
|
||||
if get_errno(exc) != errno.EINTR:
|
||||
raise
|
||||
return wrapped
|
||||
203
awx/lib/site-packages/celery/__compat__.py
Normal file
@@ -0,0 +1,203 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.__compat__
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module contains utilities to dynamically
|
||||
recreate modules, either for lazy loading or
|
||||
to create old modules at runtime instead of
|
||||
having them litter the source tree.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import operator
|
||||
import sys
|
||||
|
||||
from functools import reduce
|
||||
from importlib import import_module
|
||||
from types import ModuleType
|
||||
|
||||
from .local import Proxy
|
||||
|
||||
MODULE_DEPRECATED = """
|
||||
The module %s is deprecated and will be removed in a future version.
|
||||
"""
|
||||
|
||||
DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])
|
||||
|
||||
# im_func is no longer available in Py3.
|
||||
# instead the unbound method itself can be used.
|
||||
if sys.version_info[0] == 3: # pragma: no cover
|
||||
def fun_of_method(method):
|
||||
return method
|
||||
else:
|
||||
def fun_of_method(method): # noqa
|
||||
return method.im_func
|
||||
|
||||
|
||||
def getappattr(path):
|
||||
"""Gets attribute from the current_app recursively,
|
||||
    e.g. ``getappattr('amqp.get_task_consumer')``."""
|
||||
from celery import current_app
|
||||
return current_app._rgetattr(path)
|
||||
|
||||
|
||||
def _compat_task_decorator(*args, **kwargs):
|
||||
from celery import current_app
|
||||
kwargs.setdefault('accept_magic_kwargs', True)
|
||||
return current_app.task(*args, **kwargs)
|
||||
|
||||
|
||||
def _compat_periodic_task_decorator(*args, **kwargs):
|
||||
from celery.task import periodic_task
|
||||
kwargs.setdefault('accept_magic_kwargs', True)
|
||||
return periodic_task(*args, **kwargs)
|
||||
|
||||
|
||||
COMPAT_MODULES = {
|
||||
'celery': {
|
||||
'execute': {
|
||||
'send_task': 'send_task',
|
||||
},
|
||||
'decorators': {
|
||||
'task': _compat_task_decorator,
|
||||
'periodic_task': _compat_periodic_task_decorator,
|
||||
},
|
||||
'log': {
|
||||
'get_default_logger': 'log.get_default_logger',
|
||||
'setup_logger': 'log.setup_logger',
|
||||
'setup_loggig_subsystem': 'log.setup_logging_subsystem',
|
||||
'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
|
||||
},
|
||||
'messaging': {
|
||||
'TaskPublisher': 'amqp.TaskPublisher',
|
||||
'TaskConsumer': 'amqp.TaskConsumer',
|
||||
'establish_connection': 'connection',
|
||||
'with_connection': 'with_default_connection',
|
||||
'get_consumer_set': 'amqp.TaskConsumer',
|
||||
},
|
||||
'registry': {
|
||||
'tasks': 'tasks',
|
||||
},
|
||||
},
|
||||
'celery.task': {
|
||||
'control': {
|
||||
'broadcast': 'control.broadcast',
|
||||
'rate_limit': 'control.rate_limit',
|
||||
'time_limit': 'control.time_limit',
|
||||
'ping': 'control.ping',
|
||||
'revoke': 'control.revoke',
|
||||
'discard_all': 'control.purge',
|
||||
'inspect': 'control.inspect',
|
||||
},
|
||||
'schedules': 'celery.schedules',
|
||||
'chords': 'celery.canvas',
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class class_property(object):
|
||||
|
||||
def __init__(self, fget=None, fset=None):
|
||||
assert fget and isinstance(fget, classmethod)
|
||||
assert isinstance(fset, classmethod) if fset else True
|
||||
self.__get = fget
|
||||
self.__set = fset
|
||||
|
||||
info = fget.__get__(object) # just need the info attrs.
|
||||
self.__doc__ = info.__doc__
|
||||
self.__name__ = info.__name__
|
||||
self.__module__ = info.__module__
|
||||
|
||||
def __get__(self, obj, type=None):
|
||||
if obj and type is None:
|
||||
type = obj.__class__
|
||||
return self.__get.__get__(obj, type)()
|
||||
|
||||
def __set__(self, obj, value):
|
||||
if obj is None:
|
||||
return self
|
||||
return self.__set.__get__(obj)(value)
|
||||
|
||||
|
||||
def reclassmethod(method):
|
||||
return classmethod(fun_of_method(method))
|
||||
|
||||
|
||||
class MagicModule(ModuleType):
|
||||
_compat_modules = ()
|
||||
_all_by_module = {}
|
||||
_direct = {}
|
||||
_object_origins = {}
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in self._object_origins:
|
||||
module = __import__(self._object_origins[name], None, None, [name])
|
||||
for item in self._all_by_module[module.__name__]:
|
||||
setattr(self, item, getattr(module, item))
|
||||
return getattr(module, name)
|
||||
elif name in self._direct:
|
||||
module = __import__(self._direct[name], None, None, [name])
|
||||
setattr(self, name, module)
|
||||
return module
|
||||
return ModuleType.__getattribute__(self, name)
|
||||
|
||||
def __dir__(self):
|
||||
return list(set(self.__all__) | DEFAULT_ATTRS)
|
||||
|
||||
|
||||
def create_module(name, attrs, cls_attrs=None, pkg=None,
|
||||
base=MagicModule, prepare_attr=None):
|
||||
fqdn = '.'.join([pkg.__name__, name]) if pkg else name
|
||||
cls_attrs = {} if cls_attrs is None else cls_attrs
|
||||
|
||||
attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr)
|
||||
for attr_name, attr in attrs.iteritems())
|
||||
module = sys.modules[fqdn] = type(name, (base, ), cls_attrs)(fqdn)
|
||||
module.__dict__.update(attrs)
|
||||
return module
|
||||
|
||||
|
||||
def recreate_module(name, compat_modules=(), by_module={}, direct={},
|
||||
base=MagicModule, **attrs):
|
||||
old_module = sys.modules[name]
|
||||
origins = get_origins(by_module)
|
||||
compat_modules = COMPAT_MODULES.get(name, ())
|
||||
|
||||
cattrs = dict(
|
||||
_compat_modules=compat_modules,
|
||||
_all_by_module=by_module, _direct=direct,
|
||||
_object_origins=origins,
|
||||
__all__=tuple(set(reduce(
|
||||
operator.add,
|
||||
[tuple(v) for v in [compat_modules, origins, direct, attrs]],
|
||||
))),
|
||||
)
|
||||
new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
|
||||
new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
|
||||
for mod in compat_modules))
|
||||
return old_module, new_module
|
||||
|
||||
|
||||
def get_compat_module(pkg, name):
|
||||
|
||||
def prepare(attr):
|
||||
if isinstance(attr, basestring):
|
||||
return Proxy(getappattr, (attr, ))
|
||||
return attr
|
||||
|
||||
attrs = COMPAT_MODULES[pkg.__name__][name]
|
||||
if isinstance(attrs, basestring):
|
||||
fqdn = '.'.join([pkg.__name__, name])
|
||||
module = sys.modules[fqdn] = import_module(attrs)
|
||||
return module
|
||||
attrs['__all__'] = list(attrs)
|
||||
return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)
|
||||
|
||||
|
||||
def get_origins(defs):
|
||||
origins = {}
|
||||
for module, items in defs.iteritems():
|
||||
origins.update(dict((item, module) for item in items))
|
||||
return origins
|
||||
61
awx/lib/site-packages/celery/__init__.py
Normal file
@@ -0,0 +1,61 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Distributed Task Queue"""
|
||||
# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors,
|
||||
# All rights reserved.
|
||||
# :copyright: (c) 2012 VMware, Inc., All rights reserved.
|
||||
# :license: BSD (3 Clause), see LICENSE for more details.
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
SERIES = 'Chiastic Slide'
|
||||
VERSION = (3, 0, 19)
|
||||
__version__ = '.'.join(str(p) for p in VERSION[0:3]) + ''.join(VERSION[3:])
|
||||
__author__ = 'Ask Solem'
|
||||
__contact__ = 'ask@celeryproject.org'
|
||||
__homepage__ = 'http://celeryproject.org'
|
||||
__docformat__ = 'restructuredtext'
|
||||
__all__ = [
|
||||
'Celery', 'bugreport', 'shared_task', 'Task',
|
||||
'current_app', 'current_task',
|
||||
'chain', 'chord', 'chunks', 'group', 'subtask',
|
||||
'xmap', 'xstarmap', 'uuid', 'VERSION', '__version__',
|
||||
]
|
||||
VERSION_BANNER = '%s (%s)' % (__version__, SERIES)
|
||||
|
||||
# -eof meta-
|
||||
|
||||
STATICA_HACK = True
|
||||
globals()['kcah_acitats'[::-1].upper()] = False
|
||||
if STATICA_HACK:
|
||||
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
|
||||
# pylint, etc.) into knowing the types of these symbols, and what
|
||||
# they contain.
|
||||
from celery.app.base import Celery
|
||||
from celery.app.utils import bugreport
|
||||
from celery.app.task import Task
|
||||
from celery._state import current_app, current_task
|
||||
from celery.canvas import (
|
||||
chain, chord, chunks, group, subtask, xmap, xstarmap,
|
||||
)
|
||||
from celery.utils import uuid
|
||||
|
||||
# Lazy loading
|
||||
from .__compat__ import recreate_module
|
||||
|
||||
old_module, new_module = recreate_module( # pragma: no cover
|
||||
__name__,
|
||||
by_module={
|
||||
'celery.app': ['Celery', 'bugreport', 'shared_task'],
|
||||
'celery.app.task': ['Task'],
|
||||
'celery._state': ['current_app', 'current_task'],
|
||||
'celery.canvas': ['chain', 'chord', 'chunks', 'group',
|
||||
'subtask', 'xmap', 'xstarmap'],
|
||||
'celery.utils': ['uuid'],
|
||||
},
|
||||
direct={'task': 'celery.task'},
|
||||
__package__='celery', __file__=__file__,
|
||||
__path__=__path__, __doc__=__doc__, __version__=__version__,
|
||||
__author__=__author__, __contact__=__contact__,
|
||||
__homepage__=__homepage__, __docformat__=__docformat__,
|
||||
VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
|
||||
)
|
||||
36
awx/lib/site-packages/celery/__main__.py
Normal file
@@ -0,0 +1,36 @@
from __future__ import absolute_import

import sys


def maybe_patch_concurrency():
    from celery.platforms import maybe_patch_concurrency
    maybe_patch_concurrency(sys.argv, ['-P'], ['--pool'])


def main():
    maybe_patch_concurrency()
    from celery.bin.celery import main
    main()


def _compat_worker():
    maybe_patch_concurrency()
    from celery.bin.celeryd import main
    main()


def _compat_multi():
    maybe_patch_concurrency()
    from celery.bin.celeryd_multi import main
    main()


def _compat_beat():
    maybe_patch_concurrency()
    from celery.bin.celerybeat import main
    main()


if __name__ == '__main__':
    main()
106
awx/lib/site-packages/celery/_state.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery._state
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
This is an internal module containing thread state
|
||||
like the ``current_app``, and ``current_task``.
|
||||
|
||||
This module shouldn't be used directly.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import weakref
|
||||
|
||||
from celery.local import Proxy
|
||||
from celery.utils.threads import LocalStack
|
||||
|
||||
#: Global default app used when no current app.
|
||||
default_app = None
|
||||
|
||||
#: List of all app instances (weakrefs), must not be used directly.
|
||||
_apps = set()
|
||||
|
||||
|
||||
class _TLS(threading.local):
|
||||
#: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
|
||||
#: sets this, so it will always contain the last instantiated app,
|
||||
#: and is the default app returned by :func:`app_or_default`.
|
||||
current_app = None
|
||||
_tls = _TLS()
|
||||
|
||||
_task_stack = LocalStack()
|
||||
|
||||
|
||||
def set_default_app(app):
|
||||
global default_app
|
||||
default_app = app
|
||||
|
||||
|
||||
def _get_current_app():
|
||||
if default_app is None:
|
||||
#: creates the global fallback app instance.
|
||||
from celery.app import Celery
|
||||
set_default_app(Celery(
|
||||
'default',
|
||||
loader=os.environ.get('CELERY_LOADER') or 'default',
|
||||
set_as_current=False, accept_magic_kwargs=True,
|
||||
))
|
||||
return _tls.current_app or default_app
|
||||
|
||||
C_STRICT_APP = os.environ.get('C_STRICT_APP')
|
||||
if os.environ.get('C_STRICT_APP'):
|
||||
def get_current_app():
|
||||
import traceback
|
||||
sys.stderr.write('USES CURRENT_APP\n')
|
||||
traceback.print_stack(file=sys.stderr)
|
||||
return _get_current_app()
|
||||
else:
|
||||
get_current_app = _get_current_app
|
||||
|
||||
|
||||
def get_current_task():
|
||||
"""Currently executing task."""
|
||||
return _task_stack.top
|
||||
|
||||
|
||||
def get_current_worker_task():
|
||||
"""Currently executing task, that was applied by the worker.
|
||||
|
||||
This is used to differentiate between the actual task
|
||||
executed by the worker and any task that was called within
|
||||
a task (using ``task.__call__`` or ``task.apply``)
|
||||
|
||||
"""
|
||||
for task in reversed(_task_stack.stack):
|
||||
if not task.request.called_directly:
|
||||
return task
|
||||
|
||||
|
||||
#: Proxy to current app.
|
||||
current_app = Proxy(get_current_app)
|
||||
|
||||
#: Proxy to current task.
|
||||
current_task = Proxy(get_current_task)
|
||||
|
||||
|
||||
def _register_app(app):
|
||||
_apps.add(weakref.ref(app))
|
||||
|
||||
|
||||
def _get_active_apps():
|
||||
dirty = []
|
||||
try:
|
||||
for appref in _apps:
|
||||
app = appref()
|
||||
if app is None:
|
||||
dirty.append(appref)
|
||||
else:
|
||||
yield app
|
||||
finally:
|
||||
while dirty:
|
||||
_apps.discard(dirty.pop())
|
||||
136
awx/lib/site-packages/celery/app/__init__.py
Normal file
@@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app
|
||||
~~~~~~~~~~
|
||||
|
||||
Celery Application.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
|
||||
from celery.local import Proxy
|
||||
from celery import _state
|
||||
from celery._state import ( # noqa
|
||||
set_default_app,
|
||||
get_current_app as current_app,
|
||||
get_current_task as current_task,
|
||||
_get_active_apps,
|
||||
)
|
||||
from celery.utils import gen_task_name
|
||||
|
||||
from .builtins import shared_task as _shared_task
|
||||
from .base import Celery, AppPickler # noqa
|
||||
|
||||
#: Proxy always returning the app set as default.
|
||||
default_app = Proxy(lambda: _state.default_app)
|
||||
|
||||
#: Function returning the app provided or the default app if none.
|
||||
#:
|
||||
#: The environment variable :envvar:`CELERY_TRACE_APP` is used to
|
||||
#: trace app leaks. When enabled an exception is raised if there
|
||||
#: is no active app.
|
||||
app_or_default = None
|
||||
|
||||
#: The 'default' loader is the default loader used by old applications.
|
||||
#: This is deprecated and should no longer be used as it's set too early
|
||||
#: to be affected by --loader argument.
|
||||
default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX
|
||||
|
||||
|
||||
def bugreport():
|
||||
return current_app().bugreport()
|
||||
|
||||
|
||||
def _app_or_default(app=None):
|
||||
if app is None:
|
||||
return _state.get_current_app()
|
||||
return app
|
||||
|
||||
|
||||
def _app_or_default_trace(app=None): # pragma: no cover
|
||||
from traceback import print_stack
|
||||
from billiard import current_process
|
||||
if app is None:
|
||||
if getattr(_state._tls, 'current_app', None):
|
||||
print('-- RETURNING TO CURRENT APP --') # noqa+
|
||||
print_stack()
|
||||
return _state._tls.current_app
|
||||
if current_process()._name == 'MainProcess':
|
||||
raise Exception('DEFAULT APP')
|
||||
print('-- RETURNING TO DEFAULT APP --') # noqa+
|
||||
print_stack()
|
||||
return _state.default_app
|
||||
return app
|
||||
|
||||
|
||||
def enable_trace():
|
||||
global app_or_default
|
||||
app_or_default = _app_or_default_trace
|
||||
|
||||
|
||||
def disable_trace():
|
||||
global app_or_default
|
||||
app_or_default = _app_or_default
|
||||
|
||||
if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover
|
||||
enable_trace()
|
||||
else:
|
||||
disable_trace()
|
||||
|
||||
App = Celery # XXX Compat
|
||||
|
||||
|
||||
def shared_task(*args, **kwargs):
|
||||
"""Task decorator that creates shared tasks,
|
||||
and returns a proxy that always returns the task from the current apps
|
||||
task registry.
|
||||
|
||||
This can be used by library authors to create tasks that will work
|
||||
for any app environment.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from celery import Celery, shared_task
|
||||
>>> @shared_task
|
||||
... def add(x, y):
|
||||
... return x + y
|
||||
|
||||
>>> app1 = Celery(broker='amqp://')
|
||||
>>> add.app is app1
|
||||
True
|
||||
|
||||
>>> app2 = Celery(broker='redis://')
|
||||
>>> add.app is app2
|
||||
|
||||
"""
|
||||
|
||||
def create_shared_task(**options):
|
||||
|
||||
def __inner(fun):
|
||||
name = options.get('name')
|
||||
# Set as shared task so that unfinalized apps,
|
||||
# and future apps will load the task.
|
||||
_shared_task(lambda app: app._task_from_fun(fun, **options))
|
||||
|
||||
# Force all finalized apps to take this task as well.
|
||||
for app in _get_active_apps():
|
||||
if app.finalized:
|
||||
with app._finalize_mutex:
|
||||
app._task_from_fun(fun, **options)
|
||||
|
||||
# Returns a proxy that always gets the task from the current
|
||||
# apps task registry.
|
||||
def task_by_cons():
|
||||
app = current_app()
|
||||
return app.tasks[
|
||||
name or gen_task_name(app, fun.__name__, fun.__module__)
|
||||
]
|
||||
return Proxy(task_by_cons)
|
||||
return __inner
|
||||
|
||||
if len(args) == 1 and callable(args[0]):
|
||||
return create_shared_task(**kwargs)(args[0])
|
||||
return create_shared_task(*args, **kwargs)
|
||||
63
awx/lib/site-packages/celery/app/abstract.py
Normal file
@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
"""
    celery.app.abstract
    ~~~~~~~~~~~~~~~~~~~

    Abstract class that takes default attribute values
    from the configuration.

"""
from __future__ import absolute_import


class from_config(object):

    def __init__(self, key=None):
        self.key = key

    def get_key(self, attr):
        return attr if self.key is None else self.key


class _configurated(type):

    def __new__(cls, name, bases, attrs):
        attrs['__confopts__'] = dict((attr, spec.get_key(attr))
                                     for attr, spec in attrs.iteritems()
                                     if isinstance(spec, from_config))
        inherit_from = attrs.get('inherit_confopts', ())
        for subcls in bases:
            try:
                attrs['__confopts__'].update(subcls.__confopts__)
            except AttributeError:
                pass
        for subcls in inherit_from:
            attrs['__confopts__'].update(subcls.__confopts__)
        attrs = dict((k, v if not isinstance(v, from_config) else None)
                     for k, v in attrs.iteritems())
        return super(_configurated, cls).__new__(cls, name, bases, attrs)


class configurated(object):
    __metaclass__ = _configurated

    def setup_defaults(self, kwargs, namespace='celery'):
        confopts = self.__confopts__
        app, find = self.app, self.app.conf.find_value_for_key

        for attr, keyname in confopts.iteritems():
            try:
                value = kwargs[attr]
            except KeyError:
                value = find(keyname, namespace)
            else:
                if value is None:
                    value = find(keyname, namespace)
            setattr(self, attr, value)

        for attr_name, attr_value in kwargs.iteritems():
            if attr_name not in confopts and attr_value is not None:
                setattr(self, attr_name, attr_value)

    def confopts_as_dict(self):
        return dict((key, getattr(self, key)) for key in self.__confopts__)
425
awx/lib/site-packages/celery/app/amqp.py
Normal file
@@ -0,0 +1,425 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.amqp
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Sending and receiving messages using Kombu.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from datetime import timedelta
|
||||
from weakref import WeakValueDictionary
|
||||
|
||||
from kombu import Connection, Consumer, Exchange, Producer, Queue
|
||||
from kombu.common import entry_to_queue
|
||||
from kombu.pools import ProducerPool
|
||||
from kombu.utils import cached_property, uuid
|
||||
from kombu.utils.encoding import safe_repr
|
||||
|
||||
from celery import signals
|
||||
from celery.utils.text import indent as textindent
|
||||
|
||||
from . import app_or_default
|
||||
from . import routes as _routes
|
||||
|
||||
#: Human readable queue declaration.
|
||||
QUEUE_FORMAT = """
|
||||
.> %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s
|
||||
"""
|
||||
|
||||
|
||||
class Queues(dict):
|
||||
"""Queue name⇒ declaration mapping.
|
||||
|
||||
:param queues: Initial list/tuple or dict of queues.
|
||||
:keyword create_missing: By default any unknown queues will be
|
||||
added automatically, but if disabled
|
||||
the occurrence of unknown queues
|
||||
in `wanted` will raise :exc:`KeyError`.
|
||||
:keyword ha_policy: Default HA policy for queues with none set.
|
||||
|
||||
|
||||
"""
|
||||
#: If set, this is a subset of queues to consume from.
|
||||
#: The rest of the queues are then used for routing only.
|
||||
_consume_from = None
|
||||
|
||||
def __init__(self, queues=None, default_exchange=None,
|
||||
create_missing=True, ha_policy=None):
|
||||
dict.__init__(self)
|
||||
self.aliases = WeakValueDictionary()
|
||||
self.default_exchange = default_exchange
|
||||
self.create_missing = create_missing
|
||||
self.ha_policy = ha_policy
|
||||
if isinstance(queues, (tuple, list)):
|
||||
queues = dict((q.name, q) for q in queues)
|
||||
for name, q in (queues or {}).iteritems():
|
||||
self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
|
||||
|
||||
def __getitem__(self, name):
|
||||
try:
|
||||
return self.aliases[name]
|
||||
except KeyError:
|
||||
return dict.__getitem__(self, name)
|
||||
|
||||
def __setitem__(self, name, queue):
|
||||
if self.default_exchange and (not queue.exchange or
|
||||
not queue.exchange.name):
|
||||
queue.exchange = self.default_exchange
|
||||
dict.__setitem__(self, name, queue)
|
||||
if queue.alias:
|
||||
self.aliases[queue.alias] = queue
|
||||
|
||||
def __missing__(self, name):
|
||||
if self.create_missing:
|
||||
return self.add(self.new_missing(name))
|
||||
raise KeyError(name)
|
||||
|
||||
def add(self, queue, **kwargs):
|
||||
"""Add new queue.
|
||||
|
||||
:param queue: Name of the queue.
|
||||
:keyword exchange: Name of the exchange.
|
||||
:keyword routing_key: Binding key.
|
||||
:keyword exchange_type: Type of exchange.
|
||||
:keyword \*\*options: Additional declaration options.
|
||||
|
||||
"""
|
||||
if not isinstance(queue, Queue):
|
||||
return self.add_compat(queue, **kwargs)
|
||||
if self.ha_policy:
|
||||
if queue.queue_arguments is None:
|
||||
queue.queue_arguments = {}
|
||||
self._set_ha_policy(queue.queue_arguments)
|
||||
self[queue.name] = queue
|
||||
return queue
|
||||
|
||||
def add_compat(self, name, **options):
|
||||
# docs used to use binding_key as routing key
|
||||
options.setdefault('routing_key', options.get('binding_key'))
|
||||
if options['routing_key'] is None:
|
||||
options['routing_key'] = name
|
||||
if self.ha_policy is not None:
|
||||
self._set_ha_policy(options.setdefault('queue_arguments', {}))
|
||||
q = self[name] = entry_to_queue(name, **options)
|
||||
return q
|
||||
|
||||
def _set_ha_policy(self, args):
|
||||
policy = self.ha_policy
|
||||
if isinstance(policy, (list, tuple)):
|
||||
return args.update({'x-ha-policy': 'nodes',
|
||||
'x-ha-policy-params': list(policy)})
|
||||
args['x-ha-policy'] = policy
|
||||
|
||||
def format(self, indent=0, indent_first=True):
|
||||
"""Format routing table into string for log dumps."""
|
||||
active = self.consume_from
|
||||
if not active:
|
||||
return ''
|
||||
info = [
|
||||
QUEUE_FORMAT.strip() % {
|
||||
'name': (name + ':').ljust(12),
|
||||
'exchange': q.exchange.name,
|
||||
'exchange_type': q.exchange.type,
|
||||
'routing_key': q.routing_key}
|
||||
for name, q in sorted(active.iteritems())]
|
||||
if indent_first:
|
||||
return textindent('\n'.join(info), indent)
|
||||
return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
|
||||
|
||||
def select_add(self, queue, **kwargs):
|
||||
"""Add new task queue that will be consumed from even when
|
||||
a subset has been selected using the :option:`-Q` option."""
|
||||
q = self.add(queue, **kwargs)
|
||||
if self._consume_from is not None:
|
||||
self._consume_from[q.name] = q
|
||||
return q
|
||||
|
||||
def select_subset(self, wanted):
|
||||
"""Sets :attr:`consume_from` by selecting a subset of the
|
||||
currently defined queues.
|
||||
|
||||
:param wanted: List of wanted queue names.
|
||||
"""
|
||||
if wanted:
|
||||
self._consume_from = dict((name, self[name]) for name in wanted)
|
||||
|
||||
def select_remove(self, queue):
|
||||
if self._consume_from is None:
|
||||
self.select_subset(k for k in self if k != queue)
|
||||
else:
|
||||
self._consume_from.pop(queue, None)
|
||||
|
||||
def new_missing(self, name):
|
||||
return Queue(name, Exchange(name), name)
|
||||
|
||||
@property
|
||||
def consume_from(self):
|
||||
if self._consume_from is not None:
|
||||
return self._consume_from
|
||||
return self
|
||||
|
||||
|
||||
class TaskProducer(Producer):
|
||||
app = None
|
||||
auto_declare = False
|
||||
retry = False
|
||||
retry_policy = None
|
||||
utc = True
|
||||
event_dispatcher = None
|
||||
send_sent_event = False
|
||||
|
||||
def __init__(self, channel=None, exchange=None, *args, **kwargs):
|
||||
self.retry = kwargs.pop('retry', self.retry)
|
||||
self.retry_policy = kwargs.pop('retry_policy',
|
||||
self.retry_policy or {})
|
||||
self.send_sent_event = kwargs.pop('send_sent_event',
|
||||
self.send_sent_event)
|
||||
exchange = exchange or self.exchange
|
||||
self.queues = self.app.amqp.queues # shortcut
|
||||
self.default_queue = self.app.amqp.default_queue
|
||||
super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
|
||||
|
||||
def publish_task(self, task_name, task_args=None, task_kwargs=None,
|
||||
countdown=None, eta=None, task_id=None, group_id=None,
|
||||
taskset_id=None, # compat alias to group_id
|
||||
expires=None, exchange=None, exchange_type=None,
|
||||
event_dispatcher=None, retry=None, retry_policy=None,
|
||||
queue=None, now=None, retries=0, chord=None,
|
||||
callbacks=None, errbacks=None, routing_key=None,
|
||||
serializer=None, delivery_mode=None, compression=None,
|
||||
declare=None, **kwargs):
|
||||
"""Send task message."""
|
||||
|
||||
qname = queue
|
||||
if queue is None and exchange is None:
|
||||
queue = self.default_queue
|
||||
if queue is not None:
|
||||
if isinstance(queue, basestring):
|
||||
qname, queue = queue, self.queues[queue]
|
||||
else:
|
||||
qname = queue.name
|
||||
exchange = exchange or queue.exchange.name
|
||||
routing_key = routing_key or queue.routing_key
|
||||
declare = declare or ([queue] if queue else [])
|
||||
|
||||
# merge default and custom policy
|
||||
retry = self.retry if retry is None else retry
|
||||
_rp = (dict(self.retry_policy, **retry_policy) if retry_policy
|
||||
else self.retry_policy)
|
||||
task_id = task_id or uuid()
|
||||
task_args = task_args or []
|
||||
task_kwargs = task_kwargs or {}
|
||||
if not isinstance(task_args, (list, tuple)):
|
||||
raise ValueError('task args must be a list or tuple')
|
||||
if not isinstance(task_kwargs, dict):
|
||||
raise ValueError('task kwargs must be a dictionary')
|
||||
if countdown: # Convert countdown to ETA.
|
||||
now = now or self.app.now()
|
||||
eta = now + timedelta(seconds=countdown)
|
||||
if isinstance(expires, (int, float)):
|
||||
now = now or self.app.now()
|
||||
expires = now + timedelta(seconds=expires)
|
||||
eta = eta and eta.isoformat()
|
||||
expires = expires and expires.isoformat()
|
||||
|
||||
body = {
|
||||
'task': task_name,
|
||||
'id': task_id,
|
||||
'args': task_args,
|
||||
'kwargs': task_kwargs,
|
||||
'retries': retries or 0,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
'utc': self.utc,
|
||||
'callbacks': callbacks,
|
||||
'errbacks': errbacks,
|
||||
'taskset': group_id or taskset_id,
|
||||
'chord': chord,
|
||||
}
|
||||
|
||||
self.publish(
|
||||
body,
|
||||
exchange=exchange, routing_key=routing_key,
|
||||
serializer=serializer or self.serializer,
|
||||
compression=compression or self.compression,
|
||||
retry=retry, retry_policy=_rp,
|
||||
delivery_mode=delivery_mode, declare=declare,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
signals.task_sent.send(sender=task_name, **body)
|
||||
if self.send_sent_event:
|
||||
evd = event_dispatcher or self.event_dispatcher
|
||||
exname = exchange or self.exchange
|
||||
if isinstance(exname, Exchange):
|
||||
exname = exname.name
|
||||
evd.publish(
|
||||
'task-sent',
|
||||
{
|
||||
'uuid': task_id,
|
||||
'name': task_name,
|
||||
'args': safe_repr(task_args),
|
||||
'kwargs': safe_repr(task_kwargs),
|
||||
'retries': retries,
|
||||
'eta': eta,
|
||||
'expires': expires,
|
||||
'queue': qname,
|
||||
'exchange': exname,
|
||||
'routing_key': routing_key,
|
||||
},
|
||||
self, retry=retry, retry_policy=retry_policy,
|
||||
)
|
||||
return task_id
|
||||
delay_task = publish_task # XXX Compat
|
||||
|
||||
@cached_property
|
||||
def event_dispatcher(self):
|
||||
# We call Dispatcher.publish with a custom producer
|
||||
# so don't need the dispatcher to be "enabled".
|
||||
return self.app.events.Dispatcher(enabled=False)
|
||||
|
||||
|
||||
class TaskPublisher(TaskProducer):
|
||||
"""Deprecated version of :class:`TaskProducer`."""
|
||||
|
||||
def __init__(self, channel=None, exchange=None, *args, **kwargs):
|
||||
self.app = app_or_default(kwargs.pop('app', self.app))
|
||||
self.retry = kwargs.pop('retry', self.retry)
|
||||
self.retry_policy = kwargs.pop('retry_policy',
|
||||
self.retry_policy or {})
|
||||
exchange = exchange or self.exchange
|
||||
if not isinstance(exchange, Exchange):
|
||||
exchange = Exchange(exchange,
|
||||
kwargs.pop('exchange_type', 'direct'))
|
||||
self.queues = self.app.amqp.queues # shortcut
|
||||
super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
|
||||
|
||||
|
||||
class TaskConsumer(Consumer):
|
||||
app = None
|
||||
|
||||
def __init__(self, channel, queues=None, app=None, accept=None, **kw):
|
||||
self.app = app or self.app
|
||||
if accept is None:
|
||||
accept = self.app.conf.CELERY_ACCEPT_CONTENT
|
||||
super(TaskConsumer, self).__init__(
|
||||
channel,
|
||||
queues or self.app.amqp.queues.consume_from.values(),
|
||||
accept=accept,
|
||||
**kw
|
||||
)
|
||||
|
||||
|
||||
class AMQP(object):
|
||||
Connection = Connection
|
||||
Consumer = Consumer
|
||||
|
||||
#: compat alias to Connection
|
||||
BrokerConnection = Connection
|
||||
|
||||
producer_cls = TaskProducer
|
||||
consumer_cls = TaskConsumer
|
||||
|
||||
#: Cached and prepared routing table.
|
||||
_rtable = None
|
||||
|
||||
#: Underlying producer pool instance automatically
|
||||
#: set by the :attr:`producer_pool`.
|
||||
_producer_pool = None
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
|
||||
def flush_routes(self):
|
||||
self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
|
||||
|
||||
def Queues(self, queues, create_missing=None, ha_policy=None):
|
||||
"""Create new :class:`Queues` instance, using queue defaults
|
||||
from the current configuration."""
|
||||
conf = self.app.conf
|
||||
if create_missing is None:
|
||||
create_missing = conf.CELERY_CREATE_MISSING_QUEUES
|
||||
if ha_policy is None:
|
||||
ha_policy = conf.CELERY_QUEUE_HA_POLICY
|
||||
if not queues and conf.CELERY_DEFAULT_QUEUE:
|
||||
queues = (Queue(conf.CELERY_DEFAULT_QUEUE,
|
||||
exchange=self.default_exchange,
|
||||
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), )
|
||||
return Queues(queues, self.default_exchange, create_missing, ha_policy)
|
||||
|
||||
def Router(self, queues=None, create_missing=None):
|
||||
"""Returns the current task router."""
|
||||
return _routes.Router(self.routes, queues or self.queues,
|
||||
self.app.either('CELERY_CREATE_MISSING_QUEUES',
|
||||
create_missing), app=self.app)
|
||||
|
||||
@cached_property
|
||||
def TaskConsumer(self):
|
||||
"""Return consumer configured to consume from the queues
|
||||
we are configured for (``app.amqp.queues.consume_from``)."""
|
||||
return self.app.subclass_with_self(self.consumer_cls,
|
||||
reverse='amqp.TaskConsumer')
|
||||
get_task_consumer = TaskConsumer # XXX compat
|
||||
|
||||
@cached_property
|
||||
def TaskProducer(self):
|
||||
"""Returns publisher used to send tasks.
|
||||
|
||||
You should use `app.send_task` instead.
|
||||
|
||||
"""
|
||||
conf = self.app.conf
|
||||
return self.app.subclass_with_self(
|
||||
self.producer_cls,
|
||||
reverse='amqp.TaskProducer',
|
||||
exchange=self.default_exchange,
|
||||
routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
|
||||
serializer=conf.CELERY_TASK_SERIALIZER,
|
||||
compression=conf.CELERY_MESSAGE_COMPRESSION,
|
||||
retry=conf.CELERY_TASK_PUBLISH_RETRY,
|
||||
retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
|
||||
send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
|
||||
utc=conf.CELERY_ENABLE_UTC,
|
||||
)
|
||||
TaskPublisher = TaskProducer # compat
|
||||
|
||||
@cached_property
|
||||
def default_queue(self):
|
||||
return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]
|
||||
|
||||
@cached_property
|
||||
def queues(self):
|
||||
"""Queue name⇒ declaration mapping."""
|
||||
return self.Queues(self.app.conf.CELERY_QUEUES)
|
||||
|
||||
@queues.setter # noqa
|
||||
def queues(self, queues):
|
||||
return self.Queues(queues)
|
||||
|
||||
@property
|
||||
def routes(self):
|
||||
if self._rtable is None:
|
||||
self.flush_routes()
|
||||
return self._rtable
|
||||
|
||||
@cached_property
|
||||
def router(self):
|
||||
return self.Router()
|
||||
|
||||
@property
|
||||
def producer_pool(self):
|
||||
if self._producer_pool is None:
|
||||
self._producer_pool = ProducerPool(
|
||||
self.app.pool,
|
||||
limit=self.app.pool.limit,
|
||||
Producer=self.TaskProducer,
|
||||
)
|
||||
return self._producer_pool
|
||||
publisher_pool = producer_pool # compat alias
|
||||
|
||||
@cached_property
|
||||
def default_exchange(self):
|
||||
return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
|
||||
self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
|
||||
55
awx/lib/site-packages/celery/app/annotations.py
Normal file
@@ -0,0 +1,55 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.annotations
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Annotations is a nice term for monkey patching
|
||||
task classes in the configuration.
|
||||
|
||||
This prepares and performs the annotations in the
|
||||
:setting:`CELERY_ANNOTATIONS` setting.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery.utils.functional import firstmethod, mpromise
|
||||
from celery.utils.imports import instantiate
|
||||
|
||||
_first_match = firstmethod('annotate')
|
||||
_first_match_any = firstmethod('annotate_any')
|
||||
|
||||
|
||||
def resolve_all(anno, task):
|
||||
return (r for r in (_first_match(anno, task), _first_match_any(anno)) if r)
|
||||
|
||||
|
||||
class MapAnnotation(dict):
|
||||
|
||||
def annotate_any(self):
|
||||
try:
|
||||
return dict(self['*'])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def annotate(self, task):
|
||||
try:
|
||||
return dict(self[task.name])
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def prepare(annotations):
|
||||
"""Expands the :setting:`CELERY_ANNOTATIONS` setting."""
|
||||
|
||||
def expand_annotation(annotation):
|
||||
if isinstance(annotation, dict):
|
||||
return MapAnnotation(annotation)
|
||||
elif isinstance(annotation, basestring):
|
||||
return mpromise(instantiate, annotation)
|
||||
return annotation
|
||||
|
||||
if annotations is None:
|
||||
return ()
|
||||
elif not isinstance(annotations, (list, tuple)):
|
||||
annotations = (annotations, )
|
||||
return [expand_annotation(anno) for anno in annotations]
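# Usage sketch (assumption, illustrative values only): a CELERY_ANNOTATIONS
# setting that prepare() above would expand.  A plain dict becomes a
# MapAnnotation, where the '*' key applies to every task and a task-name key
# applies to just that task.
EXAMPLE_CELERY_ANNOTATIONS = {
    'tasks.add': {'rate_limit': '10/s'},
    '*': {'acks_late': True},
}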
|
||||
516
awx/lib/site-packages/celery/app/base.py
Normal file
@@ -0,0 +1,516 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.base
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Actual App instance implementation.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
import threading
|
||||
import warnings
|
||||
|
||||
from collections import deque
|
||||
from contextlib import contextmanager
|
||||
from copy import deepcopy
|
||||
from functools import wraps
|
||||
|
||||
from billiard.util import register_after_fork
|
||||
from kombu.clocks import LamportClock
|
||||
from kombu.utils import cached_property
|
||||
|
||||
from celery import platforms
|
||||
from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
|
||||
from celery.loaders import get_loader_cls
|
||||
from celery.local import PromiseProxy, maybe_evaluate
|
||||
from celery._state import _task_stack, _tls, get_current_app, _register_app
|
||||
from celery.utils.functional import first
|
||||
from celery.utils.imports import instantiate, symbol_by_name
|
||||
|
||||
from .annotations import prepare as prepare_annotations
|
||||
from .builtins import shared_task, load_shared_tasks
|
||||
from .defaults import DEFAULTS, find_deprecated_settings
|
||||
from .registry import TaskRegistry
|
||||
from .utils import AppPickler, Settings, bugreport, _unpickle_app
|
||||
|
||||
_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
|
||||
|
||||
|
||||
def _unpickle_appattr(reverse_name, args):
|
||||
"""Given an attribute name and a list of args, gets
|
||||
the attribute from the current app and calls it."""
|
||||
return get_current_app()._rgetattr(reverse_name)(*args)
|
||||
|
||||
|
||||
class Celery(object):
|
||||
Pickler = AppPickler
|
||||
|
||||
SYSTEM = platforms.SYSTEM
|
||||
IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS
|
||||
|
||||
amqp_cls = 'celery.app.amqp:AMQP'
|
||||
backend_cls = None
|
||||
events_cls = 'celery.events:Events'
|
||||
loader_cls = 'celery.loaders.app:AppLoader'
|
||||
log_cls = 'celery.app.log:Logging'
|
||||
control_cls = 'celery.app.control:Control'
|
||||
registry_cls = TaskRegistry
|
||||
_pool = None
|
||||
|
||||
def __init__(self, main=None, loader=None, backend=None,
|
||||
amqp=None, events=None, log=None, control=None,
|
||||
set_as_current=True, accept_magic_kwargs=False,
|
||||
tasks=None, broker=None, include=None, changes=None,
|
||||
config_source=None,
|
||||
**kwargs):
|
||||
self.clock = LamportClock()
|
||||
self.main = main
|
||||
self.amqp_cls = amqp or self.amqp_cls
|
||||
self.backend_cls = backend or self.backend_cls
|
||||
self.events_cls = events or self.events_cls
|
||||
self.loader_cls = loader or self.loader_cls
|
||||
self.log_cls = log or self.log_cls
|
||||
self.control_cls = control or self.control_cls
|
||||
self.set_as_current = set_as_current
|
||||
self.registry_cls = symbol_by_name(self.registry_cls)
|
||||
self.accept_magic_kwargs = accept_magic_kwargs
|
||||
self._config_source = config_source
|
||||
|
||||
self.configured = False
|
||||
self._pending_defaults = deque()
|
||||
|
||||
self.finalized = False
|
||||
self._finalize_mutex = threading.Lock()
|
||||
self._pending = deque()
|
||||
self._tasks = tasks
|
||||
if not isinstance(self._tasks, TaskRegistry):
|
||||
self._tasks = TaskRegistry(self._tasks or {})
|
||||
|
||||
# these options are moved to the config to
|
||||
# simplify pickling of the app object.
|
||||
self._preconf = changes or {}
|
||||
if broker:
|
||||
self._preconf['BROKER_URL'] = broker
|
||||
if include:
|
||||
self._preconf['CELERY_IMPORTS'] = include
|
||||
|
||||
if self.set_as_current:
|
||||
self.set_current()
|
||||
|
||||
# See Issue #1126
|
||||
# this is used when pickling the app object so that configuration
|
||||
# is reread without having to pickle the contents
|
||||
# (which is often unpickleable anyway)
|
||||
if self._config_source:
|
||||
self.config_from_object(self._config_source)
|
||||
|
||||
self.on_init()
|
||||
_register_app(self)
|
||||
|
||||
def set_current(self):
|
||||
_tls.current_app = self
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
self.close()
|
||||
|
||||
def close(self):
|
||||
self._maybe_close_pool()
|
||||
|
||||
def on_init(self):
|
||||
"""Optional callback called at init."""
|
||||
pass
|
||||
|
||||
def start(self, argv=None):
|
||||
return instantiate(
|
||||
'celery.bin.celery:CeleryCommand',
|
||||
app=self).execute_from_commandline(argv)
|
||||
|
||||
def worker_main(self, argv=None):
|
||||
return instantiate(
|
||||
'celery.bin.celeryd:WorkerCommand',
|
||||
app=self).execute_from_commandline(argv)
|
||||
|
||||
def task(self, *args, **opts):
|
||||
"""Creates new task class from any callable."""
|
||||
if _EXECV and not opts.get('_force_evaluate'):
|
||||
# When using execv the task in the original module will point to a
|
||||
# different app, so doing things like 'add.request' will point to
|
||||
# a different task instance. This makes sure it will always use
|
||||
# the task instance from the current app.
|
||||
# Really need a better solution for this :(
|
||||
from . import shared_task as proxies_to_curapp
|
||||
opts['_force_evaluate'] = True # XXX Py2.5
|
||||
return proxies_to_curapp(*args, **opts)
|
||||
|
||||
def inner_create_task_cls(shared=True, filter=None, **opts):
|
||||
_filt = filter # stupid 2to3
|
||||
|
||||
def _create_task_cls(fun):
|
||||
if shared:
|
||||
cons = lambda app: app._task_from_fun(fun, **opts)
|
||||
cons.__name__ = fun.__name__
|
||||
shared_task(cons)
|
||||
if self.accept_magic_kwargs: # compat mode
|
||||
task = self._task_from_fun(fun, **opts)
|
||||
if filter:
|
||||
task = filter(task)
|
||||
return task
|
||||
|
||||
# return a proxy object that is only evaluated when first used
|
||||
promise = PromiseProxy(self._task_from_fun, (fun, ), opts)
|
||||
self._pending.append(promise)
|
||||
if _filt:
|
||||
return _filt(promise)
|
||||
return promise
|
||||
|
||||
return _create_task_cls
|
||||
|
||||
if len(args) == 1 and callable(args[0]):
|
||||
return inner_create_task_cls(**opts)(*args)
|
||||
if args:
|
||||
raise TypeError(
|
||||
'task() takes no arguments (%s given)' % (len(args, )))
|
||||
return inner_create_task_cls(**opts)
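# Usage sketch (assumption, illustrative names): the decorator defined above is
# what backs the familiar @app.task form; the returned object is a lazy proxy
# that is only evaluated once the app is finalized.
from celery import Celery

example_app = Celery('proj', broker='amqp://guest@localhost//')


@example_app.task(name='proj.add')
def add(x, y):
    # Plain function body; the decorator turns it into a task class.
    return x + y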
|
||||
|
||||
def _task_from_fun(self, fun, **options):
|
||||
base = options.pop('base', None) or self.Task
|
||||
|
||||
T = type(fun.__name__, (base, ), dict({
|
||||
'app': self,
|
||||
'accept_magic_kwargs': False,
|
||||
'run': staticmethod(fun),
|
||||
'__doc__': fun.__doc__,
|
||||
'__module__': fun.__module__}, **options))()
|
||||
task = self._tasks[T.name] # return global instance.
|
||||
task.bind(self)
|
||||
return task
|
||||
|
||||
def finalize(self):
|
||||
with self._finalize_mutex:
|
||||
if not self.finalized:
|
||||
self.finalized = True
|
||||
load_shared_tasks(self)
|
||||
|
||||
pending = self._pending
|
||||
while pending:
|
||||
maybe_evaluate(pending.popleft())
|
||||
|
||||
for task in self._tasks.itervalues():
|
||||
task.bind(self)
|
||||
|
||||
def add_defaults(self, fun):
|
||||
if not callable(fun):
|
||||
d, fun = fun, lambda: d
|
||||
if self.configured:
|
||||
return self.conf.add_defaults(fun())
|
||||
self._pending_defaults.append(fun)
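# Usage sketch (assumption, illustrative values): add_defaults() accepts either
# a mapping or a callable returning one; the callable form is kept pending and
# evaluated lazily when the configuration object is first constructed.
from celery import Celery

_app = Celery('proj')
_app.add_defaults(lambda: {'CELERY_TASK_SERIALIZER': 'json'})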
|
||||
|
||||
def config_from_object(self, obj, silent=False):
|
||||
del(self.conf)
|
||||
self._config_source = obj
|
||||
return self.loader.config_from_object(obj, silent=silent)
|
||||
|
||||
def config_from_envvar(self, variable_name, silent=False):
|
||||
module_name = os.environ.get(variable_name)
|
||||
if not module_name:
|
||||
if silent:
|
||||
return False
|
||||
raise ImproperlyConfigured(self.error_envvar_not_set % module_name)
|
||||
return self.config_from_object(module_name, silent=silent)
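# Usage sketch (assumption, illustrative variable name): read the name of a
# configuration module from an environment variable, e.g.
# ``export CELERY_CONFIG_MODULE='proj.celeryconfig'`` before starting.
from celery import Celery

_app = Celery('proj')
_app.config_from_envvar('CELERY_CONFIG_MODULE', silent=True)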
|
||||
|
||||
def config_from_cmdline(self, argv, namespace='celery'):
|
||||
self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
|
||||
|
||||
def send_task(self, name, args=None, kwargs=None, countdown=None,
|
||||
eta=None, task_id=None, producer=None, connection=None,
|
||||
result_cls=None, expires=None, queues=None, publisher=None,
|
||||
**options):
|
||||
producer = producer or publisher # XXX compat
|
||||
if self.conf.CELERY_ALWAYS_EAGER: # pragma: no cover
|
||||
warnings.warn(AlwaysEagerIgnored(
|
||||
'CELERY_ALWAYS_EAGER has no effect on send_task'))
|
||||
|
||||
result_cls = result_cls or self.AsyncResult
|
||||
router = self.amqp.Router(queues)
|
||||
options.setdefault('compression',
|
||||
self.conf.CELERY_MESSAGE_COMPRESSION)
|
||||
options = router.route(options, name, args, kwargs)
|
||||
with self.producer_or_acquire(producer) as producer:
|
||||
return result_cls(producer.publish_task(
|
||||
name, args, kwargs,
|
||||
task_id=task_id,
|
||||
countdown=countdown, eta=eta,
|
||||
expires=expires, **options
|
||||
))
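# Usage sketch (assumption, illustrative task name): send_task() publishes a
# task message by name without importing the task function, and returns an
# AsyncResult for the published task id.
from celery import Celery

_app = Celery('proj', broker='amqp://guest@localhost//')
result = _app.send_task('proj.add', args=(2, 2), countdown=10)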
|
||||
|
||||
def connection(self, hostname=None, userid=None,
|
||||
password=None, virtual_host=None, port=None, ssl=None,
|
||||
insist=None, connect_timeout=None, transport=None,
|
||||
transport_options=None, heartbeat=None, **kwargs):
|
||||
conf = self.conf
|
||||
return self.amqp.Connection(
|
||||
hostname or conf.BROKER_HOST,
|
||||
userid or conf.BROKER_USER,
|
||||
password or conf.BROKER_PASSWORD,
|
||||
virtual_host or conf.BROKER_VHOST,
|
||||
port or conf.BROKER_PORT,
|
||||
transport=transport or conf.BROKER_TRANSPORT,
|
||||
insist=self.either('BROKER_INSIST', insist),
|
||||
ssl=self.either('BROKER_USE_SSL', ssl),
|
||||
connect_timeout=self.either(
|
||||
'BROKER_CONNECTION_TIMEOUT', connect_timeout),
|
||||
heartbeat=heartbeat,
|
||||
transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS,
|
||||
**transport_options or {}))
|
||||
broker_connection = connection
|
||||
|
||||
@contextmanager
|
||||
def connection_or_acquire(self, connection=None, pool=True,
|
||||
*args, **kwargs):
|
||||
if connection:
|
||||
yield connection
|
||||
else:
|
||||
if pool:
|
||||
with self.pool.acquire(block=True) as connection:
|
||||
yield connection
|
||||
else:
|
||||
with self.connection() as connection:
|
||||
yield connection
|
||||
default_connection = connection_or_acquire # XXX compat
|
||||
|
||||
@contextmanager
|
||||
def producer_or_acquire(self, producer=None):
|
||||
if producer:
|
||||
yield producer
|
||||
else:
|
||||
with self.amqp.producer_pool.acquire(block=True) as producer:
|
||||
yield producer
|
||||
default_producer = producer_or_acquire # XXX compat
|
||||
|
||||
def with_default_connection(self, fun):
|
||||
"""With any function accepting a `connection`
|
||||
keyword argument, establishes a default connection if one is
|
||||
not already passed to it.
|
||||
|
||||
Any automatically established connection will be closed after
|
||||
the function returns.
|
||||
|
||||
**Deprecated**
|
||||
|
||||
Use ``with app.connection_or_acquire(connection)`` instead.
|
||||
|
||||
"""
|
||||
@wraps(fun)
|
||||
def _inner(*args, **kwargs):
|
||||
connection = kwargs.pop('connection', None)
|
||||
with self.connection_or_acquire(connection) as c:
|
||||
return fun(*args, **dict(kwargs, connection=c))
|
||||
return _inner
|
||||
|
||||
def prepare_config(self, c):
|
||||
"""Prepare configuration before it is merged with the defaults."""
|
||||
return find_deprecated_settings(c)
|
||||
|
||||
def now(self):
|
||||
return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC)
|
||||
|
||||
def mail_admins(self, subject, body, fail_silently=False):
|
||||
if self.conf.ADMINS:
|
||||
to = [admin_email for _, admin_email in self.conf.ADMINS]
|
||||
return self.loader.mail_admins(
|
||||
subject, body, fail_silently, to=to,
|
||||
sender=self.conf.SERVER_EMAIL,
|
||||
host=self.conf.EMAIL_HOST,
|
||||
port=self.conf.EMAIL_PORT,
|
||||
user=self.conf.EMAIL_HOST_USER,
|
||||
password=self.conf.EMAIL_HOST_PASSWORD,
|
||||
timeout=self.conf.EMAIL_TIMEOUT,
|
||||
use_ssl=self.conf.EMAIL_USE_SSL,
|
||||
use_tls=self.conf.EMAIL_USE_TLS,
|
||||
)
|
||||
|
||||
def select_queues(self, queues=None):
|
||||
return self.amqp.queues.select_subset(queues)
|
||||
|
||||
def either(self, default_key, *values):
|
||||
"""Fallback to the value of a configuration key if none of the
|
||||
`*values` are true."""
|
||||
return first(None, values) or self.conf.get(default_key)
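# Usage sketch (assumption, illustrative only): either() prefers the first
# truthy explicit value and otherwise falls back to the named setting, which is
# how connection() above resolves keyword arguments against BROKER_* settings.
from celery import Celery

_app = Celery('proj')
timeout = _app.either('BROKER_CONNECTION_TIMEOUT', None)  # falls back to the configured default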
|
||||
|
||||
def bugreport(self):
|
||||
return bugreport(self)
|
||||
|
||||
def _get_backend(self):
|
||||
from celery.backends import get_backend_by_url
|
||||
backend, url = get_backend_by_url(
|
||||
self.backend_cls or self.conf.CELERY_RESULT_BACKEND,
|
||||
self.loader)
|
||||
return backend(app=self, url=url)
|
||||
|
||||
def _get_config(self):
|
||||
self.configured = True
|
||||
s = Settings({}, [self.prepare_config(self.loader.conf),
|
||||
deepcopy(DEFAULTS)])
|
||||
|
||||
# load lazy config dict initializers.
|
||||
pending = self._pending_defaults
|
||||
while pending:
|
||||
s.add_defaults(pending.popleft()())
|
||||
if self._preconf:
|
||||
for key, value in self._preconf.iteritems():
|
||||
setattr(s, key, value)
|
||||
return s
|
||||
|
||||
def _after_fork(self, obj_):
|
||||
self._maybe_close_pool()
|
||||
|
||||
def _maybe_close_pool(self):
|
||||
if self._pool:
|
||||
self._pool.force_close_all()
|
||||
self._pool = None
|
||||
amqp = self.amqp
|
||||
if amqp._producer_pool:
|
||||
amqp._producer_pool.force_close_all()
|
||||
amqp._producer_pool = None
|
||||
|
||||
def create_task_cls(self):
|
||||
"""Creates a base task class using default configuration
|
||||
taken from this app."""
|
||||
return self.subclass_with_self('celery.app.task:Task', name='Task',
|
||||
attribute='_app', abstract=True)
|
||||
|
||||
def subclass_with_self(self, Class, name=None, attribute='app',
|
||||
reverse=None, **kw):
|
||||
"""Subclass an app-compatible class by setting its app attribute
|
||||
to be this app instance.
|
||||
|
||||
App-compatible means that the class has a class attribute that
|
||||
provides the default app it should use, e.g.
|
||||
``class Foo: app = None``.
|
||||
|
||||
:param Class: The app-compatible class to subclass.
|
||||
:keyword name: Custom name for the target class.
|
||||
:keyword attribute: Name of the attribute holding the app,
|
||||
default is 'app'.
|
||||
|
||||
"""
|
||||
Class = symbol_by_name(Class)
|
||||
reverse = reverse if reverse else Class.__name__
|
||||
|
||||
def __reduce__(self):
|
||||
return _unpickle_appattr, (reverse, self.__reduce_args__())
|
||||
|
||||
attrs = dict({attribute: self}, __module__=Class.__module__,
|
||||
__doc__=Class.__doc__, __reduce__=__reduce__, **kw)
|
||||
|
||||
return type(name or Class.__name__, (Class, ), attrs)
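# Minimal sketch (assumption, illustrative names) of an "app-compatible" class
# as described above: a class attribute holds the default app, so
# subclass_with_self() can produce a subclass bound to one concrete app, e.g.
# ``Bound = some_app.subclass_with_self(ExampleService, reverse='example.Service')``.
class ExampleService(object):
    app = None

    def __init__(self, app=None):
        # Fall back to the class-level default app when none is passed.
        self.app = app or self.app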
|
||||
|
||||
def _rgetattr(self, path):
|
||||
return reduce(getattr, [self] + path.split('.'))
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %s:0x%x>' % (self.__class__.__name__,
|
||||
self.main or '__main__', id(self), )
|
||||
|
||||
def __reduce__(self):
|
||||
# Reduce only pickles the configuration changes,
|
||||
# so the default configuration doesn't have to be passed
|
||||
# between processes.
|
||||
return (
|
||||
_unpickle_app,
|
||||
(self.__class__, self.Pickler) + self.__reduce_args__(),
|
||||
)
|
||||
|
||||
def __reduce_args__(self):
|
||||
return (self.main, self.conf.changes, self.loader_cls,
|
||||
self.backend_cls, self.amqp_cls, self.events_cls,
|
||||
self.log_cls, self.control_cls, self.accept_magic_kwargs,
|
||||
self._config_source)
|
||||
|
||||
@cached_property
|
||||
def Worker(self):
|
||||
return self.subclass_with_self('celery.apps.worker:Worker')
|
||||
|
||||
@cached_property
|
||||
def WorkController(self, **kwargs):
|
||||
return self.subclass_with_self('celery.worker:WorkController')
|
||||
|
||||
@cached_property
|
||||
def Beat(self, **kwargs):
|
||||
return self.subclass_with_self('celery.apps.beat:Beat')
|
||||
|
||||
@cached_property
|
||||
def TaskSet(self):
|
||||
return self.subclass_with_self('celery.task.sets:TaskSet')
|
||||
|
||||
@cached_property
|
||||
def Task(self):
|
||||
return self.create_task_cls()
|
||||
|
||||
@cached_property
|
||||
def annotations(self):
|
||||
return prepare_annotations(self.conf.CELERY_ANNOTATIONS)
|
||||
|
||||
@cached_property
|
||||
def AsyncResult(self):
|
||||
return self.subclass_with_self('celery.result:AsyncResult')
|
||||
|
||||
@cached_property
|
||||
def GroupResult(self):
|
||||
return self.subclass_with_self('celery.result:GroupResult')
|
||||
|
||||
@cached_property
|
||||
def TaskSetResult(self): # XXX compat
|
||||
return self.subclass_with_self('celery.result:TaskSetResult')
|
||||
|
||||
@property
|
||||
def pool(self):
|
||||
if self._pool is None:
|
||||
register_after_fork(self, self._after_fork)
|
||||
limit = self.conf.BROKER_POOL_LIMIT
|
||||
self._pool = self.connection().Pool(limit=limit)
|
||||
return self._pool
|
||||
|
||||
@property
|
||||
def current_task(self):
|
||||
return _task_stack.top
|
||||
|
||||
@cached_property
|
||||
def amqp(self):
|
||||
return instantiate(self.amqp_cls, app=self)
|
||||
|
||||
@cached_property
|
||||
def backend(self):
|
||||
return self._get_backend()
|
||||
|
||||
@cached_property
|
||||
def conf(self):
|
||||
return self._get_config()
|
||||
|
||||
@cached_property
|
||||
def control(self):
|
||||
return instantiate(self.control_cls, app=self)
|
||||
|
||||
@cached_property
|
||||
def events(self):
|
||||
return instantiate(self.events_cls, app=self)
|
||||
|
||||
@cached_property
|
||||
def loader(self):
|
||||
return get_loader_cls(self.loader_cls)(app=self)
|
||||
|
||||
@cached_property
|
||||
def log(self):
|
||||
return instantiate(self.log_cls, app=self)
|
||||
|
||||
@cached_property
|
||||
def tasks(self):
|
||||
self.finalize()
|
||||
return self._tasks
|
||||
App = Celery # compat
|
||||
374
awx/lib/site-packages/celery/app/builtins.py
Normal file
@@ -0,0 +1,374 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.builtins
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Built-in tasks that are always available in all
|
||||
app instances. E.g. chord, group and xmap.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
from collections import deque
|
||||
|
||||
from celery._state import get_current_worker_task
|
||||
from celery.utils import uuid
|
||||
|
||||
#: global list of functions defining tasks that should be
|
||||
#: added to all apps.
|
||||
_shared_tasks = []
|
||||
|
||||
|
||||
def shared_task(constructor):
|
||||
"""Decorator that specifies that the decorated function is a function
|
||||
that generates a built-in task.
|
||||
|
||||
The function will then be called for every new app instance created
|
||||
(lazily, i.e. when the task registry for that app is first needed).
|
||||
"""
|
||||
_shared_tasks.append(constructor)
|
||||
return constructor
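# Illustrative sketch (assumption, not part of the vendored module): a
# constructor registered with @shared_task is called once per app instance, so
# the built-in task it defines exists on every app.
@shared_task
def add_example_noop_task(app):

    @app.task(name='celery.example_noop', _force_evaluate=True)
    def example_noop():
        # Does nothing; exists only to illustrate the registration pattern.
        return None
    return example_noop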
|
||||
|
||||
|
||||
def load_shared_tasks(app):
|
||||
"""Loads the built-in tasks for an app instance."""
|
||||
for constructor in _shared_tasks:
|
||||
constructor(app)
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_backend_cleanup_task(app):
|
||||
"""The backend cleanup task can be used to clean up the default result
|
||||
backend.
|
||||
|
||||
This task is also added to the periodic task schedule so that it is
|
||||
run every day at midnight, but :program:`celerybeat` must be running
|
||||
for this to be effective.
|
||||
|
||||
Note that not all backends do anything for this, what needs to be
|
||||
done at cleanup is up to each backend, and some backends
|
||||
may even clean up in realtime so that a periodic cleanup is not necessary.
|
||||
|
||||
"""
|
||||
|
||||
@app.task(name='celery.backend_cleanup', _force_evaluate=True)
|
||||
def backend_cleanup():
|
||||
app.backend.cleanup()
|
||||
return backend_cleanup
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_unlock_chord_task(app):
|
||||
"""The unlock chord task is used by result backends that doesn't
|
||||
have native chord support.
|
||||
|
||||
It creates a task chain polling the header for completion.
|
||||
|
||||
"""
|
||||
from celery.canvas import subtask
|
||||
from celery.exceptions import ChordError
|
||||
from celery.result import from_serializable
|
||||
|
||||
default_propagate = app.conf.CELERY_CHORD_PROPAGATES
|
||||
|
||||
@app.task(name='celery.chord_unlock', max_retries=None,
|
||||
default_retry_delay=1, ignore_result=True, _force_evaluate=True)
|
||||
def unlock_chord(group_id, callback, interval=None, propagate=None,
|
||||
max_retries=None, result=None,
|
||||
Result=app.AsyncResult, GroupResult=app.GroupResult,
|
||||
from_serializable=from_serializable):
|
||||
# if propagate is disabled exceptions raised by chord tasks
|
||||
# will be sent as part of the result list to the chord callback.
|
||||
# Since 3.1 propagate will be enabled by default, and instead
|
||||
# the chord callback changes state to FAILURE with the
|
||||
# exception set to ChordError.
|
||||
propagate = default_propagate if propagate is None else propagate
|
||||
|
||||
# check if the task group is ready, and if so apply the callback.
|
||||
deps = GroupResult(
|
||||
group_id,
|
||||
[from_serializable(r, app=app) for r in result],
|
||||
)
|
||||
j = deps.join_native if deps.supports_native_join else deps.join
|
||||
|
||||
if deps.ready():
|
||||
callback = subtask(callback)
|
||||
try:
|
||||
ret = j(propagate=propagate)
|
||||
except Exception, exc:
|
||||
try:
|
||||
culprit = deps._failed_join_report().next()
|
||||
reason = 'Dependency %s raised %r' % (culprit.id, exc)
|
||||
except StopIteration:
|
||||
reason = repr(exc)
|
||||
app._tasks[callback.task].backend.fail_from_current_stack(
|
||||
callback.id, exc=ChordError(reason),
|
||||
)
|
||||
else:
|
||||
try:
|
||||
callback.delay(ret)
|
||||
except Exception, exc:
|
||||
app._tasks[callback.task].backend.fail_from_current_stack(
|
||||
callback.id,
|
||||
exc=ChordError('Callback error: %r' % (exc, )),
|
||||
)
|
||||
else:
|
||||
return unlock_chord.retry(countdown=interval,
|
||||
max_retries=max_retries)
|
||||
return unlock_chord
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_map_task(app):
|
||||
from celery.canvas import subtask
|
||||
|
||||
@app.task(name='celery.map', _force_evaluate=True)
|
||||
def xmap(task, it):
|
||||
task = subtask(task).type
|
||||
return [task(value) for value in it]
|
||||
return xmap
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_starmap_task(app):
|
||||
from celery.canvas import subtask
|
||||
|
||||
@app.task(name='celery.starmap', _force_evaluate=True)
|
||||
def xstarmap(task, it):
|
||||
task = subtask(task).type
|
||||
return [task(*args) for args in it]
|
||||
return xstarmap
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_chunk_task(app):
|
||||
from celery.canvas import chunks as _chunks
|
||||
|
||||
@app.task(name='celery.chunks', _force_evaluate=True)
|
||||
def chunks(task, it, n):
|
||||
return _chunks.apply_chunks(task, it, n)
|
||||
return chunks
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_group_task(app):
|
||||
_app = app
|
||||
from celery.canvas import maybe_subtask, subtask
|
||||
from celery.result import from_serializable
|
||||
|
||||
class Group(app.Task):
|
||||
app = _app
|
||||
name = 'celery.group'
|
||||
accept_magic_kwargs = False
|
||||
|
||||
def run(self, tasks, result, group_id, partial_args):
|
||||
app = self.app
|
||||
result = from_serializable(result, app)
|
||||
# any partial args are added to all tasks in the group
|
||||
taskit = (subtask(task).clone(partial_args)
|
||||
for i, task in enumerate(tasks))
|
||||
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
|
||||
return app.GroupResult(
|
||||
result.id,
|
||||
[stask.apply(group_id=group_id) for stask in taskit],
|
||||
)
|
||||
with app.producer_or_acquire() as pub:
|
||||
[stask.apply_async(group_id=group_id, publisher=pub,
|
||||
add_to_parent=False) for stask in taskit]
|
||||
parent = get_current_worker_task()
|
||||
if parent:
|
||||
parent.request.children.append(result)
|
||||
return result
|
||||
|
||||
def prepare(self, options, tasks, args, **kwargs):
|
||||
AsyncResult = self.AsyncResult
|
||||
options['group_id'] = group_id = (
|
||||
options.setdefault('task_id', uuid()))
|
||||
|
||||
def prepare_member(task):
|
||||
task = maybe_subtask(task)
|
||||
opts = task.options
|
||||
opts['group_id'] = group_id
|
||||
try:
|
||||
tid = opts['task_id']
|
||||
except KeyError:
|
||||
tid = opts['task_id'] = uuid()
|
||||
return task, AsyncResult(tid)
|
||||
|
||||
try:
|
||||
tasks, results = zip(*[prepare_member(task) for task in tasks])
|
||||
except ValueError: # tasks empty
|
||||
tasks, results = [], []
|
||||
return (tasks, self.app.GroupResult(group_id, results),
|
||||
group_id, args)
|
||||
|
||||
def apply_async(self, partial_args=(), kwargs={}, **options):
|
||||
if self.app.conf.CELERY_ALWAYS_EAGER:
|
||||
return self.apply(partial_args, kwargs, **options)
|
||||
tasks, result, gid, args = self.prepare(
|
||||
options, args=partial_args, **kwargs
|
||||
)
|
||||
super(Group, self).apply_async((
|
||||
list(tasks), result.serializable(), gid, args), **options
|
||||
)
|
||||
return result
|
||||
|
||||
def apply(self, args=(), kwargs={}, **options):
|
||||
return super(Group, self).apply(
|
||||
self.prepare(options, args=args, **kwargs),
|
||||
**options).get()
|
||||
return Group
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_chain_task(app):
|
||||
from celery.canvas import Signature, chord, group, maybe_subtask
|
||||
_app = app
|
||||
|
||||
class Chain(app.Task):
|
||||
app = _app
|
||||
name = 'celery.chain'
|
||||
accept_magic_kwargs = False
|
||||
|
||||
def prepare_steps(self, args, tasks):
|
||||
steps = deque(tasks)
|
||||
next_step = prev_task = prev_res = None
|
||||
tasks, results = [], []
|
||||
i = 0
|
||||
while steps:
|
||||
# First task get partial args from chain.
|
||||
task = maybe_subtask(steps.popleft())
|
||||
task = task.clone() if i else task.clone(args)
|
||||
res = task._freeze()
|
||||
i += 1
|
||||
|
||||
if isinstance(task, group):
|
||||
# automatically upgrade group(..) | s to chord(group, s)
|
||||
try:
|
||||
next_step = steps.popleft()
|
||||
# for chords we freeze by pretending it's a normal
|
||||
# task instead of a group.
|
||||
res = Signature._freeze(task)
|
||||
task = chord(task, body=next_step, task_id=res.task_id)
|
||||
except IndexError:
|
||||
pass
|
||||
if prev_task:
|
||||
# link previous task to this task.
|
||||
prev_task.link(task)
|
||||
# set the results parent attribute.
|
||||
res.parent = prev_res
|
||||
|
||||
results.append(res)
|
||||
tasks.append(task)
|
||||
prev_task, prev_res = task, res
|
||||
|
||||
return tasks, results
|
||||
|
||||
def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
|
||||
task_id=None, **options):
|
||||
if self.app.conf.CELERY_ALWAYS_EAGER:
|
||||
return self.apply(args, kwargs, **options)
|
||||
options.pop('publisher', None)
|
||||
tasks, results = self.prepare_steps(args, kwargs['tasks'])
|
||||
result = results[-1]
|
||||
if group_id:
|
||||
tasks[-1].set(group_id=group_id)
|
||||
if chord:
|
||||
tasks[-1].set(chord=chord)
|
||||
if task_id:
|
||||
tasks[-1].set(task_id=task_id)
|
||||
result = tasks[-1].type.AsyncResult(task_id)
|
||||
tasks[0].apply_async()
|
||||
return result
|
||||
|
||||
def apply(self, args=(), kwargs={}, subtask=maybe_subtask, **options):
|
||||
last, fargs = None, args # fargs passed to first task only
|
||||
for task in kwargs['tasks']:
|
||||
res = subtask(task).clone(fargs).apply(last and (last.get(), ))
|
||||
res.parent, last, fargs = last, res, None
|
||||
return last
|
||||
return Chain
|
||||
|
||||
|
||||
@shared_task
|
||||
def add_chord_task(app):
|
||||
"""Every chord is executed in a dedicated task, so that the chord
|
||||
can be used as a subtask, and this generates the task
|
||||
responsible for that."""
|
||||
from celery import group
|
||||
from celery.canvas import maybe_subtask
|
||||
_app = app
|
||||
default_propagate = app.conf.CELERY_CHORD_PROPAGATES
|
||||
|
||||
class Chord(app.Task):
|
||||
app = _app
|
||||
name = 'celery.chord'
|
||||
accept_magic_kwargs = False
|
||||
ignore_result = False
|
||||
|
||||
def run(self, header, body, partial_args=(), interval=1, countdown=1,
|
||||
max_retries=None, propagate=None, eager=False, **kwargs):
|
||||
propagate = default_propagate if propagate is None else propagate
|
||||
group_id = uuid()
|
||||
AsyncResult = self.app.AsyncResult
|
||||
prepare_member = self._prepare_member
|
||||
|
||||
# - convert back to group if serialized
|
||||
tasks = header.tasks if isinstance(header, group) else header
|
||||
header = group([maybe_subtask(s).clone() for s in tasks])
|
||||
# - eager applies the group inline
|
||||
if eager:
|
||||
return header.apply(args=partial_args, task_id=group_id)
|
||||
|
||||
results = [AsyncResult(prepare_member(task, body, group_id))
|
||||
for task in header.tasks]
|
||||
|
||||
# - fallback implementations schedules the chord_unlock task here
|
||||
app.backend.on_chord_apply(group_id, body,
|
||||
interval=interval,
|
||||
countdown=countdown,
|
||||
max_retries=max_retries,
|
||||
propagate=propagate,
|
||||
result=results)
|
||||
# - call the header group, returning the GroupResult.
|
||||
# XXX Python 2.5 doesn't allow kwargs after star-args.
|
||||
return header(*partial_args, **{'task_id': group_id})
|
||||
|
||||
def _prepare_member(self, task, body, group_id):
|
||||
opts = task.options
|
||||
# d.setdefault would work but generating uuids is expensive
|
||||
try:
|
||||
task_id = opts['task_id']
|
||||
except KeyError:
|
||||
task_id = opts['task_id'] = uuid()
|
||||
opts.update(chord=body, group_id=group_id)
|
||||
return task_id
|
||||
|
||||
def apply_async(self, args=(), kwargs={}, task_id=None, **options):
|
||||
if self.app.conf.CELERY_ALWAYS_EAGER:
|
||||
return self.apply(args, kwargs, **options)
|
||||
group_id = options.pop('group_id', None)
|
||||
chord = options.pop('chord', None)
|
||||
header = kwargs.pop('header')
|
||||
body = kwargs.pop('body')
|
||||
header, body = (list(maybe_subtask(header)),
|
||||
maybe_subtask(body))
|
||||
if group_id:
|
||||
body.set(group_id=group_id)
|
||||
if chord:
|
||||
body.set(chord=chord)
|
||||
callback_id = body.options.setdefault('task_id', task_id or uuid())
|
||||
parent = super(Chord, self).apply_async((header, body, args),
|
||||
kwargs, **options)
|
||||
body_result = self.AsyncResult(callback_id)
|
||||
body_result.parent = parent
|
||||
return body_result
|
||||
|
||||
def apply(self, args=(), kwargs={}, propagate=True, **options):
|
||||
body = kwargs['body']
|
||||
res = super(Chord, self).apply(args, dict(kwargs, eager=True),
|
||||
**options)
|
||||
return maybe_subtask(body).apply(
|
||||
args=(res.get(propagate=propagate).get(), ))
|
||||
return Chord
|
||||
270
awx/lib/site-packages/celery/app/control.py
Normal file
@@ -0,0 +1,270 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.control
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Client for worker remote control commands.
|
||||
Server implementation is in :mod:`celery.worker.control`.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
from kombu.pidbox import Mailbox
|
||||
from kombu.utils import cached_property
|
||||
|
||||
from . import app_or_default
|
||||
|
||||
|
||||
def flatten_reply(reply):
|
||||
nodes = {}
|
||||
for item in reply:
|
||||
nodes.update(item)
|
||||
return nodes
|
||||
|
||||
|
||||
class Inspect(object):
|
||||
app = None
|
||||
|
||||
def __init__(self, destination=None, timeout=1, callback=None,
|
||||
connection=None, app=None, limit=None):
|
||||
self.app = app or self.app
|
||||
self.destination = destination
|
||||
self.timeout = timeout
|
||||
self.callback = callback
|
||||
self.connection = connection
|
||||
self.limit = limit
|
||||
|
||||
def _prepare(self, reply):
|
||||
if not reply:
|
||||
return
|
||||
by_node = flatten_reply(reply)
|
||||
if self.destination and \
|
||||
not isinstance(self.destination, (list, tuple)):
|
||||
return by_node.get(self.destination)
|
||||
return by_node
|
||||
|
||||
def _request(self, command, **kwargs):
|
||||
return self._prepare(self.app.control.broadcast(
|
||||
command,
|
||||
arguments=kwargs,
|
||||
destination=self.destination,
|
||||
callback=self.callback,
|
||||
connection=self.connection,
|
||||
limit=self.limit,
|
||||
timeout=self.timeout, reply=True,
|
||||
))
|
||||
|
||||
def report(self):
|
||||
return self._request('report')
|
||||
|
||||
def active(self, safe=False):
|
||||
return self._request('dump_active', safe=safe)
|
||||
|
||||
def scheduled(self, safe=False):
|
||||
return self._request('dump_schedule', safe=safe)
|
||||
|
||||
def reserved(self, safe=False):
|
||||
return self._request('dump_reserved', safe=safe)
|
||||
|
||||
def stats(self):
|
||||
return self._request('stats')
|
||||
|
||||
def revoked(self):
|
||||
return self._request('dump_revoked')
|
||||
|
||||
def registered(self, *taskinfoitems):
|
||||
return self._request('dump_tasks', taskinfoitems=taskinfoitems)
|
||||
registered_tasks = registered
|
||||
|
||||
def ping(self):
|
||||
return self._request('ping')
|
||||
|
||||
def active_queues(self):
|
||||
return self._request('active_queues')
|
||||
|
||||
def conf(self):
|
||||
return self._request('dump_conf')
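# Usage sketch (assumption, illustrative only): Inspect is normally reached
# through app.control.inspect(); each method broadcasts a remote-control
# request and returns the replies keyed by worker node.
from celery import Celery

_app = Celery('proj', broker='amqp://guest@localhost//')
i = _app.control.inspect(timeout=1)
print(i.ping())
print(i.active())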
|
||||
|
||||
|
||||
class Control(object):
|
||||
Mailbox = Mailbox
|
||||
|
||||
def __init__(self, app=None):
|
||||
self.app = app_or_default(app)
|
||||
self.mailbox = self.Mailbox('celery', type='fanout',
|
||||
accept=self.app.conf.CELERY_ACCEPT_CONTENT)
|
||||
|
||||
@cached_property
|
||||
def inspect(self):
|
||||
return self.app.subclass_with_self(Inspect, reverse='control.inspect')
|
||||
|
||||
def purge(self, connection=None):
|
||||
"""Discard all waiting tasks.
|
||||
|
||||
This will ignore all tasks waiting for execution, and they will
|
||||
be deleted from the messaging server.
|
||||
|
||||
:returns: the number of tasks discarded.
|
||||
|
||||
"""
|
||||
with self.app.connection_or_acquire(connection) as conn:
|
||||
return self.app.amqp.TaskConsumer(conn).purge()
|
||||
discard_all = purge
|
||||
|
||||
def revoke(self, task_id, destination=None, terminate=False,
|
||||
signal='SIGTERM', **kwargs):
|
||||
"""Tell all (or specific) workers to revoke a task by id.
|
||||
|
||||
If a task is revoked, the workers will ignore the task and
|
||||
not execute it after all.
|
||||
|
||||
:param task_id: Id of the task to revoke.
|
||||
:keyword terminate: Also terminate the process currently working
|
||||
on the task (if any).
|
||||
:keyword signal: Name of signal to send to process if terminate.
|
||||
Default is TERM.
|
||||
|
||||
See :meth:`broadcast` for supported keyword arguments.
|
||||
|
||||
"""
|
||||
return self.broadcast('revoke', destination=destination,
|
||||
arguments={'task_id': task_id,
|
||||
'terminate': terminate,
|
||||
'signal': signal}, **kwargs)
|
||||
|
||||
def ping(self, destination=None, timeout=1, **kwargs):
|
||||
"""Ping all (or specific) workers.
|
||||
|
||||
Returns answer from alive workers.
|
||||
|
||||
See :meth:`broadcast` for supported keyword arguments.
|
||||
|
||||
"""
|
||||
return self.broadcast('ping', reply=True, destination=destination,
|
||||
timeout=timeout, **kwargs)
|
||||
|
||||
def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to set a new rate limit
|
||||
for task by type.
|
||||
|
||||
:param task_name: Name of task to change rate limit for.
|
||||
:param rate_limit: The rate limit as tasks per second, or a rate limit
|
||||
string (`'100/m'`, etc.
|
||||
see :attr:`celery.task.base.Task.rate_limit` for
|
||||
more information).
|
||||
|
||||
See :meth:`broadcast` for supported keyword arguments.
|
||||
|
||||
"""
|
||||
return self.broadcast('rate_limit', destination=destination,
|
||||
arguments={'task_name': task_name,
|
||||
'rate_limit': rate_limit},
|
||||
**kwargs)
|
||||
|
||||
def add_consumer(self, queue, exchange=None, exchange_type='direct',
|
||||
routing_key=None, options=None, **kwargs):
|
||||
"""Tell all (or specific) workers to start consuming from a new queue.
|
||||
|
||||
Only the queue name is required; if only the queue is specified,
then the exchange/routing key will be set to the same name
(like automatic queues do).
|
||||
|
||||
.. note::
|
||||
|
||||
This command does not respect the default queue/exchange
|
||||
options in the configuration.
|
||||
|
||||
:param queue: Name of queue to start consuming from.
|
||||
:keyword exchange: Optional name of exchange.
|
||||
:keyword exchange_type: Type of exchange (defaults to 'direct').
|
||||
:keyword routing_key: Optional routing key.
|
||||
:keyword options: Additional options as supported
|
||||
by :meth:`kombu.entitiy.Queue.from_dict`.
|
||||
|
||||
See :meth:`broadcast` for supported keyword arguments.
|
||||
|
||||
"""
|
||||
return self.broadcast(
|
||||
'add_consumer',
|
||||
arguments=dict({'queue': queue, 'exchange': exchange,
|
||||
'exchange_type': exchange_type,
|
||||
'routing_key': routing_key}, **options or {}),
|
||||
**kwargs
|
||||
)
|
||||
|
||||
def cancel_consumer(self, queue, **kwargs):
|
||||
"""Tell all (or specific) workers to stop consuming from ``queue``.
|
||||
|
||||
Supports the same keyword arguments as :meth:`broadcast`.
|
||||
|
||||
"""
|
||||
return self.broadcast(
|
||||
'cancel_consumer', arguments={'queue': queue}, **kwargs
|
||||
)
|
||||
|
||||
def time_limit(self, task_name, soft=None, hard=None, **kwargs):
|
||||
"""Tell all (or specific) workers to set time limits for
|
||||
a task by type.
|
||||
|
||||
:param task_name: Name of task to change time limits for.
|
||||
:keyword soft: New soft time limit (in seconds).
|
||||
:keyword hard: New hard time limit (in seconds).
|
||||
|
||||
Any additional keyword arguments are passed on to :meth:`broadcast`.
|
||||
|
||||
"""
|
||||
return self.broadcast(
|
||||
'time_limit',
|
||||
arguments={'task_name': task_name,
|
||||
'hard': hard, 'soft': soft}, **kwargs)
|
||||
|
||||
def enable_events(self, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to enable events."""
|
||||
return self.broadcast('enable_events', {}, destination, **kwargs)
|
||||
|
||||
def disable_events(self, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to enable events."""
|
||||
return self.broadcast('disable_events', {}, destination, **kwargs)
|
||||
|
||||
def pool_grow(self, n=1, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to grow the pool by ``n``.
|
||||
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
|
||||
"""
|
||||
return self.broadcast('pool_grow', {}, destination, **kwargs)
|
||||
|
||||
def pool_shrink(self, n=1, destination=None, **kwargs):
|
||||
"""Tell all (or specific) workers to shrink the pool by ``n``.
|
||||
|
||||
Supports the same arguments as :meth:`broadcast`.
|
||||
|
||||
"""
|
||||
return self.broadcast('pool_shrink', {}, destination, **kwargs)
|
||||
|
||||
def broadcast(self, command, arguments=None, destination=None,
|
||||
connection=None, reply=False, timeout=1, limit=None,
|
||||
callback=None, channel=None, **extra_kwargs):
|
||||
"""Broadcast a control command to the celery workers.
|
||||
|
||||
:param command: Name of command to send.
|
||||
:param arguments: Keyword arguments for the command.
|
||||
:keyword destination: If set, a list of the hosts to send the
|
||||
command to, when empty broadcast to all workers.
|
||||
:keyword connection: Custom broker connection to use, if not set,
|
||||
a connection will be established automatically.
|
||||
:keyword reply: Wait for and return the reply.
|
||||
:keyword timeout: Timeout in seconds to wait for the reply.
|
||||
:keyword limit: Limit number of replies.
|
||||
:keyword callback: Callback called immediately for each reply
|
||||
received.
|
||||
|
||||
"""
|
||||
with self.app.connection_or_acquire(connection) as conn:
|
||||
arguments = dict(arguments or {}, **extra_kwargs)
|
||||
return self.mailbox(conn)._broadcast(
|
||||
command, arguments, destination, reply, timeout,
|
||||
limit, callback, channel=channel,
|
||||
)
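# Usage sketch (assumption, illustrative task name): the higher-level helpers
# above all funnel through broadcast(); these two calls are equivalent ways of
# asking every worker to apply a new rate limit.
from celery import Celery

_app = Celery('proj', broker='amqp://guest@localhost//')
_app.control.rate_limit('proj.add', '10/m')
_app.control.broadcast('rate_limit',
                       arguments={'task_name': 'proj.add',
                                  'rate_limit': '10/m'})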
|
||||
267
awx/lib/site-packages/celery/app/defaults.py
Normal file
@@ -0,0 +1,267 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.defaults
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Configuration introspection and defaults.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from collections import deque
|
||||
from datetime import timedelta
|
||||
|
||||
from celery.utils import strtobool
|
||||
from celery.utils.functional import memoize
|
||||
|
||||
is_jython = sys.platform.startswith('java')
|
||||
is_pypy = hasattr(sys, 'pypy_version_info')
|
||||
|
||||
DEFAULT_POOL = 'processes'
|
||||
if is_jython:
|
||||
DEFAULT_POOL = 'threads'
|
||||
elif is_pypy:
|
||||
if sys.pypy_version_info[0:3] < (1, 5, 0):
|
||||
DEFAULT_POOL = 'solo'
|
||||
else:
|
||||
DEFAULT_POOL = 'processes'
|
||||
|
||||
|
||||
DEFAULT_PROCESS_LOG_FMT = """
|
||||
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
|
||||
""".strip()
|
||||
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
|
||||
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
|
||||
%(task_name)s[%(task_id)s]: %(message)s"""
|
||||
|
||||
_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', 'alt': 'BROKER_URL'}
|
||||
_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
|
||||
'alt': 'URL form of CELERY_RESULT_BACKEND'}
|
||||
|
||||
|
||||
class Option(object):
|
||||
alt = None
|
||||
deprecate_by = None
|
||||
remove_by = None
|
||||
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
|
||||
bool=strtobool, dict=dict, tuple=tuple)
|
||||
|
||||
def __init__(self, default=None, *args, **kwargs):
|
||||
self.default = default
|
||||
self.type = kwargs.get('type') or 'string'
|
||||
for attr, value in kwargs.iteritems():
|
||||
setattr(self, attr, value)
|
||||
|
||||
def to_python(self, value):
|
||||
return self.typemap[self.type](value)
|
||||
|
||||
def __repr__(self):
|
||||
return '<Option: type->%s default->%r>' % (self.type, self.default)
|
||||
|
||||
|
||||
NAMESPACES = {
|
||||
'BROKER': {
|
||||
'URL': Option(None, type='string'),
|
||||
'CONNECTION_TIMEOUT': Option(4, type='float'),
|
||||
'CONNECTION_RETRY': Option(True, type='bool'),
|
||||
'CONNECTION_MAX_RETRIES': Option(100, type='int'),
|
||||
'HEARTBEAT': Option(None, type='int'),
|
||||
'HEARTBEAT_CHECKRATE': Option(3.0, type='int'),
|
||||
'POOL_LIMIT': Option(10, type='int'),
|
||||
'INSIST': Option(False, type='bool',
|
||||
deprecate_by='2.4', remove_by='4.0'),
|
||||
'USE_SSL': Option(False, type='bool'),
|
||||
'TRANSPORT': Option(type='string'),
|
||||
'TRANSPORT_OPTIONS': Option({}, type='dict'),
|
||||
'HOST': Option(type='string', **_BROKER_OLD),
|
||||
'PORT': Option(type='int', **_BROKER_OLD),
|
||||
'USER': Option(type='string', **_BROKER_OLD),
|
||||
'PASSWORD': Option(type='string', **_BROKER_OLD),
|
||||
'VHOST': Option(type='string', **_BROKER_OLD),
|
||||
},
|
||||
'CASSANDRA': {
|
||||
'COLUMN_FAMILY': Option(type='string'),
|
||||
'DETAILED_MODE': Option(False, type='bool'),
|
||||
'KEYSPACE': Option(type='string'),
|
||||
'READ_CONSISTENCY': Option(type='string'),
|
||||
'SERVERS': Option(type='list'),
|
||||
'WRITE_CONSISTENCY': Option(type='string'),
|
||||
},
|
||||
'CELERY': {
|
||||
'ACCEPT_CONTENT': Option(None, type='any'),
|
||||
'ACKS_LATE': Option(False, type='bool'),
|
||||
'ALWAYS_EAGER': Option(False, type='bool'),
|
||||
'AMQP_TASK_RESULT_EXPIRES': Option(
|
||||
type='float', deprecate_by='2.5', remove_by='4.0',
|
||||
alt='CELERY_TASK_RESULT_EXPIRES'
|
||||
),
|
||||
'AMQP_TASK_RESULT_CONNECTION_MAX': Option(
|
||||
1, type='int', remove_by='2.5', alt='BROKER_POOL_LIMIT',
|
||||
),
|
||||
'ANNOTATIONS': Option(type='any'),
|
||||
'BROADCAST_QUEUE': Option('celeryctl'),
|
||||
'BROADCAST_EXCHANGE': Option('celeryctl'),
|
||||
'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
|
||||
'CACHE_BACKEND': Option(),
|
||||
'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
|
||||
# chord propagate will be True from v3.1
|
||||
'CHORD_PROPAGATES': Option(False, type='bool'),
|
||||
'CREATE_MISSING_QUEUES': Option(True, type='bool'),
|
||||
'DEFAULT_RATE_LIMIT': Option(type='string'),
|
||||
'DISABLE_RATE_LIMITS': Option(False, type='bool'),
|
||||
'DEFAULT_ROUTING_KEY': Option('celery'),
|
||||
'DEFAULT_QUEUE': Option('celery'),
|
||||
'DEFAULT_EXCHANGE': Option('celery'),
|
||||
'DEFAULT_EXCHANGE_TYPE': Option('direct'),
|
||||
'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
|
||||
'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
|
||||
'ENABLE_UTC': Option(True, type='bool'),
|
||||
'EVENT_SERIALIZER': Option('json'),
|
||||
'IMPORTS': Option((), type='tuple'),
|
||||
'INCLUDE': Option((), type='tuple'),
|
||||
'IGNORE_RESULT': Option(False, type='bool'),
|
||||
'MAX_CACHED_RESULTS': Option(5000, type='int'),
|
||||
'MESSAGE_COMPRESSION': Option(type='string'),
|
||||
'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
|
||||
'REDIS_HOST': Option(type='string', **_REDIS_OLD),
|
||||
'REDIS_PORT': Option(type='int', **_REDIS_OLD),
|
||||
'REDIS_DB': Option(type='int', **_REDIS_OLD),
|
||||
'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
|
||||
'REDIS_MAX_CONNECTIONS': Option(type='int'),
|
||||
'RESULT_BACKEND': Option(type='string'),
|
||||
'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
|
||||
'RESULT_DBURI': Option(),
|
||||
'RESULT_ENGINE_OPTIONS': Option(type='dict'),
|
||||
'RESULT_EXCHANGE': Option('celeryresults'),
|
||||
'RESULT_EXCHANGE_TYPE': Option('direct'),
|
||||
'RESULT_SERIALIZER': Option('pickle'),
|
||||
'RESULT_PERSISTENT': Option(False, type='bool'),
|
||||
'ROUTES': Option(type='any'),
|
||||
'SEND_EVENTS': Option(False, type='bool'),
|
||||
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
|
||||
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
|
||||
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
|
||||
'TASK_ERROR_WHITELIST': Option(
|
||||
(), type='tuple', deprecate_by='2.5', remove_by='4.0',
|
||||
),
|
||||
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
|
||||
'TASK_PUBLISH_RETRY_POLICY': Option({
|
||||
'max_retries': 3,
|
||||
'interval_start': 0,
|
||||
'interval_max': 1,
|
||||
'interval_step': 0.2}, type='dict'),
|
||||
'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
|
||||
'TASK_SERIALIZER': Option('pickle'),
|
||||
'TIMEZONE': Option(type='string'),
|
||||
'TRACK_STARTED': Option(False, type='bool'),
|
||||
'REDIRECT_STDOUTS': Option(True, type='bool'),
|
||||
'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
|
||||
'QUEUES': Option(type='dict'),
|
||||
'QUEUE_HA_POLICY': Option(None, type='string'),
|
||||
'SECURITY_KEY': Option(type='string'),
|
||||
'SECURITY_CERTIFICATE': Option(type='string'),
|
||||
'SECURITY_CERT_STORE': Option(type='string'),
|
||||
'WORKER_DIRECT': Option(False, type='bool'),
|
||||
},
|
||||
'CELERYD': {
|
||||
'AUTOSCALER': Option('celery.worker.autoscale.Autoscaler'),
|
||||
'AUTORELOADER': Option('celery.worker.autoreload.Autoreloader'),
|
||||
'BOOT_STEPS': Option((), type='tuple'),
|
||||
'CONCURRENCY': Option(0, type='int'),
|
||||
'TIMER': Option(type='string'),
|
||||
'TIMER_PRECISION': Option(1.0, type='float'),
|
||||
'FORCE_EXECV': Option(False, type='bool'),
|
||||
'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
|
||||
'CONSUMER': Option(type='string'),
|
||||
'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
|
||||
'LOG_COLOR': Option(type='bool'),
|
||||
'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--loglevel argument'),
|
||||
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--logfile argument'),
|
||||
'MEDIATOR': Option('celery.worker.mediator.Mediator'),
|
||||
'MAX_TASKS_PER_CHILD': Option(type='int'),
|
||||
'POOL': Option(DEFAULT_POOL),
|
||||
'POOL_PUTLOCKS': Option(True, type='bool'),
|
||||
'POOL_RESTARTS': Option(False, type='bool'),
|
||||
'PREFETCH_MULTIPLIER': Option(4, type='int'),
|
||||
'STATE_DB': Option(),
|
||||
'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
|
||||
'TASK_SOFT_TIME_LIMIT': Option(type='float'),
|
||||
'TASK_TIME_LIMIT': Option(type='float'),
|
||||
'WORKER_LOST_WAIT': Option(10.0, type='float')
|
||||
},
|
||||
'CELERYBEAT': {
|
||||
'SCHEDULE': Option({}, type='dict'),
|
||||
'SCHEDULER': Option('celery.beat.PersistentScheduler'),
|
||||
'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
|
||||
'MAX_LOOP_INTERVAL': Option(0, type='float'),
|
||||
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--loglevel argument'),
|
||||
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--logfile argument'),
|
||||
},
|
||||
'CELERYMON': {
|
||||
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--loglevel argument'),
|
||||
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
|
||||
alt='--logfile argument'),
|
||||
'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
|
||||
},
|
||||
'EMAIL': {
|
||||
'HOST': Option('localhost'),
|
||||
'PORT': Option(25, type='int'),
|
||||
'HOST_USER': Option(),
|
||||
'HOST_PASSWORD': Option(),
|
||||
'TIMEOUT': Option(2, type='float'),
|
||||
'USE_SSL': Option(False, type='bool'),
|
||||
'USE_TLS': Option(False, type='bool'),
|
||||
},
|
||||
'SERVER_EMAIL': Option('celery@localhost'),
|
||||
'ADMINS': Option((), type='tuple'),
|
||||
}
|
||||
|
||||
|
||||
def flatten(d, ns=''):
|
||||
stack = deque([(ns, d)])
|
||||
while stack:
|
||||
name, space = stack.popleft()
|
||||
for key, value in space.iteritems():
|
||||
if isinstance(value, dict):
|
||||
stack.append((name + key + '_', value))
|
||||
else:
|
||||
yield name + key, value
|
||||
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
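# Illustrative sketch (assumption, not part of the vendored module): flatten()
# joins nested namespace keys with '_', so NAMESPACES['CELERYD']['CONCURRENCY']
# becomes the flat DEFAULTS key 'CELERYD_CONCURRENCY'.
assert DEFAULTS['CELERYD_CONCURRENCY'] == 0
assert DEFAULTS['BROKER_POOL_LIMIT'] == 10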
|
||||
|
||||
|
||||
def find_deprecated_settings(source):
|
||||
from celery.utils import warn_deprecated
|
||||
for name, opt in flatten(NAMESPACES):
|
||||
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
|
||||
warn_deprecated(description='The %r setting' % (name, ),
|
||||
deprecation=opt.deprecate_by,
|
||||
removal=opt.remove_by,
|
||||
alternative='Use %s instead' % (opt.alt, ))
|
||||
return source
|
||||
|
||||
|
||||
@memoize(maxsize=None)
|
||||
def find(name, namespace='celery'):
|
||||
# - Try specified namespace first.
|
||||
namespace = namespace.upper()
|
||||
try:
|
||||
return namespace, name.upper(), NAMESPACES[namespace][name.upper()]
|
||||
except KeyError:
|
||||
# - Try all the other namespaces.
|
||||
for ns, keys in NAMESPACES.iteritems():
|
||||
if ns.upper() == name.upper():
|
||||
return None, ns, keys
|
||||
elif isinstance(keys, dict):
|
||||
try:
|
||||
return ns, name.upper(), keys[name.upper()]
|
||||
except KeyError:
|
||||
pass
|
||||
# - See if name is a qualname last.
|
||||
return None, name.upper(), DEFAULTS[name.upper()]
|
||||
231
awx/lib/site-packages/celery/app/log.py
Normal file
@@ -0,0 +1,231 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.log
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The Celery instances logging section: ``Celery.log``.
|
||||
|
||||
Sets up logging for the worker and other programs,
|
||||
redirects stdouts, colors log output, patches logging
|
||||
related compatibility fixes, and so on.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from kombu.log import NullHandler
|
||||
|
||||
from celery import signals
|
||||
from celery._state import get_current_task
|
||||
from celery.utils import isatty
|
||||
from celery.utils.compat import WatchedFileHandler
|
||||
from celery.utils.log import (
|
||||
get_logger, mlevel,
|
||||
ColorFormatter, ensure_process_aware_logger,
|
||||
LoggingProxy, get_multiprocessing_logger,
|
||||
reset_multiprocessing_logger,
|
||||
)
|
||||
from celery.utils.term import colored
|
||||
|
||||
is_py3k = sys.version_info[0] == 3
|
||||
|
||||
MP_LOG = os.environ.get('MP_LOG', False)
|
||||
|
||||
|
||||
class TaskFormatter(ColorFormatter):
|
||||
|
||||
def format(self, record):
|
||||
task = get_current_task()
|
||||
if task and task.request:
|
||||
record.__dict__.update(task_id=task.request.id,
|
||||
task_name=task.name)
|
||||
else:
|
||||
record.__dict__.setdefault('task_name', '???')
|
||||
record.__dict__.setdefault('task_id', '???')
|
||||
return ColorFormatter.format(self, record)
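# Rough illustration (not part of the vendored source): TaskFormatter only
# injects task_id/task_name into the record so a format string can refer to
# them; outside of a task they fall back to '???'.  A task log format would
# look along the lines of:
#
#   '[%(asctime)s: %(levelname)s] %(task_name)s[%(task_id)s]: %(message)s'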
|
||||
|
||||
|
||||
class Logging(object):
|
||||
#: The logging subsystem is only configured once per process.
|
||||
#: setup_logging_subsystem sets this flag, and subsequent calls
|
||||
#: will do nothing.
|
||||
_setup = False
|
||||
|
||||
def __init__(self, app):
|
||||
self.app = app
|
||||
self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL)
|
||||
self.format = self.app.conf.CELERYD_LOG_FORMAT
|
||||
self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
|
||||
self.colorize = self.app.conf.CELERYD_LOG_COLOR
|
||||
|
||||
def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
|
||||
redirect_level='WARNING', colorize=None):
|
||||
handled = self.setup_logging_subsystem(
|
||||
loglevel, logfile, colorize=colorize,
|
||||
)
|
||||
if not handled:
|
||||
logger = get_logger('celery.redirected')
|
||||
if redirect_stdouts:
|
||||
self.redirect_stdouts_to_logger(logger,
|
||||
loglevel=redirect_level)
|
||||
os.environ.update(
|
||||
CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
|
||||
CELERY_LOG_FILE=str(logfile) if logfile else '',
|
||||
CELERY_LOG_REDIRECT='1' if redirect_stdouts else '',
|
||||
CELERY_LOG_REDIRECT_LEVEL=str(redirect_level),
|
||||
)
|
||||
|
||||
def setup_logging_subsystem(self, loglevel=None, logfile=None,
|
||||
format=None, colorize=None, **kwargs):
|
||||
if Logging._setup:
|
||||
return
|
||||
Logging._setup = True
|
||||
loglevel = mlevel(loglevel or self.loglevel)
|
||||
format = format or self.format
|
||||
colorize = self.supports_color(colorize, logfile)
|
||||
reset_multiprocessing_logger()
|
||||
if not is_py3k:
|
||||
ensure_process_aware_logger()
|
||||
receivers = signals.setup_logging.send(
|
||||
sender=None, loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
|
||||
if not receivers:
|
||||
root = logging.getLogger()
|
||||
|
||||
if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
|
||||
root.handlers = []
|
||||
|
||||
# Configure root logger
|
||||
self._configure_logger(
|
||||
root, logfile, loglevel, format, colorize, **kwargs
|
||||
)
|
||||
|
||||
# Configure the multiprocessing logger
|
||||
self._configure_logger(
|
||||
get_multiprocessing_logger(),
|
||||
logfile, loglevel if MP_LOG else logging.ERROR,
|
||||
format, colorize, **kwargs
|
||||
)
|
||||
|
||||
signals.after_setup_logger.send(
|
||||
sender=None, logger=root,
|
||||
loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
|
||||
# then setup the root task logger.
|
||||
self.setup_task_loggers(loglevel, logfile, colorize=colorize)
|
||||
|
||||
# This is a hack for multiprocessing's fork+exec, so that
|
||||
# logging before Process.run works.
|
||||
logfile_name = logfile if isinstance(logfile, basestring) else ''
|
||||
os.environ.update(
|
||||
_MP_FORK_LOGLEVEL_=str(loglevel),
|
||||
_MP_FORK_LOGFILE_=logfile_name,
|
||||
_MP_FORK_LOGFORMAT_=format,
|
||||
)
|
||||
return receivers
|
||||
|
||||
def _configure_logger(self, logger, logfile, loglevel,
|
||||
format, colorize, **kwargs):
|
||||
if logger is not None:
|
||||
self.setup_handlers(logger, logfile, format,
|
||||
colorize, **kwargs)
|
||||
if loglevel:
|
||||
logger.setLevel(loglevel)
|
||||
|
||||
def setup_task_loggers(self, loglevel=None, logfile=None, format=None,
|
||||
colorize=None, propagate=False, **kwargs):
|
||||
"""Setup the task logger.
|
||||
|
||||
If `logfile` is not specified, then `sys.stderr` is used.
|
||||
|
||||
Returns logger object.
|
||||
|
||||
"""
|
||||
loglevel = mlevel(loglevel or self.loglevel)
|
||||
format = format or self.task_format
|
||||
colorize = self.supports_color(colorize, logfile)
|
||||
|
||||
logger = self.setup_handlers(
|
||||
get_logger('celery.task'),
|
||||
logfile, format, colorize,
|
||||
formatter=TaskFormatter, **kwargs
|
||||
)
|
||||
logger.setLevel(loglevel)
|
||||
logger.propagate = int(propagate) # this is an int for some reason.
|
||||
# better to not question why.
|
||||
signals.after_setup_task_logger.send(
|
||||
sender=None, logger=logger,
|
||||
loglevel=loglevel, logfile=logfile,
|
||||
format=format, colorize=colorize,
|
||||
)
|
||||
return logger
|
||||
|
||||
def redirect_stdouts_to_logger(self, logger, loglevel=None,
|
||||
stdout=True, stderr=True):
|
||||
"""Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
|
||||
logging instance.
|
||||
|
||||
:param logger: The :class:`logging.Logger` instance to redirect to.
|
||||
:param loglevel: The loglevel redirected messages will be logged as.
|
||||
|
||||
"""
|
||||
proxy = LoggingProxy(logger, loglevel)
|
||||
if stdout:
|
||||
sys.stdout = proxy
|
||||
if stderr:
|
||||
sys.stderr = proxy
|
||||
return proxy
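# Rough illustration (not part of the vendored source): once redirected,
# plain writes to stdout/stderr are re-emitted through the given logger:
#
#   >>> proxy = Logging(app).redirect_stdouts_to_logger(
#   ...     get_logger('celery.redirected'), loglevel='WARNING')
#   >>> print('hello')   # emitted as a WARNING record instead of the tty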
|
||||
|
||||
def supports_color(self, colorize=None, logfile=None):
|
||||
colorize = self.colorize if colorize is None else colorize
|
||||
if self.app.IS_WINDOWS:
|
||||
# Windows does not support ANSI color codes.
|
||||
return False
|
||||
if colorize or colorize is None:
|
||||
# Only use color if there is no active log file
|
||||
# and stderr is an actual terminal.
|
||||
return logfile is None and isatty(sys.stderr)
|
||||
return colorize
|
||||
|
||||
def colored(self, logfile=None, enabled=None):
|
||||
return colored(enabled=self.supports_color(enabled, logfile))
|
||||
|
||||
def setup_handlers(self, logger, logfile, format, colorize,
|
||||
formatter=ColorFormatter, **kwargs):
|
||||
if self._is_configured(logger):
|
||||
return logger
|
||||
handler = self._detect_handler(logfile)
|
||||
handler.setFormatter(formatter(format, use_color=colorize))
|
||||
logger.addHandler(handler)
|
||||
return logger
|
||||
|
||||
def _detect_handler(self, logfile=None):
|
||||
"""Create log handler with either a filename, an open stream
|
||||
or :const:`None` (stderr)."""
|
||||
logfile = sys.__stderr__ if logfile is None else logfile
|
||||
if hasattr(logfile, 'write'):
|
||||
return logging.StreamHandler(logfile)
|
||||
return WatchedFileHandler(logfile)
|
||||
|
||||
def _has_handler(self, logger):
|
||||
return (logger.handlers and
|
||||
not isinstance(logger.handlers[0], NullHandler))
|
||||
|
||||
def _is_configured(self, logger):
|
||||
return self._has_handler(logger) and not getattr(
|
||||
logger, '_rudimentary_setup', False)
|
||||
|
||||
def setup_logger(self, name='celery', *args, **kwargs):
|
||||
"""Deprecated: No longer used."""
|
||||
self.setup_logging_subsystem(*args, **kwargs)
|
||||
return logging.root
|
||||
|
||||
def get_default_logger(self, name='celery', **kwargs):
|
||||
return get_logger(name)
|
||||
60
awx/lib/site-packages/celery/app/registry.py
Normal file
@@ -0,0 +1,60 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.registry
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Registry of available tasks.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import inspect
|
||||
|
||||
from celery.exceptions import NotRegistered
|
||||
|
||||
|
||||
class TaskRegistry(dict):
|
||||
NotRegistered = NotRegistered
|
||||
|
||||
def __missing__(self, key):
|
||||
raise self.NotRegistered(key)
|
||||
|
||||
def register(self, task):
|
||||
"""Register a task in the task registry.
|
||||
|
||||
The task will be automatically instantiated if not already an
|
||||
instance.
|
||||
|
||||
"""
|
||||
self[task.name] = inspect.isclass(task) and task() or task
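# Rough illustration (not part of the vendored source): both a Task subclass
# and an already-created instance can be registered; a class is instantiated
# on the way in and lookups go by task name (AddTask is hypothetical):
#
#   >>> registry = TaskRegistry()
#   >>> registry.register(AddTask)
#   >>> registry['tasks.add']
#   <@task: tasks.add>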
|
||||
|
||||
def unregister(self, name):
|
||||
"""Unregister task by name.
|
||||
|
||||
:param name: name of the task to unregister, or a
|
||||
:class:`celery.task.base.Task` with a valid `name` attribute.
|
||||
|
||||
:raises celery.exceptions.NotRegistered: if the task has not
|
||||
been registered.
|
||||
|
||||
"""
|
||||
try:
|
||||
self.pop(getattr(name, 'name', name))
|
||||
except KeyError:
|
||||
raise self.NotRegistered(name)
|
||||
|
||||
# -- these methods are irrelevant now and will be removed in 4.0
|
||||
def regular(self):
|
||||
return self.filter_types('regular')
|
||||
|
||||
def periodic(self):
|
||||
return self.filter_types('periodic')
|
||||
|
||||
def filter_types(self, type):
|
||||
return dict((name, task) for name, task in self.iteritems()
|
||||
if getattr(task, 'type', 'regular') == type)
|
||||
|
||||
|
||||
def _unpickle_task(name):
|
||||
from celery import current_app
|
||||
return current_app.tasks[name]
|
||||
94
awx/lib/site-packages/celery/app/routes.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.routes
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Contains utilities for working with task routers,
|
||||
(:setting:`CELERY_ROUTES`).
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery.exceptions import QueueNotFound
|
||||
from celery.utils import lpmerge
|
||||
from celery.utils.functional import firstmethod, mpromise
|
||||
from celery.utils.imports import instantiate
|
||||
|
||||
_first_route = firstmethod('route_for_task')
|
||||
|
||||
|
||||
class MapRoute(object):
|
||||
"""Creates a router out of a :class:`dict`."""
|
||||
|
||||
def __init__(self, map):
|
||||
self.map = map
|
||||
|
||||
def route_for_task(self, task, *args, **kwargs):
|
||||
route = self.map.get(task)
|
||||
if route:
|
||||
return dict(route)
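# Rough illustration (not part of the vendored source): a plain dict becomes
# a router whose route_for_task() simply looks the task name up:
#
#   >>> router = MapRoute({'feed.tasks.import_feed': {'queue': 'feeds'}})
#   >>> router.route_for_task('feed.tasks.import_feed')
#   {'queue': 'feeds'}
#   >>> router.route_for_task('other.task')   # unknown tasks return None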
|
||||
|
||||
|
||||
class Router(object):
|
||||
|
||||
def __init__(self, routes=None, queues=None,
|
||||
create_missing=False, app=None):
|
||||
self.app = app
|
||||
self.queues = {} if queues is None else queues
|
||||
self.routes = [] if routes is None else routes
|
||||
self.create_missing = create_missing
|
||||
|
||||
def route(self, options, task, args=(), kwargs={}):
|
||||
options = self.expand_destination(options) # expands 'queue'
|
||||
if self.routes:
|
||||
route = self.lookup_route(task, args, kwargs)
|
||||
if route: # expands 'queue' in route.
|
||||
return lpmerge(self.expand_destination(route), options)
|
||||
if 'queue' not in options:
|
||||
options = lpmerge(self.expand_destination(
|
||||
self.app.conf.CELERY_DEFAULT_QUEUE), options)
|
||||
return options
|
||||
|
||||
def expand_destination(self, route):
|
||||
# Route can be a queue name: convenient for direct exchanges.
|
||||
if isinstance(route, basestring):
|
||||
queue, route = route, {}
|
||||
else:
|
||||
# can use defaults from configured queue, but override specific
|
||||
# things (like the routing_key): great for topic exchanges.
|
||||
queue = route.pop('queue', None)
|
||||
|
||||
if queue:
|
||||
try:
|
||||
Q = self.queues[queue] # noqa
|
||||
except KeyError:
|
||||
if not self.create_missing:
|
||||
raise QueueNotFound(
|
||||
'Queue %r is not defined in CELERY_QUEUES' % queue)
|
||||
for key in 'exchange', 'routing_key':
|
||||
if route.get(key) is None:
|
||||
route[key] = queue
|
||||
Q = self.app.amqp.queues.add(queue, **route)
|
||||
# needs to be declared by publisher
|
||||
route['queue'] = Q
|
||||
return route
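# Rough illustration (not part of the vendored source): expand_destination()
# turns a bare queue name into a full destination, looking the queue up in
# self.queues (or declaring it when create_missing is enabled); 'app' here is
# an assumed, already-configured application:
#
#   >>> router = Router(queues=app.amqp.queues, app=app)
#   >>> router.expand_destination('feeds')['queue']   # a kombu Queue instance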
|
||||
|
||||
def lookup_route(self, task, args=None, kwargs=None):
|
||||
return _first_route(self.routes, task, args, kwargs)
|
||||
|
||||
|
||||
def prepare(routes):
|
||||
"""Expands the :setting:`CELERY_ROUTES` setting."""
|
||||
|
||||
def expand_route(route):
|
||||
if isinstance(route, dict):
|
||||
return MapRoute(route)
|
||||
if isinstance(route, basestring):
|
||||
return mpromise(instantiate, route)
|
||||
return route
|
||||
|
||||
if routes is None:
|
||||
return ()
|
||||
if not isinstance(routes, (list, tuple)):
|
||||
routes = (routes, )
|
||||
return [expand_route(route) for route in routes]
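# Rough illustration (not part of the vendored source): prepare() accepts a
# single router, a dict, a dotted path, or a list of any of those, and always
# hands back a list of router objects ('myapp.routers.Router' is hypothetical):
#
#   >>> prepare({'tasks.add': {'queue': 'math'}})
#   [<MapRoute ...>]
#   >>> prepare('myapp.routers.Router')   # dotted path resolved lazily (mpromise)
#   [<mpromise ...>]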
|
||||
795
awx/lib/site-packages/celery/app/task.py
Normal file
@@ -0,0 +1,795 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.task
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Task Implementation: Task request context, and the base task class.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
from celery import current_app
|
||||
from celery import states
|
||||
from celery.__compat__ import class_property
|
||||
from celery._state import get_current_worker_task, _task_stack
|
||||
from celery.canvas import subtask
|
||||
from celery.datastructures import ExceptionInfo
|
||||
from celery.exceptions import MaxRetriesExceededError, RetryTaskError
|
||||
from celery.result import EagerResult
|
||||
from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
|
||||
from celery.utils.functional import mattrgetter, maybe_list
|
||||
from celery.utils.imports import instantiate
|
||||
from celery.utils.mail import ErrorMail
|
||||
|
||||
from .annotations import resolve_all as resolve_all_annotations
|
||||
from .registry import _unpickle_task
|
||||
|
||||
#: extracts attributes related to publishing a message from an object.
|
||||
extract_exec_options = mattrgetter(
|
||||
'queue', 'routing_key', 'exchange',
|
||||
'immediate', 'mandatory', 'priority', 'expires',
|
||||
'serializer', 'delivery_mode', 'compression',
|
||||
)
|
||||
|
||||
|
||||
class Context(object):
|
||||
# Default context
|
||||
logfile = None
|
||||
loglevel = None
|
||||
hostname = None
|
||||
id = None
|
||||
args = None
|
||||
kwargs = None
|
||||
retries = 0
|
||||
eta = None
|
||||
expires = None
|
||||
is_eager = False
|
||||
delivery_info = None
|
||||
taskset = None # compat alias to group
|
||||
group = None
|
||||
chord = None
|
||||
utc = None
|
||||
called_directly = True
|
||||
callbacks = None
|
||||
errbacks = None
|
||||
timeouts = None
|
||||
_children = None # see property
|
||||
_protected = 0
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.update(*args, **kwargs)
|
||||
|
||||
def update(self, *args, **kwargs):
|
||||
self.__dict__.update(*args, **kwargs)
|
||||
|
||||
def clear(self):
|
||||
self.__dict__.clear()
|
||||
|
||||
def get(self, key, default=None):
|
||||
try:
|
||||
return getattr(self, key)
|
||||
except AttributeError:
|
||||
return default
|
||||
|
||||
def __repr__(self):
|
||||
return '<Context: %r>' % (vars(self, ))
|
||||
|
||||
@property
|
||||
def children(self):
|
||||
# children must be an empty list for every thread
|
||||
if self._children is None:
|
||||
self._children = []
|
||||
return self._children
|
||||
|
||||
|
||||
class TaskType(type):
|
||||
"""Meta class for tasks.
|
||||
|
||||
Automatically registers the task in the task registry, except
|
||||
if the `abstract` attribute is set.
|
||||
|
||||
If no `name` attribute is provided, then the name is automatically
set to the name of the module it was defined in, and the class name.
|
||||
|
||||
"""
|
||||
|
||||
def __new__(cls, name, bases, attrs):
|
||||
new = super(TaskType, cls).__new__
|
||||
task_module = attrs.get('__module__') or '__main__'
|
||||
|
||||
# - Abstract class: abstract attribute should not be inherited.
|
||||
if attrs.pop('abstract', None) or not attrs.get('autoregister', True):
|
||||
return new(cls, name, bases, attrs)
|
||||
|
||||
# The 'app' attribute is now a property, with the real app located
|
||||
# in the '_app' attribute. Previously this was a regular attribute,
|
||||
# so we should support classes defining it.
|
||||
_app1, _app2 = attrs.pop('_app', None), attrs.pop('app', None)
|
||||
app = attrs['_app'] = _app1 or _app2 or current_app
|
||||
|
||||
# - Automatically generate missing/empty name.
|
||||
task_name = attrs.get('name')
|
||||
if not task_name:
|
||||
attrs['name'] = task_name = gen_task_name(app, name, task_module)
|
||||
|
||||
# - Create and register class.
|
||||
# Because of the way import happens (recursively)
|
||||
# this may or may not be the first time the task tries to register
|
||||
# with the framework. There should only be one class for each task
|
||||
# name, so we always return the registered version.
|
||||
tasks = app._tasks
|
||||
if task_name not in tasks:
|
||||
tasks.register(new(cls, name, bases, attrs))
|
||||
instance = tasks[task_name]
|
||||
instance.bind(app)
|
||||
return instance.__class__
|
||||
|
||||
def __repr__(cls):
|
||||
if cls._app:
|
||||
return '<class %s of %s>' % (cls.__name__, cls._app, )
|
||||
if cls.__v2_compat__:
|
||||
return '<unbound %s (v2 compatible)>' % (cls.__name__, )
|
||||
return '<unbound %s>' % (cls.__name__, )
|
||||
|
||||
|
||||
class Task(object):
|
||||
"""Task base class.
|
||||
|
||||
When called tasks apply the :meth:`run` method. This method must
|
||||
be defined by all tasks (that is unless the :meth:`__call__` method
|
||||
is overridden).
|
||||
|
||||
"""
|
||||
__metaclass__ = TaskType
|
||||
__trace__ = None
|
||||
__v2_compat__ = False # set by old base in celery.task.base
|
||||
|
||||
ErrorMail = ErrorMail
|
||||
MaxRetriesExceededError = MaxRetriesExceededError
|
||||
|
||||
#: Execution strategy used, or the qualified name of one.
|
||||
Strategy = 'celery.worker.strategy:default'
|
||||
|
||||
#: This is the instance bound to if the task is a method of a class.
|
||||
__self__ = None
|
||||
|
||||
#: The application instance associated with this task class.
|
||||
_app = None
|
||||
|
||||
#: Name of the task.
|
||||
name = None
|
||||
|
||||
#: If :const:`True` the task is an abstract base class.
|
||||
abstract = True
|
||||
|
||||
#: If disabled the worker will not forward magic keyword arguments.
|
||||
#: Deprecated and scheduled for removal in v4.0.
|
||||
accept_magic_kwargs = False
|
||||
|
||||
#: Maximum number of retries before giving up. If set to :const:`None`,
|
||||
#: it will **never** stop retrying.
|
||||
max_retries = 3
|
||||
|
||||
#: Default time in seconds before a retry of the task should be
|
||||
#: executed. 3 minutes by default.
|
||||
default_retry_delay = 3 * 60
|
||||
|
||||
#: Rate limit for this task type. Examples: :const:`None` (no rate
|
||||
#: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
|
||||
#: a minute), `'100/h'` (hundred tasks an hour).
|
||||
rate_limit = None
|
||||
|
||||
#: If enabled the worker will not store task state and return values
|
||||
#: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT`
|
||||
#: setting.
|
||||
ignore_result = None
|
||||
|
||||
#: When enabled errors will be stored even if the task is otherwise
|
||||
#: configured to ignore results.
|
||||
store_errors_even_if_ignored = None
|
||||
|
||||
#: If enabled an email will be sent to :setting:`ADMINS` whenever a task
|
||||
#: of this type fails.
|
||||
send_error_emails = None
|
||||
|
||||
#: The name of a serializer that is registered with
|
||||
#: :mod:`kombu.serialization.registry`. Default is `'pickle'`.
|
||||
serializer = None
|
||||
|
||||
#: Hard time limit.
|
||||
#: Defaults to the :setting:`CELERY_TASK_TIME_LIMIT` setting.
|
||||
time_limit = None
|
||||
|
||||
#: Soft time limit.
|
||||
#: Defaults to the :setting:`CELERY_TASK_SOFT_TIME_LIMIT` setting.
|
||||
soft_time_limit = None
|
||||
|
||||
#: The result store backend used for this task.
|
||||
backend = None
|
||||
|
||||
#: If disabled this task won't be registered automatically.
|
||||
autoregister = True
|
||||
|
||||
#: If enabled the task will report its status as 'started' when the task
|
||||
#: is executed by a worker. Disabled by default as the normal behaviour
|
||||
#: is to not report that level of granularity. Tasks are either pending,
|
||||
#: finished, or waiting to be retried.
|
||||
#:
|
||||
#: Having a 'started' status can be useful for when there are long
|
||||
#: running tasks and there is a need to report which task is currently
|
||||
#: running.
|
||||
#:
|
||||
#: The application default can be overridden using the
|
||||
#: :setting:`CELERY_TRACK_STARTED` setting.
|
||||
track_started = None
|
||||
|
||||
#: When enabled messages for this task will be acknowledged **after**
|
||||
#: the task has been executed, and not *just before* which is the
|
||||
#: default behavior.
|
||||
#:
|
||||
#: Please note that this means the task may be executed twice if the
|
||||
#: worker crashes mid execution (which may be acceptable for some
|
||||
#: applications).
|
||||
#:
|
||||
#: The application default can be overridden with the
|
||||
#: :setting:`CELERY_ACKS_LATE` setting.
|
||||
acks_late = None
|
||||
|
||||
#: Default task expiry time.
|
||||
expires = None
|
||||
|
||||
#: Some may expect a request to exist even if the task has not been
|
||||
#: called. This should probably be deprecated.
|
||||
_default_request = None
|
||||
|
||||
__bound__ = False
|
||||
|
||||
from_config = (
|
||||
('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'),
|
||||
('serializer', 'CELERY_TASK_SERIALIZER'),
|
||||
('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'),
|
||||
('track_started', 'CELERY_TRACK_STARTED'),
|
||||
('acks_late', 'CELERY_ACKS_LATE'),
|
||||
('ignore_result', 'CELERY_IGNORE_RESULT'),
|
||||
('store_errors_even_if_ignored',
|
||||
'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
|
||||
)
|
||||
|
||||
__bound__ = False
|
||||
|
||||
# - Tasks are lazily bound, so that configuration is not set
|
||||
# - until the task is actually used
|
||||
|
||||
@classmethod
|
||||
def bind(self, app):
|
||||
was_bound, self.__bound__ = self.__bound__, True
|
||||
self._app = app
|
||||
conf = app.conf
|
||||
|
||||
for attr_name, config_name in self.from_config:
|
||||
if getattr(self, attr_name, None) is None:
|
||||
setattr(self, attr_name, conf[config_name])
|
||||
if self.accept_magic_kwargs is None:
|
||||
self.accept_magic_kwargs = app.accept_magic_kwargs
|
||||
if self.backend is None:
|
||||
self.backend = app.backend
|
||||
|
||||
# decorate with annotations from config.
|
||||
if not was_bound:
|
||||
self.annotate()
|
||||
|
||||
from celery.utils.threads import LocalStack
|
||||
self.request_stack = LocalStack()
|
||||
|
||||
# PeriodicTask uses this to add itself to the PeriodicTask schedule.
|
||||
self.on_bound(app)
|
||||
|
||||
return app
|
||||
|
||||
@classmethod
|
||||
def on_bound(self, app):
|
||||
"""This method can be defined to do additional actions when the
|
||||
task class is bound to an app."""
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def _get_app(self):
|
||||
if not self.__bound__ or self._app is None:
|
||||
# The app property's __set__ method is not called
|
||||
# if Task.app is set (on the class), so must bind on use.
|
||||
self.bind(current_app)
|
||||
return self._app
|
||||
app = class_property(_get_app, bind)
|
||||
|
||||
@classmethod
|
||||
def annotate(self):
|
||||
for d in resolve_all_annotations(self.app.annotations, self):
|
||||
for key, value in d.iteritems():
|
||||
if key.startswith('@'):
|
||||
self.add_around(key[1:], value)
|
||||
else:
|
||||
setattr(self, key, value)
|
||||
|
||||
@classmethod
|
||||
def add_around(self, attr, around):
|
||||
orig = getattr(self, attr)
|
||||
if getattr(orig, '__wrapped__', None):
|
||||
orig = orig.__wrapped__
|
||||
meth = around(orig)
|
||||
meth.__wrapped__ = orig
|
||||
setattr(self, attr, meth)
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
_task_stack.push(self)
|
||||
self.push_request()
|
||||
try:
|
||||
# add self if this is a bound task
|
||||
if self.__self__ is not None:
|
||||
return self.run(self.__self__, *args, **kwargs)
|
||||
return self.run(*args, **kwargs)
|
||||
finally:
|
||||
self.pop_request()
|
||||
_task_stack.pop()
|
||||
|
||||
# - tasks are pickled into the name of the task only, and the receiver
|
||||
# - simply grabs it from the local registry.
|
||||
def __reduce__(self):
|
||||
return (_unpickle_task, (self.name, ), None)
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
"""The body of the task executed by workers."""
|
||||
raise NotImplementedError('Tasks must define the run method.')
|
||||
|
||||
def start_strategy(self, app, consumer):
|
||||
return instantiate(self.Strategy, self, app, consumer)
|
||||
|
||||
def delay(self, *args, **kwargs):
|
||||
"""Star argument version of :meth:`apply_async`.
|
||||
|
||||
Does not support the extra options enabled by :meth:`apply_async`.
|
||||
|
||||
:param \*args: positional arguments passed on to the task.
|
||||
:param \*\*kwargs: keyword arguments passed on to the task.
|
||||
|
||||
:returns: :class:`celery.result.AsyncResult`
|
||||
|
||||
"""
|
||||
return self.apply_async(args, kwargs)
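# Rough illustration (not part of the vendored source): delay() is just
# positional/keyword sugar over apply_async(), so the following two calls
# publish the same message (a hypothetical 'add' task is assumed):
#
#   >>> add.delay(2, 2)
#   >>> add.apply_async((2, 2))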
|
||||
|
||||
def apply_async(self, args=None, kwargs=None,
|
||||
task_id=None, producer=None, connection=None, router=None,
|
||||
link=None, link_error=None, publisher=None,
|
||||
add_to_parent=True, **options):
|
||||
"""Apply tasks asynchronously by sending a message.
|
||||
|
||||
:keyword args: The positional arguments to pass on to the
|
||||
task (a :class:`list` or :class:`tuple`).
|
||||
|
||||
:keyword kwargs: The keyword arguments to pass on to the
|
||||
task (a :class:`dict`)
|
||||
|
||||
:keyword countdown: Number of seconds into the future that the
|
||||
task should execute. Defaults to immediate
|
||||
execution (do not confuse with the
|
||||
`immediate` flag, as they are unrelated).
|
||||
|
||||
:keyword eta: A :class:`~datetime.datetime` object describing
|
||||
the absolute time and date of when the task should
|
||||
be executed. May not be specified if `countdown`
|
||||
is also supplied. (Do not confuse this with the
|
||||
`immediate` flag, as they are unrelated).
|
||||
|
||||
:keyword expires: Either a :class:`int`, describing the number of
|
||||
seconds, or a :class:`~datetime.datetime` object
|
||||
that describes the absolute time and date of when
|
||||
the task should expire. The task will not be
|
||||
executed after the expiration time.
|
||||
|
||||
:keyword connection: Re-use existing broker connection instead
|
||||
of establishing a new one.
|
||||
|
||||
:keyword retry: If enabled sending of the task message will be retried
|
||||
in the event of connection loss or failure. Default
|
||||
is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY`
|
||||
setting. Note you need to handle the
|
||||
producer/connection manually for this to work.
|
||||
|
||||
:keyword retry_policy: Override the retry policy used. See the
|
||||
:setting:`CELERY_TASK_PUBLISH_RETRY` setting.
|
||||
|
||||
:keyword routing_key: Custom routing key used to route the task to a
|
||||
worker server. If in combination with a
|
||||
``queue`` argument only used to specify custom
|
||||
routing keys to topic exchanges.
|
||||
|
||||
:keyword queue: The queue to route the task to. This must be a key
|
||||
present in :setting:`CELERY_QUEUES`, or
|
||||
:setting:`CELERY_CREATE_MISSING_QUEUES` must be
|
||||
enabled. See :ref:`guide-routing` for more
|
||||
information.
|
||||
|
||||
:keyword exchange: Named custom exchange to send the task to.
|
||||
Usually not used in combination with the ``queue``
|
||||
argument.
|
||||
|
||||
:keyword priority: The task priority, a number between 0 and 9.
|
||||
Defaults to the :attr:`priority` attribute.
|
||||
|
||||
:keyword serializer: A string identifying the default
|
||||
serialization method to use. Can be `pickle`,
|
||||
`json`, `yaml`, `msgpack` or any custom
|
||||
serialization method that has been registered
|
||||
with :mod:`kombu.serialization.registry`.
|
||||
Defaults to the :attr:`serializer` attribute.
|
||||
|
||||
:keyword compression: A string identifying the compression method
|
||||
to use. Can be one of ``zlib``, ``bzip2``,
|
||||
or any custom compression methods registered with
|
||||
:func:`kombu.compression.register`. Defaults to
|
||||
the :setting:`CELERY_MESSAGE_COMPRESSION`
|
||||
setting.
|
||||
:keyword link: A single, or a list of subtasks to apply if the
|
||||
task exits successfully.
|
||||
:keyword link_error: A single, or a list of subtasks to apply
|
||||
if an error occurs while executing the task.
|
||||
|
||||
:keyword producer: :class:`~@amqp.TaskProducer` instance to use.
|
||||
:keyword add_to_parent: If set to True (default) and the task
|
||||
is applied while executing another task, then the result
|
||||
will be appended to the parent tasks ``request.children``
|
||||
attribute.
|
||||
:keyword publisher: Deprecated alias to ``producer``.
|
||||
|
||||
Also supports all keyword arguments supported by
|
||||
:meth:`kombu.messaging.Producer.publish`.
|
||||
|
||||
.. note::
|
||||
If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will
|
||||
be replaced by a local :func:`apply` call instead.
|
||||
|
||||
"""
|
||||
producer = producer or publisher
|
||||
app = self._get_app()
|
||||
router = router or self.app.amqp.router
|
||||
conf = app.conf
|
||||
|
||||
# add 'self' if this is a bound method.
|
||||
if self.__self__ is not None:
|
||||
args = (self.__self__, ) + tuple(args)
|
||||
|
||||
if conf.CELERY_ALWAYS_EAGER:
|
||||
return self.apply(args, kwargs, task_id=task_id, **options)
|
||||
options = dict(extract_exec_options(self), **options)
|
||||
options = router.route(options, self.name, args, kwargs)
|
||||
|
||||
if connection:
|
||||
producer = app.amqp.TaskProducer(connection)
|
||||
with app.producer_or_acquire(producer) as P:
|
||||
task_id = P.publish_task(self.name, args, kwargs,
|
||||
task_id=task_id,
|
||||
callbacks=maybe_list(link),
|
||||
errbacks=maybe_list(link_error),
|
||||
**options)
|
||||
result = self.AsyncResult(task_id)
|
||||
if add_to_parent:
|
||||
parent = get_current_worker_task()
|
||||
if parent:
|
||||
parent.request.children.append(result)
|
||||
return result
|
||||
|
||||
def subtask_from_request(self, request=None, args=None, kwargs=None,
|
||||
**extra_options):
|
||||
|
||||
request = self.request if request is None else request
|
||||
args = request.args if args is None else args
|
||||
kwargs = request.kwargs if kwargs is None else kwargs
|
||||
delivery_info = request.delivery_info or {}
|
||||
options = {
|
||||
'task_id': request.id,
|
||||
'link': request.callbacks,
|
||||
'link_error': request.errbacks,
|
||||
'exchange': delivery_info.get('exchange'),
|
||||
'routing_key': delivery_info.get('routing_key')
|
||||
}
|
||||
return self.subtask(args, kwargs, options, type=self, **extra_options)
|
||||
|
||||
def retry(self, args=None, kwargs=None, exc=None, throw=True,
|
||||
eta=None, countdown=None, max_retries=None, **options):
|
||||
"""Retry the task.
|
||||
|
||||
:param args: Positional arguments to retry with.
|
||||
:param kwargs: Keyword arguments to retry with.
|
||||
:keyword exc: Custom exception to report when the max restart
|
||||
limit has been exceeded (default:
|
||||
:exc:`~celery.exceptions.MaxRetriesExceededError`).
|
||||
|
||||
If this argument is set and retry is called while
|
||||
an exception was raised (``sys.exc_info()`` is set)
|
||||
it will attempt to reraise the current exception.
|
||||
|
||||
If no exception was raised it will raise the ``exc``
|
||||
argument provided.
|
||||
:keyword countdown: Time in seconds to delay the retry for.
|
||||
:keyword eta: Explicit time and date to run the retry at
|
||||
(must be a :class:`~datetime.datetime` instance).
|
||||
:keyword max_retries: If set, overrides the default retry limit.
|
||||
:keyword \*\*options: Any extra options to pass on to
|
||||
:meth:`apply_async`.
|
||||
:keyword throw: If this is :const:`False`, do not raise the
|
||||
:exc:`~celery.exceptions.RetryTaskError` exception,
|
||||
that tells the worker to mark the task as being
|
||||
retried. Note that this means the task will be
|
||||
marked as failed if the task raises an exception,
|
||||
or successful if it returns.
|
||||
|
||||
:raises celery.exceptions.RetryTaskError: To tell the worker that
|
||||
the task has been re-sent for retry. This always happens,
|
||||
unless the `throw` keyword argument has been explicitly set
|
||||
to :const:`False`, and is considered normal operation.
|
||||
|
||||
**Example**
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> @task()
|
||||
>>> def tweet(auth, message):
|
||||
... twitter = Twitter(oauth=auth)
|
||||
... try:
|
||||
... twitter.post_status_update(message)
|
||||
... except twitter.FailWhale, exc:
|
||||
... # Retry in 5 minutes.
|
||||
... raise tweet.retry(countdown=60 * 5, exc=exc)
|
||||
|
||||
Although the task will never return above as `retry` raises an
exception to notify the worker, we use `raise` in front of the retry
to convey that the rest of the block will not be executed.
|
||||
|
||||
"""
|
||||
request = self.request
|
||||
retries = request.retries + 1
|
||||
max_retries = self.max_retries if max_retries is None else max_retries
|
||||
|
||||
# Not in worker or emulated by (apply/always_eager),
|
||||
# so just raise the original exception.
|
||||
if request.called_directly:
|
||||
maybe_reraise() # raise orig stack if PyErr_Occurred
|
||||
raise exc or RetryTaskError('Task can be retried', None)
|
||||
|
||||
if not eta and countdown is None:
|
||||
countdown = self.default_retry_delay
|
||||
|
||||
S = self.subtask_from_request(
|
||||
request, args, kwargs,
|
||||
countdown=countdown, eta=eta, retries=retries,
|
||||
**options
|
||||
)
|
||||
|
||||
if max_retries is not None and retries > max_retries:
|
||||
if exc:
|
||||
maybe_reraise()
|
||||
raise self.MaxRetriesExceededError(
|
||||
"""Can't retry %s[%s] args:%s kwargs:%s""" % (
|
||||
self.name, request.id, S.args, S.kwargs))
|
||||
|
||||
# If task was executed eagerly using apply(),
|
||||
# then the retry must also be executed eagerly.
|
||||
S.apply().get() if request.is_eager else S.apply_async()
|
||||
ret = RetryTaskError(exc=exc, when=eta or countdown)
|
||||
if throw:
|
||||
raise ret
|
||||
return ret
|
||||
|
||||
def apply(self, args=None, kwargs=None, **options):
|
||||
"""Execute this task locally, by blocking until the task returns.
|
||||
|
||||
:param args: positional arguments passed on to the task.
|
||||
:param kwargs: keyword arguments passed on to the task.
|
||||
:keyword throw: Re-raise task exceptions. Defaults to
|
||||
the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`
|
||||
setting.
|
||||
|
||||
:rtype: :class:`celery.result.EagerResult`
|
||||
|
||||
"""
|
||||
# trace imports Task, so need to import inline.
|
||||
from celery.task.trace import eager_trace_task
|
||||
|
||||
app = self._get_app()
|
||||
args = args or ()
|
||||
# add 'self' if this is a bound method.
|
||||
if self.__self__ is not None:
|
||||
args = (self.__self__, ) + tuple(args)
|
||||
kwargs = kwargs or {}
|
||||
task_id = options.get('task_id') or uuid()
|
||||
retries = options.get('retries', 0)
|
||||
throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS',
|
||||
options.pop('throw', None))
|
||||
|
||||
# Make sure we get the task instance, not class.
|
||||
task = app._tasks[self.name]
|
||||
|
||||
request = {'id': task_id,
|
||||
'retries': retries,
|
||||
'is_eager': True,
|
||||
'logfile': options.get('logfile'),
|
||||
'loglevel': options.get('loglevel', 0),
|
||||
'delivery_info': {'is_eager': True}}
|
||||
if self.accept_magic_kwargs:
|
||||
default_kwargs = {'task_name': task.name,
|
||||
'task_id': task_id,
|
||||
'task_retries': retries,
|
||||
'task_is_eager': True,
|
||||
'logfile': options.get('logfile'),
|
||||
'loglevel': options.get('loglevel', 0),
|
||||
'delivery_info': {'is_eager': True}}
|
||||
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
|
||||
extend_with = dict((key, val)
|
||||
for key, val in default_kwargs.items()
|
||||
if key in supported_keys)
|
||||
kwargs.update(extend_with)
|
||||
|
||||
tb = None
|
||||
retval, info = eager_trace_task(task, task_id, args, kwargs,
|
||||
request=request, propagate=throw)
|
||||
if isinstance(retval, ExceptionInfo):
|
||||
retval, tb = retval.exception, retval.traceback
|
||||
state = states.SUCCESS if info is None else info.state
|
||||
return EagerResult(task_id, retval, state, traceback=tb)
|
||||
|
||||
def AsyncResult(self, task_id, **kwargs):
|
||||
"""Get AsyncResult instance for this kind of task.
|
||||
|
||||
:param task_id: Task id to get result for.
|
||||
|
||||
"""
|
||||
return self._get_app().AsyncResult(task_id, backend=self.backend,
|
||||
task_name=self.name, **kwargs)
|
||||
|
||||
def subtask(self, args=None, *starargs, **starkwargs):
|
||||
"""Returns :class:`~celery.subtask` object for
|
||||
this task, wrapping arguments and execution options
|
||||
for a single task invocation."""
|
||||
return subtask(self, args, *starargs, **starkwargs)
|
||||
|
||||
def s(self, *args, **kwargs):
|
||||
"""``.s(*a, **k) -> .subtask(a, k)``"""
|
||||
return self.subtask(args, kwargs)
|
||||
|
||||
def si(self, *args, **kwargs):
|
||||
"""``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
|
||||
return self.subtask(args, kwargs, immutable=True)
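# Rough illustration (not part of the vendored source): .s() and .si() build
# subtask signatures for use in chains, groups and chords; .si() marks the
# signature immutable so a parent result is not prepended to its arguments
# (a hypothetical 'add' task is assumed):
#
#   >>> add.s(2, 2)    # -> subtask('tasks.add', (2, 2), {})
#   >>> add.si(2, 2)   # same, but immutable=True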
|
||||
|
||||
def chunks(self, it, n):
|
||||
"""Creates a :class:`~celery.canvas.chunks` task for this task."""
|
||||
from celery import chunks
|
||||
return chunks(self.s(), it, n)
|
||||
|
||||
def map(self, it):
|
||||
"""Creates a :class:`~celery.canvas.xmap` task from ``it``."""
|
||||
from celery import xmap
|
||||
return xmap(self.s(), it)
|
||||
|
||||
def starmap(self, it):
|
||||
"""Creates a :class:`~celery.canvas.xstarmap` task from ``it``."""
|
||||
from celery import xstarmap
|
||||
return xstarmap(self.s(), it)
|
||||
|
||||
def update_state(self, task_id=None, state=None, meta=None):
|
||||
"""Update task state.
|
||||
|
||||
:keyword task_id: Id of the task to update, defaults to the
|
||||
id of the current task
|
||||
:keyword state: New state (:class:`str`).
|
||||
:keyword meta: State metadata (:class:`dict`).
|
||||
|
||||
|
||||
|
||||
"""
|
||||
if task_id is None:
|
||||
task_id = self.request.id
|
||||
self.backend.store_result(task_id, meta, state)
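# Rough illustration (not part of the vendored source): typically called from
# inside a running task to publish custom progress through the result backend:
#
#   >>> self.update_state(state='PROGRESS', meta={'done': 50, 'total': 100})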
|
||||
|
||||
def on_success(self, retval, task_id, args, kwargs):
|
||||
"""Success handler.
|
||||
|
||||
Run by the worker if the task executes successfully.
|
||||
|
||||
:param retval: The return value of the task.
|
||||
:param task_id: Unique id of the executed task.
|
||||
:param args: Original arguments for the executed task.
|
||||
:param kwargs: Original keyword arguments for the executed task.
|
||||
|
||||
The return value of this handler is ignored.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def on_retry(self, exc, task_id, args, kwargs, einfo):
|
||||
"""Retry handler.
|
||||
|
||||
This is run by the worker when the task is to be retried.
|
||||
|
||||
:param exc: The exception sent to :meth:`retry`.
|
||||
:param task_id: Unique id of the retried task.
|
||||
:param args: Original arguments for the retried task.
|
||||
:param kwargs: Original keyword arguments for the retried task.
|
||||
|
||||
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
||||
instance, containing the traceback.
|
||||
|
||||
The return value of this handler is ignored.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def on_failure(self, exc, task_id, args, kwargs, einfo):
|
||||
"""Error handler.
|
||||
|
||||
This is run by the worker when the task fails.
|
||||
|
||||
:param exc: The exception raised by the task.
|
||||
:param task_id: Unique id of the failed task.
|
||||
:param args: Original arguments for the task that failed.
|
||||
:param kwargs: Original keyword arguments for the task
|
||||
that failed.
|
||||
|
||||
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
||||
instance, containing the traceback.
|
||||
|
||||
The return value of this handler is ignored.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def after_return(self, status, retval, task_id, args, kwargs, einfo):
|
||||
"""Handler called after the task returns.
|
||||
|
||||
:param status: Current task state.
|
||||
:param retval: Task return value/exception.
|
||||
:param task_id: Unique id of the task.
|
||||
:param args: Original arguments for the task that failed.
|
||||
:param kwargs: Original keyword arguments for the task
|
||||
that failed.
|
||||
|
||||
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
||||
instance, containing the traceback (if any).
|
||||
|
||||
The return value of this handler is ignored.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def send_error_email(self, context, exc, **kwargs):
|
||||
if self.send_error_emails and \
|
||||
not getattr(self, 'disable_error_emails', None):
|
||||
self.ErrorMail(self, **kwargs).send(context, exc)
|
||||
|
||||
def push_request(self, *args, **kwargs):
|
||||
self.request_stack.push(Context(*args, **kwargs))
|
||||
|
||||
def pop_request(self):
|
||||
self.request_stack.pop()
|
||||
|
||||
def __repr__(self):
|
||||
"""`repr(task)`"""
|
||||
if self.__self__:
|
||||
return '<bound task %s of %r>' % (self.name, self.__self__)
|
||||
return '<@task: %s>' % (self.name, )
|
||||
|
||||
def _get_request(self):
|
||||
"""Get current request object."""
|
||||
req = self.request_stack.top
|
||||
if req is None:
|
||||
# task was not called, but some may still expect a request
|
||||
# to be there, perhaps that should be deprecated.
|
||||
if self._default_request is None:
|
||||
self._default_request = Context()
|
||||
return self._default_request
|
||||
return req
|
||||
request = property(_get_request)
|
||||
|
||||
@property
|
||||
def __name__(self):
|
||||
return self.__class__.__name__
|
||||
BaseTask = Task # compat alias
|
||||
178
awx/lib/site-packages/celery/app/utils.py
Normal file
@@ -0,0 +1,178 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.app.utils
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
App utilities: Compat settings, bugreport tool, pickling apps.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import platform as _platform
|
||||
import re
|
||||
|
||||
from celery import platforms
|
||||
from celery.datastructures import ConfigurationView
|
||||
from celery.utils.text import pretty
|
||||
from celery.utils.imports import qualname
|
||||
|
||||
from .defaults import find
|
||||
|
||||
#: Format used to generate bugreport information.
|
||||
BUGREPORT_INFO = """
|
||||
software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
|
||||
billiard:%(billiard_v)s %(driver_v)s
|
||||
platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
|
||||
loader -> %(loader)s
|
||||
settings -> transport:%(transport)s results:%(results)s
|
||||
|
||||
%(human_settings)s
|
||||
"""
|
||||
|
||||
HIDDEN_SETTINGS = re.compile(
|
||||
'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE',
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
class Settings(ConfigurationView):
|
||||
"""Celery settings object."""
|
||||
|
||||
@property
|
||||
def CELERY_RESULT_BACKEND(self):
|
||||
return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')
|
||||
|
||||
@property
|
||||
def BROKER_TRANSPORT(self):
|
||||
return self.first('BROKER_TRANSPORT',
|
||||
'BROKER_BACKEND', 'CARROT_BACKEND')
|
||||
|
||||
@property
|
||||
def BROKER_BACKEND(self):
|
||||
"""Deprecated compat alias to :attr:`BROKER_TRANSPORT`."""
|
||||
return self.BROKER_TRANSPORT
|
||||
|
||||
@property
|
||||
def BROKER_HOST(self):
|
||||
return (os.environ.get('CELERY_BROKER_URL') or
|
||||
self.first('BROKER_URL', 'BROKER_HOST'))
|
||||
|
||||
@property
|
||||
def CELERY_TIMEZONE(self):
|
||||
# this way we also support django's time zone.
|
||||
return self.first('CELERY_TIMEZONE', 'TIME_ZONE')
|
||||
|
||||
def without_defaults(self):
|
||||
"""Returns the current configuration, but without defaults."""
|
||||
# the last stash is the default settings, so just skip that
|
||||
return Settings({}, self._order[:-1])
|
||||
|
||||
def find_option(self, name, namespace='celery'):
|
||||
"""Search for option by name.
|
||||
|
||||
Will return ``(namespace, option_name, Option)`` tuple, e.g.::
|
||||
|
||||
>>> celery.conf.find_option('disable_rate_limits')
|
||||
('CELERY', 'DISABLE_RATE_LIMITS',
|
||||
<Option: type->bool default->False>))
|
||||
|
||||
:param name: Name of option, cannot be partial.
|
||||
:keyword namespace: Preferred namespace (``CELERY`` by default).
|
||||
|
||||
"""
|
||||
return find(name, namespace)
|
||||
|
||||
def find_value_for_key(self, name, namespace='celery'):
|
||||
"""Shortcut to ``get_by_parts(*find_option(name)[:-1])``"""
|
||||
return self.get_by_parts(*self.find_option(name, namespace)[:-1])
|
||||
|
||||
def get_by_parts(self, *parts):
|
||||
"""Returns the current value for setting specified as a path.
|
||||
|
||||
Example::
|
||||
|
||||
>>> celery.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS')
|
||||
False
|
||||
|
||||
"""
|
||||
return self['_'.join(part for part in parts if part)]
|
||||
|
||||
def humanize(self):
|
||||
"""Returns a human readable string showing changes to the
|
||||
configuration."""
|
||||
return '\n'.join(
|
||||
'%s: %s' % (key, pretty(value, width=50))
|
||||
for key, value in filter_hidden_settings(dict(
|
||||
(k, v) for k, v in self.without_defaults().iteritems()
|
||||
if k.isupper() and not k.startswith('_'))).iteritems())
|
||||
|
||||
|
||||
class AppPickler(object):
|
||||
"""Default application pickler/unpickler."""
|
||||
|
||||
def __call__(self, cls, *args):
|
||||
kwargs = self.build_kwargs(*args)
|
||||
app = self.construct(cls, **kwargs)
|
||||
self.prepare(app, **kwargs)
|
||||
return app
|
||||
|
||||
def prepare(self, app, **kwargs):
|
||||
app.conf.update(kwargs['changes'])
|
||||
|
||||
def build_kwargs(self, *args):
|
||||
return self.build_standard_kwargs(*args)
|
||||
|
||||
def build_standard_kwargs(self, main, changes, loader, backend, amqp,
|
||||
events, log, control, accept_magic_kwargs,
|
||||
config_source=None):
|
||||
return dict(main=main, loader=loader, backend=backend, amqp=amqp,
|
||||
changes=changes, events=events, log=log, control=control,
|
||||
set_as_current=False,
|
||||
accept_magic_kwargs=accept_magic_kwargs,
|
||||
config_source=config_source)
|
||||
|
||||
def construct(self, cls, **kwargs):
|
||||
return cls(**kwargs)
|
||||
|
||||
|
||||
def _unpickle_app(cls, pickler, *args):
|
||||
return pickler()(cls, *args)
|
||||
|
||||
|
||||
def filter_hidden_settings(conf):
|
||||
|
||||
def maybe_censor(key, value):
|
||||
return '********' if HIDDEN_SETTINGS.search(key) else value
|
||||
|
||||
return dict((k, maybe_censor(k, v)) for k, v in conf.iteritems())
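# Rough illustration (not part of the vendored source): any key matching
# HIDDEN_SETTINGS (API/TOKEN/KEY/SECRET/PASS/...) is masked before it ends up
# in a bug report:
#
#   >>> filter_hidden_settings({'BROKER_PASSWORD': 'hunter2',
#   ...                         'CELERYD_CONCURRENCY': 4})
#   {'BROKER_PASSWORD': '********', 'CELERYD_CONCURRENCY': 4}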
|
||||
|
||||
|
||||
def bugreport(app):
|
||||
"""Returns a string containing information useful in bug reports."""
|
||||
import billiard
|
||||
import celery
|
||||
import kombu
|
||||
|
||||
try:
|
||||
conn = app.connection()
|
||||
driver_v = '%s:%s' % (conn.transport.driver_name,
|
||||
conn.transport.driver_version())
|
||||
transport = conn.transport_cls
|
||||
except Exception:
|
||||
transport = driver_v = ''
|
||||
|
||||
return BUGREPORT_INFO % {
|
||||
'system': _platform.system(),
|
||||
'arch': ', '.join(p for p in _platform.architecture() if p),
|
||||
'py_i': platforms.pyimplementation(),
|
||||
'celery_v': celery.VERSION_BANNER,
|
||||
'kombu_v': kombu.__version__,
|
||||
'billiard_v': billiard.__version__,
|
||||
'py_v': _platform.python_version(),
|
||||
'driver_v': driver_v,
|
||||
'transport': transport,
|
||||
'results': app.conf.CELERY_RESULT_BACKEND or 'disabled',
|
||||
'human_settings': app.conf.humanize(),
|
||||
'loader': qualname(app.loader.__class__),
|
||||
}
|
||||
0
awx/lib/site-packages/celery/apps/__init__.py
Normal file
140
awx/lib/site-packages/celery/apps/beat.py
Normal file
@@ -0,0 +1,140 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.apps.beat
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
This module is the 'program-version' of :mod:`celery.beat`.
|
||||
|
||||
It does everything necessary to run that module
|
||||
as an actual application, like installing signal handlers
|
||||
and so on.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from celery import VERSION_BANNER, platforms, beat
|
||||
from celery.app import app_or_default
|
||||
from celery.app.abstract import configurated, from_config
|
||||
from celery.utils.imports import qualname
|
||||
from celery.utils.log import LOG_LEVELS, get_logger
|
||||
from celery.utils.timeutils import humanize_seconds
|
||||
|
||||
STARTUP_INFO_FMT = """
|
||||
Configuration ->
|
||||
. broker -> %(conninfo)s
|
||||
. loader -> %(loader)s
|
||||
. scheduler -> %(scheduler)s
|
||||
%(scheduler_info)s
|
||||
. logfile -> %(logfile)s@%(loglevel)s
|
||||
. maxinterval -> %(hmax_interval)s (%(max_interval)ss)
|
||||
""".strip()
|
||||
|
||||
logger = get_logger('celery.beat')
|
||||
|
||||
|
||||
class Beat(configurated):
|
||||
Service = beat.Service
|
||||
|
||||
app = None
|
||||
loglevel = from_config('log_level')
|
||||
logfile = from_config('log_file')
|
||||
schedule = from_config('schedule_filename')
|
||||
scheduler_cls = from_config('scheduler')
|
||||
redirect_stdouts = from_config()
|
||||
redirect_stdouts_level = from_config()
|
||||
|
||||
def __init__(self, max_interval=None, app=None,
|
||||
socket_timeout=30, pidfile=None, no_color=None, **kwargs):
|
||||
"""Starts the celerybeat task scheduler."""
|
||||
self.app = app = app_or_default(app or self.app)
|
||||
self.setup_defaults(kwargs, namespace='celerybeat')
|
||||
|
||||
self.max_interval = max_interval
|
||||
self.socket_timeout = socket_timeout
|
||||
self.no_color = no_color
|
||||
self.colored = app.log.colored(
|
||||
self.logfile,
|
||||
enabled=not no_color if no_color is not None else no_color,
|
||||
)
|
||||
self.pidfile = pidfile
|
||||
|
||||
if not isinstance(self.loglevel, int):
|
||||
self.loglevel = LOG_LEVELS[self.loglevel.upper()]
|
||||
|
||||
def run(self):
|
||||
print(str(self.colored.cyan(
|
||||
'celerybeat v%s is starting.' % VERSION_BANNER)))
|
||||
self.init_loader()
|
||||
self.set_process_title()
|
||||
self.start_scheduler()
|
||||
|
||||
def setup_logging(self, colorize=None):
|
||||
if colorize is None and self.no_color is not None:
|
||||
colorize = not self.no_color
|
||||
self.app.log.setup(self.loglevel, self.logfile,
|
||||
self.redirect_stdouts, self.redirect_stdouts_level,
|
||||
colorize=colorize)
|
||||
|
||||
def start_scheduler(self):
|
||||
c = self.colored
|
||||
if self.pidfile:
|
||||
platforms.create_pidlock(self.pidfile)
|
||||
beat = self.Service(app=self.app,
|
||||
max_interval=self.max_interval,
|
||||
scheduler_cls=self.scheduler_cls,
|
||||
schedule_filename=self.schedule)
|
||||
|
||||
print(str(c.blue('__ ', c.magenta('-'),
|
||||
c.blue(' ... __ '), c.magenta('-'),
|
||||
c.blue(' _\n'),
|
||||
c.reset(self.startup_info(beat)))))
|
||||
self.setup_logging()
|
||||
if self.socket_timeout:
|
||||
logger.debug('Setting default socket timeout to %r',
|
||||
self.socket_timeout)
|
||||
socket.setdefaulttimeout(self.socket_timeout)
|
||||
try:
|
||||
self.install_sync_handler(beat)
|
||||
beat.start()
|
||||
except Exception, exc:
|
||||
logger.critical('celerybeat raised exception %s: %r',
|
||||
exc.__class__, exc,
|
||||
exc_info=True)
|
||||
|
||||
def init_loader(self):
|
||||
# Run the worker init handler.
|
||||
# (Usually imports task modules and such.)
|
||||
self.app.loader.init_worker()
|
||||
self.app.finalize()
|
||||
|
||||
def startup_info(self, beat):
|
||||
scheduler = beat.get_scheduler(lazy=True)
|
||||
return STARTUP_INFO_FMT % {
|
||||
'conninfo': self.app.connection().as_uri(),
|
||||
'logfile': self.logfile or '[stderr]',
|
||||
'loglevel': LOG_LEVELS[self.loglevel],
|
||||
'loader': qualname(self.app.loader),
|
||||
'scheduler': qualname(scheduler),
|
||||
'scheduler_info': scheduler.info,
|
||||
'hmax_interval': humanize_seconds(beat.max_interval),
|
||||
'max_interval': beat.max_interval,
|
||||
}
|
||||
|
||||
def set_process_title(self):
|
||||
arg_start = 'manage' in sys.argv[0] and 2 or 1
|
||||
platforms.set_process_title(
|
||||
'celerybeat', info=' '.join(sys.argv[arg_start:]),
|
||||
)
|
||||
|
||||
def install_sync_handler(self, beat):
|
||||
"""Install a `SIGTERM` + `SIGINT` handler that saves
|
||||
the celerybeat schedule."""
|
||||
|
||||
def _sync(signum, frame):
|
||||
beat.sync()
|
||||
raise SystemExit()
|
||||
|
||||
platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)
|
||||
419
awx/lib/site-packages/celery/apps/worker.py
Normal file
@@ -0,0 +1,419 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.apps.worker
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module is the 'program-version' of :mod:`celery.worker`.
|
||||
|
||||
It does everything necessary to run that module
|
||||
as an actual application, like installing signal handlers,
|
||||
platform tweaks, and so on.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
import os
|
||||
import platform as _platform
|
||||
import socket
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from functools import partial
|
||||
|
||||
from billiard import cpu_count, current_process
|
||||
from kombu.utils.encoding import safe_str
|
||||
|
||||
from celery import VERSION_BANNER, platforms, signals
|
||||
from celery.app import app_or_default
|
||||
from celery.app.abstract import configurated, from_config
|
||||
from celery.exceptions import ImproperlyConfigured, SystemTerminate
|
||||
from celery.loaders.app import AppLoader
|
||||
from celery.task import trace
|
||||
from celery.utils import cry, isatty, worker_direct
|
||||
from celery.utils.imports import qualname
|
||||
from celery.utils.log import get_logger, mlevel, set_in_sighandler
|
||||
from celery.utils.text import pluralize
|
||||
from celery.worker import WorkController
|
||||
|
||||
try:
|
||||
from greenlet import GreenletExit
|
||||
IGNORE_ERRORS = (GreenletExit, )
|
||||
except ImportError: # pragma: no cover
|
||||
IGNORE_ERRORS = ()
|
||||
|
||||
logger = get_logger(__name__)
|
||||
is_jython = sys.platform.startswith('java')
|
||||
is_pypy = hasattr(sys, 'pypy_version_info')
|
||||
|
||||
|
||||
def active_thread_count():
|
||||
from threading import enumerate
|
||||
# must use .getName on Python 2.5
|
||||
return sum(1 for t in enumerate()
|
||||
if not t.getName().startswith('Dummy-'))
|
||||
|
||||
|
||||
def safe_say(msg):
|
||||
sys.__stderr__.write('\n%s\n' % msg)
|
||||
|
||||
ARTLINES = [
|
||||
' --------------',
|
||||
'---- **** -----',
|
||||
'--- * *** * --',
|
||||
'-- * - **** ---',
|
||||
'- ** ----------',
|
||||
'- ** ----------',
|
||||
'- ** ----------',
|
||||
'- ** ----------',
|
||||
'- *** --- * ---',
|
||||
'-- ******* ----',
|
||||
'--- ***** -----',
|
||||
' --------------',
|
||||
]
|
||||
|
||||
BANNER = """\
|
||||
celery@%(hostname)s v%(version)s
|
||||
|
||||
%(platform)s
|
||||
|
||||
[config]
|
||||
.> broker: %(conninfo)s
|
||||
.> app: %(app)s
|
||||
.> concurrency: %(concurrency)s
|
||||
.> events: %(events)s
|
||||
|
||||
[queues]
|
||||
%(queues)s
|
||||
"""
|
||||
|
||||
EXTRA_INFO_FMT = """
|
||||
[Tasks]
|
||||
%(tasks)s
|
||||
"""
|
||||
|
||||
UNKNOWN_QUEUE = """\
|
||||
Trying to select queue subset of %r, but queue %s is not
|
||||
defined in the CELERY_QUEUES setting.
|
||||
|
||||
If you want to automatically declare unknown queues you can
|
||||
enable the CELERY_CREATE_MISSING_QUEUES setting.
|
||||
"""
|
||||
|
||||
|
||||
class Worker(configurated):
|
||||
WorkController = WorkController
|
||||
|
||||
app = None
|
||||
inherit_confopts = (WorkController, )
|
||||
loglevel = from_config('log_level')
|
||||
redirect_stdouts = from_config()
|
||||
redirect_stdouts_level = from_config()
|
||||
|
||||
def __init__(self, hostname=None, purge=False, beat=False,
|
||||
queues=None, include=None, app=None, pidfile=None,
|
||||
autoscale=None, autoreload=False, no_execv=False,
|
||||
no_color=None, **kwargs):
|
||||
self.app = app = app_or_default(app or self.app)
|
||||
self.hostname = hostname or socket.gethostname()
|
||||
|
||||
# this signal can be used to set up configuration for
|
||||
# workers by name.
|
||||
signals.celeryd_init.send(sender=self.hostname, instance=self,
|
||||
conf=self.app.conf)
|
||||
|
||||
self.setup_defaults(kwargs, namespace='celeryd')
|
||||
if not self.concurrency:
|
||||
try:
|
||||
self.concurrency = cpu_count()
|
||||
except NotImplementedError:
|
||||
self.concurrency = 2
|
||||
self.purge = purge
|
||||
self.beat = beat
|
||||
self.use_queues = [] if queues is None else queues
|
||||
self.queues = None
|
||||
self.include = include
|
||||
self.pidfile = pidfile
|
||||
self.autoscale = None
|
||||
self.autoreload = autoreload
|
||||
self.no_color = no_color
|
||||
self.no_execv = no_execv
|
||||
if autoscale:
|
||||
max_c, _, min_c = autoscale.partition(',')
|
||||
self.autoscale = [int(max_c), min_c and int(min_c) or 0]
|
||||
self._isatty = isatty(sys.stdout)
|
||||
|
||||
self.colored = app.log.colored(
|
||||
self.logfile,
|
||||
enabled=not no_color if no_color is not None else no_color
|
||||
)
|
||||
|
||||
if isinstance(self.use_queues, basestring):
|
||||
self.use_queues = self.use_queues.split(',')
|
||||
if self.include:
|
||||
if isinstance(self.include, basestring):
|
||||
self.include = self.include.split(',')
|
||||
app.conf.CELERY_INCLUDE = (
|
||||
tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
|
||||
self.loglevel = mlevel(self.loglevel)
|
||||
|
||||
def run(self):
|
||||
self.init_queues()
|
||||
self.app.loader.init_worker()
|
||||
|
||||
# this signal can be used to e.g. change queues after
|
||||
# the -Q option has been applied.
|
||||
signals.celeryd_after_setup.send(sender=self.hostname, instance=self,
|
||||
conf=self.app.conf)
|
||||
|
||||
if getattr(os, 'getuid', None) and os.getuid() == 0:
|
||||
warnings.warn(RuntimeWarning(
|
||||
'Running celeryd with superuser privileges is discouraged!'))
|
||||
|
||||
if self.purge:
|
||||
self.purge_messages()
|
||||
|
||||
# Dump configuration to screen so we have some basic information
|
||||
# for when users send bug reports.
|
||||
print(str(self.colored.cyan(' \n', self.startup_info())) +
|
||||
str(self.colored.reset(self.extra_info() or '')))
|
||||
self.set_process_status('-active-')
|
||||
|
||||
self.setup_logging()
|
||||
|
||||
# apply task execution optimizations
|
||||
trace.setup_worker_optimizations(self.app)
|
||||
|
||||
try:
|
||||
self.run_worker()
|
||||
except IGNORE_ERRORS:
|
||||
pass
|
||||
|
||||
def on_consumer_ready(self, consumer):
|
||||
signals.worker_ready.send(sender=consumer)
|
||||
print('celery@%s ready.' % safe_str(self.hostname))
|
||||
|
||||
def init_queues(self):
|
||||
try:
|
||||
self.app.select_queues(self.use_queues)
|
||||
except KeyError, exc:
|
||||
raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
|
||||
if self.app.conf.CELERY_WORKER_DIRECT:
|
||||
self.app.amqp.queues.select_add(worker_direct(self.hostname))
|
||||
|
||||
def setup_logging(self, colorize=None):
|
||||
if colorize is None and self.no_color is not None:
|
||||
colorize = not self.no_color
|
||||
self.app.log.setup(self.loglevel, self.logfile,
|
||||
self.redirect_stdouts, self.redirect_stdouts_level,
|
||||
colorize=colorize)
|
||||
|
||||
def purge_messages(self):
|
||||
count = self.app.control.purge()
|
||||
print('purge: Erased %d %s from the queue.\n' % (
|
||||
count, pluralize(count, 'message')))
|
||||
|
||||
def tasklist(self, include_builtins=True, sep='\n', int_='celery.'):
|
||||
return sep.join(
|
||||
' . %s' % task for task in sorted(self.app.tasks)
|
||||
if (not task.startswith(int_) if not include_builtins else task)
|
||||
)
|
||||
|
||||
def extra_info(self):
|
||||
if self.loglevel <= logging.INFO:
|
||||
include_builtins = self.loglevel <= logging.DEBUG
|
||||
tasklist = self.tasklist(include_builtins=include_builtins)
|
||||
return EXTRA_INFO_FMT % {'tasks': tasklist}
|
||||
|
||||
def startup_info(self):
|
||||
app = self.app
|
||||
concurrency = unicode(self.concurrency)
|
||||
appr = '%s:0x%x' % (app.main or '__main__', id(app))
|
||||
if not isinstance(app.loader, AppLoader):
|
||||
loader = qualname(app.loader)
|
||||
if loader.startswith('celery.loaders'):
|
||||
loader = loader[14:]
|
||||
appr += ' (%s)' % loader
|
||||
if self.autoscale:
|
||||
max, min = self.autoscale
|
||||
concurrency = '{min=%s, max=%s}' % (min, max)
|
||||
pool = self.pool_cls
|
||||
if not isinstance(pool, basestring):
|
||||
pool = pool.__module__
|
||||
concurrency += ' (%s)' % pool.split('.')[-1]
|
||||
events = 'ON'
|
||||
if not self.send_events:
|
||||
events = 'OFF (enable -E to monitor this worker)'
|
||||
|
||||
banner = (BANNER % {
|
||||
'app': appr,
|
||||
'hostname': self.hostname,
|
||||
'version': VERSION_BANNER,
|
||||
'conninfo': self.app.connection().as_uri(),
|
||||
'concurrency': concurrency,
|
||||
'platform': _platform.platform(),
|
||||
'events': events,
|
||||
'queues': app.amqp.queues.format(indent=0, indent_first=False),
|
||||
}).splitlines()
|
||||
|
||||
# integrate the ASCII art.
|
||||
for i, x in enumerate(banner):
|
||||
try:
|
||||
banner[i] = ' '.join([ARTLINES[i], banner[i]])
|
||||
except IndexError:
|
||||
banner[i] = ' ' * 16 + banner[i]
|
||||
return '\n'.join(banner) + '\n'
|
||||
|
||||
def run_worker(self):
|
||||
worker = self.WorkController(
|
||||
app=self.app,
|
||||
hostname=self.hostname,
|
||||
ready_callback=self.on_consumer_ready, beat=self.beat,
|
||||
autoscale=self.autoscale, autoreload=self.autoreload,
|
||||
no_execv=self.no_execv,
|
||||
pidfile=self.pidfile,
|
||||
**self.confopts_as_dict()
|
||||
)
|
||||
self.install_platform_tweaks(worker)
|
||||
signals.worker_init.send(sender=worker)
|
||||
worker.start()
|
||||
|
||||
def install_platform_tweaks(self, worker):
|
||||
"""Install platform specific tweaks and workarounds."""
|
||||
if self.app.IS_OSX:
|
||||
self.osx_proxy_detection_workaround()
|
||||
|
||||
# Install signal handler so SIGHUP restarts the worker.
|
||||
if not self._isatty:
|
||||
# only install HUP handler if detached from terminal,
|
||||
# so closing the terminal window doesn't restart celeryd
|
||||
# into the background.
|
||||
if self.app.IS_OSX:
|
||||
# OS X can't exec from a process using threads.
|
||||
# See http://github.com/celery/celery/issues#issue/152
|
||||
install_HUP_not_supported_handler(worker)
|
||||
else:
|
||||
install_worker_restart_handler(worker)
|
||||
install_worker_term_handler(worker)
|
||||
install_worker_term_hard_handler(worker)
|
||||
install_worker_int_handler(worker)
|
||||
install_cry_handler()
|
||||
install_rdb_handler()
|
||||
|
||||
def osx_proxy_detection_workaround(self):
|
||||
"""See http://github.com/celery/celery/issues#issue/161"""
|
||||
os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')
|
||||
|
||||
def set_process_status(self, info):
|
||||
return platforms.set_mp_process_title(
|
||||
'celeryd',
|
||||
info='%s (%s)' % (info, platforms.strargv(sys.argv)),
|
||||
hostname=self.hostname,
|
||||
)
|
||||
|
||||
|
||||
def _shutdown_handler(worker, sig='TERM', how='Warm',
|
||||
exc=SystemExit, callback=None):
|
||||
|
||||
def _handle_request(*args):
|
||||
set_in_sighandler(True)
|
||||
try:
|
||||
from celery.worker import state
|
||||
if current_process()._name == 'MainProcess':
|
||||
if callback:
|
||||
callback(worker)
|
||||
safe_say('celeryd: %s shutdown (MainProcess)' % how)
|
||||
if active_thread_count() > 1:
|
||||
setattr(state, {'Warm': 'should_stop',
|
||||
'Cold': 'should_terminate'}[how], True)
|
||||
else:
|
||||
raise exc()
|
||||
finally:
|
||||
set_in_sighandler(False)
|
||||
_handle_request.__name__ = 'worker_' + how
|
||||
platforms.signals[sig] = _handle_request
|
||||
install_worker_term_handler = partial(
|
||||
_shutdown_handler, sig='SIGTERM', how='Warm', exc=SystemExit,
|
||||
)
|
||||
if not is_jython:
|
||||
install_worker_term_hard_handler = partial(
|
||||
_shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate,
|
||||
)
|
||||
else:
|
||||
install_worker_term_handler = \
|
||||
install_worker_term_hard_handler = lambda *a, **kw: None
|
||||
|
||||
|
||||
def on_SIGINT(worker):
|
||||
safe_say('celeryd: Hitting Ctrl+C again will terminate all running tasks!')
|
||||
install_worker_term_hard_handler(worker, sig='SIGINT')
|
||||
if not is_jython:
|
||||
install_worker_int_handler = partial(
|
||||
_shutdown_handler, sig='SIGINT', callback=on_SIGINT
|
||||
)
|
||||
else:
|
||||
install_worker_int_handler = lambda *a, **kw: None
|
||||
|
||||
|
||||
def _clone_current_worker():
|
||||
if os.fork() == 0:
|
||||
platforms.close_open_fds([
|
||||
sys.__stdin__, sys.__stdout__, sys.__stderr__,
|
||||
])
|
||||
os.execv(sys.executable, [sys.executable] + sys.argv)
|
||||
|
||||
|
||||
def install_worker_restart_handler(worker, sig='SIGHUP'):
|
||||
|
||||
def restart_worker_sig_handler(*args):
|
||||
"""Signal handler restarting the current python program."""
|
||||
set_in_sighandler(True)
|
||||
safe_say('Restarting celeryd (%s)' % (' '.join(sys.argv), ))
|
||||
import atexit
|
||||
atexit.register(_clone_current_worker)
|
||||
from celery.worker import state
|
||||
state.should_stop = True
|
||||
platforms.signals[sig] = restart_worker_sig_handler
|
||||
|
||||
|
||||
def install_cry_handler():
|
||||
# Jython/PyPy do not have sys._current_frames
|
||||
if is_jython or is_pypy: # pragma: no cover
|
||||
return
|
||||
|
||||
def cry_handler(*args):
|
||||
"""Signal handler logging the stacktrace of all active threads."""
|
||||
set_in_sighandler(True)
|
||||
try:
|
||||
safe_say(cry())
|
||||
finally:
|
||||
set_in_sighandler(False)
|
||||
platforms.signals['SIGUSR1'] = cry_handler
|
||||
|
||||
|
||||
def install_rdb_handler(envvar='CELERY_RDBSIG',
|
||||
sig='SIGUSR2'): # pragma: no cover
|
||||
|
||||
def rdb_handler(*args):
|
||||
"""Signal handler setting a rdb breakpoint at the current frame."""
|
||||
set_in_sighandler(True)
|
||||
try:
|
||||
_, frame = args
|
||||
from celery.contrib import rdb
|
||||
rdb.set_trace(frame)
|
||||
finally:
|
||||
set_in_sighandler(False)
|
||||
if os.environ.get(envvar):
|
||||
platforms.signals[sig] = rdb_handler
|
||||
|
||||
|
||||
def install_HUP_not_supported_handler(worker, sig='SIGHUP'):
|
||||
|
||||
def warn_on_HUP_handler(*args):
|
||||
set_in_sighandler(True)
|
||||
try:
|
||||
safe_say('%(sig)s not supported: Restarting with %(sig)s is '
|
||||
'unstable on this platform!' % {'sig': sig})
|
||||
finally:
|
||||
set_in_sighandler(False)
|
||||
platforms.signals[sig] = warn_on_HUP_handler
|
||||
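The Worker class above parses the --autoscale option as a 'max,min' string and builds its startup banner by gluing the ARTLINES ASCII art onto the rendered BANNER lines, padding with spaces once the art runs out. A minimal standalone sketch of those two steps (the helper names and sample data are illustrative, not part of the vendored module):

# Sketch of the autoscale parsing and banner/art joining performed in
# Worker.__init__ and Worker.startup_info above; names are illustrative only.

ART = ['---- **** -----', '--- * *** * --']                        # stands in for ARTLINES
INFO = ['celery@host v3.0', 'broker: amqp://', 'concurrency: 4']    # rendered BANNER lines


def parse_autoscale(value):
    # '10,3' -> [10, 3]; a bare '10' -> [10, 0], mirroring the partition() call.
    max_c, _, min_c = value.partition(',')
    return [int(max_c), int(min_c) if min_c else 0]


def join_banner(art, info, pad=16):
    # Each banner line gets an art line in front of it; once the art is
    # exhausted the remaining lines are padded with spaces instead
    # (the IndexError branch in startup_info).
    out = []
    for i, line in enumerate(info):
        if i < len(art):
            out.append(art[i] + ' ' + line)
        else:
            out.append(' ' * pad + line)
    return '\n'.join(out) + '\n'


if __name__ == '__main__':
    print(parse_autoscale('10,3'))   # [10, 3]
    print(join_banner(ART, INFO))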
56
awx/lib/site-packages/celery/backends/__init__.py
Normal file
@@ -0,0 +1,56 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Backend abstract factory (...did I just say that?) and alias definitions.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from kombu.utils.url import _parse_url
|
||||
|
||||
from celery.local import Proxy
|
||||
from celery._state import current_app
|
||||
from celery.utils.imports import symbol_by_name
|
||||
from celery.utils.functional import memoize
|
||||
|
||||
UNKNOWN_BACKEND = """\
|
||||
Unknown result backend: %r. Did you spell that correctly? (%r)\
|
||||
"""
|
||||
|
||||
BACKEND_ALIASES = {
|
||||
'amqp': 'celery.backends.amqp:AMQPBackend',
|
||||
'cache': 'celery.backends.cache:CacheBackend',
|
||||
'redis': 'celery.backends.redis:RedisBackend',
|
||||
'mongodb': 'celery.backends.mongodb:MongoBackend',
|
||||
'database': 'celery.backends.database:DatabaseBackend',
|
||||
'cassandra': 'celery.backends.cassandra:CassandraBackend',
|
||||
'disabled': 'celery.backends.base:DisabledBackend',
|
||||
}
|
||||
|
||||
#: deprecated alias to ``current_app.backend``.
|
||||
default_backend = Proxy(lambda: current_app.backend)
|
||||
|
||||
|
||||
@memoize(100)
|
||||
def get_backend_cls(backend=None, loader=None):
|
||||
"""Get backend class by name/alias"""
|
||||
backend = backend or 'disabled'
|
||||
loader = loader or current_app.loader
|
||||
aliases = dict(BACKEND_ALIASES, **loader.override_backends)
|
||||
try:
|
||||
return symbol_by_name(backend, aliases)
|
||||
except ValueError, exc:
|
||||
raise ValueError, ValueError(UNKNOWN_BACKEND % (
|
||||
backend, exc)), sys.exc_info()[2]
|
||||
|
||||
|
||||
def get_backend_by_url(backend=None, loader=None):
|
||||
url = None
|
||||
if backend and '://' in backend:
|
||||
url = backend
|
||||
backend, _, _, _, _, _, _ = _parse_url(url)
|
||||
return get_backend_cls(backend, loader), url
|
||||
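get_backend_cls above resolves either a known alias ('cache', 'redis', ...) or a dotted 'module:Class' path, and get_backend_by_url additionally splits a URL-style setting so the scheme picks the class while the full URL is handed back for the backend to parse. A rough usage sketch, assuming this vendored celery tree is importable; the example values are illustrative only:

# Rough usage sketch of the resolution helpers defined above.
from celery.backends import get_backend_cls, get_backend_by_url

CacheBackend = get_backend_cls('cache')                              # alias -> class
AMQPBackend = get_backend_cls('celery.backends.amqp:AMQPBackend')    # dotted path

# For URL-style settings only the scheme selects the class; the URL itself is
# returned unchanged so the chosen backend can read host/port/db from it.
cls, url = get_backend_by_url('redis://localhost:6379/1')
print(cls)
print(url)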
268
awx/lib/site-packages/celery/backends/amqp.py
Normal file
@@ -0,0 +1,268 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.amqp
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The AMQP result backend.
|
||||
|
||||
This backend publishes results as messages.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
|
||||
from kombu import Exchange, Queue, Producer, Consumer
|
||||
|
||||
from celery import states
|
||||
from celery.exceptions import TimeoutError
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
from .base import BaseDictBackend
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class BacklogLimitExceeded(Exception):
|
||||
"""Too much state history to fast-forward."""
|
||||
|
||||
|
||||
def repair_uuid(s):
|
||||
# Historically the dashes in UUIDs are removed from AMQ entity names,
|
||||
# but there is no known reason to do so. Hopefully we'll be able to fix
|
||||
# this in v4.0.
|
||||
return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
|
||||
|
||||
|
||||
class AMQPBackend(BaseDictBackend):
|
||||
"""Publishes results by sending messages."""
|
||||
Exchange = Exchange
|
||||
Queue = Queue
|
||||
Consumer = Consumer
|
||||
Producer = Producer
|
||||
|
||||
BacklogLimitExceeded = BacklogLimitExceeded
|
||||
|
||||
supports_autoexpire = True
|
||||
supports_native_join = True
|
||||
|
||||
retry_policy = {
|
||||
'max_retries': 20,
|
||||
'interval_start': 0,
|
||||
'interval_step': 1,
|
||||
'interval_max': 1,
|
||||
}
|
||||
|
||||
def __init__(self, connection=None, exchange=None, exchange_type=None,
|
||||
persistent=None, serializer=None, auto_delete=True,
|
||||
**kwargs):
|
||||
super(AMQPBackend, self).__init__(**kwargs)
|
||||
conf = self.app.conf
|
||||
self._connection = connection
|
||||
self.queue_arguments = {}
|
||||
self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
|
||||
else persistent)
|
||||
delivery_mode = persistent and 'persistent' or 'transient'
|
||||
exchange = exchange or conf.CELERY_RESULT_EXCHANGE
|
||||
exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
|
||||
self.exchange = self.Exchange(name=exchange,
|
||||
type=exchange_type,
|
||||
delivery_mode=delivery_mode,
|
||||
durable=self.persistent,
|
||||
auto_delete=False)
|
||||
self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
|
||||
self.auto_delete = auto_delete
|
||||
|
||||
# AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
|
||||
# removed in version 4.0.
|
||||
dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
|
||||
|
||||
self.expires = None
|
||||
if 'expires' in kwargs:
|
||||
if kwargs['expires'] is not None:
|
||||
self.expires = self.prepare_expires(kwargs['expires'])
|
||||
else:
|
||||
self.expires = self.prepare_expires(dexpires)
|
||||
|
||||
if self.expires:
|
||||
self.queue_arguments['x-expires'] = int(self.expires * 1000)
|
||||
self.mutex = threading.Lock()
|
||||
|
||||
def _create_binding(self, task_id):
|
||||
name = task_id.replace('-', '')
|
||||
return self.Queue(name=name,
|
||||
exchange=self.exchange,
|
||||
routing_key=name,
|
||||
durable=self.persistent,
|
||||
auto_delete=self.auto_delete,
|
||||
queue_arguments=self.queue_arguments)
|
||||
|
||||
def revive(self, channel):
|
||||
pass
|
||||
|
||||
def _republish(self, channel, task_id, body, content_type,
|
||||
content_encoding):
|
||||
return Producer(channel).publish(
|
||||
body,
|
||||
exchange=self.exchange,
|
||||
routing_key=task_id.replace('-', ''),
|
||||
serializer=self.serializer,
|
||||
content_type=content_type,
|
||||
content_encoding=content_encoding,
|
||||
retry=True, retry_policy=self.retry_policy,
|
||||
declare=[self._create_binding(task_id)],
|
||||
)
|
||||
|
||||
def _store_result(self, task_id, result, status, traceback=None):
|
||||
"""Send task return value and status."""
|
||||
with self.mutex:
|
||||
with self.app.amqp.producer_pool.acquire(block=True) as pub:
|
||||
pub.publish({'task_id': task_id, 'status': status,
|
||||
'result': self.encode_result(result, status),
|
||||
'traceback': traceback,
|
||||
'children': self.current_task_children()},
|
||||
exchange=self.exchange,
|
||||
routing_key=task_id.replace('-', ''),
|
||||
serializer=self.serializer,
|
||||
retry=True, retry_policy=self.retry_policy,
|
||||
declare=[self._create_binding(task_id)])
|
||||
return result
|
||||
|
||||
def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
|
||||
**kwargs):
|
||||
cached_meta = self._cache.get(task_id)
|
||||
if cache and cached_meta and \
|
||||
cached_meta['status'] in states.READY_STATES:
|
||||
meta = cached_meta
|
||||
else:
|
||||
try:
|
||||
meta = self.consume(task_id, timeout=timeout)
|
||||
except socket.timeout:
|
||||
raise TimeoutError('The operation timed out.')
|
||||
|
||||
state = meta['status']
|
||||
if state == states.SUCCESS:
|
||||
return meta['result']
|
||||
elif state in states.PROPAGATE_STATES:
|
||||
if propagate:
|
||||
raise self.exception_to_python(meta['result'])
|
||||
return meta['result']
|
||||
else:
|
||||
return self.wait_for(task_id, timeout, cache)
|
||||
|
||||
def get_task_meta(self, task_id, backlog_limit=1000):
|
||||
# Polling and using basic_get
|
||||
with self.app.pool.acquire_channel(block=True) as (_, channel):
|
||||
binding = self._create_binding(task_id)(channel)
|
||||
binding.declare()
|
||||
prev = latest = acc = None
|
||||
for i in xrange(backlog_limit): # spool ffwd
|
||||
prev, latest, acc = latest, acc, binding.get(no_ack=False)
|
||||
if not acc: # no more messages
|
||||
break
|
||||
if prev:
|
||||
# backends are not expected to keep history,
|
||||
# so we delete everything except the most recent state.
|
||||
prev.ack()
|
||||
else:
|
||||
raise self.BacklogLimitExceeded(task_id)
|
||||
|
||||
if latest:
|
||||
payload = self._cache[task_id] = latest.payload
|
||||
latest.requeue()
|
||||
return payload
|
||||
else:
|
||||
# no new state, use previous
|
||||
try:
|
||||
return self._cache[task_id]
|
||||
except KeyError:
|
||||
# result probably pending.
|
||||
return {'status': states.PENDING, 'result': None}
|
||||
poll = get_task_meta # XXX compat
|
||||
|
||||
def drain_events(self, connection, consumer, timeout=None, now=time.time):
|
||||
wait = connection.drain_events
|
||||
results = {}
|
||||
|
||||
def callback(meta, message):
|
||||
if meta['status'] in states.READY_STATES:
|
||||
uuid = repair_uuid(message.delivery_info['routing_key'])
|
||||
results[uuid] = meta
|
||||
|
||||
consumer.callbacks[:] = [callback]
|
||||
time_start = now()
|
||||
|
||||
while 1:
|
||||
# Total time spent may exceed a single call to wait()
|
||||
if timeout and now() - time_start >= timeout:
|
||||
raise socket.timeout()
|
||||
wait(timeout=timeout)
|
||||
if results: # got event on the wanted channel.
|
||||
break
|
||||
self._cache.update(results)
|
||||
return results
|
||||
|
||||
def consume(self, task_id, timeout=None):
|
||||
with self.app.pool.acquire_channel(block=True) as (conn, channel):
|
||||
binding = self._create_binding(task_id)
|
||||
with self.Consumer(channel, binding, no_ack=True) as consumer:
|
||||
return self.drain_events(conn, consumer, timeout).values()[0]
|
||||
|
||||
def get_many(self, task_ids, timeout=None, **kwargs):
|
||||
with self.app.pool.acquire_channel(block=True) as (conn, channel):
|
||||
ids = set(task_ids)
|
||||
cached_ids = set()
|
||||
for task_id in ids:
|
||||
try:
|
||||
cached = self._cache[task_id]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
if cached['status'] in states.READY_STATES:
|
||||
yield task_id, cached
|
||||
cached_ids.add(task_id)
|
||||
ids ^= cached_ids
|
||||
|
||||
bindings = [self._create_binding(task_id) for task_id in task_ids]
|
||||
with self.Consumer(channel, bindings, no_ack=True) as consumer:
|
||||
while ids:
|
||||
r = self.drain_events(conn, consumer, timeout)
|
||||
ids ^= set(r)
|
||||
for ready_id, ready_meta in r.iteritems():
|
||||
yield ready_id, ready_meta
|
||||
|
||||
def reload_task_result(self, task_id):
|
||||
raise NotImplementedError(
|
||||
'reload_task_result is not supported by this backend.')
|
||||
|
||||
def reload_group_result(self, task_id):
|
||||
"""Reload group result, even if it has been previously fetched."""
|
||||
raise NotImplementedError(
|
||||
'reload_group_result is not supported by this backend.')
|
||||
|
||||
def save_group(self, group_id, result):
|
||||
raise NotImplementedError(
|
||||
'save_group is not supported by this backend.')
|
||||
|
||||
def restore_group(self, group_id, cache=True):
|
||||
raise NotImplementedError(
|
||||
'restore_group is not supported by this backend.')
|
||||
|
||||
def delete_group(self, group_id):
|
||||
raise NotImplementedError(
|
||||
'delete_group is not supported by this backend.')
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
kwargs.update(
|
||||
connection=self._connection,
|
||||
exchange=self.exchange.name,
|
||||
exchange_type=self.exchange.type,
|
||||
persistent=self.persistent,
|
||||
serializer=self.serializer,
|
||||
auto_delete=self.auto_delete,
|
||||
expires=self.expires,
|
||||
)
|
||||
return super(AMQPBackend, self).__reduce__(args, kwargs)
|
||||
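The AMQP backend above routes each result to a queue whose name and routing key are the task id with the dashes stripped, and repair_uuid reverses that mapping when results are consumed. A small self-contained sketch of that round trip (a standalone mirror of the vendored helpers, for illustration only):

# Sketch of the task-id <-> routing-key mapping used by AMQPBackend above.
import uuid


def to_routing_key(task_id):
    # _create_binding() and _store_result() both strip the dashes.
    return task_id.replace('-', '')


def from_routing_key(s):
    # same reconstruction as the module-level repair_uuid() helper above
    return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])


task_id = str(uuid.uuid4())
key = to_routing_key(task_id)
assert from_routing_key(key) == task_id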
542
awx/lib/site-packages/celery/backends/base.py
Normal file
@@ -0,0 +1,542 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.base
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Result backend base classes.
|
||||
|
||||
- :class:`BaseBackend` defines the interface.
|
||||
|
||||
- :class:`BaseDictBackend` assumes the fields are stored in a dict.
|
||||
|
||||
- :class:`KeyValueStoreBackend` is a common base class
|
||||
using K/V semantics like _get and _put.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import time
|
||||
import sys
|
||||
|
||||
from datetime import timedelta
|
||||
|
||||
from billiard.einfo import ExceptionInfo
|
||||
from kombu import serialization
|
||||
from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8
|
||||
|
||||
from celery import states
|
||||
from celery.app import current_task
|
||||
from celery.datastructures import LRUCache
|
||||
from celery.exceptions import ChordError, TaskRevokedError, TimeoutError
|
||||
from celery.result import from_serializable, GroupResult
|
||||
from celery.utils import timeutils
|
||||
from celery.utils.serialization import (
|
||||
get_pickled_exception,
|
||||
get_pickleable_exception,
|
||||
create_exception_cls,
|
||||
)
|
||||
|
||||
EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
|
||||
is_py3k = sys.version_info >= (3, 0)
|
||||
|
||||
|
||||
def unpickle_backend(cls, args, kwargs):
|
||||
"""Returns an unpickled backend."""
|
||||
return cls(*args, **kwargs)
|
||||
|
||||
|
||||
class BaseBackend(object):
|
||||
"""Base backend class."""
|
||||
READY_STATES = states.READY_STATES
|
||||
UNREADY_STATES = states.UNREADY_STATES
|
||||
EXCEPTION_STATES = states.EXCEPTION_STATES
|
||||
|
||||
TimeoutError = TimeoutError
|
||||
|
||||
#: Time to sleep between polling each individual item
|
||||
#: in `ResultSet.iterate`. as opposed to the `interval`
|
||||
#: argument which is for each pass.
|
||||
subpolling_interval = None
|
||||
|
||||
#: If true the backend must implement :meth:`get_many`.
|
||||
supports_native_join = False
|
||||
|
||||
#: If true the backend must automatically expire results.
|
||||
#: The daily backend_cleanup periodic task will not be triggered
|
||||
#: in this case.
|
||||
supports_autoexpire = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
from celery.app import app_or_default
|
||||
self.app = app_or_default(kwargs.get('app'))
|
||||
self.serializer = kwargs.get('serializer',
|
||||
self.app.conf.CELERY_RESULT_SERIALIZER)
|
||||
(self.content_type,
|
||||
self.content_encoding,
|
||||
self.encoder) = serialization.registry._encoders[self.serializer]
|
||||
|
||||
def encode(self, data):
|
||||
_, _, payload = serialization.encode(data, serializer=self.serializer)
|
||||
return payload
|
||||
|
||||
def decode(self, payload):
|
||||
payload = is_py3k and payload or str(payload)
|
||||
return serialization.decode(payload,
|
||||
content_type=self.content_type,
|
||||
content_encoding=self.content_encoding)
|
||||
|
||||
def prepare_expires(self, value, type=None):
|
||||
if value is None:
|
||||
value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
|
||||
if isinstance(value, timedelta):
|
||||
value = timeutils.timedelta_seconds(value)
|
||||
if value is not None and type:
|
||||
return type(value)
|
||||
return value
|
||||
|
||||
def encode_result(self, result, status):
|
||||
if status in self.EXCEPTION_STATES and isinstance(result, Exception):
|
||||
return self.prepare_exception(result)
|
||||
else:
|
||||
return self.prepare_value(result)
|
||||
|
||||
def store_result(self, task_id, result, status, traceback=None):
|
||||
"""Store the result and status of a task."""
|
||||
raise NotImplementedError(
|
||||
'store_result is not supported by this backend.')
|
||||
|
||||
def mark_as_started(self, task_id, **meta):
|
||||
"""Mark a task as started"""
|
||||
return self.store_result(task_id, meta, status=states.STARTED)
|
||||
|
||||
def mark_as_done(self, task_id, result):
|
||||
"""Mark task as successfully executed."""
|
||||
return self.store_result(task_id, result, status=states.SUCCESS)
|
||||
|
||||
def mark_as_failure(self, task_id, exc, traceback=None):
|
||||
"""Mark task as executed with failure. Stores the execption."""
|
||||
return self.store_result(task_id, exc, status=states.FAILURE,
|
||||
traceback=traceback)
|
||||
|
||||
def fail_from_current_stack(self, task_id, exc=None):
|
||||
type_, real_exc, tb = sys.exc_info()
|
||||
try:
|
||||
exc = real_exc if exc is None else exc
|
||||
ei = ExceptionInfo((type_, exc, tb))
|
||||
self.mark_as_failure(task_id, exc, ei.traceback)
|
||||
return ei
|
||||
finally:
|
||||
del(tb)
|
||||
|
||||
def mark_as_retry(self, task_id, exc, traceback=None):
|
||||
"""Mark task as being retries. Stores the current
|
||||
exception (if any)."""
|
||||
return self.store_result(task_id, exc, status=states.RETRY,
|
||||
traceback=traceback)
|
||||
|
||||
def mark_as_revoked(self, task_id, reason=''):
|
||||
return self.store_result(task_id, TaskRevokedError(reason),
|
||||
status=states.REVOKED, traceback=None)
|
||||
|
||||
def prepare_exception(self, exc):
|
||||
"""Prepare exception for serialization."""
|
||||
if self.serializer in EXCEPTION_ABLE_CODECS:
|
||||
return get_pickleable_exception(exc)
|
||||
return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
|
||||
|
||||
def exception_to_python(self, exc):
|
||||
"""Convert serialized exception to Python exception."""
|
||||
if self.serializer in EXCEPTION_ABLE_CODECS:
|
||||
return get_pickled_exception(exc)
|
||||
return create_exception_cls(from_utf8(exc['exc_type']),
|
||||
sys.modules[__name__])(exc['exc_message'])
|
||||
|
||||
def prepare_value(self, result):
|
||||
"""Prepare value for storage."""
|
||||
if isinstance(result, GroupResult):
|
||||
return result.serializable()
|
||||
return result
|
||||
|
||||
def forget(self, task_id):
|
||||
raise NotImplementedError('%s does not implement forget.' % (
|
||||
self.__class__))
|
||||
|
||||
def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
|
||||
"""Wait for task and return its result.
|
||||
|
||||
If the task raises an exception, this exception
|
||||
will be re-raised by :func:`wait_for`.
|
||||
|
||||
If `timeout` is not :const:`None`, this raises the
|
||||
:class:`celery.exceptions.TimeoutError` exception if the operation
|
||||
takes longer than `timeout` seconds.
|
||||
|
||||
"""
|
||||
|
||||
time_elapsed = 0.0
|
||||
|
||||
while 1:
|
||||
status = self.get_status(task_id)
|
||||
if status == states.SUCCESS:
|
||||
return self.get_result(task_id)
|
||||
elif status in states.PROPAGATE_STATES:
|
||||
result = self.get_result(task_id)
|
||||
if propagate:
|
||||
raise result
|
||||
return result
|
||||
# avoid hammering the CPU checking status.
|
||||
time.sleep(interval)
|
||||
time_elapsed += interval
|
||||
if timeout and time_elapsed >= timeout:
|
||||
raise TimeoutError('The operation timed out.')
|
||||
|
||||
def cleanup(self):
|
||||
"""Backend cleanup. Is run by
|
||||
:class:`celery.task.DeleteExpiredTaskMetaTask`."""
|
||||
pass
|
||||
|
||||
def process_cleanup(self):
|
||||
"""Cleanup actions to do at the end of a task worker process."""
|
||||
pass
|
||||
|
||||
def get_status(self, task_id):
|
||||
"""Get the status of a task."""
|
||||
raise NotImplementedError(
|
||||
'get_status is not supported by this backend.')
|
||||
|
||||
def get_result(self, task_id):
|
||||
"""Get the result of a task."""
|
||||
raise NotImplementedError(
|
||||
'get_result is not supported by this backend.')
|
||||
|
||||
def get_children(self, task_id):
|
||||
raise NotImplementedError(
|
||||
'get_children is not supported by this backend.')
|
||||
|
||||
def get_traceback(self, task_id):
|
||||
"""Get the traceback for a failed task."""
|
||||
raise NotImplementedError(
|
||||
'get_traceback is not supported by this backend.')
|
||||
|
||||
def save_group(self, group_id, result):
|
||||
"""Store the result and status of a task."""
|
||||
|
||||
raise NotImplementedError(
|
||||
'save_group is not supported by %s.' % (type(self).__name__, ))
|
||||
|
||||
def restore_group(self, group_id, cache=True):
|
||||
"""Get the result of a group."""
|
||||
raise NotImplementedError(
|
||||
'restore_group is not supported by this backend.')
|
||||
|
||||
def delete_group(self, group_id):
|
||||
raise NotImplementedError(
|
||||
'delete_group is not supported by this backend.')
|
||||
|
||||
def reload_task_result(self, task_id):
|
||||
"""Reload task result, even if it has been previously fetched."""
|
||||
raise NotImplementedError(
|
||||
'reload_task_result is not supported by this backend.')
|
||||
|
||||
def reload_group_result(self, task_id):
|
||||
"""Reload group result, even if it has been previously fetched."""
|
||||
raise NotImplementedError(
|
||||
'reload_group_result is not supported by this backend.')
|
||||
|
||||
def on_chord_part_return(self, task, propagate=True):
|
||||
pass
|
||||
|
||||
def fallback_chord_unlock(self, group_id, body, result=None,
|
||||
countdown=1, **kwargs):
|
||||
kwargs['result'] = [r.id for r in result]
|
||||
self.app.tasks['celery.chord_unlock'].apply_async(
|
||||
(group_id, body, ), kwargs, countdown=countdown,
|
||||
)
|
||||
on_chord_apply = fallback_chord_unlock
|
||||
|
||||
def current_task_children(self):
|
||||
current = current_task()
|
||||
if current:
|
||||
return [r.serializable() for r in current.request.children]
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
return (unpickle_backend, (self.__class__, args, kwargs))
|
||||
|
||||
def is_cached(self, task_id):
|
||||
return False
|
||||
|
||||
|
||||
class BaseDictBackend(BaseBackend):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(BaseDictBackend, self).__init__(*args, **kwargs)
|
||||
self._cache = LRUCache(limit=kwargs.get('max_cached_results') or
|
||||
self.app.conf.CELERY_MAX_CACHED_RESULTS)
|
||||
|
||||
def is_cached(self, task_id):
|
||||
return task_id in self._cache
|
||||
|
||||
def store_result(self, task_id, result, status, traceback=None, **kwargs):
|
||||
"""Store task result and status."""
|
||||
result = self.encode_result(result, status)
|
||||
self._store_result(task_id, result, status, traceback, **kwargs)
|
||||
return result
|
||||
|
||||
def forget(self, task_id):
|
||||
self._cache.pop(task_id, None)
|
||||
self._forget(task_id)
|
||||
|
||||
def _forget(self, task_id):
|
||||
raise NotImplementedError('%s does not implement forget.' % (
|
||||
self.__class__))
|
||||
|
||||
def get_status(self, task_id):
|
||||
"""Get the status of a task."""
|
||||
return self.get_task_meta(task_id)['status']
|
||||
|
||||
def get_traceback(self, task_id):
|
||||
"""Get the traceback for a failed task."""
|
||||
return self.get_task_meta(task_id).get('traceback')
|
||||
|
||||
def get_result(self, task_id):
|
||||
"""Get the result of a task."""
|
||||
meta = self.get_task_meta(task_id)
|
||||
if meta['status'] in self.EXCEPTION_STATES:
|
||||
return self.exception_to_python(meta['result'])
|
||||
else:
|
||||
return meta['result']
|
||||
|
||||
def get_children(self, task_id):
|
||||
"""Get the list of subtasks sent by a task."""
|
||||
try:
|
||||
return self.get_task_meta(task_id)['children']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def get_task_meta(self, task_id, cache=True):
|
||||
if cache:
|
||||
try:
|
||||
return self._cache[task_id]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
meta = self._get_task_meta_for(task_id)
|
||||
if cache and meta.get('status') == states.SUCCESS:
|
||||
self._cache[task_id] = meta
|
||||
return meta
|
||||
|
||||
def reload_task_result(self, task_id):
|
||||
self._cache[task_id] = self.get_task_meta(task_id, cache=False)
|
||||
|
||||
def reload_group_result(self, group_id):
|
||||
self._cache[group_id] = self.get_group_meta(group_id,
|
||||
cache=False)
|
||||
|
||||
def get_group_meta(self, group_id, cache=True):
|
||||
if cache:
|
||||
try:
|
||||
return self._cache[group_id]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
meta = self._restore_group(group_id)
|
||||
if cache and meta is not None:
|
||||
self._cache[group_id] = meta
|
||||
return meta
|
||||
|
||||
def restore_group(self, group_id, cache=True):
|
||||
"""Get the result for a group."""
|
||||
meta = self.get_group_meta(group_id, cache=cache)
|
||||
if meta:
|
||||
return meta['result']
|
||||
|
||||
def save_group(self, group_id, result):
|
||||
"""Store the result of an executed group."""
|
||||
return self._save_group(group_id, result)
|
||||
|
||||
def delete_group(self, group_id):
|
||||
self._cache.pop(group_id, None)
|
||||
return self._delete_group(group_id)
|
||||
|
||||
|
||||
class KeyValueStoreBackend(BaseDictBackend):
|
||||
task_keyprefix = ensure_bytes('celery-task-meta-')
|
||||
group_keyprefix = ensure_bytes('celery-taskset-meta-')
|
||||
chord_keyprefix = ensure_bytes('chord-unlock-')
|
||||
implements_incr = False
|
||||
|
||||
def get(self, key):
|
||||
raise NotImplementedError('Must implement the get method.')
|
||||
|
||||
def mget(self, keys):
|
||||
raise NotImplementedError('Does not support get_many')
|
||||
|
||||
def set(self, key, value):
|
||||
raise NotImplementedError('Must implement the set method.')
|
||||
|
||||
def delete(self, key):
|
||||
raise NotImplementedError('Must implement the delete method')
|
||||
|
||||
def incr(self, key):
|
||||
raise NotImplementedError('Does not implement incr')
|
||||
|
||||
def expire(self, key, value):
|
||||
pass
|
||||
|
||||
def get_key_for_task(self, task_id):
|
||||
"""Get the cache key for a task by id."""
|
||||
return self.task_keyprefix + ensure_bytes(task_id)
|
||||
|
||||
def get_key_for_group(self, group_id):
|
||||
"""Get the cache key for a group by id."""
|
||||
return self.group_keyprefix + ensure_bytes(group_id)
|
||||
|
||||
def get_key_for_chord(self, group_id):
|
||||
"""Get the cache key for the chord waiting on group with given id."""
|
||||
return self.chord_keyprefix + ensure_bytes(group_id)
|
||||
|
||||
def _strip_prefix(self, key):
|
||||
"""Takes bytes, emits string."""
|
||||
key = ensure_bytes(key)
|
||||
for prefix in self.task_keyprefix, self.group_keyprefix:
|
||||
if key.startswith(prefix):
|
||||
return bytes_to_str(key[len(prefix):])
|
||||
return bytes_to_str(key)
|
||||
|
||||
def _mget_to_results(self, values, keys):
|
||||
if hasattr(values, 'items'):
|
||||
# client returns dict so mapping preserved.
|
||||
return dict((self._strip_prefix(k), self.decode(v))
|
||||
for k, v in values.iteritems()
|
||||
if v is not None)
|
||||
else:
|
||||
# client returns list so need to recreate mapping.
|
||||
return dict((bytes_to_str(keys[i]), self.decode(value))
|
||||
for i, value in enumerate(values)
|
||||
if value is not None)
|
||||
|
||||
def get_many(self, task_ids, timeout=None, interval=0.5):
|
||||
ids = set(task_ids)
|
||||
cached_ids = set()
|
||||
for task_id in ids:
|
||||
try:
|
||||
cached = self._cache[task_id]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
if cached['status'] in states.READY_STATES:
|
||||
yield bytes_to_str(task_id), cached
|
||||
cached_ids.add(task_id)
|
||||
|
||||
ids ^= cached_ids
|
||||
iterations = 0
|
||||
while ids:
|
||||
keys = list(ids)
|
||||
r = self._mget_to_results(self.mget([self.get_key_for_task(k)
|
||||
for k in keys]), keys)
|
||||
self._cache.update(r)
|
||||
ids ^= set(bytes_to_str(v) for v in r)
|
||||
for key, value in r.iteritems():
|
||||
yield bytes_to_str(key), value
|
||||
if timeout and iterations * interval >= timeout:
|
||||
raise TimeoutError('Operation timed out (%s)' % (timeout, ))
|
||||
time.sleep(interval) # don't busy loop.
|
||||
iterations += 1
|
||||
|
||||
def _forget(self, task_id):
|
||||
self.delete(self.get_key_for_task(task_id))
|
||||
|
||||
def _store_result(self, task_id, result, status, traceback=None):
|
||||
meta = {'status': status, 'result': result, 'traceback': traceback,
|
||||
'children': self.current_task_children()}
|
||||
self.set(self.get_key_for_task(task_id), self.encode(meta))
|
||||
return result
|
||||
|
||||
def _save_group(self, group_id, result):
|
||||
self.set(self.get_key_for_group(group_id),
|
||||
self.encode({'result': result.serializable()}))
|
||||
return result
|
||||
|
||||
def _delete_group(self, group_id):
|
||||
self.delete(self.get_key_for_group(group_id))
|
||||
|
||||
def _get_task_meta_for(self, task_id):
|
||||
"""Get task metadata for a task by id."""
|
||||
meta = self.get(self.get_key_for_task(task_id))
|
||||
if not meta:
|
||||
return {'status': states.PENDING, 'result': None}
|
||||
return self.decode(meta)
|
||||
|
||||
def _restore_group(self, group_id):
|
||||
"""Get task metadata for a task by id."""
|
||||
meta = self.get(self.get_key_for_group(group_id))
|
||||
# previously this was always pickled, but later this
|
||||
# was extended to support other serializers, so the
|
||||
# structure is kind of weird.
|
||||
if meta:
|
||||
meta = self.decode(meta)
|
||||
result = meta['result']
|
||||
if isinstance(result, (list, tuple)):
|
||||
return {'result': from_serializable(result, self.app)}
|
||||
return meta
|
||||
|
||||
def on_chord_apply(self, group_id, body, result=None, **kwargs):
|
||||
if self.implements_incr:
|
||||
self.save_group(group_id, self.app.GroupResult(group_id, result))
|
||||
else:
|
||||
self.fallback_chord_unlock(group_id, body, result, **kwargs)
|
||||
|
||||
def on_chord_part_return(self, task, propagate=None):
|
||||
if not self.implements_incr:
|
||||
return
|
||||
from celery import subtask
|
||||
from celery.result import GroupResult
|
||||
app = self.app
|
||||
if propagate is None:
|
||||
propagate = self.app.conf.CELERY_CHORD_PROPAGATES
|
||||
gid = task.request.group
|
||||
if not gid:
|
||||
return
|
||||
key = self.get_key_for_chord(gid)
|
||||
deps = GroupResult.restore(gid, backend=task.backend)
|
||||
val = self.incr(key)
|
||||
if val >= len(deps):
|
||||
j = deps.join_native if deps.supports_native_join else deps.join
|
||||
callback = subtask(task.request.chord)
|
||||
try:
|
||||
ret = j(propagate=propagate)
|
||||
except Exception, exc:
|
||||
try:
|
||||
culprit = deps._failed_join_report().next()
|
||||
reason = 'Dependency %s raised %r' % (culprit.id, exc)
|
||||
except StopIteration:
|
||||
reason = repr(exc)
|
||||
app._tasks[callback.task].backend.fail_from_current_stack(
|
||||
callback.id, exc=ChordError(reason),
|
||||
)
|
||||
else:
|
||||
try:
|
||||
callback.delay(ret)
|
||||
except Exception, exc:
|
||||
app._tasks[callback.task].backend.fail_from_current_stack(
|
||||
callback.id,
|
||||
exc=ChordError('Callback error: %r' % (exc, )),
|
||||
)
|
||||
finally:
|
||||
deps.delete()
|
||||
self.client.delete(key)
|
||||
else:
|
||||
self.expire(key, 86400)
|
||||
|
||||
|
||||
class DisabledBackend(BaseBackend):
|
||||
_cache = {} # need this attribute to reset cache in tests.
|
||||
|
||||
def store_result(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def _is_disabled(self, *args, **kwargs):
|
||||
raise NotImplementedError(
|
||||
'No result backend configured. '
|
||||
'Please see the documentation for more information.')
|
||||
wait_for = get_status = get_result = get_traceback = _is_disabled
|
||||
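A concrete backend built on KeyValueStoreBackend only has to provide get/mget/set/delete; key prefixing, result encoding and the LRU cache all come from the base classes, and a backend that also wants native chord counting additionally overrides incr/expire and sets implements_incr. A minimal in-memory subclass, written here purely to illustrate that contract (not a backend shipped in this commit):

# Minimal illustration of the KeyValueStoreBackend contract defined above.
# Not a real backend -- an in-process dict, for demonstration only.
from celery.backends.base import KeyValueStoreBackend


class DictBackend(KeyValueStoreBackend):

    def __init__(self, *args, **kwargs):
        super(DictBackend, self).__init__(*args, **kwargs)
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def mget(self, keys):
        return [self._data.get(k) for k in keys]

    def set(self, key, value):
        self._data[key] = value

    def delete(self, key):
        self._data.pop(key, None)

With just these four methods in place, store_result(), get_status() and get_result() from BaseDictBackend work unchanged, because they only reach the store through the key-prefix helpers and encode()/decode().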
135
awx/lib/site-packages/celery/backends/cache.py
Normal file
@@ -0,0 +1,135 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.cache
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Memcache and in-memory cache result backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from kombu.utils import cached_property
|
||||
|
||||
from celery.datastructures import LRUCache
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
|
||||
from .base import KeyValueStoreBackend
|
||||
|
||||
_imp = [None]
|
||||
|
||||
REQUIRES_BACKEND = """\
|
||||
The memcached backend requires either pylibmc or python-memcached.\
|
||||
"""
|
||||
|
||||
UNKNOWN_BACKEND = """\
|
||||
The cache backend %r is unknown.
|
||||
Please use one of the following backends instead: %s\
|
||||
"""
|
||||
|
||||
|
||||
def import_best_memcache():
|
||||
if _imp[0] is None:
|
||||
is_pylibmc = False
|
||||
try:
|
||||
import pylibmc as memcache
|
||||
is_pylibmc = True
|
||||
except ImportError:
|
||||
try:
|
||||
import memcache # noqa
|
||||
except ImportError:
|
||||
raise ImproperlyConfigured(REQUIRES_BACKEND)
|
||||
_imp[0] = (is_pylibmc, memcache)
|
||||
return _imp[0]
|
||||
|
||||
|
||||
def get_best_memcache(*args, **kwargs):
|
||||
behaviors = kwargs.pop('behaviors', None)
|
||||
is_pylibmc, memcache = import_best_memcache()
|
||||
client = memcache.Client(*args, **kwargs)
|
||||
if is_pylibmc and behaviors is not None:
|
||||
client.behaviors = behaviors
|
||||
return client
|
||||
|
||||
|
||||
class DummyClient(object):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.cache = LRUCache(limit=5000)
|
||||
|
||||
def get(self, key, *args, **kwargs):
|
||||
return self.cache.get(key)
|
||||
|
||||
def get_multi(self, keys):
|
||||
cache = self.cache
|
||||
return dict((k, cache[k]) for k in keys if k in cache)
|
||||
|
||||
def set(self, key, value, *args, **kwargs):
|
||||
self.cache[key] = value
|
||||
|
||||
def delete(self, key, *args, **kwargs):
|
||||
self.cache.pop(key, None)
|
||||
|
||||
def incr(self, key, delta=1):
|
||||
return self.cache.incr(key, delta)
|
||||
|
||||
|
||||
backends = {'memcache': lambda: get_best_memcache,
|
||||
'memcached': lambda: get_best_memcache,
|
||||
'pylibmc': lambda: get_best_memcache,
|
||||
'memory': lambda: DummyClient}
|
||||
|
||||
|
||||
class CacheBackend(KeyValueStoreBackend):
|
||||
servers = None
|
||||
supports_autoexpire = True
|
||||
supports_native_join = True
|
||||
implements_incr = True
|
||||
|
||||
def __init__(self, expires=None, backend=None, options={}, **kwargs):
|
||||
super(CacheBackend, self).__init__(self, **kwargs)
|
||||
|
||||
self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
|
||||
**options)
|
||||
|
||||
self.backend = backend or self.app.conf.CELERY_CACHE_BACKEND
|
||||
if self.backend:
|
||||
self.backend, _, servers = self.backend.partition('://')
|
||||
self.servers = servers.rstrip('/').split(';')
|
||||
self.expires = self.prepare_expires(expires, type=int)
|
||||
try:
|
||||
self.Client = backends[self.backend]()
|
||||
except KeyError:
|
||||
raise ImproperlyConfigured(UNKNOWN_BACKEND % (
|
||||
self.backend, ', '.join(backends)))
|
||||
|
||||
def get(self, key):
|
||||
return self.client.get(key)
|
||||
|
||||
def mget(self, keys):
|
||||
return self.client.get_multi(keys)
|
||||
|
||||
def set(self, key, value):
|
||||
return self.client.set(key, value, self.expires)
|
||||
|
||||
def delete(self, key):
|
||||
return self.client.delete(key)
|
||||
|
||||
def on_chord_apply(self, group_id, body, result=None, **kwargs):
|
||||
self.client.set(self.get_key_for_chord(group_id), '0', time=86400)
|
||||
self.save_group(group_id, self.app.GroupResult(group_id, result))
|
||||
|
||||
def incr(self, key):
|
||||
return self.client.incr(key)
|
||||
|
||||
@cached_property
|
||||
def client(self):
|
||||
return self.Client(self.servers, **self.options)
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
servers = ';'.join(self.servers)
|
||||
backend = '%s://%s/' % (self.backend, servers)
|
||||
kwargs.update(
|
||||
dict(backend=backend,
|
||||
expires=self.expires,
|
||||
options=self.options))
|
||||
return super(CacheBackend, self).__reduce__(args, kwargs)
|
||||
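CacheBackend above splits the CELERY_CACHE_BACKEND string on '://' to pick the client class and on ';' to get the server list, so 'memcached://host1:11211;host2:11211/' selects a memcached client with two servers while 'memory://' falls back to the in-process DummyClient. A standalone sketch of just that parsing step, mirroring __init__ (sample values are illustrative):

# Standalone sketch of the backend-string parsing done in CacheBackend.__init__.
def parse_cache_backend(setting):
    backend, _, servers = setting.partition('://')
    return backend, servers.rstrip('/').split(';')


print(parse_cache_backend('memcached://192.168.0.1:11211;192.168.0.2:11211/'))
# -> ('memcached', ['192.168.0.1:11211', '192.168.0.2:11211'])
print(parse_cache_backend('memory://'))
# -> ('memory', [''])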
188
awx/lib/site-packages/celery/backends/cassandra.py
Normal file
@@ -0,0 +1,188 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.cassandra
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Apache Cassandra result store backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
try: # pragma: no cover
|
||||
import pycassa
|
||||
from thrift import Thrift
|
||||
C = pycassa.cassandra.ttypes
|
||||
except ImportError: # pragma: no cover
|
||||
pycassa = None # noqa
|
||||
|
||||
import socket
|
||||
import time
|
||||
|
||||
from celery import states
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
from celery.utils.log import get_logger
|
||||
from celery.utils.timeutils import maybe_timedelta, timedelta_seconds
|
||||
|
||||
from .base import BaseDictBackend
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class CassandraBackend(BaseDictBackend):
|
||||
"""Highly fault tolerant Cassandra backend.
|
||||
|
||||
.. attribute:: servers
|
||||
|
||||
List of Cassandra servers with format: ``hostname:port``.
|
||||
|
||||
:raises celery.exceptions.ImproperlyConfigured: if
|
||||
module :mod:`pycassa` is not available.
|
||||
|
||||
"""
|
||||
servers = []
|
||||
keyspace = None
|
||||
column_family = None
|
||||
detailed_mode = False
|
||||
_retry_timeout = 300
|
||||
_retry_wait = 3
|
||||
supports_autoexpire = True
|
||||
|
||||
def __init__(self, servers=None, keyspace=None, column_family=None,
|
||||
cassandra_options=None, detailed_mode=False, **kwargs):
|
||||
"""Initialize Cassandra backend.
|
||||
|
||||
Raises :class:`celery.exceptions.ImproperlyConfigured` if
|
||||
the :setting:`CASSANDRA_SERVERS` setting is not set.
|
||||
|
||||
"""
|
||||
super(CassandraBackend, self).__init__(**kwargs)
|
||||
|
||||
self.expires = kwargs.get('expires') or maybe_timedelta(
|
||||
self.app.conf.CELERY_TASK_RESULT_EXPIRES)
|
||||
|
||||
if not pycassa:
|
||||
raise ImproperlyConfigured(
|
||||
'You need to install the pycassa library to use the '
|
||||
'Cassandra backend. See https://github.com/pycassa/pycassa')
|
||||
|
||||
conf = self.app.conf
|
||||
self.servers = (servers or
|
||||
conf.get('CASSANDRA_SERVERS') or
|
||||
self.servers)
|
||||
self.keyspace = (keyspace or
|
||||
conf.get('CASSANDRA_KEYSPACE') or
|
||||
self.keyspace)
|
||||
self.column_family = (column_family or
|
||||
conf.get('CASSANDRA_COLUMN_FAMILY') or
|
||||
self.column_family)
|
||||
self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {},
|
||||
**cassandra_options or {})
|
||||
self.detailed_mode = (detailed_mode or
|
||||
conf.get('CASSANDRA_DETAILED_MODE') or
|
||||
self.detailed_mode)
|
||||
read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM'
|
||||
write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM'
|
||||
try:
|
||||
self.read_consistency = getattr(pycassa.ConsistencyLevel,
|
||||
read_cons)
|
||||
except AttributeError:
|
||||
self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM
|
||||
try:
|
||||
self.write_consistency = getattr(pycassa.ConsistencyLevel,
|
||||
write_cons)
|
||||
except AttributeError:
|
||||
self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM
|
||||
|
||||
if not self.servers or not self.keyspace or not self.column_family:
|
||||
raise ImproperlyConfigured(
|
||||
'Cassandra backend not configured.')
|
||||
|
||||
self._column_family = None
|
||||
|
||||
def _retry_on_error(self, fun, *args, **kwargs):
|
||||
ts = time.time() + self._retry_timeout
|
||||
while 1:
|
||||
try:
|
||||
return fun(*args, **kwargs)
|
||||
except (pycassa.InvalidRequestException,
|
||||
pycassa.TimedOutException,
|
||||
pycassa.UnavailableException,
|
||||
pycassa.AllServersUnavailable,
|
||||
socket.error,
|
||||
socket.timeout,
|
||||
Thrift.TException), exc:
|
||||
if time.time() > ts:
|
||||
raise
|
||||
logger.warning('Cassandra error: %r. Retrying...', exc)
|
||||
time.sleep(self._retry_wait)
|
||||
|
||||
def _get_column_family(self):
|
||||
if self._column_family is None:
|
||||
conn = pycassa.ConnectionPool(self.keyspace,
|
||||
server_list=self.servers,
|
||||
**self.cassandra_options)
|
||||
self._column_family = pycassa.ColumnFamily(
|
||||
conn, self.column_family,
|
||||
read_consistency_level=self.read_consistency,
|
||||
write_consistency_level=self.write_consistency,
|
||||
)
|
||||
return self._column_family
|
||||
|
||||
def process_cleanup(self):
|
||||
if self._column_family is not None:
|
||||
self._column_family = None
|
||||
|
||||
def _store_result(self, task_id, result, status, traceback=None):
|
||||
"""Store return value and status of an executed task."""
|
||||
|
||||
def _do_store():
|
||||
cf = self._get_column_family()
|
||||
date_done = self.app.now()
|
||||
meta = {'status': status,
|
||||
'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
|
||||
'traceback': self.encode(traceback),
|
||||
'children': self.encode(self.current_task_children())}
|
||||
if self.detailed_mode:
|
||||
meta['result'] = result
|
||||
cf.insert(task_id, {date_done: self.encode(meta)},
|
||||
ttl=self.expires and timedelta_seconds(self.expires))
|
||||
else:
|
||||
meta['result'] = self.encode(result)
|
||||
cf.insert(task_id, meta,
|
||||
ttl=self.expires and timedelta_seconds(self.expires))
|
||||
|
||||
return self._retry_on_error(_do_store)
|
||||
|
||||
def _get_task_meta_for(self, task_id):
|
||||
"""Get task metadata for a task by id."""
|
||||
|
||||
def _do_get():
|
||||
cf = self._get_column_family()
|
||||
try:
|
||||
if self.detailed_mode:
|
||||
row = cf.get(task_id, column_reversed=True, column_count=1)
|
||||
meta = self.decode(row.values()[0])
|
||||
meta['task_id'] = task_id
|
||||
else:
|
||||
obj = cf.get(task_id)
|
||||
meta = {
|
||||
'task_id': task_id,
|
||||
'status': obj['status'],
|
||||
'result': self.decode(obj['result']),
|
||||
'date_done': obj['date_done'],
|
||||
'traceback': self.decode(obj['traceback']),
|
||||
'children': self.decode(obj['children']),
|
||||
}
|
||||
except (KeyError, pycassa.NotFoundException):
|
||||
meta = {'status': states.PENDING, 'result': None}
|
||||
return meta
|
||||
|
||||
return self._retry_on_error(_do_get)
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
kwargs.update(
|
||||
dict(servers=self.servers,
|
||||
keyspace=self.keyspace,
|
||||
column_family=self.column_family,
|
||||
cassandra_options=self.cassandra_options))
|
||||
return super(CassandraBackend, self).__reduce__(args, kwargs)
|
||||
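_retry_on_error above wraps every Cassandra operation in a deadline-plus-sleep loop: transient pycassa/socket errors are retried every _retry_wait (3) seconds until _retry_timeout (300) seconds have passed, after which the error is re-raised. A generic rendering of that pattern with the exception classes parameterised, since pycassa may not be installed where this is read:

# Generic form of the retry loop used by CassandraBackend._retry_on_error above.
import time


def retry_on_error(fun, errors, timeout=300, wait=3, *args, **kwargs):
    deadline = time.time() + timeout
    while 1:
        try:
            return fun(*args, **kwargs)
        except errors:
            if time.time() > deadline:
                raise            # deadline exceeded: give up
            time.sleep(wait)     # transient error: wait and try again

# e.g. retry_on_error(cf.get, (socket.error, socket.timeout), 300, 3, task_id)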
182
awx/lib/site-packages/celery/backends/database/__init__.py
Normal file
@@ -0,0 +1,182 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.database
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
SQLAlchemy result store backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from celery import states
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
from celery.utils.timeutils import maybe_timedelta
|
||||
|
||||
from celery.backends.base import BaseDictBackend
|
||||
|
||||
from .models import Task, TaskSet
|
||||
from .session import ResultSession
|
||||
|
||||
|
||||
def _sqlalchemy_installed():
|
||||
try:
|
||||
import sqlalchemy
|
||||
except ImportError:
|
||||
raise ImproperlyConfigured(
|
||||
'The database result backend requires SQLAlchemy to be installed. '
|
||||
'See http://pypi.python.org/pypi/SQLAlchemy')
|
||||
return sqlalchemy
|
||||
_sqlalchemy_installed()
|
||||
|
||||
from sqlalchemy.exc import DatabaseError, OperationalError
|
||||
|
||||
|
||||
def retry(fun):
|
||||
|
||||
@wraps(fun)
|
||||
def _inner(*args, **kwargs):
|
||||
max_retries = kwargs.pop('max_retries', 3)
|
||||
|
||||
for retries in xrange(max_retries + 1):
|
||||
try:
|
||||
return fun(*args, **kwargs)
|
||||
except (DatabaseError, OperationalError):
|
||||
if retries + 1 > max_retries:
|
||||
raise
|
||||
|
||||
return _inner
|
||||
|
||||
|
||||
class DatabaseBackend(BaseDictBackend):
|
||||
"""The database result backend."""
|
||||
# ResultSet.iterate should sleep this much between each poll,
|
||||
# to not bombard the database with queries.
|
||||
subpolling_interval = 0.5
|
||||
|
||||
def __init__(self, dburi=None, expires=None,
|
||||
engine_options=None, **kwargs):
|
||||
super(DatabaseBackend, self).__init__(**kwargs)
|
||||
conf = self.app.conf
|
||||
self.expires = maybe_timedelta(self.prepare_expires(expires))
|
||||
self.dburi = dburi or conf.CELERY_RESULT_DBURI
|
||||
self.engine_options = dict(
|
||||
engine_options or {},
|
||||
**conf.CELERY_RESULT_ENGINE_OPTIONS or {})
|
||||
self.short_lived_sessions = kwargs.get(
|
||||
'short_lived_sessions',
|
||||
conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS,
|
||||
)
|
||||
if not self.dburi:
|
||||
raise ImproperlyConfigured(
|
||||
'Missing connection string! Do you have '
|
||||
'CELERY_RESULT_DBURI set to a real value?')
|
||||
|
||||
def ResultSession(self):
|
||||
return ResultSession(
|
||||
dburi=self.dburi,
|
||||
short_lived_sessions=self.short_lived_sessions,
|
||||
**self.engine_options
|
||||
)
|
||||
|
||||
@retry
|
||||
def _store_result(self, task_id, result, status,
|
||||
traceback=None, max_retries=3):
|
||||
"""Store return value and status of an executed task."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
task = session.query(Task).filter(Task.task_id == task_id).first()
|
||||
if not task:
|
||||
task = Task(task_id)
|
||||
session.add(task)
|
||||
session.flush()
|
||||
task.result = result
|
||||
task.status = status
|
||||
task.traceback = traceback
|
||||
session.commit()
|
||||
return result
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
@retry
|
||||
def _get_task_meta_for(self, task_id):
|
||||
"""Get task metadata for a task by id."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
task = session.query(Task).filter(Task.task_id == task_id).first()
|
||||
if task is None:
|
||||
task = Task(task_id)
|
||||
task.status = states.PENDING
|
||||
task.result = None
|
||||
return task.to_dict()
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
@retry
|
||||
def _save_group(self, group_id, result):
|
||||
"""Store the result of an executed group."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
group = TaskSet(group_id, result)
|
||||
session.add(group)
|
||||
session.flush()
|
||||
session.commit()
|
||||
return result
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
@retry
|
||||
def _restore_group(self, group_id):
|
||||
"""Get metadata for group by id."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
group = session.query(TaskSet).filter(
|
||||
TaskSet.taskset_id == group_id).first()
|
||||
if group:
|
||||
return group.to_dict()
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
@retry
|
||||
def _delete_group(self, group_id):
|
||||
"""Delete metadata for group by id."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
session.query(TaskSet).filter(
|
||||
TaskSet.taskset_id == group_id).delete()
|
||||
session.flush()
|
||||
session.commit()
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
@retry
|
||||
def _forget(self, task_id):
|
||||
"""Forget about result."""
|
||||
session = self.ResultSession()
|
||||
try:
|
||||
session.query(Task).filter(Task.task_id == task_id).delete()
|
||||
session.commit()
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
def cleanup(self):
|
||||
"""Delete expired metadata."""
|
||||
session = self.ResultSession()
|
||||
expires = self.expires
|
||||
now = self.app.now()
|
||||
try:
|
||||
session.query(Task).filter(
|
||||
Task.date_done < (now - expires)).delete()
|
||||
session.query(TaskSet).filter(
|
||||
TaskSet.date_done < (now - expires)).delete()
|
||||
session.commit()
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
kwargs.update(
|
||||
dict(dburi=self.dburi,
|
||||
expires=self.expires,
|
||||
engine_options=self.engine_options))
|
||||
return super(DatabaseBackend, self).__reduce__(args, kwargs)
|
||||
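For reference, a minimal sketch of how the DatabaseBackend above is usually selected and configured; it is not part of the vendored file, and the app name, result URI and engine options are illustrative assumptions only:

    from celery import Celery

    app = Celery('tasks', broker='amqp://guest@localhost//')
    app.conf.update(
        CELERY_RESULT_BACKEND='database',            # selects DatabaseBackend
        CELERY_RESULT_DBURI='sqlite:///results.db',  # read by __init__ above
        CELERY_RESULT_ENGINE_OPTIONS={'echo': False},
    )

With this in place the retry-decorated _store_result/_get_task_meta_for methods above read and write rows in the celery_taskmeta table.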
71
awx/lib/site-packages/celery/backends/database/a805d4bd.py
Normal file
@@ -0,0 +1,71 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.database.a805d4bd
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module fixes a bug with pickling and relative imports in Python < 2.6.
|
||||
|
||||
The problem is with pickling an e.g. `exceptions.KeyError` instance.
|
||||
As SQLAlchemy has its own `exceptions` module, pickle will try to
|
||||
lookup :exc:`KeyError` in the wrong module, resulting in this exception::
|
||||
|
||||
cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
|
||||
attribute lookup exceptions.KeyError failed
|
||||
|
||||
doing `import exceptions` just before the dump in `sqlalchemy.types`
|
||||
reveals the source of the bug::
|
||||
|
||||
EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
|
||||
workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
|
||||
|
||||
Hence the random module name 'a805d5bd' is taken to decrease the chances of
|
||||
a collision.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from sqlalchemy.types import PickleType as _PickleType
|
||||
|
||||
|
||||
class PickleType(_PickleType): # pragma: no cover
|
||||
|
||||
def bind_processor(self, dialect):
|
||||
impl_processor = self.impl.bind_processor(dialect)
|
||||
dumps = self.pickler.dumps
|
||||
protocol = self.protocol
|
||||
if impl_processor:
|
||||
|
||||
def process(value):
|
||||
if value is not None:
|
||||
value = dumps(value, protocol)
|
||||
return impl_processor(value)
|
||||
|
||||
else:
|
||||
|
||||
def process(value): # noqa
|
||||
if value is not None:
|
||||
value = dumps(value, protocol)
|
||||
return value
|
||||
return process
|
||||
|
||||
def result_processor(self, dialect, coltype):
|
||||
impl_processor = self.impl.result_processor(dialect, coltype)
|
||||
loads = self.pickler.loads
|
||||
if impl_processor:
|
||||
|
||||
def process(value):
|
||||
value = impl_processor(value)
|
||||
if value is not None:
|
||||
return loads(value)
|
||||
else:
|
||||
|
||||
def process(value): # noqa
|
||||
if value is not None:
|
||||
return loads(value)
|
||||
return process
|
||||
|
||||
def copy_value(self, value):
|
||||
if self.mutable:
|
||||
return self.pickler.loads(self.pickler.dumps(value, self.protocol))
|
||||
else:
|
||||
return value
|
||||
50
awx/lib/site-packages/celery/backends/database/dfd042c7.py
Normal file
@@ -0,0 +1,50 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.database.dfd042c7
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
SQLAlchemy 0.5.8 version of :mod:`~celery.backends.database.a805d4bd`,
|
||||
see the docstring of that module for an explanation of why we need
|
||||
this workaround.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from sqlalchemy.types import PickleType as _PickleType
|
||||
from sqlalchemy import util
|
||||
|
||||
|
||||
class PickleType(_PickleType): # pragma: no cover
|
||||
|
||||
def process_bind_param(self, value, dialect):
|
||||
dumps = self.pickler.dumps
|
||||
protocol = self.protocol
|
||||
if value is not None:
|
||||
return dumps(value, protocol)
|
||||
|
||||
def process_result_value(self, value, dialect):
|
||||
loads = self.pickler.loads
|
||||
if value is not None:
|
||||
return loads(str(value))
|
||||
|
||||
def copy_value(self, value):
|
||||
if self.mutable:
|
||||
return self.pickler.loads(self.pickler.dumps(value, self.protocol))
|
||||
else:
|
||||
return value
|
||||
|
||||
def compare_values(self, x, y):
|
||||
if self.comparator:
|
||||
return self.comparator(x, y)
|
||||
elif self.mutable and not hasattr(x, '__eq__') and x is not None:
|
||||
util.warn_deprecated(
|
||||
'Objects stored with PickleType when mutable=True '
|
||||
'must implement __eq__() for reliable comparison.')
|
||||
a = self.pickler.dumps(x, self.protocol)
|
||||
b = self.pickler.dumps(y, self.protocol)
|
||||
return a == b
|
||||
else:
|
||||
return x == y
|
||||
|
||||
def is_mutable(self):
|
||||
return self.mutable
|
||||
77
awx/lib/site-packages/celery/backends/database/models.py
Normal file
@@ -0,0 +1,77 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.database.models
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Database tables for the SQLAlchemy result store backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
||||
from celery import states
|
||||
|
||||
from .session import ResultModelBase
|
||||
|
||||
# See the docstring of a805d4bd for an explanation of this workaround ;)
|
||||
if sa.__version__.startswith('0.5'):
|
||||
from .dfd042c7 import PickleType
|
||||
else:
|
||||
from .a805d4bd import PickleType # noqa
|
||||
|
||||
|
||||
class Task(ResultModelBase):
|
||||
"""Task result/status."""
|
||||
__tablename__ = 'celery_taskmeta'
|
||||
__table_args__ = {'sqlite_autoincrement': True}
|
||||
|
||||
id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
|
||||
primary_key=True,
|
||||
autoincrement=True)
|
||||
task_id = sa.Column(sa.String(255), unique=True)
|
||||
status = sa.Column(sa.String(50), default=states.PENDING)
|
||||
result = sa.Column(PickleType, nullable=True)
|
||||
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
|
||||
onupdate=datetime.utcnow, nullable=True)
|
||||
traceback = sa.Column(sa.Text, nullable=True)
|
||||
|
||||
def __init__(self, task_id):
|
||||
self.task_id = task_id
|
||||
|
||||
def to_dict(self):
|
||||
return {'task_id': self.task_id,
|
||||
'status': self.status,
|
||||
'result': self.result,
|
||||
'traceback': self.traceback,
|
||||
'date_done': self.date_done}
|
||||
|
||||
def __repr__(self):
|
||||
return '<Task %s state: %s>' % (self.task_id, self.status)
|
||||
|
||||
|
||||
class TaskSet(ResultModelBase):
|
||||
"""TaskSet result"""
|
||||
__tablename__ = 'celery_tasksetmeta'
|
||||
__table_args__ = {'sqlite_autoincrement': True}
|
||||
|
||||
id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
|
||||
autoincrement=True, primary_key=True)
|
||||
taskset_id = sa.Column(sa.String(255), unique=True)
|
||||
result = sa.Column(sa.PickleType, nullable=True)
|
||||
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
|
||||
nullable=True)
|
||||
|
||||
def __init__(self, taskset_id, result):
|
||||
self.taskset_id = taskset_id
|
||||
self.result = result
|
||||
|
||||
def to_dict(self):
|
||||
return {'taskset_id': self.taskset_id,
|
||||
'result': self.result,
|
||||
'date_done': self.date_done}
|
||||
|
||||
def __repr__(self):
|
||||
return '<TaskSet: %s>' % (self.taskset_id, )
|
||||
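As a rough illustration of how these tables are used, a sketch only (it assumes the vendored celery package is importable; the task id is made up):

    import sqlalchemy as sa
    from sqlalchemy.orm import sessionmaker

    from celery.backends.database.models import Task
    from celery.backends.database.session import ResultModelBase

    engine = sa.create_engine('sqlite:///:memory:')
    ResultModelBase.metadata.create_all(engine)   # creates celery_taskmeta / celery_tasksetmeta

    session = sessionmaker(bind=engine)()
    session.add(Task('illustrative-task-id'))
    session.commit()
    print(session.query(Task).first().to_dict())  # status defaults to PENDING, result is None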
46
awx/lib/site-packages/celery/backends/database/session.py
Normal file
@@ -0,0 +1,46 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.database.session
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
SQLAlchemy sessions.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
|
||||
ResultModelBase = declarative_base()
|
||||
|
||||
_SETUP = defaultdict(lambda: False)
|
||||
_ENGINES = {}
|
||||
_SESSIONS = {}
|
||||
|
||||
|
||||
def get_engine(dburi, **kwargs):
|
||||
if dburi not in _ENGINES:
|
||||
_ENGINES[dburi] = create_engine(dburi, **kwargs)
|
||||
return _ENGINES[dburi]
|
||||
|
||||
|
||||
def create_session(dburi, short_lived_sessions=False, **kwargs):
|
||||
engine = get_engine(dburi, **kwargs)
|
||||
if short_lived_sessions or dburi not in _SESSIONS:
|
||||
_SESSIONS[dburi] = sessionmaker(bind=engine)
|
||||
return engine, _SESSIONS[dburi]
|
||||
|
||||
|
||||
def setup_results(engine):
|
||||
if not _SETUP['results']:
|
||||
ResultModelBase.metadata.create_all(engine)
|
||||
_SETUP['results'] = True
|
||||
|
||||
|
||||
def ResultSession(dburi, **kwargs):
|
||||
engine, session = create_session(dburi, **kwargs)
|
||||
setup_results(engine)
|
||||
return session()
|
||||
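A small usage sketch of the session helpers above (the URI is illustrative; it assumes the vendored package is importable):

    from celery.backends.database.session import ResultSession, get_engine

    # Engines and session factories are cached per URI in the module-level
    # dicts above, so repeated calls with the same dburi reuse the same engine.
    session = ResultSession('sqlite:///results.db')   # also creates the tables on first use
    assert get_engine('sqlite:///results.db') is get_engine('sqlite:///results.db')
    session.close()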
223
awx/lib/site-packages/celery/backends/mongodb.py
Normal file
@@ -0,0 +1,223 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.mongodb
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
MongoDB result store backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
try:
|
||||
import pymongo
|
||||
except ImportError: # pragma: no cover
|
||||
pymongo = None # noqa
|
||||
|
||||
if pymongo:
|
||||
try:
|
||||
from bson.binary import Binary
|
||||
except ImportError: # pragma: no cover
|
||||
from pymongo.binary import Binary # noqa
|
||||
else: # pragma: no cover
|
||||
Binary = None # noqa
|
||||
|
||||
from kombu.utils import cached_property
|
||||
|
||||
from celery import states
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
from celery.utils.timeutils import maybe_timedelta
|
||||
|
||||
from .base import BaseDictBackend
|
||||
|
||||
|
||||
class Bunch(object):
|
||||
|
||||
def __init__(self, **kw):
|
||||
self.__dict__.update(kw)
|
||||
|
||||
|
||||
class MongoBackend(BaseDictBackend):
|
||||
mongodb_host = 'localhost'
|
||||
mongodb_port = 27017
|
||||
mongodb_user = None
|
||||
mongodb_password = None
|
||||
mongodb_database = 'celery'
|
||||
mongodb_taskmeta_collection = 'celery_taskmeta'
|
||||
mongodb_max_pool_size = 10
|
||||
mongodb_options = None
|
||||
|
||||
supports_autoexpire = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initialize MongoDB backend instance.
|
||||
|
||||
:raises celery.exceptions.ImproperlyConfigured: if
|
||||
module :mod:`pymongo` is not available.
|
||||
|
||||
"""
|
||||
super(MongoBackend, self).__init__(*args, **kwargs)
|
||||
self.expires = kwargs.get('expires') or maybe_timedelta(
|
||||
self.app.conf.CELERY_TASK_RESULT_EXPIRES)
|
||||
|
||||
if not pymongo:
|
||||
raise ImproperlyConfigured(
|
||||
'You need to install the pymongo library to use the '
|
||||
'MongoDB backend.')
|
||||
|
||||
config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)
|
||||
if config is not None:
|
||||
if not isinstance(config, dict):
|
||||
raise ImproperlyConfigured(
|
||||
'MongoDB backend settings should be grouped in a dict')
|
||||
|
||||
self.mongodb_host = config.get('host', self.mongodb_host)
|
||||
self.mongodb_port = int(config.get('port', self.mongodb_port))
|
||||
self.mongodb_user = config.get('user', self.mongodb_user)
|
||||
self.mongodb_options = config.get('options', {})
|
||||
self.mongodb_password = config.get(
|
||||
'password', self.mongodb_password)
|
||||
self.mongodb_database = config.get(
|
||||
'database', self.mongodb_database)
|
||||
self.mongodb_taskmeta_collection = config.get(
|
||||
'taskmeta_collection', self.mongodb_taskmeta_collection)
|
||||
self.mongodb_max_pool_size = config.get(
|
||||
'max_pool_size', self.mongodb_max_pool_size)
|
||||
|
||||
self._connection = None
|
||||
|
||||
def _get_connection(self):
|
||||
"""Connect to the MongoDB server."""
|
||||
if self._connection is None:
|
||||
from pymongo.connection import Connection
|
||||
|
||||
# The first pymongo.Connection() argument (host) can be
|
||||
# a list of ['host:port'] elements or a mongodb connection
|
||||
# URI. If this is the case, don't use self.mongodb_port
|
||||
# but let pymongo get the port(s) from the URI instead.
|
||||
# This enables the use of replica sets and sharding.
|
||||
# See pymongo.Connection() for more info.
|
||||
args = [self.mongodb_host]
|
||||
kwargs = {'max_pool_size': self.mongodb_max_pool_size}
|
||||
if isinstance(self.mongodb_host, basestring) \
|
||||
and not self.mongodb_host.startswith('mongodb://'):
|
||||
args.append(self.mongodb_port)
|
||||
|
||||
self._connection = Connection(
|
||||
*args, **dict(kwargs, **self.mongodb_options or {})
|
||||
)
|
||||
|
||||
return self._connection
|
||||
|
||||
def process_cleanup(self):
|
||||
if self._connection is not None:
|
||||
# MongoDB connection will be closed automatically when object
|
||||
# goes out of scope
|
||||
self._connection = None
|
||||
|
||||
def _store_result(self, task_id, result, status, traceback=None):
|
||||
"""Store return value and status of an executed task."""
|
||||
meta = {'_id': task_id,
|
||||
'status': status,
|
||||
'result': Binary(self.encode(result)),
|
||||
'date_done': datetime.utcnow(),
|
||||
'traceback': Binary(self.encode(traceback)),
|
||||
'children': Binary(self.encode(self.current_task_children()))}
|
||||
self.collection.save(meta, safe=True)
|
||||
|
||||
return result
|
||||
|
||||
def _get_task_meta_for(self, task_id):
|
||||
"""Get task metadata for a task by id."""
|
||||
|
||||
obj = self.collection.find_one({'_id': task_id})
|
||||
if not obj:
|
||||
return {'status': states.PENDING, 'result': None}
|
||||
|
||||
meta = {
|
||||
'task_id': obj['_id'],
|
||||
'status': obj['status'],
|
||||
'result': self.decode(obj['result']),
|
||||
'date_done': obj['date_done'],
|
||||
'traceback': self.decode(obj['traceback']),
|
||||
'children': self.decode(obj['children']),
|
||||
}
|
||||
|
||||
return meta
|
||||
|
||||
def _save_group(self, group_id, result):
|
||||
"""Save the group result."""
|
||||
meta = {'_id': group_id,
|
||||
'result': Binary(self.encode(result)),
|
||||
'date_done': datetime.utcnow()}
|
||||
self.collection.save(meta, safe=True)
|
||||
|
||||
return result
|
||||
|
||||
def _restore_group(self, group_id):
|
||||
"""Get the result for a group by id."""
|
||||
obj = self.collection.find_one({'_id': group_id})
|
||||
if not obj:
|
||||
return
|
||||
|
||||
meta = {
|
||||
'task_id': obj['_id'],
|
||||
'result': self.decode(obj['result']),
|
||||
'date_done': obj['date_done'],
|
||||
}
|
||||
|
||||
return meta
|
||||
|
||||
def _delete_group(self, group_id):
|
||||
"""Delete a group by id."""
|
||||
self.collection.remove({'_id': group_id})
|
||||
|
||||
def _forget(self, task_id):
|
||||
"""
|
||||
Remove result from MongoDB.
|
||||
|
||||
:raises celery.exceptions.OperationsError: if the task_id could not be
|
||||
removed.
|
||||
"""
|
||||
# By using safe=True, this will wait until it receives a response from
|
||||
# the server. Likewise, it will raise an OperationsError if the
|
||||
# response was unable to be completed.
|
||||
self.collection.remove({'_id': task_id}, safe=True)
|
||||
|
||||
def cleanup(self):
|
||||
"""Delete expired metadata."""
|
||||
self.collection.remove(
|
||||
{'date_done': {'$lt': self.app.now() - self.expires}},
|
||||
)
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
kwargs.update(
|
||||
dict(expires=self.expires))
|
||||
return super(MongoBackend, self).__reduce__(args, kwargs)
|
||||
|
||||
def _get_database(self):
|
||||
conn = self._get_connection()
|
||||
db = conn[self.mongodb_database]
|
||||
if self.mongodb_user and self.mongodb_password:
|
||||
if not db.authenticate(self.mongodb_user,
|
||||
self.mongodb_password):
|
||||
raise ImproperlyConfigured(
|
||||
'Invalid MongoDB username or password.')
|
||||
return db
|
||||
|
||||
@cached_property
|
||||
def database(self):
|
||||
"""Get database from MongoDB connection and perform authentication
|
||||
if necessary."""
|
||||
return self._get_database()
|
||||
|
||||
@cached_property
|
||||
def collection(self):
|
||||
"""Get the metadata task collection."""
|
||||
collection = self.database[self.mongodb_taskmeta_collection]
|
||||
|
||||
# Ensure an index on date_done exists; if not, build the index
|
||||
# in the background. Once built, cleanup will be much faster.
|
||||
collection.ensure_index('date_done', background='true')
|
||||
return collection
|
||||
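The settings dict read by MongoBackend.__init__ above typically looks like the following sketch (hostname, credentials and collection name are illustrative, not part of this commit):

    CELERY_RESULT_BACKEND = 'mongodb'
    CELERY_MONGODB_BACKEND_SETTINGS = {
        'host': 'mongo.example.com',     # may also be a mongodb:// URI or list of hosts
        'port': 27017,
        'database': 'celery',
        'taskmeta_collection': 'celery_taskmeta',
        'user': 'celeryuser',
        'password': 'secret',
        'max_pool_size': 10,
        'options': {},                   # extra kwargs passed to pymongo.Connection
    }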
121
awx/lib/site-packages/celery/backends/redis.py
Normal file
@@ -0,0 +1,121 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.backends.redis
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Redis result store backend.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from kombu.utils import cached_property
|
||||
from kombu.utils.url import _parse_url
|
||||
|
||||
from celery.exceptions import ImproperlyConfigured
|
||||
|
||||
from .base import KeyValueStoreBackend
|
||||
|
||||
try:
|
||||
import redis
|
||||
from redis.exceptions import ConnectionError
|
||||
except ImportError: # pragma: no cover
|
||||
redis = None # noqa
|
||||
ConnectionError = None # noqa
|
||||
|
||||
|
||||
class RedisBackend(KeyValueStoreBackend):
|
||||
"""Redis task result store."""
|
||||
|
||||
#: redis-py client module.
|
||||
redis = redis
|
||||
|
||||
#: default Redis server hostname (`localhost`).
|
||||
host = 'localhost'
|
||||
|
||||
#: default Redis server port (6379)
|
||||
port = 6379
|
||||
|
||||
#: default Redis db number (0)
|
||||
db = 0
|
||||
|
||||
#: default Redis password (:const:`None`)
|
||||
password = None
|
||||
|
||||
#: Maximum number of connections in the pool.
|
||||
max_connections = None
|
||||
|
||||
supports_autoexpire = True
|
||||
supports_native_join = True
|
||||
implements_incr = True
|
||||
|
||||
def __init__(self, host=None, port=None, db=None, password=None,
|
||||
expires=None, max_connections=None, url=None, **kwargs):
|
||||
super(RedisBackend, self).__init__(**kwargs)
|
||||
conf = self.app.conf
|
||||
if self.redis is None:
|
||||
raise ImproperlyConfigured(
|
||||
'You need to install the redis library in order to use '
|
||||
'the Redis result store backend.')
|
||||
|
||||
# For compatibility with the old REDIS_* configuration keys.
|
||||
def _get(key):
|
||||
for prefix in 'CELERY_REDIS_%s', 'REDIS_%s':
|
||||
try:
|
||||
return conf[prefix % key]
|
||||
except KeyError:
|
||||
pass
|
||||
if host and '://' in host:
|
||||
url, host = host, None
|
||||
self.url = url
|
||||
uhost = uport = upass = udb = None
|
||||
if url:
|
||||
_, uhost, uport, _, upass, udb, _ = _parse_url(url)
|
||||
udb = udb.strip('/') if udb else 0
|
||||
self.host = uhost or host or _get('HOST') or self.host
|
||||
self.port = int(uport or port or _get('PORT') or self.port)
|
||||
self.db = udb or db or _get('DB') or self.db
|
||||
self.password = upass or password or _get('PASSWORD') or self.password
|
||||
self.expires = self.prepare_expires(expires, type=int)
|
||||
self.max_connections = (max_connections
|
||||
or _get('MAX_CONNECTIONS')
|
||||
or self.max_connections)
|
||||
|
||||
def get(self, key):
|
||||
return self.client.get(key)
|
||||
|
||||
def mget(self, keys):
|
||||
return self.client.mget(keys)
|
||||
|
||||
def set(self, key, value):
|
||||
client = self.client
|
||||
if self.expires is not None:
|
||||
client.setex(key, value, self.expires)
|
||||
else:
|
||||
client.set(key, value)
|
||||
client.publish(key, value)
|
||||
|
||||
def delete(self, key):
|
||||
self.client.delete(key)
|
||||
|
||||
def incr(self, key):
|
||||
return self.client.incr(key)
|
||||
|
||||
def expire(self, key, value):
|
||||
return self.client.expire(key, value)
|
||||
|
||||
@cached_property
|
||||
def client(self):
|
||||
pool = self.redis.ConnectionPool(host=self.host, port=self.port,
|
||||
db=self.db, password=self.password,
|
||||
max_connections=self.max_connections)
|
||||
return self.redis.Redis(connection_pool=pool)
|
||||
|
||||
def __reduce__(self, args=(), kwargs={}):
|
||||
kwargs.update(
|
||||
dict(host=self.host,
|
||||
port=self.port,
|
||||
db=self.db,
|
||||
password=self.password,
|
||||
expires=self.expires,
|
||||
max_connections=self.max_connections))
|
||||
return super(RedisBackend, self).__reduce__(args, kwargs)
|
||||
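For reference, the backend above is usually pointed at a server either with a URL or with the older CELERY_REDIS_* keys that the _get() helper falls back to; all values below are illustrative assumptions:

    # URL form: redis://:password@host:port/db
    CELERY_RESULT_BACKEND = 'redis://:secret@localhost:6379/0'

    # Equivalent old-style keys, still honoured for compatibility:
    CELERY_REDIS_HOST = 'localhost'
    CELERY_REDIS_PORT = 6379
    CELERY_REDIS_DB = 0
    CELERY_REDIS_PASSWORD = 'secret'
    CELERY_REDIS_MAX_CONNECTIONS = None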
510
awx/lib/site-packages/celery/beat.py
Normal file
@@ -0,0 +1,510 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.beat
|
||||
~~~~~~~~~~~
|
||||
|
||||
The periodic task scheduler.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import errno
|
||||
import os
|
||||
import time
|
||||
import shelve
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from billiard import Process, ensure_multiprocessing
|
||||
from kombu.utils import cached_property, reprcall
|
||||
from kombu.utils.functional import maybe_promise
|
||||
|
||||
from . import __version__
|
||||
from . import platforms
|
||||
from . import signals
|
||||
from . import current_app
|
||||
from .app import app_or_default
|
||||
from .schedules import maybe_schedule, crontab
|
||||
from .utils.imports import instantiate
|
||||
from .utils.threads import Event, Thread
|
||||
from .utils.timeutils import humanize_seconds
|
||||
from .utils.log import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
debug, info, error, warning = (logger.debug, logger.info,
|
||||
logger.error, logger.warning)
|
||||
|
||||
DEFAULT_MAX_INTERVAL = 300 # 5 minutes
|
||||
|
||||
|
||||
class SchedulingError(Exception):
|
||||
"""An error occurred while scheduling a task."""
|
||||
|
||||
|
||||
class ScheduleEntry(object):
|
||||
"""An entry in the scheduler.
|
||||
|
||||
:keyword name: see :attr:`name`.
|
||||
:keyword schedule: see :attr:`schedule`.
|
||||
:keyword args: see :attr:`args`.
|
||||
:keyword kwargs: see :attr:`kwargs`.
|
||||
:keyword options: see :attr:`options`.
|
||||
:keyword last_run_at: see :attr:`last_run_at`.
|
||||
:keyword total_run_count: see :attr:`total_run_count`.
|
||||
:keyword relative: Is the time relative to when the server starts?
|
||||
|
||||
"""
|
||||
|
||||
#: The task name
|
||||
name = None
|
||||
|
||||
#: The schedule (run_every/crontab)
|
||||
schedule = None
|
||||
|
||||
#: Positional arguments to apply.
|
||||
args = None
|
||||
|
||||
#: Keyword arguments to apply.
|
||||
kwargs = None
|
||||
|
||||
#: Task execution options.
|
||||
options = None
|
||||
|
||||
#: The time and date of when this task was last scheduled.
|
||||
last_run_at = None
|
||||
|
||||
#: Total number of times this task has been scheduled.
|
||||
total_run_count = 0
|
||||
|
||||
def __init__(self, name=None, task=None, last_run_at=None,
|
||||
total_run_count=None, schedule=None, args=(), kwargs={},
|
||||
options={}, relative=False):
|
||||
self.name = name
|
||||
self.task = task
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self.options = options
|
||||
self.schedule = maybe_schedule(schedule, relative)
|
||||
self.last_run_at = last_run_at or self._default_now()
|
||||
self.total_run_count = total_run_count or 0
|
||||
|
||||
def _default_now(self):
|
||||
return self.schedule.now() if self.schedule else current_app.now()
|
||||
|
||||
def _next_instance(self, last_run_at=None):
|
||||
"""Returns a new instance of the same class, but with
|
||||
its date and count fields updated."""
|
||||
return self.__class__(**dict(
|
||||
self,
|
||||
last_run_at=last_run_at or self._default_now(),
|
||||
total_run_count=self.total_run_count + 1,
|
||||
))
|
||||
__next__ = next = _next_instance # for 2to3
|
||||
|
||||
def update(self, other):
|
||||
"""Update values from another entry.
|
||||
|
||||
Only updates the "editable" fields (task, schedule, args, kwargs,
|
||||
options).
|
||||
|
||||
"""
|
||||
self.__dict__.update({'task': other.task, 'schedule': other.schedule,
|
||||
'args': other.args, 'kwargs': other.kwargs,
|
||||
'options': other.options})
|
||||
|
||||
def is_due(self):
|
||||
"""See :meth:`~celery.schedule.schedule.is_due`."""
|
||||
return self.schedule.is_due(self.last_run_at)
|
||||
|
||||
def __iter__(self):
|
||||
return vars(self).iteritems()
|
||||
|
||||
def __repr__(self):
|
||||
return '<Entry: %s %s {%s}' % (
|
||||
self.name,
|
||||
reprcall(self.task, self.args or (), self.kwargs or {}),
|
||||
self.schedule,
|
||||
)
|
||||
|
||||
|
||||
class Scheduler(object):
|
||||
"""Scheduler for periodic tasks.
|
||||
|
||||
:keyword schedule: see :attr:`schedule`.
|
||||
:keyword max_interval: see :attr:`max_interval`.
|
||||
|
||||
"""
|
||||
|
||||
Entry = ScheduleEntry
|
||||
|
||||
#: The schedule dict/shelve.
|
||||
schedule = None
|
||||
|
||||
#: Maximum time to sleep between re-checking the schedule.
|
||||
max_interval = DEFAULT_MAX_INTERVAL
|
||||
|
||||
#: How often to sync the schedule (3 minutes by default)
|
||||
sync_every = 3 * 60
|
||||
|
||||
_last_sync = None
|
||||
|
||||
logger = logger # compat
|
||||
|
||||
def __init__(self, schedule=None, max_interval=None,
|
||||
app=None, Publisher=None, lazy=False, **kwargs):
|
||||
app = self.app = app_or_default(app)
|
||||
self.data = maybe_promise({} if schedule is None else schedule)
|
||||
self.max_interval = (max_interval
|
||||
or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
|
||||
or self.max_interval)
|
||||
self.Publisher = Publisher or app.amqp.TaskProducer
|
||||
if not lazy:
|
||||
self.setup_schedule()
|
||||
|
||||
def install_default_entries(self, data):
|
||||
entries = {}
|
||||
if self.app.conf.CELERY_TASK_RESULT_EXPIRES and \
|
||||
not self.app.backend.supports_autoexpire:
|
||||
if 'celery.backend_cleanup' not in data:
|
||||
entries['celery.backend_cleanup'] = {
|
||||
'task': 'celery.backend_cleanup',
|
||||
'schedule': crontab('0', '4', '*'),
|
||||
'options': {'expires': 12 * 3600}}
|
||||
self.update_from_dict(entries)
|
||||
|
||||
def maybe_due(self, entry, publisher=None):
|
||||
is_due, next_time_to_run = entry.is_due()
|
||||
|
||||
if is_due:
|
||||
info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
|
||||
try:
|
||||
result = self.apply_async(entry, publisher=publisher)
|
||||
except Exception, exc:
|
||||
error('Message Error: %s\n%s',
|
||||
exc, traceback.format_stack(), exc_info=True)
|
||||
else:
|
||||
debug('%s sent. id->%s', entry.task, result.id)
|
||||
return next_time_to_run
|
||||
|
||||
def tick(self):
|
||||
"""Run a tick, that is one iteration of the scheduler.
|
||||
|
||||
Executes all due tasks.
|
||||
|
||||
"""
|
||||
remaining_times = []
|
||||
try:
|
||||
for entry in self.schedule.itervalues():
|
||||
next_time_to_run = self.maybe_due(entry, self.publisher)
|
||||
if next_time_to_run:
|
||||
remaining_times.append(next_time_to_run)
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
return min(remaining_times + [self.max_interval])
|
||||
|
||||
def should_sync(self):
|
||||
return (not self._last_sync or
|
||||
(time.time() - self._last_sync) > self.sync_every)
|
||||
|
||||
def reserve(self, entry):
|
||||
new_entry = self.schedule[entry.name] = entry.next()
|
||||
return new_entry
|
||||
|
||||
def apply_async(self, entry, publisher=None, **kwargs):
|
||||
# Update timestamps and run counts before we actually execute,
|
||||
# so we have that done if an exception is raised (doesn't schedule
|
||||
# forever.)
|
||||
entry = self.reserve(entry)
|
||||
task = self.app.tasks.get(entry.task)
|
||||
|
||||
try:
|
||||
if task:
|
||||
result = task.apply_async(entry.args, entry.kwargs,
|
||||
publisher=publisher,
|
||||
**entry.options)
|
||||
else:
|
||||
result = self.send_task(entry.task, entry.args, entry.kwargs,
|
||||
publisher=publisher,
|
||||
**entry.options)
|
||||
except Exception, exc:
|
||||
raise SchedulingError, SchedulingError(
|
||||
"Couldn't apply scheduled task %s: %s" % (
|
||||
entry.name, exc)), sys.exc_info()[2]
|
||||
finally:
|
||||
if self.should_sync():
|
||||
self._do_sync()
|
||||
return result
|
||||
|
||||
def send_task(self, *args, **kwargs):
|
||||
return self.app.send_task(*args, **kwargs)
|
||||
|
||||
def setup_schedule(self):
|
||||
self.install_default_entries(self.data)
|
||||
|
||||
def _do_sync(self):
|
||||
try:
|
||||
debug('Celerybeat: Synchronizing schedule...')
|
||||
self.sync()
|
||||
finally:
|
||||
self._last_sync = time.time()
|
||||
|
||||
def sync(self):
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
self.sync()
|
||||
|
||||
def add(self, **kwargs):
|
||||
entry = self.Entry(**kwargs)
|
||||
self.schedule[entry.name] = entry
|
||||
return entry
|
||||
|
||||
def _maybe_entry(self, name, entry):
|
||||
if isinstance(entry, self.Entry):
|
||||
return entry
|
||||
return self.Entry(**dict(entry, name=name))
|
||||
|
||||
def update_from_dict(self, dict_):
|
||||
self.schedule.update(dict(
|
||||
(name, self._maybe_entry(name, entry))
|
||||
for name, entry in dict_.items()))
|
||||
|
||||
def merge_inplace(self, b):
|
||||
schedule = self.schedule
|
||||
A, B = set(schedule), set(b)
|
||||
|
||||
# Remove items from disk not in the schedule anymore.
|
||||
for key in A ^ B:
|
||||
schedule.pop(key, None)
|
||||
|
||||
# Update and add new items in the schedule
|
||||
for key in B:
|
||||
entry = self.Entry(**dict(b[key], name=key))
|
||||
if schedule.get(key):
|
||||
schedule[key].update(entry)
|
||||
else:
|
||||
schedule[key] = entry
|
||||
|
||||
def _ensure_connected(self):
|
||||
# callback called for each retry while the connection
|
||||
# can't be established.
|
||||
def _error_handler(exc, interval):
|
||||
error('Celerybeat: Connection error: %s. '
|
||||
'Trying again in %s seconds...', exc, interval)
|
||||
|
||||
return self.connection.ensure_connection(
|
||||
_error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES
|
||||
)
|
||||
|
||||
def get_schedule(self):
|
||||
return self.data
|
||||
|
||||
def set_schedule(self, schedule):
|
||||
self.data = schedule
|
||||
schedule = property(get_schedule, set_schedule)
|
||||
|
||||
@cached_property
|
||||
def connection(self):
|
||||
return self.app.connection()
|
||||
|
||||
@cached_property
|
||||
def publisher(self):
|
||||
return self.Publisher(self._ensure_connected())
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
return ''
|
||||
|
||||
|
||||
class PersistentScheduler(Scheduler):
|
||||
persistence = shelve
|
||||
known_suffixes = ('', '.db', '.dat', '.bak', '.dir')
|
||||
|
||||
_store = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.schedule_filename = kwargs.get('schedule_filename')
|
||||
Scheduler.__init__(self, *args, **kwargs)
|
||||
|
||||
def _remove_db(self):
|
||||
for suffix in self.known_suffixes:
|
||||
with platforms.ignore_errno(errno.ENOENT):
|
||||
os.remove(self.schedule_filename + suffix)
|
||||
|
||||
def setup_schedule(self):
|
||||
try:
|
||||
self._store = self.persistence.open(self.schedule_filename,
|
||||
writeback=True)
|
||||
entries = self._store.setdefault('entries', {})
|
||||
except Exception, exc:
|
||||
error('Removing corrupted schedule file %r: %r',
|
||||
self.schedule_filename, exc, exc_info=True)
|
||||
self._remove_db()
|
||||
self._store = self.persistence.open(self.schedule_filename,
|
||||
writeback=True)
|
||||
else:
|
||||
if '__version__' not in self._store:
|
||||
warning('Reset: Account for new __version__ field')
|
||||
self._store.clear() # remove schedule at 2.2.2 upgrade.
|
||||
if 'tz' not in self._store:
|
||||
warning('Reset: Account for new tz field')
|
||||
self._store.clear() # remove schedule at 3.0.8 upgrade
|
||||
if 'utc_enabled' not in self._store:
|
||||
warning('Reset: Account for new utc_enabled field')
|
||||
self._store.clear() # remove schedule at 3.0.9 upgrade
|
||||
|
||||
tz = self.app.conf.CELERY_TIMEZONE
|
||||
stored_tz = self._store.get('tz')
|
||||
if stored_tz is not None and stored_tz != tz:
|
||||
warning('Reset: Timezone changed from %r to %r', stored_tz, tz)
|
||||
self._store.clear() # Timezone changed, reset db!
|
||||
utc = self.app.conf.CELERY_ENABLE_UTC
|
||||
stored_utc = self._store.get('utc_enabled')
|
||||
if stored_utc is not None and stored_utc != utc:
|
||||
choices = {True: 'enabled', False: 'disabled'}
|
||||
warning('Reset: UTC changed from %s to %s',
|
||||
choices[stored_utc], choices[utc])
|
||||
self._store.clear() # UTC setting changed, reset db!
|
||||
entries = self._store.setdefault('entries', {})
|
||||
self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
|
||||
self.install_default_entries(self.schedule)
|
||||
self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
|
||||
self.sync()
|
||||
debug('Current schedule:\n' + '\n'.join(
|
||||
repr(entry) for entry in entries.itervalues()))
|
||||
|
||||
def get_schedule(self):
|
||||
return self._store['entries']
|
||||
|
||||
def set_schedule(self, schedule):
|
||||
self._store['entries'] = schedule
|
||||
schedule = property(get_schedule, set_schedule)
|
||||
|
||||
def sync(self):
|
||||
if self._store is not None:
|
||||
self._store.sync()
|
||||
|
||||
def close(self):
|
||||
self.sync()
|
||||
self._store.close()
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
return ' . db -> %s' % (self.schedule_filename, )
|
||||
|
||||
|
||||
class Service(object):
|
||||
scheduler_cls = PersistentScheduler
|
||||
|
||||
def __init__(self, max_interval=None, schedule_filename=None,
|
||||
scheduler_cls=None, app=None):
|
||||
app = self.app = app_or_default(app)
|
||||
self.max_interval = (max_interval
|
||||
or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL)
|
||||
self.scheduler_cls = scheduler_cls or self.scheduler_cls
|
||||
self.schedule_filename = (
|
||||
schedule_filename or app.conf.CELERYBEAT_SCHEDULE_FILENAME)
|
||||
|
||||
self._is_shutdown = Event()
|
||||
self._is_stopped = Event()
|
||||
|
||||
def __reduce__(self):
|
||||
return self.__class__, (self.max_interval, self.schedule_filename,
|
||||
self.scheduler_cls, self.app)
|
||||
|
||||
def start(self, embedded_process=False):
|
||||
info('Celerybeat: Starting...')
|
||||
debug('Celerybeat: Ticking with max interval->%s',
|
||||
humanize_seconds(self.scheduler.max_interval))
|
||||
|
||||
signals.beat_init.send(sender=self)
|
||||
if embedded_process:
|
||||
signals.beat_embedded_init.send(sender=self)
|
||||
platforms.set_process_title('celerybeat')
|
||||
|
||||
try:
|
||||
while not self._is_shutdown.is_set():
|
||||
interval = self.scheduler.tick()
|
||||
debug('Celerybeat: Waking up %s.',
|
||||
humanize_seconds(interval, prefix='in '))
|
||||
time.sleep(interval)
|
||||
except (KeyboardInterrupt, SystemExit):
|
||||
self._is_shutdown.set()
|
||||
finally:
|
||||
self.sync()
|
||||
|
||||
def sync(self):
|
||||
self.scheduler.close()
|
||||
self._is_stopped.set()
|
||||
|
||||
def stop(self, wait=False):
|
||||
info('Celerybeat: Shutting down...')
|
||||
self._is_shutdown.set()
|
||||
wait and self._is_stopped.wait() # block until shutdown done.
|
||||
|
||||
def get_scheduler(self, lazy=False):
|
||||
filename = self.schedule_filename
|
||||
scheduler = instantiate(self.scheduler_cls,
|
||||
app=self.app,
|
||||
schedule_filename=filename,
|
||||
max_interval=self.max_interval,
|
||||
lazy=lazy)
|
||||
return scheduler
|
||||
|
||||
@cached_property
|
||||
def scheduler(self):
|
||||
return self.get_scheduler()
|
||||
|
||||
|
||||
class _Threaded(Thread):
|
||||
"""Embedded task scheduler using threading."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(_Threaded, self).__init__()
|
||||
self.service = Service(*args, **kwargs)
|
||||
self.daemon = True
|
||||
self.name = 'Beat'
|
||||
|
||||
def run(self):
|
||||
self.service.start()
|
||||
|
||||
def stop(self):
|
||||
self.service.stop(wait=True)
|
||||
|
||||
|
||||
try:
|
||||
ensure_multiprocessing()
|
||||
except NotImplementedError: # pragma: no cover
|
||||
_Process = None
|
||||
else:
|
||||
class _Process(Process): # noqa
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(_Process, self).__init__()
|
||||
self.service = Service(*args, **kwargs)
|
||||
self.name = 'Beat'
|
||||
|
||||
def run(self):
|
||||
platforms.signals.reset('SIGTERM')
|
||||
self.service.start(embedded_process=True)
|
||||
|
||||
def stop(self):
|
||||
self.service.stop()
|
||||
self.terminate()
|
||||
|
||||
|
||||
def EmbeddedService(*args, **kwargs):
|
||||
"""Return embedded clock service.
|
||||
|
||||
:keyword thread: Run threaded instead of as a separate process.
|
||||
Default is :const:`False`.
|
||||
|
||||
"""
|
||||
if kwargs.pop('thread', False) or _Process is None:
|
||||
# Need short max interval to be able to stop thread
|
||||
# in reasonable time.
|
||||
kwargs.setdefault('max_interval', 1)
|
||||
return _Threaded(*args, **kwargs)
|
||||
return _Process(*args, **kwargs)
|
||||
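A sketch of the kind of CELERYBEAT_SCHEDULE dict that merge_inplace() and update_from_dict() above consume; the 'tasks.add' entry and its timings are hypothetical, while the cleanup entry mirrors the default installed by install_default_entries():

    from datetime import timedelta
    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'add-every-30-seconds': {
            'task': 'tasks.add',                  # hypothetical task name
            'schedule': timedelta(seconds=30),
            'args': (2, 2),
        },
        'nightly-backend-cleanup': {
            'task': 'celery.backend_cleanup',
            'schedule': crontab(hour=4, minute=0),
            'options': {'expires': 12 * 3600},
        },
    }

Each entry maps onto a ScheduleEntry, so the allowed keys are the fields listed above: task, schedule, args, kwargs and options.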
0
awx/lib/site-packages/celery/bin/__init__.py
Normal file
392
awx/lib/site-packages/celery/bin/base.py
Normal file
@@ -0,0 +1,392 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
.. _preload-options:
|
||||
|
||||
Preload Options
|
||||
---------------
|
||||
|
||||
These options are supported by all commands,
|
||||
and usually parsed before command-specific arguments.
|
||||
|
||||
.. cmdoption:: -A, --app
|
||||
|
||||
app instance to use (e.g. module.attr_name)
|
||||
|
||||
.. cmdoption:: -b, --broker
|
||||
|
||||
url to broker. default is 'amqp://guest@localhost//'
|
||||
|
||||
.. cmdoption:: --loader
|
||||
|
||||
name of custom loader class to use.
|
||||
|
||||
.. cmdoption:: --config
|
||||
|
||||
Name of the configuration module
|
||||
|
||||
.. _daemon-options:
|
||||
|
||||
Daemon Options
|
||||
--------------
|
||||
|
||||
These options are supported by commands that can detach
|
||||
into the background (daemon). They will be present
|
||||
in any command that also has a `--detach` option.
|
||||
|
||||
.. cmdoption:: -f, --logfile
|
||||
|
||||
Path to log file. If no logfile is specified, `stderr` is used.
|
||||
|
||||
.. cmdoption:: --pidfile
|
||||
|
||||
Optional file used to store the process pid.
|
||||
|
||||
The program will not start if this file already exists
|
||||
and the pid is still alive.
|
||||
|
||||
.. cmdoption:: --uid
|
||||
|
||||
User id, or user name of the user to run as after detaching.
|
||||
|
||||
.. cmdoption:: --gid
|
||||
|
||||
Group id, or group name of the main group to change to after
|
||||
detaching.
|
||||
|
||||
.. cmdoption:: --umask
|
||||
|
||||
Effective umask of the process after detaching. Default is 0.
|
||||
|
||||
.. cmdoption:: --workdir
|
||||
|
||||
Optional directory to change to after detaching.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from collections import defaultdict
|
||||
from optparse import OptionParser, IndentedHelpFormatter, make_option as Option
|
||||
from types import ModuleType
|
||||
|
||||
import celery
|
||||
from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
|
||||
from celery.platforms import EX_FAILURE, EX_USAGE, maybe_patch_concurrency
|
||||
from celery.utils import text
|
||||
from celery.utils.imports import symbol_by_name, import_from_cwd
|
||||
|
||||
# always enable DeprecationWarnings, so our users can see them.
|
||||
for warning in (CDeprecationWarning, CPendingDeprecationWarning):
|
||||
warnings.simplefilter('once', warning, 0)
|
||||
|
||||
ARGV_DISABLED = """
|
||||
Unrecognized command line arguments: %s
|
||||
|
||||
Try --help?
|
||||
"""
|
||||
|
||||
find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)')
|
||||
find_rst_ref = re.compile(r':\w+:`(.+?)`')
|
||||
|
||||
|
||||
class HelpFormatter(IndentedHelpFormatter):
|
||||
|
||||
def format_epilog(self, epilog):
|
||||
if epilog:
|
||||
return '\n%s\n\n' % epilog
|
||||
return ''
|
||||
|
||||
def format_description(self, description):
|
||||
return text.ensure_2lines(text.fill_paragraphs(
|
||||
text.dedent(description), self.width))
|
||||
|
||||
|
||||
class Command(object):
|
||||
"""Base class for command line applications.
|
||||
|
||||
:keyword app: The current app.
|
||||
:keyword get_app: Callable returning the current app if no app provided.
|
||||
|
||||
"""
|
||||
Parser = OptionParser
|
||||
|
||||
#: Arg list used in help.
|
||||
args = ''
|
||||
|
||||
#: Application version.
|
||||
version = celery.VERSION_BANNER
|
||||
|
||||
#: If false the parser will raise an exception if positional
|
||||
#: args are provided.
|
||||
supports_args = True
|
||||
|
||||
#: List of options (without preload options).
|
||||
option_list = ()
|
||||
|
||||
# module Rst documentation to parse help from (if any)
|
||||
doc = None
|
||||
|
||||
# Some programs (multi) do not want to load the app specified
|
||||
# (Issue #1008).
|
||||
respects_app_option = True
|
||||
|
||||
#: List of options to parse before parsing other options.
|
||||
preload_options = (
|
||||
Option('-A', '--app', default=None),
|
||||
Option('-b', '--broker', default=None),
|
||||
Option('--loader', default=None),
|
||||
Option('--config', default=None),
|
||||
Option('--workdir', default=None, dest='working_directory'),
|
||||
)
|
||||
|
||||
#: Enable if the application should support config from the cmdline.
|
||||
enable_config_from_cmdline = False
|
||||
|
||||
#: Default configuration namespace.
|
||||
namespace = 'celery'
|
||||
|
||||
#: Text to print at end of --help
|
||||
epilog = None
|
||||
|
||||
#: Text to print in --help before option list.
|
||||
description = ''
|
||||
|
||||
#: Set to true if this command doesn't have subcommands
|
||||
leaf = True
|
||||
|
||||
def __init__(self, app=None, get_app=None):
|
||||
self.app = app
|
||||
self.get_app = get_app or self._get_default_app
|
||||
|
||||
def run(self, *args, **options):
|
||||
"""This is the body of the command called by :meth:`handle_argv`."""
|
||||
raise NotImplementedError('subclass responsibility')
|
||||
|
||||
def execute_from_commandline(self, argv=None):
|
||||
"""Execute application from command line.
|
||||
|
||||
:keyword argv: The list of command line arguments.
|
||||
Defaults to ``sys.argv``.
|
||||
|
||||
"""
|
||||
if argv is None:
|
||||
argv = list(sys.argv)
|
||||
# Should we load any special concurrency environment?
|
||||
self.maybe_patch_concurrency(argv)
|
||||
self.on_concurrency_setup()
|
||||
|
||||
# Dump version and exit if '--version' arg set.
|
||||
self.early_version(argv)
|
||||
argv = self.setup_app_from_commandline(argv)
|
||||
prog_name = os.path.basename(argv[0])
|
||||
return self.handle_argv(prog_name, argv[1:])
|
||||
|
||||
def run_from_argv(self, prog_name, argv=None):
|
||||
return self.handle_argv(prog_name, sys.argv if argv is None else argv)
|
||||
|
||||
def maybe_patch_concurrency(self, argv=None):
|
||||
argv = argv or sys.argv
|
||||
pool_option = self.with_pool_option(argv)
|
||||
if pool_option:
|
||||
maybe_patch_concurrency(argv, *pool_option)
|
||||
short_opts, long_opts = pool_option
|
||||
|
||||
def on_concurrency_setup(self):
|
||||
pass
|
||||
|
||||
def usage(self, command):
|
||||
"""Returns the command line usage string for this app."""
|
||||
return '%%prog [options] %s' % (self.args, )
|
||||
|
||||
def get_options(self):
|
||||
"""Get supported command line options."""
|
||||
return self.option_list
|
||||
|
||||
def expanduser(self, value):
|
||||
if isinstance(value, basestring):
|
||||
return os.path.expanduser(value)
|
||||
return value
|
||||
|
||||
def handle_argv(self, prog_name, argv):
|
||||
"""Parses command line arguments from ``argv`` and dispatches
|
||||
to :meth:`run`.
|
||||
|
||||
:param prog_name: The program name (``argv[0]``).
|
||||
:param argv: Command arguments.
|
||||
|
||||
Exits with an error message if :attr:`supports_args` is disabled
|
||||
and ``argv`` contains positional arguments.
|
||||
|
||||
"""
|
||||
options, args = self.prepare_args(*self.parse_options(prog_name, argv))
|
||||
return self.run(*args, **options)
|
||||
|
||||
def prepare_args(self, options, args):
|
||||
if options:
|
||||
options = dict((k, self.expanduser(v))
|
||||
for k, v in vars(options).iteritems()
|
||||
if not k.startswith('_'))
|
||||
args = [self.expanduser(arg) for arg in args]
|
||||
self.check_args(args)
|
||||
return options, args
|
||||
|
||||
def check_args(self, args):
|
||||
if not self.supports_args and args:
|
||||
self.die(ARGV_DISABLED % (', '.join(args, )), EX_USAGE)
|
||||
|
||||
def die(self, msg, status=EX_FAILURE):
|
||||
sys.stderr.write(msg + '\n')
|
||||
sys.exit(status)
|
||||
|
||||
def early_version(self, argv):
|
||||
if '--version' in argv:
|
||||
sys.stdout.write('%s\n' % self.version)
|
||||
sys.exit(0)
|
||||
|
||||
def parse_options(self, prog_name, arguments):
|
||||
"""Parse the available options."""
|
||||
# Don't want to load configuration to just print the version,
|
||||
# so we handle --version manually here.
|
||||
parser = self.create_parser(prog_name)
|
||||
return parser.parse_args(arguments)
|
||||
|
||||
def create_parser(self, prog_name, command=None):
|
||||
return self.prepare_parser(self.Parser(
|
||||
prog=prog_name,
|
||||
usage=self.usage(command),
|
||||
version=self.version,
|
||||
epilog=self.epilog,
|
||||
formatter=HelpFormatter(),
|
||||
description=self.description,
|
||||
option_list=(self.preload_options + self.get_options())))
|
||||
|
||||
def prepare_parser(self, parser):
|
||||
docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc]
|
||||
for doc in docs:
|
||||
for long_opt, help in doc.iteritems():
|
||||
option = parser.get_option(long_opt)
|
||||
if option is not None:
|
||||
option.help = ' '.join(help) % {'default': option.default}
|
||||
return parser
|
||||
|
||||
def setup_app_from_commandline(self, argv):
|
||||
preload_options = self.parse_preload_options(argv)
|
||||
workdir = preload_options.get('working_directory')
|
||||
if workdir:
|
||||
os.chdir(workdir)
|
||||
app = (preload_options.get('app') or
|
||||
os.environ.get('CELERY_APP') or
|
||||
self.app)
|
||||
preload_loader = preload_options.get('loader')
|
||||
if preload_loader:
|
||||
# Default app takes loader from this env (Issue #1066).
|
||||
os.environ['CELERY_LOADER'] = preload_loader
|
||||
loader = (preload_loader,
|
||||
os.environ.get('CELERY_LOADER') or
|
||||
'default')
|
||||
broker = preload_options.get('broker', None)
|
||||
if broker:
|
||||
os.environ['CELERY_BROKER_URL'] = broker
|
||||
config = preload_options.get('config')
|
||||
if config:
|
||||
os.environ['CELERY_CONFIG_MODULE'] = config
|
||||
if self.respects_app_option:
|
||||
if app and self.respects_app_option:
|
||||
self.app = self.find_app(app)
|
||||
elif self.app is None:
|
||||
self.app = self.get_app(loader=loader)
|
||||
if self.enable_config_from_cmdline:
|
||||
argv = self.process_cmdline_config(argv)
|
||||
else:
|
||||
self.app = celery.Celery()
|
||||
return argv
|
||||
|
||||
def find_app(self, app):
|
||||
try:
|
||||
sym = self.symbol_by_name(app)
|
||||
except AttributeError:
|
||||
# last part was not an attribute, but a module
|
||||
sym = import_from_cwd(app)
|
||||
if isinstance(sym, ModuleType):
|
||||
if getattr(sym, '__path__', None):
|
||||
return self.find_app('%s.celery:' % (app.replace(':', ''), ))
|
||||
return sym.celery
|
||||
return sym
|
||||
|
||||
def symbol_by_name(self, name):
|
||||
return symbol_by_name(name, imp=import_from_cwd)
|
||||
get_cls_by_name = symbol_by_name # XXX compat
|
||||
|
||||
def process_cmdline_config(self, argv):
|
||||
try:
|
||||
cargs_start = argv.index('--')
|
||||
except ValueError:
|
||||
return argv
|
||||
argv, cargs = argv[:cargs_start], argv[cargs_start + 1:]
|
||||
self.app.config_from_cmdline(cargs, namespace=self.namespace)
|
||||
return argv
|
||||
|
||||
def parse_preload_options(self, args):
|
||||
acc = {}
|
||||
opts = {}
|
||||
for opt in self.preload_options:
|
||||
for t in (opt._long_opts, opt._short_opts):
|
||||
opts.update(dict(zip(t, [opt.dest] * len(t))))
|
||||
index = 0
|
||||
length = len(args)
|
||||
while index < length:
|
||||
arg = args[index]
|
||||
if arg.startswith('--') and '=' in arg:
|
||||
key, value = arg.split('=', 1)
|
||||
dest = opts.get(key)
|
||||
if dest:
|
||||
acc[dest] = value
|
||||
elif arg.startswith('-'):
|
||||
dest = opts.get(arg)
|
||||
if dest:
|
||||
acc[dest] = args[index + 1]
|
||||
index += 1
|
||||
index += 1
|
||||
return acc
|
||||
|
||||
def parse_doc(self, doc):
|
||||
options, in_option = defaultdict(list), None
|
||||
for line in doc.splitlines():
|
||||
if line.startswith('.. cmdoption::'):
|
||||
m = find_long_opt.match(line)
|
||||
if m:
|
||||
in_option = m.groups()[0].strip()
|
||||
assert in_option, 'missing long opt'
|
||||
elif in_option and line.startswith(' ' * 4):
|
||||
options[in_option].append(
|
||||
find_rst_ref.sub(r'\1', line.strip()).replace('`', ''))
|
||||
return options
|
||||
|
||||
def with_pool_option(self, argv):
|
||||
"""Returns tuple of ``(short_opts, long_opts)`` if the command
|
||||
supports a pool argument, and used to monkey patch eventlet/gevent
|
||||
environments as early as possible.
|
||||
|
||||
E.g::
|
||||
has_pool_option = (['-P'], ['--pool'])
|
||||
"""
|
||||
pass
|
||||
|
||||
def _get_default_app(self, *args, **kwargs):
|
||||
from celery._state import get_current_app
|
||||
return get_current_app() # omit proxy
|
||||
|
||||
|
||||
def daemon_options(default_pidfile=None, default_logfile=None):
|
||||
return (
|
||||
Option('-f', '--logfile', default=default_logfile),
|
||||
Option('--pidfile', default=default_pidfile),
|
||||
Option('--uid', default=None),
|
||||
Option('--gid', default=None),
|
||||
Option('--umask', default=0, type='int'),
|
||||
)
|
||||
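A minimal sketch of subclassing the Command base class above; the subcommand name, option and greeting are made up for illustration and are not part of celery:

    from celery.bin.base import Command, Option


    class hello(Command):
        """Print a greeting (illustrative example only)."""

        option_list = (
            Option('--to', default='world'),
        )

        def run(self, *args, **options):
            # options contains the parsed --to value plus the preload options.
            print('hello %s' % (options['to'], ))


    if __name__ == '__main__':
        hello().execute_from_commandline()

execute_from_commandline() handles --app/--broker/--config preload options, builds the OptionParser from option_list, and finally dispatches to run().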
384
awx/lib/site-packages/celery/bin/camqadm.py
Normal file
@@ -0,0 +1,384 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
The :program:`celery amqp` command.
|
||||
|
||||
.. program:: celery amqp
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import cmd
|
||||
import sys
|
||||
import shlex
|
||||
import pprint
|
||||
|
||||
from itertools import count
|
||||
|
||||
try:
|
||||
import amqp
|
||||
except ImportError:
|
||||
from amqplib import client_0_8 as amqp # noqa
|
||||
|
||||
from celery.app import app_or_default
|
||||
from celery.utils.functional import padlist
|
||||
|
||||
from celery.bin.base import Command
|
||||
from celery.utils import strtobool
|
||||
|
||||
# Map to coerce strings to other types.
|
||||
COERCE = {bool: strtobool}
|
||||
|
||||
HELP_HEADER = """
|
||||
Commands
|
||||
--------
|
||||
""".rstrip()
|
||||
|
||||
EXAMPLE_TEXT = """
|
||||
Example:
|
||||
-> queue.delete myqueue yes no
|
||||
"""
|
||||
|
||||
|
||||
def say(m, fh=sys.stderr):
|
||||
fh.write('%s\n' % (m, ))
|
||||
|
||||
|
||||
class Spec(object):
|
||||
"""AMQP Command specification.
|
||||
|
||||
Used to convert arguments to Python values and display various help
|
||||
and tooltips.
|
||||
|
||||
:param args: see :attr:`args`.
|
||||
:keyword returns: see :attr:`returns`.
|
||||
|
||||
.. attribute:: args
|
||||
|
||||
List of arguments this command takes. Should
|
||||
contain `(argument_name, argument_type)` tuples.
|
||||
|
||||
.. attribute:: returns
|
||||
|
||||
Helpful human string representation of what this command returns.
|
||||
May be :const:`None`, to signify the return type is unknown.
|
||||
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.args = args
|
||||
self.returns = kwargs.get('returns')
|
||||
|
||||
def coerce(self, index, value):
|
||||
"""Coerce value for argument at index.
|
||||
|
||||
E.g. if :attr:`args` is `[('is_active', bool)]`:
|
||||
|
||||
>>> coerce(0, 'False')
|
||||
False
|
||||
|
||||
"""
|
||||
arg_info = self.args[index]
|
||||
arg_type = arg_info[1]
|
||||
# Might be a custom way to coerce the string value,
|
||||
# so look in the coercion map.
|
||||
return COERCE.get(arg_type, arg_type)(value)
|
||||
|
||||
def str_args_to_python(self, arglist):
|
||||
"""Process list of string arguments to values according to spec.
|
||||
|
||||
e.g:
|
||||
|
||||
>>> spec = Spec(('queue', str), ('if_unused', bool))
|
||||
>>> spec.str_args_to_python(['pobox', 'true'])
|
||||
('pobox', True)
|
||||
|
||||
"""
|
||||
return tuple(
|
||||
self.coerce(index, value) for index, value in enumerate(arglist))
|
||||
|
||||
def format_response(self, response):
|
||||
"""Format the return value of this command in a human-friendly way."""
|
||||
if not self.returns:
|
||||
if response is None:
|
||||
return 'ok.'
|
||||
return response
|
||||
if callable(self.returns):
|
||||
return self.returns(response)
|
||||
return self.returns % (response, )
|
||||
|
||||
def format_arg(self, name, type, default_value=None):
|
||||
if default_value is not None:
|
||||
return '%s:%s' % (name, default_value)
|
||||
return name
|
||||
|
||||
def format_signature(self):
|
||||
return ' '.join(self.format_arg(*padlist(list(arg), 3))
|
||||
for arg in self.args)
|
||||
|
||||
|
||||
def dump_message(message):
|
||||
if message is None:
|
||||
return 'No messages in queue. basic.publish something.'
|
||||
return {'body': message.body,
|
||||
'properties': message.properties,
|
||||
'delivery_info': message.delivery_info}
|
||||
|
||||
|
||||
def format_declare_queue(ret):
|
||||
return 'ok. queue:%s messages:%s consumers:%s.' % ret
|
||||
|
||||
|
||||
class AMQShell(cmd.Cmd):
|
||||
"""AMQP API Shell.
|
||||
|
||||
:keyword connect: Function used to connect to the server, must return
|
||||
connection object.
|
||||
|
||||
:keyword silent: If :const:`True`, the commands won't have annoying
|
||||
output not relevant when running in non-shell mode.
|
||||
|
||||
|
||||
.. attribute:: builtins
|
||||
|
||||
Mapping of built-in command names -> method names
|
||||
|
||||
.. attribute:: amqp
|
||||
|
||||
Mapping of AMQP API commands and their :class:`Spec`.
|
||||
|
||||
"""
|
||||
conn = None
|
||||
chan = None
|
||||
prompt_fmt = '%d> '
|
||||
identchars = cmd.IDENTCHARS = '.'
|
||||
needs_reconnect = False
|
||||
counter = 1
|
||||
inc_counter = count(2).next
|
||||
|
||||
builtins = {'EOF': 'do_exit',
|
||||
'exit': 'do_exit',
|
||||
'help': 'do_help'}
|
||||
|
||||
amqp = {
|
||||
'exchange.declare': Spec(('exchange', str),
|
||||
('type', str),
|
||||
('passive', bool, 'no'),
|
||||
('durable', bool, 'no'),
|
||||
('auto_delete', bool, 'no'),
|
||||
('internal', bool, 'no')),
|
||||
'exchange.delete': Spec(('exchange', str),
|
||||
('if_unused', bool)),
|
||||
'queue.bind': Spec(('queue', str),
|
||||
('exchange', str),
|
||||
('routing_key', str)),
|
||||
'queue.declare': Spec(('queue', str),
|
||||
('passive', bool, 'no'),
|
||||
('durable', bool, 'no'),
|
||||
('exclusive', bool, 'no'),
|
||||
('auto_delete', bool, 'no'),
|
||||
returns=format_declare_queue),
|
||||
'queue.delete': Spec(('queue', str),
|
||||
('if_unused', bool, 'no'),
|
||||
('if_empty', bool, 'no'),
|
||||
returns='ok. %d messages deleted.'),
|
||||
'queue.purge': Spec(('queue', str),
|
||||
returns='ok. %d messages deleted.'),
|
||||
'basic.get': Spec(('queue', str),
|
||||
('no_ack', bool, 'off'),
|
||||
returns=dump_message),
|
||||
'basic.publish': Spec(('msg', amqp.Message),
|
||||
('exchange', str),
|
||||
('routing_key', str),
|
||||
('mandatory', bool, 'no'),
|
||||
('immediate', bool, 'no')),
|
||||
'basic.ack': Spec(('delivery_tag', int)),
|
||||
}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.connect = kwargs.pop('connect')
|
||||
self.silent = kwargs.pop('silent', False)
|
||||
self.out = kwargs.pop('out', sys.stderr)
|
||||
cmd.Cmd.__init__(self, *args, **kwargs)
|
||||
self._reconnect()
|
||||
|
||||
def note(self, m):
|
||||
"""Say something to the user. Disabled if :attr:`silent`."""
|
||||
if not self.silent:
|
||||
say(m, fh=self.out)
|
||||
|
||||
def say(self, m):
|
||||
say(m, fh=self.out)
|
||||
|
||||
def get_amqp_api_command(self, cmd, arglist):
|
||||
"""With a command name and a list of arguments, convert the arguments
|
||||
to Python values and find the corresponding method on the AMQP channel
|
||||
object.
|
||||
|
||||
:returns: tuple of `(method, processed_args)`.
|
||||
|
||||
Example:
|
||||
|
||||
>>> get_amqp_api_command('queue.delete', ['pobox', 'yes', 'no'])
|
||||
(<bound method Channel.queue_delete of
|
||||
<amqp.channel.Channel object at 0x...>>,
|
||||
('pobox', True, False))
|
||||
|
||||
"""
|
||||
spec = self.amqp[cmd]
|
||||
args = spec.str_args_to_python(arglist)
|
||||
attr_name = cmd.replace('.', '_')
|
||||
if self.needs_reconnect:
|
||||
self._reconnect()
|
||||
return getattr(self.chan, attr_name), args, spec.format_response
|
||||
|
||||
def do_exit(self, *args):
|
||||
"""The `'exit'` command."""
|
||||
self.note("\n-> please, don't leave!")
|
||||
sys.exit(0)
|
||||
|
||||
def display_command_help(self, cmd, short=False):
|
||||
spec = self.amqp[cmd]
|
||||
self.say('%s %s' % (cmd, spec.format_signature()))
|
||||
|
||||
def do_help(self, *args):
|
||||
if not args:
|
||||
self.say(HELP_HEADER)
|
||||
for cmd_name in self.amqp:
|
||||
self.display_command_help(cmd_name, short=True)
|
||||
self.say(EXAMPLE_TEXT)
|
||||
else:
|
||||
self.display_command_help(args[0])
|
||||
|
||||
def default(self, line):
|
||||
self.say("unknown syntax: '%s'. how about some 'help'?" % line)
|
||||
|
||||
def get_names(self):
|
||||
return set(self.builtins) | set(self.amqp)
|
||||
|
||||
def completenames(self, text, *ignored):
|
||||
"""Return all commands starting with `text`, for tab-completion."""
|
||||
names = self.get_names()
|
||||
first = [cmd for cmd in names
|
||||
if cmd.startswith(text.replace('_', '.'))]
|
||||
if first:
|
||||
return first
|
||||
return [cmd for cmd in names
|
||||
if cmd.partition('.')[2].startswith(text)]
|
||||
|
||||
def dispatch(self, cmd, argline):
|
||||
"""Dispatch and execute the command.
|
||||
|
||||
Lookup order is: :attr:`builtins` -> :attr:`amqp`.
|
||||
|
||||
"""
|
||||
arglist = shlex.split(argline)
|
||||
if cmd in self.builtins:
|
||||
return getattr(self, self.builtins[cmd])(*arglist)
|
||||
fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
|
||||
return formatter(fun(*args))
|
||||
|
||||
def parseline(self, line):
|
||||
"""Parse input line.
|
||||
|
||||
:returns: tuple of three items:
|
||||
`(command_name, arglist, original_line)`
|
||||
|
||||
E.g::
|
||||
|
||||
>>> parseline("queue.delete A 'B' C")
|
||||
('queue.delete', "A 'B' C", "queue.delete A 'B' C")
|
||||
|
||||
"""
|
||||
parts = line.split()
|
||||
if parts:
|
||||
return parts[0], ' '.join(parts[1:]), line
|
||||
return '', '', line
|
||||
|
||||
def onecmd(self, line):
|
||||
"""Parse line and execute command."""
|
||||
cmd, arg, line = self.parseline(line)
|
||||
if not line:
|
||||
return self.emptyline()
|
||||
if cmd is None:
|
||||
return self.default(line)
|
||||
self.lastcmd = line
|
||||
if cmd == '':
|
||||
return self.default(line)
|
||||
else:
|
||||
self.counter = self.inc_counter()
|
||||
try:
|
||||
self.respond(self.dispatch(cmd, arg))
|
||||
except (AttributeError, KeyError), exc:
|
||||
self.default(line)
|
||||
except Exception, exc:
|
||||
self.say(exc)
|
||||
self.needs_reconnect = True
|
||||
|
||||
def respond(self, retval):
|
||||
"""What to do with the return value of a command."""
|
||||
if retval is not None:
|
||||
if isinstance(retval, basestring):
|
||||
self.say(retval)
|
||||
else:
|
||||
self.say(pprint.pformat(retval))
|
||||
|
||||
def _reconnect(self):
|
||||
"""Re-establish connection to the AMQP server."""
|
||||
self.conn = self.connect(self.conn)
|
||||
self.chan = self.conn.default_channel
|
||||
self.needs_reconnect = False
|
||||
|
||||
@property
|
||||
def prompt(self):
|
||||
return self.prompt_fmt % self.counter
|
||||
|
||||
|
||||
class AMQPAdmin(object):
|
||||
"""The celery :program:`camqadm` utility."""
|
||||
Shell = AMQShell
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.app = app_or_default(kwargs.get('app'))
|
||||
self.out = kwargs.setdefault('out', sys.stderr)
|
||||
self.silent = kwargs.get('silent')
|
||||
self.args = args
|
||||
|
||||
def connect(self, conn=None):
|
||||
if conn:
|
||||
conn.close()
|
||||
conn = self.app.connection()
|
||||
self.note('-> connecting to %s.' % conn.as_uri())
|
||||
conn.connect()
|
||||
self.note('-> connected.')
|
||||
return conn
|
||||
|
||||
def run(self):
|
||||
shell = self.Shell(connect=self.connect, out=self.out)
|
||||
if self.args:
|
||||
return shell.onecmd(' '.join(self.args))
|
||||
try:
|
||||
return shell.cmdloop()
|
||||
except KeyboardInterrupt:
|
||||
self.note('(bibi)')
|
||||
pass
|
||||
|
||||
def note(self, m):
|
||||
if not self.silent:
|
||||
say(m, fh=self.out)
|
||||
|
||||
|
||||
class AMQPAdminCommand(Command):
|
||||
|
||||
def run(self, *args, **options):
|
||||
options['app'] = self.app
|
||||
return AMQPAdmin(*args, **options).run()
|
||||
|
||||
|
||||
def camqadm(*args, **options):
|
||||
AMQPAdmin(*args, **options).run()
|
||||
|
||||
|
||||
def main():
|
||||
AMQPAdminCommand().execute_from_commandline()
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
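As a rough, self-contained sketch of how the shell above turns textual arguments into an AMQP method call, the snippet below mimics the Spec-driven conversion; MiniSpec and str_to_bool are simplified stand-ins written for illustration, not the classes defined in camqadm.py itself.

    # Hypothetical, simplified version of the Spec argument conversion.
    def str_to_bool(s):
        return s.lower() in ('yes', 'true', '1', 'on')

    class MiniSpec(object):
        def __init__(self, *args):
            self.args = args  # tuples of (name, type[, default])

        def str_args_to_python(self, arglist):
            out = []
            for (name, type_), value in zip([a[:2] for a in self.args], arglist):
                out.append(str_to_bool(value) if type_ is bool else type_(value))
            return tuple(out)

    # 'queue.delete pobox yes no' becomes ('pobox', True, False), which is
    # then passed on to something like channel.queue_delete(*args).
    spec = MiniSpec(('queue', str), ('if_unused', bool), ('if_empty', bool))
    print(spec.str_args_to_python(['pobox', 'yes', 'no']))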
963
awx/lib/site-packages/celery/bin/celery.py
Normal file
963
awx/lib/site-packages/celery/bin/celery.py
Normal file
@@ -0,0 +1,963 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
The :program:`celery` umbrella command.
|
||||
|
||||
.. program:: celery
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import anyjson
|
||||
import heapq
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from importlib import import_module
|
||||
from pprint import pformat
|
||||
|
||||
from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
|
||||
from celery.utils import term
|
||||
from celery.utils import text
|
||||
from celery.utils.functional import memoize
|
||||
from celery.utils.imports import symbol_by_name
|
||||
from celery.utils.timeutils import maybe_iso8601
|
||||
|
||||
from celery.bin.base import Command as BaseCommand, Option
|
||||
|
||||
HELP = """
|
||||
---- -- - - ---- Commands- -------------- --- ------------
|
||||
|
||||
%(commands)s
|
||||
---- -- - - --------- -- - -------------- --- ------------
|
||||
|
||||
Type '%(prog_name)s <command> --help' for help using a specific command.
|
||||
"""
|
||||
|
||||
commands = {}
|
||||
|
||||
command_classes = [
|
||||
('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'),
|
||||
('Remote Control', ['status', 'inspect', 'control'], 'blue'),
|
||||
('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None),
|
||||
]
|
||||
|
||||
|
||||
@memoize()
|
||||
def _get_extension_classes():
|
||||
extensions = []
|
||||
command_classes.append(('Extensions', extensions, 'magenta'))
|
||||
return extensions
|
||||
|
||||
|
||||
def ensure_broadcast_supported(app):
|
||||
if app.connection().transport.driver_type == 'sql':
|
||||
raise Error('SQL broker transports do not support broadcast')
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
|
||||
def __init__(self, reason, status=EX_FAILURE):
|
||||
self.reason = reason
|
||||
self.status = status
|
||||
super(Error, self).__init__(reason, status)
|
||||
|
||||
def __str__(self):
|
||||
return self.reason
|
||||
|
||||
|
||||
def command(fun, name=None, sortpri=0):
|
||||
commands[name or fun.__name__] = fun
|
||||
fun.sortpri = sortpri
|
||||
return fun
|
||||
|
||||
|
||||
def load_extension_commands(namespace='celery.commands'):
|
||||
try:
|
||||
from pkg_resources import iter_entry_points
|
||||
except ImportError:
|
||||
return
|
||||
|
||||
for ep in iter_entry_points(namespace):
|
||||
sym = ':'.join([ep.module_name, ep.attrs[0]])
|
||||
try:
|
||||
cls = symbol_by_name(sym)
|
||||
except (ImportError, SyntaxError), exc:
|
||||
warnings.warn('Cannot load extension %r: %r' % (sym, exc))
|
||||
else:
|
||||
heapq.heappush(_get_extension_classes(), ep.name)
|
||||
command(cls, name=ep.name)
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = ''
|
||||
args = ''
|
||||
prog_name = 'celery'
|
||||
show_body = True
|
||||
show_reply = True
|
||||
|
||||
option_list = (
|
||||
Option('--quiet', '-q', action='store_true'),
|
||||
Option('--no-color', '-C', action='store_true', default=None),
|
||||
)
|
||||
|
||||
def __init__(self, app=None, no_color=False, stdout=sys.stdout,
|
||||
stderr=sys.stderr, show_reply=True):
|
||||
super(Command, self).__init__(app=app)
|
||||
self.colored = term.colored(enabled=not no_color)
|
||||
self.stdout = stdout
|
||||
self.stderr = stderr
|
||||
self.quiet = False
|
||||
if show_reply is not None:
|
||||
self.show_reply = show_reply
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
try:
|
||||
ret = self.run(*args, **kwargs)
|
||||
except Error, exc:
|
||||
self.error(self.colored.red('Error: %s' % exc))
|
||||
return exc.status
|
||||
|
||||
return ret if ret is not None else EX_OK
|
||||
|
||||
def show_help(self, command):
|
||||
self.run_from_argv(self.prog_name, [command, '--help'])
|
||||
return EX_USAGE
|
||||
|
||||
def error(self, s):
|
||||
self.out(s, fh=self.stderr)
|
||||
|
||||
def out(self, s, fh=None):
|
||||
s = str(s)
|
||||
if not s.endswith('\n'):
|
||||
s += '\n'
|
||||
(fh or self.stdout).write(s)
|
||||
|
||||
def run_from_argv(self, prog_name, argv):
|
||||
self.prog_name = prog_name
|
||||
self.command = argv[0]
|
||||
self.arglist = argv[1:]
|
||||
self.parser = self.create_parser(self.prog_name, self.command)
|
||||
options, args = self.prepare_args(
|
||||
*self.parser.parse_args(self.arglist))
|
||||
self.colored = term.colored(enabled=not options['no_color'])
|
||||
self.quiet = options.get('quiet', False)
|
||||
self.show_body = options.get('show_body', True)
|
||||
return self(*args, **options)
|
||||
|
||||
def usage(self, command):
|
||||
return '%%prog %s [options] %s' % (command, self.args)
|
||||
|
||||
def prettify_list(self, n):
|
||||
c = self.colored
|
||||
if not n:
|
||||
return '- empty -'
|
||||
return '\n'.join(str(c.reset(c.white('*'), ' %s' % (item, )))
|
||||
for item in n)
|
||||
|
||||
def prettify_dict_ok_error(self, n):
|
||||
c = self.colored
|
||||
try:
|
||||
return (c.green('OK'),
|
||||
text.indent(self.prettify(n['ok'])[1], 4))
|
||||
except KeyError:
|
||||
pass
|
||||
return (c.red('ERROR'),
|
||||
text.indent(self.prettify(n['error'])[1], 4))
|
||||
|
||||
def say_remote_command_reply(self, replies):
|
||||
c = self.colored
|
||||
node = iter(replies).next() # <-- take first.
|
||||
reply = replies[node]
|
||||
status, preply = self.prettify(reply)
|
||||
self.say_chat('->', c.cyan(node, ': ') + status,
|
||||
text.indent(preply, 4) if self.show_reply else '')
|
||||
|
||||
def prettify(self, n):
|
||||
OK = str(self.colored.green('OK'))
|
||||
if isinstance(n, list):
|
||||
return OK, self.prettify_list(n)
|
||||
if isinstance(n, dict):
|
||||
if 'ok' in n or 'error' in n:
|
||||
return self.prettify_dict_ok_error(n)
|
||||
if isinstance(n, basestring):
|
||||
return OK, unicode(n)
|
||||
return OK, pformat(n)
|
||||
|
||||
def say_chat(self, direction, title, body=''):
|
||||
c = self.colored
|
||||
if direction == '<-' and self.quiet:
|
||||
return
|
||||
dirstr = not self.quiet and c.bold(c.white(direction), ' ') or ''
|
||||
self.out(c.reset(dirstr, title))
|
||||
if body and self.show_body:
|
||||
self.out(body)
|
||||
|
||||
@property
|
||||
def description(self):
|
||||
return self.__doc__
|
||||
|
||||
|
||||
class Delegate(Command):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Delegate, self).__init__(*args, **kwargs)
|
||||
|
||||
self.target = symbol_by_name(self.Command)(app=self.app)
|
||||
self.args = self.target.args
|
||||
|
||||
def get_options(self):
|
||||
return self.option_list + self.target.get_options()
|
||||
|
||||
def create_parser(self, prog_name, command):
|
||||
parser = super(Delegate, self).create_parser(prog_name, command)
|
||||
return self.target.prepare_parser(parser)
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
self.target.check_args(args)
|
||||
return self.target.run(*args, **kwargs)
|
||||
|
||||
|
||||
class multi(Command):
|
||||
"""Start multiple worker instances."""
|
||||
respects_app_option = False
|
||||
|
||||
def get_options(self):
|
||||
return ()
|
||||
|
||||
def run_from_argv(self, prog_name, argv):
|
||||
from celery.bin.celeryd_multi import MultiTool
|
||||
return MultiTool().execute_from_commandline(argv, prog_name)
|
||||
multi = command(multi)
|
||||
|
||||
|
||||
class worker(Delegate):
|
||||
"""Start worker instance.
|
||||
|
||||
Examples::
|
||||
|
||||
celery worker --app=proj -l info
|
||||
celery worker -A proj -l info -Q hipri,lopri
|
||||
|
||||
celery worker -A proj --concurrency=4
|
||||
celery worker -A proj --concurrency=1000 -P eventlet
|
||||
|
||||
celery worker --autoscale=10,0
|
||||
"""
|
||||
Command = 'celery.bin.celeryd:WorkerCommand'
|
||||
worker = command(worker, sortpri=1)
|
||||
|
||||
|
||||
class events(Delegate):
|
||||
"""Event-stream utilities.
|
||||
|
||||
Commands::
|
||||
|
||||
celery events --app=proj
|
||||
start graphical monitor (requires curses)
|
||||
celery events -d --app=proj
|
||||
dump events to screen.
|
||||
celery events -b amqp://
|
||||
celery events -C <camera> [options]
|
||||
run snapshot camera.
|
||||
|
||||
Examples::
|
||||
|
||||
celery events
|
||||
celery events -d
|
||||
celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info
|
||||
"""
|
||||
Command = 'celery.bin.celeryev:EvCommand'
|
||||
events = command(events, sortpri=10)
|
||||
|
||||
|
||||
class beat(Delegate):
|
||||
"""Start the celerybeat periodic task scheduler.
|
||||
|
||||
Examples::
|
||||
|
||||
celery beat -l info
|
||||
celery beat -s /var/run/celerybeat/schedule --detach
|
||||
celery beat -S djcelery.schedulers.DatabaseScheduler
|
||||
|
||||
"""
|
||||
Command = 'celery.bin.celerybeat:BeatCommand'
|
||||
beat = command(beat, sortpri=20)
|
||||
|
||||
|
||||
class amqp(Delegate):
|
||||
"""AMQP Administration Shell.
|
||||
|
||||
Also works for non-amqp transports.
|
||||
|
||||
Examples::
|
||||
|
||||
celery amqp
|
||||
start shell mode
|
||||
celery amqp help
|
||||
show list of commands
|
||||
|
||||
celery amqp exchange.delete name
|
||||
celery amqp queue.delete queue
|
||||
celery amqp queue.delete queue yes yes
|
||||
|
||||
"""
|
||||
Command = 'celery.bin.camqadm:AMQPAdminCommand'
|
||||
amqp = command(amqp, sortpri=30)
|
||||
|
||||
|
||||
class list_(Command):
|
||||
"""Get info from broker.
|
||||
|
||||
Examples::
|
||||
|
||||
celery list bindings
|
||||
|
||||
NOTE: For RabbitMQ the management plugin is required.
|
||||
"""
|
||||
args = '[bindings]'
|
||||
|
||||
def list_bindings(self, management):
|
||||
try:
|
||||
bindings = management.get_bindings()
|
||||
except NotImplementedError:
|
||||
raise Error('Your transport cannot list bindings.')
|
||||
|
||||
fmt = lambda q, e, r: self.out('%s %s %s' % (q.ljust(28),
|
||||
e.ljust(28), r))
|
||||
fmt('Queue', 'Exchange', 'Routing Key')
|
||||
fmt('-' * 16, '-' * 16, '-' * 16)
|
||||
for b in bindings:
|
||||
fmt(b['destination'], b['source'], b['routing_key'])
|
||||
|
||||
def run(self, what=None, *_, **kw):
|
||||
topics = {'bindings': self.list_bindings}
|
||||
available = ', '.join(topics)
|
||||
if not what:
|
||||
raise Error('You must specify what to list (%s)' % available)
|
||||
if what not in topics:
|
||||
raise Error('unknown topic %r (choose one of: %s)' % (
|
||||
what, available))
|
||||
with self.app.connection() as conn:
|
||||
self.app.amqp.TaskConsumer(conn).declare()
|
||||
topics[what](conn.manager)
|
||||
list_ = command(list_, 'list')
|
||||
|
||||
|
||||
class call(Command):
|
||||
"""Call a task by name.
|
||||
|
||||
Examples::
|
||||
|
||||
celery call tasks.add --args='[2, 2]'
|
||||
celery call tasks.add --args='[2, 2]' --countdown=10
|
||||
"""
|
||||
args = '<task_name>'
|
||||
option_list = Command.option_list + (
|
||||
Option('--args', '-a', help='positional arguments (json).'),
|
||||
Option('--kwargs', '-k', help='keyword arguments (json).'),
|
||||
Option('--eta', help='scheduled time (ISO-8601).'),
|
||||
Option('--countdown', type='float',
|
||||
help='eta in seconds from now (float/int).'),
|
||||
Option('--expires', help='expiry time (ISO-8601/float/int).'),
|
||||
Option('--serializer', default='json', help='defaults to json.'),
|
||||
Option('--queue', help='custom queue name.'),
|
||||
Option('--exchange', help='custom exchange name.'),
|
||||
Option('--routing-key', help='custom routing key.'),
|
||||
)
|
||||
|
||||
def run(self, name, *_, **kw):
|
||||
# Positional args.
|
||||
args = kw.get('args') or ()
|
||||
if isinstance(args, basestring):
|
||||
args = anyjson.loads(args)
|
||||
|
||||
# Keyword args.
|
||||
kwargs = kw.get('kwargs') or {}
|
||||
if isinstance(kwargs, basestring):
|
||||
kwargs = anyjson.loads(kwargs)
|
||||
|
||||
# Expires can be int/float.
|
||||
expires = kw.get('expires') or None
|
||||
try:
|
||||
expires = float(expires)
|
||||
except (TypeError, ValueError):
|
||||
# or a string describing an ISO 8601 datetime.
|
||||
try:
|
||||
expires = maybe_iso8601(expires)
|
||||
except (TypeError, ValueError):
|
||||
raise
|
||||
|
||||
res = self.app.send_task(name, args=args, kwargs=kwargs,
|
||||
countdown=kw.get('countdown'),
|
||||
serializer=kw.get('serializer'),
|
||||
queue=kw.get('queue'),
|
||||
exchange=kw.get('exchange'),
|
||||
routing_key=kw.get('routing_key'),
|
||||
eta=maybe_iso8601(kw.get('eta')),
|
||||
expires=expires)
|
||||
self.out(res.id)
|
||||
call = command(call)
|
||||
|
||||
|
||||
class purge(Command):
|
||||
"""Erase all messages from all known task queues.
|
||||
|
||||
WARNING: There is no undo operation for this command.
|
||||
|
||||
"""
|
||||
def run(self, *args, **kwargs):
|
||||
queues = len(self.app.amqp.queues)
|
||||
messages_removed = self.app.control.purge()
|
||||
if messages_removed:
|
||||
self.out('Purged %s %s from %s known task %s.' % (
|
||||
messages_removed, text.pluralize(messages_removed, 'message'),
|
||||
queues, text.pluralize(queues, 'queue')))
|
||||
else:
|
||||
self.out('No messages purged from %s known %s' % (
|
||||
queues, text.pluralize(queues, 'queue')))
|
||||
purge = command(purge)
|
||||
|
||||
|
||||
class result(Command):
|
||||
"""Gives the return value for a given task id.
|
||||
|
||||
Examples::
|
||||
|
||||
celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500
|
||||
celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add
|
||||
celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback
|
||||
|
||||
"""
|
||||
args = '<task_id>'
|
||||
option_list = Command.option_list + (
|
||||
Option('--task', '-t', help='name of task (if custom backend)'),
|
||||
Option('--traceback', action='store_true',
|
||||
help='show traceback instead'),
|
||||
)
|
||||
|
||||
def run(self, task_id, *args, **kwargs):
|
||||
result_cls = self.app.AsyncResult
|
||||
task = kwargs.get('task')
|
||||
traceback = kwargs.get('traceback', False)
|
||||
|
||||
if task:
|
||||
result_cls = self.app.tasks[task].AsyncResult
|
||||
result = result_cls(task_id)
|
||||
if traceback:
|
||||
value = result.traceback
|
||||
else:
|
||||
value = result.get()
|
||||
self.out(self.prettify(value)[1])
|
||||
result = command(result)
|
||||
|
||||
|
||||
class _RemoteControl(Command):
|
||||
name = None
|
||||
choices = None
|
||||
leaf = False
|
||||
option_list = Command.option_list + (
|
||||
Option('--timeout', '-t', type='float',
|
||||
help='Timeout in seconds (float) waiting for reply'),
|
||||
Option('--destination', '-d',
|
||||
help='Comma separated list of destination node names.'))
|
||||
|
||||
@classmethod
|
||||
def get_command_info(self, command,
|
||||
indent=0, prefix='', color=None, help=False):
|
||||
if help:
|
||||
help = '|' + text.indent(self.choices[command][1], indent + 4)
|
||||
else:
|
||||
help = None
|
||||
try:
|
||||
# see if it uses args.
|
||||
meth = getattr(self, command)
|
||||
return text.join([
|
||||
'|' + text.indent('%s%s %s' % (prefix, color(command),
|
||||
meth.__doc__), indent), help,
|
||||
])
|
||||
|
||||
except AttributeError:
|
||||
return text.join([
|
||||
'|' + text.indent(prefix + str(color(command)), indent), help,
|
||||
])
|
||||
|
||||
@classmethod
|
||||
def list_commands(self, indent=0, prefix='', color=None, help=False):
|
||||
color = color if color else lambda x: x
|
||||
prefix = prefix + ' ' if prefix else ''
|
||||
return '\n'.join(self.get_command_info(c, indent, prefix, color, help)
|
||||
for c in sorted(self.choices))
|
||||
|
||||
@property
|
||||
def epilog(self):
|
||||
return '\n'.join([
|
||||
'[Commands]',
|
||||
self.list_commands(indent=4, help=True)
|
||||
])
|
||||
|
||||
def usage(self, command):
|
||||
return '%%prog %s [options] %s <command> [arg1 .. argN]' % (
|
||||
command, self.args)
|
||||
|
||||
def call(self, *args, **kwargs):
|
||||
raise NotImplementedError('get_obj')
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
if not args:
|
||||
raise Error('Missing %s method. See --help' % self.name)
|
||||
return self.do_call_method(args, **kwargs)
|
||||
|
||||
def do_call_method(self, args, **kwargs):
|
||||
method = args[0]
|
||||
if method == 'help':
|
||||
raise Error("Did you mean '%s --help'?" % self.name)
|
||||
if method not in self.choices:
|
||||
raise Error('Unknown %s method %s' % (self.name, method))
|
||||
|
||||
ensure_broadcast_supported(self.app)
|
||||
|
||||
destination = kwargs.get('destination')
|
||||
timeout = kwargs.get('timeout') or self.choices[method][0]
|
||||
if destination and isinstance(destination, basestring):
|
||||
destination = [v.strip() for v in destination.split(',')]
|
||||
|
||||
try:
|
||||
handler = getattr(self, method)
|
||||
except AttributeError:
|
||||
handler = self.call
|
||||
|
||||
# XXX Python 2.5 does not support X(*args, foo=1)
|
||||
kwargs = {"timeout": timeout, "destination": destination,
|
||||
"callback": self.say_remote_command_reply}
|
||||
replies = handler(method, *args[1:], **kwargs)
|
||||
if not replies:
|
||||
raise Error('No nodes replied within time constraint.',
|
||||
status=EX_UNAVAILABLE)
|
||||
return replies
|
||||
|
||||
def say(self, direction, title, body=''):
|
||||
c = self.colored
|
||||
if direction == '<-' and self.quiet:
|
||||
return
|
||||
dirstr = not self.quiet and c.bold(c.white(direction), ' ') or ''
|
||||
self.out(c.reset(dirstr, title))
|
||||
if body and self.show_body:
|
||||
self.out(body)
|
||||
|
||||
|
||||
class inspect(_RemoteControl):
|
||||
"""Inspect the worker at runtime.
|
||||
|
||||
Availability: RabbitMQ (amqp), Redis, and MongoDB transports.
|
||||
|
||||
Examples::
|
||||
|
||||
celery inspect active --timeout=5
|
||||
celery inspect scheduled -d worker1.example.com
|
||||
celery inspect revoked -d w1.e.com,w2.e.com
|
||||
|
||||
"""
|
||||
name = 'inspect'
|
||||
choices = {
|
||||
'active': (1.0, 'dump active tasks (being processed)'),
|
||||
'active_queues': (1.0, 'dump queues being consumed from'),
|
||||
'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'),
|
||||
'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'),
|
||||
'stats': (1.0, 'dump worker statistics'),
|
||||
'revoked': (1.0, 'dump of revoked task ids'),
|
||||
'registered': (1.0, 'dump of registered tasks'),
|
||||
'ping': (0.2, 'ping worker(s)'),
|
||||
'report': (1.0, 'get bugreport info')
|
||||
}
|
||||
|
||||
def call(self, method, *args, **options):
|
||||
i = self.app.control.inspect(**options)
|
||||
return getattr(i, method)(*args)
|
||||
inspect = command(inspect)
|
||||
|
||||
|
||||
class control(_RemoteControl):
|
||||
"""Workers remote control.
|
||||
|
||||
Availability: RabbitMQ (amqp), Redis, and MongoDB transports.
|
||||
|
||||
Examples::
|
||||
|
||||
celery control enable_events --timeout=5
|
||||
celery control -d worker1.example.com enable_events
|
||||
celery control -d w1.e.com,w2.e.com enable_events
|
||||
|
||||
celery control -d w1.e.com add_consumer queue_name
|
||||
celery control -d w1.e.com cancel_consumer queue_name
|
||||
|
||||
celery control -d w1.e.com add_consumer queue exchange direct rkey
|
||||
|
||||
"""
|
||||
name = 'control'
|
||||
choices = {
|
||||
'enable_events': (1.0, 'tell worker(s) to enable events'),
|
||||
'disable_events': (1.0, 'tell worker(s) to disable events'),
|
||||
'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'),
|
||||
'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'),
|
||||
'rate_limit': (
|
||||
1.0, 'tell worker(s) to modify the rate limit for a task type'),
|
||||
'time_limit': (
|
||||
1.0, 'tell worker(s) to modify the time limit for a task type.'),
|
||||
'autoscale': (1.0, 'change autoscale settings'),
|
||||
'pool_grow': (1.0, 'start more pool processes'),
|
||||
'pool_shrink': (1.0, 'use less pool processes'),
|
||||
}
|
||||
|
||||
def call(self, method, *args, **options):
|
||||
# XXX Python 2.5 doesn't support X(*args, reply=True, **kwargs)
|
||||
return getattr(self.app.control, method)(
|
||||
*args, **dict(options, retry=True))
|
||||
|
||||
def pool_grow(self, method, n=1, **kwargs):
|
||||
"""[N=1]"""
|
||||
return self.call(method, n, **kwargs)
|
||||
|
||||
def pool_shrink(self, method, n=1, **kwargs):
|
||||
"""[N=1]"""
|
||||
return self.call(method, n, **kwargs)
|
||||
|
||||
def autoscale(self, method, max=None, min=None, **kwargs):
|
||||
"""[max] [min]"""
|
||||
return self.call(method, max, min, **kwargs)
|
||||
|
||||
def rate_limit(self, method, task_name, rate_limit, **kwargs):
|
||||
"""<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)>"""
|
||||
return self.call(method, task_name, rate_limit, reply=True, **kwargs)
|
||||
|
||||
def time_limit(self, method, task_name, soft, hard=None, **kwargs):
|
||||
"""<task_name> <soft_secs> [hard_secs]"""
|
||||
return self.call(method, task_name, soft, hard, reply=True, **kwargs)
|
||||
|
||||
def add_consumer(self, method, queue, exchange=None,
|
||||
exchange_type='direct', routing_key=None, **kwargs):
|
||||
"""<queue> [exchange [type [routing_key]]]"""
|
||||
return self.call(method, queue, exchange,
|
||||
exchange_type, routing_key, reply=True, **kwargs)
|
||||
|
||||
def cancel_consumer(self, method, queue, **kwargs):
|
||||
"""<queue>"""
|
||||
return self.call(method, queue, reply=True, **kwargs)
|
||||
control = command(control)
|
||||
|
||||
|
||||
class status(Command):
|
||||
"""Show list of workers that are online."""
|
||||
option_list = inspect.option_list
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
replies = inspect(
|
||||
app=self.app,
|
||||
no_color=kwargs.get('no_color', False),
|
||||
stdout=self.stdout, stderr=self.stderr,
|
||||
show_reply=False).run(
|
||||
'ping', **dict(kwargs, quiet=True, show_body=False))
|
||||
if not replies:
|
||||
raise Error('No nodes replied within time constraint',
|
||||
status=EX_UNAVAILABLE)
|
||||
nodecount = len(replies)
|
||||
if not kwargs.get('quiet', False):
|
||||
self.out('\n%s %s online.' % (nodecount,
|
||||
text.pluralize(nodecount, 'node')))
|
||||
status = command(status)
|
||||
|
||||
|
||||
class migrate(Command):
|
||||
"""Migrate tasks from one broker to another.
|
||||
|
||||
Examples::
|
||||
|
||||
celery migrate redis://localhost amqp://guest@localhost//
|
||||
celery migrate django:// redis://localhost
|
||||
|
||||
NOTE: This command is experimental, make sure you have
|
||||
a backup of the tasks before you continue.
|
||||
"""
|
||||
args = '<source_url> <dest_url>'
|
||||
option_list = Command.option_list + (
|
||||
Option('--limit', '-n', type='int',
|
||||
help='Number of tasks to consume (int)'),
|
||||
Option('--timeout', '-t', type='float', default=1.0,
|
||||
help='Timeout in seconds (float) waiting for tasks'),
|
||||
Option('--ack-messages', '-a', action='store_true',
|
||||
help='Ack messages from source broker.'),
|
||||
Option('--tasks', '-T',
|
||||
help='List of task names to filter on.'),
|
||||
Option('--queues', '-Q',
|
||||
help='List of queues to migrate.'),
|
||||
Option('--forever', '-F', action='store_true',
|
||||
help='Continually migrate tasks until killed.'),
|
||||
)
|
||||
|
||||
def on_migrate_task(self, state, body, message):
|
||||
self.out('Migrating task %s/%s: %s[%s]' % (
|
||||
state.count, state.strtotal, body['task'], body['id']))
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
if len(args) != 2:
|
||||
return self.show_help('migrate')
|
||||
from kombu import Connection
|
||||
from celery.contrib.migrate import migrate_tasks
|
||||
|
||||
migrate_tasks(Connection(args[0]),
|
||||
Connection(args[1]),
|
||||
callback=self.on_migrate_task,
|
||||
**kwargs)
|
||||
migrate = command(migrate)
|
||||
|
||||
|
||||
class shell(Command): # pragma: no cover
|
||||
"""Start shell session with convenient access to celery symbols.
|
||||
|
||||
The following symbols will be added to the main globals:
|
||||
|
||||
- celery: the current application.
|
||||
- chord, group, chain, chunks,
|
||||
xmap, xstarmap, subtask, Task
|
||||
- all registered tasks.
|
||||
|
||||
Example Session:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ celery shell
|
||||
|
||||
>>> celery
|
||||
<Celery default:0x1012d9fd0>
|
||||
>>> add
|
||||
<@task: tasks.add>
|
||||
>>> add.delay(2, 2)
|
||||
<AsyncResult: 537b48c7-d6d3-427a-a24a-d1b4414035be>
|
||||
"""
|
||||
option_list = Command.option_list + (
|
||||
Option('--ipython', '-I',
|
||||
action='store_true', dest='force_ipython',
|
||||
help='force iPython.'),
|
||||
Option('--bpython', '-B',
|
||||
action='store_true', dest='force_bpython',
|
||||
help='force bpython.'),
|
||||
Option('--python', '-P',
|
||||
action='store_true', dest='force_python',
|
||||
help='force default Python shell.'),
|
||||
Option('--without-tasks', '-T', action='store_true',
|
||||
help="don't add tasks to locals."),
|
||||
Option('--eventlet', action='store_true',
|
||||
help='use eventlet.'),
|
||||
Option('--gevent', action='store_true', help='use gevent.'),
|
||||
)
|
||||
|
||||
def run(self, force_ipython=False, force_bpython=False,
|
||||
force_python=False, without_tasks=False, eventlet=False,
|
||||
gevent=False, **kwargs):
|
||||
sys.path.insert(0, os.getcwd())
|
||||
if eventlet:
|
||||
import_module('celery.concurrency.eventlet')
|
||||
if gevent:
|
||||
import_module('celery.concurrency.gevent')
|
||||
import celery
|
||||
import celery.task.base
|
||||
self.app.loader.import_default_modules()
|
||||
self.locals = {'celery': self.app,
|
||||
'Task': celery.Task,
|
||||
'chord': celery.chord,
|
||||
'group': celery.group,
|
||||
'chain': celery.chain,
|
||||
'chunks': celery.chunks,
|
||||
'xmap': celery.xmap,
|
||||
'xstarmap': celery.xstarmap,
|
||||
'subtask': celery.subtask}
|
||||
|
||||
if not without_tasks:
|
||||
self.locals.update(dict(
|
||||
(task.__name__, task) for task in self.app.tasks.itervalues()
|
||||
if not task.name.startswith('celery.')),
|
||||
)
|
||||
|
||||
if force_python:
|
||||
return self.invoke_fallback_shell()
|
||||
elif force_bpython:
|
||||
return self.invoke_bpython_shell()
|
||||
elif force_ipython:
|
||||
return self.invoke_ipython_shell()
|
||||
return self.invoke_default_shell()
|
||||
|
||||
def invoke_default_shell(self):
|
||||
try:
|
||||
import IPython # noqa
|
||||
except ImportError:
|
||||
try:
|
||||
import bpython # noqa
|
||||
except ImportError:
|
||||
return self.invoke_fallback_shell()
|
||||
else:
|
||||
return self.invoke_bpython_shell()
|
||||
else:
|
||||
return self.invoke_ipython_shell()
|
||||
|
||||
def invoke_fallback_shell(self):
|
||||
import code
|
||||
try:
|
||||
import readline
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
import rlcompleter
|
||||
readline.set_completer(
|
||||
rlcompleter.Completer(self.locals).complete)
|
||||
readline.parse_and_bind('tab:complete')
|
||||
code.interact(local=self.locals)
|
||||
|
||||
def invoke_ipython_shell(self):
|
||||
try:
|
||||
from IPython.frontend.terminal import embed
|
||||
embed.TerminalInteractiveShell(user_ns=self.locals).mainloop()
|
||||
except ImportError: # ipython < 0.11
|
||||
from IPython.Shell import IPShell
|
||||
IPShell(argv=[], user_ns=self.locals).mainloop()
|
||||
|
||||
def invoke_bpython_shell(self):
|
||||
import bpython
|
||||
bpython.embed(self.locals)
|
||||
|
||||
shell = command(shell)
|
||||
|
||||
|
||||
class help(Command):
|
||||
"""Show help screen and exit."""
|
||||
|
||||
def usage(self, command):
|
||||
return '%%prog <command> [options] %s' % (self.args, )
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
self.parser.print_help()
|
||||
self.out(HELP % {'prog_name': self.prog_name,
|
||||
'commands': CeleryCommand.list_commands()})
|
||||
|
||||
return EX_USAGE
|
||||
help = command(help)
|
||||
|
||||
|
||||
class report(Command):
|
||||
"""Shows information useful to include in bugreports."""
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
self.out(self.app.bugreport())
|
||||
return EX_OK
|
||||
report = command(report)
|
||||
|
||||
|
||||
class CeleryCommand(BaseCommand):
|
||||
commands = commands
|
||||
enable_config_from_cmdline = True
|
||||
prog_name = 'celery'
|
||||
|
||||
def execute(self, command, argv=None):
|
||||
try:
|
||||
cls = self.commands[command]
|
||||
except KeyError:
|
||||
cls, argv = self.commands['help'], ['help']
|
||||
cls = self.commands.get(command) or self.commands['help']
|
||||
try:
|
||||
return cls(app=self.app).run_from_argv(self.prog_name, argv)
|
||||
except (TypeError, Error):
|
||||
return self.execute('help', argv)
|
||||
|
||||
def remove_options_at_beginning(self, argv, index=0):
|
||||
if argv:
|
||||
while index < len(argv):
|
||||
value = argv[index]
|
||||
if value.startswith('--'):
|
||||
pass
|
||||
elif value.startswith('-'):
|
||||
index += 1
|
||||
else:
|
||||
return argv[index:]
|
||||
index += 1
|
||||
return []
|
||||
|
||||
def handle_argv(self, prog_name, argv):
|
||||
self.prog_name = prog_name
|
||||
argv = self.remove_options_at_beginning(argv)
|
||||
_, argv = self.prepare_args(None, argv)
|
||||
try:
|
||||
command = argv[0]
|
||||
except IndexError:
|
||||
command, argv = 'help', ['help']
|
||||
return self.execute(command, argv)
|
||||
|
||||
def execute_from_commandline(self, argv=None):
|
||||
argv = sys.argv if argv is None else argv
|
||||
if 'multi' in argv[1:3]: # Issue 1008
|
||||
self.respects_app_option = False
|
||||
try:
|
||||
sys.exit(determine_exit_status(
|
||||
super(CeleryCommand, self).execute_from_commandline(argv)))
|
||||
except KeyboardInterrupt:
|
||||
sys.exit(EX_FAILURE)
|
||||
|
||||
@classmethod
|
||||
def get_command_info(self, command, indent=0, color=None):
|
||||
colored = term.colored().names[color] if color else lambda x: x
|
||||
obj = self.commands[command]
|
||||
if obj.leaf:
|
||||
return '|' + text.indent('celery %s' % colored(command), indent)
|
||||
return text.join([
|
||||
' ',
|
||||
'|' + text.indent('celery %s --help' % colored(command), indent),
|
||||
obj.list_commands(indent, 'celery %s' % command, colored),
|
||||
])
|
||||
|
||||
@classmethod
|
||||
def list_commands(self, indent=0):
|
||||
white = term.colored().white
|
||||
ret = []
|
||||
for cls, commands, color in command_classes:
|
||||
ret.extend([
|
||||
text.indent('+ %s: ' % white(cls), indent),
|
||||
'\n'.join(self.get_command_info(command, indent + 4, color)
|
||||
for command in commands),
|
||||
''
|
||||
])
|
||||
return '\n'.join(ret).strip()
|
||||
|
||||
def with_pool_option(self, argv):
|
||||
if len(argv) > 1 and argv[1] == 'worker':
|
||||
# this command supports custom pools
|
||||
# that may have to be loaded as early as possible.
|
||||
return (['-P'], ['--pool'])
|
||||
|
||||
def on_concurrency_setup(self):
|
||||
load_extension_commands()
|
||||
|
||||
|
||||
def determine_exit_status(ret):
|
||||
if isinstance(ret, int):
|
||||
return ret
|
||||
return EX_OK if ret else EX_FAILURE
|
||||
|
||||
|
||||
def main(argv=None):
|
||||
# Fix for setuptools generated scripts, so that it will
|
||||
# work with multiprocessing fork emulation.
|
||||
# (see multiprocessing.forking.get_preparation_data())
|
||||
try:
|
||||
if __name__ != '__main__': # pragma: no cover
|
||||
sys.modules['__main__'] = sys.modules[__name__]
|
||||
cmd = CeleryCommand()
|
||||
cmd.maybe_patch_concurrency()
|
||||
from billiard import freeze_support
|
||||
freeze_support()
|
||||
cmd.execute_from_commandline(argv)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
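The umbrella command above collects its sub-commands in a plain module-level mapping via the command() helper; a minimal sketch of that registry pattern (the names here are illustrative, not celery's API) is:

    # Decorator-style command registry, similar in spirit to command().
    registry = {}

    def register(name=None):
        def wrap(cls):
            registry[name or cls.__name__] = cls
            return cls
        return wrap

    @register('status')
    class Status(object):
        def run(self, *args):
            return 'OK'

    # Dispatch mirrors CeleryCommand.execute: unknown names fall back to help.
    cls = registry.get('status') or registry.get('help')
    print(cls().run())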
86
awx/lib/site-packages/celery/bin/celerybeat.py
Normal file
86
awx/lib/site-packages/celery/bin/celerybeat.py
Normal file
@@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
The :program:`celery beat` command.
|
||||
|
||||
.. program:: celery beat
|
||||
|
||||
.. seealso::
|
||||
|
||||
See :ref:`preload-options` and :ref:`daemon-options`.
|
||||
|
||||
.. cmdoption:: --detach
|
||||
|
||||
Detach and run in the background as a daemon.
|
||||
|
||||
.. cmdoption:: -s, --schedule
|
||||
|
||||
Path to the schedule database. Defaults to `celerybeat-schedule`.
|
||||
The extension '.db' may be appended to the filename.
|
||||
Default is %(default)s.
|
||||
|
||||
.. cmdoption:: -S, --scheduler
|
||||
|
||||
Scheduler class to use.
|
||||
Default is :class:`celery.beat.PersistentScheduler`.
|
||||
|
||||
.. cmdoption:: --max-interval
|
||||
|
||||
Max seconds to sleep between schedule iterations.
|
||||
|
||||
.. cmdoption:: -f, --logfile
|
||||
|
||||
Path to log file. If no logfile is specified, `stderr` is used.
|
||||
|
||||
.. cmdoption:: -l, --loglevel
|
||||
|
||||
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
|
||||
`ERROR`, `CRITICAL`, or `FATAL`.
|
||||
|
||||
"""
|
||||
from __future__ import with_statement
|
||||
from __future__ import absolute_import
|
||||
|
||||
from functools import partial
|
||||
|
||||
from celery.platforms import detached
|
||||
|
||||
from celery.bin.base import Command, Option, daemon_options
|
||||
|
||||
|
||||
class BeatCommand(Command):
|
||||
doc = __doc__
|
||||
enable_config_from_cmdline = True
|
||||
supports_args = False
|
||||
|
||||
def run(self, detach=False, logfile=None, pidfile=None, uid=None,
|
||||
gid=None, umask=None, working_directory=None, **kwargs):
|
||||
workdir = working_directory
|
||||
kwargs.pop('app', None)
|
||||
beat = partial(self.app.Beat,
|
||||
logfile=logfile, pidfile=pidfile, **kwargs)
|
||||
|
||||
if detach:
|
||||
with detached(logfile, pidfile, uid, gid, umask, workdir):
|
||||
return beat().run()
|
||||
else:
|
||||
return beat().run()
|
||||
|
||||
def get_options(self):
|
||||
c = self.app.conf
|
||||
|
||||
return (
|
||||
Option('--detach', action='store_true'),
|
||||
Option('-s', '--schedule', default=c.CELERYBEAT_SCHEDULE_FILENAME),
|
||||
Option('--max-interval', type='float'),
|
||||
Option('-S', '--scheduler', dest='scheduler_cls'),
|
||||
Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL),
|
||||
) + daemon_options(default_pidfile='celerybeat.pid')
|
||||
|
||||
|
||||
def main():
|
||||
beat = BeatCommand()
|
||||
beat.execute_from_commandline()
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
16
awx/lib/site-packages/celery/bin/celeryctl.py
Normal file
16
awx/lib/site-packages/celery/bin/celeryctl.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.bin.celeryctl
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Now replaced by the :program:`celery` command.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery.bin.celery import ( # noqa
|
||||
CeleryCommand as celeryctl, Command, main,
|
||||
)
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
206
awx/lib/site-packages/celery/bin/celeryd.py
Normal file
206
awx/lib/site-packages/celery/bin/celeryd.py
Normal file
@@ -0,0 +1,206 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
The :program:`celery worker` command (previously known as ``celeryd``)
|
||||
|
||||
.. program:: celery worker
|
||||
|
||||
.. seealso::
|
||||
|
||||
See :ref:`preload-options`.
|
||||
|
||||
.. cmdoption:: -c, --concurrency
|
||||
|
||||
Number of child processes processing the queue. The default
|
||||
is the number of CPUs available on your system.
|
||||
|
||||
.. cmdoption:: -P, --pool
|
||||
|
||||
Pool implementation:
|
||||
|
||||
processes (default), eventlet, gevent, solo or threads.
|
||||
|
||||
.. cmdoption:: -f, --logfile
|
||||
|
||||
Path to log file. If no logfile is specified, `stderr` is used.
|
||||
|
||||
.. cmdoption:: -l, --loglevel
|
||||
|
||||
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
|
||||
`ERROR`, `CRITICAL`, or `FATAL`.
|
||||
|
||||
.. cmdoption:: -n, --hostname
|
||||
|
||||
Set custom hostname, e.g. 'foo.example.com'.
|
||||
|
||||
.. cmdoption:: -B, --beat
|
||||
|
||||
Also run the `celerybeat` periodic task scheduler. Please note that
|
||||
there must only be one instance of this service.
|
||||
|
||||
.. cmdoption:: -Q, --queues
|
||||
|
||||
List of queues to enable for this worker, separated by comma.
|
||||
By default all configured queues are enabled.
|
||||
Example: `-Q video,image`
|
||||
|
||||
.. cmdoption:: -I, --include
|
||||
|
||||
Comma separated list of additional modules to import.
|
||||
Example: -I foo.tasks,bar.tasks
|
||||
|
||||
.. cmdoption:: -s, --schedule
|
||||
|
||||
Path to the schedule database if running with the `-B` option.
|
||||
Defaults to `celerybeat-schedule`. The extension ".db" may be
|
||||
appended to the filename.
|
||||
|
||||
.. cmdoption:: --scheduler
|
||||
|
||||
Scheduler class to use. Default is celery.beat.PersistentScheduler
|
||||
|
||||
.. cmdoption:: -S, --statedb
|
||||
|
||||
Path to the state database. The extension '.db' may
|
||||
be appended to the filename. Default: %(default)s
|
||||
|
||||
.. cmdoption:: -E, --events
|
||||
|
||||
Send events that can be captured by monitors like :program:`celeryev`,
|
||||
`celerymon`, and others.
|
||||
|
||||
.. cmdoption:: --purge
|
||||
|
||||
Purges all waiting tasks before the daemon is started.
|
||||
**WARNING**: This is unrecoverable, and the tasks will be
|
||||
deleted from the messaging server.
|
||||
|
||||
.. cmdoption:: --time-limit
|
||||
|
||||
Enables a hard time limit (in seconds int/float) for tasks.
|
||||
|
||||
.. cmdoption:: --soft-time-limit
|
||||
|
||||
Enables a soft time limit (in seconds int/float) for tasks.
|
||||
|
||||
.. cmdoption:: --maxtasksperchild
|
||||
|
||||
Maximum number of tasks a pool worker can execute before it's
|
||||
terminated and replaced by a new worker.
|
||||
|
||||
.. cmdoption:: --pidfile
|
||||
|
||||
Optional file used to store the worker's pid.
|
||||
|
||||
The worker will not start if this file already exists
|
||||
and the pid is still alive.
|
||||
|
||||
.. cmdoption:: --autoscale
|
||||
|
||||
Enable autoscaling by providing
|
||||
max_concurrency, min_concurrency. Example::
|
||||
|
||||
--autoscale=10,3
|
||||
|
||||
(always keep 3 processes, but grow to 10 if necessary)
|
||||
|
||||
.. cmdoption:: --autoreload
|
||||
|
||||
Enable autoreloading.
|
||||
|
||||
.. cmdoption:: --no-execv
|
||||
|
||||
Don't do execv after multiprocessing child fork.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from celery import concurrency
|
||||
from celery.bin.base import Command, Option
|
||||
from celery.utils.log import LOG_LEVELS, mlevel
|
||||
|
||||
|
||||
class WorkerCommand(Command):
|
||||
doc = __doc__ # parse help from this.
|
||||
namespace = 'celeryd'
|
||||
enable_config_from_cmdline = True
|
||||
supports_args = False
|
||||
|
||||
def execute_from_commandline(self, argv=None):
|
||||
if argv is None:
|
||||
argv = list(sys.argv)
|
||||
return super(WorkerCommand, self).execute_from_commandline(argv)
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
kwargs.pop('app', None)
|
||||
# Pools like eventlet/gevent needs to patch libs as early
|
||||
# as possible.
|
||||
kwargs['pool_cls'] = concurrency.get_implementation(
|
||||
kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
|
||||
if self.app.IS_WINDOWS and kwargs.get('beat'):
|
||||
self.die('-B option does not work on Windows. '
|
||||
'Please run celerybeat as a separate service.')
|
||||
loglevel = kwargs.get('loglevel')
|
||||
if loglevel:
|
||||
try:
|
||||
kwargs['loglevel'] = mlevel(loglevel)
|
||||
except KeyError: # pragma: no cover
|
||||
self.die('Unknown level %r. Please use one of %s.' % (
|
||||
loglevel, '|'.join(l for l in LOG_LEVELS
|
||||
if isinstance(l, basestring))))
|
||||
return self.app.Worker(**kwargs).run()
|
||||
|
||||
def with_pool_option(self, argv):
|
||||
# this command supports custom pools
|
||||
# that may have to be loaded as early as possible.
|
||||
return (['-P'], ['--pool'])
|
||||
|
||||
def get_options(self):
|
||||
conf = self.app.conf
|
||||
return (
|
||||
Option('-c', '--concurrency',
|
||||
default=conf.CELERYD_CONCURRENCY, type='int'),
|
||||
Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
|
||||
Option('--purge', '--discard', default=False, action='store_true'),
|
||||
Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE),
|
||||
Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
|
||||
Option('-n', '--hostname'),
|
||||
Option('-B', '--beat', action='store_true'),
|
||||
Option('-s', '--schedule', dest='schedule_filename',
|
||||
default=conf.CELERYBEAT_SCHEDULE_FILENAME),
|
||||
Option('--scheduler', dest='scheduler_cls'),
|
||||
Option('-S', '--statedb',
|
||||
default=conf.CELERYD_STATE_DB, dest='state_db'),
|
||||
Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
|
||||
action='store_true', dest='send_events'),
|
||||
Option('--time-limit', type='float', dest='task_time_limit',
|
||||
default=conf.CELERYD_TASK_TIME_LIMIT),
|
||||
Option('--soft-time-limit', dest='task_soft_time_limit',
|
||||
default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
|
||||
Option('--maxtasksperchild', dest='max_tasks_per_child',
|
||||
default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
|
||||
Option('--queues', '-Q', default=[]),
|
||||
Option('--include', '-I', default=[]),
|
||||
Option('--pidfile'),
|
||||
Option('--autoscale'),
|
||||
Option('--autoreload', action='store_true'),
|
||||
Option('--no-execv', action='store_true', default=False),
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
# Fix for setuptools generated scripts, so that it will
|
||||
# work with multiprocessing fork emulation.
|
||||
# (see multiprocessing.forking.get_preparation_data())
|
||||
if __name__ != '__main__': # pragma: no cover
|
||||
sys.modules['__main__'] = sys.modules[__name__]
|
||||
from billiard import freeze_support
|
||||
freeze_support()
|
||||
worker = WorkerCommand()
|
||||
worker.execute_from_commandline()
|
||||
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
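WorkerCommand.run above normalizes the --loglevel string through mlevel() from celery.utils.log; a rough standalone equivalent of that lookup, using the standard logging module instead (an approximation, not the vendored helper), would be:

    import logging

    def to_level(value):
        # Accept either a numeric level or a case-insensitive level name.
        if isinstance(value, int):
            return value
        return getattr(logging, value.upper())

    print(to_level('info'))         # -> 20
    print(to_level(logging.DEBUG))  # -> 10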
154
awx/lib/site-packages/celery/bin/celeryd_detach.py
Normal file
154
awx/lib/site-packages/celery/bin/celeryd_detach.py
Normal file
@@ -0,0 +1,154 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.bin.celeryd_detach
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Program used to daemonize celeryd.
|
||||
|
||||
Using :func:`os.execv` because forking and multiprocessing
|
||||
lead to weird issues (it was a long time ago now, but it
|
||||
could have something to do with the threading mutex bug)
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import celery
|
||||
import os
|
||||
import sys
|
||||
|
||||
from optparse import OptionParser, BadOptionError
|
||||
|
||||
from celery.platforms import EX_FAILURE, detached
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
from celery.bin.base import daemon_options, Option
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
|
||||
Option('--fake',
|
||||
default=False, action='store_true', dest='fake',
|
||||
help="Don't fork (for debugging purposes)"),
|
||||
)
|
||||
|
||||
|
||||
def detach(path, argv, logfile=None, pidfile=None, uid=None,
|
||||
gid=None, umask=0, working_directory=None, fake=False, ):
|
||||
with detached(logfile, pidfile, uid, gid, umask, working_directory, fake):
|
||||
try:
|
||||
os.execv(path, [path] + argv)
|
||||
except Exception:
|
||||
from celery import current_app
|
||||
current_app.log.setup_logging_subsystem('ERROR', logfile)
|
||||
logger.critical("Can't exec %r", ' '.join([path] + argv),
|
||||
exc_info=True)
|
||||
return EX_FAILURE
|
||||
|
||||
|
||||
class PartialOptionParser(OptionParser):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.leftovers = []
|
||||
OptionParser.__init__(self, *args, **kwargs)
|
||||
|
||||
def _process_long_opt(self, rargs, values):
|
||||
arg = rargs.pop(0)
|
||||
|
||||
if '=' in arg:
|
||||
opt, next_arg = arg.split('=', 1)
|
||||
rargs.insert(0, next_arg)
|
||||
had_explicit_value = True
|
||||
else:
|
||||
opt = arg
|
||||
had_explicit_value = False
|
||||
|
||||
try:
|
||||
opt = self._match_long_opt(opt)
|
||||
option = self._long_opt.get(opt)
|
||||
except BadOptionError:
|
||||
option = None
|
||||
|
||||
if option:
|
||||
if option.takes_value():
|
||||
nargs = option.nargs
|
||||
if len(rargs) < nargs:
|
||||
if nargs == 1:
|
||||
self.error('%s option requires an argument' % opt)
|
||||
else:
|
||||
self.error('%s option requires %d arguments' % (
|
||||
opt, nargs))
|
||||
elif nargs == 1:
|
||||
value = rargs.pop(0)
|
||||
else:
|
||||
value = tuple(rargs[0:nargs])
|
||||
del rargs[0:nargs]
|
||||
|
||||
elif had_explicit_value:
|
||||
self.error('%s option does not take a value' % opt)
|
||||
else:
|
||||
value = None
|
||||
option.process(opt, value, values, self)
|
||||
else:
|
||||
self.leftovers.append(arg)
|
||||
|
||||
def _process_short_opts(self, rargs, values):
|
||||
arg = rargs[0]
|
||||
try:
|
||||
OptionParser._process_short_opts(self, rargs, values)
|
||||
except BadOptionError:
|
||||
self.leftovers.append(arg)
|
||||
if rargs and not rargs[0][0] == '-':
|
||||
self.leftovers.append(rargs.pop(0))
|
||||
|
||||
|
||||
class detached_celeryd(object):
|
||||
option_list = OPTION_LIST
|
||||
usage = '%prog [options] [celeryd options]'
|
||||
version = celery.VERSION_BANNER
|
||||
description = ('Detaches Celery worker nodes. See `celeryd --help` '
|
||||
'for the list of supported worker arguments.')
|
||||
command = sys.executable
|
||||
execv_path = sys.executable
|
||||
execv_argv = ['-m', 'celery.bin.celeryd']
|
||||
|
||||
def Parser(self, prog_name):
|
||||
return PartialOptionParser(prog=prog_name,
|
||||
option_list=self.option_list,
|
||||
usage=self.usage,
|
||||
description=self.description,
|
||||
version=self.version)
|
||||
|
||||
def parse_options(self, prog_name, argv):
|
||||
parser = self.Parser(prog_name)
|
||||
options, values = parser.parse_args(argv)
|
||||
if options.logfile:
|
||||
parser.leftovers.append('--logfile=%s' % (options.logfile, ))
|
||||
if options.pidfile:
|
||||
parser.leftovers.append('--pidfile=%s' % (options.pidfile, ))
|
||||
return options, values, parser.leftovers
|
||||
|
||||
def execute_from_commandline(self, argv=None):
|
||||
if argv is None:
|
||||
argv = sys.argv
|
||||
config = []
|
||||
seen_cargs = 0
|
||||
for arg in argv:
|
||||
if seen_cargs:
|
||||
config.append(arg)
|
||||
else:
|
||||
if arg == '--':
|
||||
seen_cargs = 1
|
||||
config.append(arg)
|
||||
prog_name = os.path.basename(argv[0])
|
||||
options, values, leftovers = self.parse_options(prog_name, argv[1:])
|
||||
sys.exit(detach(path=self.execv_path,
|
||||
argv=self.execv_argv + leftovers + config,
|
||||
**vars(options)))
|
||||
|
||||
|
||||
def main():
|
||||
detached_celeryd().execute_from_commandline()
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
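The detach wrapper above depends on an option parser that keeps unknown options instead of failing, so they can be forwarded to the real celeryd process; a small sketch of that "parse what you know, forward the rest" idea using argparse (an analogy only -- the vendored file implements it manually on optparse) is:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--logfile')
    parser.add_argument('--pidfile')

    # Options the parser does not know about are returned as leftovers and
    # would be appended to the argv that is exec'd for the worker.
    opts, leftovers = parser.parse_known_args(
        ['--logfile=/var/log/w.log', '--concurrency=4', '-B'])
    print(opts.logfile, leftovers)  # '/var/log/w.log' ['--concurrency=4', '-B']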
567
awx/lib/site-packages/celery/bin/celeryd_multi.py
Normal file
567
awx/lib/site-packages/celery/bin/celeryd_multi.py
Normal file
@@ -0,0 +1,567 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
.. program:: celeryd-multi
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Single worker with explicit name and events enabled.
|
||||
$ celeryd-multi start Leslie -E
|
||||
|
||||
# Pidfiles and logfiles are stored in the current directory
|
||||
# by default. Use --pidfile and --logfile argument to change
|
||||
# this. The abbreviation %n will be expanded to the current
|
||||
# node name.
|
||||
$ celeryd-multi start Leslie -E --pidfile=/var/run/celery/%n.pid
|
||||
--logfile=/var/log/celery/%n.log
|
||||
|
||||
|
||||
# You need to add the same arguments when you restart,
|
||||
# as these are not persisted anywhere.
|
||||
$ celeryd-multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
|
||||
--logfile=/var/run/celery/%n.log
|
||||
|
||||
# To stop the node, you need to specify the same pidfile.
|
||||
$ celeryd-multi stop Leslie --pidfile=/var/run/celery/%n.pid
|
||||
|
||||
# 3 workers, with 3 processes each
|
||||
$ celeryd-multi start 3 -c 3
|
||||
celeryd -n celeryd1.myhost -c 3
|
||||
celeryd -n celeryd2.myhost -c 3
|
||||
celeryd -n celeryd3.myhost -c 3
|
||||
|
||||
# start 3 named workers
|
||||
$ celeryd-multi start image video data -c 3
|
||||
celeryd -n image.myhost -c 3
|
||||
celeryd -n video.myhost -c 3
|
||||
celeryd -n data.myhost -c 3
|
||||
|
||||
# specify custom hostname
|
||||
$ celeryd-multi start 2 -n worker.example.com -c 3
|
||||
celeryd -n celeryd1.worker.example.com -c 3
|
||||
celeryd -n celeryd2.worker.example.com -c 3
|
||||
|
||||
# Advanced example starting 10 workers in the background:
|
||||
# * Three of the workers process the images and video queues
|
||||
# * Two of the workers process the data queue with loglevel DEBUG
|
||||
# * the rest process the 'default' queue.
|
||||
$ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
|
||||
-Q default -L:4,5 DEBUG
|
||||
|
||||
# You can show the commands necessary to start the workers with
|
||||
# the 'show' command:
|
||||
$ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
|
||||
-Q default -L:4,5 DEBUG
|
||||
|
||||
# Additional options are added to each celeryd,
|
||||
# but you can also modify the options for ranges of, or specific workers
|
||||
|
||||
# 3 workers: Two with 3 processes, and one with 10 processes.
|
||||
$ celeryd-multi start 3 -c 3 -c:1 10
|
||||
celeryd -n celeryd1.myhost -c 10
|
||||
celeryd -n celeryd2.myhost -c 3
|
||||
celeryd -n celeryd3.myhost -c 3
|
||||
|
||||
# can also specify options for named workers
|
||||
$ celeryd-multi start image video data -c 3 -c:image 10
|
||||
celeryd -n image.myhost -c 10
|
||||
celeryd -n video.myhost -c 3
|
||||
celeryd -n data.myhost -c 3
|
||||
|
||||
# ranges and lists of workers in options are also allowed:
|
||||
# (-c:1-3 can also be written as -c:1,2,3)
|
||||
$ celeryd-multi start 5 -c 3 -c:1-3 10
|
||||
celeryd -n celeryd1.myhost -c 10
|
||||
celeryd -n celeryd2.myhost -c 10
|
||||
celeryd -n celeryd3.myhost -c 10
|
||||
celeryd -n celeryd4.myhost -c 3
|
||||
celeryd -n celeryd5.myhost -c 3
|
||||
|
||||
# lists also work with named workers
|
||||
$ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10
|
||||
celeryd -n foo.myhost -c 10
|
||||
celeryd -n bar.myhost -c 10
|
||||
celeryd -n baz.myhost -c 10
|
||||
celeryd -n xuzzy.myhost -c 3
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import errno
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from collections import defaultdict
|
||||
from subprocess import Popen
|
||||
from time import sleep
|
||||
|
||||
from kombu.utils import cached_property
|
||||
from kombu.utils.encoding import from_utf8
|
||||
|
||||
from celery import VERSION_BANNER
|
||||
from celery.platforms import Pidfile, shellsplit
|
||||
from celery.utils import term
|
||||
from celery.utils.text import pluralize
|
||||
|
||||
SIGNAMES = set(sig for sig in dir(signal)
|
||||
if sig.startswith('SIG') and '_' not in sig)
|
||||
SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
|
||||
|
||||
USAGE = """\
|
||||
usage: %(prog_name)s start <node1 node2 nodeN|range> [celeryd options]
|
||||
%(prog_name)s stop <n1 n2 nN|range> [-SIG (default: -TERM)]
|
||||
%(prog_name)s restart <n1 n2 nN|range> [-SIG] [celeryd options]
|
||||
%(prog_name)s kill <n1 n2 nN|range>
|
||||
|
||||
%(prog_name)s show <n1 n2 nN|range> [celeryd options]
|
||||
%(prog_name)s get hostname <n1 n2 nN|range> [-qv] [celeryd options]
|
||||
%(prog_name)s names <n1 n2 nN|range>
|
||||
%(prog_name)s expand template <n1 n2 nN|range>
|
||||
%(prog_name)s help
|
||||
|
||||
additional options (must appear after command name):
|
||||
|
||||
* --nosplash: Don't display program info.
|
||||
* --quiet: Don't show as much output.
|
||||
* --verbose: Show more output.
|
||||
* --no-color: Don't display colors.
|
||||
"""
|
||||
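# A few illustrative invocations of the commands listed above (node names,
# counts and templates here are hypothetical, output abridged):
#
#   $ celeryd-multi names 3                   # print the generated node names
#   $ celeryd-multi stop 3 -KILL              # send SIGKILL instead of the default -TERM
#   $ celeryd-multi expand '%n.log' foo bar   # expands to foo.log and bar.log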
|
||||
|
||||
def main():
|
||||
sys.exit(MultiTool().execute_from_commandline(sys.argv))
|
||||
|
||||
|
||||
class MultiTool(object):
|
||||
retcode = 0 # Final exit code.
|
||||
|
||||
def __init__(self, env=None, fh=None, quiet=False, verbose=False,
|
||||
no_color=False, nosplash=False):
|
||||
self.fh = fh or sys.stderr
|
||||
self.env = env
|
||||
self.nosplash = nosplash
|
||||
self.quiet = quiet
|
||||
self.verbose = verbose
|
||||
self.no_color = no_color
|
||||
self.prog_name = 'celeryd-multi'
|
||||
self.commands = {'start': self.start,
|
||||
'show': self.show,
|
||||
'stop': self.stop,
|
||||
'stopwait': self.stopwait,
|
||||
'stop_verify': self.stopwait, # compat alias
|
||||
'restart': self.restart,
|
||||
'kill': self.kill,
|
||||
'names': self.names,
|
||||
'expand': self.expand,
|
||||
'get': self.get,
|
||||
'help': self.help}
|
||||
|
||||
def execute_from_commandline(self, argv, cmd='celeryd'):
|
||||
argv = list(argv)  # don't modify the caller's argv.
|
||||
|
||||
# Reserve the --nosplash|--quiet|-q/--verbose options.
|
||||
if '--nosplash' in argv:
|
||||
self.nosplash = argv.pop(argv.index('--nosplash'))
|
||||
if '--quiet' in argv:
|
||||
self.quiet = argv.pop(argv.index('--quiet'))
|
||||
if '-q' in argv:
|
||||
self.quiet = argv.pop(argv.index('-q'))
|
||||
if '--verbose' in argv:
|
||||
self.verbose = argv.pop(argv.index('--verbose'))
|
||||
if '--no-color' in argv:
|
||||
self.no_color = argv.pop(argv.index('--no-color'))
|
||||
|
||||
self.prog_name = os.path.basename(argv.pop(0))
|
||||
if not argv or argv[0][0] == '-':
|
||||
return self.error()
|
||||
|
||||
try:
|
||||
self.commands[argv[0]](argv[1:], cmd)
|
||||
except KeyError:
|
||||
self.error('Invalid command: %s' % argv[0])
|
||||
|
||||
return self.retcode
|
||||
|
||||
def say(self, m, newline=True):
|
||||
self.fh.write('%s%s' % (m, '\n' if newline else ''))
|
||||
|
||||
def names(self, argv, cmd):
|
||||
p = NamespacedOptionParser(argv)
|
||||
self.say('\n'.join(
|
||||
hostname for hostname, _, _ in multi_args(p, cmd)),
|
||||
)
|
||||
|
||||
def get(self, argv, cmd):
|
||||
wanted = argv[0]
|
||||
p = NamespacedOptionParser(argv[1:])
|
||||
for name, worker, _ in multi_args(p, cmd):
|
||||
if name == wanted:
|
||||
self.say(' '.join(worker))
|
||||
return
|
||||
|
||||
def show(self, argv, cmd):
|
||||
p = NamespacedOptionParser(argv)
|
||||
self.note('> Starting nodes...')
|
||||
self.say('\n'.join(
|
||||
' '.join(worker) for _, worker, _ in multi_args(p, cmd)),
|
||||
)
|
||||
|
||||
def start(self, argv, cmd):
|
||||
self.splash()
|
||||
p = NamespacedOptionParser(argv)
|
||||
self.with_detacher_default_options(p)
|
||||
retcodes = []
|
||||
self.note('> Starting nodes...')
|
||||
for nodename, argv, _ in multi_args(p, cmd):
|
||||
self.note('\t> %s: ' % (nodename, ), newline=False)
|
||||
retcode = self.waitexec(argv)
|
||||
self.note(retcode and self.FAILED or self.OK)
|
||||
retcodes.append(retcode)
|
||||
self.retcode = int(any(retcodes))
|
||||
|
||||
def with_detacher_default_options(self, p):
|
||||
p.options.setdefault('--pidfile', 'celeryd@%n.pid')
|
||||
p.options.setdefault('--logfile', 'celeryd@%n.log')
|
||||
p.options.setdefault('--cmd', '-m celery.bin.celeryd_detach')
|
||||
|
||||
def signal_node(self, nodename, pid, sig):
|
||||
try:
|
||||
os.kill(pid, sig)
|
||||
except OSError, exc:
|
||||
if exc.errno != errno.ESRCH:
|
||||
raise
|
||||
self.note('Could not signal %s (%s): No such process' % (
|
||||
nodename, pid))
|
||||
return False
|
||||
return True
|
||||
|
||||
def node_alive(self, pid):
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
except OSError, exc:
|
||||
if exc.errno == errno.ESRCH:
|
||||
return False
|
||||
raise
|
||||
return True
|
||||
|
||||
def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None,
|
||||
callback=None):
|
||||
if not nodes:
|
||||
return
|
||||
P = set(nodes)
|
||||
|
||||
def on_down(node):
|
||||
P.discard(node)
|
||||
if callback:
|
||||
callback(*node)
|
||||
|
||||
self.note(self.colored.blue('> Stopping nodes...'))
|
||||
for node in list(P):
|
||||
if node in P:
|
||||
nodename, _, pid = node
|
||||
self.note('\t> %s: %s -> %s' % (nodename,
|
||||
SIGMAP[sig][3:],
|
||||
pid))
|
||||
if not self.signal_node(nodename, pid, sig):
|
||||
on_down(node)
|
||||
|
||||
def note_waiting():
|
||||
left = len(P)
|
||||
if left:
|
||||
pids = ', '.join(str(pid) for _, _, pid in P)
|
||||
self.note(self.colored.blue('> Waiting for %s %s -> %s...' % (
|
||||
left, pluralize(left, 'node'), pids)), newline=False)
|
||||
|
||||
if retry:
|
||||
note_waiting()
|
||||
its = 0
|
||||
while P:
|
||||
for node in P:
|
||||
its += 1
|
||||
self.note('.', newline=False)
|
||||
nodename, _, pid = node
|
||||
if not self.node_alive(pid):
|
||||
self.note('\n\t> %s: %s' % (nodename, self.OK))
|
||||
on_down(node)
|
||||
note_waiting()
|
||||
break
|
||||
if P and not its % len(P):
|
||||
sleep(float(retry))
|
||||
self.note('')
|
||||
|
||||
def getpids(self, p, cmd, callback=None):
|
||||
pidfile_template = p.options.setdefault('--pidfile', 'celeryd@%n.pid')
|
||||
|
||||
nodes = []
|
||||
for nodename, argv, expander in multi_args(p, cmd):
|
||||
pid = None
|
||||
pidfile = expander(pidfile_template)
|
||||
try:
|
||||
pid = Pidfile(pidfile).read_pid()
|
||||
except ValueError:
|
||||
pass
|
||||
if pid:
|
||||
nodes.append((nodename, tuple(argv), pid))
|
||||
else:
|
||||
self.note('> %s: %s' % (nodename, self.DOWN))
|
||||
if callback:
|
||||
callback(nodename, argv, pid)
|
||||
|
||||
return nodes
|
||||
|
||||
def kill(self, argv, cmd):
|
||||
self.splash()
|
||||
p = NamespacedOptionParser(argv)
|
||||
for nodename, _, pid in self.getpids(p, cmd):
|
||||
self.note('Killing node %s (%s)' % (nodename, pid))
|
||||
self.signal_node(nodename, pid, signal.SIGKILL)
|
||||
|
||||
def stop(self, argv, cmd, retry=None, callback=None):
|
||||
self.splash()
|
||||
p = NamespacedOptionParser(argv)
|
||||
return self._stop_nodes(p, cmd, retry=retry, callback=callback)
|
||||
|
||||
def _stop_nodes(self, p, cmd, retry=None, callback=None):
|
||||
restargs = p.args[len(p.values):]
|
||||
self.shutdown_nodes(self.getpids(p, cmd, callback=callback),
|
||||
sig=findsig(restargs),
|
||||
retry=retry,
|
||||
callback=callback)
|
||||
|
||||
def restart(self, argv, cmd):
|
||||
self.splash()
|
||||
p = NamespacedOptionParser(argv)
|
||||
self.with_detacher_default_options(p)
|
||||
retvals = []
|
||||
|
||||
def on_node_shutdown(nodename, argv, pid):
|
||||
self.note(self.colored.blue(
|
||||
'> Restarting node %s: ' % nodename), newline=False)
|
||||
retval = self.waitexec(argv)
|
||||
self.note(retval and self.FAILED or self.OK)
|
||||
retvals.append(retval)
|
||||
|
||||
self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown)
|
||||
self.retcode = int(any(retvals))  # propagate restart failures to the final exit code
|
||||
|
||||
def stopwait(self, argv, cmd):
|
||||
self.splash()
|
||||
p = NamespacedOptionParser(argv)
|
||||
self.with_detacher_default_options(p)
|
||||
return self._stop_nodes(p, cmd, retry=2)
|
||||
stop_verify = stopwait # compat
|
||||
|
||||
def expand(self, argv, cmd=None):
|
||||
template = argv[0]
|
||||
p = NamespacedOptionParser(argv[1:])
|
||||
for _, _, expander in multi_args(p, cmd):
|
||||
self.say(expander(template))
|
||||
|
||||
def help(self, argv, cmd=None):
|
||||
self.say(__doc__)
|
||||
|
||||
def usage(self):
|
||||
self.splash()
|
||||
self.say(USAGE % {'prog_name': self.prog_name})
|
||||
|
||||
def splash(self):
|
||||
if not self.nosplash:
|
||||
c = self.colored
|
||||
self.note(c.cyan('celeryd-multi v%s' % VERSION_BANNER))
|
||||
|
||||
def waitexec(self, argv, path=sys.executable):
|
||||
args = ' '.join([path] + list(argv))
|
||||
argstr = shellsplit(from_utf8(args))
|
||||
pipe = Popen(argstr, env=self.env)
|
||||
self.info(' %s' % ' '.join(argstr))
|
||||
retcode = pipe.wait()
|
||||
if retcode < 0:
|
||||
self.note('* Child was terminated by signal %s' % (-retcode, ))
|
||||
return -retcode
|
||||
elif retcode > 0:
|
||||
self.note('* Child terminated with failure code %s' % (retcode, ))
|
||||
return retcode
|
||||
|
||||
def error(self, msg=None):
|
||||
if msg:
|
||||
self.say(msg)
|
||||
self.usage()
|
||||
self.retcode = 1
|
||||
return 1
|
||||
|
||||
def info(self, msg, newline=True):
|
||||
if self.verbose:
|
||||
self.note(msg, newline=newline)
|
||||
|
||||
def note(self, msg, newline=True):
|
||||
if not self.quiet:
|
||||
self.say(str(msg), newline=newline)
|
||||
|
||||
@cached_property
|
||||
def colored(self):
|
||||
return term.colored(enabled=not self.no_color)
|
||||
|
||||
@cached_property
|
||||
def OK(self):
|
||||
return str(self.colored.green('OK'))
|
||||
|
||||
@cached_property
|
||||
def FAILED(self):
|
||||
return str(self.colored.red('FAILED'))
|
||||
|
||||
@cached_property
|
||||
def DOWN(self):
|
||||
return str(self.colored.magenta('DOWN'))
|
||||
|
||||
|
||||
def multi_args(p, cmd='celeryd', append='', prefix='', suffix=''):
|
||||
names = p.values
|
||||
options = dict(p.options)
|
||||
passthrough = p.passthrough
|
||||
ranges = len(names) == 1
|
||||
if ranges:
|
||||
try:
|
||||
noderange = int(names[0])
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
names = [str(v) for v in range(1, noderange + 1)]
|
||||
prefix = 'celery'
|
||||
cmd = options.pop('--cmd', cmd)
|
||||
append = options.pop('--append', append)
|
||||
hostname = options.pop('--hostname',
|
||||
options.pop('-n', socket.gethostname()))
|
||||
prefix = options.pop('--prefix', prefix) or ''
|
||||
suffix = options.pop('--suffix', suffix) or '.' + hostname
|
||||
if suffix in ('""', "''"):
|
||||
suffix = ''
|
||||
|
||||
for ns_name, ns_opts in p.namespaces.items():
|
||||
if ',' in ns_name or (ranges and '-' in ns_name):
|
||||
for subns in parse_ns_range(ns_name, ranges):
|
||||
p.namespaces[subns].update(ns_opts)
|
||||
p.namespaces.pop(ns_name)
|
||||
|
||||
for name in names:
|
||||
this_name = options['-n'] = prefix + name + suffix
|
||||
expand = abbreviations({'%h': this_name,
|
||||
'%n': name})
|
||||
argv = ([expand(cmd)] +
|
||||
[format_opt(opt, expand(value))
|
||||
for opt, value in p.optmerge(name, options).items()] +
|
||||
[passthrough])
|
||||
if append:
|
||||
argv.append(expand(append))
|
||||
yield this_name, argv, expand
|
||||
|
||||
|
||||
class NamespacedOptionParser(object):
|
||||
|
||||
def __init__(self, args):
|
||||
self.args = args
|
||||
self.options = {}
|
||||
self.values = []
|
||||
self.passthrough = ''
|
||||
self.namespaces = defaultdict(lambda: {})
|
||||
|
||||
self.parse()
|
||||
|
||||
def parse(self):
|
||||
rargs = list(self.args)
|
||||
pos = 0
|
||||
while pos < len(rargs):
|
||||
arg = rargs[pos]
|
||||
if arg == '--':
|
||||
self.passthrough = ' '.join(rargs[pos:])
|
||||
break
|
||||
elif arg[0] == '-':
|
||||
if arg[1] == '-':
|
||||
self.process_long_opt(arg[2:])
|
||||
else:
|
||||
value = None
|
||||
if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
|
||||
value = rargs[pos + 1]
|
||||
pos += 1
|
||||
self.process_short_opt(arg[1:], value)
|
||||
else:
|
||||
self.values.append(arg)
|
||||
pos += 1
|
||||
|
||||
def process_long_opt(self, arg, value=None):
|
||||
if '=' in arg:
|
||||
arg, value = arg.split('=', 1)
|
||||
self.add_option(arg, value, short=False)
|
||||
|
||||
def process_short_opt(self, arg, value=None):
|
||||
self.add_option(arg, value, short=True)
|
||||
|
||||
def optmerge(self, ns, defaults=None):
|
||||
if defaults is None:
|
||||
defaults = self.options
|
||||
return dict(defaults, **self.namespaces[ns])
|
||||
|
||||
def add_option(self, name, value, short=False, ns=None):
|
||||
prefix = short and '-' or '--'
|
||||
dest = self.options
|
||||
if ':' in name:
|
||||
name, ns = name.split(':')
|
||||
dest = self.namespaces[ns]
|
||||
dest[prefix + name] = value
|
||||
|
||||
|
||||
def quote(v):
|
||||
return "\\'".join("'" + p + "'" for p in v.split("'"))
|
||||
|
||||
|
||||
def format_opt(opt, value):
|
||||
if not value:
|
||||
return opt
|
||||
if opt.startswith('--'):
|
||||
return '%s=%s' % (opt, value)
|
||||
return '%s %s' % (opt, value)
|
||||
|
||||
|
||||
def parse_ns_range(ns, ranges=False):
|
||||
ret = []
|
||||
for space in ',' in ns and ns.split(',') or [ns]:
|
||||
if ranges and '-' in space:
|
||||
start, stop = space.split('-')
|
||||
x = [str(v) for v in range(int(start), int(stop) + 1)]
|
||||
ret.extend(x)
|
||||
else:
|
||||
ret.append(space)
|
||||
return ret
|
||||
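# Illustrative behaviour of parse_ns_range (values chosen for the example):
#
#   parse_ns_range('1-3,5', ranges=True)   # -> ['1', '2', '3', '5']
#   parse_ns_range('image,video')          # -> ['image', 'video']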
|
||||
|
||||
def abbreviations(mapping):
|
||||
|
||||
def expand(S):
|
||||
ret = S
|
||||
if S is not None:
|
||||
for short, long in mapping.items():
|
||||
ret = ret.replace(short, long)
|
||||
return ret
|
||||
|
||||
return expand
|
||||
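# A small sketch of how the expander returned by abbreviations() behaves
# (the node name is hypothetical):
#
#   expand = abbreviations({'%h': 'worker1.example.com', '%n': 'worker1'})
#   expand('celeryd@%n.log')   # -> 'celeryd@worker1.log'
#   expand(None)               # -> None (passed through unchanged)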
|
||||
|
||||
def findsig(args, default=signal.SIGTERM):
|
||||
for arg in reversed(args):
|
||||
if len(arg) == 2 and arg[0] == '-':
|
||||
try:
|
||||
return int(arg[1])
|
||||
except ValueError:
|
||||
pass
|
||||
if arg[0] == '-':
|
||||
maybe_sig = 'SIG' + arg[1:]
|
||||
if maybe_sig in SIGNAMES:
|
||||
return getattr(signal, maybe_sig)
|
||||
return default
|
||||
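# Examples of the signal lookup above (arguments are hypothetical):
#
#   findsig(['-9'])      # -> 9
#   findsig(['-USR1'])   # -> signal.SIGUSR1
#   findsig([])          # -> signal.SIGTERM (the default)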
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
117
awx/lib/site-packages/celery/bin/celeryev.py
Normal file
@@ -0,0 +1,117 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
The :program:`celery events` command.
|
||||
|
||||
.. program:: celery events
|
||||
|
||||
.. seealso::
|
||||
|
||||
See :ref:`preload-options` and :ref:`daemon-options`.
|
||||
|
||||
.. cmdoption:: -d, --dump
|
||||
|
||||
Dump events to stdout.
|
||||
|
||||
.. cmdoption:: -c, --camera
|
||||
|
||||
Take snapshots of events using this camera.
|
||||
|
||||
.. cmdoption:: --detach
|
||||
|
||||
Camera: Detach and run in the background as a daemon.
|
||||
|
||||
.. cmdoption:: -F, --freq, --frequency
|
||||
|
||||
Camera: Shutter frequency. Default is every 1.0 seconds.
|
||||
|
||||
.. cmdoption:: -r, --maxrate
|
||||
|
||||
Camera: Optional shutter rate limit (e.g. 10/m).
|
||||
|
||||
.. cmdoption:: -l, --loglevel
|
||||
|
||||
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
|
||||
`ERROR`, `CRITICAL`, or `FATAL`. Default is INFO.
|
||||
|
||||
"""
|
||||
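# Illustrative command lines for the options documented above (the camera
# path is hypothetical):
#
#   $ celery events                                        # curses monitor
#   $ celery events --dump                                 # dump events to stdout
#   $ celery events -c myapp.Camera --freq 2.0 --detach    # snapshot camera as a daemon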
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import sys
|
||||
|
||||
from functools import partial
|
||||
|
||||
from celery.platforms import detached, set_process_title, strargv
|
||||
from celery.bin.base import Command, Option, daemon_options
|
||||
|
||||
|
||||
class EvCommand(Command):
|
||||
doc = __doc__
|
||||
supports_args = False
|
||||
|
||||
def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
|
||||
loglevel='INFO', logfile=None, prog_name='celeryev',
|
||||
pidfile=None, uid=None, gid=None, umask=None,
|
||||
working_directory=None, detach=False, **kwargs):
|
||||
self.prog_name = prog_name
|
||||
|
||||
if dump:
|
||||
return self.run_evdump()
|
||||
if camera:
|
||||
return self.run_evcam(camera, freq=frequency, maxrate=maxrate,
|
||||
loglevel=loglevel, logfile=logfile,
|
||||
pidfile=pidfile, uid=uid, gid=gid,
|
||||
umask=umask,
|
||||
working_directory=working_directory,
|
||||
detach=detach)
|
||||
return self.run_evtop()
|
||||
|
||||
def run_evdump(self):
|
||||
from celery.events.dumper import evdump
|
||||
self.set_process_status('dump')
|
||||
return evdump(app=self.app)
|
||||
|
||||
def run_evtop(self):
|
||||
from celery.events.cursesmon import evtop
|
||||
self.set_process_status('top')
|
||||
return evtop(app=self.app)
|
||||
|
||||
def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
|
||||
gid=None, umask=None, working_directory=None,
|
||||
detach=False, **kwargs):
|
||||
from celery.events.snapshot import evcam
|
||||
workdir = working_directory
|
||||
self.set_process_status('cam')
|
||||
kwargs['app'] = self.app
|
||||
cam = partial(evcam, camera,
|
||||
logfile=logfile, pidfile=pidfile, **kwargs)
|
||||
|
||||
if detach:
|
||||
with detached(logfile, pidfile, uid, gid, umask, workdir):
|
||||
return cam()
|
||||
else:
|
||||
return cam()
|
||||
|
||||
def set_process_status(self, prog, info=''):
|
||||
prog = '%s:%s' % (self.prog_name, prog)
|
||||
info = '%s %s' % (info, strargv(sys.argv))
|
||||
return set_process_title(prog, info=info)
|
||||
|
||||
def get_options(self):
|
||||
return (
|
||||
Option('-d', '--dump', action='store_true'),
|
||||
Option('-c', '--camera'),
|
||||
Option('--detach', action='store_true'),
|
||||
Option('-F', '--frequency', '--freq', type='float', default=1.0),
|
||||
Option('-r', '--maxrate'),
|
||||
Option('-l', '--loglevel', default='INFO'),
|
||||
) + daemon_options(default_pidfile='celeryev.pid')
|
||||
|
||||
|
||||
def main():
|
||||
ev = EvCommand()
|
||||
ev.execute_from_commandline()
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
main()
|
||||
510
awx/lib/site-packages/celery/canvas.py
Normal file
@@ -0,0 +1,510 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.canvas
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Composing task workflows.
|
||||
|
||||
Documentation for these functions are in :mod:`celery`.
|
||||
You should not import from this module directly.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from copy import deepcopy
|
||||
from functools import partial as _partial
|
||||
from operator import itemgetter
|
||||
from itertools import chain as _chain
|
||||
|
||||
from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
|
||||
|
||||
from celery._state import current_app
|
||||
from celery.utils.compat import chain_from_iterable
|
||||
from celery.result import AsyncResult, GroupResult
|
||||
from celery.utils.functional import (
|
||||
maybe_list, is_list, regen,
|
||||
chunks as _chunks,
|
||||
)
|
||||
from celery.utils.text import truncate
|
||||
|
||||
|
||||
class _getitem_property(object):
|
||||
"""Attribute -> dict key descriptor.
|
||||
|
||||
The target object must support ``__getitem__``,
|
||||
and optionally ``__setitem__``.
|
||||
|
||||
Example:
|
||||
|
||||
class Me(dict):
|
||||
deep = defaultdict(dict)
|
||||
|
||||
foo = _getitem_property('foo')
|
||||
deep_thing = _getitem_property('deep.thing')
|
||||
|
||||
|
||||
>>> me = Me()
|
||||
>>> me.foo
|
||||
None
|
||||
|
||||
>>> me.foo = 10
|
||||
>>> me.foo
|
||||
10
|
||||
>>> me['foo']
|
||||
10
|
||||
|
||||
>>> me.deep_thing = 42
|
||||
>>> me.deep_thing
|
||||
42
|
||||
>>> me.deep
|
||||
defaultdict(<type 'dict'>, {'thing': 42})
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, keypath):
|
||||
path, _, self.key = keypath.rpartition('.')
|
||||
self.path = path.split('.') if path else None
|
||||
|
||||
def _path(self, obj):
|
||||
return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path
|
||||
else obj)
|
||||
|
||||
def __get__(self, obj, type=None):
|
||||
if obj is None:
|
||||
return type
|
||||
return self._path(obj).get(self.key)
|
||||
|
||||
def __set__(self, obj, value):
|
||||
self._path(obj)[self.key] = value
|
||||
|
||||
|
||||
class Signature(dict):
|
||||
"""Class that wraps the arguments and execution options
|
||||
for a single task invocation.
|
||||
|
||||
Used as the parts in a :class:`group` or to safely
|
||||
pass tasks around as callbacks.
|
||||
|
||||
:param task: Either a task class/instance, or the name of a task.
|
||||
:keyword args: Positional arguments to apply.
|
||||
:keyword kwargs: Keyword arguments to apply.
|
||||
:keyword options: Additional options to :meth:`Task.apply_async`.
|
||||
|
||||
Note that if the first argument is a :class:`dict`, the other
|
||||
arguments will be ignored and the values in the dict will be used
|
||||
instead.
|
||||
|
||||
>>> s = subtask('tasks.add', args=(2, 2))
|
||||
>>> subtask(s)
|
||||
{'task': 'tasks.add', 'args': (2, 2), 'kwargs': {}, 'options': {}}
|
||||
|
||||
"""
|
||||
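# A short sketch of typical use, assuming registered tasks named 'tasks.add'
# and 'tasks.log_result' (not part of the original source):
#
#   add = Signature('tasks.add', args=(2, 2))
#   add.link(Signature('tasks.log_result'))   # attach a callback signature
#   add.clone(args=(4, ))                     # partial: (4, ) is prepended to the stored args
#   add.apply_async()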
TYPES = {}
|
||||
_type = None
|
||||
|
||||
@classmethod
|
||||
def register_type(cls, subclass, name=None):
|
||||
cls.TYPES[name or subclass.__name__] = subclass
|
||||
return subclass
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
typ = d.get('subtask_type')
|
||||
if typ:
|
||||
return self.TYPES[typ].from_dict(kwdict(d))
|
||||
return Signature(d)
|
||||
|
||||
def __init__(self, task=None, args=None, kwargs=None, options=None,
|
||||
type=None, subtask_type=None, immutable=False, **ex):
|
||||
init = dict.__init__
|
||||
|
||||
if isinstance(task, dict):
|
||||
return init(self, task) # works like dict(d)
|
||||
|
||||
# Also supports using task class/instance instead of string name.
|
||||
try:
|
||||
task_name = task.name
|
||||
except AttributeError:
|
||||
task_name = task
|
||||
else:
|
||||
self._type = task
|
||||
|
||||
init(self,
|
||||
task=task_name, args=tuple(args or ()),
|
||||
kwargs=kwargs or {},
|
||||
options=dict(options or {}, **ex),
|
||||
subtask_type=subtask_type,
|
||||
immutable=immutable)
|
||||
|
||||
def __call__(self, *partial_args, **partial_kwargs):
|
||||
return self.apply_async(partial_args, partial_kwargs)
|
||||
delay = __call__
|
||||
|
||||
def apply(self, args=(), kwargs={}, **options):
|
||||
"""Apply this task locally."""
|
||||
# For callbacks: extra args are prepended to the stored args.
|
||||
args, kwargs, options = self._merge(args, kwargs, options)
|
||||
return self.type.apply(args, kwargs, **options)
|
||||
|
||||
def _merge(self, args=(), kwargs={}, options={}):
|
||||
if self.immutable:
|
||||
return self.args, self.kwargs, dict(self.options, **options)
|
||||
return (tuple(args) + tuple(self.args) if args else self.args,
|
||||
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
|
||||
dict(self.options, **options) if options else self.options)
|
||||
|
||||
def clone(self, args=(), kwargs={}, **opts):
|
||||
# need to deepcopy options so the original's links etc. are not modified.
|
||||
args, kwargs, opts = self._merge(args, kwargs, opts)
|
||||
s = Signature.from_dict({'task': self.task, 'args': tuple(args),
|
||||
'kwargs': kwargs, 'options': deepcopy(opts),
|
||||
'subtask_type': self.subtask_type,
|
||||
'immutable': self.immutable})
|
||||
s._type = self._type
|
||||
return s
|
||||
partial = clone
|
||||
|
||||
def _freeze(self, _id=None):
|
||||
opts = self.options
|
||||
try:
|
||||
tid = opts['task_id']
|
||||
except KeyError:
|
||||
tid = opts['task_id'] = _id or uuid()
|
||||
return self.AsyncResult(tid)
|
||||
|
||||
def replace(self, args=None, kwargs=None, options=None):
|
||||
s = self.clone()
|
||||
if args is not None:
|
||||
s.args = args
|
||||
if kwargs is not None:
|
||||
s.kwargs = kwargs
|
||||
if options is not None:
|
||||
s.options = options
|
||||
return s
|
||||
|
||||
def set(self, immutable=None, **options):
|
||||
if immutable is not None:
|
||||
self.immutable = immutable
|
||||
self.options.update(options)
|
||||
return self
|
||||
|
||||
def apply_async(self, args=(), kwargs={}, **options):
|
||||
# For callbacks: extra args are prepended to the stored args.
|
||||
args, kwargs, options = self._merge(args, kwargs, options)
|
||||
return self._apply_async(args, kwargs, **options)
|
||||
|
||||
def append_to_list_option(self, key, value):
|
||||
items = self.options.setdefault(key, [])
|
||||
if value not in items:
|
||||
items.append(value)
|
||||
return value
|
||||
|
||||
def link(self, callback):
|
||||
return self.append_to_list_option('link', callback)
|
||||
|
||||
def link_error(self, errback):
|
||||
return self.append_to_list_option('link_error', errback)
|
||||
|
||||
def flatten_links(self):
|
||||
return list(chain_from_iterable(_chain(
|
||||
[[self]],
|
||||
(link.flatten_links()
|
||||
for link in maybe_list(self.options.get('link')) or [])
|
||||
)))
|
||||
|
||||
def __or__(self, other):
|
||||
if not isinstance(self, chain) and isinstance(other, chain):
|
||||
return chain((self,) + other.tasks)
|
||||
elif isinstance(other, chain):
|
||||
return chain(*self.tasks + other.tasks)
|
||||
elif isinstance(other, Signature):
|
||||
if isinstance(self, chain):
|
||||
return chain(*self.tasks + (other, ))
|
||||
return chain(self, other)
|
||||
return NotImplemented
|
||||
|
||||
def __invert__(self):
|
||||
return self.apply_async().get()
|
||||
|
||||
def __reduce__(self):
|
||||
# for serialization, the task type is lazily loaded,
|
||||
# and not stored in the dict itself.
|
||||
return subtask, (dict(self), )
|
||||
|
||||
def reprcall(self, *args, **kwargs):
|
||||
args, kwargs, _ = self._merge(args, kwargs, {})
|
||||
return reprcall(self['task'], args, kwargs)
|
||||
|
||||
def __repr__(self):
|
||||
return self.reprcall()
|
||||
|
||||
@cached_property
|
||||
def type(self):
|
||||
return self._type or current_app.tasks[self['task']]
|
||||
|
||||
@cached_property
|
||||
def AsyncResult(self):
|
||||
try:
|
||||
return self.type.AsyncResult
|
||||
except KeyError: # task not registered
|
||||
return AsyncResult
|
||||
|
||||
@cached_property
|
||||
def _apply_async(self):
|
||||
try:
|
||||
return self.type.apply_async
|
||||
except KeyError:
|
||||
return _partial(current_app.send_task, self['task'])
|
||||
id = _getitem_property('options.task_id')
|
||||
task = _getitem_property('task')
|
||||
args = _getitem_property('args')
|
||||
kwargs = _getitem_property('kwargs')
|
||||
options = _getitem_property('options')
|
||||
subtask_type = _getitem_property('subtask_type')
|
||||
immutable = _getitem_property('immutable')
|
||||
|
||||
|
||||
class chain(Signature):
|
||||
|
||||
def __init__(self, *tasks, **options):
|
||||
tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
|
||||
Signature.__init__(
|
||||
self, 'celery.chain', (), {'tasks': tasks}, **options
|
||||
)
|
||||
self.tasks = tasks
|
||||
self.subtask_type = 'chain'
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
if self.tasks:
|
||||
return self.apply_async(args, kwargs)
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
tasks = d['kwargs']['tasks']
|
||||
if d['args'] and tasks:
|
||||
# partial args passed on to first task in chain (Issue #1057).
|
||||
tasks[0]['args'] = d['args'] + tasks[0]['args']
|
||||
return chain(*d['kwargs']['tasks'], **kwdict(d['options']))
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return self._type or self.tasks[0].type.app.tasks['celery.chain']
|
||||
|
||||
def __repr__(self):
|
||||
return ' | '.join(repr(t) for t in self.tasks)
|
||||
Signature.register_type(chain)
|
||||
|
||||
|
||||
class _basemap(Signature):
|
||||
_task_name = None
|
||||
_unpack_args = itemgetter('task', 'it')
|
||||
|
||||
def __init__(self, task, it, **options):
|
||||
Signature.__init__(
|
||||
self, self._task_name, (),
|
||||
{'task': task, 'it': regen(it)}, immutable=True, **options
|
||||
)
|
||||
|
||||
def apply_async(self, args=(), kwargs={}, **opts):
|
||||
# need to evaluate generators
|
||||
task, it = self._unpack_args(self.kwargs)
|
||||
return self.type.apply_async(
|
||||
(), {'task': task, 'it': list(it)}, **opts
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
return chunks(*self._unpack_args(d['kwargs']), **d['options'])
|
||||
|
||||
|
||||
class xmap(_basemap):
|
||||
_task_name = 'celery.map'
|
||||
|
||||
def __repr__(self):
|
||||
task, it = self._unpack_args(self.kwargs)
|
||||
return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100))
|
||||
Signature.register_type(xmap)
|
||||
|
||||
|
||||
class xstarmap(_basemap):
|
||||
_task_name = 'celery.starmap'
|
||||
|
||||
def __repr__(self):
|
||||
task, it = self._unpack_args(self.kwargs)
|
||||
return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100))
|
||||
Signature.register_type(xstarmap)
|
||||
|
||||
|
||||
class chunks(Signature):
|
||||
_unpack_args = itemgetter('task', 'it', 'n')
|
||||
|
||||
def __init__(self, task, it, n, **options):
|
||||
Signature.__init__(
|
||||
self, 'celery.chunks', (),
|
||||
{'task': task, 'it': regen(it), 'n': n},
|
||||
immutable=True, **options
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
return chunks(*self._unpack_args(d['kwargs']), **d['options'])
|
||||
|
||||
def apply_async(self, args=(), kwargs={}, **opts):
|
||||
return self.group().apply_async(args, kwargs, **opts)
|
||||
|
||||
def __call__(self, **options):
|
||||
return self.group()(**options)
|
||||
|
||||
def group(self):
|
||||
# need to evaluate generators
|
||||
task, it, n = self._unpack_args(self.kwargs)
|
||||
return group(xstarmap(task, part) for part in _chunks(iter(it), n))
|
||||
|
||||
@classmethod
|
||||
def apply_chunks(cls, task, it, n):
|
||||
return cls(task, it, n)()
|
||||
Signature.register_type(chunks)
|
||||
|
||||
|
||||
def _maybe_group(tasks):
|
||||
if isinstance(tasks, group):
|
||||
tasks = list(tasks.tasks)
|
||||
elif isinstance(tasks, Signature):
|
||||
tasks = [tasks]
|
||||
else:
|
||||
tasks = regen(tasks)
|
||||
return tasks
|
||||
|
||||
|
||||
class group(Signature):
|
||||
|
||||
def __init__(self, *tasks, **options):
|
||||
if len(tasks) == 1:
|
||||
tasks = _maybe_group(tasks[0])
|
||||
Signature.__init__(
|
||||
self, 'celery.group', (), {'tasks': tasks}, **options
|
||||
)
|
||||
self.tasks, self.subtask_type = tasks, 'group'
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
tasks = d['kwargs']['tasks']
|
||||
if d['args'] and tasks:
|
||||
# partial args passed on to all tasks in the group (Issue #1057).
|
||||
for task in tasks:
|
||||
task['args'] = d['args'] + task['args']
|
||||
return group(tasks, **kwdict(d['options']))
|
||||
|
||||
def __call__(self, *partial_args, **options):
|
||||
tasks = [task.clone() for task in self.tasks]
|
||||
if not tasks:
|
||||
return
|
||||
# taking the app from the first task in the list,
|
||||
# there may be a better solution to this, e.g.
|
||||
# consolidate tasks with the same app and apply them in
|
||||
# batches.
|
||||
type = tasks[0].type.app.tasks[self['task']]
|
||||
return type(*type.prepare(options, tasks, partial_args))
|
||||
|
||||
def _freeze(self, _id=None):
|
||||
opts = self.options
|
||||
try:
|
||||
gid = opts['group']
|
||||
except KeyError:
|
||||
gid = opts['group'] = uuid()
|
||||
new_tasks, results = [], []
|
||||
for task in self.tasks:
|
||||
task = maybe_subtask(task).clone()
|
||||
results.append(task._freeze())
|
||||
new_tasks.append(task)
|
||||
self.tasks = self.kwargs['tasks'] = new_tasks
|
||||
return GroupResult(gid, results)
|
||||
|
||||
def skew(self, start=1.0, stop=None, step=1.0):
|
||||
_next_skew = fxrange(start, stop, step, repeatlast=True).next
|
||||
for task in self.tasks:
|
||||
task.set(countdown=_next_skew())
|
||||
return self
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.tasks)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.tasks)
|
||||
Signature.register_type(group)
|
||||
|
||||
|
||||
class chord(Signature):
|
||||
|
||||
def __init__(self, header, body=None, task='celery.chord',
|
||||
args=(), kwargs={}, **options):
|
||||
Signature.__init__(
|
||||
self, task, args,
|
||||
dict(kwargs, header=_maybe_group(header),
|
||||
body=maybe_subtask(body)), **options
|
||||
)
|
||||
self.subtask_type = 'chord'
|
||||
|
||||
@classmethod
|
||||
def from_dict(self, d):
|
||||
args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
|
||||
return self(*args, **kwdict(d))
|
||||
|
||||
@staticmethod
|
||||
def _unpack_args(header=None, body=None, **kwargs):
|
||||
# Python signatures are better at extracting keys from dicts
|
||||
# than manually popping things off.
|
||||
return (header, body), kwargs
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return self._type or self.tasks[0].type.app.tasks['celery.chord']
|
||||
|
||||
def __call__(self, body=None, **kwargs):
|
||||
_chord = self.type
|
||||
body = (body or self.kwargs['body']).clone()
|
||||
kwargs = dict(self.kwargs, body=body, **kwargs)
|
||||
if _chord.app.conf.CELERY_ALWAYS_EAGER:
|
||||
return self.apply((), kwargs)
|
||||
callback_id = body.options.setdefault('task_id', uuid())
|
||||
return _chord.AsyncResult(callback_id, parent=_chord(**kwargs))
|
||||
|
||||
def clone(self, *args, **kwargs):
|
||||
s = Signature.clone(self, *args, **kwargs)
|
||||
# need to make copy of body
|
||||
try:
|
||||
s.kwargs['body'] = s.kwargs['body'].clone()
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
return s
|
||||
|
||||
def link(self, callback):
|
||||
self.body.link(callback)
|
||||
return callback
|
||||
|
||||
def link_error(self, errback):
|
||||
self.body.link_error(errback)
|
||||
return errback
|
||||
|
||||
def __repr__(self):
|
||||
if self.body:
|
||||
return self.body.reprcall(self.tasks)
|
||||
return '<chord without body: %r>' % (self.tasks, )
|
||||
|
||||
tasks = _getitem_property('kwargs.header')
|
||||
body = _getitem_property('kwargs.body')
|
||||
Signature.register_type(chord)
|
||||
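# Rough sketch of composing the primitives above, assuming registered tasks
# named 'tasks.add', 'tasks.mul' and 'tasks.tsum' (illustrative only):
#
#   header = group(Signature('tasks.add', args=(i, i)) for i in xrange(10))
#   chord(header, Signature('tasks.tsum'))()                # body runs once the group finishes
#   (Signature('tasks.add', args=(2, 2)) |
#    Signature('tasks.mul', args=(8, ))).apply_async()      # `|` builds a chain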
|
||||
|
||||
def subtask(varies, *args, **kwargs):
|
||||
if not (args or kwargs) and isinstance(varies, dict):
|
||||
if isinstance(varies, Signature):
|
||||
return varies.clone()
|
||||
return Signature.from_dict(varies)
|
||||
return Signature(varies, *args, **kwargs)
|
||||
|
||||
|
||||
def maybe_subtask(d):
|
||||
if d is not None and isinstance(d, dict) and not isinstance(d, Signature):
|
||||
return subtask(d)
|
||||
return d
|
||||
26
awx/lib/site-packages/celery/concurrency/__init__.py
Normal file
@@ -0,0 +1,26 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Pool implementation abstract factory, and alias definitions.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import from kombu directly as it's used
|
||||
# early in the import stage, where celery.utils loads
|
||||
# too much (e.g. for eventlet patching)
|
||||
from kombu.utils import symbol_by_name
|
||||
|
||||
ALIASES = {
|
||||
'processes': 'celery.concurrency.processes:TaskPool',
|
||||
'eventlet': 'celery.concurrency.eventlet:TaskPool',
|
||||
'gevent': 'celery.concurrency.gevent:TaskPool',
|
||||
'threads': 'celery.concurrency.threads:TaskPool',
|
||||
'solo': 'celery.concurrency.solo:TaskPool',
|
||||
}
|
||||
|
||||
|
||||
def get_implementation(cls):
|
||||
return symbol_by_name(cls, ALIASES)
|
||||
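# Example of resolving a pool class through the alias table above (a minimal
# sketch; both lookups return the same class object):
#
#   TaskPool = get_implementation('processes')
#   TaskPool is get_implementation('celery.concurrency.processes:TaskPool')   # True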
166
awx/lib/site-packages/celery/concurrency/base.py
Normal file
@@ -0,0 +1,166 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.base
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
TaskPool interface.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from kombu.utils.encoding import safe_repr
|
||||
|
||||
from celery.utils import timer2
|
||||
from celery.utils.log import get_logger
|
||||
|
||||
logger = get_logger('celery.concurrency')
|
||||
|
||||
|
||||
def apply_target(target, args=(), kwargs={}, callback=None,
|
||||
accept_callback=None, pid=None, **_):
|
||||
if accept_callback:
|
||||
accept_callback(pid or os.getpid(), time.time())
|
||||
callback(target(*args, **kwargs))
|
||||
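# Tiny illustration of apply_target (callables chosen just for the example):
#
#   collected = []
#   apply_target(sum, args=([1, 2, 3], ), callback=collected.append)
#   # collected == [6]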
|
||||
|
||||
class BasePool(object):
|
||||
RUN = 0x1
|
||||
CLOSE = 0x2
|
||||
TERMINATE = 0x3
|
||||
|
||||
Timer = timer2.Timer
|
||||
|
||||
#: set to true if the pool can be shut down from within
|
||||
#: a signal handler.
|
||||
signal_safe = True
|
||||
|
||||
#: set to true if pool supports rate limits.
|
||||
#: (this is here for gevent, which currently does not implement
|
||||
#: the necessary timers).
|
||||
rlimit_safe = True
|
||||
|
||||
#: set to true if pool requires the use of a mediator
|
||||
#: thread (e.g. if applying new items can block the current thread).
|
||||
requires_mediator = False
|
||||
|
||||
#: set to true if pool uses greenlets.
|
||||
is_green = False
|
||||
|
||||
_state = None
|
||||
_pool = None
|
||||
|
||||
#: only used by multiprocessing pool
|
||||
uses_semaphore = False
|
||||
|
||||
def __init__(self, limit=None, putlocks=True,
|
||||
forking_enable=True, callbacks_propagate=(), **options):
|
||||
self.limit = limit
|
||||
self.putlocks = putlocks
|
||||
self.options = options
|
||||
self.forking_enable = forking_enable
|
||||
self.callbacks_propagate = callbacks_propagate
|
||||
self._does_debug = logger.isEnabledFor(logging.DEBUG)
|
||||
|
||||
def on_start(self):
|
||||
pass
|
||||
|
||||
def did_start_ok(self):
|
||||
return True
|
||||
|
||||
def on_stop(self):
|
||||
pass
|
||||
|
||||
def on_apply(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def on_terminate(self):
|
||||
pass
|
||||
|
||||
def on_soft_timeout(self, job):
|
||||
pass
|
||||
|
||||
def on_hard_timeout(self, job):
|
||||
pass
|
||||
|
||||
def maybe_handle_result(self, *args):
|
||||
pass
|
||||
|
||||
def maintain_pool(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def terminate_job(self, pid):
|
||||
raise NotImplementedError(
|
||||
'%s does not implement kill_job' % (self.__class__, ))
|
||||
|
||||
def restart(self):
|
||||
raise NotImplementedError(
|
||||
'%s does not implement restart' % (self.__class__, ))
|
||||
|
||||
def stop(self):
|
||||
self.on_stop()
|
||||
self._state = self.TERMINATE
|
||||
|
||||
def terminate(self):
|
||||
self._state = self.TERMINATE
|
||||
self.on_terminate()
|
||||
|
||||
def start(self):
|
||||
self.on_start()
|
||||
self._state = self.RUN
|
||||
|
||||
def close(self):
|
||||
self._state = self.CLOSE
|
||||
self.on_close()
|
||||
|
||||
def on_close(self):
|
||||
pass
|
||||
|
||||
def init_callbacks(self, **kwargs):
|
||||
pass
|
||||
|
||||
def apply_async(self, target, args=[], kwargs={}, **options):
|
||||
"""Equivalent of the :func:`apply` built-in function.
|
||||
|
||||
Callbacks should optimally return as soon as possible since
|
||||
otherwise the thread which handles the result will get blocked.
|
||||
|
||||
"""
|
||||
if self._does_debug:
|
||||
logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
|
||||
target, safe_repr(args), safe_repr(kwargs))
|
||||
|
||||
return self.on_apply(target, args, kwargs,
|
||||
waitforslot=self.putlocks,
|
||||
callbacks_propagate=self.callbacks_propagate,
|
||||
**options)
|
||||
|
||||
def _get_info(self):
|
||||
return {}
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
return self._get_info()
|
||||
|
||||
@property
|
||||
def active(self):
|
||||
return self._state == self.RUN
|
||||
|
||||
@property
|
||||
def num_processes(self):
|
||||
return self.limit
|
||||
|
||||
@property
|
||||
def readers(self):
|
||||
return {}
|
||||
|
||||
@property
|
||||
def writers(self):
|
||||
return {}
|
||||
|
||||
@property
|
||||
def timers(self):
|
||||
return {}
|
||||
153
awx/lib/site-packages/celery/concurrency/eventlet.py
Normal file
@@ -0,0 +1,153 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.eventlet
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Eventlet pool implementation.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
EVENTLET_NOPATCH = os.environ.get('EVENTLET_NOPATCH', False)
|
||||
EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0))
|
||||
W_RACE = """\
|
||||
Celery module with %s imported before eventlet patched\
|
||||
"""
|
||||
RACE_MODS = ('billiard.', 'celery.', 'kombu.')
|
||||
|
||||
|
||||
#: Warn if we couldn't patch early enough,
|
||||
#: and thread/socket depending celery modules have already been loaded.
|
||||
for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)):
|
||||
for side in ('thread', 'threading', 'socket'):
|
||||
if getattr(sys.modules[mod], side, None):  # mod is a module name; look up the module object
|
||||
import warnings
|
||||
warnings.warn(RuntimeWarning(W_RACE % side))
|
||||
|
||||
|
||||
PATCHED = [0]
|
||||
if not EVENTLET_NOPATCH and not PATCHED[0]:
|
||||
PATCHED[0] += 1
|
||||
import eventlet
|
||||
import eventlet.debug
|
||||
eventlet.monkey_patch()
|
||||
eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK)
|
||||
|
||||
from time import time
|
||||
|
||||
from celery import signals
|
||||
from celery.utils import timer2
|
||||
|
||||
from . import base
|
||||
|
||||
|
||||
def apply_target(target, args=(), kwargs={}, callback=None,
|
||||
accept_callback=None, getpid=None):
|
||||
return base.apply_target(target, args, kwargs, callback, accept_callback,
|
||||
pid=getpid())
|
||||
|
||||
|
||||
class Schedule(timer2.Schedule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
from eventlet.greenthread import spawn_after
|
||||
from greenlet import GreenletExit
|
||||
super(Schedule, self).__init__(*args, **kwargs)
|
||||
|
||||
self.GreenletExit = GreenletExit
|
||||
self._spawn_after = spawn_after
|
||||
self._queue = set()
|
||||
|
||||
def _enter(self, eta, priority, entry):
|
||||
secs = max(eta - time(), 0)
|
||||
g = self._spawn_after(secs, entry)
|
||||
self._queue.add(g)
|
||||
g.link(self._entry_exit, entry)
|
||||
g.entry = entry
|
||||
g.eta = eta
|
||||
g.priority = priority
|
||||
g.cancelled = False
|
||||
return g
|
||||
|
||||
def _entry_exit(self, g, entry):
|
||||
try:
|
||||
try:
|
||||
g.wait()
|
||||
except self.GreenletExit:
|
||||
entry.cancel()
|
||||
g.cancelled = True
|
||||
finally:
|
||||
self._queue.discard(g)
|
||||
|
||||
def clear(self):
|
||||
queue = self._queue
|
||||
while queue:
|
||||
try:
|
||||
queue.pop().cancel()
|
||||
except (KeyError, self.GreenletExit):
|
||||
pass
|
||||
|
||||
@property
|
||||
def queue(self):
|
||||
return [(g.eta, g.priority, g.entry) for g in self._queue]
|
||||
|
||||
|
||||
class Timer(timer2.Timer):
|
||||
Schedule = Schedule
|
||||
|
||||
def ensure_started(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
self.schedule.clear()
|
||||
|
||||
def cancel(self, tref):
|
||||
try:
|
||||
tref.cancel()
|
||||
except self.schedule.GreenletExit:
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
|
||||
class TaskPool(base.BasePool):
|
||||
Timer = Timer
|
||||
|
||||
rlimit_safe = False
|
||||
signal_safe = False
|
||||
is_green = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
from eventlet import greenthread
|
||||
from eventlet.greenpool import GreenPool
|
||||
self.Pool = GreenPool
|
||||
self.getcurrent = greenthread.getcurrent
|
||||
self.getpid = lambda: id(greenthread.getcurrent())
|
||||
self.spawn_n = greenthread.spawn_n
|
||||
|
||||
super(TaskPool, self).__init__(*args, **kwargs)
|
||||
|
||||
def on_start(self):
|
||||
self._pool = self.Pool(self.limit)
|
||||
signals.eventlet_pool_started.send(sender=self)
|
||||
self._quick_put = self._pool.spawn_n
|
||||
self._quick_apply_sig = signals.eventlet_pool_apply.send
|
||||
|
||||
def on_stop(self):
|
||||
signals.eventlet_pool_preshutdown.send(sender=self)
|
||||
if self._pool is not None:
|
||||
self._pool.waitall()
|
||||
signals.eventlet_pool_postshutdown.send(sender=self)
|
||||
|
||||
def on_apply(self, target, args=None, kwargs=None, callback=None,
|
||||
accept_callback=None, **_):
|
||||
self._quick_apply_sig(
|
||||
sender=self, target=target, args=args, kwargs=kwargs,
|
||||
)
|
||||
self._quick_put(apply_target, target, args, kwargs,
|
||||
callback, accept_callback,
|
||||
self.getpid)
|
||||
149
awx/lib/site-packages/celery/concurrency/gevent.py
Normal file
@@ -0,0 +1,149 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.gevent
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
gevent pool implementation.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
|
||||
PATCHED = [0]
|
||||
if not os.environ.get('GEVENT_NOPATCH') and not PATCHED[0]:
|
||||
PATCHED[0] += 1
|
||||
from gevent import monkey, version_info
|
||||
monkey.patch_all()
|
||||
if version_info[0] == 0:
|
||||
# Signals do not work with gevent in versions prior to 1.0,
|
||||
# and they are not monkey patched by monkey.patch_all().
|
||||
from gevent import signal as _gevent_signal
|
||||
_signal = __import__('signal')
|
||||
_signal.signal = _gevent_signal
|
||||
|
||||
try:
|
||||
from gevent import Timeout
|
||||
except ImportError:
|
||||
Timeout = None # noqa
|
||||
|
||||
from time import time
|
||||
|
||||
from celery.utils import timer2
|
||||
|
||||
from .base import apply_target, BasePool
|
||||
|
||||
|
||||
def apply_timeout(target, args=(), kwargs={}, callback=None,
|
||||
accept_callback=None, pid=None, timeout=None,
|
||||
timeout_callback=None, **rest):
|
||||
try:
|
||||
with Timeout(timeout):
|
||||
return apply_target(target, args, kwargs, callback,
|
||||
accept_callback, pid, **rest)
|
||||
except Timeout:
|
||||
return timeout_callback(False, timeout)
|
||||
|
||||
|
||||
class Schedule(timer2.Schedule):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
from gevent.greenlet import Greenlet, GreenletExit
|
||||
|
||||
class _Greenlet(Greenlet):
|
||||
|
||||
def cancel(self):
|
||||
self.kill()
|
||||
|
||||
self._Greenlet = _Greenlet
|
||||
self._GreenletExit = GreenletExit
|
||||
super(Schedule, self).__init__(*args, **kwargs)
|
||||
self._queue = set()
|
||||
|
||||
def _enter(self, eta, priority, entry):
|
||||
secs = max(eta - time(), 0)
|
||||
g = self._Greenlet.spawn_later(secs, entry)
|
||||
self._queue.add(g)
|
||||
g.link(self._entry_exit)
|
||||
g.entry = entry
|
||||
g.eta = eta
|
||||
g.priority = priority
|
||||
g.cancelled = False
|
||||
return g
|
||||
|
||||
def _entry_exit(self, g):
|
||||
try:
|
||||
g.kill()
|
||||
finally:
|
||||
self._queue.discard(g)
|
||||
|
||||
def clear(self):
|
||||
queue = self._queue
|
||||
while queue:
|
||||
try:
|
||||
queue.pop().kill()
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
@property
|
||||
def queue(self):
|
||||
return [(g.eta, g.priority, g.entry) for g in self._queue]
|
||||
|
||||
|
||||
class Timer(timer2.Timer):
|
||||
Schedule = Schedule
|
||||
|
||||
def ensure_started(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
self.schedule.clear()
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
|
||||
class TaskPool(BasePool):
|
||||
Timer = Timer
|
||||
|
||||
signal_safe = False
|
||||
rlimit_safe = False
|
||||
is_green = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
from gevent import spawn_raw
|
||||
from gevent.pool import Pool
|
||||
self.Pool = Pool
|
||||
self.spawn_n = spawn_raw
|
||||
self.timeout = kwargs.get('timeout')
|
||||
super(TaskPool, self).__init__(*args, **kwargs)
|
||||
|
||||
def on_start(self):
|
||||
self._pool = self.Pool(self.limit)
|
||||
self._quick_put = self._pool.spawn
|
||||
|
||||
def on_stop(self):
|
||||
if self._pool is not None:
|
||||
self._pool.join()
|
||||
|
||||
def on_apply(self, target, args=None, kwargs=None, callback=None,
|
||||
accept_callback=None, timeout=None,
|
||||
timeout_callback=None, **_):
|
||||
timeout = self.timeout if timeout is None else timeout
|
||||
return self._quick_put(apply_timeout if timeout else apply_target,
|
||||
target, args, kwargs, callback, accept_callback,
|
||||
timeout=timeout,
|
||||
timeout_callback=timeout_callback)
|
||||
|
||||
def grow(self, n=1):
|
||||
self._pool._semaphore.counter += n
|
||||
self._pool.size += n
|
||||
|
||||
def shrink(self, n=1):
|
||||
self._pool._semaphore.counter -= n
|
||||
self._pool.size -= n
|
||||
|
||||
@property
|
||||
def num_processes(self):
|
||||
return len(self._pool)
|
||||
148
awx/lib/site-packages/celery/concurrency/processes/__init__.py
Normal file
@@ -0,0 +1,148 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.processes
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Pool implementation using :mod:`multiprocessing`.
|
||||
|
||||
We use the billiard fork of multiprocessing which contains
|
||||
numerous improvements.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
|
||||
from billiard import forking_enable
|
||||
from billiard.pool import Pool, RUN, CLOSE
|
||||
|
||||
from celery import platforms
|
||||
from celery import signals
|
||||
from celery._state import set_default_app
|
||||
from celery.concurrency.base import BasePool
|
||||
from celery.task import trace
|
||||
|
||||
#: List of signals to reset when a child process starts.
|
||||
WORKER_SIGRESET = frozenset(['SIGTERM',
|
||||
'SIGHUP',
|
||||
'SIGTTIN',
|
||||
'SIGTTOU',
|
||||
'SIGUSR1'])
|
||||
|
||||
#: List of signals to ignore when a child process starts.
|
||||
WORKER_SIGIGNORE = frozenset(['SIGINT'])
|
||||
|
||||
|
||||
def process_initializer(app, hostname):
|
||||
"""Initializes the process so it can be used to process tasks."""
|
||||
platforms.signals.reset(*WORKER_SIGRESET)
|
||||
platforms.signals.ignore(*WORKER_SIGIGNORE)
|
||||
platforms.set_mp_process_title('celeryd', hostname=hostname)
|
||||
# This is for Windows and other platforms not supporting
|
||||
# fork(). Note that init_worker makes sure it's only
|
||||
# run once per process.
|
||||
app.loader.init_worker()
|
||||
app.loader.init_worker_process()
|
||||
app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0)),
|
||||
os.environ.get('CELERY_LOG_FILE') or None,
|
||||
bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
|
||||
str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
|
||||
if os.environ.get('FORKED_BY_MULTIPROCESSING'):
|
||||
# pool did execv after fork
|
||||
trace.setup_worker_optimizations(app)
|
||||
else:
|
||||
app.set_current()
|
||||
set_default_app(app)
|
||||
app.finalize()
|
||||
trace._tasks = app._tasks # enables fast_trace_task optimization.
|
||||
from celery.task.trace import build_tracer
|
||||
for name, task in app.tasks.iteritems():
|
||||
task.__trace__ = build_tracer(name, task, app.loader, hostname)
|
||||
signals.worker_process_init.send(sender=None)
|
||||
|
||||
|
||||
class TaskPool(BasePool):
|
||||
"""Multiprocessing Pool implementation."""
|
||||
Pool = Pool
|
||||
|
||||
requires_mediator = True
|
||||
uses_semaphore = True
|
||||
|
||||
def on_start(self):
|
||||
"""Run the task pool.
|
||||
|
||||
Will pre-fork all workers so they're ready to accept tasks.
|
||||
|
||||
"""
|
||||
forking_enable(self.forking_enable)
|
||||
P = self._pool = self.Pool(processes=self.limit,
|
||||
initializer=process_initializer,
|
||||
**self.options)
|
||||
self.on_apply = P.apply_async
|
||||
self.on_soft_timeout = P._timeout_handler.on_soft_timeout
|
||||
self.on_hard_timeout = P._timeout_handler.on_hard_timeout
|
||||
self.maintain_pool = P.maintain_pool
|
||||
self.maybe_handle_result = P._result_handler.handle_event
|
||||
|
||||
def did_start_ok(self):
|
||||
return self._pool.did_start_ok()
|
||||
|
||||
def on_stop(self):
|
||||
"""Gracefully stop the pool."""
|
||||
if self._pool is not None and self._pool._state in (RUN, CLOSE):
|
||||
self._pool.close()
|
||||
self._pool.join()
|
||||
self._pool = None
|
||||
|
||||
def on_terminate(self):
|
||||
"""Force terminate the pool."""
|
||||
if self._pool is not None:
|
||||
self._pool.terminate()
|
||||
self._pool = None
|
||||
|
||||
def on_close(self):
|
||||
if self._pool is not None and self._pool._state == RUN:
|
||||
self._pool.close()
|
||||
|
||||
def terminate_job(self, pid, signal=None):
|
||||
return self._pool.terminate_job(pid, signal)
|
||||
|
||||
def grow(self, n=1):
|
||||
return self._pool.grow(n)
|
||||
|
||||
def shrink(self, n=1):
|
||||
return self._pool.shrink(n)
|
||||
|
||||
def restart(self):
|
||||
self._pool.restart()
|
||||
|
||||
def _get_info(self):
|
||||
return {'max-concurrency': self.limit,
|
||||
'processes': [p.pid for p in self._pool._pool],
|
||||
'max-tasks-per-child': self._pool._maxtasksperchild,
|
||||
'put-guarded-by-semaphore': self.putlocks,
|
||||
'timeouts': (self._pool.soft_timeout, self._pool.timeout)}
|
||||
|
||||
def init_callbacks(self, **kwargs):
|
||||
for k, v in kwargs.iteritems():
|
||||
setattr(self._pool, k, v)
|
||||
|
||||
def handle_timeouts(self):
|
||||
if self._pool._timeout_handler:
|
||||
self._pool._timeout_handler.handle_event()
|
||||
|
||||
@property
|
||||
def num_processes(self):
|
||||
return self._pool._processes
|
||||
|
||||
@property
|
||||
def readers(self):
|
||||
return self._pool.readers
|
||||
|
||||
@property
|
||||
def writers(self):
|
||||
return self._pool.writers
|
||||
|
||||
@property
|
||||
def timers(self):
|
||||
return {self.maintain_pool: 5.0}
|
||||
28
awx/lib/site-packages/celery/concurrency/solo.py
Normal file
@@ -0,0 +1,28 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.solo
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Single-threaded pool implementation.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
|
||||
from .base import BasePool, apply_target
|
||||
|
||||
|
||||
class TaskPool(BasePool):
|
||||
"""Solo task pool (blocking, inline, fast)."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(TaskPool, self).__init__(*args, **kwargs)
|
||||
self.on_apply = apply_target
|
||||
|
||||
def _get_info(self):
|
||||
return {'max-concurrency': 1,
|
||||
'processes': [os.getpid()],
|
||||
'max-tasks-per-child': None,
|
||||
'put-guarded-by-semaphore': True,
|
||||
'timeouts': ()}
|
||||
55
awx/lib/site-packages/celery/concurrency/threads.py
Normal file
@@ -0,0 +1,55 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.concurrency.threads
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Pool implementation using threads.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery.utils.compat import UserDict
|
||||
|
||||
from .base import apply_target, BasePool
|
||||
|
||||
|
||||
class NullDict(UserDict):
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
pass
|
||||
|
||||
|
||||
class TaskPool(BasePool):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
try:
|
||||
import threadpool
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
'The threaded pool requires the threadpool module.')
|
||||
self.WorkRequest = threadpool.WorkRequest
|
||||
self.ThreadPool = threadpool.ThreadPool
|
||||
super(TaskPool, self).__init__(*args, **kwargs)
|
||||
|
||||
def on_start(self):
|
||||
self._pool = self.ThreadPool(self.limit)
|
||||
# threadpool stores all work requests until they are processed
|
||||
# we don't need this dict, and it occupies way too much memory.
|
||||
self._pool.workRequests = NullDict()
|
||||
self._quick_put = self._pool.putRequest
|
||||
self._quick_clear = self._pool._results_queue.queue.clear
|
||||
|
||||
def on_stop(self):
|
||||
self._pool.dismissWorkers(self.limit, do_join=True)
|
||||
|
||||
def on_apply(self, target, args=None, kwargs=None, callback=None,
|
||||
accept_callback=None, **_):
|
||||
req = self.WorkRequest(apply_target, (target, args, kwargs, callback,
|
||||
accept_callback))
|
||||
self._quick_put(req)
|
||||
# threadpool also has callback support,
|
||||
# but for some reason the callback is not triggered
|
||||
# before you've collected the results.
|
||||
# Clear the results (if any), so it doesn't grow too large.
|
||||
self._quick_clear()
|
||||
return req
|
||||
0
awx/lib/site-packages/celery/contrib/__init__.py
Normal file
167
awx/lib/site-packages/celery/contrib/abortable.py
Normal file
@@ -0,0 +1,167 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
=========================
|
||||
Abortable tasks overview
|
||||
=========================
|
||||
|
||||
For long-running :class:`Task`'s, it can be desirable to support
|
||||
aborting during execution. Of course, these tasks should be built to
|
||||
support abortion specifically.
|
||||
|
||||
The :class:`AbortableTask` serves as a base class for all :class:`Task`
|
||||
objects that should support abortion by producers.
|
||||
|
||||
* Producers may invoke the :meth:`abort` method on
|
||||
:class:`AbortableAsyncResult` instances, to request abortion.
|
||||
|
||||
* Consumers (workers) should periodically check (and honor!) the
|
||||
:meth:`is_aborted` method at controlled points in their task's
|
||||
:meth:`run` method. The more often, the better.
|
||||
|
||||
The necessary intermediate communication is dealt with by the
|
||||
:class:`AbortableTask` implementation.
|
||||
|
||||
Usage example
|
||||
-------------
|
||||
|
||||
In the consumer:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from celery.contrib.abortable import AbortableTask
|
||||
from celery.utils.log import get_task_logger
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
class MyLongRunningTask(AbortableTask):
|
||||
|
||||
def run(self, **kwargs):
|
||||
results = []
|
||||
for x in xrange(100):
|
||||
# Check after every 5 loops..
|
||||
if x % 5 == 0: # alternatively, check when some timer is due
|
||||
if self.is_aborted(**kwargs):
|
||||
# Respect the aborted status and terminate
|
||||
# gracefully
|
||||
logger.warning('Task aborted.')
|
||||
return
|
||||
y = do_something_expensive(x)
|
||||
results.append(y)
|
||||
logger.info('Task finished.')
|
||||
return results
|
||||
|
||||
|
||||
In the producer:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from myproject.tasks import MyLongRunningTask
|
||||
|
||||
def myview(request):
|
||||
|
||||
async_result = MyLongRunningTask.delay()
|
||||
# async_result is of type AbortableAsyncResult
|
||||
|
||||
# After 10 seconds, abort the task
|
||||
time.sleep(10)
|
||||
async_result.abort()
|
||||
|
||||
...
|
||||
|
||||
After the `async_result.abort()` call, the task execution is not
|
||||
aborted immediately. In fact, it is not guaranteed to abort at all. Keep
|
||||
checking the `async_result` status, or call `async_result.wait()` to
|
||||
have it block until the task is finished.
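For example, a producer could poll for completion after requesting the
abort (an illustrative sketch reusing the `async_result` from above):

.. code-block:: python

    import time

    async_result.abort()
    while not async_result.ready():  # ready() is a standard AsyncResult method
        time.sleep(1)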
|
||||
|
||||
.. note::
|
||||
|
||||
In order to abort tasks, there needs to be communication between the
|
||||
producer and the consumer. This is currently implemented through the
|
||||
database backend. Therefore, this class will only work with the
|
||||
database backends.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery.task.base import Task
|
||||
from celery.result import AsyncResult
|
||||
|
||||
|
||||
"""
|
||||
Task States
|
||||
-----------
|
||||
|
||||
.. state:: ABORTED
|
||||
|
||||
ABORTED
|
||||
~~~~~~~
|
||||
|
||||
Task is aborted (typically by the producer) and should be
|
||||
aborted as soon as possible.
|
||||
|
||||
"""
|
||||
ABORTED = 'ABORTED'
|
||||
|
||||
|
||||
class AbortableAsyncResult(AsyncResult):
|
||||
"""Represents a abortable result.
|
||||
|
||||
Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
|
||||
which sets the state of the underlying Task to `'ABORTED'`.
|
||||
|
||||
"""
|
||||
|
||||
def is_aborted(self):
|
||||
"""Returns :const:`True` if the task is (being) aborted."""
|
||||
return self.state == ABORTED
|
||||
|
||||
def abort(self):
|
||||
"""Set the state of the task to :const:`ABORTED`.
|
||||
|
||||
Abortable tasks monitor their state at regular intervals and
|
||||
terminate execution if it has been set to :const:`ABORTED`.
|
||||
|
||||
Be aware that invoking this method does not guarantee when the
|
||||
task will be aborted (or even if the task will be aborted at
|
||||
all).
|
||||
|
||||
"""
|
||||
# TODO: store_result requires all four arguments to be set,
|
||||
# but only status should be updated here
|
||||
return self.backend.store_result(self.id, result=None,
|
||||
status=ABORTED, traceback=None)
|
||||
|
||||
|
||||
class AbortableTask(Task):
|
||||
"""A celery task that serves as a base class for all :class:`Task`'s
|
||||
that support aborting during execution.
|
||||
|
||||
All subclasses of :class:`AbortableTask` must call the
|
||||
:meth:`is_aborted` method periodically and act accordingly when
|
||||
the call evaluates to :const:`True`.
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def AsyncResult(cls, task_id):
|
||||
"""Returns the accompanying AbortableAsyncResult instance."""
|
||||
return AbortableAsyncResult(task_id, backend=cls.backend)
|
||||
|
||||
def is_aborted(self, **kwargs):
|
||||
"""Checks against the backend whether this
|
||||
:class:`AbortableAsyncResult` is :const:`ABORTED`.
|
||||
|
||||
Always returns :const:`False` in case the `task_id` parameter
|
||||
refers to a regular (non-abortable) :class:`Task`.
|
||||
|
||||
Be aware that invoking this method will cause a hit in the
|
||||
backend (for example a database query), so find a good balance
|
||||
between calling it regularly (for responsiveness), but not too
|
||||
often (for performance).
|
||||
|
||||
"""
|
||||
task_id = kwargs.get('task_id', self.request.id)
|
||||
result = self.AsyncResult(task_id)
|
||||
if not isinstance(result, AbortableAsyncResult):
|
||||
return False
|
||||
return result.is_aborted()
|
||||
245
awx/lib/site-packages/celery/contrib/batches.py
Normal file
@@ -0,0 +1,245 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.contrib.batches
|
||||
======================
|
||||
|
||||
Experimental task class that buffers messages and processes them as a list.
|
||||
|
||||
.. warning::
|
||||
|
||||
For this to work you have to set
|
||||
:setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where
|
||||
the final multiplied value is higher than ``flush_every``.
|
||||
|
||||
In the future we hope to add the ability to direct batching tasks
|
||||
to a channel with different QoS requirements than the task channel.
|
||||
|
||||
**Simple Example**
|
||||
|
||||
A click counter that flushes the buffer every 100 messages, and every
|
||||
10 seconds. Does not do anything with the data, but can easily be modified
|
||||
to store it in a database.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# Flush after 100 messages, or 10 seconds.
|
||||
@app.task(base=Batches, flush_every=100, flush_interval=10)
|
||||
def count_click(requests):
|
||||
from collections import Counter
|
||||
count = Counter(request.kwargs['url'] for request in requests)
|
||||
for url, count in count.items():
|
||||
print('>>> Clicks: %s -> %s' % (url, count))
|
||||
|
||||
|
||||
Then you can ask for a click to be counted by doing::
|
||||
|
||||
>>> count_click.delay('http://example.com')
|
||||
|
||||
**Example returning results**
|
||||
|
||||
An interface to the Web of Trust API that flushes the buffer every 100
|
||||
messages, and every 10 seconds.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import requests
|
||||
from urlparse import urlparse
|
||||
|
||||
from celery.contrib.batches import Batches
|
||||
|
||||
wot_api_target = "https://api.mywot.com/0.4/public_link_json"
|
||||
|
||||
@app.task(base=Batches, flush_every=100, flush_interval=10)
|
||||
def wot_api(requests):
|
||||
sig = lambda url: url
|
||||
responses = wot_api_real(
|
||||
(sig(*request.args, **request.kwargs) for request in requests)
|
||||
)
|
||||
# use mark_as_done to manually return response data
|
||||
for response, request in zip(responses, requests):
|
||||
app.backend.mark_as_done(request.id, response)
|
||||
|
||||
|
||||
def wot_api_real(urls):
|
||||
domains = [urlparse(url).netloc for url in urls]
|
||||
response = requests.get(
|
||||
wot_api_target,
|
||||
params={"hosts": ('/').join(set(domains)) + '/'}
|
||||
)
|
||||
return [response.json[domain] for domain in domains]
|
||||
|
||||
Using the API is done as follows::
|
||||
|
||||
>>> wot_api.delay('http://example.com')
|
||||
|
||||
.. note::
|
||||
|
||||
If you don't have an ``app`` instance then use the current app proxy
|
||||
instead::
|
||||
|
||||
from celery import current_app
|
||||
current_app.backend.mark_as_done(request.id, response)
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from itertools import count
|
||||
from Queue import Empty, Queue
|
||||
|
||||
from celery.task import Task
|
||||
from celery.utils.log import get_logger
|
||||
from celery.worker.job import Request
|
||||
from celery.utils import noop
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def consume_queue(queue):
|
||||
"""Iterator yielding all immediately available items in a
|
||||
:class:`Queue.Queue`.
|
||||
|
||||
The iterator stops as soon as the queue raises :exc:`Queue.Empty`.
|
||||
|
||||
*Examples*
|
||||
|
||||
>>> q = Queue()
|
||||
>>> map(q.put, range(4))
|
||||
>>> list(consume_queue(q))
|
||||
[0, 1, 2, 3]
|
||||
>>> list(consume_queue(q))
|
||||
[]
|
||||
|
||||
"""
|
||||
get = queue.get_nowait
|
||||
while 1:
|
||||
try:
|
||||
yield get()
|
||||
except Empty:
|
||||
break
|
||||
|
||||
|
||||
def apply_batches_task(task, args, loglevel, logfile):
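# Runs the flushed batch inside the pool worker; the request context is
# pushed so logging works within the task body and is always popped again,
# even if the task raises.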
|
||||
task.push_request(loglevel=loglevel, logfile=logfile)
|
||||
try:
|
||||
result = task(*args)
|
||||
except Exception, exc:
|
||||
result = None
|
||||
logger.error('Error: %r', exc, exc_info=True)
|
||||
finally:
|
||||
task.pop_request()
|
||||
return result
|
||||
|
||||
|
||||
class SimpleRequest(object):
|
||||
"""Pickleable request."""
|
||||
|
||||
#: task id
|
||||
id = None
|
||||
|
||||
#: task name
|
||||
name = None
|
||||
|
||||
#: positional arguments
|
||||
args = ()
|
||||
|
||||
#: keyword arguments
|
||||
kwargs = {}
|
||||
|
||||
#: message delivery information.
|
||||
delivery_info = None
|
||||
|
||||
#: worker node name
|
||||
hostname = None
|
||||
|
||||
def __init__(self, id, name, args, kwargs, delivery_info, hostname):
|
||||
self.id = id
|
||||
self.name = name
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self.delivery_info = delivery_info
|
||||
self.hostname = hostname
|
||||
|
||||
@classmethod
|
||||
def from_request(cls, request):
|
||||
return cls(request.id, request.name, request.args,
|
||||
request.kwargs, request.delivery_info, request.hostname)
|
||||
|
||||
|
||||
class Batches(Task):
|
||||
abstract = True
|
||||
|
||||
#: Maximum number of message in buffer.
|
||||
flush_every = 10
|
||||
|
||||
#: Timeout in seconds before buffer is flushed anyway.
|
||||
flush_interval = 30
|
||||
|
||||
def __init__(self):
|
||||
self._buffer = Queue()
|
||||
self._count = count(1).next
|
||||
self._tref = None
|
||||
self._pool = None
|
||||
|
||||
def run(self, requests):
|
||||
raise NotImplementedError('%r must implement run(requests)' % (self, ))
|
||||
|
||||
def Strategy(self, task, app, consumer):
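# Custom consumer strategy: instead of executing each message as it arrives,
# requests are buffered and flushed either when flush_every messages have
# accumulated or when the flush_interval timer fires.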
|
||||
self._pool = consumer.pool
|
||||
hostname = consumer.hostname
|
||||
eventer = consumer.event_dispatcher
|
||||
Req = Request
|
||||
connection_errors = consumer.connection_errors
|
||||
timer = consumer.timer
|
||||
put_buffer = self._buffer.put
|
||||
flush_buffer = self._do_flush
|
||||
|
||||
def task_message_handler(message, body, ack):
|
||||
request = Req(body, on_ack=ack, app=app, hostname=hostname,
|
||||
events=eventer, task=task,
|
||||
connection_errors=connection_errors,
|
||||
delivery_info=message.delivery_info)
|
||||
put_buffer(request)
|
||||
|
||||
if self._tref is None: # first request starts flush timer.
|
||||
self._tref = timer.apply_interval(self.flush_interval * 1000.0,
|
||||
flush_buffer)
|
||||
|
||||
if not self._count() % self.flush_every:
|
||||
flush_buffer()
|
||||
|
||||
return task_message_handler
|
||||
|
||||
def flush(self, requests):
|
||||
return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
|
||||
for r in requests], ))
|
||||
|
||||
def _do_flush(self):
|
||||
logger.debug('Batches: Wake-up to flush buffer...')
|
||||
requests = None
|
||||
if self._buffer.qsize():
|
||||
requests = list(consume_queue(self._buffer))
|
||||
if requests:
|
||||
logger.debug('Batches: Buffer complete: %s', len(requests))
|
||||
self.flush(requests)
|
||||
if not requests:
|
||||
logger.debug('Batches: Cancelling timer: Nothing in buffer.')
|
||||
self._tref.cancel() # cancel timer.
|
||||
self._tref = None
|
||||
|
||||
def apply_buffer(self, requests, args=(), kwargs={}):
|
||||
acks_late = [], []
|
||||
[acks_late[r.task.acks_late].append(r) for r in requests]
|
||||
assert requests and (acks_late[True] or acks_late[False])
|
||||
|
||||
def on_accepted(pid, time_accepted):
|
||||
[req.acknowledge() for req in acks_late[False]]
|
||||
|
||||
def on_return(result):
|
||||
[req.acknowledge() for req in acks_late[True]]
|
||||
|
||||
return self._pool.apply_async(
|
||||
apply_batches_task,
|
||||
(self, args, 0, None),
|
||||
accept_callback=on_accepted,
|
||||
callback=acks_late[True] and on_return or noop,
|
||||
)
|
||||
65
awx/lib/site-packages/celery/contrib/bundles.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.contrib.bundles
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Celery PyPI Bundles.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery import VERSION
|
||||
from bundle.extensions import Dist
|
||||
|
||||
|
||||
defaults = {'author': 'Celery Project',
|
||||
'author_email': 'bundles@celeryproject.org',
|
||||
'url': 'http://celeryproject.org',
|
||||
'license': 'BSD'}
|
||||
celery = Dist('celery', VERSION, **defaults)
|
||||
django_celery = Dist('django-celery', VERSION, **defaults)
|
||||
flask_celery = Dist('Flask-Celery', VERSION, **defaults)
|
||||
|
||||
bundles = [
|
||||
celery.Bundle(
|
||||
'celery-with-redis',
|
||||
'Bundle installing the dependencies for Celery and Redis',
|
||||
requires=['redis>=2.4.4'],
|
||||
),
|
||||
celery.Bundle(
|
||||
'celery-with-mongodb',
|
||||
'Bundle installing the dependencies for Celery and MongoDB',
|
||||
requires=['pymongo'],
|
||||
),
|
||||
celery.Bundle(
|
||||
'celery-with-couchdb',
|
||||
'Bundle installing the dependencies for Celery and CouchDB',
|
||||
requires=['couchdb'],
|
||||
),
|
||||
celery.Bundle(
|
||||
'celery-with-beanstalk',
|
||||
'Bundle installing the dependencies for Celery and Beanstalk',
|
||||
requires=['beanstalkc'],
|
||||
),
|
||||
|
||||
django_celery.Bundle(
|
||||
'django-celery-with-redis',
|
||||
'Bundle installing the dependencies for Django-Celery and Redis',
|
||||
requires=['redis>=2.4.4'],
|
||||
),
|
||||
django_celery.Bundle(
|
||||
'django-celery-with-mongodb',
|
||||
'Bundle installing the dependencies for Django-Celery and MongoDB',
|
||||
requires=['pymongo'],
|
||||
),
|
||||
django_celery.Bundle(
|
||||
'django-celery-with-couchdb',
|
||||
'Bundle installing the dependencies for Django-Celery and CouchDB',
|
||||
requires=['couchdb'],
|
||||
),
|
||||
django_celery.Bundle(
|
||||
'django-celery-with-beanstalk',
|
||||
'Bundle installing the dependencies for Django-Celery and Beanstalk',
|
||||
requires=['beanstalkc'],
|
||||
),
|
||||
]
|
||||
118
awx/lib/site-packages/celery/contrib/methods.py
Normal file
@@ -0,0 +1,118 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.contrib.methods
|
||||
======================
|
||||
|
||||
Task decorator that supports creating tasks out of methods.
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from celery.contrib.methods import task
|
||||
|
||||
class X(object):
|
||||
|
||||
@task()
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
or with any task decorator:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from celery.contrib.methods import task_method
|
||||
|
||||
class X(object):
|
||||
|
||||
@celery.task(filter=task_method)
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
.. note::
|
||||
|
||||
The task must use the new Task base class (:class:`celery.Task`),
|
||||
not the old base class using classmethods (``celery.task.Task``,
|
||||
``celery.task.base.Task``).
|
||||
|
||||
This means that you have to use the task decorator from a Celery app
|
||||
instance, and not the old-API:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
||||
from celery import task # BAD
|
||||
from celery.task import task # ALSO BAD
|
||||
|
||||
# GOOD:
|
||||
celery = Celery(...)
|
||||
|
||||
@celery.task(filter=task_method)
|
||||
def foo(self): pass
|
||||
|
||||
# ALSO GOOD:
|
||||
from celery import current_app
|
||||
|
||||
@current_app.task(filter=task_method)
|
||||
def foo(self): pass
|
||||
|
||||
Caveats
|
||||
-------
|
||||
|
||||
- Automatic naming won't be able to know what the class name is.
|
||||
|
||||
The name will still be module_name + task_name,
|
||||
so two methods with the same name in the same module will collide
|
||||
and only one of them will be able to run:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class A(object):
|
||||
|
||||
@task()
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
class B(object):
|
||||
|
||||
@task()
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
would have to be written as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class A(object):
|
||||
@task(name='A.add')
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
class B(object):
|
||||
@task(name='B.add')
|
||||
def add(self, x, y):
|
||||
return x + y
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from celery import current_app
|
||||
|
||||
|
||||
class task_method(object):
|
||||
|
||||
def __init__(self, task, *args, **kwargs):
|
||||
self.task = task
|
||||
|
||||
def __get__(self, obj, type=None):
|
||||
if obj is None:
|
||||
return self.task
|
||||
task = self.task.__class__()
|
||||
task.__self__ = obj
|
||||
return task
|
||||
|
||||
|
||||
def task(*args, **kwargs):
|
||||
return current_app.task(*args, **dict(kwargs, filter=task_method))
|
||||
355
awx/lib/site-packages/celery/contrib/migrate.py
Normal file
@@ -0,0 +1,355 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.contrib.migrate
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Migration tools.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import socket
|
||||
|
||||
from functools import partial
|
||||
from itertools import cycle, islice
|
||||
|
||||
from kombu import eventloop, Queue
|
||||
from kombu.common import maybe_declare
|
||||
from kombu.exceptions import StdChannelError
|
||||
from kombu.utils.encoding import ensure_bytes
|
||||
|
||||
from celery.app import app_or_default
|
||||
from celery.utils import worker_direct
|
||||
|
||||
|
||||
class StopFiltering(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class State(object):
|
||||
count = 0
|
||||
filtered = 0
|
||||
total_apx = 0
|
||||
|
||||
@property
|
||||
def strtotal(self):
|
||||
if not self.total_apx:
|
||||
return u'?'
|
||||
return unicode(self.total_apx)
|
||||
|
||||
def __repr__(self):
|
||||
if self.filtered:
|
||||
return '^%s' % self.filtered
|
||||
return '%s/%s' % (self.count, self.strtotal)
|
||||
|
||||
|
||||
def republish(producer, message, exchange=None, routing_key=None,
|
||||
remove_props=['application_headers',
|
||||
'content_type',
|
||||
'content_encoding',
|
||||
'headers']):
|
||||
body = ensure_bytes(message.body) # use raw message body.
|
||||
info, headers, props = (message.delivery_info,
|
||||
message.headers, message.properties)
|
||||
exchange = info['exchange'] if exchange is None else exchange
|
||||
routing_key = info['routing_key'] if routing_key is None else routing_key
|
||||
ctype, enc = message.content_type, message.content_encoding
|
||||
# remove compression header, as this will be inserted again
|
||||
# when the message is recompressed.
|
||||
compression = headers.pop('compression', None)
|
||||
|
||||
for key in remove_props:
|
||||
props.pop(key, None)
|
||||
|
||||
producer.publish(ensure_bytes(body), exchange=exchange,
|
||||
routing_key=routing_key, compression=compression,
|
||||
headers=headers, content_type=ctype,
|
||||
content_encoding=enc, **props)
|
||||
|
||||
|
||||
def migrate_task(producer, body_, message, queues=None):
|
||||
info = message.delivery_info
|
||||
queues = {} if queues is None else queues
|
||||
republish(producer, message,
|
||||
exchange=queues.get(info['exchange']),
|
||||
routing_key=queues.get(info['routing_key']))
|
||||
|
||||
|
||||
def filter_callback(callback, tasks):
|
||||
|
||||
def filtered(body, message):
|
||||
if tasks and message.payload['task'] not in tasks:
|
||||
return
|
||||
|
||||
return callback(body, message)
|
||||
return filtered
|
||||
|
||||
|
||||
def migrate_tasks(source, dest, migrate=migrate_task, app=None,
|
||||
queues=None, **kwargs):
|
||||
app = app_or_default(app)
|
||||
queues = prepare_queues(queues)
|
||||
producer = app.amqp.TaskProducer(dest)
|
||||
migrate = partial(migrate, producer, queues=queues)
|
||||
|
||||
def on_declare_queue(queue):
|
||||
new_queue = queue(producer.channel)
|
||||
new_queue.name = queues.get(queue.name, queue.name)
|
||||
if new_queue.routing_key == queue.name:
|
||||
new_queue.routing_key = queues.get(queue.name,
|
||||
new_queue.routing_key)
|
||||
if new_queue.exchange.name == queue.name:
|
||||
new_queue.exchange.name = queues.get(queue.name, queue.name)
|
||||
new_queue.declare()
|
||||
|
||||
return start_filter(app, source, migrate, queues=queues,
|
||||
on_declare_queue=on_declare_queue, **kwargs)
|
||||
|
||||
|
||||
def _maybe_queue(app, q):
|
||||
if isinstance(q, basestring):
|
||||
return app.amqp.queues[q]
|
||||
return q
|
||||
|
||||
|
||||
def move(predicate, connection=None, exchange=None, routing_key=None,
|
||||
source=None, app=None, callback=None, limit=None, transform=None,
|
||||
**kwargs):
|
||||
"""Find tasks by filtering them and move the tasks to a new queue.
|
||||
|
||||
:param predicate: Filter function used to decide which messages
|
||||
to move. Must accept the standard signature of ``(body, message)``
|
||||
used by Kombu consumer callbacks. If the predicate wants the message
|
||||
to be moved it must return either:
|
||||
|
||||
1) a tuple of ``(exchange, routing_key)``, or
|
||||
|
||||
2) a :class:`~kombu.entity.Queue` instance, or
|
||||
|
||||
3) any other true value which means the specified
|
||||
``exchange`` and ``routing_key`` arguments will be used.
|
||||
|
||||
:keyword connection: Custom connection to use.
|
||||
:keyword source: Optional list of source queues to use instead of the
|
||||
default (which is the queues in :setting:`CELERY_QUEUES`).
|
||||
This list can also contain new :class:`~kombu.entity.Queue` instances.
|
||||
:keyword exchange: Default destination exchange.
|
||||
:keyword routing_key: Default destination routing key.
|
||||
:keyword limit: Limit number of messages to filter.
|
||||
:keyword callback: Callback called after message moved,
|
||||
with signature ``(state, body, message)``.
|
||||
:keyword transform: Optional function to transform the return
|
||||
value (destination) of the filter function.
|
||||
|
||||
Also supports the same keyword arguments as :func:`start_filter`.
|
||||
|
||||
To demonstrate, the :func:`move_task_by_id` operation can be implemented
|
||||
like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def is_wanted_task(body, message):
|
||||
if body['id'] == wanted_id:
|
||||
return Queue('foo', exchange=Exchange('foo'),
|
||||
routing_key='foo')
|
||||
|
||||
move(is_wanted_task)
|
||||
|
||||
or with a transform:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def transform(value):
|
||||
if isinstance(value, basestring):
|
||||
return Queue(value, Exchange(value), value)
|
||||
return value
|
||||
|
||||
move(is_wanted_task, transform=transform)
|
||||
|
||||
The predicate may also return a tuple of ``(exchange, routing_key)``
|
||||
to specify the destination to where the task should be moved,
|
||||
or a :class:`~kombu.entity.Queue` instance.
|
||||
Any other true value means that the task will be moved to the
|
||||
default exchange/routing_key.
|
||||
|
||||
"""
|
||||
app = app_or_default(app)
|
||||
queues = [_maybe_queue(app, queue) for queue in source or []] or None
|
||||
with app.connection_or_acquire(connection, pool=False) as conn:
|
||||
producer = app.amqp.TaskProducer(conn)
|
||||
state = State()
|
||||
|
||||
def on_task(body, message):
|
||||
ret = predicate(body, message)
|
||||
if ret:
|
||||
if transform:
|
||||
ret = transform(ret)
|
||||
if isinstance(ret, Queue):
|
||||
maybe_declare(ret, conn.default_channel)
|
||||
ex, rk = ret.exchange.name, ret.routing_key
|
||||
else:
|
||||
ex, rk = expand_dest(ret, exchange, routing_key)
|
||||
republish(producer, message,
|
||||
exchange=ex, routing_key=rk)
|
||||
message.ack()
|
||||
|
||||
state.filtered += 1
|
||||
if callback:
|
||||
callback(state, body, message)
|
||||
if limit and state.filtered >= limit:
|
||||
raise StopFiltering()
|
||||
|
||||
return start_filter(app, conn, on_task, consume_from=queues, **kwargs)
|
||||
|
||||
|
||||
def expand_dest(ret, exchange, routing_key):
|
||||
try:
|
||||
ex, rk = ret
|
||||
except (TypeError, ValueError):
|
||||
ex, rk = exchange, routing_key
|
||||
return ex, rk
|
||||
|
||||
|
||||
def task_id_eq(task_id, body, message):
|
||||
return body['id'] == task_id
|
||||
|
||||
|
||||
def task_id_in(ids, body, message):
|
||||
return body['id'] in ids
|
||||
|
||||
|
||||
def prepare_queues(queues):
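# Normalizes the queues argument: a 'source:destination,...' string or a
# list of 'source:destination' items becomes a {source: destination} dict
# (a bare 'source' maps to itself); None becomes an empty dict.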
|
||||
if isinstance(queues, basestring):
|
||||
queues = queues.split(',')
|
||||
if isinstance(queues, list):
|
||||
queues = dict(tuple(islice(cycle(q.split(':')), None, 2))
|
||||
for q in queues)
|
||||
if queues is None:
|
||||
queues = {}
|
||||
return queues
|
||||
|
||||
|
||||
def start_filter(app, conn, filter, limit=None, timeout=1.0,
|
||||
ack_messages=False, tasks=None, queues=None,
|
||||
callback=None, forever=False, on_declare_queue=None,
|
||||
consume_from=None, state=None, **kwargs):
|
||||
state = state or State()
|
||||
queues = prepare_queues(queues)
|
||||
if isinstance(tasks, basestring):
|
||||
tasks = set(tasks.split(','))
|
||||
if tasks is None:
|
||||
tasks = set([])
|
||||
|
||||
def update_state(body, message):
|
||||
state.count += 1
|
||||
if limit and state.count >= limit:
|
||||
raise StopFiltering()
|
||||
|
||||
def ack_message(body, message):
|
||||
message.ack()
|
||||
|
||||
consumer = app.amqp.TaskConsumer(conn, queues=consume_from)
|
||||
|
||||
if tasks:
|
||||
filter = filter_callback(filter, tasks)
|
||||
update_state = filter_callback(update_state, tasks)
|
||||
ack_message = filter_callback(ack_message, tasks)
|
||||
|
||||
consumer.register_callback(filter)
|
||||
consumer.register_callback(update_state)
|
||||
if ack_messages:
|
||||
consumer.register_callback(ack_message)
|
||||
if callback is not None:
|
||||
callback = partial(callback, state)
|
||||
if tasks:
|
||||
callback = filter_callback(callback, tasks)
|
||||
consumer.register_callback(callback)
|
||||
|
||||
# declare all queues on the new broker.
|
||||
for queue in consumer.queues:
|
||||
if queues and queue.name not in queues:
|
||||
continue
|
||||
if on_declare_queue is not None:
|
||||
on_declare_queue(queue)
|
||||
try:
|
||||
_, mcount, _ = queue(consumer.channel).queue_declare(passive=True)
|
||||
if mcount:
|
||||
state.total_apx += mcount
|
||||
except conn.channel_errors + (StdChannelError, ):
|
||||
pass
|
||||
|
||||
# start migrating messages.
|
||||
with consumer:
|
||||
try:
|
||||
for _ in eventloop(conn, # pragma: no cover
|
||||
timeout=timeout, ignore_timeouts=forever):
|
||||
pass
|
||||
except socket.timeout:
|
||||
pass
|
||||
except StopFiltering:
|
||||
pass
|
||||
return state
|
||||
|
||||
|
||||
def move_task_by_id(task_id, dest, **kwargs):
|
||||
"""Find a task by id and move it to another queue.
|
||||
|
||||
:param task_id: Id of task to move.
|
||||
:param dest: Destination queue.
|
||||
|
||||
Also supports the same keyword arguments as :func:`move`.
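Example (task id and destination are illustrative)::

    >>> from kombu import Queue
    >>> move_task_by_id('5bee6e82-f4ac-468e-bd3d-13e8600250bc',
    ...                 Queue('hipri'))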
|
||||
|
||||
"""
|
||||
return move_by_idmap({task_id: dest}, **kwargs)
|
||||
|
||||
|
||||
def move_by_idmap(map, **kwargs):
|
||||
"""Moves tasks by matching from a ``task_id: queue`` mapping,
|
||||
where ``queue`` is a queue to move the task to.
|
||||
|
||||
Example::
|
||||
|
||||
>>> move_by_idmap({
|
||||
... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue(...),
|
||||
... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue(...),
|
||||
... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue(...)},
|
||||
... queues=['hipri'])
|
||||
|
||||
"""
|
||||
def task_id_in_map(body, message):
|
||||
return map.get(body['id'])
|
||||
|
||||
# adding the limit means that we don't have to consume any more
|
||||
# when we've found everything.
|
||||
return move(task_id_in_map, limit=len(map), **kwargs)
|
||||
|
||||
|
||||
def move_by_taskmap(map, **kwargs):
|
||||
"""Moves tasks by matching from a ``task_name: queue`` mapping,
|
||||
where ``queue`` is the queue to move the task to.
|
||||
|
||||
Example::
|
||||
|
||||
>>> move_by_taskmap({
|
||||
... 'tasks.add': Queue(...),
|
||||
... 'tasks.mul': Queue(...),
|
||||
... })
|
||||
|
||||
"""
|
||||
|
||||
def task_name_in_map(body, message):
|
||||
return map.get(body['task']) # <- name of task
|
||||
|
||||
return move(task_name_in_map, **kwargs)
|
||||
|
||||
|
||||
move_direct = partial(move, transform=worker_direct)
|
||||
move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
|
||||
move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
|
||||
move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
|
||||
|
||||
|
||||
def filter_status(state, body, message):
|
||||
print('Moving task %s/%s: %s[%s]' % (
|
||||
state.filtered, state.strtotal, body['task'], body['id']))
|
||||
163
awx/lib/site-packages/celery/contrib/rdb.py
Normal file
@@ -0,0 +1,163 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.contrib.rdb
|
||||
==================
|
||||
|
||||
Remote debugger for Celery tasks running in multiprocessing pool workers.
|
||||
Inspired by http://snippets.dzone.com/posts/show/7248
|
||||
|
||||
**Usage**
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from celery.contrib import rdb
|
||||
from celery import task
|
||||
|
||||
@task()
|
||||
def add(x, y):
|
||||
result = x + y
|
||||
rdb.set_trace()
|
||||
return result
|
||||
|
||||
|
||||
**Environment Variables**
|
||||
|
||||
.. envvar:: CELERY_RDB_HOST
|
||||
|
||||
Hostname to bind to. Default is '127.0.0.1', which means the socket
|
||||
will only be accessible from the local host.
|
||||
|
||||
.. envvar:: CELERY_RDB_PORT
|
||||
|
||||
Base port to bind to. Default is 6899.
|
||||
The debugger will try to find an available port starting from the
|
||||
base port. The selected port will be logged by the worker.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import errno
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from pdb import Pdb
|
||||
|
||||
from billiard import current_process
|
||||
|
||||
from celery.platforms import ignore_errno
|
||||
|
||||
default_port = 6899
|
||||
|
||||
CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1'
|
||||
CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port)
|
||||
|
||||
#: Holds the currently active debugger.
|
||||
_current = [None]
|
||||
|
||||
_frame = getattr(sys, '_getframe')
|
||||
|
||||
|
||||
class Rdb(Pdb):
|
||||
me = 'Remote Debugger'
|
||||
_prev_outs = None
|
||||
_sock = None
|
||||
|
||||
def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT,
|
||||
port_search_limit=100, port_skew=+0, out=sys.stdout):
|
||||
self.active = True
|
||||
self.out = out
|
||||
|
||||
self._prev_handles = sys.stdin, sys.stdout
|
||||
|
||||
self._sock, this_port = self.get_avail_port(
|
||||
host, port, port_search_limit, port_skew,
|
||||
)
|
||||
self._sock.setblocking(1)
|
||||
self._sock.listen(1)
|
||||
me = '%s:%s' % (self.me, this_port)
|
||||
context = self.context = {'me': me, 'host': host, 'port': this_port}
|
||||
self.say('%(me)s: Please telnet %(host)s %(port)s.'
|
||||
' Type `exit` in session to continue.' % context)
|
||||
self.say('%(me)s: Waiting for client...' % context)
|
||||
|
||||
self._client, address = self._sock.accept()
|
||||
self._client.setblocking(1)
|
||||
context['remote_addr'] = ':'.join(str(v) for v in address)
|
||||
self.say('%(me)s: In session with %(remote_addr)s' % context)
|
||||
self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
|
||||
Pdb.__init__(self, completekey='tab',
|
||||
stdin=self._handle, stdout=self._handle)
|
||||
|
||||
def get_avail_port(self, host, port, search_limit=100, skew=+0):
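# Pool worker processes are named like 'PoolWorker-1'; the numeric suffix is
# used as a port offset so concurrent workers bind distinct debugger ports,
# then the search walks upward until a free port is found.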
|
||||
try:
|
||||
_, skew = current_process().name.split('-')
|
||||
skew = int(skew)
|
||||
except ValueError:
|
||||
pass
|
||||
this_port = None
|
||||
for i in xrange(search_limit):
|
||||
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
this_port = port + skew + i
|
||||
try:
|
||||
_sock.bind((host, this_port))
|
||||
except socket.error, exc:
|
||||
if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
|
||||
continue
|
||||
raise
|
||||
else:
|
||||
return _sock, this_port
|
||||
else:
|
||||
raise Exception(
|
||||
'%s: Could not find available port. Please set using '
|
||||
'environment variable CELERY_RDB_PORT' % (self.me, ))
|
||||
|
||||
def say(self, m):
|
||||
self.out.write(m + '\n')
|
||||
|
||||
def _close_session(self):
|
||||
self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
|
||||
self._handle.close()
|
||||
self._client.close()
|
||||
self._sock.close()
|
||||
self.active = False
|
||||
self.say('%(me)s: Session %(remote_addr)s ended.' % self.context)
|
||||
|
||||
def do_continue(self, arg):
|
||||
self._close_session()
|
||||
self.set_continue()
|
||||
return 1
|
||||
do_c = do_cont = do_continue
|
||||
|
||||
def do_quit(self, arg):
|
||||
self._close_session()
|
||||
self.set_quit()
|
||||
return 1
|
||||
do_q = do_exit = do_quit
|
||||
|
||||
def set_trace(self, frame=None):
|
||||
if frame is None:
|
||||
frame = _frame().f_back
|
||||
with ignore_errno(errno.ECONNRESET):
|
||||
Pdb.set_trace(self, frame)
|
||||
|
||||
def set_quit(self):
|
||||
# this raises a BdbQuit exception that we are unable to catch.
|
||||
sys.settrace(None)
|
||||
|
||||
|
||||
def debugger():
|
||||
"""Returns the current debugger instance (if any),
|
||||
or creates a new one."""
|
||||
rdb = _current[0]
|
||||
if rdb is None or not rdb.active:
|
||||
rdb = _current[0] = Rdb()
|
||||
return rdb
|
||||
|
||||
|
||||
def set_trace(frame=None):
|
||||
"""Set breakpoint at current location, or a specified frame"""
|
||||
if frame is None:
|
||||
frame = _frame().f_back
|
||||
return debugger().set_trace(frame)
|
||||
503
awx/lib/site-packages/celery/datastructures.py
Normal file
@@ -0,0 +1,503 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.datastructures
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Custom types and data structures.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from collections import defaultdict
|
||||
from heapq import heapify, heappush, heappop
|
||||
from itertools import chain
|
||||
|
||||
try:
|
||||
from collections import Mapping, MutableMapping
|
||||
except ImportError: # pragma: no cover
|
||||
MutableMapping = None # noqa
|
||||
Mapping = dict # noqa
|
||||
|
||||
from billiard.einfo import ExceptionInfo # noqa
|
||||
from kombu.utils.limits import TokenBucket # noqa
|
||||
|
||||
from .utils.functional import LRUCache, first, uniq # noqa
|
||||
|
||||
|
||||
class CycleError(Exception):
|
||||
"""A cycle was detected in an acyclic graph."""
|
||||
|
||||
|
||||
class DependencyGraph(object):
|
||||
"""A directed acyclic graph of objects and their dependencies.
|
||||
|
||||
Supports a robust topological sort
|
||||
to detect the order in which they must be handled.
|
||||
|
||||
Takes an optional iterator of ``(obj, dependencies)``
|
||||
tuples to build the graph from.
|
||||
|
||||
.. warning::
|
||||
|
||||
Does not support cycle detection.
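A small illustrative example (node names are made up)::

    >>> graph = DependencyGraph([
    ...     ('add', []),
    ...     ('chord', ['add']),
    ... ])
    >>> graph.topsort()
    ['add', 'chord']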
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, it=None):
|
||||
self.adjacent = {}
|
||||
if it is not None:
|
||||
self.update(it)
|
||||
|
||||
def add_arc(self, obj):
|
||||
"""Add an object to the graph."""
|
||||
self.adjacent.setdefault(obj, [])
|
||||
|
||||
def add_edge(self, A, B):
|
||||
"""Add an edge from object ``A`` to object ``B``
|
||||
(``A`` depends on ``B``)."""
|
||||
self[A].append(B)
|
||||
|
||||
def topsort(self):
|
||||
"""Sort the graph topologically.
|
||||
|
||||
:returns: a list of objects in the order
|
||||
in which they must be handled.
|
||||
|
||||
"""
|
||||
graph = DependencyGraph()
|
||||
components = self._tarjan72()
|
||||
|
||||
NC = dict((node, component)
|
||||
for component in components
|
||||
for node in component)
|
||||
for component in components:
|
||||
graph.add_arc(component)
|
||||
for node in self:
|
||||
node_c = NC[node]
|
||||
for successor in self[node]:
|
||||
successor_c = NC[successor]
|
||||
if node_c != successor_c:
|
||||
graph.add_edge(node_c, successor_c)
|
||||
return [t[0] for t in graph._khan62()]
|
||||
|
||||
def valency_of(self, obj):
|
||||
"""Returns the velency (degree) of a vertex in the graph."""
|
||||
try:
|
||||
l = [len(self[obj])]
|
||||
except KeyError:
|
||||
return 0
|
||||
for node in self[obj]:
|
||||
l.append(self.valency_of(node))
|
||||
return sum(l)
|
||||
|
||||
def update(self, it):
|
||||
"""Update the graph with data from a list
|
||||
of ``(obj, dependencies)`` tuples."""
|
||||
tups = list(it)
|
||||
for obj, _ in tups:
|
||||
self.add_arc(obj)
|
||||
for obj, deps in tups:
|
||||
for dep in deps:
|
||||
self.add_edge(obj, dep)
|
||||
|
||||
def edges(self):
|
||||
"""Returns generator that yields for all edges in the graph."""
|
||||
return (obj for obj, adj in self.iteritems() if adj)
|
||||
|
||||
def _khan62(self):
|
||||
"""Khans simple topological sort algorithm from '62
|
||||
|
||||
See http://en.wikipedia.org/wiki/Topological_sorting
|
||||
|
||||
"""
|
||||
count = defaultdict(lambda: 0)
|
||||
result = []
|
||||
|
||||
for node in self:
|
||||
for successor in self[node]:
|
||||
count[successor] += 1
|
||||
ready = [node for node in self if not count[node]]
|
||||
|
||||
while ready:
|
||||
node = ready.pop()
|
||||
result.append(node)
|
||||
|
||||
for successor in self[node]:
|
||||
count[successor] -= 1
|
||||
if count[successor] == 0:
|
||||
ready.append(successor)
|
||||
result.reverse()
|
||||
return result
|
||||
|
||||
def _tarjan72(self):
|
||||
"""Tarjan's algorithm to find strongly connected components.
|
||||
|
||||
See http://bit.ly/vIMv3h.
|
||||
|
||||
"""
|
||||
result, stack, low = [], [], {}
|
||||
|
||||
def visit(node):
|
||||
if node in low:
|
||||
return
|
||||
num = len(low)
|
||||
low[node] = num
|
||||
stack_pos = len(stack)
|
||||
stack.append(node)
|
||||
|
||||
for successor in self[node]:
|
||||
visit(successor)
|
||||
low[node] = min(low[node], low[successor])
|
||||
|
||||
if num == low[node]:
|
||||
component = tuple(stack[stack_pos:])
|
||||
stack[stack_pos:] = []
|
||||
result.append(component)
|
||||
for item in component:
|
||||
low[item] = len(self)
|
||||
|
||||
for node in self:
|
||||
visit(node)
|
||||
|
||||
return result
|
||||
|
||||
def to_dot(self, fh, ws=' ' * 4):
|
||||
"""Convert the graph to DOT format.
|
||||
|
||||
:param fh: A file, or a file-like object to write the graph to.
|
||||
|
||||
"""
|
||||
fh.write('digraph dependencies {\n')
|
||||
for obj, adjacent in self.iteritems():
|
||||
if not adjacent:
|
||||
fh.write(ws + '"%s"\n' % (obj, ))
|
||||
for req in adjacent:
|
||||
fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
|
||||
fh.write('}\n')
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.adjacent)
|
||||
|
||||
def __getitem__(self, node):
|
||||
return self.adjacent[node]
|
||||
|
||||
def __len__(self):
|
||||
return len(self.adjacent)
|
||||
|
||||
def __contains__(self, obj):
|
||||
return obj in self.adjacent
|
||||
|
||||
def _iterate_items(self):
|
||||
return self.adjacent.iteritems()
|
||||
items = iteritems = _iterate_items
|
||||
|
||||
def __repr__(self):
|
||||
return '\n'.join(self.repr_node(N) for N in self)
|
||||
|
||||
def repr_node(self, obj, level=1):
|
||||
output = ['%s(%s)' % (obj, self.valency_of(obj))]
|
||||
if obj in self:
|
||||
for other in self[obj]:
|
||||
d = '%s(%s)' % (other, self.valency_of(other))
|
||||
output.append(' ' * level + d)
|
||||
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
|
||||
return '\n'.join(output)
|
||||
|
||||
|
||||
class AttributeDictMixin(object):
|
||||
"""Adds attribute access to mappings.
|
||||
|
||||
`d.key -> d[key]`
|
||||
|
||||
"""
|
||||
|
||||
def __getattr__(self, k):
|
||||
"""`d.key -> d[key]`"""
|
||||
try:
|
||||
return self[k]
|
||||
except KeyError:
|
||||
raise AttributeError(
|
||||
"'%s' object has no attribute '%s'" % (type(self).__name__, k))
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
"""`d[key] = value -> d.key = value`"""
|
||||
self[key] = value
|
||||
|
||||
|
||||
class AttributeDict(dict, AttributeDictMixin):
|
||||
"""Dict subclass with attribute access."""
|
||||
pass
|
||||
|
||||
|
||||
class DictAttribute(object):
|
||||
"""Dict interface to attributes.
|
||||
|
||||
`obj[k] -> obj.k`
|
||||
|
||||
"""
|
||||
obj = None
|
||||
|
||||
def __init__(self, obj):
|
||||
object.__setattr__(self, 'obj', obj)
|
||||
|
||||
def __getattr__(self, key):
|
||||
return getattr(self.obj, key)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
return setattr(self.obj, key, value)
|
||||
|
||||
def get(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def setdefault(self, key, default):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
self[key] = default
|
||||
return default
|
||||
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return getattr(self.obj, key)
|
||||
except AttributeError:
|
||||
raise KeyError(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
setattr(self.obj, key, value)
|
||||
|
||||
def __contains__(self, key):
|
||||
return hasattr(self.obj, key)
|
||||
|
||||
def _iterate_keys(self):
|
||||
return iter(dir(self.obj))
|
||||
iterkeys = _iterate_keys
|
||||
|
||||
def __iter__(self):
|
||||
return self._iterate_keys()
|
||||
|
||||
def _iterate_items(self):
|
||||
for key in self._iterate_keys():
|
||||
yield key, getattr(self.obj, key)
|
||||
iteritems = _iterate_items
|
||||
|
||||
if sys.version_info[0] == 3: # pragma: no cover
|
||||
items = _iterate_items
|
||||
keys = _iterate_keys
|
||||
else:
|
||||
|
||||
def keys(self):
|
||||
return list(self)
|
||||
|
||||
def items(self):
|
||||
return list(self._iterate_items())
|
||||
|
||||
|
||||
class ConfigurationView(AttributeDictMixin):
|
||||
"""A view over an applications configuration dicts.
|
||||
|
||||
If the key does not exist in ``changes``, the ``defaults`` dicts
|
||||
are consulted.
|
||||
|
||||
:param changes: Dict containing changes to the configuration.
|
||||
:param defaults: List of dicts containing the default configuration.
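A minimal illustrative example (the keys are made up)::

    >>> view = ConfigurationView({'timeout': 10},
    ...                          [{'timeout': 5, 'retries': 3}])
    >>> view['timeout'], view['retries']
    (10, 3)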
|
||||
|
||||
"""
|
||||
changes = None
|
||||
defaults = None
|
||||
_order = None
|
||||
|
||||
def __init__(self, changes, defaults):
|
||||
self.__dict__.update(changes=changes, defaults=defaults,
|
||||
_order=[changes] + defaults)
|
||||
|
||||
def add_defaults(self, d):
|
||||
if not isinstance(d, Mapping):
|
||||
d = DictAttribute(d)
|
||||
self.defaults.insert(0, d)
|
||||
self._order.insert(1, d)
|
||||
|
||||
def __getitem__(self, key):
|
||||
for d in self._order:
|
||||
try:
|
||||
return d[key]
|
||||
except KeyError:
|
||||
pass
|
||||
raise KeyError(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self.changes[key] = value
|
||||
|
||||
def first(self, *keys):
|
||||
return first(None, (self.get(key) for key in keys))
|
||||
|
||||
def get(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def setdefault(self, key, default):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
self[key] = default
|
||||
return default
|
||||
|
||||
def update(self, *args, **kwargs):
|
||||
return self.changes.update(*args, **kwargs)
|
||||
|
||||
def __contains__(self, key):
|
||||
for d in self._order:
|
||||
if key in d:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __repr__(self):
|
||||
return repr(dict(self.iteritems()))
|
||||
|
||||
def __iter__(self):
|
||||
return self._iterate_keys()
|
||||
|
||||
def __len__(self):
|
||||
# The logic for iterating keys includes uniq(),
|
||||
# so to be safe we count by explicitly iterating
|
||||
return len(self.keys())
|
||||
|
||||
def _iter(self, op):
|
||||
# defaults must be first in the stream, so values in
|
||||
# changes takes precedence.
|
||||
return chain(*[op(d) for d in reversed(self._order)])
|
||||
|
||||
def _iterate_keys(self):
|
||||
return uniq(self._iter(lambda d: d))
|
||||
iterkeys = _iterate_keys
|
||||
|
||||
def _iterate_items(self):
|
||||
return ((key, self[key]) for key in self)
|
||||
iteritems = _iterate_items
|
||||
|
||||
def _iterate_values(self):
|
||||
return (self[key] for key in self)
|
||||
itervalues = _iterate_values
|
||||
|
||||
def keys(self):
|
||||
return list(self._iterate_keys())
|
||||
|
||||
def items(self):
|
||||
return list(self._iterate_items())
|
||||
|
||||
def values(self):
|
||||
return list(self._iterate_values())
|
||||
if MutableMapping:
|
||||
MutableMapping.register(ConfigurationView)
|
||||
|
||||
|
||||
class LimitedSet(object):
|
||||
"""Kind-of Set with limitations.
|
||||
|
||||
Good for when you need to test for membership (`a in set`),
|
||||
but the list might become too big, so you want to limit it so it doesn't
|
||||
consume too many resources.
|
||||
|
||||
:keyword maxlen: Maximum number of members before we start
|
||||
evicting expired members.
|
||||
:keyword expires: Time in seconds, before a membership expires.
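A minimal illustrative example (values are made up)::

    >>> revoked = LimitedSet(maxlen=10000, expires=3600)
    >>> revoked.add('some-task-id')
    >>> 'some-task-id' in revoked
    True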
|
||||
|
||||
"""
|
||||
__slots__ = ('maxlen', 'expires', '_data', '__len__', '_heap')
|
||||
|
||||
def __init__(self, maxlen=None, expires=None, data=None, heap=None):
|
||||
self.maxlen = maxlen
|
||||
self.expires = expires
|
||||
self._data = data or {}
|
||||
self._heap = heap or []
|
||||
self.__len__ = self._data.__len__
|
||||
|
||||
def add(self, value):
|
||||
"""Add a new member."""
|
||||
self.purge(1)
|
||||
now = time.time()
|
||||
self._data[value] = now
|
||||
heappush(self._heap, (now, value))
|
||||
|
||||
def __reduce__(self):
|
||||
return self.__class__, (
|
||||
self.maxlen, self.expires, self._data, self._heap,
|
||||
)
|
||||
|
||||
def clear(self):
|
||||
"""Remove all members"""
|
||||
self._data.clear()
|
||||
self._heap[:] = []
|
||||
|
||||
def pop_value(self, value):
|
||||
"""Remove membership by finding value."""
|
||||
try:
|
||||
itime = self._data[value]
|
||||
except KeyError:
|
||||
return
|
||||
try:
|
||||
self._heap.remove((itime, value))
|
||||
except ValueError:
|
||||
pass
|
||||
self._data.pop(value, None)
|
||||
|
||||
def _expire_item(self):
|
||||
"""Hunt down and remove an expired item."""
|
||||
self.purge(1)
|
||||
|
||||
def __contains__(self, value):
|
||||
return value in self._data
|
||||
|
||||
def purge(self, limit=None):
|
||||
H, maxlen = self._heap, self.maxlen
|
||||
if not maxlen:
|
||||
return
|
||||
i = 0
|
||||
while len(self) >= maxlen:
|
||||
if limit and i > limit:
|
||||
break
|
||||
try:
|
||||
item = heappop(H)
|
||||
except IndexError:
|
||||
break
|
||||
if self.expires:
|
||||
if time.time() < item[0] + self.expires:
|
||||
heappush(H, item)
|
||||
break
|
||||
self._data.pop(item[1])
|
||||
i += 1
|
||||
|
||||
def update(self, other, heappush=heappush):
|
||||
if isinstance(other, self.__class__):
|
||||
self._data.update(other._data)
|
||||
self._heap.extend(other._heap)
|
||||
heapify(self._heap)
|
||||
else:
|
||||
for obj in other:
|
||||
self.add(obj)
|
||||
|
||||
def as_dict(self):
|
||||
return self._data
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._data)
|
||||
|
||||
def __repr__(self):
|
||||
return 'LimitedSet(%s)' % (repr(list(self._data))[:100], )
|
||||
|
||||
@property
|
||||
def chronologically(self):
|
||||
return [value for _, value in self._heap]
|
||||
|
||||
@property
|
||||
def first(self):
|
||||
"""Get the oldest member."""
|
||||
return self._heap[0][1]
|
||||
286
awx/lib/site-packages/celery/events/__init__.py
Normal file
@@ -0,0 +1,286 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
celery.events
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Events is a stream of messages sent for certain actions occurring
|
||||
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
|
||||
is enabled), used for monitoring purposes.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import with_statement
|
||||
|
||||
import time
|
||||
import socket
|
||||
import threading
|
||||
|
||||
from collections import deque
|
||||
from contextlib import contextmanager
|
||||
from copy import copy
|
||||
|
||||
from kombu import eventloop, Exchange, Queue, Consumer, Producer
|
||||
from kombu.utils import cached_property
|
||||
|
||||
from celery.app import app_or_default
|
||||
from celery.utils import uuid
|
||||
|
||||
event_exchange = Exchange('celeryev', type='topic')
|
||||
|
||||
|
||||
def get_exchange(conn):
|
||||
ex = copy(event_exchange)
|
||||
if conn.transport.driver_type == 'redis':
|
||||
# quick hack for Issue #436
|
||||
ex.type = 'fanout'
|
||||
return ex
|
||||
|
||||
|
||||
def Event(type, _fields=None, **fields):
|
||||
"""Create an event.
|
||||
|
||||
An event is a dictionary, the only required field is ``type``.
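A minimal illustrative example (field values are made up)::

    >>> event = Event('worker-online', hostname='worker1.example.com')
    >>> sorted(event)
    ['hostname', 'timestamp', 'type']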
|
||||
|
||||
"""
|
||||
event = dict(_fields or {}, type=type, **fields)
|
||||
if 'timestamp' not in event:
|
||||
event['timestamp'] = time.time()
|
||||
return event
|
||||
|
||||
|
||||
class EventDispatcher(object):
|
||||
"""Send events as messages.
|
||||
|
||||
:param connection: Connection to the broker.
|
||||
|
||||
:keyword hostname: Hostname to identify ourselves as,
|
||||
by default uses the hostname returned by :func:`socket.gethostname`.
|
||||
|
||||
:keyword enabled: Set to :const:`False` to not actually publish any events,
|
||||
making :meth:`send` a noop operation.
|
||||
|
||||
:keyword channel: Can be used instead of `connection` to specify
|
||||
an exact channel to use when sending events.
|
||||
|
||||
:keyword buffer_while_offline: If enabled events will be buffered
|
||||
while the connection is down. :meth:`flush` must be called
|
||||
as soon as the connection is re-established.
|
||||
|
||||
You need to :meth:`close` this after use.
|
||||
|
||||
"""
|
||||
DISABLED_TRANSPORTS = set(['sql'])
|
||||
|
||||
def __init__(self, connection=None, hostname=None, enabled=True,
|
||||
channel=None, buffer_while_offline=True, app=None,
|
||||
serializer=None):
|
||||
self.app = app_or_default(app or self.app)
|
||||
self.connection = connection
|
||||
self.channel = channel
|
||||
self.hostname = hostname or socket.gethostname()
|
||||
self.buffer_while_offline = buffer_while_offline
|
||||
self.mutex = threading.Lock()
|
||||
self.producer = None
|
||||
self._outbound_buffer = deque()
|
||||
self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
|
||||
self.on_enabled = set()
|
||||
self.on_disabled = set()
|
||||
|
||||
self.enabled = enabled
|
||||
if not connection and channel:
|
||||
self.connection = channel.connection.client
|
||||
self.enabled = enabled
|
||||
conninfo = self.connection or self.app.connection()
|
||||
self.exchange = get_exchange(conninfo)
|
||||
if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
|
||||
self.enabled = False
|
||||
if self.enabled:
|
||||
self.enable()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_info):
|
||||
self.close()
|
||||
|
||||
def enable(self):
|
||||
self.producer = Producer(self.channel or self.connection,
|
||||
exchange=self.exchange,
|
||||
serializer=self.serializer)
|
||||
self.enabled = True
|
||||
for callback in self.on_enabled:
|
||||
callback()
|
||||
|
||||
def disable(self):
|
||||
if self.enabled:
|
||||
self.enabled = False
|
||||
self.close()
|
||||
for callback in self.on_disabled:
|
||||
callback()
|
||||
|
||||
def publish(self, type, fields, producer, retry=False, retry_policy=None):
|
||||
with self.mutex:
|
||||
event = Event(type, hostname=self.hostname,
|
||||
clock=self.app.clock.forward(), **fields)
|
||||
exchange = self.exchange
|
||||
producer.publish(
|
||||
event,
|
||||
routing_key=type.replace('-', '.'),
|
||||
exchange=exchange.name,
|
||||
retry=retry,
|
||||
retry_policy=retry_policy,
|
||||
declare=[exchange],
|
||||
serializer=self.serializer,
|
||||
)
|
||||
|
||||
def send(self, type, **fields):
|
||||
"""Send event.
|
||||
|
||||
:param type: Kind of event.
|
||||
:keyword \*\*fields: Event arguments.
|
||||
|
||||
"""
|
||||
if self.enabled:
|
||||
try:
|
||||
self.publish(type, fields, self.producer)
|
||||
except Exception, exc:
|
||||
if not self.buffer_while_offline:
|
||||
raise
|
||||
self._outbound_buffer.append((type, fields, exc))
|
||||
|
||||
def flush(self):
|
||||
while self._outbound_buffer:
|
||||
try:
|
||||
type, fields, _ = self._outbound_buffer.popleft()
|
||||
except IndexError:
|
||||
return
|
||||
self.send(type, **fields)
|
||||
|
||||
def copy_buffer(self, other):
|
||||
self._outbound_buffer = other._outbound_buffer
|
||||
|
||||
def close(self):
|
||||
"""Close the event dispatcher."""
|
||||
self.mutex.locked() and self.mutex.release()
|
||||
self.producer = None
|
||||
|
||||
def _get_publisher(self):
|
||||
return self.producer
|
||||
|
||||
def _set_publisher(self, producer):
|
||||
self.producer = producer
|
||||
publisher = property(_get_publisher, _set_publisher) # XXX compat
|
||||
|
||||
|
||||
class EventReceiver(object):
|
||||
"""Capture events.
|
||||
|
||||
:param connection: Connection to the broker.
|
||||
:keyword handlers: Event handlers.
|
||||
|
||||
:attr:`handlers` is a dict of event types and their handlers,
|
||||
the special handler `"*"` captures all events that don't have a
|
||||
handler.
|
||||
|
||||
"""
|
||||
handlers = {}
|
||||
|
||||
def __init__(self, connection, handlers=None, routing_key='#',
|
||||
node_id=None, app=None, queue_prefix='celeryev'):
|
||||
self.app = app_or_default(app)
|
||||
self.connection = connection
|
||||
if handlers is not None:
|
||||
self.handlers = handlers
|
||||
self.routing_key = routing_key
|
||||
self.node_id = node_id or uuid()
|
||||
self.queue_prefix = queue_prefix
|
||||
self.exchange = get_exchange(self.connection or self.app.connection())
|
||||
self.queue = Queue('.'.join([self.queue_prefix, self.node_id]),
|
||||
exchange=self.exchange,
|
||||
routing_key=self.routing_key,
|
||||
auto_delete=True,
|
||||
durable=False)
|
||||
|
||||
def process(self, type, event):
|
||||
"""Process the received event by dispatching it to the appropriate
|
||||
handler."""
|
||||
handler = self.handlers.get(type) or self.handlers.get('*')
|
||||
handler and handler(event)
|
||||
|
||||
@contextmanager
|
||||
def consumer(self, wakeup=True):
|
||||
"""Create event consumer."""
|
||||
consumer = Consumer(self.connection,
|
||||
queues=[self.queue], no_ack=True,
|
||||
accept=['application/json'])
|
||||
consumer.register_callback(self._receive)
|
||||
consumer.consume()
|
||||
|
||||
try:
|
||||
if wakeup:
|
||||
self.wakeup_workers(channel=consumer.channel)
|
||||
yield consumer
|
||||
finally:
|
||||
try:
|
||||
consumer.cancel()
|
||||
except self.connection.connection_errors:
|
||||
pass
|
||||
|
||||
def itercapture(self, limit=None, timeout=None, wakeup=True):
|
||||
with self.consumer(wakeup=wakeup) as consumer:
|
||||
yield consumer
|
||||
self.drain_events(limit=limit, timeout=timeout)
|
||||
|
||||
def capture(self, limit=None, timeout=None, wakeup=True):
|
||||
"""Open up a consumer capturing events.
|
||||
|
||||
This has to run in the main process, and it will never
|
||||
stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
|
||||
|
||||
"""
|
||||
list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup))
|
||||
|
||||
def wakeup_workers(self, channel=None):
|
||||
self.app.control.broadcast('heartbeat',
|
||||
connection=self.connection,
|
||||
channel=channel)
|
||||
|
||||
def drain_events(self, **kwargs):
|
||||
for _ in eventloop(self.connection, **kwargs):
|
||||
pass
|
||||
|
||||
def _receive(self, body, message):
|
||||
type = body.pop('type').lower()
|
||||
clock = body.get('clock')
|
||||
if clock:
|
||||
self.app.clock.adjust(clock)
|
||||
self.process(type, Event(type, body))
|
||||
|
||||
|
||||
class Events(object):

    def __init__(self, app=None):
        self.app = app

    @cached_property
    def Receiver(self):
        return self.app.subclass_with_self(EventReceiver,
                                           reverse='events.Receiver')

    @cached_property
    def Dispatcher(self):
        return self.app.subclass_with_self(EventDispatcher,
                                           reverse='events.Dispatcher')

    @cached_property
    def State(self):
        return self.app.subclass_with_self('celery.events.state:State',
                                           reverse='events.State')

    @contextmanager
    def default_dispatcher(self, hostname=None, enabled=True,
                           buffer_while_offline=False):
        with self.app.amqp.producer_pool.acquire(block=True) as pub:
            with self.Dispatcher(pub.connection, hostname, enabled,
                                 pub.channel, buffer_while_offline) as d:
                yield d
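The classes above are the receiving and dispatching halves of Celery's event bus. A minimal usage sketch follows; it is not part of the vendored file, and `my_app`, `announce`, the broker URL, and the chosen event types are illustrative assumptions only.

# Sketch only -- not part of awx/lib/site-packages. Assumes a reachable
# broker and a worker started with events enabled (celery worker -E).
from celery import Celery

my_app = Celery(broker='amqp://guest@localhost//')   # hypothetical app/broker

def announce(event):                  # hypothetical handler for one event type
    print('task succeeded: %r' % (event, ))

def catch_all(event):                 # '*' receives events with no handler
    pass

with my_app.connection() as connection:
    recv = my_app.events.Receiver(connection, handlers={
        'task-succeeded': announce,
        '*': catch_all,
    })
    recv.capture(limit=10, timeout=5)     # runs in the main process

# The dispatching side, via the default_dispatcher() helper defined above:
with my_app.events.default_dispatcher() as dispatcher:
    dispatcher.send('worker-heartbeat')   # event type chosen for illustration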
527
awx/lib/site-packages/celery/events/cursesmon.py
Normal file
527
awx/lib/site-packages/celery/events/cursesmon.py
Normal file
@@ -0,0 +1,527 @@
# -*- coding: utf-8 -*-
"""
    celery.events.cursesmon
    ~~~~~~~~~~~~~~~~~~~~~~~

    Graphical monitor of Celery events using curses.

"""
from __future__ import absolute_import
from __future__ import with_statement

import curses
import sys
import threading
import time

from datetime import datetime
from itertools import count
from textwrap import wrap
from math import ceil

from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.utils.text import abbr, abbrtask

BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16

# this module is considered experimental
# we don't care about coverage.

class CursesMonitor(object):  # pragma: no cover
    keymap = {}
    win = None
    screen_width = None
    screen_delay = 10
    selected_task = None
    selected_position = 0
    selected_str = 'Selected: '
    foreground = curses.COLOR_BLACK
    background = curses.COLOR_WHITE
    online_str = 'Workers online: '
    help_title = 'Keys: '
    help = ('j:up k:down i:info t:traceback r:result c:revoke ^c: quit')
    greet = 'celeryev %s' % VERSION_BANNER
    info_str = 'Info: '

    def __init__(self, state, keymap=None, app=None):
        self.app = app_or_default(app)
        self.keymap = keymap or self.keymap
        self.state = state
        default_keymap = {'J': self.move_selection_down,
                          'K': self.move_selection_up,
                          'C': self.revoke_selection,
                          'T': self.selection_traceback,
                          'R': self.selection_result,
                          'I': self.selection_info,
                          'L': self.selection_rate_limit}
        self.keymap = dict(default_keymap, **self.keymap)

    def format_row(self, uuid, task, worker, timestamp, state):
        mx = self.display_width

        # include spacing
        detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
        uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH

        if uuid_space < UUID_WIDTH:
            uuid_width = uuid_space
        else:
            uuid_width = UUID_WIDTH

        detail_width = detail_width - uuid_width - 1
        task_width = int(ceil(detail_width / 2.0))
        worker_width = detail_width - task_width - 1

        uuid = abbr(uuid, uuid_width).ljust(uuid_width)
        worker = abbr(worker, worker_width).ljust(worker_width)
        task = abbrtask(task, task_width).ljust(task_width)
        state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
        timestamp = timestamp.ljust(TIMESTAMP_WIDTH)

        row = '%s %s %s %s %s ' % (uuid, worker, task, timestamp, state)
        if self.screen_width is None:
            self.screen_width = len(row[:mx])
        return row[:mx]

    @property
    def screen_width(self):
        _, mx = self.win.getmaxyx()
        return mx

    @property
    def screen_height(self):
        my, _ = self.win.getmaxyx()
        return my

    @property
    def display_width(self):
        _, mx = self.win.getmaxyx()
        return mx - BORDER_SPACING

    @property
    def display_height(self):
        my, _ = self.win.getmaxyx()
        return my - 10

    @property
    def limit(self):
        return self.display_height

    def find_position(self):
        if not self.tasks:
            return 0
        for i, e in enumerate(self.tasks):
            if self.selected_task == e[0]:
                return i
        return 0

    def move_selection_up(self):
        self.move_selection(-1)

    def move_selection_down(self):
        self.move_selection(1)

    def move_selection(self, direction=1):
        if not self.tasks:
            return
        pos = self.find_position()
        try:
            self.selected_task = self.tasks[pos + direction][0]
        except IndexError:
            self.selected_task = self.tasks[0][0]

    keyalias = {curses.KEY_DOWN: 'J',
                curses.KEY_UP: 'K',
                curses.KEY_ENTER: 'I'}

    def handle_keypress(self):
        try:
            key = self.win.getkey().upper()
        except:
            return
        key = self.keyalias.get(key) or key
        handler = self.keymap.get(key)
        if handler is not None:
            handler()

    def alert(self, callback, title=None):
        self.win.erase()
        my, mx = self.win.getmaxyx()
        y = blank_line = count(2).next
        if title:
            self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE)
            blank_line()
        callback(my, mx, y())
        self.win.addstr(my - 1, 0, 'Press any key to continue...',
                        curses.A_BOLD)
        self.win.refresh()
        while 1:
            try:
                return self.win.getkey().upper()
            except:
                pass

    def selection_rate_limit(self):
        if not self.selected_task:
            return curses.beep()
        task = self.state.tasks[self.selected_task]
        if not task.name:
            return curses.beep()

        my, mx = self.win.getmaxyx()
        r = 'New rate limit: '
        self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
        self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
        rlimit = self.readline(my - 2, 3 + len(r))

        if rlimit:
            reply = self.app.control.rate_limit(task.name,
                                                rlimit.strip(), reply=True)
            self.alert_remote_control_reply(reply)

    def alert_remote_control_reply(self, reply):

        def callback(my, mx, xs):
            y = count(xs).next
            if not reply:
                self.win.addstr(
                    y(), 3, 'No replies received in 1s deadline.',
                    curses.A_BOLD + curses.color_pair(2),
                )
                return

            for subreply in reply:
                curline = y()

                host, response = subreply.items()[0]
                host = '%s: ' % host
                self.win.addstr(curline, 3, host, curses.A_BOLD)
                attr = curses.A_NORMAL
                text = ''
                if 'error' in response:
                    text = response['error']
                    attr |= curses.color_pair(2)
                elif 'ok' in response:
                    text = response['ok']
                    attr |= curses.color_pair(3)
                self.win.addstr(curline, 3 + len(host), text, attr)

        return self.alert(callback, 'Remote Control Command Replies')

    def readline(self, x, y):
        buffer = str()
        curses.echo()
        try:
            i = 0
            while 1:
                ch = self.win.getch(x, y + i)
                if ch != -1:
                    if ch in (10, curses.KEY_ENTER):  # enter
                        break
                    if ch in (27, ):
                        buffer = str()
                        break
                    buffer += chr(ch)
                    i += 1
        finally:
            curses.noecho()
        return buffer

    def revoke_selection(self):
        if not self.selected_task:
            return curses.beep()
        reply = self.app.control.revoke(self.selected_task, reply=True)
        self.alert_remote_control_reply(reply)

    def selection_info(self):
        if not self.selected_task:
            return

        def alert_callback(mx, my, xs):
            my, mx = self.win.getmaxyx()
            y = count(xs).next
            task = self.state.tasks[self.selected_task]
            info = task.info(extra=['state'])
            infoitems = [('args', info.pop('args', None)),
                         ('kwargs', info.pop('kwargs', None))] + info.items()
            for key, value in infoitems:
                if key is None:
                    continue
                value = str(value)
                curline = y()
                keys = key + ': '
                self.win.addstr(curline, 3, keys, curses.A_BOLD)
                wrapped = wrap(value, mx - 2)
                if len(wrapped) == 1:
                    self.win.addstr(
                        curline, len(keys) + 3,
                        abbr(wrapped[0],
                             self.screen_width - (len(keys) + 3)))
                else:
                    for subline in wrapped:
                        nexty = y()
                        if nexty >= my - 1:
                            subline = ' ' * 4 + '[...]'
                        elif nexty >= my:
                            break
                        self.win.addstr(
                            nexty, 3,
                            abbr(' ' * 4 + subline, self.screen_width - 4),
                            curses.A_NORMAL,
                        )

        return self.alert(
            alert_callback, 'Task details for %s' % self.selected_task,
        )

    def selection_traceback(self):
        if not self.selected_task:
            return curses.beep()
        task = self.state.tasks[self.selected_task]
        if task.state not in states.EXCEPTION_STATES:
            return curses.beep()

        def alert_callback(my, mx, xs):
            y = count(xs).next
            for line in task.traceback.split('\n'):
                self.win.addstr(y(), 3, line)

        return self.alert(
            alert_callback,
            'Task Exception Traceback for %s' % self.selected_task,
        )

    def selection_result(self):
        if not self.selected_task:
            return

        def alert_callback(my, mx, xs):
            y = count(xs).next
            task = self.state.tasks[self.selected_task]
            result = (getattr(task, 'result', None)
                      or getattr(task, 'exception', None))
            for line in wrap(result, mx - 2):
                self.win.addstr(y(), 3, line)

        return self.alert(
            alert_callback, 'Task Result for %s' % self.selected_task,
        )

    def display_task_row(self, lineno, task):
        state_color = self.state_colors.get(task.state)
        attr = curses.A_NORMAL
        if task.uuid == self.selected_task:
            attr = curses.A_STANDOUT
        timestamp = datetime.utcfromtimestamp(
            task.timestamp or time.time(),
        )
        timef = timestamp.strftime('%H:%M:%S')
        hostname = task.worker.hostname if task.worker else '*NONE*'
        line = self.format_row(task.uuid, task.name,
                               hostname,
                               timef, task.state)
        self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)

        if state_color:
            self.win.addstr(lineno,
                            len(line) - STATE_WIDTH + BORDER_SPACING - 1,
                            task.state, state_color | attr)

    def draw(self):
        win = self.win
        self.handle_keypress()
        x = LEFT_BORDER_OFFSET
        y = blank_line = count(2).next
        my, mx = win.getmaxyx()
        win.erase()
        win.bkgd(' ', curses.color_pair(1))
        win.border()
        win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
        blank_line()
        win.addstr(y(), x, self.format_row('UUID', 'TASK',
                                           'WORKER', 'TIME', 'STATE'),
                   curses.A_BOLD | curses.A_UNDERLINE)
        tasks = self.tasks
        if tasks:
            for row, (uuid, task) in enumerate(tasks):
                if row > self.display_height:
                    break

                if task.uuid:
                    lineno = y()
                self.display_task_row(lineno, task)

        # -- Footer
        blank_line()
        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)

        # Selected Task Info
        if self.selected_task:
            win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
            info = 'Missing extended info'
            detail = ''
            try:
                selection = self.state.tasks[self.selected_task]
            except KeyError:
                pass
            else:
                info = selection.info()
                if 'runtime' in info:
                    info['runtime'] = '%.2fs' % info['runtime']
                if 'result' in info:
                    info['result'] = abbr(info['result'], 16)
                info = ' '.join(
                    '%s=%s' % (key, value) for key, value in info.items())
                detail = '... -> key i'
            infowin = abbr(info,
                           self.screen_width - len(self.selected_str) - 2,
                           detail)
            win.addstr(my - 5, x + len(self.selected_str), infowin)
            # Make ellipsis bold
            if detail in infowin:
                detailpos = len(infowin) - len(detail)
                win.addstr(my - 5, x + len(self.selected_str) + detailpos,
                           detail, curses.A_BOLD)
        else:
            win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)

        # Workers
        if self.workers:
            win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
            win.addstr(my - 4, x + len(self.online_str),
                       ', '.join(sorted(self.workers)), curses.A_NORMAL)
        else:
            win.addstr(my - 4, x, 'No workers discovered.')

        # Info
        win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
        win.addstr(
            my - 3, x + len(self.info_str),
            'events:%s tasks:%s workers:%s/%s' % (
                self.state.event_count, self.state.task_count,
                len([w for w in self.state.workers.values()
                     if w.alive]),
                len(self.state.workers)),
            curses.A_DIM,
        )

        # Help
        self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
        self.safe_add_str(my - 2, x + len(self.help_title), self.help,
                          curses.A_DIM)
        win.refresh()

    def safe_add_str(self, y, x, string, *args, **kwargs):
        if x + len(string) > self.screen_width:
            string = string[:self.screen_width - x]
        self.win.addstr(y, x, string, *args, **kwargs)

    def init_screen(self):
        self.win = curses.initscr()
        self.win.nodelay(True)
        self.win.keypad(True)
        curses.start_color()
        curses.init_pair(1, self.foreground, self.background)
        # exception states
        curses.init_pair(2, curses.COLOR_RED, self.background)
        # successful state
        curses.init_pair(3, curses.COLOR_GREEN, self.background)
        # revoked state
        curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
        # greeting
        curses.init_pair(5, curses.COLOR_BLUE, self.background)
        # started state
        curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)

        self.state_colors = {states.SUCCESS: curses.color_pair(3),
                             states.REVOKED: curses.color_pair(4),
                             states.STARTED: curses.color_pair(6)}
        for state in states.EXCEPTION_STATES:
            self.state_colors[state] = curses.color_pair(2)

        curses.cbreak()

    def resetscreen(self):
        curses.nocbreak()
        self.win.keypad(False)
        curses.echo()
        curses.endwin()

    def nap(self):
        curses.napms(self.screen_delay)

    @property
    def tasks(self):
        return self.state.tasks_by_timestamp()[:self.limit]

    @property
    def workers(self):
        return [hostname for hostname, w in self.state.workers.items()
                if w.alive]

class DisplayThread(threading.Thread):  # pragma: no cover

    def __init__(self, display):
        self.display = display
        self.shutdown = False
        threading.Thread.__init__(self)

    def run(self):
        while not self.shutdown:
            self.display.draw()
            self.display.nap()

def capture_events(app, state, display):  # pragma: no cover

    def on_connection_error(exc, interval):
        sys.stderr.write('Connection Error: %r. Retry in %ss.' % (
            exc, interval))

    while 1:
        sys.stderr.write('-> evtop: starting capture...\n')
        with app.connection() as conn:
            try:
                conn.ensure_connection(on_connection_error,
                                       app.conf.BROKER_CONNECTION_MAX_RETRIES)
                recv = app.events.Receiver(conn, handlers={'*': state.event})
                display.resetscreen()
                display.init_screen()
                with recv.consumer():
                    recv.drain_events(timeout=1, ignore_timeouts=True)
            except (conn.connection_errors, conn.channel_errors), exc:
                sys.stderr.write('Connection lost: %r' % (exc, ))


def evtop(app=None):  # pragma: no cover
    app = app_or_default(app)
    state = app.events.State()
    display = CursesMonitor(state, app=app)
    display.init_screen()
    refresher = DisplayThread(display)
    refresher.start()
    try:
        capture_events(app, state, display)
    except Exception:
        refresher.shutdown = True
        refresher.join()
        display.resetscreen()
        raise
    except (KeyboardInterrupt, SystemExit):
        refresher.shutdown = True
        refresher.join()
        display.resetscreen()


if __name__ == '__main__':  # pragma: no cover
    evtop()
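For reference, the monitor can also be driven programmatically, which is roughly what the bundled `celery events` console command does. The following is a sketch only; it is not part of the vendored file, and `my_app` and the broker URL are assumptions.

# Sketch only -- not part of the vendored file. Requires a real terminal,
# a reachable broker, and workers started with events enabled (-E).
from celery import Celery
from celery.events.cursesmon import evtop

my_app = Celery(broker='amqp://guest@localhost//')   # hypothetical broker URL

if __name__ == '__main__':
    # evtop() wires State -> CursesMonitor -> DisplayThread -> capture_events
    # and blocks until KeyboardInterrupt, then restores the terminal.
    evtop(app=my_app)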
Some files were not shown because too many files have changed in this diff.