Some text with an ABBR and a REF. Ignore REFERENCE and ref.
- -Copyright 2007-2008 -* [Waylan Limberg](http://achinghead.com/) -* [Seemant Kulleen](http://www.kulleen.org/) - - -''' - -from __future__ import absolute_import -from __future__ import unicode_literals -from . import Extension -from ..preprocessors import Preprocessor -from ..inlinepatterns import Pattern -from ..util import etree, AtomicString -import re - -# Global Vars -ABBR_REF_RE = re.compile(r'[*]\[(?P[^\]]*)\][ ]?:\s*(?PSome text with an ABBR and a REF. Ignore REFERENCE and ref.
+ +Copyright 2007-2008 +* [Waylan Limberg](http://achinghead.com/) +* [Seemant Kulleen](http://www.kulleen.org/) + + +''' + +from __future__ import absolute_import +from __future__ import unicode_literals +from . import Extension +from ..preprocessors import Preprocessor +from ..inlinepatterns import Pattern +from ..util import etree, AtomicString +import re + +# Global Vars +ABBR_REF_RE = re.compile(r'[*]\[(?P[^\]]*)\][ ]?:\s*(?PNote
-This is the first line inside the box
-Did you know?
-Another line here.
-Note
` - title = klass.capitalize() - elif title == '': - # an explicit blank title should not be rendered - # e.g.: `!!! warning ""` will *not* render `p` with a title - title = None - return klass, title - - -def makeExtension(configs={}): - return AdmonitionExtension(configs=configs) +""" +Admonition extension for Python-Markdown +======================================== + +Adds rST-style admonitions. Inspired by [rST][] feature with the same name. + +The syntax is (followed by an indented block with the contents): + !!! [type] [optional explicit title] + +Where `type` is used as a CSS class name of the div. If not present, `title` +defaults to the capitalized `type`, so "note" -> "Note". + +rST suggests the following `types`, but you're free to use whatever you want: + attention, caution, danger, error, hint, important, note, tip, warning + + +A simple example: + !!! note + This is the first line inside the box. + +Outputs: +Note
+This is the first line inside the box
+Did you know?
+Another line here.
+Note
` + title = klass.capitalize() + elif title == '': + # an explicit blank title should not be rendered + # e.g.: `!!! warning ""` will *not* render `p` with a title + title = None + return klass, title + + +def makeExtension(configs={}): + return AdmonitionExtension(configs=configs) diff --git a/awx/lib/site-packages/markdown/extensions/attr_list.py b/awx/lib/site-packages/markdown/extensions/attr_list.py index 209dbaf783..8b65f5661d 100644 --- a/awx/lib/site-packages/markdown/extensions/attr_list.py +++ b/awx/lib/site-packages/markdown/extensions/attr_list.py @@ -1,168 +1,168 @@ -""" -Attribute List Extension for Python-Markdown -============================================ - -Adds attribute list syntax. Inspired by -[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s -feature of the same name. - -Copyright 2011 [Waylan Limberg](http://achinghead.com/). - -Contact: markdown@freewisdom.org - -License: BSD (see ../LICENSE.md for details) - -Dependencies: -* [Python 2.4+](http://python.org) -* [Markdown 2.1+](http://packages.python.org/Markdown/) - -""" - -from __future__ import absolute_import -from __future__ import unicode_literals -from . 
import Extension -from ..treeprocessors import Treeprocessor -from ..util import isBlockLevel -import re - -try: - Scanner = re.Scanner -except AttributeError: - # must be on Python 2.4 - from sre import Scanner - -def _handle_double_quote(s, t): - k, v = t.split('=') - return k, v.strip('"') - -def _handle_single_quote(s, t): - k, v = t.split('=') - return k, v.strip("'") - -def _handle_key_value(s, t): - return t.split('=') - -def _handle_word(s, t): - if t.startswith('.'): - return '.', t[1:] - if t.startswith('#'): - return 'id', t[1:] - return t, t - -_scanner = Scanner([ - (r'[^ ]+=".*?"', _handle_double_quote), - (r"[^ ]+='.*?'", _handle_single_quote), - (r'[^ ]+=[^ ]*', _handle_key_value), - (r'[^ ]+', _handle_word), - (r' ', None) -]) - -def get_attrs(str): - """ Parse attribute list and return a list of attribute tuples. """ - return _scanner.scan(str)[0] - -def isheader(elem): - return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] - -class AttrListTreeprocessor(Treeprocessor): - - BASE_RE = r'\{\:?([^\}]*)\}' - HEADER_RE = re.compile(r'[ ]+%s[ ]*$' % BASE_RE) - BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE) - INLINE_RE = re.compile(r'^%s' % BASE_RE) - NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d' - r'\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef' - r'\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd' - r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+') - - def run(self, doc): - for elem in doc.getiterator(): - if isBlockLevel(elem.tag): - # Block level: check for attrs on last line of text - RE = self.BLOCK_RE - if isheader(elem) or elem.tag == 'dt': - # header or def-term: check for attrs at end of line - RE = self.HEADER_RE - if len(elem) and elem.tag == 'li': - # special case list items. children may include a ul or ol. - pos = None - # find the ul or ol position - for i, child in enumerate(elem): - if child.tag in ['ul', 'ol']: - pos = i - break - if pos is None and elem[-1].tail: - # use tail of last child. 
no ul or ol. - m = RE.search(elem[-1].tail) - if m: - self.assign_attrs(elem, m.group(1)) - elem[-1].tail = elem[-1].tail[:m.start()] - elif pos is not None and pos > 0 and elem[pos-1].tail: - # use tail of last child before ul or ol - m = RE.search(elem[pos-1].tail) - if m: - self.assign_attrs(elem, m.group(1)) - elem[pos-1].tail = elem[pos-1].tail[:m.start()] - elif elem.text: - # use text. ul is first child. - m = RE.search(elem.text) - if m: - self.assign_attrs(elem, m.group(1)) - elem.text = elem.text[:m.start()] - elif len(elem) and elem[-1].tail: - # has children. Get from tail of last child - m = RE.search(elem[-1].tail) - if m: - self.assign_attrs(elem, m.group(1)) - elem[-1].tail = elem[-1].tail[:m.start()] - if isheader(elem): - # clean up trailing #s - elem[-1].tail = elem[-1].tail.rstrip('#').rstrip() - elif elem.text: - # no children. Get from text. - m = RE.search(elem.text) - if not m and elem.tag == 'td': - m = re.search(self.BASE_RE, elem.text) - if m: - self.assign_attrs(elem, m.group(1)) - elem.text = elem.text[:m.start()] - if isheader(elem): - # clean up trailing #s - elem.text = elem.text.rstrip('#').rstrip() - else: - # inline: check for attrs at start of tail - if elem.tail: - m = self.INLINE_RE.match(elem.tail) - if m: - self.assign_attrs(elem, m.group(1)) - elem.tail = elem.tail[m.end():] - - def assign_attrs(self, elem, attrs): - """ Assign attrs to element. """ - for k, v in get_attrs(attrs): - if k == '.': - # add to class - cls = elem.get('class') - if cls: - elem.set('class', '%s %s' % (cls, v)) - else: - elem.set('class', v) - else: - # assign attr k with v - elem.set(self.sanitize_name(k), v) - - def sanitize_name(self, name): - """ - Sanitize name as 'an XML Name, minus the ":"'. 
- See http://www.w3.org/TR/REC-xml-names/#NT-NCName - """ - return self.NAME_RE.sub('_', name) - - -class AttrListExtension(Extension): - def extendMarkdown(self, md, md_globals): - md.treeprocessors.add('attr_list', AttrListTreeprocessor(md), '>prettify') - - -def makeExtension(configs={}): - return AttrListExtension(configs=configs) +""" +Attribute List Extension for Python-Markdown +============================================ + +Adds attribute list syntax. Inspired by +[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s +feature of the same name. + +Copyright 2011 [Waylan Limberg](http://achinghead.com/). + +Contact: markdown@freewisdom.org + +License: BSD (see ../LICENSE.md for details) + +Dependencies: +* [Python 2.4+](http://python.org) +* [Markdown 2.1+](http://packages.python.org/Markdown/) + +""" + +from __future__ import absolute_import +from __future__ import unicode_literals +from . import Extension +from ..treeprocessors import Treeprocessor +from ..util import isBlockLevel +import re + +try: + Scanner = re.Scanner +except AttributeError: + # must be on Python 2.4 + from sre import Scanner + +def _handle_double_quote(s, t): + k, v = t.split('=') + return k, v.strip('"') + +def _handle_single_quote(s, t): + k, v = t.split('=') + return k, v.strip("'") + +def _handle_key_value(s, t): + return t.split('=') + +def _handle_word(s, t): + if t.startswith('.'): + return '.', t[1:] + if t.startswith('#'): + return 'id', t[1:] + return t, t + +_scanner = Scanner([ + (r'[^ ]+=".*?"', _handle_double_quote), + (r"[^ ]+='.*?'", _handle_single_quote), + (r'[^ ]+=[^ ]*', _handle_key_value), + (r'[^ ]+', _handle_word), + (r' ', None) +]) + +def get_attrs(str): + """ Parse attribute list and return a list of attribute tuples. 
""" + return _scanner.scan(str)[0] + +def isheader(elem): + return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] + +class AttrListTreeprocessor(Treeprocessor): + + BASE_RE = r'\{\:?([^\}]*)\}' + HEADER_RE = re.compile(r'[ ]+%s[ ]*$' % BASE_RE) + BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE) + INLINE_RE = re.compile(r'^%s' % BASE_RE) + NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d' + r'\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef' + r'\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd' + r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+') + + def run(self, doc): + for elem in doc.getiterator(): + if isBlockLevel(elem.tag): + # Block level: check for attrs on last line of text + RE = self.BLOCK_RE + if isheader(elem) or elem.tag == 'dt': + # header or def-term: check for attrs at end of line + RE = self.HEADER_RE + if len(elem) and elem.tag == 'li': + # special case list items. children may include a ul or ol. + pos = None + # find the ul or ol position + for i, child in enumerate(elem): + if child.tag in ['ul', 'ol']: + pos = i + break + if pos is None and elem[-1].tail: + # use tail of last child. no ul or ol. + m = RE.search(elem[-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[-1].tail = elem[-1].tail[:m.start()] + elif pos is not None and pos > 0 and elem[pos-1].tail: + # use tail of last child before ul or ol + m = RE.search(elem[pos-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[pos-1].tail = elem[pos-1].tail[:m.start()] + elif elem.text: + # use text. ul is first child. + m = RE.search(elem.text) + if m: + self.assign_attrs(elem, m.group(1)) + elem.text = elem.text[:m.start()] + elif len(elem) and elem[-1].tail: + # has children. 
Get from tail of last child + m = RE.search(elem[-1].tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem[-1].tail = elem[-1].tail[:m.start()] + if isheader(elem): + # clean up trailing #s + elem[-1].tail = elem[-1].tail.rstrip('#').rstrip() + elif elem.text: + # no children. Get from text. + m = RE.search(elem.text) + if not m and elem.tag == 'td': + m = re.search(self.BASE_RE, elem.text) + if m: + self.assign_attrs(elem, m.group(1)) + elem.text = elem.text[:m.start()] + if isheader(elem): + # clean up trailing #s + elem.text = elem.text.rstrip('#').rstrip() + else: + # inline: check for attrs at start of tail + if elem.tail: + m = self.INLINE_RE.match(elem.tail) + if m: + self.assign_attrs(elem, m.group(1)) + elem.tail = elem.tail[m.end():] + + def assign_attrs(self, elem, attrs): + """ Assign attrs to element. """ + for k, v in get_attrs(attrs): + if k == '.': + # add to class + cls = elem.get('class') + if cls: + elem.set('class', '%s %s' % (cls, v)) + else: + elem.set('class', v) + else: + # assign attr k with v + elem.set(self.sanitize_name(k), v) + + def sanitize_name(self, name): + """ + Sanitize name as 'an XML Name, minus the ":"'. + See http://www.w3.org/TR/REC-xml-names/#NT-NCName + """ + return self.NAME_RE.sub('_', name) + + +class AttrListExtension(Extension): + def extendMarkdown(self, md, md_globals): + md.treeprocessors.add('attr_list', AttrListTreeprocessor(md), '>prettify') + + +def makeExtension(configs={}): + return AttrListExtension(configs=configs) diff --git a/awx/lib/site-packages/markdown/extensions/codehilite.py b/awx/lib/site-packages/markdown/extensions/codehilite.py index 589a7e7639..428bd0cb2b 100644 --- a/awx/lib/site-packages/markdown/extensions/codehilite.py +++ b/awx/lib/site-packages/markdown/extensions/codehilite.py @@ -1,268 +1,268 @@ -""" -CodeHilite Extension for Python-Markdown -======================================== - -Adds code/syntax highlighting to standard Python-Markdown code blocks. 
- -Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). - -Project website:%s\n'% \
- (self.css_class, class_str, txt)
-
- def _parseHeader(self):
- """
- Determines language of a code block from shebang line and whether said
- line should be removed or left in place. If the sheband line contains a
- path (even a single /) then it is assumed to be a real shebang line and
- left alone. However, if no path is given (e.i.: #!python or :::python)
- then it is assumed to be a mock shebang for language identifitation of a
- code fragment and removed from the code block prior to processing for
- code highlighting. When a mock shebang (e.i: #!python) is found, line
- numbering is turned on. When colons are found in place of a shebang
- (e.i.: :::python), line numbering is left in the current state - off
- by default.
-
- Also parses optional list of highlight lines, like:
-
- :::python hl_lines="1 3"
- """
-
- import re
-
- #split text into lines
- lines = self.src.split("\n")
- #pull first line to examine
- fl = lines.pop(0)
-
- c = re.compile(r'''
- (?:(?:^::+)|(?P%s\n'% \
+ (self.css_class, class_str, txt)
+
+ def _parseHeader(self):
+ """
+ Determines language of a code block from shebang line and whether said
+ line should be removed or left in place. If the sheband line contains a
+ path (even a single /) then it is assumed to be a real shebang line and
+ left alone. However, if no path is given (e.i.: #!python or :::python)
+ then it is assumed to be a mock shebang for language identifitation of a
+ code fragment and removed from the code block prior to processing for
+ code highlighting. When a mock shebang (e.i: #!python) is found, line
+ numbering is turned on. When colons are found in place of a shebang
+ (e.i.: :::python), line numbering is left in the current state - off
+ by default.
+
+ Also parses optional list of highlight lines, like:
+
+ :::python hl_lines="1 3"
+ """
+
+ import re
+
+ #split text into lines
+ lines = self.src.split("\n")
+ #pull first line to examine
+ fl = lines.pop(0)
+
+ c = re.compile(r'''
+ (?:(?:^::+)|(?PA paragraph before a fenced code block:
-Fenced code block
-
-
-Works with safe_mode also (we check this because we are using the HtmlStash):
-
- >>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
- A paragraph before a fenced code block:
-Fenced code block
-
-
-Include tilde's in a code block and wrap with blank lines:
-
- >>> text = '''
- ... ~~~~~~~~
- ...
- ... ~~~~
- ... ~~~~~~~~'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
-
- ~~~~
-
-
-Language tags:
-
- >>> text = '''
- ... ~~~~{.python}
- ... # Some python code
- ... ~~~~'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
- # Some python code
-
-
-Optionally backticks instead of tildes as per how github's code block markdown is identified:
-
- >>> text = '''
- ... `````
- ... # Arbitrary code
- ... ~~~~~ # these tildes will not close the block
- ... `````'''
- >>> print markdown.markdown(text, extensions=['fenced_code'])
- # Arbitrary code
- ~~~~~ # these tildes will not close the block
-
-
-If the codehighlite extension and Pygments are installed, lines can be highlighted:
-
- >>> text = '''
- ... ```hl_lines="1 3"
- ... line 1
- ... line 2
- ... line 3
- ... ```'''
- >>> print markdown.markdown(text, extensions=['codehilite', 'fenced_code'])
- line 1
- line 2
- line 3
-
-
-Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
-
-Project website: .*?)(?<=\n)
-(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
- CODE_WRAP = '%s
'
- LANG_TAG = ' class="%s"'
-
- def __init__(self, md):
- super(FencedBlockPreprocessor, self).__init__(md)
-
- self.checked_for_codehilite = False
- self.codehilite_conf = {}
-
- def run(self, lines):
- """ Match and store Fenced Code Blocks in the HtmlStash. """
-
- # Check for code hilite extension
- if not self.checked_for_codehilite:
- for ext in self.markdown.registeredExtensions:
- if isinstance(ext, CodeHiliteExtension):
- self.codehilite_conf = ext.config
- break
-
- self.checked_for_codehilite = True
-
- text = "\n".join(lines)
- while 1:
- m = self.FENCED_BLOCK_RE.search(text)
- if m:
- lang = ''
- if m.group('lang'):
- lang = self.LANG_TAG % m.group('lang')
-
- # If config is not empty, then the codehighlite extension
- # is enabled, so we call it to highlight the code
- if self.codehilite_conf:
- highliter = CodeHilite(m.group('code'),
- linenums=self.codehilite_conf['linenums'][0],
- guess_lang=self.codehilite_conf['guess_lang'][0],
- css_class=self.codehilite_conf['css_class'][0],
- style=self.codehilite_conf['pygments_style'][0],
- lang=(m.group('lang') or None),
- noclasses=self.codehilite_conf['noclasses'][0],
- hl_lines=parse_hl_lines(m.group('hl_lines')))
-
- code = highliter.hilite()
- else:
- code = self.CODE_WRAP % (lang, self._escape(m.group('code')))
-
- placeholder = self.markdown.htmlStash.store(code, safe=True)
- text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
- else:
- break
- return text.split("\n")
-
- def _escape(self, txt):
- """ basic html escaping """
- txt = txt.replace('&', '&')
- txt = txt.replace('<', '<')
- txt = txt.replace('>', '>')
- txt = txt.replace('"', '"')
- return txt
-
-
-def makeExtension(configs=None):
- return FencedCodeExtension(configs=configs)
+"""
+Fenced Code Extension for Python Markdown
+=========================================
+
+This extension adds Fenced Code Blocks to Python-Markdown.
+
+ >>> import markdown
+ >>> text = '''
+ ... A paragraph before a fenced code block:
+ ...
+ ... ~~~
+ ... Fenced code block
+ ... ~~~
+ ... '''
+ >>> html = markdown.markdown(text, extensions=['fenced_code'])
+ >>> print html
+ A paragraph before a fenced code block:
+ Fenced code block
+
+
+Works with safe_mode also (we check this because we are using the HtmlStash):
+
+ >>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
+ A paragraph before a fenced code block:
+ Fenced code block
+
+
+Include tilde's in a code block and wrap with blank lines:
+
+ >>> text = '''
+ ... ~~~~~~~~
+ ...
+ ... ~~~~
+ ... ~~~~~~~~'''
+ >>> print markdown.markdown(text, extensions=['fenced_code'])
+
+ ~~~~
+
+
+Language tags:
+
+ >>> text = '''
+ ... ~~~~{.python}
+ ... # Some python code
+ ... ~~~~'''
+ >>> print markdown.markdown(text, extensions=['fenced_code'])
+ # Some python code
+
+
+Optionally backticks instead of tildes as per how github's code block markdown is identified:
+
+ >>> text = '''
+ ... `````
+ ... # Arbitrary code
+ ... ~~~~~ # these tildes will not close the block
+ ... `````'''
+ >>> print markdown.markdown(text, extensions=['fenced_code'])
+ # Arbitrary code
+ ~~~~~ # these tildes will not close the block
+
+
+If the codehighlite extension and Pygments are installed, lines can be highlighted:
+
+ >>> text = '''
+ ... ```hl_lines="1 3"
+ ... line 1
+ ... line 2
+ ... line 3
+ ... ```'''
+ >>> print markdown.markdown(text, extensions=['codehilite', 'fenced_code'])
+ line 1
+ line 2
+ line 3
+
+
+Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
+
+Project website:
+Contact: markdown@freewisdom.org
+
+License: BSD (see ../docs/LICENSE for details)
+
+Dependencies:
+* [Python 2.4+](http://python.org)
+* [Markdown 2.0+](http://packages.python.org/Markdown/)
+* [Pygments (optional)](http://pygments.org)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
+import re
+
+
+class FencedCodeExtension(Extension):
+
+ def extendMarkdown(self, md, md_globals):
+ """ Add FencedBlockPreprocessor to the Markdown instance. """
+ md.registerExtension(self)
+
+ md.preprocessors.add('fenced_code_block',
+ FencedBlockPreprocessor(md),
+ ">normalize_whitespace")
+
+
+class FencedBlockPreprocessor(Preprocessor):
+ FENCED_BLOCK_RE = re.compile(r'''
+(?P^(?:~{3,}|`{3,}))[ ]* # Opening ``` or ~~~
+(\{?\.?(?P[a-zA-Z0-9_+-]*))?[ ]* # Optional {, and lang
+# Optional highlight lines, single- or double-quote-delimited
+(hl_lines=(?P"|')(?P.*?)(?P=quot))?[ ]*
+}?[ ]*\n # Optional closing }
+(?P.*?)(?<=\n)
+(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
+ CODE_WRAP = '%s
'
+ LANG_TAG = ' class="%s"'
+
+ def __init__(self, md):
+ super(FencedBlockPreprocessor, self).__init__(md)
+
+ self.checked_for_codehilite = False
+ self.codehilite_conf = {}
+
+ def run(self, lines):
+ """ Match and store Fenced Code Blocks in the HtmlStash. """
+
+ # Check for code hilite extension
+ if not self.checked_for_codehilite:
+ for ext in self.markdown.registeredExtensions:
+ if isinstance(ext, CodeHiliteExtension):
+ self.codehilite_conf = ext.config
+ break
+
+ self.checked_for_codehilite = True
+
+ text = "\n".join(lines)
+ while 1:
+ m = self.FENCED_BLOCK_RE.search(text)
+ if m:
+ lang = ''
+ if m.group('lang'):
+ lang = self.LANG_TAG % m.group('lang')
+
+ # If config is not empty, then the codehighlite extension
+ # is enabled, so we call it to highlight the code
+ if self.codehilite_conf:
+ highliter = CodeHilite(m.group('code'),
+ linenums=self.codehilite_conf['linenums'][0],
+ guess_lang=self.codehilite_conf['guess_lang'][0],
+ css_class=self.codehilite_conf['css_class'][0],
+ style=self.codehilite_conf['pygments_style'][0],
+ lang=(m.group('lang') or None),
+ noclasses=self.codehilite_conf['noclasses'][0],
+ hl_lines=parse_hl_lines(m.group('hl_lines')))
+
+ code = highliter.hilite()
+ else:
+ code = self.CODE_WRAP % (lang, self._escape(m.group('code')))
+
+ placeholder = self.markdown.htmlStash.store(code, safe=True)
+ text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
+ else:
+ break
+ return text.split("\n")
+
+ def _escape(self, txt):
+ """ basic html escaping """
+ txt = txt.replace('&', '&')
+ txt = txt.replace('<', '<')
+ txt = txt.replace('>', '>')
+ txt = txt.replace('"', '"')
+ return txt
+
+
+def makeExtension(configs=None):
+ return FencedCodeExtension(configs=configs)
diff --git a/awx/lib/site-packages/markdown/extensions/footnotes.py b/awx/lib/site-packages/markdown/extensions/footnotes.py
index 6d1b412a87..9f93ad1b5d 100644
--- a/awx/lib/site-packages/markdown/extensions/footnotes.py
+++ b/awx/lib/site-packages/markdown/extensions/footnotes.py
@@ -1,315 +1,315 @@
-"""
-========================= FOOTNOTES =================================
-
-This section adds footnote handling to markdown. It can be used as
-an example for extending python-markdown with relatively complex
-functionality. While in this case the extension is included inside
-the module itself, it could just as easily be added from outside the
-module. Not that all markdown classes above are ignorant about
-footnotes. All footnote functionality is provided separately and
-then added to the markdown instance at the run time.
-
-Footnote functionality is attached by calling extendMarkdown()
-method of FootnoteExtension. The method also registers the
-extension to allow it's state to be reset by a call to reset()
-method.
-
-Example:
- Footnotes[^1] have a label[^label] and a definition[^!DEF].
-
- [^1]: This is a footnote
- [^label]: A footnote on "label"
- [^!DEF]: The footnote for definition
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..preprocessors import Preprocessor
-from ..inlinepatterns import Pattern
-from ..treeprocessors import Treeprocessor
-from ..postprocessors import Postprocessor
-from ..util import etree, text_type
-from ..odict import OrderedDict
-import re
-
-FN_BACKLINK_TEXT = "zz1337820767766393qq"
-NBSP_PLACEHOLDER = "qq3936677670287331zz"
-DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
-TABBED_RE = re.compile(r'((\t)|( ))(.*)')
-
-class FootnoteExtension(Extension):
- """ Footnote Extension. """
-
- def __init__ (self, configs):
- """ Setup configs. """
- self.config = {'PLACE_MARKER':
- ["///Footnotes Go Here///",
- "The text string that marks where the footnotes go"],
- 'UNIQUE_IDS':
- [False,
- "Avoid name collisions across "
- "multiple calls to reset()."],
- "BACKLINK_TEXT":
- ["↩",
- "The text string that links from the footnote to the reader's place."]
- }
-
- for key, value in configs:
- self.config[key][0] = value
-
- # In multiple invocations, emit links that don't get tangled.
- self.unique_prefix = 0
-
- self.reset()
-
- def extendMarkdown(self, md, md_globals):
- """ Add pieces to Markdown. """
- md.registerExtension(self)
- self.parser = md.parser
- self.md = md
- # Insert a preprocessor before ReferencePreprocessor
- md.preprocessors.add("footnote", FootnotePreprocessor(self),
- "amp_substitute")
-
- def reset(self):
- """ Clear the footnotes on reset, and prepare for a distinct document. """
- self.footnotes = OrderedDict()
- self.unique_prefix += 1
-
- def findFootnotesPlaceholder(self, root):
- """ Return ElementTree Element that contains Footnote placeholder. """
- def finder(element):
- for child in element:
- if child.text:
- if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
- return child, element, True
- if child.tail:
- if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
- return child, element, False
- finder(child)
- return None
-
- res = finder(root)
- return res
-
- def setFootnote(self, id, text):
- """ Store a footnote for later retrieval. """
- self.footnotes[id] = text
-
- def get_separator(self):
- if self.md.output_format in ['html5', 'xhtml5']:
- return '-'
- return ':'
-
- def makeFootnoteId(self, id):
- """ Return footnote link id. """
- if self.getConfig("UNIQUE_IDS"):
- return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
- else:
- return 'fn%s%s' % (self.get_separator(), id)
-
- def makeFootnoteRefId(self, id):
- """ Return footnote back-link id. """
- if self.getConfig("UNIQUE_IDS"):
- return 'fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
- else:
- return 'fnref%s%s' % (self.get_separator(), id)
-
- def makeFootnotesDiv(self, root):
- """ Return div of footnotes as et Element. """
-
- if not list(self.footnotes.keys()):
- return None
-
- div = etree.Element("div")
- div.set('class', 'footnote')
- etree.SubElement(div, "hr")
- ol = etree.SubElement(div, "ol")
-
- for id in self.footnotes.keys():
- li = etree.SubElement(ol, "li")
- li.set("id", self.makeFootnoteId(id))
- self.parser.parseChunk(li, self.footnotes[id])
- backlink = etree.Element("a")
- backlink.set("href", "#" + self.makeFootnoteRefId(id))
- if self.md.output_format not in ['html5', 'xhtml5']:
- backlink.set("rev", "footnote") # Invalid in HTML5
- backlink.set("class", "footnote-backref")
- backlink.set("title", "Jump back to footnote %d in the text" % \
- (self.footnotes.index(id)+1))
- backlink.text = FN_BACKLINK_TEXT
-
- if li.getchildren():
- node = li[-1]
- if node.tag == "p":
- node.text = node.text + NBSP_PLACEHOLDER
- node.append(backlink)
- else:
- p = etree.SubElement(li, "p")
- p.append(backlink)
- return div
-
-
-class FootnotePreprocessor(Preprocessor):
- """ Find all footnote references and store for later use. """
-
- def __init__ (self, footnotes):
- self.footnotes = footnotes
-
- def run(self, lines):
- """
- Loop through lines and find, set, and remove footnote definitions.
-
- Keywords:
-
- * lines: A list of lines of text
-
- Return: A list of lines of text with footnote definitions removed.
-
- """
- newlines = []
- i = 0
- while True:
- m = DEF_RE.match(lines[i])
- if m:
- fn, _i = self.detectTabbed(lines[i+1:])
- fn.insert(0, m.group(2))
- i += _i-1 # skip past footnote
- self.footnotes.setFootnote(m.group(1), "\n".join(fn))
- else:
- newlines.append(lines[i])
- if len(lines) > i+1:
- i += 1
- else:
- break
- return newlines
-
- def detectTabbed(self, lines):
- """ Find indented text and remove indent before further proccesing.
-
- Keyword arguments:
-
- * lines: an array of strings
-
- Returns: a list of post processed items and the index of last line.
-
- """
- items = []
- blank_line = False # have we encountered a blank line yet?
- i = 0 # to keep track of where we are
-
- def detab(line):
- match = TABBED_RE.match(line)
- if match:
- return match.group(4)
-
- for line in lines:
- if line.strip(): # Non-blank line
- detabbed_line = detab(line)
- if detabbed_line:
- items.append(detabbed_line)
- i += 1
- continue
- elif not blank_line and not DEF_RE.match(line):
- # not tabbed but still part of first par.
- items.append(line)
- i += 1
- continue
- else:
- return items, i+1
-
- else: # Blank line: _maybe_ we are done.
- blank_line = True
- i += 1 # advance
-
- # Find the next non-blank line
- for j in range(i, len(lines)):
- if lines[j].strip():
- next_line = lines[j]; break
- else:
- break # There is no more text; we are done.
-
- # Check if the next non-blank line is tabbed
- if detab(next_line): # Yes, more work to do.
- items.append("")
- continue
- else:
- break # No, we are done.
- else:
- i += 1
-
- return items, i
-
-
-class FootnotePattern(Pattern):
- """ InlinePattern for footnote markers in a document's body text. """
-
- def __init__(self, pattern, footnotes):
- super(FootnotePattern, self).__init__(pattern)
- self.footnotes = footnotes
-
- def handleMatch(self, m):
- id = m.group(2)
- if id in self.footnotes.footnotes.keys():
- sup = etree.Element("sup")
- a = etree.SubElement(sup, "a")
- sup.set('id', self.footnotes.makeFootnoteRefId(id))
- a.set('href', '#' + self.footnotes.makeFootnoteId(id))
- if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
- a.set('rel', 'footnote') # invalid in HTML5
- a.set('class', 'footnote-ref')
- a.text = text_type(self.footnotes.footnotes.index(id) + 1)
- return sup
- else:
- return None
-
-
-class FootnoteTreeprocessor(Treeprocessor):
- """ Build and append footnote div to end of document. """
-
- def __init__ (self, footnotes):
- self.footnotes = footnotes
-
- def run(self, root):
- footnotesDiv = self.footnotes.makeFootnotesDiv(root)
- if footnotesDiv:
- result = self.footnotes.findFootnotesPlaceholder(root)
- if result:
- child, parent, isText = result
- ind = parent.getchildren().index(child)
- if isText:
- parent.remove(child)
- parent.insert(ind, footnotesDiv)
- else:
- parent.insert(ind + 1, footnotesDiv)
- child.tail = None
- else:
- root.append(footnotesDiv)
-
-class FootnotePostprocessor(Postprocessor):
- """ Replace placeholders with html entities. """
- def __init__(self, footnotes):
- self.footnotes = footnotes
-
- def run(self, text):
- text = text.replace(FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT"))
- return text.replace(NBSP_PLACEHOLDER, " ")
-
-def makeExtension(configs=[]):
- """ Return an instance of the FootnoteExtension """
- return FootnoteExtension(configs=configs)
-
+"""
+========================= FOOTNOTES =================================
+
+This section adds footnote handling to markdown. It can be used as
+an example for extending python-markdown with relatively complex
+functionality. While in this case the extension is included inside
+the module itself, it could just as easily be added from outside the
+module. Not that all markdown classes above are ignorant about
+footnotes. All footnote functionality is provided separately and
+then added to the markdown instance at the run time.
+
+Footnote functionality is attached by calling extendMarkdown()
+method of FootnoteExtension. The method also registers the
+extension to allow it's state to be reset by a call to reset()
+method.
+
+Example:
+ Footnotes[^1] have a label[^label] and a definition[^!DEF].
+
+ [^1]: This is a footnote
+ [^label]: A footnote on "label"
+ [^!DEF]: The footnote for definition
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+from ..inlinepatterns import Pattern
+from ..treeprocessors import Treeprocessor
+from ..postprocessors import Postprocessor
+from ..util import etree, text_type
+from ..odict import OrderedDict
+import re
+
+FN_BACKLINK_TEXT = "zz1337820767766393qq"
+NBSP_PLACEHOLDER = "qq3936677670287331zz"
+DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
+TABBED_RE = re.compile(r'((\t)|( ))(.*)')
+
+class FootnoteExtension(Extension):
+ """ Footnote Extension. """
+
+ def __init__ (self, configs):
+ """ Setup configs. """
+ self.config = {'PLACE_MARKER':
+ ["///Footnotes Go Here///",
+ "The text string that marks where the footnotes go"],
+ 'UNIQUE_IDS':
+ [False,
+ "Avoid name collisions across "
+ "multiple calls to reset()."],
+ "BACKLINK_TEXT":
+ ["↩",
+ "The text string that links from the footnote to the reader's place."]
+ }
+
+ for key, value in configs:
+ self.config[key][0] = value
+
+ # In multiple invocations, emit links that don't get tangled.
+ self.unique_prefix = 0
+
+ self.reset()
+
+ def extendMarkdown(self, md, md_globals):
+ """ Add pieces to Markdown. """
+ md.registerExtension(self)
+ self.parser = md.parser
+ self.md = md
+ # Insert a preprocessor before ReferencePreprocessor
+ md.preprocessors.add("footnote", FootnotePreprocessor(self),
+ "amp_substitute")
+
+ def reset(self):
+ """ Clear the footnotes on reset, and prepare for a distinct document. """
+ self.footnotes = OrderedDict()
+ self.unique_prefix += 1
+
+ def findFootnotesPlaceholder(self, root):
+ """ Return ElementTree Element that contains Footnote placeholder. """
+ def finder(element):
+ for child in element:
+ if child.text:
+ if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
+ return child, element, True
+ if child.tail:
+ if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
+ return child, element, False
+ finder(child)
+ return None
+
+ res = finder(root)
+ return res
+
+ def setFootnote(self, id, text):
+ """ Store a footnote for later retrieval. """
+ self.footnotes[id] = text
+
+ def get_separator(self):
+ if self.md.output_format in ['html5', 'xhtml5']:
+ return '-'
+ return ':'
+
+ def makeFootnoteId(self, id):
+ """ Return footnote link id. """
+ if self.getConfig("UNIQUE_IDS"):
+ return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
+ else:
+ return 'fn%s%s' % (self.get_separator(), id)
+
+ def makeFootnoteRefId(self, id):
+ """ Return footnote back-link id. """
+ if self.getConfig("UNIQUE_IDS"):
+ return 'fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
+ else:
+ return 'fnref%s%s' % (self.get_separator(), id)
+
+ def makeFootnotesDiv(self, root):
+ """ Return div of footnotes as et Element. """
+
+ if not list(self.footnotes.keys()):
+ return None
+
+ div = etree.Element("div")
+ div.set('class', 'footnote')
+ etree.SubElement(div, "hr")
+ ol = etree.SubElement(div, "ol")
+
+ for id in self.footnotes.keys():
+ li = etree.SubElement(ol, "li")
+ li.set("id", self.makeFootnoteId(id))
+ self.parser.parseChunk(li, self.footnotes[id])
+ backlink = etree.Element("a")
+ backlink.set("href", "#" + self.makeFootnoteRefId(id))
+ if self.md.output_format not in ['html5', 'xhtml5']:
+ backlink.set("rev", "footnote") # Invalid in HTML5
+ backlink.set("class", "footnote-backref")
+ backlink.set("title", "Jump back to footnote %d in the text" % \
+ (self.footnotes.index(id)+1))
+ backlink.text = FN_BACKLINK_TEXT
+
+ if li.getchildren():
+ node = li[-1]
+ if node.tag == "p":
+ node.text = node.text + NBSP_PLACEHOLDER
+ node.append(backlink)
+ else:
+ p = etree.SubElement(li, "p")
+ p.append(backlink)
+ return div
+
+
+class FootnotePreprocessor(Preprocessor):
+ """ Find all footnote references and store for later use. """
+
+ def __init__ (self, footnotes):
+ self.footnotes = footnotes
+
+ def run(self, lines):
+ """
+ Loop through lines and find, set, and remove footnote definitions.
+
+ Keywords:
+
+ * lines: A list of lines of text
+
+ Return: A list of lines of text with footnote definitions removed.
+
+ """
+ newlines = []
+ i = 0
+ while True:
+ m = DEF_RE.match(lines[i])
+ if m:
+ fn, _i = self.detectTabbed(lines[i+1:])
+ fn.insert(0, m.group(2))
+ i += _i-1 # skip past footnote
+ self.footnotes.setFootnote(m.group(1), "\n".join(fn))
+ else:
+ newlines.append(lines[i])
+ if len(lines) > i+1:
+ i += 1
+ else:
+ break
+ return newlines
+
+ def detectTabbed(self, lines):
+        """ Find indented text and remove indent before further processing.
+
+ Keyword arguments:
+
+ * lines: an array of strings
+
+ Returns: a list of post processed items and the index of last line.
+
+ """
+ items = []
+ blank_line = False # have we encountered a blank line yet?
+ i = 0 # to keep track of where we are
+
+ def detab(line):
+ match = TABBED_RE.match(line)
+ if match:
+ return match.group(4)
+
+ for line in lines:
+ if line.strip(): # Non-blank line
+ detabbed_line = detab(line)
+ if detabbed_line:
+ items.append(detabbed_line)
+ i += 1
+ continue
+ elif not blank_line and not DEF_RE.match(line):
+ # not tabbed but still part of first par.
+ items.append(line)
+ i += 1
+ continue
+ else:
+ return items, i+1
+
+ else: # Blank line: _maybe_ we are done.
+ blank_line = True
+ i += 1 # advance
+
+ # Find the next non-blank line
+ for j in range(i, len(lines)):
+ if lines[j].strip():
+ next_line = lines[j]; break
+ else:
+ break # There is no more text; we are done.
+
+ # Check if the next non-blank line is tabbed
+ if detab(next_line): # Yes, more work to do.
+ items.append("")
+ continue
+ else:
+ break # No, we are done.
+ else:
+ i += 1
+
+ return items, i
+
+
+class FootnotePattern(Pattern):
+ """ InlinePattern for footnote markers in a document's body text. """
+
+ def __init__(self, pattern, footnotes):
+ super(FootnotePattern, self).__init__(pattern)
+ self.footnotes = footnotes
+
+ def handleMatch(self, m):
+ id = m.group(2)
+ if id in self.footnotes.footnotes.keys():
+ sup = etree.Element("sup")
+ a = etree.SubElement(sup, "a")
+ sup.set('id', self.footnotes.makeFootnoteRefId(id))
+ a.set('href', '#' + self.footnotes.makeFootnoteId(id))
+ if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
+ a.set('rel', 'footnote') # invalid in HTML5
+ a.set('class', 'footnote-ref')
+ a.text = text_type(self.footnotes.footnotes.index(id) + 1)
+ return sup
+ else:
+ return None
+
+
+class FootnoteTreeprocessor(Treeprocessor):
+ """ Build and append footnote div to end of document. """
+
+ def __init__ (self, footnotes):
+ self.footnotes = footnotes
+
+ def run(self, root):
+ footnotesDiv = self.footnotes.makeFootnotesDiv(root)
+ if footnotesDiv:
+ result = self.footnotes.findFootnotesPlaceholder(root)
+ if result:
+ child, parent, isText = result
+ ind = parent.getchildren().index(child)
+ if isText:
+ parent.remove(child)
+ parent.insert(ind, footnotesDiv)
+ else:
+ parent.insert(ind + 1, footnotesDiv)
+ child.tail = None
+ else:
+ root.append(footnotesDiv)
+
+class FootnotePostprocessor(Postprocessor):
+ """ Replace placeholders with html entities. """
+ def __init__(self, footnotes):
+ self.footnotes = footnotes
+
+ def run(self, text):
+ text = text.replace(FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT"))
+ return text.replace(NBSP_PLACEHOLDER, " ")
+
+def makeExtension(configs=[]):
+ """ Return an instance of the FootnoteExtension """
+ return FootnoteExtension(configs=configs)
+
diff --git a/awx/lib/site-packages/markdown/extensions/headerid.py b/awx/lib/site-packages/markdown/extensions/headerid.py
index ac55bc6426..8221fe1d52 100644
--- a/awx/lib/site-packages/markdown/extensions/headerid.py
+++ b/awx/lib/site-packages/markdown/extensions/headerid.py
@@ -1,208 +1,208 @@
-"""
-HeaderID Extension for Python-Markdown
-======================================
-
-Auto-generate id attributes for HTML headers.
-
-Basic usage:
-
- >>> import markdown
- >>> text = "# Some Header #"
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- Some Header
-
-All header IDs are unique:
-
- >>> text = '''
- ... #Header
- ... #Header
- ... #Header'''
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- Header
- Header
- Header
-
-To fit within a html template's hierarchy, set the header base level:
-
- >>> text = '''
- ... #Some Header
- ... ## Next Level'''
- >>> md = markdown.markdown(text, ['headerid(level=3)'])
- >>> print md
- Some Header
- Next Level
-
-Works with inline markup.
-
- >>> text = '#Some *Header* with [markup](http://example.com).'
- >>> md = markdown.markdown(text, ['headerid'])
- >>> print md
- Some Header with markup.
-
-Turn off auto generated IDs:
-
- >>> text = '''
- ... # Some Header
- ... # Another Header'''
- >>> md = markdown.markdown(text, ['headerid(forceid=False)'])
- >>> print md
- Some Header
- Another Header
-
-Use with MetaData extension:
-
- >>> text = '''header_level: 2
- ... header_forceid: Off
- ...
- ... # A Header'''
- >>> md = markdown.markdown(text, ['headerid', 'meta'])
- >>> print md
- A Header
-
-Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
-
-Project website:
-Contact: markdown@freewisdom.org
-
-License: BSD (see ../docs/LICENSE for details)
-
-Dependencies:
-* [Python 2.3+](http://python.org)
-* [Markdown 2.0+](http://packages.python.org/Markdown/)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..treeprocessors import Treeprocessor
-from ..util import HTML_PLACEHOLDER_RE, parseBoolValue
-import re
-import logging
-import unicodedata
-
-logger = logging.getLogger('MARKDOWN')
-
-IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
-
-
-def slugify(value, separator):
- """ Slugify a string, to make it URL friendly. """
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
- value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
- return re.sub('[%s\s]+' % separator, separator, value)
-
-
-def unique(id, ids):
- """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
- while id in ids or not id:
- m = IDCOUNT_RE.match(id)
- if m:
- id = '%s_%d'% (m.group(1), int(m.group(2))+1)
- else:
- id = '%s_%d'% (id, 1)
- ids.add(id)
- return id
-
-
-def itertext(elem):
- """ Loop through all children and return text only.
-
- Reimplements method of same name added to ElementTree in Python 2.7
-
- """
- if elem.text:
- yield elem.text
- for e in elem:
- for s in itertext(e):
- yield s
- if e.tail:
- yield e.tail
-
-
-def stashedHTML2text(text, md):
- """ Extract raw HTML, reduce to plain text and swap with placeholder. """
- def _html_sub(m):
- """ Substitute raw html with plain text. """
- try:
- raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
- except (IndexError, TypeError):
- return m.group(0)
- if md.safeMode and not safe:
- return ''
- # Strip out tags and entities - leaveing text
- return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
-
- return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
-
-
-class HeaderIdTreeprocessor(Treeprocessor):
- """ Assign IDs to headers. """
-
- IDs = set()
-
- def run(self, doc):
- start_level, force_id = self._get_meta()
- slugify = self.config['slugify']
- sep = self.config['separator']
- for elem in doc.getiterator():
- if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
- if force_id:
- if "id" in elem.attrib:
- id = elem.get('id')
- else:
- id = stashedHTML2text(''.join(itertext(elem)), self.md)
- id = slugify(id, sep)
- elem.set('id', unique(id, self.IDs))
- if start_level:
- level = int(elem.tag[-1]) + start_level
- if level > 6:
- level = 6
- elem.tag = 'h%d' % level
-
-
- def _get_meta(self):
- """ Return meta data suported by this ext as a tuple """
- level = int(self.config['level']) - 1
- force = parseBoolValue(self.config['forceid'])
- if hasattr(self.md, 'Meta'):
- if 'header_level' in self.md.Meta:
- level = int(self.md.Meta['header_level'][0]) - 1
- if 'header_forceid' in self.md.Meta:
- force = parseBoolValue(self.md.Meta['header_forceid'][0])
- return level, force
-
-
-class HeaderIdExtension(Extension):
- def __init__(self, configs):
- # set defaults
- self.config = {
- 'level' : ['1', 'Base level for headers.'],
- 'forceid' : ['True', 'Force all headers to have an id.'],
- 'separator' : ['-', 'Word separator.'],
- 'slugify' : [slugify, 'Callable to generate anchors'],
- }
-
- for key, value in configs:
- self.setConfig(key, value)
-
- def extendMarkdown(self, md, md_globals):
- md.registerExtension(self)
- self.processor = HeaderIdTreeprocessor()
- self.processor.md = md
- self.processor.config = self.getConfigs()
- if 'attr_list' in md.treeprocessors.keys():
- # insert after attr_list treeprocessor
- md.treeprocessors.add('headerid', self.processor, '>attr_list')
- else:
- # insert after 'prettify' treeprocessor.
- md.treeprocessors.add('headerid', self.processor, '>prettify')
-
- def reset(self):
- self.processor.IDs = set()
-
-
-def makeExtension(configs=None):
- return HeaderIdExtension(configs=configs)
+"""
+HeaderID Extension for Python-Markdown
+======================================
+
+Auto-generate id attributes for HTML headers.
+
+Basic usage:
+
+ >>> import markdown
+ >>> text = "# Some Header #"
+ >>> md = markdown.markdown(text, ['headerid'])
+ >>> print md
+ Some Header
+
+All header IDs are unique:
+
+ >>> text = '''
+ ... #Header
+ ... #Header
+ ... #Header'''
+ >>> md = markdown.markdown(text, ['headerid'])
+ >>> print md
+ Header
+ Header
+ Header
+
+To fit within a html template's hierarchy, set the header base level:
+
+ >>> text = '''
+ ... #Some Header
+ ... ## Next Level'''
+ >>> md = markdown.markdown(text, ['headerid(level=3)'])
+ >>> print md
+ Some Header
+ Next Level
+
+Works with inline markup.
+
+ >>> text = '#Some *Header* with [markup](http://example.com).'
+ >>> md = markdown.markdown(text, ['headerid'])
+ >>> print md
+ Some Header with markup.
+
+Turn off auto generated IDs:
+
+ >>> text = '''
+ ... # Some Header
+ ... # Another Header'''
+ >>> md = markdown.markdown(text, ['headerid(forceid=False)'])
+ >>> print md
+ Some Header
+ Another Header
+
+Use with MetaData extension:
+
+ >>> text = '''header_level: 2
+ ... header_forceid: Off
+ ...
+ ... # A Header'''
+ >>> md = markdown.markdown(text, ['headerid', 'meta'])
+ >>> print md
+ A Header
+
+Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
+
+Project website:
+Contact: markdown@freewisdom.org
+
+License: BSD (see ../docs/LICENSE for details)
+
+Dependencies:
+* [Python 2.3+](http://python.org)
+* [Markdown 2.0+](http://packages.python.org/Markdown/)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import HTML_PLACEHOLDER_RE, parseBoolValue
+import re
+import logging
+import unicodedata
+
+logger = logging.getLogger('MARKDOWN')
+
+IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
+
+
+def slugify(value, separator):
+ """ Slugify a string, to make it URL friendly. """
+ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
+ value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
+ return re.sub('[%s\s]+' % separator, separator, value)
+
+
+def unique(id, ids):
+ """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
+ while id in ids or not id:
+ m = IDCOUNT_RE.match(id)
+ if m:
+ id = '%s_%d'% (m.group(1), int(m.group(2))+1)
+ else:
+ id = '%s_%d'% (id, 1)
+ ids.add(id)
+ return id
+
+
+def itertext(elem):
+ """ Loop through all children and return text only.
+
+ Reimplements method of same name added to ElementTree in Python 2.7
+
+ """
+ if elem.text:
+ yield elem.text
+ for e in elem:
+ for s in itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
+
+
+def stashedHTML2text(text, md):
+ """ Extract raw HTML, reduce to plain text and swap with placeholder. """
+ def _html_sub(m):
+ """ Substitute raw html with plain text. """
+ try:
+ raw, safe = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
+ except (IndexError, TypeError):
+ return m.group(0)
+ if md.safeMode and not safe:
+ return ''
+        # Strip out tags and entities - leaving text
+ return re.sub(r'(<[^>]+>)|(&[\#a-zA-Z0-9]+;)', '', raw)
+
+ return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
+
+
+class HeaderIdTreeprocessor(Treeprocessor):
+ """ Assign IDs to headers. """
+
+ IDs = set()
+
+ def run(self, doc):
+ start_level, force_id = self._get_meta()
+ slugify = self.config['slugify']
+ sep = self.config['separator']
+ for elem in doc.getiterator():
+ if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
+ if force_id:
+ if "id" in elem.attrib:
+ id = elem.get('id')
+ else:
+ id = stashedHTML2text(''.join(itertext(elem)), self.md)
+ id = slugify(id, sep)
+ elem.set('id', unique(id, self.IDs))
+ if start_level:
+ level = int(elem.tag[-1]) + start_level
+ if level > 6:
+ level = 6
+ elem.tag = 'h%d' % level
+
+
+ def _get_meta(self):
+        """ Return meta data supported by this ext as a tuple """
+ level = int(self.config['level']) - 1
+ force = parseBoolValue(self.config['forceid'])
+ if hasattr(self.md, 'Meta'):
+ if 'header_level' in self.md.Meta:
+ level = int(self.md.Meta['header_level'][0]) - 1
+ if 'header_forceid' in self.md.Meta:
+ force = parseBoolValue(self.md.Meta['header_forceid'][0])
+ return level, force
+
+
+class HeaderIdExtension(Extension):
+ def __init__(self, configs):
+ # set defaults
+ self.config = {
+ 'level' : ['1', 'Base level for headers.'],
+ 'forceid' : ['True', 'Force all headers to have an id.'],
+ 'separator' : ['-', 'Word separator.'],
+ 'slugify' : [slugify, 'Callable to generate anchors'],
+ }
+
+ for key, value in configs:
+ self.setConfig(key, value)
+
+ def extendMarkdown(self, md, md_globals):
+ md.registerExtension(self)
+ self.processor = HeaderIdTreeprocessor()
+ self.processor.md = md
+ self.processor.config = self.getConfigs()
+ if 'attr_list' in md.treeprocessors.keys():
+ # insert after attr_list treeprocessor
+ md.treeprocessors.add('headerid', self.processor, '>attr_list')
+ else:
+ # insert after 'prettify' treeprocessor.
+ md.treeprocessors.add('headerid', self.processor, '>prettify')
+
+ def reset(self):
+ self.processor.IDs = set()
+
+
+def makeExtension(configs=None):
+ return HeaderIdExtension(configs=configs)
diff --git a/awx/lib/site-packages/markdown/extensions/meta.py b/awx/lib/site-packages/markdown/extensions/meta.py
index 093eff1918..c4a4b210f9 100644
--- a/awx/lib/site-packages/markdown/extensions/meta.py
+++ b/awx/lib/site-packages/markdown/extensions/meta.py
@@ -1,93 +1,93 @@
-"""
-Meta Data Extension for Python-Markdown
-=======================================
-
-This extension adds Meta Data handling to markdown.
-
-Basic Usage:
-
- >>> import markdown
- >>> text = '''Title: A Test Doc.
- ... Author: Waylan Limberg
- ... John Doe
- ... Blank_Data:
- ...
- ... The body. This is paragraph one.
- ... '''
- >>> md = markdown.Markdown(['meta'])
- >>> print md.convert(text)
- The body. This is paragraph one.
- >>> print md.Meta
- {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
-
-Make sure text without Meta Data still works (markdown < 1.6b returns a ).
-
- >>> text = ' Some Code - not extra lines of meta data.'
- >>> md = markdown.Markdown(['meta'])
- >>> print md.convert(text)
-
Some Code - not extra lines of meta data.
-
- >>> md.Meta
- {}
-
-Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
-
-Project website:
-Contact: markdown@freewisdom.org
-
-License: BSD (see ../LICENSE.md for details)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..preprocessors import Preprocessor
-import re
-
-# Global Vars
-META_RE = re.compile(r'^[ ]{0,3}(?P[A-Za-z0-9_-]+):\s*(?P.*)')
-META_MORE_RE = re.compile(r'^[ ]{4,}(?P.*)')
-
-class MetaExtension (Extension):
- """ Meta-Data extension for Python-Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Add MetaPreprocessor to Markdown instance. """
-
- md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
-
-
-class MetaPreprocessor(Preprocessor):
- """ Get Meta-Data. """
-
- def run(self, lines):
- """ Parse Meta-Data and store in Markdown.Meta. """
- meta = {}
- key = None
- while lines:
- line = lines.pop(0)
- if line.strip() == '':
- break # blank line - done
- m1 = META_RE.match(line)
- if m1:
- key = m1.group('key').lower().strip()
- value = m1.group('value').strip()
- try:
- meta[key].append(value)
- except KeyError:
- meta[key] = [value]
- else:
- m2 = META_MORE_RE.match(line)
- if m2 and key:
- # Add another line to existing key
- meta[key].append(m2.group('value').strip())
- else:
- lines.insert(0, line)
- break # no meta data - done
- self.markdown.Meta = meta
- return lines
-
-
-def makeExtension(configs={}):
- return MetaExtension(configs=configs)
+"""
+Meta Data Extension for Python-Markdown
+=======================================
+
+This extension adds Meta Data handling to markdown.
+
+Basic Usage:
+
+ >>> import markdown
+ >>> text = '''Title: A Test Doc.
+ ... Author: Waylan Limberg
+ ... John Doe
+ ... Blank_Data:
+ ...
+ ... The body. This is paragraph one.
+ ... '''
+ >>> md = markdown.Markdown(['meta'])
+ >>> print md.convert(text)
+ The body. This is paragraph one.
+ >>> print md.Meta
+ {u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
+
+Make sure text without Meta Data still works (markdown < 1.6b returns a ).
+
+ >>> text = ' Some Code - not extra lines of meta data.'
+ >>> md = markdown.Markdown(['meta'])
+ >>> print md.convert(text)
+
Some Code - not extra lines of meta data.
+
+ >>> md.Meta
+ {}
+
+Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
+
+Project website:
+Contact: markdown@freewisdom.org
+
+License: BSD (see ../LICENSE.md for details)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..preprocessors import Preprocessor
+import re
+
+# Global Vars
+META_RE = re.compile(r'^[ ]{0,3}(?P[A-Za-z0-9_-]+):\s*(?P.*)')
+META_MORE_RE = re.compile(r'^[ ]{4,}(?P.*)')
+
+class MetaExtension (Extension):
+ """ Meta-Data extension for Python-Markdown. """
+
+ def extendMarkdown(self, md, md_globals):
+ """ Add MetaPreprocessor to Markdown instance. """
+
+ md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
+
+
+class MetaPreprocessor(Preprocessor):
+ """ Get Meta-Data. """
+
+ def run(self, lines):
+ """ Parse Meta-Data and store in Markdown.Meta. """
+ meta = {}
+ key = None
+ while lines:
+ line = lines.pop(0)
+ if line.strip() == '':
+ break # blank line - done
+ m1 = META_RE.match(line)
+ if m1:
+ key = m1.group('key').lower().strip()
+ value = m1.group('value').strip()
+ try:
+ meta[key].append(value)
+ except KeyError:
+ meta[key] = [value]
+ else:
+ m2 = META_MORE_RE.match(line)
+ if m2 and key:
+ # Add another line to existing key
+ meta[key].append(m2.group('value').strip())
+ else:
+ lines.insert(0, line)
+ break # no meta data - done
+ self.markdown.Meta = meta
+ return lines
+
+
+def makeExtension(configs={}):
+ return MetaExtension(configs=configs)
diff --git a/awx/lib/site-packages/markdown/extensions/nl2br.py b/awx/lib/site-packages/markdown/extensions/nl2br.py
index 02945c72be..da4b339958 100644
--- a/awx/lib/site-packages/markdown/extensions/nl2br.py
+++ b/awx/lib/site-packages/markdown/extensions/nl2br.py
@@ -1,38 +1,38 @@
-"""
-NL2BR Extension
-===============
-
-A Python-Markdown extension to treat newlines as hard breaks; like
-GitHub-flavored Markdown does.
-
-Usage:
-
- >>> import markdown
- >>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
- line 1
- line 2
-
-Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
-
-Dependencies:
-* [Python 2.4+](http://python.org)
-* [Markdown 2.1+](http://packages.python.org/Markdown/)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import SubstituteTagPattern
-
-BR_RE = r'\n'
-
-class Nl2BrExtension(Extension):
-
- def extendMarkdown(self, md, md_globals):
- br_tag = SubstituteTagPattern(BR_RE, 'br')
- md.inlinePatterns.add('nl', br_tag, '_end')
-
-
-def makeExtension(configs=None):
- return Nl2BrExtension(configs)
+"""
+NL2BR Extension
+===============
+
+A Python-Markdown extension to treat newlines as hard breaks; like
+GitHub-flavored Markdown does.
+
+Usage:
+
+ >>> import markdown
+ >>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
+ line 1
+ line 2
+
+Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
+
+Dependencies:
+* [Python 2.4+](http://python.org)
+* [Markdown 2.1+](http://packages.python.org/Markdown/)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import SubstituteTagPattern
+
+BR_RE = r'\n'
+
+class Nl2BrExtension(Extension):
+
+ def extendMarkdown(self, md, md_globals):
+ br_tag = SubstituteTagPattern(BR_RE, 'br')
+ md.inlinePatterns.add('nl', br_tag, '_end')
+
+
+def makeExtension(configs=None):
+ return Nl2BrExtension(configs)
diff --git a/awx/lib/site-packages/markdown/extensions/sane_lists.py b/awx/lib/site-packages/markdown/extensions/sane_lists.py
index 05b3ed0231..fda663828e 100644
--- a/awx/lib/site-packages/markdown/extensions/sane_lists.py
+++ b/awx/lib/site-packages/markdown/extensions/sane_lists.py
@@ -1,51 +1,51 @@
-"""
-Sane List Extension for Python-Markdown
-=======================================
-
-Modify the behavior of Lists in Python-Markdown t act in a sane manor.
-
-In standard Markdown syntax, the following would constitute a single
-ordered list. However, with this extension, the output would include
-two lists, the first an ordered list and the second and unordered list.
-
- 1. ordered
- 2. list
-
- * unordered
- * list
-
-Copyright 2011 - [Waylan Limberg](http://achinghead.com)
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..blockprocessors import OListProcessor, UListProcessor
-import re
-
-
-class SaneOListProcessor(OListProcessor):
-
- CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
- SIBLING_TAGS = ['ol']
-
-
-class SaneUListProcessor(UListProcessor):
-
- CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
- SIBLING_TAGS = ['ul']
-
-
-class SaneListExtension(Extension):
- """ Add sane lists to Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Override existing Processors. """
- md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
- md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
-
-
-def makeExtension(configs={}):
- return SaneListExtension(configs=configs)
-
+"""
+Sane List Extension for Python-Markdown
+=======================================
+
+Modify the behavior of Lists in Python-Markdown to act in a sane manner.
+
+In standard Markdown syntax, the following would constitute a single
+ordered list. However, with this extension, the output would include
+two lists, the first an ordered list and the second an unordered list.
+
+ 1. ordered
+ 2. list
+
+ * unordered
+ * list
+
+Copyright 2011 - [Waylan Limberg](http://achinghead.com)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..blockprocessors import OListProcessor, UListProcessor
+import re
+
+
+class SaneOListProcessor(OListProcessor):
+
+ CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
+ SIBLING_TAGS = ['ol']
+
+
+class SaneUListProcessor(UListProcessor):
+
+ CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
+ SIBLING_TAGS = ['ul']
+
+
+class SaneListExtension(Extension):
+ """ Add sane lists to Markdown. """
+
+ def extendMarkdown(self, md, md_globals):
+ """ Override existing Processors. """
+ md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
+ md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
+
+
+def makeExtension(configs={}):
+ return SaneListExtension(configs=configs)
+
diff --git a/awx/lib/site-packages/markdown/extensions/smart_strong.py b/awx/lib/site-packages/markdown/extensions/smart_strong.py
index a5c59c2da7..4818cf9ea8 100644
--- a/awx/lib/site-packages/markdown/extensions/smart_strong.py
+++ b/awx/lib/site-packages/markdown/extensions/smart_strong.py
@@ -1,42 +1,42 @@
-'''
-Smart_Strong Extension for Python-Markdown
-==========================================
-
-This extention adds smarter handling of double underscores within words.
-
-Simple Usage:
-
- >>> import markdown
- >>> print markdown.markdown('Text with double__underscore__words.',
- ... extensions=['smart_strong'])
- Text with double__underscore__words.
- >>> print markdown.markdown('__Strong__ still works.',
- ... extensions=['smart_strong'])
- Strong still works.
- >>> print markdown.markdown('__this__works__too__.',
- ... extensions=['smart_strong'])
- this__works__too.
-
-Copyright 2011
-[Waylan Limberg](http://achinghead.com)
-
-'''
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import SimpleTagPattern
-
-SMART_STRONG_RE = r'(?emphasis2')
-
-def makeExtension(configs={}):
- return SmartEmphasisExtension(configs=dict(configs))
+'''
+Smart_Strong Extension for Python-Markdown
+==========================================
+
+This extension adds smarter handling of double underscores within words.
+
+Simple Usage:
+
+ >>> import markdown
+ >>> print markdown.markdown('Text with double__underscore__words.',
+ ... extensions=['smart_strong'])
+ Text with double__underscore__words.
+ >>> print markdown.markdown('__Strong__ still works.',
+ ... extensions=['smart_strong'])
+ Strong still works.
+ >>> print markdown.markdown('__this__works__too__.',
+ ... extensions=['smart_strong'])
+ this__works__too.
+
+Copyright 2011
+[Waylan Limberg](http://achinghead.com)
+
+'''
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import SimpleTagPattern
+
+SMART_STRONG_RE = r'(?emphasis2')
+
+def makeExtension(configs={}):
+ return SmartEmphasisExtension(configs=dict(configs))
diff --git a/awx/lib/site-packages/markdown/extensions/smarty.py b/awx/lib/site-packages/markdown/extensions/smarty.py
index 9007cc1a28..2f946f8294 100644
--- a/awx/lib/site-packages/markdown/extensions/smarty.py
+++ b/awx/lib/site-packages/markdown/extensions/smarty.py
@@ -1,191 +1,191 @@
-# -*- coding: utf-8 -*-
-# Smarty extension for Python-Markdown
-# Author: 2013, Dmitry Shachnev
-
-# SmartyPants license:
-#
-# Copyright (c) 2003 John Gruber
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# * Neither the name "SmartyPants" nor the names of its contributors
-# may be used to endorse or promote products derived from this
-# software without specific prior written permission.
-#
-# This software is provided by the copyright holders and contributors "as
-# is" and any express or implied warranties, including, but not limited
-# to, the implied warranties of merchantability and fitness for a
-# particular purpose are disclaimed. In no event shall the copyright
-# owner or contributors be liable for any direct, indirect, incidental,
-# special, exemplary, or consequential damages (including, but not
-# limited to, procurement of substitute goods or services; loss of use,
-# data, or profits; or business interruption) however caused and on any
-# theory of liability, whether in contract, strict liability, or tort
-# (including negligence or otherwise) arising in any way out of the use
-# of this software, even if advised of the possibility of such damage.
-#
-#
-# smartypants.py license:
-#
-# smartypants.py is a derivative work of SmartyPants.
-# Copyright (c) 2004, 2007 Chad Miller
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# This software is provided by the copyright holders and contributors "as
-# is" and any express or implied warranties, including, but not limited
-# to, the implied warranties of merchantability and fitness for a
-# particular purpose are disclaimed. In no event shall the copyright
-# owner or contributors be liable for any direct, indirect, incidental,
-# special, exemplary, or consequential damages (including, but not
-# limited to, procurement of substitute goods or services; loss of use,
-# data, or profits; or business interruption) however caused and on any
-# theory of liability, whether in contract, strict liability, or tort
-# (including negligence or otherwise) arising in any way out of the use
-# of this software, even if advised of the possibility of such damage.
-
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import HtmlPattern
-from ..util import parseBoolValue
-
-# Constants for quote education.
-punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
-endOfWordClass = r"[\s.,;:!?)]"
-closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]"
-
-openingQuotesBase = (
- '(\s' # a whitespace char
- '| ' # or a non-breaking space entity
- '|--' # or dashes
- '|–|—' # or unicode
- '|&[mn]dash;' # or named dash entities
- '|–|—' # or decimal entities
- ')'
-)
-
-# Special case if the very first character is a quote
-# followed by punctuation at a non-word-break. Close the quotes by brute force:
-singleQuoteStartRe = r"^'(?=%s\\B)" % punctClass
-doubleQuoteStartRe = r'^"(?=%s\\B)' % punctClass
-
-# Special case for double sets of quotes, e.g.:
-# He said, "'Quoted' words in a larger quote."
-doubleQuoteSetsRe = r""""'(?=\w)"""
-singleQuoteSetsRe = r"""'"(?=\w)"""
-
-# Get most opening double quotes:
-openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
-
-# Double closing quotes:
-closingDoubleQuotesRegex = r'"(?=\s)'
-closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
-
-# Get most opening single quotes:
-openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
-
-# Single closing quotes:
-closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
-closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass
-
-# All remaining quotes should be opening ones
-remainingSingleQuotesRegex = "'"
-remainingDoubleQuotesRegex = '"'
-
-lsquo, rsquo, ldquo, rdquo = '‘', '’', '“', '”'
-
-class SubstituteTextPattern(HtmlPattern):
- def __init__(self, pattern, replace, markdown_instance):
- """ Replaces matches with some text. """
- HtmlPattern.__init__(self, pattern)
- self.replace = replace
- self.markdown = markdown_instance
-
- def handleMatch(self, m):
- result = ''
- for part in self.replace:
- if isinstance(part, int):
- result += m.group(part)
- else:
- result += self.markdown.htmlStash.store(part, safe=True)
- return result
-
-class SmartyExtension(Extension):
- def __init__(self, configs):
- self.config = {
- 'smart_quotes': [True, 'Educate quotes'],
- 'smart_dashes': [True, 'Educate dashes'],
- 'smart_ellipses': [True, 'Educate ellipses']
- }
- for key, value in configs:
- self.setConfig(key, parseBoolValue(value))
-
- def _addPatterns(self, md, patterns, serie):
- for ind, pattern in enumerate(patterns):
- pattern += (md,)
- pattern = SubstituteTextPattern(*pattern)
- after = ('>smarty-%s-%d' % (serie, ind - 1) if ind else '>entity')
- name = 'smarty-%s-%d' % (serie, ind)
- md.inlinePatterns.add(name, pattern, after)
-
- def educateDashes(self, md):
- emDashesPattern = SubstituteTextPattern(r'(?entity')
- md.inlinePatterns.add('smarty-en-dashes', enDashesPattern,
- '>smarty-em-dashes')
-
- def educateEllipses(self, md):
- ellipsesPattern = SubstituteTextPattern(r'(?entity')
-
- def educateQuotes(self, md):
- patterns = (
- (singleQuoteStartRe, (rsquo,)),
- (doubleQuoteStartRe, (rdquo,)),
- (doubleQuoteSetsRe, (ldquo + lsquo,)),
- (singleQuoteSetsRe, (lsquo + ldquo,)),
- (openingSingleQuotesRegex, (2, lsquo)),
- (closingSingleQuotesRegex, (rsquo,)),
- (closingSingleQuotesRegex2, (rsquo, 2)),
- (remainingSingleQuotesRegex, (lsquo,)),
- (openingDoubleQuotesRegex, (2, ldquo)),
- (closingDoubleQuotesRegex, (rdquo,)),
- (closingDoubleQuotesRegex2, (rdquo,)),
- (remainingDoubleQuotesRegex, (ldquo,))
- )
- self._addPatterns(md, patterns, 'quotes')
-
- def extendMarkdown(self, md, md_globals):
- configs = self.getConfigs()
- if configs['smart_quotes']:
- self.educateQuotes(md)
- if configs['smart_dashes']:
- self.educateDashes(md)
- if configs['smart_ellipses']:
- self.educateEllipses(md)
- md.ESCAPED_CHARS.extend(['"', "'"])
-
-def makeExtension(configs=None):
- return SmartyExtension(configs)
+# -*- coding: utf-8 -*-
+# Smarty extension for Python-Markdown
+# Author: 2013, Dmitry Shachnev
+
+# SmartyPants license:
+#
+# Copyright (c) 2003 John Gruber
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name "SmartyPants" nor the names of its contributors
+# may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# This software is provided by the copyright holders and contributors "as
+# is" and any express or implied warranties, including, but not limited
+# to, the implied warranties of merchantability and fitness for a
+# particular purpose are disclaimed. In no event shall the copyright
+# owner or contributors be liable for any direct, indirect, incidental,
+# special, exemplary, or consequential damages (including, but not
+# limited to, procurement of substitute goods or services; loss of use,
+# data, or profits; or business interruption) however caused and on any
+# theory of liability, whether in contract, strict liability, or tort
+# (including negligence or otherwise) arising in any way out of the use
+# of this software, even if advised of the possibility of such damage.
+#
+#
+# smartypants.py license:
+#
+# smartypants.py is a derivative work of SmartyPants.
+# Copyright (c) 2004, 2007 Chad Miller
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# This software is provided by the copyright holders and contributors "as
+# is" and any express or implied warranties, including, but not limited
+# to, the implied warranties of merchantability and fitness for a
+# particular purpose are disclaimed. In no event shall the copyright
+# owner or contributors be liable for any direct, indirect, incidental,
+# special, exemplary, or consequential damages (including, but not
+# limited to, procurement of substitute goods or services; loss of use,
+# data, or profits; or business interruption) however caused and on any
+# theory of liability, whether in contract, strict liability, or tort
+# (including negligence or otherwise) arising in any way out of the use
+# of this software, even if advised of the possibility of such damage.
+
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import HtmlPattern
+from ..util import parseBoolValue
+
+# Constants for quote education.
+punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
+endOfWordClass = r"[\s.,;:!?)]"
+closeClass = "[^\ \t\r\n\[\{\(\-\u0002\u0003]"
+
+openingQuotesBase = (
+ '(\s' # a whitespace char
+ '| ' # or a non-breaking space entity
+ '|--' # or dashes
+ '|–|—' # or unicode
+ '|&[mn]dash;' # or named dash entities
+ '|–|—' # or decimal entities
+ ')'
+)
+
+# Special case if the very first character is a quote
+# followed by punctuation at a non-word-break. Close the quotes by brute force:
+singleQuoteStartRe = r"^'(?=%s\\B)" % punctClass
+doubleQuoteStartRe = r'^"(?=%s\\B)' % punctClass
+
+# Special case for double sets of quotes, e.g.:
+# He said, "'Quoted' words in a larger quote."
+doubleQuoteSetsRe = r""""'(?=\w)"""
+singleQuoteSetsRe = r"""'"(?=\w)"""
+
+# Get most opening double quotes:
+openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
+
+# Double closing quotes:
+closingDoubleQuotesRegex = r'"(?=\s)'
+closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
+
+# Get most opening single quotes:
+openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
+
+# Single closing quotes:
+closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
+closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass
+
+# All remaining quotes should be opening ones
+remainingSingleQuotesRegex = "'"
+remainingDoubleQuotesRegex = '"'
+
+lsquo, rsquo, ldquo, rdquo = '‘', '’', '“', '”'
+
+class SubstituteTextPattern(HtmlPattern):
+ def __init__(self, pattern, replace, markdown_instance):
+ """ Replaces matches with some text. """
+ HtmlPattern.__init__(self, pattern)
+ self.replace = replace
+ self.markdown = markdown_instance
+
+ def handleMatch(self, m):
+ result = ''
+ for part in self.replace:
+ if isinstance(part, int):
+ result += m.group(part)
+ else:
+ result += self.markdown.htmlStash.store(part, safe=True)
+ return result
+
+class SmartyExtension(Extension):
+ def __init__(self, configs):
+ self.config = {
+ 'smart_quotes': [True, 'Educate quotes'],
+ 'smart_dashes': [True, 'Educate dashes'],
+ 'smart_ellipses': [True, 'Educate ellipses']
+ }
+ for key, value in configs:
+ self.setConfig(key, parseBoolValue(value))
+
+ def _addPatterns(self, md, patterns, serie):
+ for ind, pattern in enumerate(patterns):
+ pattern += (md,)
+ pattern = SubstituteTextPattern(*pattern)
+ after = ('>smarty-%s-%d' % (serie, ind - 1) if ind else '>entity')
+ name = 'smarty-%s-%d' % (serie, ind)
+ md.inlinePatterns.add(name, pattern, after)
+
+ def educateDashes(self, md):
+ emDashesPattern = SubstituteTextPattern(r'(?entity')
+ md.inlinePatterns.add('smarty-en-dashes', enDashesPattern,
+ '>smarty-em-dashes')
+
+ def educateEllipses(self, md):
+ ellipsesPattern = SubstituteTextPattern(r'(?entity')
+
+ def educateQuotes(self, md):
+ patterns = (
+ (singleQuoteStartRe, (rsquo,)),
+ (doubleQuoteStartRe, (rdquo,)),
+ (doubleQuoteSetsRe, (ldquo + lsquo,)),
+ (singleQuoteSetsRe, (lsquo + ldquo,)),
+ (openingSingleQuotesRegex, (2, lsquo)),
+ (closingSingleQuotesRegex, (rsquo,)),
+ (closingSingleQuotesRegex2, (rsquo, 2)),
+ (remainingSingleQuotesRegex, (lsquo,)),
+ (openingDoubleQuotesRegex, (2, ldquo)),
+ (closingDoubleQuotesRegex, (rdquo,)),
+ (closingDoubleQuotesRegex2, (rdquo,)),
+ (remainingDoubleQuotesRegex, (ldquo,))
+ )
+ self._addPatterns(md, patterns, 'quotes')
+
+ def extendMarkdown(self, md, md_globals):
+ configs = self.getConfigs()
+ if configs['smart_quotes']:
+ self.educateQuotes(md)
+ if configs['smart_dashes']:
+ self.educateDashes(md)
+ if configs['smart_ellipses']:
+ self.educateEllipses(md)
+ md.ESCAPED_CHARS.extend(['"', "'"])
+
+def makeExtension(configs=None):
+ return SmartyExtension(configs)
diff --git a/awx/lib/site-packages/markdown/extensions/tables.py b/awx/lib/site-packages/markdown/extensions/tables.py
index c928365358..ad52ec11c7 100644
--- a/awx/lib/site-packages/markdown/extensions/tables.py
+++ b/awx/lib/site-packages/markdown/extensions/tables.py
@@ -1,100 +1,100 @@
-"""
-Tables Extension for Python-Markdown
-====================================
-
-Added parsing of tables to Python-Markdown.
-
-A simple example:
-
- First Header | Second Header
- ------------- | -------------
- Content Cell | Content Cell
- Content Cell | Content Cell
-
-Copyright 2009 - [Waylan Limberg](http://achinghead.com)
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..blockprocessors import BlockProcessor
-from ..util import etree
-
-class TableProcessor(BlockProcessor):
- """ Process Tables. """
-
- def test(self, parent, block):
- rows = block.split('\n')
- return (len(rows) > 2 and '|' in rows[0] and
- '|' in rows[1] and '-' in rows[1] and
- rows[1].strip()[0] in ['|', ':', '-'])
-
- def run(self, parent, blocks):
- """ Parse a table block and build table. """
- block = blocks.pop(0).split('\n')
- header = block[0].strip()
- seperator = block[1].strip()
- rows = block[2:]
- # Get format type (bordered by pipes or not)
- border = False
- if header.startswith('|'):
- border = True
- # Get alignment of columns
- align = []
- for c in self._split_row(seperator, border):
- if c.startswith(':') and c.endswith(':'):
- align.append('center')
- elif c.startswith(':'):
- align.append('left')
- elif c.endswith(':'):
- align.append('right')
- else:
- align.append(None)
- # Build table
- table = etree.SubElement(parent, 'table')
- thead = etree.SubElement(table, 'thead')
- self._build_row(header, thead, align, border)
- tbody = etree.SubElement(table, 'tbody')
- for row in rows:
- self._build_row(row.strip(), tbody, align, border)
-
- def _build_row(self, row, parent, align, border):
- """ Given a row of text, build table cells. """
- tr = etree.SubElement(parent, 'tr')
- tag = 'td'
- if parent.tag == 'thead':
- tag = 'th'
- cells = self._split_row(row, border)
- # We use align here rather than cells to ensure every row
- # contains the same number of columns.
- for i, a in enumerate(align):
- c = etree.SubElement(tr, tag)
- try:
- c.text = cells[i].strip()
- except IndexError:
- c.text = ""
- if a:
- c.set('align', a)
-
- def _split_row(self, row, border):
- """ split a row of text into list of cells. """
- if border:
- if row.startswith('|'):
- row = row[1:]
- if row.endswith('|'):
- row = row[:-1]
- return row.split('|')
-
-
-class TableExtension(Extension):
- """ Add tables to Markdown. """
-
- def extendMarkdown(self, md, md_globals):
- """ Add an instance of TableProcessor to BlockParser. """
- md.parser.blockprocessors.add('table',
- TableProcessor(md.parser),
- ' 2 and '|' in rows[0] and
+ '|' in rows[1] and '-' in rows[1] and
+ rows[1].strip()[0] in ['|', ':', '-'])
+
+ def run(self, parent, blocks):
+ """ Parse a table block and build table. """
+ block = blocks.pop(0).split('\n')
+ header = block[0].strip()
+ seperator = block[1].strip()
+ rows = block[2:]
+ # Get format type (bordered by pipes or not)
+ border = False
+ if header.startswith('|'):
+ border = True
+ # Get alignment of columns
+ align = []
+ for c in self._split_row(seperator, border):
+ if c.startswith(':') and c.endswith(':'):
+ align.append('center')
+ elif c.startswith(':'):
+ align.append('left')
+ elif c.endswith(':'):
+ align.append('right')
+ else:
+ align.append(None)
+ # Build table
+ table = etree.SubElement(parent, 'table')
+ thead = etree.SubElement(table, 'thead')
+ self._build_row(header, thead, align, border)
+ tbody = etree.SubElement(table, 'tbody')
+ for row in rows:
+ self._build_row(row.strip(), tbody, align, border)
+
+ def _build_row(self, row, parent, align, border):
+ """ Given a row of text, build table cells. """
+ tr = etree.SubElement(parent, 'tr')
+ tag = 'td'
+ if parent.tag == 'thead':
+ tag = 'th'
+ cells = self._split_row(row, border)
+ # We use align here rather than cells to ensure every row
+ # contains the same number of columns.
+ for i, a in enumerate(align):
+ c = etree.SubElement(tr, tag)
+ try:
+ c.text = cells[i].strip()
+ except IndexError:
+ c.text = ""
+ if a:
+ c.set('align', a)
+
+ def _split_row(self, row, border):
+ """ split a row of text into list of cells. """
+ if border:
+ if row.startswith('|'):
+ row = row[1:]
+ if row.endswith('|'):
+ row = row[:-1]
+ return row.split('|')
+
+
+class TableExtension(Extension):
+ """ Add tables to Markdown. """
+
+ def extendMarkdown(self, md, md_globals):
+ """ Add an instance of TableProcessor to BlockParser. """
+ md.parser.blockprocessors.add('table',
+ TableProcessor(md.parser),
+ '
- [{'level': 1, 'children': [{'level': 2, 'children': []}]}]
-
- A wrong list is also converted:
- [{'level': 2}, {'level': 1}]
- =>
- [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
- """
-
- def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
-
- if not remaining_list:
- return [], []
-
- current = remaining_list.pop(0)
- if not 'children' in current.keys():
- current['children'] = []
-
- if not prev_elements:
- # This happens for instance with [8, 1, 1], ie. when some
- # header level is outside a scope. We treat it as a
- # top-level
- next_elements, children = build_correct(remaining_list, [current])
- current['children'].append(children)
- return [current] + next_elements, []
-
- prev_element = prev_elements.pop()
- children = []
- next_elements = []
- # Is current part of the child list or next list?
- if current['level'] > prev_element['level']:
- #print "%d is a child of %d" % (current['level'], prev_element['level'])
- prev_elements.append(prev_element)
- prev_elements.append(current)
- prev_element['children'].append(current)
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- children += children2
- next_elements += next_elements2
- else:
- #print "%d is ancestor of %d" % (current['level'], prev_element['level'])
- if not prev_elements:
- #print "No previous elements, so appending to the next set"
- next_elements.append(current)
- prev_elements = [current]
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- current['children'].extend(children2)
- else:
- #print "Previous elements, comparing to those first"
- remaining_list.insert(0, current)
- next_elements2, children2 = build_correct(remaining_list, prev_elements)
- children.extend(children2)
- next_elements += next_elements2
-
- return next_elements, children
-
- ordered_list, __ = build_correct(toc_list)
- return ordered_list
-
-
-class TocTreeprocessor(Treeprocessor):
-
- # Iterator wrapper to get parent and child all at once
- def iterparent(self, root):
- for parent in root.getiterator():
- for child in parent:
- yield parent, child
-
- def add_anchor(self, c, elem_id): #@ReservedAssignment
- anchor = etree.Element("a")
- anchor.text = c.text
- anchor.attrib["href"] = "#" + elem_id
- anchor.attrib["class"] = "toclink"
- c.text = ""
- for elem in c.getchildren():
- anchor.append(elem)
- c.remove(elem)
- c.append(anchor)
-
- def add_permalink(self, c, elem_id):
- permalink = etree.Element("a")
- permalink.text = ("%spara;" % AMP_SUBSTITUTE
- if self.use_permalinks is True else self.use_permalinks)
- permalink.attrib["href"] = "#" + elem_id
- permalink.attrib["class"] = "headerlink"
- permalink.attrib["title"] = "Permanent link"
- c.append(permalink)
-
- def build_toc_etree(self, div, toc_list):
- # Add title to the div
- if self.config["title"]:
- header = etree.SubElement(div, "span")
- header.attrib["class"] = "toctitle"
- header.text = self.config["title"]
-
- def build_etree_ul(toc_list, parent):
- ul = etree.SubElement(parent, "ul")
- for item in toc_list:
- # List item link, to be inserted into the toc div
- li = etree.SubElement(ul, "li")
- link = etree.SubElement(li, "a")
- link.text = item.get('name', '')
- link.attrib["href"] = '#' + item.get('id', '')
- if item['children']:
- build_etree_ul(item['children'], li)
- return ul
-
- return build_etree_ul(toc_list, div)
-
- def run(self, doc):
-
- div = etree.Element("div")
- div.attrib["class"] = "toc"
- header_rgx = re.compile("[Hh][123456]")
-
- self.use_anchors = parseBoolValue(self.config["anchorlink"])
- self.use_permalinks = parseBoolValue(self.config["permalink"], False)
- if self.use_permalinks is None:
- self.use_permalinks = self.config["permalink"]
-
- # Get a list of id attributes
- used_ids = set()
- for c in doc.getiterator():
- if "id" in c.attrib:
- used_ids.add(c.attrib["id"])
-
- toc_list = []
- marker_found = False
- for (p, c) in self.iterparent(doc):
- text = ''.join(itertext(c)).strip()
- if not text:
- continue
-
- # To keep the output from screwing up the
- # validation by putting a inside of a
- # we actually replace the
in its entirety.
- # We do not allow the marker inside a header as that
- # would causes an enless loop of placing a new TOC
- # inside previously generated TOC.
- if c.text and c.text.strip() == self.config["marker"] and \
- not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
- for i in range(len(p)):
- if p[i] == c:
- p[i] = div
- break
- marker_found = True
-
- if header_rgx.match(c.tag):
-
- # Do not override pre-existing ids
- if not "id" in c.attrib:
- elem_id = stashedHTML2text(text, self.markdown)
- elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
- c.attrib["id"] = elem_id
- else:
- elem_id = c.attrib["id"]
-
- tag_level = int(c.tag[-1])
-
- toc_list.append({'level': tag_level,
- 'id': elem_id,
- 'name': text})
-
- if self.use_anchors:
- self.add_anchor(c, elem_id)
- if self.use_permalinks:
- self.add_permalink(c, elem_id)
-
- toc_list_nested = order_toc_list(toc_list)
- self.build_toc_etree(div, toc_list_nested)
- prettify = self.markdown.treeprocessors.get('prettify')
- if prettify: prettify.run(div)
- if not marker_found:
- # serialize and attach to markdown instance.
- toc = self.markdown.serializer(div)
- for pp in self.markdown.postprocessors.values():
- toc = pp.run(toc)
- self.markdown.toc = toc
-
-
-class TocExtension(Extension):
-
- TreeProcessorClass = TocTreeprocessor
-
- def __init__(self, configs=[]):
- self.config = { "marker" : ["[TOC]",
- "Text to find and replace with Table of Contents -"
- "Defaults to \"[TOC]\""],
- "slugify" : [slugify,
- "Function to generate anchors based on header text-"
- "Defaults to the headerid ext's slugify function."],
- "title" : [None,
- "Title to insert into TOC
- "
- "Defaults to None"],
- "anchorlink" : [0,
- "1 if header should be a self link"
- "Defaults to 0"],
- "permalink" : [0,
- "1 or link text if a Sphinx-style permalink should be added",
- "Defaults to 0"]
- }
-
- for key, value in configs:
- self.setConfig(key, value)
-
- def extendMarkdown(self, md, md_globals):
- tocext = self.TreeProcessorClass(md)
- tocext.config = self.getConfigs()
- # Headerid ext is set to '>prettify'. With this set to '_end',
- # it should always come after headerid ext (and honor ids assinged
- # by the header id extension) if both are used. Same goes for
- # attr_list extension. This must come last because we don't want
- # to redefine ids after toc is created. But we do want toc prettified.
- md.treeprocessors.add("toc", tocext, "_end")
-
-
-def makeExtension(configs={}):
- return TocExtension(configs=configs)
+"""
+Table of Contents Extension for Python-Markdown
+* * *
+
+(c) 2008 [Jack Miller](http://codezen.org)
+
+Dependencies:
+* [Markdown 2.1+](http://packages.python.org/Markdown/)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..treeprocessors import Treeprocessor
+from ..util import etree, parseBoolValue, AMP_SUBSTITUTE
+from .headerid import slugify, unique, itertext, stashedHTML2text
+import re
+
+
+def order_toc_list(toc_list):
+ """Given an unsorted list with errors and skips, return a nested one.
+ [{'level': 1}, {'level': 2}]
+ =>
+ [{'level': 1, 'children': [{'level': 2, 'children': []}]}]
+
+ A wrong list is also converted:
+ [{'level': 2}, {'level': 1}]
+ =>
+ [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
+ """
+
+ def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
+
+ if not remaining_list:
+ return [], []
+
+ current = remaining_list.pop(0)
+ if not 'children' in current.keys():
+ current['children'] = []
+
+ if not prev_elements:
+ # This happens for instance with [8, 1, 1], ie. when some
+ # header level is outside a scope. We treat it as a
+ # top-level
+ next_elements, children = build_correct(remaining_list, [current])
+ current['children'].append(children)
+ return [current] + next_elements, []
+
+ prev_element = prev_elements.pop()
+ children = []
+ next_elements = []
+ # Is current part of the child list or next list?
+ if current['level'] > prev_element['level']:
+ #print "%d is a child of %d" % (current['level'], prev_element['level'])
+ prev_elements.append(prev_element)
+ prev_elements.append(current)
+ prev_element['children'].append(current)
+ next_elements2, children2 = build_correct(remaining_list, prev_elements)
+ children += children2
+ next_elements += next_elements2
+ else:
+ #print "%d is ancestor of %d" % (current['level'], prev_element['level'])
+ if not prev_elements:
+ #print "No previous elements, so appending to the next set"
+ next_elements.append(current)
+ prev_elements = [current]
+ next_elements2, children2 = build_correct(remaining_list, prev_elements)
+ current['children'].extend(children2)
+ else:
+ #print "Previous elements, comparing to those first"
+ remaining_list.insert(0, current)
+ next_elements2, children2 = build_correct(remaining_list, prev_elements)
+ children.extend(children2)
+ next_elements += next_elements2
+
+ return next_elements, children
+
+ ordered_list, __ = build_correct(toc_list)
+ return ordered_list
+
+
+class TocTreeprocessor(Treeprocessor):
+
+ # Iterator wrapper to get parent and child all at once
+ def iterparent(self, root):
+ for parent in root.getiterator():
+ for child in parent:
+ yield parent, child
+
+ def add_anchor(self, c, elem_id): #@ReservedAssignment
+ anchor = etree.Element("a")
+ anchor.text = c.text
+ anchor.attrib["href"] = "#" + elem_id
+ anchor.attrib["class"] = "toclink"
+ c.text = ""
+ for elem in c.getchildren():
+ anchor.append(elem)
+ c.remove(elem)
+ c.append(anchor)
+
+ def add_permalink(self, c, elem_id):
+ permalink = etree.Element("a")
+ permalink.text = ("%spara;" % AMP_SUBSTITUTE
+ if self.use_permalinks is True else self.use_permalinks)
+ permalink.attrib["href"] = "#" + elem_id
+ permalink.attrib["class"] = "headerlink"
+ permalink.attrib["title"] = "Permanent link"
+ c.append(permalink)
+
+ def build_toc_etree(self, div, toc_list):
+ # Add title to the div
+ if self.config["title"]:
+ header = etree.SubElement(div, "span")
+ header.attrib["class"] = "toctitle"
+ header.text = self.config["title"]
+
+ def build_etree_ul(toc_list, parent):
+ ul = etree.SubElement(parent, "ul")
+ for item in toc_list:
+ # List item link, to be inserted into the toc div
+ li = etree.SubElement(ul, "li")
+ link = etree.SubElement(li, "a")
+ link.text = item.get('name', '')
+ link.attrib["href"] = '#' + item.get('id', '')
+ if item['children']:
+ build_etree_ul(item['children'], li)
+ return ul
+
+ return build_etree_ul(toc_list, div)
+
+ def run(self, doc):
+
+ div = etree.Element("div")
+ div.attrib["class"] = "toc"
+ header_rgx = re.compile("[Hh][123456]")
+
+ self.use_anchors = parseBoolValue(self.config["anchorlink"])
+ self.use_permalinks = parseBoolValue(self.config["permalink"], False)
+ if self.use_permalinks is None:
+ self.use_permalinks = self.config["permalink"]
+
+ # Get a list of id attributes
+ used_ids = set()
+ for c in doc.getiterator():
+ if "id" in c.attrib:
+ used_ids.add(c.attrib["id"])
+
+ toc_list = []
+ marker_found = False
+ for (p, c) in self.iterparent(doc):
+ text = ''.join(itertext(c)).strip()
+ if not text:
+ continue
+
+ # To keep the output from screwing up the
+ # validation by putting a inside of a
+ # we actually replace the
in its entirety.
+ # We do not allow the marker inside a header as that
+ # would cause an endless loop of placing a new TOC
+ # inside previously generated TOC.
+ if c.text and c.text.strip() == self.config["marker"] and \
+ not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
+ for i in range(len(p)):
+ if p[i] == c:
+ p[i] = div
+ break
+ marker_found = True
+
+ if header_rgx.match(c.tag):
+
+ # Do not override pre-existing ids
+ if not "id" in c.attrib:
+ elem_id = stashedHTML2text(text, self.markdown)
+ elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
+ c.attrib["id"] = elem_id
+ else:
+ elem_id = c.attrib["id"]
+
+ tag_level = int(c.tag[-1])
+
+ toc_list.append({'level': tag_level,
+ 'id': elem_id,
+ 'name': text})
+
+ if self.use_anchors:
+ self.add_anchor(c, elem_id)
+ if self.use_permalinks:
+ self.add_permalink(c, elem_id)
+
+ toc_list_nested = order_toc_list(toc_list)
+ self.build_toc_etree(div, toc_list_nested)
+ prettify = self.markdown.treeprocessors.get('prettify')
+ if prettify: prettify.run(div)
+ if not marker_found:
+ # serialize and attach to markdown instance.
+ toc = self.markdown.serializer(div)
+ for pp in self.markdown.postprocessors.values():
+ toc = pp.run(toc)
+ self.markdown.toc = toc
+
+
+class TocExtension(Extension):
+
+ TreeProcessorClass = TocTreeprocessor
+
+ def __init__(self, configs=[]):
+ self.config = { "marker" : ["[TOC]",
+ "Text to find and replace with Table of Contents -"
+ "Defaults to \"[TOC]\""],
+ "slugify" : [slugify,
+ "Function to generate anchors based on header text-"
+ "Defaults to the headerid ext's slugify function."],
+ "title" : [None,
+ "Title to insert into TOC
- "
+ "Defaults to None"],
+ "anchorlink" : [0,
+ "1 if header should be a self link"
+ "Defaults to 0"],
+ "permalink" : [0,
+ "1 or link text if a Sphinx-style permalink should be added",
+ "Defaults to 0"]
+ }
+
+ for key, value in configs:
+ self.setConfig(key, value)
+
+ def extendMarkdown(self, md, md_globals):
+ tocext = self.TreeProcessorClass(md)
+ tocext.config = self.getConfigs()
+ # Headerid ext is set to '>prettify'. With this set to '_end',
+ # it should always come after headerid ext (and honor ids assigned
+ # by the header id extension) if both are used. Same goes for
+ # attr_list extension. This must come last because we don't want
+ # to redefine ids after toc is created. But we do want toc prettified.
+ md.treeprocessors.add("toc", tocext, "_end")
+
+
+def makeExtension(configs={}):
+ return TocExtension(configs=configs)
diff --git a/awx/lib/site-packages/markdown/extensions/wikilinks.py b/awx/lib/site-packages/markdown/extensions/wikilinks.py
index 3afcc337ff..ba1947c0b1 100644
--- a/awx/lib/site-packages/markdown/extensions/wikilinks.py
+++ b/awx/lib/site-packages/markdown/extensions/wikilinks.py
@@ -1,151 +1,151 @@
-'''
-WikiLinks Extension for Python-Markdown
-======================================
-
-Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
-
-Basic usage:
-
- >>> import markdown
- >>> text = "Some text with a [[WikiLink]]."
- >>> html = markdown.markdown(text, ['wikilinks'])
- >>> print html
- Some text with a WikiLink.
-
-Whitespace behavior:
-
- >>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
-
- >>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
- foo bar
-
-To define custom settings the simple way:
-
- >>> print markdown.markdown(text,
- ... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
- ... )
- Some text with a WikiLink.
-
-Custom settings the complex way:
-
- >>> md = markdown.Markdown(
- ... extensions = ['wikilinks'],
- ... extension_configs = {'wikilinks': [
- ... ('base_url', 'http://example.com/'),
- ... ('end_url', '.html'),
- ... ('html_class', '') ]},
- ... safe_mode = True)
- >>> print md.convert(text)
- Some text with a WikiLink.
-
-Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
-
- >>> text = """wiki_base_url: http://example.com/
- ... wiki_end_url: .html
- ... wiki_html_class:
- ...
- ... Some text with a [[WikiLink]]."""
- >>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
- >>> print md.convert(text)
- Some text with a WikiLink.
-
-MetaData should not carry over to next document:
-
- >>> print md.convert("No [[MetaData]] here.")
- No MetaData here.
-
-Define a custom URL builder:
-
- >>> def my_url_builder(label, base, end):
- ... return '/bar/'
- >>> md = markdown.Markdown(extensions=['wikilinks'],
- ... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
- >>> print md.convert('[[foo]]')
-
-
-From the command line:
-
- python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
-
-By [Waylan Limberg](http://achinghead.com/).
-
-License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
-
-Dependencies:
-* [Python 2.3+](http://python.org)
-* [Markdown 2.0+](http://packages.python.org/Markdown/)
-'''
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import Extension
-from ..inlinepatterns import Pattern
-from ..util import etree
-import re
-
-def build_url(label, base, end):
- """ Build a url from the label, a base, and an end. """
- clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
- return '%s%s%s'% (base, clean_label, end)
-
-
-class WikiLinkExtension(Extension):
- def __init__(self, configs):
- # set extension defaults
- self.config = {
- 'base_url' : ['/', 'String to append to beginning or URL.'],
- 'end_url' : ['/', 'String to append to end of URL.'],
- 'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
- 'build_url' : [build_url, 'Callable formats URL from label.'],
- }
- configs = dict(configs) or {}
- # Override defaults with user settings
- for key, value in configs.items():
- self.setConfig(key, value)
-
- def extendMarkdown(self, md, md_globals):
- self.md = md
-
- # append to end of inline patterns
- WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
- wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
- wikilinkPattern.md = md
- md.inlinePatterns.add('wikilink', wikilinkPattern, ">> import markdown
+ >>> text = "Some text with a [[WikiLink]]."
+ >>> html = markdown.markdown(text, ['wikilinks'])
+ >>> print html
+ Some text with a WikiLink.
+
+Whitespace behavior:
+
+ >>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
+
+ >>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
+ foo bar
+
+To define custom settings the simple way:
+
+ >>> print markdown.markdown(text,
+ ... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
+ ... )
+ Some text with a WikiLink.
+
+Custom settings the complex way:
+
+ >>> md = markdown.Markdown(
+ ... extensions = ['wikilinks'],
+ ... extension_configs = {'wikilinks': [
+ ... ('base_url', 'http://example.com/'),
+ ... ('end_url', '.html'),
+ ... ('html_class', '') ]},
+ ... safe_mode = True)
+ >>> print md.convert(text)
+ Some text with a WikiLink.
+
+Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
+
+ >>> text = """wiki_base_url: http://example.com/
+ ... wiki_end_url: .html
+ ... wiki_html_class:
+ ...
+ ... Some text with a [[WikiLink]]."""
+ >>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
+ >>> print md.convert(text)
+ Some text with a WikiLink.
+
+MetaData should not carry over to next document:
+
+ >>> print md.convert("No [[MetaData]] here.")
+ No MetaData here.
+
+Define a custom URL builder:
+
+ >>> def my_url_builder(label, base, end):
+ ... return '/bar/'
+ >>> md = markdown.Markdown(extensions=['wikilinks'],
+ ... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
+ >>> print md.convert('[[foo]]')
+
+
+From the command line:
+
+ python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
+
+By [Waylan Limberg](http://achinghead.com/).
+
+License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
+
+Dependencies:
+* [Python 2.3+](http://python.org)
+* [Markdown 2.0+](http://packages.python.org/Markdown/)
+'''
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import Extension
+from ..inlinepatterns import Pattern
+from ..util import etree
+import re
+
+def build_url(label, base, end):
+ """ Build a url from the label, a base, and an end. """
+ clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
+ return '%s%s%s'% (base, clean_label, end)
+
+
+class WikiLinkExtension(Extension):
+ def __init__(self, configs):
+ # set extension defaults
+ self.config = {
+ 'base_url' : ['/', 'String to append to beginning or URL.'],
+ 'end_url' : ['/', 'String to append to end of URL.'],
+ 'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
+ 'build_url' : [build_url, 'Callable formats URL from label.'],
+ }
+ configs = dict(configs) or {}
+ # Override defaults with user settings
+ for key, value in configs.items():
+ self.setConfig(key, value)
+
+ def extendMarkdown(self, md, md_globals):
+ self.md = md
+
+ # append to end of inline patterns
+ WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
+ wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
+ wikilinkPattern.md = md
+ md.inlinePatterns.add('wikilink', wikilinkPattern, " tags
-and _then_ try to replace inline html, we would end up with a mess.
-So, we apply the expressions in the following order:
-
-* escape and backticks have to go before everything else, so
- that we can preempt any markdown patterns by escaping them.
-
-* then we handle auto-links (must be done before inline html)
-
-* then we handle inline HTML. At this point we will simply
- replace all inline HTML strings with a placeholder and add
- the actual HTML to a hash.
-
-* then inline images (must be done before links)
-
-* then bracketed links, first regular then reference-style
-
-* finally we apply strong and emphasis
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-try:
- from urllib.parse import urlparse, urlunparse
-except ImportError:
- from urlparse import urlparse, urlunparse
-try:
- from html import entities
-except ImportError:
- import htmlentitydefs as entities
-
-
-def build_inlinepatterns(md_instance, **kwargs):
- """ Build the default set of inline patterns for Markdown. """
- inlinePatterns = odict.OrderedDict()
- inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
- inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
- inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
- inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
- inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
- inlinePatterns["image_reference"] = \
- ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
- inlinePatterns["short_reference"] = \
- ReferencePattern(SHORT_REF_RE, md_instance)
- inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
- inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
- inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
- if md_instance.safeMode != 'escape':
- inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
- inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
- inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
- inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
- inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
- inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
- if md_instance.smart_emphasis:
- inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
- else:
- inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
- return inlinePatterns
-
-"""
-The actual regular expressions for patterns
------------------------------------------------------------------------------
-"""
-
-NOBRACKET = r'[^\]\[]*'
-BRK = ( r'\[('
- + (NOBRACKET + r'(\[')*6
- + (NOBRACKET+ r'\])*')*6
- + NOBRACKET + r')\]' )
-NOIMG = r'(?|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
-# [text](url) or [text]() or [text](url "title")
-
-IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
-#  or 
-REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3]
-SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google]
-IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2]
-NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
-AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' #
-AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' #
-
-HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
-ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
-LINE_BREAK_RE = r' \n' # two spaces at end of line
-
-
-def dequote(string):
- """Remove quotes from around a string."""
- if ( ( string.startswith('"') and string.endswith('"'))
- or (string.startswith("'") and string.endswith("'")) ):
- return string[1:-1]
- else:
- return string
-
-ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
-
-def handleAttributes(text, parent):
- """Set values of an element based on attribute definitions ({@id=123})."""
- def attributeCallback(match):
- parent.set(match.group(1), match.group(2).replace('\n', ' '))
- return ATTR_RE.sub(attributeCallback, text)
-
-
-"""
-The pattern classes
------------------------------------------------------------------------------
-"""
-
-class Pattern(object):
- """Base class that inline patterns subclass. """
-
- def __init__(self, pattern, markdown_instance=None):
- """
- Create an instant of an inline pattern.
-
- Keyword arguments:
-
- * pattern: A regular expression that matches a pattern
-
- """
- self.pattern = pattern
- self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
- re.DOTALL | re.UNICODE)
-
- # Api for Markdown to pass safe_mode into instance
- self.safe_mode = False
- if markdown_instance:
- self.markdown = markdown_instance
-
- def getCompiledRegExp(self):
- """ Return a compiled regular expression. """
- return self.compiled_re
-
- def handleMatch(self, m):
- """Return a ElementTree element from the given match.
-
- Subclasses should override this method.
-
- Keyword arguments:
-
- * m: A re match object containing a match of the pattern.
-
- """
- pass
-
- def type(self):
- """ Return class name, to define pattern type """
- return self.__class__.__name__
-
- def unescape(self, text):
- """ Return unescaped text given text with an inline placeholder. """
- try:
- stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError:
- return text
- def itertext(el):
- ' Reimplement Element.itertext for older python versions '
- tag = el.tag
- if not isinstance(tag, util.string_type) and tag is not None:
- return
- if el.text:
- yield el.text
- for e in el:
- for s in itertext(e):
- yield s
- if e.tail:
- yield e.tail
- def get_stash(m):
- id = m.group(1)
- if id in stash:
- value = stash.get(id)
- if isinstance(value, util.string_type):
- return value
- else:
- # An etree Element - return text content only
- return ''.join(itertext(value))
- return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
-
-
-class SimpleTextPattern(Pattern):
- """ Return a simple text of group(2) of a Pattern. """
- def handleMatch(self, m):
- text = m.group(2)
- if text == util.INLINE_PLACEHOLDER_PREFIX:
- return None
- return text
-
-
-class EscapePattern(Pattern):
- """ Return an escaped character. """
-
- def handleMatch(self, m):
- char = m.group(2)
- if char in self.markdown.ESCAPED_CHARS:
- return '%s%s%s' % (util.STX, ord(char), util.ETX)
- else:
- return None
-
-
-class SimpleTagPattern(Pattern):
- """
- Return element of type `tag` with a text attribute of group(3)
- of a Pattern.
-
- """
- def __init__ (self, pattern, tag):
- Pattern.__init__(self, pattern)
- self.tag = tag
-
- def handleMatch(self, m):
- el = util.etree.Element(self.tag)
- el.text = m.group(3)
- return el
-
-
-class SubstituteTagPattern(SimpleTagPattern):
- """ Return an element of type `tag` with no children. """
- def handleMatch (self, m):
- return util.etree.Element(self.tag)
-
-
-class BacktickPattern(Pattern):
- """ Return a `` element containing the matching text. """
- def __init__ (self, pattern):
- Pattern.__init__(self, pattern)
- self.tag = "code"
-
- def handleMatch(self, m):
- el = util.etree.Element(self.tag)
- el.text = util.AtomicString(m.group(3).strip())
- return el
-
-
-class DoubleTagPattern(SimpleTagPattern):
- """Return a ElementTree element nested in tag2 nested in tag1.
-
- Useful for strong emphasis etc.
-
- """
- def handleMatch(self, m):
- tag1, tag2 = self.tag.split(",")
- el1 = util.etree.Element(tag1)
- el2 = util.etree.SubElement(el1, tag2)
- el2.text = m.group(3)
- return el1
-
-
-class HtmlPattern(Pattern):
- """ Store raw inline html and return a placeholder. """
- def handleMatch (self, m):
- rawhtml = self.unescape(m.group(2))
- place_holder = self.markdown.htmlStash.store(rawhtml)
- return place_holder
-
- def unescape(self, text):
- """ Return unescaped text given text with an inline placeholder. """
- try:
- stash = self.markdown.treeprocessors['inline'].stashed_nodes
- except KeyError:
- return text
- def get_stash(m):
- id = m.group(1)
- value = stash.get(id)
- if value is not None:
- try:
- return self.markdown.serializer(value)
- except:
- return '\%s' % value
-
- return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
-
-
-class LinkPattern(Pattern):
- """ Return a link element from the given match. """
- def handleMatch(self, m):
- el = util.etree.Element("a")
- el.text = m.group(2)
- title = m.group(13)
- href = m.group(9)
-
- if href:
- if href[0] == "<":
- href = href[1:-1]
- el.set("href", self.sanitize_url(self.unescape(href.strip())))
- else:
- el.set("href", "")
-
- if title:
- title = dequote(self.unescape(title))
- el.set("title", title)
- return el
-
- def sanitize_url(self, url):
- """
- Sanitize a url against xss attacks in "safe_mode".
-
- Rather than specifically blacklisting `javascript:alert("XSS")` and all
- its aliases (see ), we whitelist known
- safe url formats. Most urls contain a network location, however some
- are known not to (i.e.: mailto links). Script urls do not contain a
- location. Additionally, for `javascript:...`, the scheme would be
- "javascript" but some aliases will appear to `urlparse()` to have no
- scheme. On top of that relative links (i.e.: "foo/bar.html") have no
- scheme. Therefore we must check "path", "parameters", "query" and
- "fragment" for any literal colons. We don't check "scheme" for colons
- because it *should* never have any and "netloc" must allow the form:
- `username:password@host:port`.
-
- """
- if not self.markdown.safeMode:
- # Return immediately bipassing parsing.
- return url
-
- try:
- scheme, netloc, path, params, query, fragment = url = urlparse(url)
- except ValueError:
- # Bad url - so bad it couldn't be parsed.
- return ''
-
- locless_schemes = ['', 'mailto', 'news']
- allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
- if scheme not in allowed_schemes:
- # Not a known (allowed) scheme. Not safe.
- return ''
-
- if netloc == '' and scheme not in locless_schemes:
- # This should not happen. Treat as suspect.
- return ''
-
- for part in url[2:]:
- if ":" in part:
- # A colon in "path", "parameters", "query" or "fragment" is suspect.
- return ''
-
- # Url passes all tests. Return url as-is.
- return urlunparse(url)
-
-class ImagePattern(LinkPattern):
- """ Return a img element from the given match. """
- def handleMatch(self, m):
- el = util.etree.Element("img")
- src_parts = m.group(9).split()
- if src_parts:
- src = src_parts[0]
- if src[0] == "<" and src[-1] == ">":
- src = src[1:-1]
- el.set('src', self.sanitize_url(self.unescape(src)))
- else:
- el.set('src', "")
- if len(src_parts) > 1:
- el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
-
- if self.markdown.enable_attributes:
- truealt = handleAttributes(m.group(2), el)
- else:
- truealt = m.group(2)
-
- el.set('alt', self.unescape(truealt))
- return el
-
-class ReferencePattern(LinkPattern):
- """ Match to a stored reference and return link element. """
-
- NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
-
- def handleMatch(self, m):
- try:
- id = m.group(9).lower()
- except IndexError:
- id = None
- if not id:
- # if we got something like "[Google][]" or "[Goggle]"
- # we'll use "google" as the id
- id = m.group(2).lower()
-
- # Clean up linebreaks in id
- id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
- if not id in self.markdown.references: # ignore undefined refs
- return None
- href, title = self.markdown.references[id]
-
- text = m.group(2)
- return self.makeTag(href, title, text)
-
- def makeTag(self, href, title, text):
- el = util.etree.Element('a')
-
- el.set('href', self.sanitize_url(href))
- if title:
- el.set('title', title)
-
- el.text = text
- return el
-
-
-class ImageReferencePattern(ReferencePattern):
- """ Match to a stored reference and return img element. """
- def makeTag(self, href, title, text):
- el = util.etree.Element("img")
- el.set("src", self.sanitize_url(href))
- if title:
- el.set("title", title)
-
- if self.markdown.enable_attributes:
- text = handleAttributes(text, el)
-
- el.set("alt", self.unescape(text))
- return el
-
-
-class AutolinkPattern(Pattern):
- """ Return a link Element given an autolink (``). """
- def handleMatch(self, m):
- el = util.etree.Element("a")
- el.set('href', self.unescape(m.group(2)))
- el.text = util.AtomicString(m.group(2))
- return el
-
-class AutomailPattern(Pattern):
- """
- Return a mailto link Element given an automail link (``).
- """
- def handleMatch(self, m):
- el = util.etree.Element('a')
- email = self.unescape(m.group(2))
- if email.startswith("mailto:"):
- email = email[len("mailto:"):]
-
- def codepoint2name(code):
- """Return entity definition by code, or the code if not defined."""
- entity = entities.codepoint2name.get(code)
- if entity:
- return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
- else:
- return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
-
- letters = [codepoint2name(ord(letter)) for letter in email]
- el.text = util.AtomicString(''.join(letters))
-
- mailto = "mailto:" + email
- mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
- ord(letter) for letter in mailto])
- el.set('href', mailto)
- return el
-
+"""
+INLINE PATTERNS
+=============================================================================
+
+Inline patterns such as *emphasis* are handled by means of auxiliary
+objects, one per pattern. Pattern objects must be instances of classes
+that extend markdown.Pattern. Each pattern object uses a single regular
+expression and needs to support the following methods:
+
+ pattern.getCompiledRegExp() # returns a regular expression
+
+ pattern.handleMatch(m) # takes a match object and returns
+ # an ElementTree element or just plain text
+
+All of python markdown's built-in patterns subclass from Pattern,
+but you can add additional patterns that don't.
+
+Also note that all the regular expressions used by inline must
+capture the whole block. For this reason, they all start with
+'^(.*)' and end with '(.*)!'. In case with built-in expression
+Pattern takes care of adding the "^(.*)" and "(.*)!".
+
+Finally, the order in which regular expressions are applied is very
+important - e.g. if we first replace http://.../ links with tags
+and _then_ try to replace inline html, we would end up with a mess.
+So, we apply the expressions in the following order:
+
+* escape and backticks have to go before everything else, so
+ that we can preempt any markdown patterns by escaping them.
+
+* then we handle auto-links (must be done before inline html)
+
+* then we handle inline HTML. At this point we will simply
+ replace all inline HTML strings with a placeholder and add
+ the actual HTML to a hash.
+
+* then inline images (must be done before links)
+
+* then bracketed links, first regular then reference-style
+
+* finally we apply strong and emphasis
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+try:
+ from urllib.parse import urlparse, urlunparse
+except ImportError:
+ from urlparse import urlparse, urlunparse
+try:
+ from html import entities
+except ImportError:
+ import htmlentitydefs as entities
+
+
+def build_inlinepatterns(md_instance, **kwargs):
+ """ Build the default set of inline patterns for Markdown. """
+ inlinePatterns = odict.OrderedDict()
+ inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
+ inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
+ inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
+ inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
+ inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
+ inlinePatterns["image_reference"] = \
+ ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
+ inlinePatterns["short_reference"] = \
+ ReferencePattern(SHORT_REF_RE, md_instance)
+ inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
+ inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
+ inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
+ if md_instance.safeMode != 'escape':
+ inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
+ inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
+ inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
+ inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
+ inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
+ inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
+ if md_instance.smart_emphasis:
+ inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
+ else:
+ inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
+ return inlinePatterns
+
+"""
+The actual regular expressions for patterns
+-----------------------------------------------------------------------------
+"""
+
+NOBRACKET = r'[^\]\[]*'
+BRK = ( r'\[('
+ + (NOBRACKET + r'(\[')*6
+ + (NOBRACKET+ r'\])*')*6
+ + NOBRACKET + r')\]' )
+NOIMG = r'(?|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
+# [text](url) or [text]() or [text](url "title")
+
+IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
+#  or 
+REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3]
+SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google]
+IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2]
+NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
+AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' #
+AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' #
+
+HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
+ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
+LINE_BREAK_RE = r' \n' # two spaces at end of line
+
+
+def dequote(string):
+ """Remove quotes from around a string."""
+ if ( ( string.startswith('"') and string.endswith('"'))
+ or (string.startswith("'") and string.endswith("'")) ):
+ return string[1:-1]
+ else:
+ return string
+
+ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
+
+def handleAttributes(text, parent):
+ """Set values of an element based on attribute definitions ({@id=123})."""
+ def attributeCallback(match):
+ parent.set(match.group(1), match.group(2).replace('\n', ' '))
+ return ATTR_RE.sub(attributeCallback, text)
+
+
+"""
+The pattern classes
+-----------------------------------------------------------------------------
+"""
+
+class Pattern(object):
+ """Base class that inline patterns subclass. """
+
+ def __init__(self, pattern, markdown_instance=None):
+ """
+        Create an instance of an inline pattern.
+
+ Keyword arguments:
+
+ * pattern: A regular expression that matches a pattern
+
+ """
+ self.pattern = pattern
+ self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
+ re.DOTALL | re.UNICODE)
+
+ # Api for Markdown to pass safe_mode into instance
+ self.safe_mode = False
+ if markdown_instance:
+ self.markdown = markdown_instance
+
+ def getCompiledRegExp(self):
+ """ Return a compiled regular expression. """
+ return self.compiled_re
+
+ def handleMatch(self, m):
+ """Return a ElementTree element from the given match.
+
+ Subclasses should override this method.
+
+ Keyword arguments:
+
+ * m: A re match object containing a match of the pattern.
+
+ """
+ pass
+
+ def type(self):
+ """ Return class name, to define pattern type """
+ return self.__class__.__name__
+
+ def unescape(self, text):
+ """ Return unescaped text given text with an inline placeholder. """
+ try:
+ stash = self.markdown.treeprocessors['inline'].stashed_nodes
+ except KeyError:
+ return text
+ def itertext(el):
+ ' Reimplement Element.itertext for older python versions '
+ tag = el.tag
+ if not isinstance(tag, util.string_type) and tag is not None:
+ return
+ if el.text:
+ yield el.text
+ for e in el:
+ for s in itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
+ def get_stash(m):
+ id = m.group(1)
+ if id in stash:
+ value = stash.get(id)
+ if isinstance(value, util.string_type):
+ return value
+ else:
+ # An etree Element - return text content only
+ return ''.join(itertext(value))
+ return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
+
+
+class SimpleTextPattern(Pattern):
+ """ Return a simple text of group(2) of a Pattern. """
+ def handleMatch(self, m):
+ text = m.group(2)
+ if text == util.INLINE_PLACEHOLDER_PREFIX:
+ return None
+ return text
+
+
+class EscapePattern(Pattern):
+ """ Return an escaped character. """
+
+ def handleMatch(self, m):
+ char = m.group(2)
+ if char in self.markdown.ESCAPED_CHARS:
+ return '%s%s%s' % (util.STX, ord(char), util.ETX)
+ else:
+ return None
+
+
+class SimpleTagPattern(Pattern):
+ """
+ Return element of type `tag` with a text attribute of group(3)
+ of a Pattern.
+
+ """
+ def __init__ (self, pattern, tag):
+ Pattern.__init__(self, pattern)
+ self.tag = tag
+
+ def handleMatch(self, m):
+ el = util.etree.Element(self.tag)
+ el.text = m.group(3)
+ return el
+
+
+class SubstituteTagPattern(SimpleTagPattern):
+ """ Return an element of type `tag` with no children. """
+ def handleMatch (self, m):
+ return util.etree.Element(self.tag)
+
+
+class BacktickPattern(Pattern):
+ """ Return a `` element containing the matching text. """
+ def __init__ (self, pattern):
+ Pattern.__init__(self, pattern)
+ self.tag = "code"
+
+ def handleMatch(self, m):
+ el = util.etree.Element(self.tag)
+ el.text = util.AtomicString(m.group(3).strip())
+ return el
+
+
+class DoubleTagPattern(SimpleTagPattern):
+ """Return a ElementTree element nested in tag2 nested in tag1.
+
+ Useful for strong emphasis etc.
+
+ """
+ def handleMatch(self, m):
+ tag1, tag2 = self.tag.split(",")
+ el1 = util.etree.Element(tag1)
+ el2 = util.etree.SubElement(el1, tag2)
+ el2.text = m.group(3)
+ return el1
+
+
+class HtmlPattern(Pattern):
+ """ Store raw inline html and return a placeholder. """
+ def handleMatch (self, m):
+ rawhtml = self.unescape(m.group(2))
+ place_holder = self.markdown.htmlStash.store(rawhtml)
+ return place_holder
+
+ def unescape(self, text):
+ """ Return unescaped text given text with an inline placeholder. """
+ try:
+ stash = self.markdown.treeprocessors['inline'].stashed_nodes
+ except KeyError:
+ return text
+ def get_stash(m):
+ id = m.group(1)
+ value = stash.get(id)
+ if value is not None:
+ try:
+ return self.markdown.serializer(value)
+ except:
+ return '\%s' % value
+
+ return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
+
+
+class LinkPattern(Pattern):
+ """ Return a link element from the given match. """
+ def handleMatch(self, m):
+ el = util.etree.Element("a")
+ el.text = m.group(2)
+ title = m.group(13)
+ href = m.group(9)
+
+ if href:
+ if href[0] == "<":
+ href = href[1:-1]
+ el.set("href", self.sanitize_url(self.unescape(href.strip())))
+ else:
+ el.set("href", "")
+
+ if title:
+ title = dequote(self.unescape(title))
+ el.set("title", title)
+ return el
+
+ def sanitize_url(self, url):
+ """
+ Sanitize a url against xss attacks in "safe_mode".
+
+ Rather than specifically blacklisting `javascript:alert("XSS")` and all
+ its aliases (see ), we whitelist known
+ safe url formats. Most urls contain a network location, however some
+ are known not to (i.e.: mailto links). Script urls do not contain a
+ location. Additionally, for `javascript:...`, the scheme would be
+ "javascript" but some aliases will appear to `urlparse()` to have no
+ scheme. On top of that relative links (i.e.: "foo/bar.html") have no
+ scheme. Therefore we must check "path", "parameters", "query" and
+ "fragment" for any literal colons. We don't check "scheme" for colons
+ because it *should* never have any and "netloc" must allow the form:
+ `username:password@host:port`.
+
+ """
+ if not self.markdown.safeMode:
+            # Return immediately bypassing parsing.
+ return url
+
+ try:
+ scheme, netloc, path, params, query, fragment = url = urlparse(url)
+ except ValueError:
+ # Bad url - so bad it couldn't be parsed.
+ return ''
+
+ locless_schemes = ['', 'mailto', 'news']
+ allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
+ if scheme not in allowed_schemes:
+ # Not a known (allowed) scheme. Not safe.
+ return ''
+
+ if netloc == '' and scheme not in locless_schemes:
+ # This should not happen. Treat as suspect.
+ return ''
+
+ for part in url[2:]:
+ if ":" in part:
+ # A colon in "path", "parameters", "query" or "fragment" is suspect.
+ return ''
+
+ # Url passes all tests. Return url as-is.
+ return urlunparse(url)
+
+class ImagePattern(LinkPattern):
+ """ Return a img element from the given match. """
+ def handleMatch(self, m):
+ el = util.etree.Element("img")
+ src_parts = m.group(9).split()
+ if src_parts:
+ src = src_parts[0]
+ if src[0] == "<" and src[-1] == ">":
+ src = src[1:-1]
+ el.set('src', self.sanitize_url(self.unescape(src)))
+ else:
+ el.set('src', "")
+ if len(src_parts) > 1:
+ el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
+
+ if self.markdown.enable_attributes:
+ truealt = handleAttributes(m.group(2), el)
+ else:
+ truealt = m.group(2)
+
+ el.set('alt', self.unescape(truealt))
+ return el
+
+class ReferencePattern(LinkPattern):
+ """ Match to a stored reference and return link element. """
+
+ NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
+
+ def handleMatch(self, m):
+ try:
+ id = m.group(9).lower()
+ except IndexError:
+ id = None
+ if not id:
+ # if we got something like "[Google][]" or "[Goggle]"
+ # we'll use "google" as the id
+ id = m.group(2).lower()
+
+ # Clean up linebreaks in id
+ id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
+ if not id in self.markdown.references: # ignore undefined refs
+ return None
+ href, title = self.markdown.references[id]
+
+ text = m.group(2)
+ return self.makeTag(href, title, text)
+
+ def makeTag(self, href, title, text):
+ el = util.etree.Element('a')
+
+ el.set('href', self.sanitize_url(href))
+ if title:
+ el.set('title', title)
+
+ el.text = text
+ return el
+
+
+class ImageReferencePattern(ReferencePattern):
+ """ Match to a stored reference and return img element. """
+ def makeTag(self, href, title, text):
+ el = util.etree.Element("img")
+ el.set("src", self.sanitize_url(href))
+ if title:
+ el.set("title", title)
+
+ if self.markdown.enable_attributes:
+ text = handleAttributes(text, el)
+
+ el.set("alt", self.unescape(text))
+ return el
+
+
+class AutolinkPattern(Pattern):
+ """ Return a link Element given an autolink (``). """
+ def handleMatch(self, m):
+ el = util.etree.Element("a")
+ el.set('href', self.unescape(m.group(2)))
+ el.text = util.AtomicString(m.group(2))
+ return el
+
+class AutomailPattern(Pattern):
+ """
+ Return a mailto link Element given an automail link (``).
+ """
+ def handleMatch(self, m):
+ el = util.etree.Element('a')
+ email = self.unescape(m.group(2))
+ if email.startswith("mailto:"):
+ email = email[len("mailto:"):]
+
+ def codepoint2name(code):
+ """Return entity definition by code, or the code if not defined."""
+ entity = entities.codepoint2name.get(code)
+ if entity:
+ return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
+ else:
+ return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
+
+ letters = [codepoint2name(ord(letter)) for letter in email]
+ el.text = util.AtomicString(''.join(letters))
+
+ mailto = "mailto:" + email
+ mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
+ ord(letter) for letter in mailto])
+ el.set('href', mailto)
+ return el
+
diff --git a/awx/lib/site-packages/markdown/odict.py b/awx/lib/site-packages/markdown/odict.py
index a0b0635b02..68c12593f7 100644
--- a/awx/lib/site-packages/markdown/odict.py
+++ b/awx/lib/site-packages/markdown/odict.py
@@ -1,189 +1,189 @@
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from . import util
-
-from copy import deepcopy
-
-class OrderedDict(dict):
- """
- A dictionary that keeps its keys in the order in which they're inserted.
-
- Copied from Django's SortedDict with some modifications.
-
- """
- def __new__(cls, *args, **kwargs):
- instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
- instance.keyOrder = []
- return instance
-
- def __init__(self, data=None):
- if data is None or isinstance(data, dict):
- data = data or []
- super(OrderedDict, self).__init__(data)
- self.keyOrder = list(data) if data else []
- else:
- super(OrderedDict, self).__init__()
- super_set = super(OrderedDict, self).__setitem__
- for key, value in data:
- # Take the ordering from first key
- if key not in self:
- self.keyOrder.append(key)
- # But override with last value in data (dict() does this)
- super_set(key, value)
-
- def __deepcopy__(self, memo):
- return self.__class__([(key, deepcopy(value, memo))
- for key, value in self.items()])
-
- def __copy__(self):
- # The Python's default copy implementation will alter the state
- # of self. The reason for this seems complex but is likely related to
- # subclassing dict.
- return self.copy()
-
- def __setitem__(self, key, value):
- if key not in self:
- self.keyOrder.append(key)
- super(OrderedDict, self).__setitem__(key, value)
-
- def __delitem__(self, key):
- super(OrderedDict, self).__delitem__(key)
- self.keyOrder.remove(key)
-
- def __iter__(self):
- return iter(self.keyOrder)
-
- def __reversed__(self):
- return reversed(self.keyOrder)
-
- def pop(self, k, *args):
- result = super(OrderedDict, self).pop(k, *args)
- try:
- self.keyOrder.remove(k)
- except ValueError:
- # Key wasn't in the dictionary in the first place. No problem.
- pass
- return result
-
- def popitem(self):
- result = super(OrderedDict, self).popitem()
- self.keyOrder.remove(result[0])
- return result
-
- def _iteritems(self):
- for key in self.keyOrder:
- yield key, self[key]
-
- def _iterkeys(self):
- for key in self.keyOrder:
- yield key
-
- def _itervalues(self):
- for key in self.keyOrder:
- yield self[key]
-
- if util.PY3:
- items = _iteritems
- keys = _iterkeys
- values = _itervalues
- else:
- iteritems = _iteritems
- iterkeys = _iterkeys
- itervalues = _itervalues
-
- def items(self):
- return [(k, self[k]) for k in self.keyOrder]
-
- def keys(self):
- return self.keyOrder[:]
-
- def values(self):
- return [self[k] for k in self.keyOrder]
-
- def update(self, dict_):
- for k in dict_:
- self[k] = dict_[k]
-
- def setdefault(self, key, default):
- if key not in self:
- self.keyOrder.append(key)
- return super(OrderedDict, self).setdefault(key, default)
-
- def value_for_index(self, index):
- """Returns the value of the item at the given zero-based index."""
- return self[self.keyOrder[index]]
-
- def insert(self, index, key, value):
- """Inserts the key, value pair before the item with the given index."""
- if key in self.keyOrder:
- n = self.keyOrder.index(key)
- del self.keyOrder[n]
- if n < index:
- index -= 1
- self.keyOrder.insert(index, key)
- super(OrderedDict, self).__setitem__(key, value)
-
- def copy(self):
- """Returns a copy of this object."""
- # This way of initializing the copy means it works for subclasses, too.
- return self.__class__(self)
-
- def __repr__(self):
- """
- Replaces the normal dict.__repr__ with a version that returns the keys
- in their Ordered order.
- """
- return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self._iteritems()])
-
- def clear(self):
- super(OrderedDict, self).clear()
- self.keyOrder = []
-
- def index(self, key):
- """ Return the index of a given key. """
- try:
- return self.keyOrder.index(key)
- except ValueError:
- raise ValueError("Element '%s' was not found in OrderedDict" % key)
-
- def index_for_location(self, location):
- """ Return index or None for a given location. """
- if location == '_begin':
- i = 0
- elif location == '_end':
- i = None
- elif location.startswith('<') or location.startswith('>'):
- i = self.index(location[1:])
- if location.startswith('>'):
- if i >= len(self):
- # last item
- i = None
- else:
- i += 1
- else:
- raise ValueError('Not a valid location: "%s". Location key '
- 'must start with a ">" or "<".' % location)
- return i
-
- def add(self, key, value, location):
- """ Insert by key location. """
- i = self.index_for_location(location)
- if i is not None:
- self.insert(i, key, value)
- else:
- self.__setitem__(key, value)
-
- def link(self, key, location):
- """ Change location of an existing item. """
- n = self.keyOrder.index(key)
- del self.keyOrder[n]
- try:
- i = self.index_for_location(location)
- if i is not None:
- self.keyOrder.insert(i, key)
- else:
- self.keyOrder.append(key)
- except Exception as e:
- # restore to prevent data loss and reraise
- self.keyOrder.insert(n, key)
- raise e
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from . import util
+
+from copy import deepcopy
+
+class OrderedDict(dict):
+ """
+ A dictionary that keeps its keys in the order in which they're inserted.
+
+ Copied from Django's SortedDict with some modifications.
+
+ """
+ def __new__(cls, *args, **kwargs):
+ instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
+ instance.keyOrder = []
+ return instance
+
+ def __init__(self, data=None):
+ if data is None or isinstance(data, dict):
+ data = data or []
+ super(OrderedDict, self).__init__(data)
+ self.keyOrder = list(data) if data else []
+ else:
+ super(OrderedDict, self).__init__()
+ super_set = super(OrderedDict, self).__setitem__
+ for key, value in data:
+ # Take the ordering from first key
+ if key not in self:
+ self.keyOrder.append(key)
+ # But override with last value in data (dict() does this)
+ super_set(key, value)
+
+ def __deepcopy__(self, memo):
+ return self.__class__([(key, deepcopy(value, memo))
+ for key, value in self.items()])
+
+ def __copy__(self):
+ # The Python's default copy implementation will alter the state
+ # of self. The reason for this seems complex but is likely related to
+ # subclassing dict.
+ return self.copy()
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.keyOrder.append(key)
+ super(OrderedDict, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ super(OrderedDict, self).__delitem__(key)
+ self.keyOrder.remove(key)
+
+ def __iter__(self):
+ return iter(self.keyOrder)
+
+ def __reversed__(self):
+ return reversed(self.keyOrder)
+
+ def pop(self, k, *args):
+ result = super(OrderedDict, self).pop(k, *args)
+ try:
+ self.keyOrder.remove(k)
+ except ValueError:
+ # Key wasn't in the dictionary in the first place. No problem.
+ pass
+ return result
+
+ def popitem(self):
+ result = super(OrderedDict, self).popitem()
+ self.keyOrder.remove(result[0])
+ return result
+
+ def _iteritems(self):
+ for key in self.keyOrder:
+ yield key, self[key]
+
+ def _iterkeys(self):
+ for key in self.keyOrder:
+ yield key
+
+ def _itervalues(self):
+ for key in self.keyOrder:
+ yield self[key]
+
+ if util.PY3:
+ items = _iteritems
+ keys = _iterkeys
+ values = _itervalues
+ else:
+ iteritems = _iteritems
+ iterkeys = _iterkeys
+ itervalues = _itervalues
+
+ def items(self):
+ return [(k, self[k]) for k in self.keyOrder]
+
+ def keys(self):
+ return self.keyOrder[:]
+
+ def values(self):
+ return [self[k] for k in self.keyOrder]
+
+ def update(self, dict_):
+ for k in dict_:
+ self[k] = dict_[k]
+
+ def setdefault(self, key, default):
+ if key not in self:
+ self.keyOrder.append(key)
+ return super(OrderedDict, self).setdefault(key, default)
+
+ def value_for_index(self, index):
+ """Returns the value of the item at the given zero-based index."""
+ return self[self.keyOrder[index]]
+
+ def insert(self, index, key, value):
+ """Inserts the key, value pair before the item with the given index."""
+ if key in self.keyOrder:
+ n = self.keyOrder.index(key)
+ del self.keyOrder[n]
+ if n < index:
+ index -= 1
+ self.keyOrder.insert(index, key)
+ super(OrderedDict, self).__setitem__(key, value)
+
+ def copy(self):
+ """Returns a copy of this object."""
+ # This way of initializing the copy means it works for subclasses, too.
+ return self.__class__(self)
+
+ def __repr__(self):
+ """
+ Replaces the normal dict.__repr__ with a version that returns the keys
+ in their Ordered order.
+ """
+ return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self._iteritems()])
+
+ def clear(self):
+ super(OrderedDict, self).clear()
+ self.keyOrder = []
+
+ def index(self, key):
+ """ Return the index of a given key. """
+ try:
+ return self.keyOrder.index(key)
+ except ValueError:
+ raise ValueError("Element '%s' was not found in OrderedDict" % key)
+
+ def index_for_location(self, location):
+ """ Return index or None for a given location. """
+ if location == '_begin':
+ i = 0
+ elif location == '_end':
+ i = None
+ elif location.startswith('<') or location.startswith('>'):
+ i = self.index(location[1:])
+ if location.startswith('>'):
+ if i >= len(self):
+ # last item
+ i = None
+ else:
+ i += 1
+ else:
+ raise ValueError('Not a valid location: "%s". Location key '
+ 'must start with a ">" or "<".' % location)
+ return i
+
+ def add(self, key, value, location):
+ """ Insert by key location. """
+ i = self.index_for_location(location)
+ if i is not None:
+ self.insert(i, key, value)
+ else:
+ self.__setitem__(key, value)
+
+ def link(self, key, location):
+ """ Change location of an existing item. """
+ n = self.keyOrder.index(key)
+ del self.keyOrder[n]
+ try:
+ i = self.index_for_location(location)
+ if i is not None:
+ self.keyOrder.insert(i, key)
+ else:
+ self.keyOrder.append(key)
+ except Exception as e:
+ # restore to prevent data loss and reraise
+ self.keyOrder.insert(n, key)
+ raise e
diff --git a/awx/lib/site-packages/markdown/postprocessors.py b/awx/lib/site-packages/markdown/postprocessors.py
index 0d20723b14..5f3f032c15 100644
--- a/awx/lib/site-packages/markdown/postprocessors.py
+++ b/awx/lib/site-packages/markdown/postprocessors.py
@@ -1,104 +1,104 @@
-"""
-POST-PROCESSORS
-=============================================================================
-
-Markdown also allows post-processors, which are similar to preprocessors in
-that they need to implement a "run" method. However, they are run after core
-processing.
-
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-
-
-def build_postprocessors(md_instance, **kwargs):
- """ Build the default postprocessors for Markdown. """
- postprocessors = odict.OrderedDict()
- postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
- postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
- postprocessors["unescape"] = UnescapePostprocessor()
- return postprocessors
-
-
-class Postprocessor(util.Processor):
- """
- Postprocessors are run after the ElementTree it converted back into text.
-
- Each Postprocessor implements a "run" method that takes a pointer to a
- text string, modifies it as necessary and returns a text string.
-
- Postprocessors must extend markdown.Postprocessor.
-
- """
-
- def run(self, text):
- """
- Subclasses of Postprocessor should implement a `run` method, which
- takes the html document as a single text string and returns a
- (possibly modified) string.
-
- """
- pass
-
-
-class RawHtmlPostprocessor(Postprocessor):
- """ Restore raw html to the document. """
-
- def run(self, text):
- """ Iterate over html stash and restore "safe" html. """
- for i in range(self.markdown.htmlStash.html_counter):
- html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
- if self.markdown.safeMode and not safe:
- if str(self.markdown.safeMode).lower() == 'escape':
- html = self.escape(html)
- elif str(self.markdown.safeMode).lower() == 'remove':
- html = ''
- else:
- html = self.markdown.html_replacement_text
- if self.isblocklevel(html) and (safe or not self.markdown.safeMode):
- text = text.replace("%s
" %
- (self.markdown.htmlStash.get_placeholder(i)),
- html + "\n")
- text = text.replace(self.markdown.htmlStash.get_placeholder(i),
- html)
- return text
-
- def escape(self, html):
- """ Basic html escaping """
- html = html.replace('&', '&')
- html = html.replace('<', '<')
- html = html.replace('>', '>')
- return html.replace('"', '"')
-
- def isblocklevel(self, html):
- m = re.match(r'^\<\/?([^ >]+)', html)
- if m:
- if m.group(1)[0] in ('!', '?', '@', '%'):
- # Comment, php etc...
- return True
- return util.isBlockLevel(m.group(1))
- return False
-
-
-class AndSubstitutePostprocessor(Postprocessor):
- """ Restore valid entities """
-
- def run(self, text):
- text = text.replace(util.AMP_SUBSTITUTE, "&")
- return text
-
-
-class UnescapePostprocessor(Postprocessor):
- """ Restore escaped chars """
-
- RE = re.compile('%s(\d+)%s' % (util.STX, util.ETX))
-
- def unescape(self, m):
- return util.int2str(int(m.group(1)))
-
- def run(self, text):
- return self.RE.sub(self.unescape, text)
+"""
+POST-PROCESSORS
+=============================================================================
+
+Markdown also allows post-processors, which are similar to preprocessors in
+that they need to implement a "run" method. However, they are run after core
+processing.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+
+
+def build_postprocessors(md_instance, **kwargs):
+ """ Build the default postprocessors for Markdown. """
+ postprocessors = odict.OrderedDict()
+ postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
+ postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
+ postprocessors["unescape"] = UnescapePostprocessor()
+ return postprocessors
+
+
+class Postprocessor(util.Processor):
+ """
+    Postprocessors are run after the ElementTree is converted back into text.
+
+ Each Postprocessor implements a "run" method that takes a pointer to a
+ text string, modifies it as necessary and returns a text string.
+
+ Postprocessors must extend markdown.Postprocessor.
+
+ """
+
+ def run(self, text):
+ """
+ Subclasses of Postprocessor should implement a `run` method, which
+ takes the html document as a single text string and returns a
+ (possibly modified) string.
+
+ """
+ pass
+
+
+class RawHtmlPostprocessor(Postprocessor):
+ """ Restore raw html to the document. """
+
+ def run(self, text):
+ """ Iterate over html stash and restore "safe" html. """
+ for i in range(self.markdown.htmlStash.html_counter):
+ html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
+ if self.markdown.safeMode and not safe:
+ if str(self.markdown.safeMode).lower() == 'escape':
+ html = self.escape(html)
+ elif str(self.markdown.safeMode).lower() == 'remove':
+ html = ''
+ else:
+ html = self.markdown.html_replacement_text
+ if self.isblocklevel(html) and (safe or not self.markdown.safeMode):
+ text = text.replace("%s
" %
+ (self.markdown.htmlStash.get_placeholder(i)),
+ html + "\n")
+ text = text.replace(self.markdown.htmlStash.get_placeholder(i),
+ html)
+ return text
+
+ def escape(self, html):
+ """ Basic html escaping """
+ html = html.replace('&', '&')
+ html = html.replace('<', '<')
+ html = html.replace('>', '>')
+ return html.replace('"', '"')
+
+ def isblocklevel(self, html):
+ m = re.match(r'^\<\/?([^ >]+)', html)
+ if m:
+ if m.group(1)[0] in ('!', '?', '@', '%'):
+ # Comment, php etc...
+ return True
+ return util.isBlockLevel(m.group(1))
+ return False
+
+
+class AndSubstitutePostprocessor(Postprocessor):
+ """ Restore valid entities """
+
+ def run(self, text):
+ text = text.replace(util.AMP_SUBSTITUTE, "&")
+ return text
+
+
+class UnescapePostprocessor(Postprocessor):
+ """ Restore escaped chars """
+
+ RE = re.compile('%s(\d+)%s' % (util.STX, util.ETX))
+
+ def unescape(self, m):
+ return util.int2str(int(m.group(1)))
+
+ def run(self, text):
+ return self.RE.sub(self.unescape, text)
diff --git a/awx/lib/site-packages/markdown/preprocessors.py b/awx/lib/site-packages/markdown/preprocessors.py
index 3972731cdd..5bfca55530 100644
--- a/awx/lib/site-packages/markdown/preprocessors.py
+++ b/awx/lib/site-packages/markdown/preprocessors.py
@@ -1,341 +1,341 @@
-"""
-PRE-PROCESSORS
-=============================================================================
-
-Preprocessors work on source text before we start doing anything too
-complicated.
-"""
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-from . import odict
-import re
-
-
-def build_preprocessors(md_instance, **kwargs):
- """ Build the default set of preprocessors used by Markdown. """
- preprocessors = odict.OrderedDict()
- preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
- if md_instance.safeMode != 'escape':
- preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
- preprocessors["reference"] = ReferencePreprocessor(md_instance)
- return preprocessors
-
-
-class Preprocessor(util.Processor):
- """
- Preprocessors are run after the text is broken into lines.
-
- Each preprocessor implements a "run" method that takes a pointer to a
- list of lines of the document, modifies it as necessary and returns
- either the same pointer or a pointer to a new list.
-
- Preprocessors must extend markdown.Preprocessor.
-
- """
- def run(self, lines):
- """
- Each subclass of Preprocessor should override the `run` method, which
- takes the document as a list of strings split by newlines and returns
- the (possibly modified) list of lines.
-
- """
- pass
-
-
-class NormalizeWhitespace(Preprocessor):
- """ Normalize whitespace for consistant parsing. """
-
- def run(self, lines):
- source = '\n'.join(lines)
- source = source.replace(util.STX, "").replace(util.ETX, "")
- source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
- source = source.expandtabs(self.markdown.tab_length)
- source = re.sub(r'(?<=\n) +\n', '\n', source)
- return source.split('\n')
-
-
-class HtmlBlockPreprocessor(Preprocessor):
- """Remove html blocks from the text and store them for later retrieval."""
-
- right_tag_patterns = ["%s>", "%s>"]
- attrs_pattern = r"""
- \s+(?P[^>"'/= ]+)=(?P['"])(?P.*?)(?P=q) # attr="value"
- | # OR
- \s+(?P[^>"'/= ]+)=(?P[^> ]+) # attr=value
- | # OR
- \s+(?P[^>"'/= ]+) # attr
- """
- left_tag_pattern = r'^\<(?P[^> ]+)(?P(%s)*)\s*\/?\>?' % attrs_pattern
- attrs_re = re.compile(attrs_pattern, re.VERBOSE)
- left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
- markdown_in_raw = False
-
- def _get_left_tag(self, block):
- m = self.left_tag_re.match(block)
- if m:
- tag = m.group('tag')
- raw_attrs = m.group('attrs')
- attrs = {}
- if raw_attrs:
- for ma in self.attrs_re.finditer(raw_attrs):
- if ma.group('attr'):
- if ma.group('value'):
- attrs[ma.group('attr').strip()] = ma.group('value')
- else:
- attrs[ma.group('attr').strip()] = ""
- elif ma.group('attr1'):
- if ma.group('value1'):
- attrs[ma.group('attr1').strip()] = ma.group('value1')
- else:
- attrs[ma.group('attr1').strip()] = ""
- elif ma.group('attr2'):
- attrs[ma.group('attr2').strip()] = ""
- return tag, len(m.group(0)), attrs
- else:
- tag = block[1:].split(">", 1)[0].lower()
- return tag, len(tag)+2, {}
-
- def _recursive_tagfind(self, ltag, rtag, start_index, block):
- while 1:
- i = block.find(rtag, start_index)
- if i == -1:
- return -1
- j = block.find(ltag, start_index)
- # if no ltag, or rtag found before another ltag, return index
- if (j > i or j == -1):
- return i + len(rtag)
- # another ltag found before rtag, use end of ltag as starting
- # point and search again
- j = block.find('>', j)
- start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
- if start_index == -1:
- # HTML potentially malformed- ltag has no corresponding
- # rtag
- return -1
-
- def _get_right_tag(self, left_tag, left_index, block):
- for p in self.right_tag_patterns:
- tag = p % left_tag
- i = self._recursive_tagfind("<%s" % left_tag, tag, left_index, block)
- if i > 2:
- return tag.lstrip("<").rstrip(">"), i
- return block.rstrip()[-left_index:-1].lower(), len(block)
-
- def _equal_tags(self, left_tag, right_tag):
- if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
- return True
- if ("/" + left_tag) == right_tag:
- return True
- if (right_tag == "--" and left_tag == "--"):
- return True
- elif left_tag == right_tag[1:] \
- and right_tag[0] == "/":
- return True
- else:
- return False
-
- def _is_oneliner(self, tag):
- return (tag in ['hr', 'hr/'])
-
- def _stringindex_to_listindex(self, stringindex, items):
- """
- Same effect as concatenating the strings in items,
- finding the character to which stringindex refers in that string,
- and returning the item in which that character resides.
- """
- items.append('dummy')
- i, count = 0, 0
- while count <= stringindex:
- count += len(items[i])
- i += 1
- return i - 1
-
- def _nested_markdown_in_html(self, items):
- """Find and process html child elements of the given element block."""
- for i, item in enumerate(items):
- if self.left_tag_re.match(item):
- left_tag, left_index, attrs = \
- self._get_left_tag(''.join(items[i:]))
- right_tag, data_index = self._get_right_tag(
- left_tag, left_index, ''.join(items[i:]))
- right_listindex = \
- self._stringindex_to_listindex(data_index, items[i:]) + i
- if 'markdown' in attrs.keys():
- items[i] = items[i][left_index:] # remove opening tag
- placeholder = self.markdown.htmlStash.store_tag(
- left_tag, attrs, i + 1, right_listindex + 1)
- items.insert(i, placeholder)
- if len(items) - right_listindex <= 1: # last nest, no tail
- right_listindex -= 1
- items[right_listindex] = items[right_listindex][
- :-len(right_tag) - 2] # remove closing tag
- else: # raw html
- if len(items) - right_listindex <= 1: # last element
- right_listindex -= 1
- placeholder = self.markdown.htmlStash.store('\n\n'.join(
- items[i:right_listindex]))
- del items[i:right_listindex]
- items.insert(i, placeholder)
- return items
-
- def run(self, lines):
- text = "\n".join(lines)
- new_blocks = []
- text = text.rsplit("\n\n")
- items = []
- left_tag = ''
- right_tag = ''
- in_tag = False # flag
-
- while text:
- block = text[0]
- if block.startswith("\n"):
- block = block[1:]
- text = text[1:]
-
- if block.startswith("\n"):
- block = block[1:]
-
- if not in_tag:
- if block.startswith("<") and len(block.strip()) > 1:
-
- if block[1:4] == "!--":
- # is a comment block
- left_tag, left_index, attrs = "--", 2, {}
- else:
- left_tag, left_index, attrs = self._get_left_tag(block)
- right_tag, data_index = self._get_right_tag(left_tag,
- left_index,
- block)
- # keep checking conditions below and maybe just append
-
- if data_index < len(block) \
- and (util.isBlockLevel(left_tag)
- or left_tag == '--'):
- text.insert(0, block[data_index:])
- block = block[:data_index]
-
- if not (util.isBlockLevel(left_tag) \
- or block[1] in ["!", "?", "@", "%"]):
- new_blocks.append(block)
- continue
-
- if self._is_oneliner(left_tag):
- new_blocks.append(block.strip())
- continue
-
- if block.rstrip().endswith(">") \
- and self._equal_tags(left_tag, right_tag):
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- block = block[left_index:-len(right_tag) - 2]
- new_blocks.append(self.markdown.htmlStash.
- store_tag(left_tag, attrs, 0, 2))
- new_blocks.extend([block])
- else:
- new_blocks.append(
- self.markdown.htmlStash.store(block.strip()))
- continue
- else:
- # if is block level tag and is not complete
- if (not self._equal_tags(left_tag, right_tag)) and \
- (util.isBlockLevel(left_tag) or left_tag == "--"):
- items.append(block.strip())
- in_tag = True
- else:
- new_blocks.append(
- self.markdown.htmlStash.store(block.strip()))
-
- continue
-
- else:
- new_blocks.append(block)
-
- else:
- items.append(block)
-
- right_tag, data_index = self._get_right_tag(left_tag, 0, block)
-
- if self._equal_tags(left_tag, right_tag):
- # if find closing tag
-
- if data_index < len(block):
- # we have more text after right_tag
- items[-1] = block[:data_index]
- text.insert(0, block[data_index:])
-
- in_tag = False
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- items[0] = items[0][left_index:]
- items[-1] = items[-1][:-len(right_tag) - 2]
- if items[len(items) - 1]: # not a newline/empty string
- right_index = len(items) + 3
- else:
- right_index = len(items) + 2
- new_blocks.append(self.markdown.htmlStash.store_tag(
- left_tag, attrs, 0, right_index))
- placeholderslen = len(self.markdown.htmlStash.tag_data)
- new_blocks.extend(
- self._nested_markdown_in_html(items))
- nests = len(self.markdown.htmlStash.tag_data) - \
- placeholderslen
- self.markdown.htmlStash.tag_data[-1 - nests][
- 'right_index'] += nests - 2
- else:
- new_blocks.append(
- self.markdown.htmlStash.store('\n\n'.join(items)))
- items = []
-
- if items:
- if self.markdown_in_raw and 'markdown' in attrs.keys():
- items[0] = items[0][left_index:]
- items[-1] = items[-1][:-len(right_tag) - 2]
- if items[len(items) - 1]: # not a newline/empty string
- right_index = len(items) + 3
- else:
- right_index = len(items) + 2
- new_blocks.append(
- self.markdown.htmlStash.store_tag(
- left_tag, attrs, 0, right_index))
- placeholderslen = len(self.markdown.htmlStash.tag_data)
- new_blocks.extend(self._nested_markdown_in_html(items))
- nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
- self.markdown.htmlStash.tag_data[-1 - nests][
- 'right_index'] += nests - 2
- else:
- new_blocks.append(
- self.markdown.htmlStash.store('\n\n'.join(items)))
- new_blocks.append('\n')
-
- new_text = "\n\n".join(new_blocks)
- return new_text.split("\n")
-
-
-class ReferencePreprocessor(Preprocessor):
- """ Remove reference definitions from text and store for later use. """
-
- TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
- RE = re.compile(r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL)
- TITLE_RE = re.compile(r'^%s$' % TITLE)
-
- def run (self, lines):
- new_text = [];
- while lines:
- line = lines.pop(0)
- m = self.RE.match(line)
- if m:
- id = m.group(1).strip().lower()
- link = m.group(2).lstrip('<').rstrip('>')
- t = m.group(5) or m.group(6) or m.group(7)
- if not t:
- # Check next line for title
- tm = self.TITLE_RE.match(lines[0])
- if tm:
- lines.pop(0)
- t = tm.group(2) or tm.group(3) or tm.group(4)
- self.markdown.references[id] = (link, t)
- else:
- new_text.append(line)
-
- return new_text #+ "\n"
+"""
+PRE-PROCESSORS
+=============================================================================
+
+Preprocessors work on source text before we start doing anything too
+complicated.
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+from . import odict
+import re
+
+
+def build_preprocessors(md_instance, **kwargs):
+ """ Build the default set of preprocessors used by Markdown. """
+ preprocessors = odict.OrderedDict()
+ preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
+ if md_instance.safeMode != 'escape':
+ preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
+ preprocessors["reference"] = ReferencePreprocessor(md_instance)
+ return preprocessors
+
+
+class Preprocessor(util.Processor):
+ """
+ Preprocessors are run after the text is broken into lines.
+
+ Each preprocessor implements a "run" method that takes a pointer to a
+ list of lines of the document, modifies it as necessary and returns
+ either the same pointer or a pointer to a new list.
+
+ Preprocessors must extend markdown.Preprocessor.
+
+ """
+ def run(self, lines):
+ """
+ Each subclass of Preprocessor should override the `run` method, which
+ takes the document as a list of strings split by newlines and returns
+ the (possibly modified) list of lines.
+
+ """
+ pass
+
+
+class NormalizeWhitespace(Preprocessor):
+    """ Normalize whitespace for consistent parsing. """
+
+ def run(self, lines):
+ source = '\n'.join(lines)
+ source = source.replace(util.STX, "").replace(util.ETX, "")
+ source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
+ source = source.expandtabs(self.markdown.tab_length)
+ source = re.sub(r'(?<=\n) +\n', '\n', source)
+ return source.split('\n')
+
+
+class HtmlBlockPreprocessor(Preprocessor):
+ """Remove html blocks from the text and store them for later retrieval."""
+
+ right_tag_patterns = ["%s>", "%s>"]
+ attrs_pattern = r"""
+ \s+(?P[^>"'/= ]+)=(?P['"])(?P.*?)(?P=q) # attr="value"
+ | # OR
+ \s+(?P[^>"'/= ]+)=(?P[^> ]+) # attr=value
+ | # OR
+ \s+(?P[^>"'/= ]+) # attr
+ """
+ left_tag_pattern = r'^\<(?P[^> ]+)(?P(%s)*)\s*\/?\>?' % attrs_pattern
+ attrs_re = re.compile(attrs_pattern, re.VERBOSE)
+ left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
+ markdown_in_raw = False
+
+ def _get_left_tag(self, block):
+ m = self.left_tag_re.match(block)
+ if m:
+ tag = m.group('tag')
+ raw_attrs = m.group('attrs')
+ attrs = {}
+ if raw_attrs:
+ for ma in self.attrs_re.finditer(raw_attrs):
+ if ma.group('attr'):
+ if ma.group('value'):
+ attrs[ma.group('attr').strip()] = ma.group('value')
+ else:
+ attrs[ma.group('attr').strip()] = ""
+ elif ma.group('attr1'):
+ if ma.group('value1'):
+ attrs[ma.group('attr1').strip()] = ma.group('value1')
+ else:
+ attrs[ma.group('attr1').strip()] = ""
+ elif ma.group('attr2'):
+ attrs[ma.group('attr2').strip()] = ""
+ return tag, len(m.group(0)), attrs
+ else:
+ tag = block[1:].split(">", 1)[0].lower()
+ return tag, len(tag)+2, {}
+
+ def _recursive_tagfind(self, ltag, rtag, start_index, block):
+ while 1:
+ i = block.find(rtag, start_index)
+ if i == -1:
+ return -1
+ j = block.find(ltag, start_index)
+ # if no ltag, or rtag found before another ltag, return index
+ if (j > i or j == -1):
+ return i + len(rtag)
+ # another ltag found before rtag, use end of ltag as starting
+ # point and search again
+ j = block.find('>', j)
+ start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
+ if start_index == -1:
+ # HTML potentially malformed- ltag has no corresponding
+ # rtag
+ return -1
+
+ def _get_right_tag(self, left_tag, left_index, block):
+ for p in self.right_tag_patterns:
+ tag = p % left_tag
+ i = self._recursive_tagfind("<%s" % left_tag, tag, left_index, block)
+ if i > 2:
+ return tag.lstrip("<").rstrip(">"), i
+ return block.rstrip()[-left_index:-1].lower(), len(block)
+
+ def _equal_tags(self, left_tag, right_tag):
+ if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
+ return True
+ if ("/" + left_tag) == right_tag:
+ return True
+ if (right_tag == "--" and left_tag == "--"):
+ return True
+ elif left_tag == right_tag[1:] \
+ and right_tag[0] == "/":
+ return True
+ else:
+ return False
+
+ def _is_oneliner(self, tag):
+ return (tag in ['hr', 'hr/'])
+
+ def _stringindex_to_listindex(self, stringindex, items):
+ """
+ Same effect as concatenating the strings in items,
+ finding the character to which stringindex refers in that string,
+ and returning the index of the item in which that character resides.
+ """
+ items.append('dummy')
+ i, count = 0, 0
+ while count <= stringindex:
+ count += len(items[i])
+ i += 1
+ return i - 1
+
+ def _nested_markdown_in_html(self, items):
+ """Find and process html child elements of the given element block."""
+ for i, item in enumerate(items):
+ if self.left_tag_re.match(item):
+ left_tag, left_index, attrs = \
+ self._get_left_tag(''.join(items[i:]))
+ right_tag, data_index = self._get_right_tag(
+ left_tag, left_index, ''.join(items[i:]))
+ right_listindex = \
+ self._stringindex_to_listindex(data_index, items[i:]) + i
+ if 'markdown' in attrs.keys():
+ items[i] = items[i][left_index:] # remove opening tag
+ placeholder = self.markdown.htmlStash.store_tag(
+ left_tag, attrs, i + 1, right_listindex + 1)
+ items.insert(i, placeholder)
+ if len(items) - right_listindex <= 1: # last nest, no tail
+ right_listindex -= 1
+ items[right_listindex] = items[right_listindex][
+ :-len(right_tag) - 2] # remove closing tag
+ else: # raw html
+ if len(items) - right_listindex <= 1: # last element
+ right_listindex -= 1
+ placeholder = self.markdown.htmlStash.store('\n\n'.join(
+ items[i:right_listindex + 1]))
+ del items[i:right_listindex + 1]
+ items.insert(i, placeholder)
+ return items
+
+ def run(self, lines):
+ text = "\n".join(lines)
+ new_blocks = []
+ text = text.rsplit("\n\n")
+ items = []
+ left_tag = ''
+ right_tag = ''
+ in_tag = False # flag
+
+ while text:
+ block = text[0]
+ if block.startswith("\n"):
+ block = block[1:]
+ text = text[1:]
+
+ if block.startswith("\n"):
+ block = block[1:]
+
+ if not in_tag:
+ if block.startswith("<") and len(block.strip()) > 1:
+
+ if block[1:4] == "!--":
+ # is a comment block
+ left_tag, left_index, attrs = "--", 2, {}
+ else:
+ left_tag, left_index, attrs = self._get_left_tag(block)
+ right_tag, data_index = self._get_right_tag(left_tag,
+ left_index,
+ block)
+ # keep checking conditions below and maybe just append
+
+ if data_index < len(block) \
+ and (util.isBlockLevel(left_tag)
+ or left_tag == '--'):
+ text.insert(0, block[data_index:])
+ block = block[:data_index]
+
+ if not (util.isBlockLevel(left_tag) \
+ or block[1] in ["!", "?", "@", "%"]):
+ new_blocks.append(block)
+ continue
+
+ if self._is_oneliner(left_tag):
+ new_blocks.append(block.strip())
+ continue
+
+ if block.rstrip().endswith(">") \
+ and self._equal_tags(left_tag, right_tag):
+ if self.markdown_in_raw and 'markdown' in attrs.keys():
+ block = block[left_index:-len(right_tag) - 2]
+ new_blocks.append(self.markdown.htmlStash.
+ store_tag(left_tag, attrs, 0, 2))
+ new_blocks.extend([block])
+ else:
+ new_blocks.append(
+ self.markdown.htmlStash.store(block.strip()))
+ continue
+ else:
+ # if is block level tag and is not complete
+ if (not self._equal_tags(left_tag, right_tag)) and \
+ (util.isBlockLevel(left_tag) or left_tag == "--"):
+ items.append(block.strip())
+ in_tag = True
+ else:
+ new_blocks.append(
+ self.markdown.htmlStash.store(block.strip()))
+
+ continue
+
+ else:
+ new_blocks.append(block)
+
+ else:
+ items.append(block)
+
+ right_tag, data_index = self._get_right_tag(left_tag, 0, block)
+
+ if self._equal_tags(left_tag, right_tag):
+ # if find closing tag
+
+ if data_index < len(block):
+ # we have more text after right_tag
+ items[-1] = block[:data_index]
+ text.insert(0, block[data_index:])
+
+ in_tag = False
+ if self.markdown_in_raw and 'markdown' in attrs.keys():
+ items[0] = items[0][left_index:]
+ items[-1] = items[-1][:-len(right_tag) - 2]
+ if items[len(items) - 1]: # not a newline/empty string
+ right_index = len(items) + 3
+ else:
+ right_index = len(items) + 2
+ new_blocks.append(self.markdown.htmlStash.store_tag(
+ left_tag, attrs, 0, right_index))
+ placeholderslen = len(self.markdown.htmlStash.tag_data)
+ new_blocks.extend(
+ self._nested_markdown_in_html(items))
+ nests = len(self.markdown.htmlStash.tag_data) - \
+ placeholderslen
+ self.markdown.htmlStash.tag_data[-1 - nests][
+ 'right_index'] += nests - 2
+ else:
+ new_blocks.append(
+ self.markdown.htmlStash.store('\n\n'.join(items)))
+ items = []
+
+ if items:
+ if self.markdown_in_raw and 'markdown' in attrs.keys():
+ items[0] = items[0][left_index:]
+ items[-1] = items[-1][:-len(right_tag) - 2]
+ if items[len(items) - 1]: # not a newline/empty string
+ right_index = len(items) + 3
+ else:
+ right_index = len(items) + 2
+ new_blocks.append(
+ self.markdown.htmlStash.store_tag(
+ left_tag, attrs, 0, right_index))
+ placeholderslen = len(self.markdown.htmlStash.tag_data)
+ new_blocks.extend(self._nested_markdown_in_html(items))
+ nests = len(self.markdown.htmlStash.tag_data) - placeholderslen
+ self.markdown.htmlStash.tag_data[-1 - nests][
+ 'right_index'] += nests - 2
+ else:
+ new_blocks.append(
+ self.markdown.htmlStash.store('\n\n'.join(items)))
+ new_blocks.append('\n')
+
+ new_text = "\n\n".join(new_blocks)
+ return new_text.split("\n")
+
+
+class ReferencePreprocessor(Preprocessor):
+ """ Remove reference definitions from text and store for later use. """
+
+ TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
+ RE = re.compile(r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL)
+ TITLE_RE = re.compile(r'^%s$' % TITLE)
+
+ def run (self, lines):
+ new_text = [];
+ while lines:
+ line = lines.pop(0)
+ m = self.RE.match(line)
+ if m:
+ id = m.group(1).strip().lower()
+ link = m.group(2).lstrip('<').rstrip('>')
+ t = m.group(5) or m.group(6) or m.group(7)
+ if not t:
+ # Check next line for title
+ tm = self.TITLE_RE.match(lines[0])
+ if tm:
+ lines.pop(0)
+ t = tm.group(2) or tm.group(3) or tm.group(4)
+ self.markdown.references[id] = (link, t)
+ else:
+ new_text.append(line)
+
+ return new_text #+ "\n"
diff --git a/awx/lib/site-packages/markdown/serializers.py b/awx/lib/site-packages/markdown/serializers.py
index 581b632e1d..aa828066b4 100644
--- a/awx/lib/site-packages/markdown/serializers.py
+++ b/awx/lib/site-packages/markdown/serializers.py
@@ -1,276 +1,276 @@
-# markdown/searializers.py
-#
-# Add x/html serialization to Elementree
-# Taken from ElementTree 1.3 preview with slight modifications
-#
-# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2007 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-
-from __future__ import absolute_import
-from __future__ import unicode_literals
-from . import util
-ElementTree = util.etree.ElementTree
-QName = util.etree.QName
-if hasattr(util.etree, 'test_comment'):
- Comment = util.etree.test_comment
-else:
- Comment = util.etree.Comment
-PI = util.etree.PI
-ProcessingInstruction = util.etree.ProcessingInstruction
-
-__all__ = ['to_html_string', 'to_xhtml_string']
-
-HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
- "img", "input", "isindex", "link", "meta" "param")
-
-try:
- HTML_EMPTY = set(HTML_EMPTY)
-except NameError:
- pass
-
-_namespace_map = {
- # "well-known" namespace prefixes
- "http://www.w3.org/XML/1998/namespace": "xml",
- "http://www.w3.org/1999/xhtml": "html",
- "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
- "http://schemas.xmlsoap.org/wsdl/": "wsdl",
- # xml schema
- "http://www.w3.org/2001/XMLSchema": "xs",
- "http://www.w3.org/2001/XMLSchema-instance": "xsi",
- # dublic core
- "http://purl.org/dc/elements/1.1/": "dc",
-}
-
-
-def _raise_serialization_error(text):
- raise TypeError(
- "cannot serialize %r (type %s)" % (text, type(text).__name__)
- )
-
-def _encode(text, encoding):
- try:
- return text.encode(encoding, "xmlcharrefreplace")
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-def _escape_cdata(text):
- # escape character data
- try:
- # it's worth avoiding do-nothing calls for strings that are
- # shorter than 500 character, or so. assume that's, by far,
- # the most common case in most applications.
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- return text
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-
-def _escape_attrib(text):
- # escape attribute value
- try:
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- if "\"" in text:
- text = text.replace("\"", """)
- if "\n" in text:
- text = text.replace("\n", "
")
- return text
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-def _escape_attrib_html(text):
- # escape attribute value
- try:
- if "&" in text:
- text = text.replace("&", "&")
- if "<" in text:
- text = text.replace("<", "<")
- if ">" in text:
- text = text.replace(">", ">")
- if "\"" in text:
- text = text.replace("\"", """)
- return text
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-
-def _serialize_html(write, elem, qnames, namespaces, format):
- tag = elem.tag
- text = elem.text
- if tag is Comment:
- write("" % _escape_cdata(text))
- elif tag is ProcessingInstruction:
- write("%s?>" % _escape_cdata(text))
- else:
- tag = qnames[tag]
- if tag is None:
- if text:
- write(_escape_cdata(text))
- for e in elem:
- _serialize_html(write, e, qnames, None, format)
- else:
- write("<" + tag)
- items = elem.items()
- if items or namespaces:
- items = sorted(items) # lexical order
- for k, v in items:
- if isinstance(k, QName):
- k = k.text
- if isinstance(v, QName):
- v = qnames[v.text]
- else:
- v = _escape_attrib_html(v)
- if qnames[k] == v and format == 'html':
- # handle boolean attributes
- write(" %s" % v)
- else:
- write(" %s=\"%s\"" % (qnames[k], v))
- if namespaces:
- items = namespaces.items()
- items.sort(key=lambda x: x[1]) # sort on prefix
- for v, k in items:
- if k:
- k = ":" + k
- write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
- if format == "xhtml" and tag.lower() in HTML_EMPTY:
- write(" />")
- else:
- write(">")
- if text:
- if tag.lower() in ["script", "style"]:
- write(text)
- else:
- write(_escape_cdata(text))
- for e in elem:
- _serialize_html(write, e, qnames, None, format)
- if tag.lower() not in HTML_EMPTY:
- write("" + tag + ">")
- if elem.tail:
- write(_escape_cdata(elem.tail))
-
-def _write_html(root,
- encoding=None,
- default_namespace=None,
- format="html"):
- assert root is not None
- data = []
- write = data.append
- qnames, namespaces = _namespaces(root, default_namespace)
- _serialize_html(write, root, qnames, namespaces, format)
- if encoding is None:
- return "".join(data)
- else:
- return _encode("".join(data))
-
-
-# --------------------------------------------------------------------
-# serialization support
-
-def _namespaces(elem, default_namespace=None):
- # identify namespaces used in this tree
-
- # maps qnames to *encoded* prefix:local names
- qnames = {None: None}
-
- # maps uri:s to prefixes
- namespaces = {}
- if default_namespace:
- namespaces[default_namespace] = ""
-
- def add_qname(qname):
- # calculate serialized qname representation
- try:
- if qname[:1] == "{":
- uri, tag = qname[1:].split("}", 1)
- prefix = namespaces.get(uri)
- if prefix is None:
- prefix = _namespace_map.get(uri)
- if prefix is None:
- prefix = "ns%d" % len(namespaces)
- if prefix != "xml":
- namespaces[uri] = prefix
- if prefix:
- qnames[qname] = "%s:%s" % (prefix, tag)
- else:
- qnames[qname] = tag # default element
- else:
- if default_namespace:
- raise ValueError(
- "cannot use non-qualified names with "
- "default_namespace option"
- )
- qnames[qname] = qname
- except TypeError:
- _raise_serialization_error(qname)
-
- # populate qname and namespaces table
- try:
- iterate = elem.iter
- except AttributeError:
- iterate = elem.getiterator # cET compatibility
- for elem in iterate():
- tag = elem.tag
- if isinstance(tag, QName) and tag.text not in qnames:
- add_qname(tag.text)
- elif isinstance(tag, util.string_type):
- if tag not in qnames:
- add_qname(tag)
- elif tag is not None and tag is not Comment and tag is not PI:
- _raise_serialization_error(tag)
- for key, value in elem.items():
- if isinstance(key, QName):
- key = key.text
- if key not in qnames:
- add_qname(key)
- if isinstance(value, QName) and value.text not in qnames:
- add_qname(value.text)
- text = elem.text
- if isinstance(text, QName) and text.text not in qnames:
- add_qname(text.text)
- return qnames, namespaces
-
-def to_html_string(element):
- return _write_html(ElementTree(element).getroot(), format="html")
-
-def to_xhtml_string(element):
- return _write_html(ElementTree(element).getroot(), format="xhtml")
+# markdown/searializers.py
+#
+# Add x/html serialization to Elementree
+# Taken from ElementTree 1.3 preview with slight modifications
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from . import util
+ElementTree = util.etree.ElementTree
+QName = util.etree.QName
+if hasattr(util.etree, 'test_comment'):
+ Comment = util.etree.test_comment
+else:
+ Comment = util.etree.Comment
+PI = util.etree.PI
+ProcessingInstruction = util.etree.ProcessingInstruction
+
+__all__ = ['to_html_string', 'to_xhtml_string']
+
+HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
+ "img", "input", "isindex", "link", "meta" "param")
+
+try:
+ HTML_EMPTY = set(HTML_EMPTY)
+except NameError:
+ pass
+
+_namespace_map = {
+ # "well-known" namespace prefixes
+ "http://www.w3.org/XML/1998/namespace": "xml",
+ "http://www.w3.org/1999/xhtml": "html",
+ "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+ "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+ # xml schema
+ "http://www.w3.org/2001/XMLSchema": "xs",
+ "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+ # dublic core
+ "http://purl.org/dc/elements/1.1/": "dc",
+}
+
+
+def _raise_serialization_error(text):
+ raise TypeError(
+ "cannot serialize %r (type %s)" % (text, type(text).__name__)
+ )
+
+def _encode(text, encoding):
+ try:
+ return text.encode(encoding, "xmlcharrefreplace")
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_cdata(text):
+ # escape character data
+ try:
+ # it's worth avoiding do-nothing calls for strings that are
+ # shorter than 500 character, or so. assume that's, by far,
+ # the most common case in most applications.
+ if "&" in text:
+ text = text.replace("&", "&")
+ if "<" in text:
+ text = text.replace("<", "<")
+ if ">" in text:
+ text = text.replace(">", ">")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+
+def _escape_attrib(text):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&")
+ if "<" in text:
+ text = text.replace("<", "<")
+ if ">" in text:
+ text = text.replace(">", ">")
+ if "\"" in text:
+ text = text.replace("\"", """)
+ if "\n" in text:
+ text = text.replace("\n", "
")
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_attrib_html(text):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&")
+ if "<" in text:
+ text = text.replace("<", "<")
+ if ">" in text:
+ text = text.replace(">", ">")
+ if "\"" in text:
+ text = text.replace("\"", """)
+ return text
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+
+def _serialize_html(write, elem, qnames, namespaces, format):
+ tag = elem.tag
+ text = elem.text
+ if tag is Comment:
+ write("" % _escape_cdata(text))
+ elif tag is ProcessingInstruction:
+ write("%s?>" % _escape_cdata(text))
+ else:
+ tag = qnames[tag]
+ if tag is None:
+ if text:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_html(write, e, qnames, None, format)
+ else:
+ write("<" + tag)
+ items = elem.items()
+ if items or namespaces:
+ items = sorted(items) # lexical order
+ for k, v in items:
+ if isinstance(k, QName):
+ k = k.text
+ if isinstance(v, QName):
+ v = qnames[v.text]
+ else:
+ v = _escape_attrib_html(v)
+ if qnames[k] == v and format == 'html':
+ # handle boolean attributes
+ write(" %s" % v)
+ else:
+ write(" %s=\"%s\"" % (qnames[k], v))
+ if namespaces:
+ items = namespaces.items()
+ items.sort(key=lambda x: x[1]) # sort on prefix
+ for v, k in items:
+ if k:
+ k = ":" + k
+ write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
+ if format == "xhtml" and tag.lower() in HTML_EMPTY:
+ write(" />")
+ else:
+ write(">")
+ if text:
+ if tag.lower() in ["script", "style"]:
+ write(text)
+ else:
+ write(_escape_cdata(text))
+ for e in elem:
+ _serialize_html(write, e, qnames, None, format)
+ if tag.lower() not in HTML_EMPTY:
+ write("" + tag + ">")
+ if elem.tail:
+ write(_escape_cdata(elem.tail))
+
+def _write_html(root,
+ encoding=None,
+ default_namespace=None,
+ format="html"):
+ assert root is not None
+ data = []
+ write = data.append
+ qnames, namespaces = _namespaces(root, default_namespace)
+ _serialize_html(write, root, qnames, namespaces, format)
+ if encoding is None:
+ return "".join(data)
+ else:
+ return _encode("".join(data))
+
+
+# --------------------------------------------------------------------
+# serialization support
+
+def _namespaces(elem, default_namespace=None):
+ # identify namespaces used in this tree
+
+ # maps qnames to *encoded* prefix:local names
+ qnames = {None: None}
+
+ # maps uri:s to prefixes
+ namespaces = {}
+ if default_namespace:
+ namespaces[default_namespace] = ""
+
+ def add_qname(qname):
+ # calculate serialized qname representation
+ try:
+ if qname[:1] == "{":
+ uri, tag = qname[1:].split("}", 1)
+ prefix = namespaces.get(uri)
+ if prefix is None:
+ prefix = _namespace_map.get(uri)
+ if prefix is None:
+ prefix = "ns%d" % len(namespaces)
+ if prefix != "xml":
+ namespaces[uri] = prefix
+ if prefix:
+ qnames[qname] = "%s:%s" % (prefix, tag)
+ else:
+ qnames[qname] = tag # default element
+ else:
+ if default_namespace:
+ raise ValueError(
+ "cannot use non-qualified names with "
+ "default_namespace option"
+ )
+ qnames[qname] = qname
+ except TypeError:
+ _raise_serialization_error(qname)
+
+ # populate qname and namespaces table
+ try:
+ iterate = elem.iter
+ except AttributeError:
+ iterate = elem.getiterator # cET compatibility
+ for elem in iterate():
+ tag = elem.tag
+ if isinstance(tag, QName) and tag.text not in qnames:
+ add_qname(tag.text)
+ elif isinstance(tag, util.string_type):
+ if tag not in qnames:
+ add_qname(tag)
+ elif tag is not None and tag is not Comment and tag is not PI:
+ _raise_serialization_error(tag)
+ for key, value in elem.items():
+ if isinstance(key, QName):
+ key = key.text
+ if key not in qnames:
+ add_qname(key)
+ if isinstance(value, QName) and value.text not in qnames:
+ add_qname(value.text)
+ text = elem.text
+ if isinstance(text, QName) and text.text not in qnames:
+ add_qname(text.text)
+ return qnames, namespaces
+
+def to_html_string(element):
+ return _write_html(ElementTree(element).getroot(), format="html")
+
+def to_xhtml_string(element):
+ return _write_html(ElementTree(element).getroot(), format="xhtml")
diff --git a/awx/lib/site-packages/markdown/treeprocessors.py b/awx/lib/site-packages/markdown/treeprocessors.py
index ec9ff6956f..ef0a2aa00c 100644
--- a/awx/lib/site-packages/markdown/treeprocessors.py
+++ b/awx/lib/site-packages/markdown/treeprocessors.py
@@ -1,360 +1,360 @@
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from . import util
-from . import odict
-from . import inlinepatterns
-
-
-def build_treeprocessors(md_instance, **kwargs):
- """ Build the default treeprocessors for Markdown. """
- treeprocessors = odict.OrderedDict()
- treeprocessors["inline"] = InlineProcessor(md_instance)
- treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
- return treeprocessors
-
-
-def isString(s):
- """ Check if it's string """
- if not isinstance(s, util.AtomicString):
- return isinstance(s, util.string_type)
- return False
-
-
-class Treeprocessor(util.Processor):
- """
- Treeprocessors are run on the ElementTree object before serialization.
-
- Each Treeprocessor implements a "run" method that takes a pointer to an
- ElementTree, modifies it as necessary and returns an ElementTree
- object.
-
- Treeprocessors must extend markdown.Treeprocessor.
-
- """
- def run(self, root):
- """
- Subclasses of Treeprocessor should implement a `run` method, which
- takes a root ElementTree. This method can return another ElementTree
- object, and the existing root ElementTree will be replaced, or it can
- modify the current tree and return None.
- """
- pass
-
-
-class InlineProcessor(Treeprocessor):
- """
- A Treeprocessor that traverses a tree, applying inline patterns.
- """
-
- def __init__(self, md):
- self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
- self.__placeholder_suffix = util.ETX
- self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
- + len(self.__placeholder_suffix)
- self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
- self.markdown = md
-
- def __makePlaceholder(self, type):
- """ Generate a placeholder """
- id = "%04d" % len(self.stashed_nodes)
- hash = util.INLINE_PLACEHOLDER % id
- return hash, id
-
- def __findPlaceholder(self, data, index):
- """
- Extract id from data string, start from index
-
- Keyword arguments:
-
- * data: string
- * index: index, from which we start search
-
- Returns: placeholder id and string index, after the found placeholder.
-
- """
- m = self.__placeholder_re.search(data, index)
- if m:
- return m.group(1), m.end()
- else:
- return None, index + 1
-
- def __stashNode(self, node, type):
- """ Add node to stash """
- placeholder, id = self.__makePlaceholder(type)
- self.stashed_nodes[id] = node
- return placeholder
-
- def __handleInline(self, data, patternIndex=0):
- """
- Process string with inline patterns and replace it
- with placeholders
-
- Keyword arguments:
-
- * data: A line of Markdown text
- * patternIndex: The index of the inlinePattern to start with
-
- Returns: String with placeholders.
-
- """
- if not isinstance(data, util.AtomicString):
- startIndex = 0
- while patternIndex < len(self.markdown.inlinePatterns):
- data, matched, startIndex = self.__applyPattern(
- self.markdown.inlinePatterns.value_for_index(patternIndex),
- data, patternIndex, startIndex)
- if not matched:
- patternIndex += 1
- return data
-
- def __processElementText(self, node, subnode, isText=True):
- """
- Process placeholders in Element.text or Element.tail
- of Elements popped from self.stashed_nodes.
-
- Keywords arguments:
-
- * node: parent node
- * subnode: processing node
- * isText: bool variable, True - it's text, False - it's tail
-
- Returns: None
-
- """
- if isText:
- text = subnode.text
- subnode.text = None
- else:
- text = subnode.tail
- subnode.tail = None
-
- childResult = self.__processPlaceholders(text, subnode)
-
- if not isText and node is not subnode:
- pos = list(node).index(subnode)
- node.remove(subnode)
- else:
- pos = 0
-
- childResult.reverse()
- for newChild in childResult:
- node.insert(pos, newChild)
-
- def __processPlaceholders(self, data, parent):
- """
- Process string with placeholders and generate ElementTree tree.
-
- Keyword arguments:
-
- * data: string with placeholders instead of ElementTree elements.
- * parent: Element, which contains processing inline data
-
- Returns: list with ElementTree elements with applied inline patterns.
-
- """
- def linkText(text):
- if text:
- if result:
- if result[-1].tail:
- result[-1].tail += text
- else:
- result[-1].tail = text
- else:
- if parent.text:
- parent.text += text
- else:
- parent.text = text
- result = []
- strartIndex = 0
- while data:
- index = data.find(self.__placeholder_prefix, strartIndex)
- if index != -1:
- id, phEndIndex = self.__findPlaceholder(data, index)
-
- if id in self.stashed_nodes:
- node = self.stashed_nodes.get(id)
-
- if index > 0:
- text = data[strartIndex:index]
- linkText(text)
-
- if not isString(node): # it's Element
- for child in [node] + list(node):
- if child.tail:
- if child.tail.strip():
- self.__processElementText(node, child,False)
- if child.text:
- if child.text.strip():
- self.__processElementText(child, child)
- else: # it's just a string
- linkText(node)
- strartIndex = phEndIndex
- continue
-
- strartIndex = phEndIndex
- result.append(node)
-
- else: # wrong placeholder
- end = index + len(self.__placeholder_prefix)
- linkText(data[strartIndex:end])
- strartIndex = end
- else:
- text = data[strartIndex:]
- if isinstance(data, util.AtomicString):
- # We don't want to loose the AtomicString
- text = util.AtomicString(text)
- linkText(text)
- data = ""
-
- return result
-
- def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
- """
- Check if the line fits the pattern, create the necessary
- elements, add it to stashed_nodes.
-
- Keyword arguments:
-
- * data: the text to be processed
- * pattern: the pattern to be checked
- * patternIndex: index of current pattern
- * startIndex: string index, from which we start searching
-
- Returns: String with placeholders instead of ElementTree elements.
-
- """
- match = pattern.getCompiledRegExp().match(data[startIndex:])
- leftData = data[:startIndex]
-
- if not match:
- return data, False, 0
-
- node = pattern.handleMatch(match)
-
- if node is None:
- return data, True, len(leftData)+match.span(len(match.groups()))[0]
-
- if not isString(node):
- if not isinstance(node.text, util.AtomicString):
- # We need to process current node too
- for child in [node] + list(node):
- if not isString(node):
- if child.text:
- child.text = self.__handleInline(child.text,
- patternIndex + 1)
- if child.tail:
- child.tail = self.__handleInline(child.tail,
- patternIndex)
-
- placeholder = self.__stashNode(node, pattern.type())
-
- return "%s%s%s%s" % (leftData,
- match.group(1),
- placeholder, match.groups()[-1]), True, 0
-
- def run(self, tree):
- """Apply inline patterns to a parsed Markdown tree.
-
- Iterate over ElementTree, find elements with inline tag, apply inline
- patterns and append newly created Elements to tree. If you don't
- want to process your data with inline paterns, instead of normal string,
- use subclass AtomicString:
-
- node.text = markdown.AtomicString("This will not be processed.")
-
- Arguments:
-
- * tree: ElementTree object, representing Markdown tree.
-
- Returns: ElementTree object with applied inline patterns.
-
- """
- self.stashed_nodes = {}
-
- stack = [tree]
-
- while stack:
- currElement = stack.pop()
- insertQueue = []
- for child in currElement:
- if child.text and not isinstance(child.text, util.AtomicString):
- text = child.text
- child.text = None
- lst = self.__processPlaceholders(self.__handleInline(
- text), child)
- stack += lst
- insertQueue.append((child, lst))
- if child.tail:
- tail = self.__handleInline(child.tail)
- dumby = util.etree.Element('d')
- tailResult = self.__processPlaceholders(tail, dumby)
- if dumby.text:
- child.tail = dumby.text
- else:
- child.tail = None
- pos = list(currElement).index(child) + 1
- tailResult.reverse()
- for newChild in tailResult:
- currElement.insert(pos, newChild)
- if len(child):
- stack.append(child)
-
- for element, lst in insertQueue:
- if self.markdown.enable_attributes:
- if element.text and isString(element.text):
- element.text = \
- inlinepatterns.handleAttributes(element.text,
- element)
- i = 0
- for newChild in lst:
- if self.markdown.enable_attributes:
- # Processing attributes
- if newChild.tail and isString(newChild.tail):
- newChild.tail = \
- inlinepatterns.handleAttributes(newChild.tail,
- element)
- if newChild.text and isString(newChild.text):
- newChild.text = \
- inlinepatterns.handleAttributes(newChild.text,
- newChild)
- element.insert(i, newChild)
- i += 1
- return tree
-
-
-class PrettifyTreeprocessor(Treeprocessor):
- """ Add linebreaks to the html document. """
-
- def _prettifyETree(self, elem):
- """ Recursively add linebreaks to ElementTree children. """
-
- i = "\n"
- if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
- if (not elem.text or not elem.text.strip()) \
- and len(elem) and util.isBlockLevel(elem[0].tag):
- elem.text = i
- for e in elem:
- if util.isBlockLevel(e.tag):
- self._prettifyETree(e)
- if not elem.tail or not elem.tail.strip():
- elem.tail = i
- if not elem.tail or not elem.tail.strip():
- elem.tail = i
-
- def run(self, root):
- """ Add linebreaks to ElementTree root object. """
-
- self._prettifyETree(root)
- # Do
's seperately as they are often in the middle of
- # inline content and missed by _prettifyETree.
- brs = root.getiterator('br')
- for br in brs:
- if not br.tail or not br.tail.strip():
- br.tail = '\n'
- else:
- br.tail = '\n%s' % br.tail
- # Clean up extra empty lines at end of code blocks.
- pres = root.getiterator('pre')
- for pre in pres:
- if len(pre) and pre[0].tag == 'code':
- pre[0].text = pre[0].text.rstrip() + '\n'
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from . import util
+from . import odict
+from . import inlinepatterns
+
+
+def build_treeprocessors(md_instance, **kwargs):
+ """ Build the default treeprocessors for Markdown. """
+ treeprocessors = odict.OrderedDict()
+ treeprocessors["inline"] = InlineProcessor(md_instance)
+ treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
+ return treeprocessors
+
+
+def isString(s):
+ """ Check if it's string """
+ if not isinstance(s, util.AtomicString):
+ return isinstance(s, util.string_type)
+ return False
+
+
+class Treeprocessor(util.Processor):
+ """
+ Treeprocessors are run on the ElementTree object before serialization.
+
+ Each Treeprocessor implements a "run" method that takes a pointer to an
+ ElementTree, modifies it as necessary and returns an ElementTree
+ object.
+
+ Treeprocessors must extend markdown.Treeprocessor.
+
+ """
+ def run(self, root):
+ """
+ Subclasses of Treeprocessor should implement a `run` method, which
+ takes a root ElementTree. This method can return another ElementTree
+ object, and the existing root ElementTree will be replaced, or it can
+ modify the current tree and return None.
+ """
+ pass
+
+
+class InlineProcessor(Treeprocessor):
+ """
+ A Treeprocessor that traverses a tree, applying inline patterns.
+ """
+
+ def __init__(self, md):
+ self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
+ self.__placeholder_suffix = util.ETX
+ self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ + len(self.__placeholder_suffix)
+ self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
+ self.markdown = md
+
+ def __makePlaceholder(self, type):
+ """ Generate a placeholder """
+ id = "%04d" % len(self.stashed_nodes)
+ hash = util.INLINE_PLACEHOLDER % id
+ return hash, id
+
+ def __findPlaceholder(self, data, index):
+ """
+ Extract id from data string, start from index
+
+ Keyword arguments:
+
+ * data: string
+ * index: index, from which we start search
+
+ Returns: placeholder id and string index, after the found placeholder.
+
+ """
+ m = self.__placeholder_re.search(data, index)
+ if m:
+ return m.group(1), m.end()
+ else:
+ return None, index + 1
+
+ def __stashNode(self, node, type):
+ """ Add node to stash """
+ placeholder, id = self.__makePlaceholder(type)
+ self.stashed_nodes[id] = node
+ return placeholder
+
+ def __handleInline(self, data, patternIndex=0):
+ """
+ Process string with inline patterns and replace it
+ with placeholders
+
+ Keyword arguments:
+
+ * data: A line of Markdown text
+ * patternIndex: The index of the inlinePattern to start with
+
+ Returns: String with placeholders.
+
+ """
+ if not isinstance(data, util.AtomicString):
+ startIndex = 0
+ while patternIndex < len(self.markdown.inlinePatterns):
+ data, matched, startIndex = self.__applyPattern(
+ self.markdown.inlinePatterns.value_for_index(patternIndex),
+ data, patternIndex, startIndex)
+ if not matched:
+ patternIndex += 1
+ return data
+
+ def __processElementText(self, node, subnode, isText=True):
+ """
+ Process placeholders in Element.text or Element.tail
+ of Elements popped from self.stashed_nodes.
+
+ Keywords arguments:
+
+ * node: parent node
+ * subnode: processing node
+ * isText: bool variable, True - it's text, False - it's tail
+
+ Returns: None
+
+ """
+ if isText:
+ text = subnode.text
+ subnode.text = None
+ else:
+ text = subnode.tail
+ subnode.tail = None
+
+ childResult = self.__processPlaceholders(text, subnode)
+
+ if not isText and node is not subnode:
+ pos = list(node).index(subnode)
+ node.remove(subnode)
+ else:
+ pos = 0
+
+ childResult.reverse()
+ for newChild in childResult:
+ node.insert(pos, newChild)
+
+ def __processPlaceholders(self, data, parent):
+ """
+ Process string with placeholders and generate ElementTree tree.
+
+ Keyword arguments:
+
+ * data: string with placeholders instead of ElementTree elements.
+ * parent: Element, which contains processing inline data
+
+ Returns: list with ElementTree elements with applied inline patterns.
+
+ """
+ def linkText(text):
+ if text:
+ if result:
+ if result[-1].tail:
+ result[-1].tail += text
+ else:
+ result[-1].tail = text
+ else:
+ if parent.text:
+ parent.text += text
+ else:
+ parent.text = text
+ result = []
+ strartIndex = 0
+ while data:
+ index = data.find(self.__placeholder_prefix, strartIndex)
+ if index != -1:
+ id, phEndIndex = self.__findPlaceholder(data, index)
+
+ if id in self.stashed_nodes:
+ node = self.stashed_nodes.get(id)
+
+ if index > 0:
+ text = data[strartIndex:index]
+ linkText(text)
+
+ if not isString(node): # it's Element
+ for child in [node] + list(node):
+ if child.tail:
+ if child.tail.strip():
+ self.__processElementText(node, child,False)
+ if child.text:
+ if child.text.strip():
+ self.__processElementText(child, child)
+ else: # it's just a string
+ linkText(node)
+ strartIndex = phEndIndex
+ continue
+
+ strartIndex = phEndIndex
+ result.append(node)
+
+ else: # wrong placeholder
+ end = index + len(self.__placeholder_prefix)
+ linkText(data[strartIndex:end])
+ strartIndex = end
+ else:
+ text = data[strartIndex:]
+ if isinstance(data, util.AtomicString):
+                # We don't want to lose the AtomicString
+ text = util.AtomicString(text)
+ linkText(text)
+ data = ""
+
+ return result
+
+ def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
+ """
+ Check if the line fits the pattern, create the necessary
+ elements, add it to stashed_nodes.
+
+ Keyword arguments:
+
+ * data: the text to be processed
+ * pattern: the pattern to be checked
+ * patternIndex: index of current pattern
+ * startIndex: string index, from which we start searching
+
+ Returns: String with placeholders instead of ElementTree elements.
+
+ """
+ match = pattern.getCompiledRegExp().match(data[startIndex:])
+ leftData = data[:startIndex]
+
+ if not match:
+ return data, False, 0
+
+ node = pattern.handleMatch(match)
+
+ if node is None:
+ return data, True, len(leftData)+match.span(len(match.groups()))[0]
+
+ if not isString(node):
+ if not isinstance(node.text, util.AtomicString):
+ # We need to process current node too
+ for child in [node] + list(node):
+ if not isString(node):
+ if child.text:
+ child.text = self.__handleInline(child.text,
+ patternIndex + 1)
+ if child.tail:
+ child.tail = self.__handleInline(child.tail,
+ patternIndex)
+
+ placeholder = self.__stashNode(node, pattern.type())
+
+ return "%s%s%s%s" % (leftData,
+ match.group(1),
+ placeholder, match.groups()[-1]), True, 0
+
+ def run(self, tree):
+ """Apply inline patterns to a parsed Markdown tree.
+
+ Iterate over ElementTree, find elements with inline tag, apply inline
+ patterns and append newly created Elements to tree. If you don't
+        want to process your data with inline patterns, instead of normal string,
+ use subclass AtomicString:
+
+ node.text = markdown.AtomicString("This will not be processed.")
+
+ Arguments:
+
+ * tree: ElementTree object, representing Markdown tree.
+
+ Returns: ElementTree object with applied inline patterns.
+
+ """
+ self.stashed_nodes = {}
+
+ stack = [tree]
+
+ while stack:
+ currElement = stack.pop()
+ insertQueue = []
+ for child in currElement:
+ if child.text and not isinstance(child.text, util.AtomicString):
+ text = child.text
+ child.text = None
+ lst = self.__processPlaceholders(self.__handleInline(
+ text), child)
+ stack += lst
+ insertQueue.append((child, lst))
+ if child.tail:
+ tail = self.__handleInline(child.tail)
+ dumby = util.etree.Element('d')
+ tailResult = self.__processPlaceholders(tail, dumby)
+ if dumby.text:
+ child.tail = dumby.text
+ else:
+ child.tail = None
+ pos = list(currElement).index(child) + 1
+ tailResult.reverse()
+ for newChild in tailResult:
+ currElement.insert(pos, newChild)
+ if len(child):
+ stack.append(child)
+
+ for element, lst in insertQueue:
+ if self.markdown.enable_attributes:
+ if element.text and isString(element.text):
+ element.text = \
+ inlinepatterns.handleAttributes(element.text,
+ element)
+ i = 0
+ for newChild in lst:
+ if self.markdown.enable_attributes:
+ # Processing attributes
+ if newChild.tail and isString(newChild.tail):
+ newChild.tail = \
+ inlinepatterns.handleAttributes(newChild.tail,
+ element)
+ if newChild.text and isString(newChild.text):
+ newChild.text = \
+ inlinepatterns.handleAttributes(newChild.text,
+ newChild)
+ element.insert(i, newChild)
+ i += 1
+ return tree
+
+
+class PrettifyTreeprocessor(Treeprocessor):
+ """ Add linebreaks to the html document. """
+
+ def _prettifyETree(self, elem):
+ """ Recursively add linebreaks to ElementTree children. """
+
+ i = "\n"
+ if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
+ if (not elem.text or not elem.text.strip()) \
+ and len(elem) and util.isBlockLevel(elem[0].tag):
+ elem.text = i
+ for e in elem:
+ if util.isBlockLevel(e.tag):
+ self._prettifyETree(e)
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+
+ def run(self, root):
+ """ Add linebreaks to ElementTree root object. """
+
+ self._prettifyETree(root)
+ # Do
's separately as they are often in the middle of
+ # inline content and missed by _prettifyETree.
+ brs = root.getiterator('br')
+ for br in brs:
+ if not br.tail or not br.tail.strip():
+ br.tail = '\n'
+ else:
+ br.tail = '\n%s' % br.tail
+ # Clean up extra empty lines at end of code blocks.
+ pres = root.getiterator('pre')
+ for pre in pres:
+ if len(pre) and pre[0].tag == 'code':
+ pre[0].text = pre[0].text.rstrip() + '\n'
diff --git a/awx/lib/site-packages/markdown/util.py b/awx/lib/site-packages/markdown/util.py
index 433a3251f3..edb25886ad 100644
--- a/awx/lib/site-packages/markdown/util.py
+++ b/awx/lib/site-packages/markdown/util.py
@@ -1,163 +1,163 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-import re
-import sys
-
-
-"""
-Python 3 Stuff
-=============================================================================
-"""
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- string_type = str
- text_type = str
- int2str = chr
-else:
- string_type = basestring
- text_type = unicode
- int2str = unichr
-
-
-"""
-Constants you might want to modify
------------------------------------------------------------------------------
-"""
-
-BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
- "|script|noscript|form|fieldset|iframe|math"
- "|hr|hr/|style|li|dt|dd|thead|tbody"
- "|tr|th|td|section|footer|header|group|figure"
- "|figcaption|aside|article|canvas|output"
- "|progress|video)$", re.IGNORECASE)
-# Placeholders
-STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
-ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
-INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
-INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
-INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
-AMP_SUBSTITUTE = STX+"amp"+ETX
-HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
-HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
-TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
-
-
-"""
-Constants you probably do not need to change
------------------------------------------------------------------------------
-"""
-
-RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
- # Hebrew (0590-05FF), Arabic (0600-06FF),
- # Syriac (0700-074F), Arabic supplement (0750-077F),
- # Thaana (0780-07BF), Nko (07C0-07FF).
- ('\u2D30', '\u2D7F'), # Tifinagh
- )
-
-# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
-# markdown.util import etree`). Do not import it by yourself.
-
-try: # Is the C implementation of ElementTree available?
- import xml.etree.cElementTree as etree
- from xml.etree.ElementTree import Comment
- # Serializers (including ours) test with non-c Comment
- etree.test_comment = Comment
- if etree.VERSION < "1.0.5":
- raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
-except (ImportError, RuntimeError):
- # Use the Python implementation of ElementTree?
- import xml.etree.ElementTree as etree
- if etree.VERSION < "1.1":
- raise RuntimeError("ElementTree version 1.1 or higher is required")
-
-
-"""
-AUXILIARY GLOBAL FUNCTIONS
-=============================================================================
-"""
-
-
-def isBlockLevel(tag):
- """Check if the tag is a block level HTML tag."""
- if isinstance(tag, string_type):
- return BLOCK_LEVEL_ELEMENTS.match(tag)
- # Some ElementTree tags are not strings, so return False.
- return False
-
-def parseBoolValue(value, fail_on_errors=True):
- """Parses a string representing bool value. If parsing was successful,
- returns True or False. If parsing was not successful, raises
- ValueError, or, if fail_on_errors=False, returns None."""
- if not isinstance(value, string_type):
- return bool(value)
- elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
- return True
- elif value.lower() in ('false', 'no', 'n', 'off', '0'):
- return False
- elif fail_on_errors:
- raise ValueError('Cannot parse bool value: %r' % value)
-
-"""
-MISC AUXILIARY CLASSES
-=============================================================================
-"""
-
-class AtomicString(text_type):
- """A string which should not be further processed."""
- pass
-
-
-class Processor(object):
- def __init__(self, markdown_instance=None):
- if markdown_instance:
- self.markdown = markdown_instance
-
-
-class HtmlStash(object):
- """
- This class is used for stashing HTML objects that we extract
- in the beginning and replace with place-holders.
- """
-
- def __init__(self):
- """ Create a HtmlStash. """
- self.html_counter = 0 # for counting inline html segments
- self.rawHtmlBlocks = []
- self.tag_counter = 0
- self.tag_data = [] # list of dictionaries in the order tags appear
-
- def store(self, html, safe=False):
- """
- Saves an HTML segment for later reinsertion. Returns a
- placeholder string that needs to be inserted into the
- document.
-
- Keyword arguments:
-
- * html: an html segment
- * safe: label an html segment as safe for safemode
-
- Returns : a placeholder string
-
- """
- self.rawHtmlBlocks.append((html, safe))
- placeholder = self.get_placeholder(self.html_counter)
- self.html_counter += 1
- return placeholder
-
- def reset(self):
- self.html_counter = 0
- self.rawHtmlBlocks = []
-
- def get_placeholder(self, key):
- return HTML_PLACEHOLDER % key
-
- def store_tag(self, tag, attrs, left_index, right_index):
- """Store tag data and return a placeholder."""
- self.tag_data.append({'tag': tag, 'attrs': attrs,
- 'left_index': left_index,
- 'right_index': right_index})
- placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
- self.tag_counter += 1 # equal to the tag's index in self.tag_data
- return placeholder
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import re
+import sys
+
+
+"""
+Python 3 Stuff
+=============================================================================
+"""
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ string_type = str
+ text_type = str
+ int2str = chr
+else:
+ string_type = basestring
+ text_type = unicode
+ int2str = unichr
+
+
+"""
+Constants you might want to modify
+-----------------------------------------------------------------------------
+"""
+
+BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
+ "|script|noscript|form|fieldset|iframe|math"
+ "|hr|hr/|style|li|dt|dd|thead|tbody"
+ "|tr|th|td|section|footer|header|group|figure"
+ "|figcaption|aside|article|canvas|output"
+ "|progress|video|nav)$", re.IGNORECASE)
+# Placeholders
+STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
+ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
+INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
+INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
+INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
+AMP_SUBSTITUTE = STX+"amp"+ETX
+HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
+HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
+TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
+
+
+"""
+Constants you probably do not need to change
+-----------------------------------------------------------------------------
+"""
+
+RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
+ # Hebrew (0590-05FF), Arabic (0600-06FF),
+ # Syriac (0700-074F), Arabic supplement (0750-077F),
+ # Thaana (0780-07BF), Nko (07C0-07FF).
+ ('\u2D30', '\u2D7F'), # Tifinagh
+ )
+
+# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
+# markdown.util import etree`). Do not import it by yourself.
+
+try: # Is the C implementation of ElementTree available?
+ import xml.etree.cElementTree as etree
+ from xml.etree.ElementTree import Comment
+ # Serializers (including ours) test with non-c Comment
+ etree.test_comment = Comment
+ if etree.VERSION < "1.0.5":
+ raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
+except (ImportError, RuntimeError):
+ # Use the Python implementation of ElementTree?
+ import xml.etree.ElementTree as etree
+ if etree.VERSION < "1.1":
+ raise RuntimeError("ElementTree version 1.1 or higher is required")
+
+
+"""
+AUXILIARY GLOBAL FUNCTIONS
+=============================================================================
+"""
+
+
+def isBlockLevel(tag):
+ """Check if the tag is a block level HTML tag."""
+ if isinstance(tag, string_type):
+ return BLOCK_LEVEL_ELEMENTS.match(tag)
+ # Some ElementTree tags are not strings, so return False.
+ return False
+
+def parseBoolValue(value, fail_on_errors=True):
+ """Parses a string representing bool value. If parsing was successful,
+ returns True or False. If parsing was not successful, raises
+ ValueError, or, if fail_on_errors=False, returns None."""
+ if not isinstance(value, string_type):
+ return bool(value)
+ elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
+ return True
+ elif value.lower() in ('false', 'no', 'n', 'off', '0'):
+ return False
+ elif fail_on_errors:
+ raise ValueError('Cannot parse bool value: %r' % value)
+
+"""
+MISC AUXILIARY CLASSES
+=============================================================================
+"""
+
+class AtomicString(text_type):
+ """A string which should not be further processed."""
+ pass
+
+
+class Processor(object):
+ def __init__(self, markdown_instance=None):
+ if markdown_instance:
+ self.markdown = markdown_instance
+
+
+class HtmlStash(object):
+ """
+ This class is used for stashing HTML objects that we extract
+ in the beginning and replace with place-holders.
+ """
+
+ def __init__(self):
+ """ Create a HtmlStash. """
+ self.html_counter = 0 # for counting inline html segments
+ self.rawHtmlBlocks = []
+ self.tag_counter = 0
+ self.tag_data = [] # list of dictionaries in the order tags appear
+
+ def store(self, html, safe=False):
+ """
+ Saves an HTML segment for later reinsertion. Returns a
+ placeholder string that needs to be inserted into the
+ document.
+
+ Keyword arguments:
+
+ * html: an html segment
+ * safe: label an html segment as safe for safemode
+
+ Returns : a placeholder string
+
+ """
+ self.rawHtmlBlocks.append((html, safe))
+ placeholder = self.get_placeholder(self.html_counter)
+ self.html_counter += 1
+ return placeholder
+
+ def reset(self):
+ self.html_counter = 0
+ self.rawHtmlBlocks = []
+
+ def get_placeholder(self, key):
+ return HTML_PLACEHOLDER % key
+
+ def store_tag(self, tag, attrs, left_index, right_index):
+ """Store tag data and return a placeholder."""
+ self.tag_data.append({'tag': tag, 'attrs': attrs,
+ 'left_index': left_index,
+ 'right_index': right_index})
+ placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
+ self.tag_counter += 1 # equal to the tag's index in self.tag_data
+ return placeholder