12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809 |
- #!/usr/bin/env python
- # Copyright (c) 2012 Trent Mick.
- # Copyright (c) 2007-2008 ActiveState Corp.
- # License: MIT (http://www.opensource.org/licenses/mit-license.php)
- r"""A fast and complete Python implementation of Markdown.
- [from http://daringfireball.net/projects/markdown/]
- > Markdown is a text-to-HTML filter; it translates an easy-to-read /
- > easy-to-write structured text format into HTML. Markdown's text
- > format is most similar to that of plain text email, and supports
- > features such as headers, *emphasis*, code blocks, blockquotes, and
- > links.
- >
- > Markdown's syntax is designed not as a generic markup language, but
- > specifically to serve as a front-end to (X)HTML. You can use span-level
- > HTML tags anywhere in a Markdown document, and you can use block level
- > HTML tags (like <div> and <table> as well).
- Module usage:
- >>> import markdown2
- >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
- u'<p><em>boo!</em></p>\n'
- >>> markdowner = Markdown()
- >>> markdowner.convert("*boo!*")
- u'<p><em>boo!</em></p>\n'
- >>> markdowner.convert("**boom!**")
- u'<p><strong>boom!</strong></p>\n'
- This implementation of Markdown implements the full "core" syntax plus a
- number of extras (e.g., code syntax coloring, footnotes) as described on
- <https://github.com/trentm/python-markdown2/wiki/Extras>.
- """
- cmdln_desc = """A fast and complete Python implementation of Markdown, a
- text-to-HTML conversion tool for web writers.
- Supported extra syntax options (see -x|--extras option below and
- see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
- * break-on-newline: Replace single new line characters with <br> when True
- * code-friendly: Disable _ and __ for em and strong.
- * cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
- * fenced-code-blocks: Allows a code block to not have to be indented
- by fencing it with '```' on a line before and after. Based on
- <http://github.github.com/github-flavored-markdown/> with support for
- syntax highlighting.
- * footnotes: Support footnotes as in use on daringfireball.net and
- implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
- * header-ids: Adds "id" attributes to headers. The id value is a slug of
- the header text.
- * highlightjs-lang: Allows specifying the language which used for syntax
- highlighting when using fenced-code-blocks and highlightjs.
- * html-classes: Takes a dict mapping html tag names (lowercase) to a
- string to use for a "class" tag attribute. Currently only supports "img",
- "table", "pre" and "code" tags. Add an issue if you require this for other
- tags.
- * link-patterns: Auto-link given regex patterns in text (e.g. bug number
- references, revision number references).
- * markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
- have markdown processing be done on its contents. Similar to
- <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
- some limitations.
- * metadata: Extract metadata from a leading '---'-fenced block.
- See <https://github.com/trentm/python-markdown2/issues/77> for details.
- * nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
- <http://en.wikipedia.org/wiki/Nofollow>.
- * numbering: Support of generic counters. Non standard extension to
- allow sequential numbering of figures, tables, equations, exhibits etc.
- * pyshell: Treats unindented Python interactive shell sessions as <code>
- blocks.
- * smarty-pants: Replaces ' and " with curly quotation marks or curly
- apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
- and ellipses.
- * spoiler: A special kind of blockquote commonly hidden behind a
- click on SO. Syntax per <http://meta.stackexchange.com/a/72878>.
- * strike: text inside of double tilde is ~~strikethrough~~
- * tag-friendly: Requires atx style headers to have a space between the # and
- the header text. Useful for applications that require twitter style tags to
- pass through the parser.
- * tables: Tables using the same format as GFM
- <https://help.github.com/articles/github-flavored-markdown#tables> and
- PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
- * toc: The returned HTML string gets a new "toc_html" attribute which is
- a Table of Contents for the document. (experimental)
- * use-file-vars: Look for an Emacs-style markdown-extras file variable to turn
- on Extras.
- * wiki-tables: Google Code Wiki-style tables. See
- <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
- * xml: Passes one-liner processing instructions and namespaced XML tags.
- """
- # Dev Notes:
- # - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
- # not yet sure if there are implications with this. Compare 'pydoc sre'
- # and 'perldoc perlre'.
# Version as a tuple for programmatic comparison; `__version__` is the
# dotted-string form derived from it.
__version_info__ = (2, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
- import sys
- import re
- import logging
- from hashlib import sha256
- import optparse
- from random import random, randint
- import codecs
- from collections import defaultdict
# ---- Python version compat

# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
    py3 = False
    try:
        bytes
    except NameError:
        # Very old Python 2.x has no `bytes` builtin; alias it to `str`.
        bytes = str
    base_string_type = basestring
elif sys.version_info[0] >= 3:
    py3 = True
    # Python 3 has no separate `unicode` type; alias it to `str` so the
    # rest of the module can call `unicode(...)` uniformly.
    unicode = str
    base_string_type = str
- # ---- globals
- DEBUG = False
- log = logging.getLogger("markdown")
- DEFAULT_TAB_WIDTH = 4
- SECRET_SALT = bytes(randint(0, 1000000))
- # MD5 function was previously used for this; the "md5" prefix was kept for
- # backwards compatibility.
- def _hash_text(s):
- return 'md5-' + sha256(SECRET_SALT + s.encode("utf-8")).hexdigest()[32:]
- # Table of hash values for escaped characters:
- g_escape_table = dict([(ch, _hash_text(ch))
- for ch in '\\`*_{}[]()>#+-.!'])
- # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
- # http://bumppo.net/projects/amputator/
- _AMPERSAND_RE = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
# ---- exceptions
class MarkdownError(Exception):
    """Base exception raised by this module."""
    pass
# ---- public api

def markdown_path(path, encoding="utf-8",
                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
                  safe_mode=None, extras=None, link_patterns=None,
                  footnote_title=None, footnote_return_symbol=None,
                  use_file_vars=False):
    """Read the Markdown file at `path` (decoded with `encoding`) and
    return its HTML rendering.

    All other arguments are passed straight through to the `Markdown`
    converter; see `Markdown.__init__` for their meaning.
    """
    # FIX: use a context manager so the file handle is closed even if
    # read() raises; the previous open()/read()/close() sequence leaked
    # the handle on error.
    with codecs.open(path, 'r', encoding) as fp:
        text = fp.read()
    return Markdown(html4tags=html4tags, tab_width=tab_width,
                    safe_mode=safe_mode, extras=extras,
                    link_patterns=link_patterns,
                    footnote_title=footnote_title,
                    footnote_return_symbol=footnote_return_symbol,
                    use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
             safe_mode=None, extras=None, link_patterns=None,
             footnote_title=None, footnote_return_symbol=None,
             use_file_vars=False, cli=False):
    """Convert Markdown `text` to HTML.

    Convenience wrapper: builds a one-shot `Markdown` converter with the
    given options and returns its `convert(text)` result.
    """
    converter = Markdown(
        html4tags=html4tags,
        tab_width=tab_width,
        safe_mode=safe_mode,
        extras=extras,
        link_patterns=link_patterns,
        footnote_title=footnote_title,
        footnote_return_symbol=footnote_return_symbol,
        use_file_vars=use_file_vars,
        cli=cli,
    )
    return converter.convert(text)
- class Markdown(object):
    # The dict of "extras" to enable in processing -- a mapping of
    # extra name to argument for the extra. Most extras do not have an
    # argument, in which case the value is None.
    #
    # This can be set via (a) subclassing and (b) the constructor
    # "extras" argument.
    extras = None

    # Per-document state; (re)initialized to dicts by reset() before each
    # conversion.
    urls = None
    titles = None
    html_blocks = None
    html_spans = None
    html_removed_text = "{(#HTML#)}"  # placeholder removed text that does not trigger bold
    html_removed_text_compat = "[HTML_REMOVED]"  # for compat with markdown.py
    _toc = None

    # Used to track when we're inside an ordered or unordered list
    # (see _ProcessListItems() for details):
    list_level = 0

    # Matches lines containing only spaces/tabs (stripped in convert()).
    _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
                 extras=None, link_patterns=None,
                 footnote_title=None, footnote_return_symbol=None,
                 use_file_vars=False, cli=False):
        """Create a Markdown converter.

        `html4tags` -- if true, emit HTML4-style empty elements (">")
            instead of XHTML-style (" />").
        `tab_width` -- number of spaces to which tabs are expanded.
        `safe_mode` -- sanitization mode for raw HTML; `True` is mapped to
            "replace" for backward compatibility.
        `extras` -- a list or dict of extra names to enable (merged over
            any class-level `extras`).
        `link_patterns`, `footnote_title`, `footnote_return_symbol`,
        `use_file_vars`, `cli` -- stored for use by the corresponding
            extras/processing steps.
        """
        if html4tags:
            self.empty_element_suffix = ">"
        else:
            self.empty_element_suffix = " />"
        self.tab_width = tab_width
        self.tab = tab_width * " "

        # For compatibility with earlier markdown2.py and with
        # markdown.py's safe_mode being a boolean,
        #   safe_mode == True -> "replace"
        if safe_mode is True:
            self.safe_mode = "replace"
        else:
            self.safe_mode = safe_mode

        # Massaging and building the "extras" info: normalize both the
        # class-level and constructor-supplied extras to name->arg dicts.
        if self.extras is None:
            self.extras = {}
        elif not isinstance(self.extras, dict):
            self.extras = dict([(e, None) for e in self.extras])
        if extras:
            if not isinstance(extras, dict):
                extras = dict([(e, None) for e in extras])
            self.extras.update(extras)
        assert isinstance(self.extras, dict)

        if "toc" in self.extras:
            if "header-ids" not in self.extras:
                self.extras["header-ids"] = None  # "toc" implies "header-ids"

            if self.extras["toc"] is None:
                self._toc_depth = 6
            else:
                # "toc" extra given as a dict, e.g. {"depth": 3}.
                self._toc_depth = self.extras["toc"].get("depth", 6)
        # Snapshot used by reset() to restore extras between documents.
        self._instance_extras = self.extras.copy()

        self.link_patterns = link_patterns
        self.footnote_title = footnote_title
        self.footnote_return_symbol = footnote_return_symbol
        self.use_file_vars = use_file_vars
        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
        self.cli = cli

        self._escape_table = g_escape_table.copy()
        if "smarty-pants" in self.extras:
            self._escape_table['"'] = _hash_text('"')
            self._escape_table["'"] = _hash_text("'")
- def reset(self):
- self.urls = {}
- self.titles = {}
- self.html_blocks = {}
- self.html_spans = {}
- self.list_level = 0
- self.extras = self._instance_extras.copy()
- if "footnotes" in self.extras:
- self.footnotes = {}
- self.footnote_ids = []
- if "header-ids" in self.extras:
- self._count_from_header_id = defaultdict(int)
- if "metadata" in self.extras:
- self.metadata = {}
- self._toc = None
    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
    # should only be used in <a> tags with an "href" attribute. The same
    # applies to target="_blank" (opens the linked document in a new window
    # or tab), so this pattern requires an href whose value does not start
    # with "#" (e.g. footnote back-links are excluded).
    _a_nofollow_or_blank_links = re.compile(r"""
        <(a)
        (
            [^>]*
            href=   # href is required
            ['"]?   # HTML5 attribute values do not have to be quoted
            [^#'"]  # We don't want to match href values that start with # (like footnotes)
        )
        """,
        re.IGNORECASE | re.VERBOSE
    )
    def convert(self, text):
        """Convert the given Markdown `text` to HTML.

        Returns a `UnicodeWithAttrs` string; a `toc_html` attribute is
        attached when the "toc" extra produced a table of contents, and a
        `metadata` attribute when the "metadata" extra is enabled.
        """
        # Main function. The order in which other subs are called here is
        # essential. Link and image substitutions need to happen before
        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
        # and <img> tags get encoded.

        # Clear the global hashes. If we don't clear these, you get conflicts
        # from other articles when generating a page which contains more than
        # one article (e.g. an index page that shows the N most recent
        # articles):
        self.reset()

        if not isinstance(text, unicode):
            # TODO: perhaps shouldn't presume UTF-8 for string input?
            text = unicode(text, 'utf-8')

        if self.use_file_vars:
            # Look for emacs-style file variable hints: a "markdown-extras"
            # file var turns on additional extras for this document.
            emacs_vars = self._get_emacs_vars(text)
            if "markdown-extras" in emacs_vars:
                splitter = re.compile("[ ,]+")
                for e in splitter.split(emacs_vars["markdown-extras"]):
                    if '=' in e:
                        ename, earg = e.split('=', 1)
                        try:
                            earg = int(earg)
                        except ValueError:
                            pass
                    else:
                        ename, earg = e, None
                    self.extras[ename] = earg

        # Standardize line endings:
        text = text.replace("\r\n", "\n")
        text = text.replace("\r", "\n")

        # Make sure $text ends with a couple of newlines:
        text += "\n\n"

        # Convert all tabs to spaces.
        text = self._detab(text)

        # Strip any lines consisting only of spaces and tabs.
        # This makes subsequent regexen easier to write, because we can
        # match consecutive blank lines with /\n+/ instead of something
        # contorted like /[ \t]*\n+/ .
        text = self._ws_only_line_re.sub("", text)

        # strip metadata from head and extract
        if "metadata" in self.extras:
            text = self._extract_metadata(text)

        text = self.preprocess(text)

        if "fenced-code-blocks" in self.extras and not self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        if self.safe_mode:
            text = self._hash_html_spans(text)

        # Turn block-level HTML blocks into hash entries
        text = self._hash_html_blocks(text, raw=True)

        if "fenced-code-blocks" in self.extras and self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        # Because numbering references aren't links (yet?) then we can do everything associated with counters
        # before we get started
        if "numbering" in self.extras:
            text = self._do_numbering(text)

        # Strip link definitions, store in hashes.
        if "footnotes" in self.extras:
            # Must do footnotes first because an unlucky footnote defn
            # looks like a link defn:
            #   [^4]: this "looks like a link defn"
            text = self._strip_footnote_definitions(text)
        text = self._strip_link_definitions(text)

        text = self._run_block_gamut(text)

        if "footnotes" in self.extras:
            text = self._add_footnotes(text)

        text = self.postprocess(text)

        text = self._unescape_special_chars(text)

        if self.safe_mode:
            text = self._unhash_html_spans(text)
            # return the removed text warning to its markdown.py compatible form
            text = text.replace(self.html_removed_text, self.html_removed_text_compat)

        do_target_blank_links = "target-blank-links" in self.extras
        do_nofollow_links = "nofollow" in self.extras

        # Inject rel/target attributes into <a href=...> tags as requested.
        if do_target_blank_links and do_nofollow_links:
            text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="nofollow noopener" target="_blank"\2', text)
        elif do_target_blank_links:
            text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="noopener" target="_blank"\2', text)
        elif do_nofollow_links:
            text = self._a_nofollow_or_blank_links.sub(r'<\1 rel="nofollow"\2', text)

        if "toc" in self.extras and self._toc:
            self._toc_html = calculate_toc_html(self._toc)

            # Prepend toc html to output
            if self.cli:
                text = '{}\n{}'.format(self._toc_html, text)

        text += "\n"

        # Attach attrs to output
        rv = UnicodeWithAttrs(text)

        if "toc" in self.extras and self._toc:
            rv.toc_html = self._toc_html

        if "metadata" in self.extras:
            rv.metadata = self.metadata
        return rv
    def postprocess(self, text):
        """A hook for subclasses to do some postprocessing of the html, if
        desired. This is called before unescaping of special chars and
        unhashing of raw HTML spans. Returns the (possibly modified) HTML.

        The default implementation returns `text` unchanged.
        """
        return text
    def preprocess(self, text):
        """A hook for subclasses to do some preprocessing of the Markdown, if
        desired. This is called after basic formatting of the text, but prior
        to any extras, safe mode, etc. processing. Returns the (possibly
        modified) Markdown text.

        The default implementation returns `text` unchanged.
        """
        return text
    # Content is metadata if it starts with optional '---'-fenced `key: value`
    # pairs. E.g. (indented for presentation):
    #   ---
    #   foo: bar
    #   another-var: blah blah
    #   ---
    #   # header
    # or:
    #   foo: bar
    #   another-var: blah blah
    #
    #   # header
    _meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?((?:[\S\w]+\s*:(?:\n+[ \t]+.*)+)|(?:.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|(?:\s*[\S\w]+\s*:(?! >)[ \t]*.*\n?))(?:---[\ \t]*\n)?', re.MULTILINE)
    # A single-line `key: value` pair (not followed by " >").
    _key_val_pat = re.compile(r"[\S\w]+\s*:(?! >)[ \t]*.*\n?", re.MULTILINE)
    # This allows a block of the form:
    #   key: >
    #       value
    # where the value continues over multiple (indented) lines.
    _key_val_block_pat = re.compile(
        r"(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE
    )
    _key_val_list_pat = re.compile(
        r"^-(?:[ \t]*([^\n]*)(?:[ \t]*[:-][ \t]*(\S+))?)(?:\n((?:[ \t]+[^\n]+\n?)+))?",
        re.MULTILINE,
    )
    _key_val_dict_pat = re.compile(
        r"^([^:\n]+)[ \t]*:[ \t]*([^\n]*)(?:((?:\n[ \t]+[^\n]+)+))?", re.MULTILINE
    )  # grp0: key, grp1: value, grp2: multiline value
    # A '---' metadata fence line.
    _meta_data_fence_pattern = re.compile(r'^---[\ \t]*\n', re.MULTILINE)
    # A completely empty line (separates leading metadata from the body).
    _meta_data_newline = re.compile("^\n", re.MULTILINE)
- def _extract_metadata(self, text):
- if text.startswith("---"):
- fence_splits = re.split(self._meta_data_fence_pattern, text, maxsplit=2)
- metadata_content = fence_splits[1]
- match = re.findall(self._meta_data_pattern, metadata_content)
- if not match:
- return text
- tail = fence_splits[2]
- else:
- metadata_split = re.split(self._meta_data_newline, text, maxsplit=1)
- metadata_content = metadata_split[0]
- match = re.findall(self._meta_data_pattern, metadata_content)
- if not match:
- return text
- tail = metadata_split[1]
- def parse_structured_value(value):
- vs = value.lstrip()
- vs = value.replace(v[: len(value) - len(vs)], "\n")[1:]
- # List
- if vs.startswith("-"):
- r = []
- for match in re.findall(self._key_val_list_pat, vs):
- if match[0] and not match[1] and not match[2]:
- r.append(match[0].strip())
- elif match[0] == ">" and not match[1] and match[2]:
- r.append(match[2].strip())
- elif match[0] and match[1]:
- r.append({match[0].strip(): match[1].strip()})
- elif not match[0] and not match[1] and match[2]:
- r.append(parse_structured_value(match[2]))
- else:
- # Broken case
- pass
- return r
- # Dict
- else:
- return {
- match[0].strip(): (
- match[1].strip()
- if match[1]
- else parse_structured_value(match[2])
- )
- for match in re.findall(self._key_val_dict_pat, vs)
- }
- for item in match:
- k, v = item.split(":", 1)
- # Multiline value
- if v[:3] == " >\n":
- self.metadata[k.strip()] = v[3:].strip()
- # Empty value
- elif v == "\n":
- self.metadata[k.strip()] = ""
- # Structured value
- elif v[0] == "\n":
- self.metadata[k.strip()] = parse_structured_value(v)
- # Simple value
- else:
- self.metadata[k.strip()] = v.strip()
- return tail
    # Matches a '-*- var: val; ... -*-' one-liner; group 1 is the var string.
    _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*(?:(\S[^\r\n]*?)([\r\n]\s*)?)?-\*-", re.UNICODE)

    # This regular expression is intended to match blocks like this:
    #   PREFIX Local Variables: SUFFIX
    #   PREFIX mode: Tcl SUFFIX
    #   PREFIX End: SUFFIX
    # Some notes:
    # - "[ \t]" is used instead of "\s" to specifically exclude newlines
    # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
    #   not like anything other than Unix-style line terminators.
    # NOTE(review): the prefix char class "[^\r\n|\n|\r]" looks like it was
    # meant to be "[^\r\n]" (a "|" inside a class is literal) -- confirm
    # before changing, since "\1" below back-references the prefix group.
    _emacs_local_vars_pat = re.compile(r"""^
        (?P<prefix>(?:[^\r\n|\n|\r])*?)
        [\ \t]*Local\ Variables:[\ \t]*
        (?P<suffix>.*?)(?:\r\n|\n|\r)
        (?P<content>.*?\1End:)
        """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
- def _get_emacs_vars(self, text):
- """Return a dictionary of emacs-style local variables.
- Parsing is done loosely according to this spec (and according to
- some in-practice deviations from this):
- http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
- """
- emacs_vars = {}
- SIZE = pow(2, 13) # 8kB
- # Search near the start for a '-*-'-style one-liner of variables.
- head = text[:SIZE]
- if "-*-" in head:
- match = self._emacs_oneliner_vars_pat.search(head)
- if match:
- emacs_vars_str = match.group(1)
- assert '\n' not in emacs_vars_str
- emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
- if s.strip()]
- if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
- # While not in the spec, this form is allowed by emacs:
- # -*- Tcl -*-
- # where the implied "variable" is "mode". This form
- # is only allowed if there are no other variables.
- emacs_vars["mode"] = emacs_var_strs[0].strip()
- else:
- for emacs_var_str in emacs_var_strs:
- try:
- variable, value = emacs_var_str.strip().split(':', 1)
- except ValueError:
- log.debug("emacs variables error: malformed -*- "
- "line: %r", emacs_var_str)
- continue
- # Lowercase the variable name because Emacs allows "Mode"
- # or "mode" or "MoDe", etc.
- emacs_vars[variable.lower()] = value.strip()
- tail = text[-SIZE:]
- if "Local Variables" in tail:
- match = self._emacs_local_vars_pat.search(tail)
- if match:
- prefix = match.group("prefix")
- suffix = match.group("suffix")
- lines = match.group("content").splitlines(0)
- # print "prefix=%r, suffix=%r, content=%r, lines: %s"\
- # % (prefix, suffix, match.group("content"), lines)
- # Validate the Local Variables block: proper prefix and suffix
- # usage.
- for i, line in enumerate(lines):
- if not line.startswith(prefix):
- log.debug("emacs variables error: line '%s' "
- "does not use proper prefix '%s'"
- % (line, prefix))
- return {}
- # Don't validate suffix on last line. Emacs doesn't care,
- # neither should we.
- if i != len(lines)-1 and not line.endswith(suffix):
- log.debug("emacs variables error: line '%s' "
- "does not use proper suffix '%s'"
- % (line, suffix))
- return {}
- # Parse out one emacs var per line.
- continued_for = None
- for line in lines[:-1]: # no var on the last line ("PREFIX End:")
- if prefix: line = line[len(prefix):] # strip prefix
- if suffix: line = line[:-len(suffix)] # strip suffix
- line = line.strip()
- if continued_for:
- variable = continued_for
- if line.endswith('\\'):
- line = line[:-1].rstrip()
- else:
- continued_for = None
- emacs_vars[variable] += ' ' + line
- else:
- try:
- variable, value = line.split(':', 1)
- except ValueError:
- log.debug("local variables error: missing colon "
- "in local variables entry: '%s'" % line)
- continue
- # Do NOT lowercase the variable name, because Emacs only
- # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
- value = value.strip()
- if value.endswith('\\'):
- value = value[:-1].rstrip()
- continued_for = variable
- else:
- continued_for = None
- emacs_vars[variable] = value
- # Unquote values.
- for var, val in list(emacs_vars.items()):
- if len(val) > 1 and (val.startswith('"') and val.endswith('"')
- or val.startswith('"') and val.endswith('"')):
- emacs_vars[var] = val[1:-1]
- return emacs_vars
- def _detab_line(self, line):
- r"""Recusively convert tabs to spaces in a single line.
- Called from _detab()."""
- if '\t' not in line:
- return line
- chunk1, chunk2 = line.split('\t', 1)
- chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width))
- output = chunk1 + chunk2
- return self._detab_line(output)
- def _detab(self, text):
- r"""Iterate text line by line and convert tabs to spaces.
- >>> m = Markdown()
- >>> m._detab("\tfoo")
- ' foo'
- >>> m._detab(" \tfoo")
- ' foo'
- >>> m._detab("\t foo")
- ' foo'
- >>> m._detab(" foo")
- ' foo'
- >>> m._detab(" foo\n\tbar\tblam")
- ' foo\n bar blam'
- """
- if '\t' not in text:
- return text
- output = []
- for line in text.splitlines():
- output.append(self._detab_line(line))
- return '\n'.join(output)
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
# NOTE: _html5tags starts with '|', so plain string concatenation below
# extends the regex alternation correctly.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'

# Tag alternation for the *strict* block matcher (includes ins/del).
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags

# Strict matcher: the closing tag must itself start a line, so nested
# same-name tags must be indented for the outer pair to match.
_strict_tag_block_re = re.compile(r"""
    ( # save in \1
        ^ # start of line (with re.M)
        <(%s) # start tag = \2
        \b # word break
        (.*\n)*? # any number of lines, minimally matching
        </\2> # the matching end tag
        [ \t]* # trailing spaces/tabs
        (?=\n+|\Z) # followed by a newline or end of document
    )
    """ % _block_tags_a,
    re.X | re.M)

# Tag alternation for the *liberal* block matcher: same as
# _block_tags_a but without 'ins' and 'del'.
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags

# Liberal matcher: the closing tag may appear mid-line (note the '.*'
# before the end tag).
_liberal_tag_block_re = re.compile(r"""
    ( # save in \1
        ^ # start of line (with re.M)
        <(%s) # start tag = \2
        \b # word break
        (.*\n)*? # any number of lines, minimally matching
        .*</\2> # the matching end tag
        [ \t]* # trailing spaces/tabs
        (?=\n+|\Z) # followed by a newline or end of document
    )
    """ % _block_tags_b,
    re.X | re.M)

# Detects a markdown="1" attribute on an HTML tag's first line (used by
# the "markdown-in-html" extra).
_html_markdown_attr_re = re.compile(
    r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
    """re.sub callback: replace a matched HTML block with a hash key,
    recording the block in self.html_blocks for later unhashing.

    @param raw {bool} True when the block came from the original
        source (sanitized in safe mode).
    """
    def stash(fragment):
        # Record `fragment` under its hash key and return the key.
        key = _hash_text(fragment)
        self.html_blocks[key] = fragment
        return key

    html = match.group(1)
    if raw and self.safe_mode:
        html = self._sanitize_html(html)
    elif 'markdown-in-html' in self.extras and 'markdown=' in html:
        # markdown="1": hash only the first and last lines (the tags)
        # and leave the interior for normal Markdown processing.
        head = html.split('\n', 1)[0]
        attr_match = self._html_markdown_attr_re.search(head)
        if attr_match:
            all_lines = html.split('\n')
            middle = '\n'.join(all_lines[1:-1])
            tail = all_lines[-1]
            # Drop the markdown="1" attribute from the opening tag.
            head = head[:attr_match.start()] + head[attr_match.end():]
            return ''.join(["\n\n", stash(head),
                            "\n\n", middle, "\n\n",
                            stash(tail), "\n\n"])
    return "\n\n" + stash(html) + "\n\n"
def _hash_html_blocks(self, text, raw=False):
    """Hashify HTML blocks

    We only want to do this for block-level HTML tags, such as headers,
    lists, and tables. That's because we still want to wrap <p>s around
    "paragraphs" that are wrapped in non-block-level tags, such as anchors,
    phrase emphasis, and spans. The list of tags we're looking for is
    hard-coded.

    @param raw {boolean} indicates if these are raw HTML blocks in
        the original source. It makes a difference in "safe" mode.
    """
    if '<' not in text:
        return text

    # Pass `raw` value into our calls to self._hash_html_block_sub.
    hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)

    # First, look for nested blocks, e.g.:
    #   <div>
    #       <div>
    #       tags for inner block must be indented.
    #       </div>
    #   </div>
    #
    # The outermost tags must start at the left margin for this to match, and
    # the inner nested divs must be indented.
    # We need to do this before the next, more liberal match, because the next
    # match will start at the first `<div>` and stop at the first `</div>`.
    text = self._strict_tag_block_re.sub(hash_html_block_sub, text)

    # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
    text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)

    # Special case just for <hr />. It was easier to make a special
    # case than to make the other regex more complicated.
    if "<hr" in text:
        _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
        text = _hr_tag_re.sub(hash_html_block_sub, text)

    # Special case for standalone HTML comments:
    if "<!--" in text:
        start = 0
        while True:
            # Delimiters for next comment block.
            try:
                start_idx = text.index("<!--", start)
            except ValueError:
                break
            try:
                end_idx = text.index("-->", start_idx) + 3
            except ValueError:
                break

            # Start position for next comment block search.
            start = end_idx

            # Validate whitespace before comment.
            if start_idx:
                # - Up to `tab_width - 1` spaces before start_idx.
                #   (Walks start_idx left over leading spaces.)
                for i in range(self.tab_width - 1):
                    if text[start_idx - 1] != ' ':
                        break
                    start_idx -= 1
                    if start_idx == 0:
                        break

                # - Must be preceded by 2 newlines or hit the start of
                #   the document.
                if start_idx == 0:
                    pass
                elif start_idx == 1 and text[0] == '\n':
                    start_idx = 0  # to match minute detail of Markdown.pl regex
                elif text[start_idx-2:start_idx] == '\n\n':
                    pass
                else:
                    # NOTE: this aborts the entire comment scan, not
                    # just this comment (matches upstream behavior).
                    break

            # Validate whitespace after comment.
            # - Any number of spaces and tabs.
            while end_idx < len(text):
                if text[end_idx] not in ' \t':
                    break
                end_idx += 1
            # - Must be followed by 2 newlines or hit end of text.
            if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
                continue

            # Escape and hash (must match `_hash_html_block_sub`).
            html = text[start_idx:end_idx]
            if raw and self.safe_mode:
                html = self._sanitize_html(html)
            key = _hash_text(html)
            self.html_blocks[key] = html
            text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]

    if "xml" in self.extras:
        # Treat XML processing instructions and namespaced one-liner
        # tags as if they were block HTML tags. E.g., if standalone
        # (i.e. are their own paragraph), the following do not get
        # wrapped in a <p> tag:
        #    <?foo bar?>
        #
        #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
        _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
        text = _xml_oneliner_re.sub(hash_html_block_sub, text)

    return text
- def _strip_link_definitions(self, text):
- # Strips link definitions from text, stores the URLs and titles in
- # hash references.
- less_than_tab = self.tab_width - 1
- # Link defs are in the form:
- # [id]: url "optional title"
- _link_def_re = re.compile(r"""
- ^[ ]{0,%d}\[(.+)\]: # id = \1
- [ \t]*
- \n? # maybe *one* newline
- [ \t]*
- <?(.+?)>? # url = \2
- [ \t]*
- (?:
- \n? # maybe one newline
- [ \t]*
- (?<=\s) # lookbehind for whitespace
- ['"(]
- ([^\n]*) # title = \3
- ['")]
- [ \t]*
- )? # title is optional
- (?:\n+|\Z)
- """ % less_than_tab, re.X | re.M | re.U)
- return _link_def_re.sub(self._extract_link_def_sub, text)
- def _extract_link_def_sub(self, match):
- id, url, title = match.groups()
- key = id.lower() # Link IDs are case-insensitive
- self.urls[key] = self._encode_amps_and_angles(url)
- if title:
- self.titles[key] = title
- return ""
- def _do_numbering(self, text):
- ''' We handle the special extension for generic numbering for
- tables, figures etc.
- '''
- # First pass to define all the references
- self.regex_defns = re.compile(r'''
- \[\#(\w+) # the counter. Open square plus hash plus a word \1
- ([^@]*) # Some optional characters, that aren't an @. \2
- @(\w+) # the id. Should this be normed? \3
- ([^\]]*)\] # The rest of the text up to the terminating ] \4
- ''', re.VERBOSE)
- self.regex_subs = re.compile(r"\[@(\w+)\s*\]") # [@ref_id]
- counters = {}
- references = {}
- replacements = []
- definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
- reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>'
- for match in self.regex_defns.finditer(text):
- # We must have four match groups otherwise this isn't a numbering reference
- if len(match.groups()) != 4:
- continue
- counter = match.group(1)
- text_before = match.group(2).strip()
- ref_id = match.group(3)
- text_after = match.group(4)
- number = counters.get(counter, 1)
- references[ref_id] = (number, counter)
- replacements.append((match.start(0),
- definition_html.format(counter,
- ref_id,
- text_before,
- number,
- text_after),
- match.end(0)))
- counters[counter] = number + 1
- for repl in reversed(replacements):
- text = text[:repl[0]] + repl[1] + text[repl[2]:]
- # Second pass to replace the references with the right
- # value of the counter
- # Fwiw, it's vaguely annoying to have to turn the iterator into
- # a list and then reverse it but I can't think of a better thing to do.
- for match in reversed(list(self.regex_subs.finditer(text))):
- number, counter = references.get(match.group(1), (None, None))
- if number is not None:
- repl = reference_html.format(counter,
- match.group(1),
- number)
- else:
- repl = reference_html.format(match.group(1),
- 'countererror',
- '?' + match.group(1) + '?')
- if "smarty-pants" in self.extras:
- repl = repl.replace('"', self._escape_table['"'])
- text = text[:match.start()] + repl + text[match.end():]
- return text
def _extract_footnote_def_sub(self, match):
    """re.sub callback for _strip_footnote_definitions(): stash the
    footnote body under its normalized id and drop it from the text."""
    footnote_id, body = match.groups()
    # If the body begins on the id's own line it is indented; dedent it
    # (skipping the first line unless the body starts with a newline).
    body = _dedent(body, skip_first_line=not body.startswith('\n')).strip()
    normed_id = re.sub(r'\W', '-', footnote_id)
    # Ensure footnote text ends with a couple newlines (for some
    # block gamut matches).
    self.footnotes[normed_id] = body + "\n\n"
    return ""
- def _strip_footnote_definitions(self, text):
- """A footnote definition looks like this:
- [^note-id]: Text of the note.
- May include one or more indented paragraphs.
- Where,
- - The 'note-id' can be pretty much anything, though typically it
- is the number of the footnote.
- - The first paragraph may start on the next line, like so:
- [^note-id]:
- Text of the note.
- """
- less_than_tab = self.tab_width - 1
- footnote_def_re = re.compile(r'''
- ^[ ]{0,%d}\[\^(.+)\]: # id = \1
- [ \t]*
- ( # footnote text = \2
- # First line need not start with the spaces.
- (?:\s*.*\n+)
- (?:
- (?:[ ]{%d} | \t) # Subsequent lines must be indented.
- .*\n+
- )*
- )
- # Lookahead for non-space at line-start, or end of doc.
- (?:(?=^[ ]{0,%d}\S)|\Z)
- ''' % (less_than_tab, self.tab_width, self.tab_width),
- re.X | re.M)
- return footnote_def_re.sub(self._extract_footnote_def_sub, text)
# Horizontal rule: up to 3 leading spaces, then 3 or more of '-', '_'
# or '*', each optionally followed by up to two spaces.
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
- def _run_block_gamut(self, text):
- # These are all the transformations that form block-level
- # tags like paragraphs, headers, and list items.
- if "fenced-code-blocks" in self.extras:
- text = self._do_fenced_code_blocks(text)
- text = self._do_headers(text)
- # Do Horizontal Rules:
- # On the number of spaces in horizontal rules: The spec is fuzzy: "If
- # you wish, you may use spaces between the hyphens or asterisks."
- # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
- # hr chars to one or two. We'll reproduce that limit here.
- hr = "\n<hr"+self.empty_element_suffix+"\n"
- text = re.sub(self._hr_re, hr, text)
- text = self._do_lists(text)
- if "pyshell" in self.extras:
- text = self._prepare_pyshell_blocks(text)
- if "wiki-tables" in self.extras:
- text = self._do_wiki_tables(text)
- if "tables" in self.extras:
- text = self._do_tables(text)
- text = self._do_code_blocks(text)
- text = self._do_block_quotes(text)
- # We already ran _HashHTMLBlocks() before, in Markdown(), but that
- # was to escape raw HTML in the original Markdown source. This time,
- # we're escaping the markup we've just created, so that we don't wrap
- # <p> tags around block-level tags.
- text = self._hash_html_blocks(text)
- text = self._form_paragraphs(text)
- return text
def _pyshell_block_sub(self, match):
    """Turn one matched Python interactive-shell session into a code
    block: fenced (```pycon) when that extra is on, otherwise indented."""
    if "fenced-code-blocks" in self.extras:
        dedented = _dedent(match.group(0))
        return self._do_fenced_code_blocks("```pycon\n" + dedented + "```\n")
    lines = match.group(0).splitlines(0)
    _dedentlines(lines)
    indent = ' ' * self.tab_width
    body = ('\n' + indent).join(lines)
    # Leading newline separates from a possible cuddled paragraph.
    return '\n' + indent + body + '\n\n'
- def _prepare_pyshell_blocks(self, text):
- """Ensure that Python interactive shell sessions are put in
- code blocks -- even if not properly indented.
- """
- if ">>>" not in text:
- return text
- less_than_tab = self.tab_width - 1
- _pyshell_block_re = re.compile(r"""
- ^([ ]{0,%d})>>>[ ].*\n # first line
- ^(\1.*\S+.*\n)* # any number of subsequent lines
- ^\n # ends with a blank line
- """ % less_than_tab, re.M | re.X)
- return _pyshell_block_re.sub(self._pyshell_block_sub, text)
- def _table_sub(self, match):
- trim_space_re = '^[ \t\n]+|[ \t\n]+$'
- trim_bar_re = r'^\||\|$'
- split_bar_re = r'^\||(?<!\\)\|'
- escape_bar_re = r'\\\|'
- head, underline, body = match.groups()
- # Determine aligns for columns.
- cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)))]
- align_from_col_idx = {}
- for col_idx, col in enumerate(cols):
- if col[0] == ':' and col[-1] == ':':
- align_from_col_idx[col_idx] = ' style="text-align:center;"'
- elif col[0] == ':':
- align_from_col_idx[col_idx] = ' style="text-align:left;"'
- elif col[-1] == ':':
- align_from_col_idx[col_idx] = ' style="text-align:right;"'
- # thead
- hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<thead>', '<tr>']
- cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)))]
- for col_idx, col in enumerate(cols):
- hlines.append(' <th%s>%s</th>' % (
- align_from_col_idx.get(col_idx, ''),
- self._run_span_gamut(col)
- ))
- hlines.append('</tr>')
- hlines.append('</thead>')
- # tbody
- hlines.append('<tbody>')
- for line in body.strip('\n').split('\n'):
- hlines.append('<tr>')
- cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)))]
- for col_idx, col in enumerate(cols):
- hlines.append(' <td%s>%s</td>' % (
- align_from_col_idx.get(col_idx, ''),
- self._run_span_gamut(col)
- ))
- hlines.append('</tr>')
- hlines.append('</tbody>')
- hlines.append('</table>')
- return '\n'.join(hlines) + '\n'
- def _do_tables(self, text):
- """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
- https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
- """
- less_than_tab = self.tab_width - 1
- table_re = re.compile(r'''
- (?:(?<=\n\n)|\A\n?) # leading blank line
- ^[ ]{0,%d} # allowed whitespace
- (.*[|].*) \n # $1: header row (at least one pipe)
- ^[ ]{0,%d} # allowed whitespace
- ( # $2: underline row
- # underline row with leading bar
- (?: \|\ *:?-+:?\ * )+ \|? \s? \n
- |
- # or, underline row without leading bar
- (?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \s? \n
- )
- ( # $3: data rows
- (?:
- ^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces
- .*\|.* \n
- )+
- )
- ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
- return table_re.sub(self._table_sub, text)
- def _wiki_table_sub(self, match):
- ttext = match.group(0).strip()
- # print('wiki table: %r' % match.group(0))
- rows = []
- for line in ttext.splitlines(0):
- line = line.strip()[2:-2].strip()
- row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
- rows.append(row)
- # from pprint import pprint
- # pprint(rows)
- hlines = []
- def add_hline(line, indents=0):
- hlines.append((self.tab * indents) + line)
- def format_cell(text):
- return self._run_span_gamut(re.sub(r"^\s*~", "", cell).strip(" "))
- add_hline('<table%s>' % self._html_class_str_from_tag('table'))
- # Check if first cell of first row is a header cell. If so, assume the whole row is a header row.
- if rows and rows[0] and re.match(r"^\s*~", rows[0][0]):
- add_hline('<thead>', 1)
- add_hline('<tr>', 2)
- for cell in rows[0]:
- add_hline("<th>{}</th>".format(format_cell(cell)), 3)
- add_hline('</tr>', 2)
- add_hline('</thead>', 1)
- # Only one header row allowed.
- rows = rows[1:]
- # If no more rows, don't create a tbody.
- if rows:
- add_hline('<tbody>', 1)
- for row in rows:
- add_hline('<tr>', 2)
- for cell in row:
- add_hline('<td>{}</td>'.format(format_cell(cell)), 3)
- add_hline('</tr>', 2)
- add_hline('</tbody>', 1)
- add_hline('</table>')
- return '\n'.join(hlines) + '\n'
- def _do_wiki_tables(self, text):
- # Optimization.
- if "||" not in text:
- return text
- less_than_tab = self.tab_width - 1
- wiki_table_re = re.compile(r'''
- (?:(?<=\n\n)|\A\n?) # leading blank line
- ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
- (^\1\|\|.+?\|\|\n)* # any number of subsequent lines
- ''' % less_than_tab, re.M | re.X)
- return wiki_table_re.sub(self._wiki_table_sub, text)
- def _run_span_gamut(self, text):
- # These are all the transformations that occur *within* block-level
- # tags like paragraphs, headers, and list items.
- text = self._do_code_spans(text)
- text = self._escape_special_chars(text)
- # Process anchor and image tags.
- if "link-patterns" in self.extras:
- text = self._do_link_patterns(text)
- text = self._do_links(text)
- # Make links out of things like `<http://example.com/>`
- # Must come after _do_links(), because you can use < and >
- # delimiters in inline links like [this](<url>).
- text = self._do_auto_links(text)
- text = self._encode_amps_and_angles(text)
- if "strike" in self.extras:
- text = self._do_strike(text)
- if "underline" in self.extras:
- text = self._do_underline(text)
- text = self._do_italics_and_bold(text)
- if "smarty-pants" in self.extras:
- text = self._do_smart_punctuation(text)
- # Do hard breaks:
- if "break-on-newline" in self.extras:
- text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
- else:
- text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
- return text
# "Sorta" because auto-links are identified as "tag" tokens.
# Splitting text with this regex alternates plain-text tokens with
# markup tokens (tags, auto-links, comments, processing instructions).
_sorta_html_tokenize_re = re.compile(r"""
    (
        # tag
        </?
        (?:\w+)                                     # tag name
        (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))*  # attributes
        \s*/?>
        |
        # auto-link (e.g., <http://www.activestate.com/>)
        <\w+[^>]*>
        |
        <!--.*?--> # comment
        |
        <\?.*?\?> # processing instruction
    )
    """, re.X)
- def _escape_special_chars(self, text):
- # Python markdown note: the HTML tokenization here differs from
- # that in Markdown.pl, hence the behaviour for subtle cases can
- # differ (I believe the tokenizer here does a better job because
- # it isn't susceptible to unmatched '<' and '>' in HTML tags).
- # Note, however, that '>' is not allowed in an auto-link URL
- # here.
- escaped = []
- is_html_markup = False
- for token in self._sorta_html_tokenize_re.split(text):
- if is_html_markup:
- # Within tags/HTML-comments/auto-links, encode * and _
- # so they don't conflict with their use in Markdown for
- # italics and strong. We're replacing each such
- # character with its corresponding MD5 checksum value;
- # this is likely overkill, but it should prevent us from
- # colliding with the escape values by accident.
- escaped.append(token.replace('*', self._escape_table['*'])
- .replace('_', self._escape_table['_']))
- else:
- escaped.append(self._encode_backslash_escapes(token))
- is_html_markup = not is_html_markup
- return ''.join(escaped)
def _hash_html_spans(self, text):
    """Replace literal HTML span tokens with hash keys (used for
    safe_mode); auto-links are left alone."""
    def looks_like_auto_link(s):
        if ':' in s and self._auto_link_re.match(s):
            return True
        if '@' in s and self._auto_email_link_re.match(s):
            return True
        return False

    out = []
    within_markup = False
    for tok in self._sorta_html_tokenize_re.split(text):
        if within_markup and not looks_like_auto_link(tok):
            clean = self._sanitize_html(tok)
            key = _hash_text(clean)
            self.html_spans[key] = clean
            out.append(key)
        else:
            out.append(self._encode_incomplete_tags(tok))
        # re.split with a capturing group alternates text/markup.
        within_markup = not within_markup
    return ''.join(out)
- def _unhash_html_spans(self, text):
- for key, sanitized in list(self.html_spans.items()):
- text = text.replace(key, sanitized)
- return text
- def _sanitize_html(self, s):
- if self.safe_mode == "replace":
- return self.html_removed_text
- elif self.safe_mode == "escape":
- replacements = [
- ('&', '&'),
- ('<', '<'),
- ('>', '>'),
- ]
- for before, after in replacements:
- s = s.replace(before, after)
- return s
- else:
- raise MarkdownError("invalid value for 'safe_mode': %r (must be "
- "'escape' or 'replace')" % self.safe_mode)
# Matches the optional quoted title plus the closing ')' at the tail of
# an inline link: [text](url "title").
_inline_link_title = re.compile(r'''
    (                   # \1
        [ \t]+
        (['"])          # quote char = \2
        (?P<title>.*?)
        \2
    )?                  # title is optional
    \)$
    ''', re.X | re.S)

# Matches the `[id]` tail of a reference-style link: [text][id].
_tail_of_reference_link_re = re.compile(r'''
    # Match tail of: [text][id]
    [ ]?            # one optional space
    (?:\n[ ]*)?     # one optional newline followed by spaces
    \[
        (?P<id>.*?)
    \]
    ''', re.X | re.S)

# Run of whitespace -- used by _find_non_whitespace() to skip ahead.
_whitespace = re.compile(r'\s*')

# Strips one pair of angle brackets: '<url>...' -> 'url'.
_strip_anglebrackets = re.compile(r'<(.*)>.*')
- def _find_non_whitespace(self, text, start):
- """Returns the index of the first non-whitespace character in text
- after (and including) start
- """
- match = self._whitespace.match(text, start)
- return match.end()
- def _find_balanced(self, text, start, open_c, close_c):
- """Returns the index where the open_c and close_c characters balance
- out - the same number of open_c and close_c are encountered - or the
- end of string if it's reached before the balance point is found.
- """
- i = start
- l = len(text)
- count = 1
- while count > 0 and i < l:
- if text[i] == open_c:
- count += 1
- elif text[i] == close_c:
- count -= 1
- i += 1
- return i
- def _extract_url_and_title(self, text, start):
- """Extracts the url and (optional) title from the tail of a link"""
- # text[start] equals the opening parenthesis
- idx = self._find_non_whitespace(text, start+1)
- if idx == len(text):
- return None, None, None
- end_idx = idx
- has_anglebrackets = text[idx] == "<"
- if has_anglebrackets:
- end_idx = self._find_balanced(text, end_idx+1, "<", ">")
- end_idx = self._find_balanced(text, end_idx, "(", ")")
- match = self._inline_link_title.search(text, idx, end_idx)
- if not match:
- return None, None, None
- url, title = text[idx:match.start()], match.group("title")
- if has_anglebrackets:
- url = self._strip_anglebrackets.sub(r'\1', url)
- return url, title, end_idx
# Link protocols allowed in safe_mode ('#fragment' links are also
# accepted -- see the safe_link check in _do_links()).
_safe_protocols = re.compile(r'(https?|ftp):', re.I)
def _do_links(self, text):
    """Turn Markdown link shortcuts into XHTML <a> and <img> tags.

    This is a combination of Markdown.pl's _DoAnchors() and
    _DoImages(). They are done together because that simplified the
    approach. It was necessary to use a different approach than
    Markdown.pl because of the lack of atomic matching support in
    Python's regex engine used in $g_nested_brackets.

    The text is scanned left-to-right; each recognized link is spliced
    into `text` in place and the scan position advanced past it.
    """
    MAX_LINK_TEXT_SENTINEL = 3000  # markdown2 issue 24

    # `anchor_allowed_pos` is used to support img links inside
    # anchors, but not anchors inside anchors. An anchor's start
    # pos must be `>= anchor_allowed_pos`.
    anchor_allowed_pos = 0

    curr_pos = 0
    while True:  # Handle the next link.
        # The next '[' is the start of:
        # - an inline anchor:   [text](url "title")
        # - a reference anchor: [text][id]
        # - an inline img:      ![text](url "title")
        # - a reference img:    ![text][id]
        # - a footnote ref:     [^id]
        #   (Only if 'footnotes' extra enabled)
        # - a footnote defn:    [^id]: ...
        #   (Only if 'footnotes' extra enabled) These have already
        #   been stripped in _strip_footnote_definitions() so no
        #   need to watch for them.
        # - a link definition:  [id]: url "title"
        #   These have already been stripped in
        #   _strip_link_definitions() so no need to watch for them.
        # - not markup:         [...anything else...
        try:
            start_idx = text.index('[', curr_pos)
        except ValueError:
            break
        # Recomputed each iteration: `text` changes length as links
        # are replaced.
        text_length = len(text)

        # Find the matching closing ']'.
        # Markdown.pl allows *matching* brackets in link text so we
        # will here too. Markdown.pl *doesn't* currently allow
        # matching brackets in img alt text -- we'll differ in that
        # regard.
        bracket_depth = 0
        for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
                                        text_length)):
            ch = text[p]
            if ch == ']':
                bracket_depth -= 1
                if bracket_depth < 0:
                    break
            elif ch == '[':
                bracket_depth += 1
        else:
            # Closing bracket not found within sentinel length.
            # This isn't markup.
            curr_pos = start_idx + 1
            continue
        link_text = text[start_idx+1:p]

        # Fix for issue 341 - Injecting XSS into link text
        if self.safe_mode:
            link_text = self._hash_html_spans(link_text)
            link_text = self._unhash_html_spans(link_text)

        # Possibly a footnote ref?
        if "footnotes" in self.extras and link_text.startswith("^"):
            normed_id = re.sub(r'\W', '-', link_text[1:])
            if normed_id in self.footnotes:
                self.footnote_ids.append(normed_id)
                result = '<sup class="footnote-ref" id="fnref-%s">' \
                         '<a href="#fn-%s">%s</a></sup>' \
                         % (normed_id, normed_id, len(self.footnote_ids))
                text = text[:start_idx] + result + text[p+1:]
            else:
                # This id isn't defined, leave the markup alone.
                curr_pos = p+1
            continue

        # Now determine what this is by the remainder.
        p += 1
        if p == text_length:
            return text

        # Inline anchor or img?
        if text[p] == '(':  # attempt at perf improvement
            url, title, url_end_idx = self._extract_url_and_title(text, p)
            if url is not None:
                # Handle an inline anchor or img.
                is_img = start_idx > 0 and text[start_idx-1] == "!"
                if is_img:
                    start_idx -= 1

                # We've got to encode these to avoid conflicting
                # with italics/bold.
                url = url.replace('*', self._escape_table['*']) \
                         .replace('_', self._escape_table['_'])
                if title:
                    title_str = ' title="%s"' % (
                        _xml_escape_attr(title)
                            .replace('*', self._escape_table['*'])
                            .replace('_', self._escape_table['_']))
                else:
                    title_str = ''
                if is_img:
                    img_class_str = self._html_class_str_from_tag("img")
                    result = '<img src="%s" alt="%s"%s%s%s' \
                        % (_html_escape_url(url, safe_mode=self.safe_mode),
                           _xml_escape_attr(link_text),
                           title_str,
                           img_class_str,
                           self.empty_element_suffix)
                    if "smarty-pants" in self.extras:
                        result = result.replace('"', self._escape_table['"'])
                    curr_pos = start_idx + len(result)
                    text = text[:start_idx] + result + text[url_end_idx:]
                elif start_idx >= anchor_allowed_pos:
                    # In safe mode only safe-protocol and '#fragment'
                    # urls are emitted; others become href="#".
                    safe_link = self._safe_protocols.match(url) or url.startswith('#')
                    if self.safe_mode and not safe_link:
                        result_head = '<a href="#"%s>' % (title_str)
                    else:
                        result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
                    result = '%s%s</a>' % (result_head, link_text)
                    if "smarty-pants" in self.extras:
                        result = result.replace('"', self._escape_table['"'])
                    # <img> allowed from curr_pos on, <a> from
                    # anchor_allowed_pos on.
                    curr_pos = start_idx + len(result_head)
                    anchor_allowed_pos = start_idx + len(result)
                    text = text[:start_idx] + result + text[url_end_idx:]
                else:
                    # Anchor not allowed here.
                    curr_pos = start_idx + 1
                continue

        # Reference anchor or img?
        else:
            match = self._tail_of_reference_link_re.match(text, p)
            if match:
                # Handle a reference-style anchor or img.
                is_img = start_idx > 0 and text[start_idx-1] == "!"
                if is_img:
                    start_idx -= 1
                link_id = match.group("id").lower()
                if not link_id:
                    link_id = link_text.lower()  # for links like [this][]
                if link_id in self.urls:
                    url = self.urls[link_id]
                    # We've got to encode these to avoid conflicting
                    # with italics/bold.
                    url = url.replace('*', self._escape_table['*']) \
                             .replace('_', self._escape_table['_'])
                    title = self.titles.get(link_id)
                    if title:
                        title = _xml_escape_attr(title) \
                            .replace('*', self._escape_table['*']) \
                            .replace('_', self._escape_table['_'])
                        title_str = ' title="%s"' % title
                    else:
                        title_str = ''
                    if is_img:
                        img_class_str = self._html_class_str_from_tag("img")
                        result = '<img src="%s" alt="%s"%s%s%s' \
                            % (_html_escape_url(url, safe_mode=self.safe_mode),
                               _xml_escape_attr(link_text),
                               title_str,
                               img_class_str,
                               self.empty_element_suffix)
                        if "smarty-pants" in self.extras:
                            result = result.replace('"', self._escape_table['"'])
                        curr_pos = start_idx + len(result)
                        text = text[:start_idx] + result + text[match.end():]
                    elif start_idx >= anchor_allowed_pos:
                        if self.safe_mode and not self._safe_protocols.match(url):
                            result_head = '<a href="#"%s>' % (title_str)
                        else:
                            result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
                        result = '%s%s</a>' % (result_head, link_text)
                        if "smarty-pants" in self.extras:
                            result = result.replace('"', self._escape_table['"'])
                        # <img> allowed from curr_pos on, <a> from
                        # anchor_allowed_pos on.
                        curr_pos = start_idx + len(result_head)
                        anchor_allowed_pos = start_idx + len(result)
                        text = text[:start_idx] + result + text[match.end():]
                    else:
                        # Anchor not allowed here.
                        curr_pos = start_idx + 1
                else:
                    # This id isn't defined, leave the markup alone.
                    curr_pos = match.end()
                continue

        # Otherwise, it isn't markup.
        curr_pos = start_idx + 1
    return text
def header_id_from_text(self, text, prefix, n):
    """Generate a header id attribute value from the given header
    HTML content.

    This is only called if the "header-ids" extra is enabled.
    Subclasses may override this for different header ids.

    @param text {str} The text of the header tag
    @param prefix {str} The requested prefix for header ids. This is the
        value of the "header-ids" extra key, if any. Otherwise, None.
    @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
    @returns {str} The value for the header tag's "id" attribute. Return
        None to not have an id attribute and to exclude this header from
        the TOC (if the "toc" extra is specified).
    """
    header_id = _slugify(text)
    if prefix and isinstance(prefix, base_string_type):
        header_id = prefix + '-' + header_id

    # De-duplicate: the first empty or repeated slug gets a numeric
    # suffix.
    seen = self._count_from_header_id
    seen[header_id] += 1
    if not header_id or seen[header_id] > 1:
        header_id += '-%s' % seen[header_id]
    return header_id
- def _toc_add_entry(self, level, id, name):
- if level > self._toc_depth:
- return
- if self._toc is None:
- self._toc = []
- self._toc.append((level, id, self._unescape_special_chars(name)))
# Header pattern: the first alternative matches Setext headers
# (underlined with '=' or '-'); the second matches atx headers
# ('#'..'######').  The '%s' is the quantifier on the space after the
# #'s: '*' normally, '+' for the "tag-friendly" extra (which then
# requires at least one space, so '#word' is left alone).
_h_re_base = r'''
    (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
    |
    (^(\#{1,6})  # \1 = string of #'s
    [ \t]%s
    (.+?)       # \2 = Header text
    [ \t]*
    (?<!\\)     # ensure not an escaped trailing '#'
    \#*         # optional closing #'s (not counted)
    \n+
    )
    '''

_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
- def _h_sub(self, match):
- if match.group(1) is not None and match.group(3) == "-":
- return match.group(1)
- elif match.group(1) is not None:
- # Setext header
- n = {"=": 1, "-": 2}[match.group(3)[0]]
- header_group = match.group(2)
- else:
- # atx header
- n = len(match.group(5))
- header_group = match.group(6)
- demote_headers = self.extras.get("demote-headers")
- if demote_headers:
- n = min(n + demote_headers, 6)
- header_id_attr = ""
- if "header-ids" in self.extras:
- header_id = self.header_id_from_text(header_group,
- self.extras["header-ids"], n)
- if header_id:
- header_id_attr = ' id="%s"' % header_id
- html = self._run_span_gamut(header_group)
- if "toc" in self.extras and header_id:
- self._toc_add_entry(n, header_id, html)
- return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
- def _do_headers(self, text):
- # Setext-style headers:
- # Header 1
- # ========
- #
- # Header 2
- # --------
- # atx-style headers:
- # # Header 1
- # ## Header 2
- # ## Header 2 with closing hashes ##
- # ...
- # ###### Header 6
- if 'tag-friendly' in self.extras:
- return self._h_re_tag_friendly.sub(self._h_sub, text)
- return self._h_re.sub(self._h_sub, text)
# List-marker building blocks: unordered markers are '*', '+' or '-';
# ordered markers are digits followed by '.'.
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
- def _list_sub(self, match):
- lst = match.group(1)
- lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
- result = self._process_list_items(lst)
- if self.list_level:
- return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
- else:
- return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
    """Form HTML ordered (numbered) and unordered (bulleted) lists."""
    # Iterate over each *non-overlapping* list match.
    pos = 0
    while True:
        # Find the *first* hit for either list style (ul or ol). We
        # match ul and ol separately to avoid adjacent lists of different
        # types running into each other (see issue #16).
        hits = []
        for marker_pat in (self._marker_ul, self._marker_ol):
            less_than_tab = self.tab_width - 1
            whole_list = r'''
                (                   # \1 = whole list
                  (                 # \2
                    [ ]{0,%d}
                    (%s)            # \3 = first list item marker
                    [ \t]+
                    (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
                  )
                  (?:.+?)
                  (                 # \4
                      \Z
                    |
                      \n{2,}
                      (?=\S)
                      (?!           # Negative lookahead for another list item marker
                        [ \t]*
                        %s[ \t]+
                      )
                  )
                )
            ''' % (less_than_tab, marker_pat, marker_pat)
            if self.list_level:  # sub-list
                list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
            else:
                # Top-level lists must follow a blank line (or doc start).
                list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
                                     re.X | re.M | re.S)
            match = list_re.search(text, pos)
            if match:
                hits.append((match.start(), match))
        if not hits:
            break
        # Earliest match in the text wins, regardless of list type.
        hits.sort()
        match = hits[0][1]
        start, end = match.span()
        middle = self._list_sub(match)
        text = text[:start] + middle + text[end:]
        pos = start + len(middle)  # start pos for next attempted match
    return text
# One list item: optional leading blank line (\1), leading indent (\2),
# the marker, then the item text up to a lookahead for the next item at
# the same indent (or end of list).
_list_item_re = re.compile(r'''
    (\n)?                   # leading line = \1
    (^[ \t]*)               # leading whitespace = \2
    (?P<marker>%s) [ \t]+   # list marker = \3
    ((?:.+?)                # list item text = \4
    (\n{1,2}))              # eols = \5
    (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
    ''' % (_marker_any, _marker_any),
    re.M | re.X | re.S)

# "task_list" extra: a leading "[ ]"/"[x]"/"[X]" checkbox in a list item.
_task_list_item_re = re.compile(r'''
    (\[[\ xX]\])[ \t]+      # tasklist marker = \1
    (.*)                    # list item text = \2
''', re.M | re.X | re.S)

# NOTE(review): "warpper" is a long-standing typo for "wrapper"; the name
# is kept as-is because it is referenced by other methods.
_task_list_warpper_str = r'<input type="checkbox" class="task-list-item-checkbox" %sdisabled> %s'
- def _task_list_item_sub(self, match):
- marker = match.group(1)
- item_text = match.group(2)
- if marker in ['[x]','[X]']:
- return self._task_list_warpper_str % ('checked ', item_text)
- elif marker == '[ ]':
- return self._task_list_warpper_str % ('', item_text)
# True when the previously processed <li> ended with two newlines; the
# following item is then treated as block-level ("loose" list).
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
    """Replacement callback for `_list_item_re`: render one <li>."""
    item = match.group(4)
    leading_line = match.group(1)
    if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
        # "Loose" item: run the full block gamut (paragraphs, etc.).
        item = self._run_block_gamut(self._outdent(item))
    else:
        # "Tight" item.  Recursion for sub-lists:
        item = self._do_lists(self._outdent(item))
        if item.endswith('\n'):
            item = item[:-1]
        item = self._run_span_gamut(item)
    # Remember the looseness state for the next item in this list.
    self._last_li_endswith_two_eols = (len(match.group(5)) == 2)

    if "task_list" in self.extras:
        item = self._task_list_item_re.sub(self._task_list_item_sub, item)

    return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
    """Process the contents of a single ordered or unordered list,
    splitting it into individual list items.
    """
    # The $g_list_level global keeps track of when we're inside a list.
    # Each time we enter a list, we increment it; when we leave a list,
    # we decrement. If it's zero, we're not in a list anymore.
    #
    # We do this because when we're not inside a list, we want to treat
    # something like this:
    #
    #       I recommend upgrading to version
    #       8. Oops, now this line is treated
    #       as a sub-list.
    #
    # As a single paragraph, despite the fact that the second line starts
    # with a digit-period-space sequence.
    #
    # Whereas when we're inside a list (or sub-list), that line will be
    # treated as the start of a sub-list. What a kludge, huh? This is
    # an aspect of Markdown's syntax that's hard to parse perfectly
    # without resorting to mind-reading. Perhaps the solution is to
    # change the syntax rules such that sub-lists must start with a
    # starting cardinal number; e.g. "1." or "a.".
    self.list_level += 1
    self._last_li_endswith_two_eols = False
    # Normalize to exactly one trailing newline so the end-of-item
    # lookahead in `_list_item_re` behaves at the end of the list.
    list_str = list_str.rstrip('\n') + '\n'
    list_str = self._list_item_re.sub(self._list_item_sub, list_str)
    self.list_level -= 1
    return list_str
- def _get_pygments_lexer(self, lexer_name):
- try:
- from pygments import lexers, util
- except ImportError:
- return None
- try:
- return lexers.get_lexer_by_name(lexer_name)
- except util.ClassNotFound:
- return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
    """Syntax-highlight `codeblock` with Pygments using `lexer`.

    `formatter_opts` are passed through to the Pygments HTML formatter.
    Requires the (third-party) `pygments` package at call time.
    """
    import pygments
    import pygments.formatters

    class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
        def _wrap_code(self, inner):
            """A function for use in a Pygments Formatter which
            wraps in <code> tags.
            """
            yield 0, "<code>"
            for tup in inner:
                yield tup
            yield 0, "</code>"

        def wrap(self, source, outfile):
            """Return the source with a code, pre, and div."""
            return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    formatter_opts.setdefault("cssclass", "codehilite")
    formatter = HtmlCodeFormatter(**formatter_opts)
    return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
    """Replacement callback rendering one (indented or fenced) code block.

    Handles optional syntax highlighting via the "fenced-code-blocks",
    deprecated "code-color", and "highlightjs-lang" extras.
    """
    lexer_name = None
    if is_fenced_code_block:
        lexer_name = match.group(1)
        if lexer_name:
            formatter_opts = self.extras['fenced-code-blocks'] or {}
        codeblock = match.group(2)
        codeblock = codeblock[:-1]  # drop one trailing newline
    else:
        codeblock = match.group(1)
        codeblock = self._outdent(codeblock)
        codeblock = self._detab(codeblock)
        codeblock = codeblock.lstrip('\n')  # trim leading newlines
        codeblock = codeblock.rstrip()      # trim trailing whitespace

    # Note: "code-color" extra is DEPRECATED.
    if "code-color" in self.extras and codeblock.startswith(":::"):
        lexer_name, rest = codeblock.split('\n', 1)
        lexer_name = lexer_name[3:].strip()
        codeblock = rest.lstrip("\n")  # Remove lexer declaration line.
        formatter_opts = self.extras['code-color'] or {}

    # Use pygments only if not using the highlightjs-lang extra
    if lexer_name and "highlightjs-lang" not in self.extras:
        def unhash_code(codeblock):
            for key, sanitized in list(self.html_spans.items()):
                codeblock = codeblock.replace(key, sanitized)
            # Fix: these replacements had degenerated into no-ops (e.g.
            # ("&", "&")); restore decoding of the HTML entities before
            # handing the text to Pygments, which escapes it again.
            replacements = [
                ("&amp;", "&"),
                ("&lt;", "<"),
                ("&gt;", ">")
            ]
            for old, new in replacements:
                codeblock = codeblock.replace(old, new)
            return codeblock
        lexer = self._get_pygments_lexer(lexer_name)
        if lexer:
            codeblock = unhash_code(codeblock)
            colored = self._color_with_pygments(codeblock, lexer,
                                                **formatter_opts)
            return "\n\n%s\n\n" % colored

    codeblock = self._encode_code(codeblock)
    pre_class_str = self._html_class_str_from_tag("pre")

    if "highlightjs-lang" in self.extras and lexer_name:
        code_class_str = ' class="%s language-%s"' % (lexer_name, lexer_name)
    else:
        code_class_str = self._html_class_str_from_tag("code")

    return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
        pre_class_str, code_class_str, codeblock)
- def _html_class_str_from_tag(self, tag):
- """Get the appropriate ' class="..."' string (note the leading
- space), if any, for the given tag.
- """
- if "html-classes" not in self.extras:
- return ""
- try:
- html_classes_from_tag = self.extras["html-classes"]
- except TypeError:
- return ""
- else:
- if tag in html_classes_from_tag:
- return ' class="%s"' % html_classes_from_tag[tag]
- return ""
- def _do_code_blocks(self, text):
- """Process Markdown `<pre><code>` blocks."""
- code_block_re = re.compile(r'''
- (?:\n\n|\A\n?)
- ( # $1 = the code block -- one or more lines, starting with a space/tab
- (?:
- (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
- .*\n+
- )+
- )
- ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
- # Lookahead to make sure this block isn't already in a code block.
- # Needed when syntax highlighting is being used.
- (?![^<]*\</code\>)
- ''' % (self.tab_width, self.tab_width),
- re.M | re.X)
- return code_block_re.sub(self._code_block_sub, text)
# A ```-fenced code block: optional language word after the opening fence.
_fenced_code_block_re = re.compile(r'''
    (?:\n+|\A\n?)
    ^```\s{0,99}([\w+-]+)?\s{0,99}\n # opening fence, $1 = optional lang
    (.*?)                            # $2 = code block content
    ^```[ \t]*\n                     # closing fence
    ''', re.M | re.X | re.S)

def _fenced_code_block_sub(self, match):
    # Delegate to the shared code-block renderer in "fenced" mode.
    return self._code_block_sub(match, is_fenced_code_block=True)

def _do_fenced_code_blocks(self, text):
    """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
    return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one or or a run of more backticks the delimiters must
#   be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
#   space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
    (?<!\\)
    (`+)        # \1 = Opening run of `
    (?!`)       # See Note A test/tm-cases/escapes.text
    (.+?)       # \2 = The code block
    (?<!`)
    \1          # Matching closer
    (?!`)
    ''', re.X | re.S)
- def _code_span_sub(self, match):
- c = match.group(2).strip(" \t")
- c = self._encode_code(c)
- return "<code>%s</code>" % c
def _do_code_spans(self, text):
    """Convert backtick-delimited spans to <code> elements."""
    # * Backtick quotes are used for <code></code> spans.
    #
    # * You can use multiple backticks as the delimiters if you want to
    #   include literal backticks in the code span. So, this input:
    #
    #     Just type ``foo `bar` baz`` at the prompt.
    #
    #   Will translate to:
    #
    #     <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
    #
    #   There's no arbitrary limit to the number of backticks you
    #   can use as delimters. If you need three consecutive backticks
    #   in your code, use four for delimiters, etc.
    #
    # * You can use spaces to get literal backticks at the edges:
    #
    #     ... type `` `bar` `` ...
    #
    #   Turns to:
    #
    #     ... type <code>`bar`</code> ...
    return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
    """Encode/escape certain characters inside Markdown code runs.
    The point is that in code, these characters are literals,
    and lose their special Markdown meanings.
    """
    replacements = [
        # Fix: these replacements had degenerated into no-ops (e.g.
        # ('&', '&')); restore the actual HTML-entity escaping.
        # Encode all ampersands; HTML entities are not
        # entities within a Markdown code span.
        ('&', '&amp;'),
        # Do the angle bracket song and dance:
        ('<', '&lt;'),
        ('>', '&gt;'),
    ]
    for before, after in replacements:
        text = text.replace(before, after)
    # Hide the escaped text behind a hash so later span transforms
    # (bold, italics, smart quotes, ...) cannot touch it; it is swapped
    # back by `_unescape_special_chars` at the end of conversion.
    hashed = _hash_text(text)
    self._escape_table[text] = hashed
    return hashed
- _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S)
- def _do_strike(self, text):
- text = self._strike_re.sub(r"<strike>\1</strike>", text)
- return text
- _underline_re = re.compile(r"--(?=\S)(.+?)(?<=\S)--", re.S)
- def _do_underline(self, text):
- text = self._underline_re.sub(r"<u>\1</u>", text)
- return text
- _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
- _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
- _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
- _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
- def _do_italics_and_bold(self, text):
- # <strong> must go first:
- if "code-friendly" in self.extras:
- text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
- text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
- else:
- text = self._strong_re.sub(r"<strong>\2</strong>", text)
- text = self._em_re.sub(r"<em>\2</em>", text)
- return text
- # "smarty-pants" extra: Very liberal in interpreting a single prime as an
- # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
- # "twixt" can be written without an initial apostrophe. This is fine because
- # using scare quotes (single quotation marks) is rare.
- _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
- _contractions = ["tis", "twas", "twer", "neath", "o", "n",
- "round", "bout", "twixt", "nuff", "fraid", "sup"]
- def _do_smart_contractions(self, text):
- text = self._apostrophe_year_re.sub(r"’\1", text)
- for c in self._contractions:
- text = text.replace("'%s" % c, "’%s" % c)
- text = text.replace("'%s" % c.capitalize(),
- "’%s" % c.capitalize())
- return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
    """Fancifies 'single quotes', "double quotes", and apostrophes.
    Converts --, ---, and ... into en dashes, em dashes, and ellipses.
    Inspiration is: <http://daringfireball.net/projects/smartypants/>
    See "test/tm-cases/smarty_pants.text" for a full discussion of the
    support here and
    <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
    discussion of some diversion from the original SmartyPants.
    """
    if "'" in text:  # guard for perf
        text = self._do_smart_contractions(text)
        text = self._opening_single_quote_re.sub("‘", text)
        text = self._closing_single_quote_re.sub("’", text)

    if '"' in text:  # guard for perf
        text = self._opening_double_quote_re.sub("“", text)
        text = self._closing_double_quote_re.sub("”", text)

    # '---' must be replaced before '--' (longest-match-first ordering).
    text = text.replace("---", "—")
    text = text.replace("--", "–")
    text = text.replace("...", "…")
    text = text.replace(" . . . ", "…")
    text = text.replace(". . .", "…")

    # TODO: Temporary hack to fix https://github.com/trentm/python-markdown2/issues/150
    if "footnotes" in self.extras and "footnote-ref" in text:
        # Quotes in the footnote back ref get converted to "smart" quotes
        # Change them back here to ensure they work.
        text = text.replace('class="footnote-ref”', 'class="footnote-ref"')

    return text
# One or more consecutive '>'-prefixed lines plus their continuation
# lines.  The '%s' slot admits the optional '!' spoiler marker.
_block_quote_base = r'''
    (                           # Wrap whole match in \1
        (
            ^[ \t]*>%s[ \t]?    # '>' at the start of a line
            .+\n                # rest of the first line
            (.+\n)*             # subsequent consecutive lines
        )+
    )
'''
_block_quote_re = re.compile(_block_quote_base % '', re.M | re.X)
_block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X)
# Strip one level of '>' quoting ('>!' for spoiler blocks).
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M)
# Matches only when *every* line of the quote uses the '>!' spoiler form.
_bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M)
# A rendered <pre> block (used to undo blockquote indentation inside it).
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
- def _dedent_two_spaces_sub(self, match):
- return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
    """Replacement callback rendering one matched blockquote."""
    bq = match.group(1)
    # With the "spoiler" extra, a quote where *every* line uses '>!'
    # becomes <blockquote class="spoiler">.
    is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq)
    # trim one level of quoting
    if is_spoiler:
        bq = self._bq_one_level_re_spoiler.sub('', bq)
    else:
        bq = self._bq_one_level_re.sub('', bq)
    # trim whitespace-only lines
    bq = self._ws_only_line_re.sub('', bq)
    bq = self._run_block_gamut(bq)  # recurse

    # Cosmetically indent the rendered quote body.
    bq = re.sub('(?m)^', ' ', bq)
    # These leading spaces screw with <pre> content, so we need to fix that:
    bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)

    if is_spoiler:
        return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq
    else:
        return '<blockquote>\n%s\n</blockquote>\n\n' % bq
- def _do_block_quotes(self, text):
- if '>' not in text:
- return text
- if 'spoiler' in self.extras:
- return self._block_quote_re_spoiler.sub(self._block_quote_sub, text)
- else:
- return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
    """Wrap remaining bare text in <p> tags; restore hashed HTML blocks."""
    # Strip leading and trailing lines:
    text = text.strip('\n')

    # Wrap <p> tags.
    grafs = []
    for i, graf in enumerate(re.split(r"\n{2,}", text)):
        if graf in self.html_blocks:
            # Unhashify HTML blocks
            grafs.append(self.html_blocks[graf])
        else:
            cuddled_list = None
            if "cuddled-lists" in self.extras:
                # Need to put back trailing '\n' for `_list_item_re`
                # match at the end of the paragraph.
                li = self._list_item_re.search(graf + '\n')
                # Two of the same list marker in this paragraph: a likely
                # candidate for a list cuddled to preceding paragraph
                # text (issue 33). Note the `[-1]` is a quick way to
                # consider numeric bullets (e.g. "1." and "2.") to be
                # equal.
                if (li and len(li.group(2)) <= 3
                        and (
                            (li.group("next_marker") and li.group("marker")[-1] == li.group("next_marker")[-1])
                            or
                            li.group("next_marker") is None
                        )):
                    start = li.start()
                    cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
                    assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
                    graf = graf[:start]

            # Wrap <p> tags.
            graf = self._run_span_gamut(graf)
            grafs.append("<p%s>" % self._html_class_str_from_tag('p') + graf.lstrip(" \t") + "</p>")

            if cuddled_list:
                grafs.append(cuddled_list)

    return "\n\n".join(grafs)
def _add_footnotes(self, text):
    """Append the rendered footnote list (if any) to `text`."""
    if self.footnotes:
        footer = [
            '<div class="footnotes">',
            '<hr' + self.empty_element_suffix,
            '<ol>',
        ]

        if not self.footnote_title:
            self.footnote_title = "Jump back to footnote %d in the text."
        if not self.footnote_return_symbol:
            self.footnote_return_symbol = "↩"

        for i, id in enumerate(self.footnote_ids):
            if i != 0:
                footer.append('')
            footer.append('<li id="fn-%s">' % id)
            footer.append(self._run_block_gamut(self.footnotes[id]))
            try:
                backlink = ('<a href="#fnref-%s" ' +
                        'class="footnoteBackLink" ' +
                        'title="' + self.footnote_title + '">' +
                        self.footnote_return_symbol +
                        '</a>') % (id, i+1)
            except TypeError:
                # A custom `footnote_title` without a '%d' slot makes the
                # '%' formatting above raise TypeError; fall back to the
                # default title.
                log.debug("Footnote error. `footnote_title` "
                          "must include parameter. Using defaults.")
                backlink = ('<a href="#fnref-%s" '
                            'class="footnoteBackLink" '
                            'title="Jump back to footnote %d in the text.">'
                            '↩</a>' % (id, i+1))

            if footer[-1].endswith("</p>"):
                # Tuck the backlink inside the footnote's final paragraph.
                footer[-1] = footer[-1][:-len("</p>")] \
                    + ' ' + backlink + "</p>"
            else:
                footer.append("\n<p>%s</p>" % backlink)
            footer.append('</li>')
        footer.append('</ol>')
        footer.append('</div>')
        return text + '\n\n' + '\n'.join(footer)
    else:
        return text
# A '<' that does not open a tag, end-tag, comment, PI, or declaration.
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
# A '>' that does not close one of the above.
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
    """Smart-encode ampersands and angle brackets that need encoding.

    Fix: all three substitutions had degenerated into no-ops ('&' -> '&',
    '<' -> '<', '>' -> '>'); restore the HTML-entity replacements.
    """
    # Encode '&' unless it is already part of an HTML entity.
    text = _AMPERSAND_RE.sub('&amp;', text)

    # Encode naked <'s
    text = self._naked_lt_re.sub('&lt;', text)

    # Encode naked >'s
    # Note: Other markdown implementations (e.g. Markdown.pl, PHP
    # Markdown) don't do this.
    text = self._naked_gt_re.sub('&gt;', text)
    return text
- _incomplete_tags_re = re.compile(r"<(/?\w+?(?!\w).+?[\s/]+?)")
- def _encode_incomplete_tags(self, text):
- if self.safe_mode not in ("replace", "escape"):
- return text
- if text.endswith(">"):
- return text # this is not an incomplete tag, this is a link in the form <http://x.y.z>
- return self._incomplete_tags_re.sub("<\\1", text)
- def _encode_backslash_escapes(self, text):
- for ch, escape in list(self._escape_table.items()):
- text = text.replace("\\"+ch, escape)
- return text
- _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
- def _auto_link_sub(self, match):
- g1 = match.group(1)
- return '<a href="%s">%s</a>' % (g1, g1)
- _auto_email_link_re = re.compile(r"""
- <
- (?:mailto:)?
- (
- [-.\w]+
- \@
- [-\w]+(\.[-\w]+)*\.[a-z]+
- )
- >
- """, re.I | re.X | re.U)
- def _auto_email_link_sub(self, match):
- return self._encode_email_address(
- self._unescape_special_chars(match.group(1)))
- def _do_auto_links(self, text):
- text = self._auto_link_re.sub(self._auto_link_sub, text)
- text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
- return text
def _encode_email_address(self, addr):
    """Return `addr` as an entity-obfuscated mailto anchor."""
    # Input: an email address, e.g. "foo@example.com"
    #
    # Output: the email address as a mailto link, with each character
    # of the address encoded as either a decimal or hex entity, in
    # the hopes of foiling most address harvesting spam bots. E.g.:
    #
    #   <a href="mailto:foo@e
    #       xample.com">foo
    #       @example.com</a>
    #
    # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
    # mailing list: <http://tinyurl.com/yu7ue>
    chars = [_xml_encode_email_char_at_random(ch)
             for ch in "mailto:" + addr]
    # Strip the mailto: from the visible part.
    # (chars[7:] skips the 7 encoded "mailto:" characters.)
    addr = '<a href="%s">%s</a>' \
        % (''.join(chars), ''.join(chars[7:]))
    return addr
def _do_link_patterns(self, text):
    """Auto-link text matching the user-supplied `link_patterns`.

    Each (regex, repl) pair turns matches into <a> links.  Rendered links
    are hashed during processing so later span transforms leave them
    alone, then swapped back in at the end.
    """
    link_from_hash = {}
    for regex, repl in self.link_patterns:
        replacements = []
        for match in regex.finditer(text):
            if hasattr(repl, "__call__"):
                href = repl(match)
            else:
                href = match.expand(repl)
            replacements.append((match.span(), href))
        # Process right-to-left so earlier spans stay valid as we splice.
        for (start, end), href in reversed(replacements):
            # Do not match against links inside brackets.
            if text[start - 1:start] == '[' and text[end:end + 1] == ']':
                continue
            # Do not match against links in the standard markdown syntax.
            if text[start - 2:start] == '](' or text[end:end + 2] == '")':
                continue
            # Do not match against links which are escaped.
            if text[start - 3:start] == '"""' and text[end:end + 3] == '"""':
                text = text[:start - 3] + text[start:end] + text[end + 3:]
                continue
            # Fix: the double-quote replacement had degenerated into a
            # no-op ('"' -> '"'); escape as &quot; so the generated href
            # attribute cannot be broken out of.
            escaped_href = (
                href.replace('"', '&quot;')  # b/c of attr quote
                # To avoid markdown <em> and <strong>:
                .replace('*', self._escape_table['*'])
                .replace('_', self._escape_table['_']))
            link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
            hash = _hash_text(link)
            link_from_hash[hash] = link
            text = text[:start] + hash + text[end:]
    for hash, link in list(link_from_hash.items()):
        text = text.replace(hash, link)
    return text
- def _unescape_special_chars(self, text):
- # Swap back in all the special characters we've hidden.
- for ch, hash in list(self._escape_table.items()):
- text = text.replace(hash, ch)
- return text
def _outdent(self, text):
    """Remove one level of line-leading tabs or spaces."""
    return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
    """A markdowner class that enables most extras:

    - footnotes
    - code-color (only has effect if 'pygments' Python module on path)

    These are not included:
    - pyshell (specific to Python-related documenting)
    - code-friendly (because it *disables* part of the syntax)
    - link-patterns (because you need to specify some actual
      link-patterns anyway)
    """
    # Class-level default extras; presumably merged with any
    # caller-supplied extras by Markdown.__init__ -- confirm there.
    extras = ["footnotes", "code-color"]
# ---- internal support functions

def calculate_toc_html(toc):
    """Return the HTML for the current TOC.
    This expects the `_toc` attribute to have been set on this instance.

    `toc` is a list of (level, id, name) tuples in document order, or None.
    """
    if toc is None:
        return None

    def indent():
        # Cosmetic indentation proportional to the current nesting depth.
        return ' ' * (len(h_stack) - 1)
    lines = []
    h_stack = [0]   # stack of header-level numbers
    for level, id, name in toc:
        if level > h_stack[-1]:
            # Deeper header: open a new nested <ul>.
            lines.append("%s<ul>" % indent())
            h_stack.append(level)
        elif level == h_stack[-1]:
            # Sibling header: close the previous <li>.
            lines[-1] += "</li>"
        else:
            # Shallower header: close every deeper open list first.
            while level < h_stack[-1]:
                h_stack.pop()
                if not lines[-1].endswith("</li>"):
                    lines[-1] += "</li>"
                lines.append("%s</ul></li>" % indent())
        lines.append('%s<li><a href="#%s">%s</a>' % (
            indent(), id, name))
    # Close whatever is still open at the end of the TOC.
    while len(h_stack) > 1:
        h_stack.pop()
        if not lines[-1].endswith("</li>"):
            lines[-1] += "</li>"
        lines.append("%s</ul>" % indent())
    return '\n'.join(lines) + '\n'
class UnicodeWithAttrs(unicode):
    """A subclass of unicode used for the return value of conversion to
    possibly attach some attributes. E.g. the "toc_html" attribute when
    the "toc" extra is used.
    """
    # NOTE(review): `unicode` is presumably aliased to `str` on Python 3
    # earlier in this file -- confirm.
    metadata = None   # set when the "metadata" extra is used
    toc_html = None   # set when the "toc" extra is used
- ## {{{ http://code.activestate.com/recipes/577257/ (r1)
- _slugify_strip_re = re.compile(r'[^\w\s-]')
- _slugify_hyphenate_re = re.compile(r'[-\s]+')
- def _slugify(value):
- """
- Normalizes string, converts to lowercase, removes non-alpha characters,
- and converts spaces to hyphens.
- From Django's "django/template/defaultfilters.py".
- """
- import unicodedata
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
- value = _slugify_strip_re.sub('', value).strip().lower()
- return _slugify_hyphenate_re.sub('-', value)
- ## end of http://code.activestate.com/recipes/577257/ }}}
- # From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
- def _curry(*args, **kwargs):
- function, args = args[0], args[1:]
- def result(*rest, **kwrest):
- combined = kwargs.copy()
- combined.update(kwrest)
- return function(*args + rest, **combined)
- return result
- # Recipe: regex_from_encoded_pattern (1.0)
- def _regex_from_encoded_pattern(s):
- """'foo' -> re.compile(re.escape('foo'))
- '/foo/' -> re.compile('foo')
- '/foo/i' -> re.compile('foo', re.I)
- """
- if s.startswith('/') and s.rfind('/') != 0:
- # Parse it: /PATTERN/FLAGS
- idx = s.rfind('/')
- _, flags_str = s[1:idx], s[idx+1:]
- flag_from_char = {
- "i": re.IGNORECASE,
- "l": re.LOCALE,
- "s": re.DOTALL,
- "m": re.MULTILINE,
- "u": re.UNICODE,
- }
- flags = 0
- for char in flags_str:
- try:
- flags |= flag_from_char[char]
- except KeyError:
- raise ValueError("unsupported regex flag: '%s' in '%s' "
- "(must be one of '%s')"
- % (char, s, ''.join(list(flag_from_char.keys()))))
- return re.compile(s[1:idx], flags)
- else: # not an encoded regex
- return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

    "lines" is a list of lines to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.
    """
    DEBUG = False
    if DEBUG:
        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
              % (tabsize, skip_first_line))
    # First pass: find the smallest indent ("margin") across all lines.
    margin = None
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line: continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                # A tab advances to the next tab stop.
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue  # skip all-whitespace lines
            else:
                break
        else:
            continue  # skip all-whitespace lines
        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG: print("dedent: margin=%r" % margin)

    # Second pass: strip `margin` columns of leading whitespace in place.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line: continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print("dedent: %r: %r -> removed %d/%d"\
                          % (line, ch, removed, margin))
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    # A tab straddled the margin boundary: pad back the
                    # overshoot with spaces.
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
            else:
                if removed:
                    lines[i] = lines[i][removed:]
    return lines
def _dedent(text, tabsize=8, skip_first_line=False):
    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text

    "text" is the text to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    textwrap.dedent(s), but don't expand tabs to spaces
    """
    lines = text.splitlines(True)
    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(lines)
- class _memoized(object):
- """Decorator that caches a function's return value each time it is called.
- If called later with the same arguments, the cached value is returned, and
- not re-evaluated.
- http://wiki.python.org/moin/PythonDecoratorLibrary
- """
- def __init__(self, func):
- self.func = func
- self.cache = {}
- def __call__(self, *args):
- try:
- return self.cache[args]
- except KeyError:
- self.cache[args] = value = self.func(*args)
- return value
- except TypeError:
- # uncachable -- for instance, passing a list as an argument.
- # Better to not cache than to blow up entirely.
- return self.func(*args)
- def __repr__(self):
- """Return the function's docstring."""
- return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
    """Standalone XML processing instruction regex."""
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                   # save in $1
            [ ]{0,%d}
            (?:
                <\?\w+\b\s+.*?\?>   # XML processing instruction
                |
                <\w+:\w+\b\s+.*?/>  # namespaced single tag
            )
            [ \t]*
            (?=\n{2,}|\Z)   # followed by a blank line or end of document
        )
        """ % (tab_width - 1), re.X)
# Memoize: the compiled regex depends only on `tab_width`.
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
    """Regex matching a standalone <hr> tag on its own 'paragraph'."""
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                   # save in \1
            [ ]{0,%d}
            <(hr)           # start tag = \2
            \b              # word break
            ([^<>])*?       #
            /?>             # the matching end tag
            [ \t]*
            (?=\n{2,}|\Z)   # followed by a blank line or end of document
        )
        """ % (tab_width - 1), re.X)
# Memoize: the compiled regex depends only on `tab_width`.
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
    """Escape the given string for use in an HTML/XML tag attribute.

    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
    the tag attribute is surrounded by double quotes.
    """
    # Fix 1: the result of the ampersand substitution was being discarded
    # (the next statement restarted from `attr`); chain from `escaped`.
    # Fix 2: the replacement strings had degenerated into no-ops (e.g.
    # '"' -> '"'); restore the HTML entities.
    escaped = _AMPERSAND_RE.sub('&amp;', attr)

    escaped = (escaped
        .replace('"', '&quot;')
        .replace('<', '&lt;')
        .replace('>', '&gt;'))
    if not skip_single_quote:
        escaped = escaped.replace("'", "&#39;")
    return escaped
- def _xml_encode_email_char_at_random(ch):
- r = random()
- # Roughly 10% raw, 45% hex, 45% dec.
- # '@' *must* be encoded. I [John Gruber] insist.
- # Issue 26: '_' must be encoded.
- if r > 0.9 and ch not in "@_":
- return ch
- elif r < 0.45:
- # The [1:] is to drop leading '0': 0x63 -> x63
- return '&#%s;' % hex(ord(ch))[1:]
- else:
- return '&#%s;' % ord(ch)
- def _html_escape_url(attr, safe_mode=False):
- """Replace special characters that are potentially malicious in url string."""
- escaped = (attr
- .replace('"', '"')
- .replace('<', '<')
- .replace('>', '>'))
- if safe_mode:
- escaped = escaped.replace('+', ' ')
- escaped = escaped.replace("'", "'")
- return escaped
- # ---- mainline
- class _NoReflowFormatter(optparse.IndentedHelpFormatter):
- """An optparse formatter that does NOT reflow the description."""
- def format_description(self, description):
- return description or ""
def _test():
    """Run this module's doctests (used by the --self-test CLI option)."""
    import doctest
    doctest.testmod()
def main(argv=None):
    """Command-line entry point for the markdown2 converter.

    argv -- full argument vector including the program name (defaults to
            sys.argv). Each PATH argument (or '-' for stdin) is converted
            and the HTML written to stdout.

    Returns None on success (so the process exits 0); option errors exit
    via optparse. Raises MarkdownError for an invalid link-patterns file.
    """
    if argv is None:
        argv = sys.argv
    if not logging.root.handlers:
        logging.basicConfig()

    usage = "usage: %prog [PATHS...]"
    version = "%prog "+__version__
    parser = optparse.OptionParser(prog="markdown2", usage=usage,
        version=version, description=cmdln_desc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("--encoding",
                      help="specify encoding of text content")
    parser.add_option("--html4tags", action="store_true", default=False,
                      help="use HTML 4 style for empty element tags")
    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
                      help="sanitize literal HTML: 'escape' escapes "
                           "HTML meta chars, 'replace' replaces with an "
                           "[HTML_REMOVED] note")
    parser.add_option("-x", "--extras", action="append",
                      help="Turn on specific extra features (not part of "
                           "the core Markdown spec). See above.")
    parser.add_option("--use-file-vars",
                      help="Look for and use Emacs-style 'markdown-extras' "
                           "file var to turn on extras. See "
                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
    parser.add_option("--link-patterns-file",
                      help="path to a link pattern file")
    parser.add_option("--self-test", action="store_true",
                      help="run internal self-tests (some doctests)")
    parser.add_option("--compare", action="store_true",
                      help="run against Markdown.pl as well (for testing)")
    parser.set_defaults(log_level=logging.INFO, compare=False,
                        encoding="utf-8", safe_mode=None, use_file_vars=False)
    # Fix: honor the `argv` parameter. Previously parse_args() was called
    # with no arguments, so it always read sys.argv and `argv` was ignored.
    # Passing argv[1:] is backward-compatible (the default IS sys.argv).
    opts, paths = parser.parse_args(argv[1:])
    log.setLevel(opts.log_level)

    if opts.self_test:
        return _test()

    if opts.extras:
        # Parse "-x name", "-x name=arg" (repeatable, also ,;: separated)
        # into an {ename: earg-or-None} dict; numeric args become ints.
        extras = {}
        for s in opts.extras:
            splitter = re.compile("[,;: ]+")
            for e in splitter.split(s):
                if '=' in e:
                    ename, earg = e.split('=', 1)
                    try:
                        earg = int(earg)
                    except ValueError:
                        pass
                else:
                    ename, earg = e, None
                extras[ename] = earg
    else:
        extras = None

    if opts.link_patterns_file:
        link_patterns = []
        # `with` guarantees the file is closed even when a bad pattern
        # line raises MarkdownError (was a manual try/finally close).
        with open(opts.link_patterns_file) as f:
            for i, line in enumerate(f.readlines()):
                if not line.strip(): continue
                if line.lstrip().startswith("#"): continue
                try:
                    pat, href = line.rstrip().rsplit(None, 1)
                except ValueError:
                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
                                        % (opts.link_patterns_file, i+1, line))
                link_patterns.append(
                    (_regex_from_encoded_pattern(pat), href))
    else:
        link_patterns = None

    from os.path import join, dirname, abspath, exists
    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
                       "Markdown.pl")
    if not paths:
        paths = ['-']
    for path in paths:
        if path == '-':
            text = sys.stdin.read()
        else:
            # `with` replaces the manual fp.close() (leak-proof on errors).
            with codecs.open(path, 'r', opts.encoding) as fp:
                text = fp.read()
        if opts.compare:
            from subprocess import Popen, PIPE
            print("==== Markdown.pl ====")
            # NOTE: shell=True with an interpolated path; markdown_pl is
            # derived from __file__ (not user input), so this is test-only.
            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
            p.stdin.write(text.encode('utf-8'))
            p.stdin.close()
            perl_html = p.stdout.read().decode('utf-8')
            if py3:
                sys.stdout.write(perl_html)
            else:
                sys.stdout.write(perl_html.encode(
                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
            print("==== markdown2.py ====")
        html = markdown(text,
            html4tags=opts.html4tags,
            safe_mode=opts.safe_mode,
            extras=extras, link_patterns=link_patterns,
            use_file_vars=opts.use_file_vars,
            cli=True)
        if py3:
            sys.stdout.write(html)
        else:
            sys.stdout.write(html.encode(
                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if extras and "toc" in extras:
            log.debug("toc_html: " +
                str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
        if opts.compare:
            # Normalize both outputs with the project's test helper (when
            # available) before comparing against Markdown.pl's output.
            test_dir = join(dirname(dirname(abspath(__file__))), "test")
            if exists(join(test_dir, "test_markdown2.py")):
                sys.path.insert(0, test_dir)
                from test_markdown2 import norm_html_from_html
                norm_html = norm_html_from_html(html)
                norm_perl_html = norm_html_from_html(perl_html)
            else:
                norm_html = html
                norm_perl_html = perl_html
            print("==== match? %r ====" % (norm_perl_html == norm_html))
- if __name__ == "__main__":
- sys.exit(main(sys.argv))
|