pygments_markdown_lexer package

Pygments Markdown Lexer – A Markdown lexer for Pygments to highlight Markdown code snippets.

Copyright © 2015 Jürgen Hermann <jh@web.de>

Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

class pygments_markdown_lexer.MarkdownLexer(**options)

Bases: pygments.lexer.RegexLexer

A Markdown lexer for Pygments.

Some rules adapted from code in pygments.lexers.markup (BSD-licensed).

aliases = [u'md', u'markdown']
closers = u'\'")]}>\u2019\u201d\xbb!?'
end_string_suffix = u'((?=$)|(?=[-/:.,; \\n\\x00\\\u2010\\\u2011\\\u2012\\\u2013\\\u2014\\\xa0\\\'\\"\\)\\]\\}\\>\\\u2019\\\u201d\\\xbb\\!\\?]))'
filenames = [u'*.md', u'*.mkd', u'*.markdown']
flags = 8 (re.MULTILINE)
mimetypes = [u'text/x-markdown']
name = u'Markdown'
tokens = {
    'inline': [
        (u'\\\\.', Token.Literal.String.Escape),
        (u'&[-a-z0-9]+;', Token.Literal.String.Symbol),
        (u'&#[0-9]{1,9};', Token.Literal.String.Symbol),
        (u'&', Token.Text),
        (u'``?', Token.Keyword, 'literal'),
        (u'_?_[ \\n]', Token.Text),
        (u'\\*?\\*[ \\n]', Token.Text),
        (u'(\\*\\*)(.+?)((?<![ \\\\])\\*\\*)', <function callback>),
        (u'(__)(.+?)((?<![ \\\\])__)', <function callback>),
        (u'(\\*)(.+?)((?<![ \\\\])\\*)', <function callback>),
        (u'(_)(.+?)((?<![ \\\\])_)', <function callback>),
        (u'[a-zA-Z0-9]+', Token.Text),
        (u'.', Token.Text),
    ],
    'literal': [
        (u'[^`]+', Token.Literal.String.Backtick),
        (u'(?<!\\\\)``?((?=$)|(?=[-/:.,; \\n\\x00\\\u2010\\\u2011\\\u2012\\\u2013\\\u2014\\\xa0\\\'\\"\\)\\]\\}\\>\\\u2019\\\u201d\\\xbb\\!\\?]))', Token.Keyword, '#pop'),
    ],
    'codeblock': [
        (u'^```\\n', Token.Keyword, '#pop'),
        (u'[^`]+', Token.Comment.Preproc),
        (u'`', Token.Comment.Preproc),
    ],
    'root': [
        (u'^\\s*\\n(?:\\s*[-*_]){3,}\\s*\\n', Token.Keyword),
        (u'^(# )(.+?)( #)?(\\n)', <function callback>),
        (u'^(#{2,6} )(.+?)( #{2,6})?(\\n)', <function callback>),
        (u'^(={3,}\\n)?(\\S.{2,}\\n)(={3,})(\\n)', <function callback>),
        (u'^(-{3,}\\n)?(\\S.{2,}\\n)(-{3,})(\\n)', <function callback>),
        (u'^\\s*>\\s', Token.Keyword),
        (u'^\\s*[-+*]\\s', Token.Keyword),
        (u'^\\s*[0-9]+\\.\\s', Token.Keyword),
        (u'^<(?P<tag>[-:a-zA-Z0-9]+)( [^>]+)>.+</(?P=tag)>\\n', Token.Comment.Single),
        (u'(<!--)((?:.*?\\n?)*)(-->)', <function callback>),
        (u'^<[^/>][^>]*>\\n', Token.Comment.Preproc, 'htmlblock'),
        (u'^(```)(.*?)(\\n)', <function callback>, 'codeblock'),
        'inline',
    ],
    'htmlblock': [
        (u'^</[^>]+>\\n', Token.Comment.Preproc, '#pop'),
        (u'.*\\n', Token.Comment.Preproc),
    ],
}
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\xa0'
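
A minimal usage sketch, assuming Pygments and this package are installed: highlight a Markdown snippet with the class exported at package level.

    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments_markdown_lexer import MarkdownLexer

    source = "# Title\n\nSome *emphasized* text with `inline code`.\n"

    # highlight() tokenizes the text with the lexer and renders it via the formatter.
    print(highlight(source, MarkdownLexer(), HtmlFormatter()))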

Submodules

pygments_markdown_lexer.lexer module

Markdown lexer for Pygments.

See the Pygments documentation chapters “Write your own lexer” and “Builtin Tokens”.
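
For orientation, a minimal sketch of the RegexLexer pattern that the classes below follow (the TinyLexer name is illustrative only, not part of this package): each state maps regexes to token types, and a rule may also push or pop states.

    from pygments.lexer import RegexLexer
    from pygments.token import Keyword, Text

    class TinyLexer(RegexLexer):
        """Toy lexer: marks lines starting with '#' as keywords."""
        name = 'Tiny'
        aliases = ['tiny']
        tokens = {
            'root': [
                (r'^#.*\n', Keyword),  # a whole line starting with '#'
                (r'.+\n', Text),       # any other non-empty line
                (r'\n', Text),         # blank lines
            ],
        }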

class pygments_markdown_lexer.lexer.Markdown

Bases: object

Symbolic names for Markdown tokens.

CodeBlock = Token.Comment.Preproc
Heading = Token.Generic.Heading
HtmlBlock = Token.Comment.Preproc
HtmlComment = Token.Comment.MultiLine
HtmlEntity = Token.Literal.String.Symbol
HtmlSingle = Token.Comment.Single
Markup = Token.Keyword
SubHeading = Token.Generic.Heading
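
A short sketch of how these names behave: they are plain aliases for builtin Pygments token types, so styles written against the standard tokens apply unchanged.

    from pygments.token import Token
    from pygments_markdown_lexer.lexer import Markdown, MarkdownLexer

    # The symbolic names above are aliases for builtin token types.
    assert Markdown.Heading is Token.Generic.Heading
    assert Markdown.CodeBlock is Token.Comment.Preproc

    # Tokenize a heading line and show which token types come out.
    for token_type, text in MarkdownLexer().get_tokens(u"# Hello\n"):
        print(token_type, repr(text))
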
class pygments_markdown_lexer.lexer.MarkdownLexer(**options)

Bases: pygments.lexer.RegexLexer

A Markdown lexer for Pygments.

Some rules adapted from code in pygments.lexers.markup (BSD-licensed).

aliases = [u'md', u'markdown']
closers = u'\'")]}>\u2019\u201d\xbb!?'
end_string_suffix = u'((?=$)|(?=[-/:.,; \\n\\x00\\\u2010\\\u2011\\\u2012\\\u2013\\\u2014\\\xa0\\\'\\"\\)\\]\\}\\>\\\u2019\\\u201d\\\xbb\\!\\?]))'
filenames = [u'*.md', u'*.mkd', u'*.markdown']
flags = 8 (re.MULTILINE)
mimetypes = [u'text/x-markdown']
name = u'Markdown'
tokens = {
    'inline': [
        (u'\\\\.', Token.Literal.String.Escape),
        (u'&[-a-z0-9]+;', Token.Literal.String.Symbol),
        (u'&#[0-9]{1,9};', Token.Literal.String.Symbol),
        (u'&', Token.Text),
        (u'``?', Token.Keyword, 'literal'),
        (u'_?_[ \\n]', Token.Text),
        (u'\\*?\\*[ \\n]', Token.Text),
        (u'(\\*\\*)(.+?)((?<![ \\\\])\\*\\*)', <function callback>),
        (u'(__)(.+?)((?<![ \\\\])__)', <function callback>),
        (u'(\\*)(.+?)((?<![ \\\\])\\*)', <function callback>),
        (u'(_)(.+?)((?<![ \\\\])_)', <function callback>),
        (u'[a-zA-Z0-9]+', Token.Text),
        (u'.', Token.Text),
    ],
    'literal': [
        (u'[^`]+', Token.Literal.String.Backtick),
        (u'(?<!\\\\)``?((?=$)|(?=[-/:.,; \\n\\x00\\\u2010\\\u2011\\\u2012\\\u2013\\\u2014\\\xa0\\\'\\"\\)\\]\\}\\>\\\u2019\\\u201d\\\xbb\\!\\?]))', Token.Keyword, '#pop'),
    ],
    'codeblock': [
        (u'^```\\n', Token.Keyword, '#pop'),
        (u'[^`]+', Token.Comment.Preproc),
        (u'`', Token.Comment.Preproc),
    ],
    'root': [
        (u'^\\s*\\n(?:\\s*[-*_]){3,}\\s*\\n', Token.Keyword),
        (u'^(# )(.+?)( #)?(\\n)', <function callback>),
        (u'^(#{2,6} )(.+?)( #{2,6})?(\\n)', <function callback>),
        (u'^(={3,}\\n)?(\\S.{2,}\\n)(={3,})(\\n)', <function callback>),
        (u'^(-{3,}\\n)?(\\S.{2,}\\n)(-{3,})(\\n)', <function callback>),
        (u'^\\s*>\\s', Token.Keyword),
        (u'^\\s*[-+*]\\s', Token.Keyword),
        (u'^\\s*[0-9]+\\.\\s', Token.Keyword),
        (u'^<(?P<tag>[-:a-zA-Z0-9]+)( [^>]+)>.+</(?P=tag)>\\n', Token.Comment.Single),
        (u'(<!--)((?:.*?\\n?)*)(-->)', <function callback>),
        (u'^<[^/>][^>]*>\\n', Token.Comment.Preproc, 'htmlblock'),
        (u'^(```)(.*?)(\\n)', <function callback>, 'codeblock'),
        'inline',
    ],
    'htmlblock': [
        (u'^</[^>]+>\\n', Token.Comment.Preproc, '#pop'),
        (u'.*\\n', Token.Comment.Preproc),
    ],
}
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\xa0'
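
Assuming the package is installed so that Pygments discovers it through its plugin entry point, the lexer is also reachable through the normal Pygments registry via the aliases and filename patterns listed above:

    from pygments.lexers import get_lexer_by_name, get_lexer_for_filename

    lexer = get_lexer_by_name('markdown')        # or 'md', per the aliases above
    print(lexer.name)                            # -> Markdown (this lexer's name attribute)

    lexer = get_lexer_for_filename('README.md')  # matched via filenames = ['*.md', ...]
    print(lexer.mimetypes)                       # -> ['text/x-markdown']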