{# closes "outerwrapper" div #}
+{% endblock %}
+
+{% block sidebarrel %}
+{% endblock %}
+
+{% block sidebarsourcelink %}
+{% endblock %}
diff --git a/vendor/pygments/doc/_themes/pygments14/static/bodybg.png b/vendor/pygments/doc/_themes/pygments14/static/bodybg.png
new file mode 100644
index 0000000..46892b8
Binary files /dev/null and b/vendor/pygments/doc/_themes/pygments14/static/bodybg.png differ
diff --git a/vendor/pygments/doc/_themes/pygments14/static/docbg.png b/vendor/pygments/doc/_themes/pygments14/static/docbg.png
new file mode 100644
index 0000000..13e61f3
Binary files /dev/null and b/vendor/pygments/doc/_themes/pygments14/static/docbg.png differ
diff --git a/vendor/pygments/doc/_themes/pygments14/static/listitem.png b/vendor/pygments/doc/_themes/pygments14/static/listitem.png
new file mode 100644
index 0000000..e45715f
Binary files /dev/null and b/vendor/pygments/doc/_themes/pygments14/static/listitem.png differ
diff --git a/vendor/pygments/doc/_themes/pygments14/static/logo.png b/vendor/pygments/doc/_themes/pygments14/static/logo.png
new file mode 100644
index 0000000..2c1a24d
Binary files /dev/null and b/vendor/pygments/doc/_themes/pygments14/static/logo.png differ
diff --git a/vendor/pygments/doc/_themes/pygments14/static/pocoo.png b/vendor/pygments/doc/_themes/pygments14/static/pocoo.png
new file mode 100644
index 0000000..4174149
Binary files /dev/null and b/vendor/pygments/doc/_themes/pygments14/static/pocoo.png differ
diff --git a/vendor/pygments/doc/_themes/pygments14/static/pygments14.css_t b/vendor/pygments/doc/_themes/pygments14/static/pygments14.css_t
new file mode 100644
index 0000000..5c37aaf
--- /dev/null
+++ b/vendor/pygments/doc/_themes/pygments14/static/pygments14.css_t
@@ -0,0 +1,401 @@
+/*
+ * pygments14.css
+ * ~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13.
+ *
+ * :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ text-align: center;
+ background-image: url(bodybg.png);
+ background-color: {{ theme_background }};
+ color: black;
+ padding: 0;
+ /*
+ border-right: 1px solid {{ theme_border }};
+ border-left: 1px solid {{ theme_border }};
+ */
+
+ margin: 0 auto;
+ min-width: 780px;
+ max-width: 1080px;
+}
+
+.outerwrapper {
+ background-image: url(docbg.png);
+ background-attachment: fixed;
+}
+
+.pageheader {
+ text-align: left;
+ padding: 10px 15px;
+}
+
+.pageheader ul {
+ float: right;
+ color: white;
+ list-style-type: none;
+ padding-left: 0;
+ margin-top: 40px;
+ margin-right: 10px;
+}
+
+.pageheader li {
+ float: left;
+ margin: 0 0 0 10px;
+}
+
+.pageheader li a {
+ border-radius: 3px;
+ padding: 8px 12px;
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 5px rgba(0, 0, 0, 0.2);
+}
+
+.pageheader li a:hover {
+ background-color: {{ theme_yellow }};
+ color: black;
+ text-shadow: none;
+}
+
+div.document {
+ text-align: left;
+ /*border-left: 1em solid {{ theme_lightyellow }};*/
+}
+
+div.bodywrapper {
+ margin: 0 12px 0 240px;
+ background-color: white;
+/* border-right: 1px solid {{ theme_border }}; */
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+ color: {{ theme_darkgray }};
+}
+
+div.related ul {
+ background-image: url(relbg.png);
+ background-repeat: repeat-y;
+ background-color: {{ theme_yellow }};
+ height: 1.9em;
+ /*
+ border-top: 1px solid {{ theme_border }};
+ border-bottom: 1px solid {{ theme_border }};
+ */
+}
+
+div.related ul li {
+ margin: 0 5px 0 0;
+ padding: 0;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: {{ theme_darkgray }};
+ /*text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);*/
+}
+
+div.related ul li a:hover {
+ text-decoration: underline;
+ text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
+}
+
+div.sphinxsidebarwrapper {
+ position: relative;
+ top: 0px;
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0 0px 15px 15px;
+ width: 210px;
+ float: left;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar .logo {
+ font-size: 1.8em;
+ color: #666;
+ font-weight: 300;
+ text-align: center;
+}
+
+div.sphinxsidebar .logo img {
+ vertical-align: middle;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #aaa;
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar h3 {
+ font-size: 1.5em;
+ /* border-top: 1px solid {{ theme_border }}; */
+ margin-top: 1em;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 1.2em;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-right: -15px;
+ margin-left: -15px;
+ padding-right: 14px;
+ padding-left: 14px;
+ color: #333;
+ font-weight: 300;
+ /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.5em;
+ border: none;
+}
+
+div.sphinxsidebar h3 a {
+ color: #333;
+}
+
+div.sphinxsidebar ul {
+ color: #444;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+ list-style-image: url(listitem.png);
+}
+
+div.footer {
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
+ padding: 2em;
+ text-align: center;
+ clear: both;
+ font-size: 0.8em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: {{ theme_darkgreen }};
+ text-decoration: none;
+}
+
+a:hover {
+ color: {{ theme_darkyellow }};
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 10px 0 0 0;
+ font-size: 2.4em;
+ color: {{ theme_darkgray }};
+ font-weight: 300;
+}
+
+h2 {
+  margin: 1em 0 0.2em 0;
+ font-size: 1.5em;
+ font-weight: 300;
+ padding: 0;
+ color: {{ theme_darkgreen }};
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.3em;
+ font-weight: 300;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ text-decoration: none;
+}
+
+div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
+ color: {{ theme_darkgreen }} !important;
+ font-size: inherit !important;
+}
+
+a.headerlink {
+ color: {{ theme_green }} !important;
+ font-size: 12px;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none !important;
+ float: right;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 14px;
+ letter-spacing: -0.02em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border: 1px solid #ddd;
+ border-radius: 2px;
+ color: #333;
+ padding: 1px;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: {{ theme_darkgreen }};
+}
+
+a tt:hover {
+ color: {{ theme_darkyellow }};
+}
+
+pre {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 13px;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ border-radius: 2px;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 0px 7px;
+ border: 1px solid #ccc;
+ margin-left: 1em;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ border-radius: 2px;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin-top: 1em;
+ padding-top: 0.5em;
+ font-weight: bold;
+}
+
+div.warning {
+ border: 1px solid #940000;
+/* background-color: #FFCCCF;*/
+}
+
+div.warning p.admonition-title {
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+.viewcode-back {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
diff --git a/vendor/pygments/doc/_themes/pygments14/theme.conf b/vendor/pygments/doc/_themes/pygments14/theme.conf
new file mode 100644
index 0000000..fffe66d
--- /dev/null
+++ b/vendor/pygments/doc/_themes/pygments14/theme.conf
@@ -0,0 +1,15 @@
+[theme]
+inherit = basic
+stylesheet = pygments14.css
+pygments_style = friendly
+
+[options]
+green = #66b55e
+darkgreen = #36852e
+darkgray = #666666
+border = #66b55e
+yellow = #f4cd00
+darkyellow = #d4ad00
+lightyellow = #fffbe3
+background = #f9f9f9
+font = PT Sans
diff --git a/vendor/pygments/doc/conf.py b/vendor/pygments/doc/conf.py
new file mode 100644
index 0000000..51a9161
--- /dev/null
+++ b/vendor/pygments/doc/conf.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+#
+# Pygments documentation build configuration file
+#
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+import pygments
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Pygments'
+copyright = u'2015, Georg Brandl'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = pygments.__version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+#pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'pygments14'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['_themes']
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = '_static/favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': 'indexsidebar.html',
+ 'docs/*': 'docssidebar.html'}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Pygmentsdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'Pygments.tex', u'Pygments Documentation',
+ u'Georg Brandl', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'pygments', u'Pygments Documentation',
+ [u'Georg Brandl'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Pygments', u'Pygments Documentation',
+ u'Georg Brandl', 'Pygments', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/vendor/pygments/doc/docs/api.rst b/vendor/pygments/doc/docs/api.rst
new file mode 100644
index 0000000..123a464
--- /dev/null
+++ b/vendor/pygments/doc/docs/api.rst
@@ -0,0 +1,316 @@
+.. -*- mode: rst -*-
+
+=====================
+The full Pygments API
+=====================
+
+This page describes the Pygments API.
+
+High-level API
+==============
+
+.. module:: pygments
+
+Functions from the :mod:`pygments` module:
+
+.. function:: lex(code, lexer)
+
+ Lex `code` with the `lexer` (must be a `Lexer` instance)
+ and return an iterable of tokens. Currently, this only calls
+ `lexer.get_tokens()`.
+
+.. function:: format(tokens, formatter, outfile=None)
+
+ Format a token stream (iterable of tokens) `tokens` with the
+ `formatter` (must be a `Formatter` instance). The result is
+ written to `outfile`, or if that is ``None``, returned as a
+ string.
+
+.. function:: highlight(code, lexer, formatter, outfile=None)
+
+ This is the most high-level highlighting function.
+ It combines `lex` and `format` in one function.
+
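+    For example, a short usage sketch (the ``code`` string is just a
+    placeholder)::
+
+        from pygments import highlight
+        from pygments.lexers import PythonLexer
+        from pygments.formatters import HtmlFormatter
+
+        code = 'print "Hello World"'
+        # no outfile is given, so the result is returned as a string
+        html = highlight(code, PythonLexer(), HtmlFormatter())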
+
+.. module:: pygments.lexers
+
+Functions from :mod:`pygments.lexers`:
+
+.. function:: get_lexer_by_name(alias, **options)
+
+ Return an instance of a `Lexer` subclass that has `alias` in its
+ aliases list. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
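+
+    For example::
+
+        from pygments.lexers import get_lexer_by_name
+        lexer = get_lexer_by_name('python', stripall=True)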
+
+.. function:: get_lexer_for_filename(fn, **options)
+
+ Return a `Lexer` subclass instance that has a filename pattern
+ matching `fn`. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
+ is found.
+
+.. function:: get_lexer_for_mimetype(mime, **options)
+
+ Return a `Lexer` subclass instance that has `mime` in its mimetype
+ list. The lexer is given the `options` at its instantiation.
+
+   Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+ is found.
+
+.. function:: guess_lexer(text, **options)
+
+ Return a `Lexer` subclass instance that's guessed from the text in
+ `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+ class is called with the text as argument, and the lexer which returned the
+ highest value will be instantiated and returned.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
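+
+    For example::
+
+        from pygments.lexers import guess_lexer
+        lexer = guess_lexer('#!/usr/bin/python\nprint "Hello World"')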
+
+.. function:: guess_lexer_for_filename(filename, text, **options)
+
+ As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+ or `alias_filenames` that matches `filename` are taken into consideration.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: get_all_lexers()
+
+ Return an iterable over all registered lexers, yielding tuples in the
+ format::
+
+ (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.formatters
+
+Functions from :mod:`pygments.formatters`:
+
+.. function:: get_formatter_by_name(alias, **options)
+
+ Return an instance of a :class:`.Formatter` subclass that has `alias` in its
+ aliases list. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
+ alias is found.
+
+.. function:: get_formatter_for_filename(fn, **options)
+
+ Return a :class:`.Formatter` subclass instance that has a filename pattern
+ matching `fn`. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
+ is found.
+
+
+.. module:: pygments.styles
+
+Functions from :mod:`pygments.styles`:
+
+.. function:: get_style_by_name(name)
+
+ Return a style class by its short name. The names of the builtin styles
+ are listed in :data:`pygments.styles.STYLE_MAP`.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
+ found.
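+
+    For example::
+
+        from pygments.styles import get_style_by_name
+        style = get_style_by_name('friendly')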
+
+.. function:: get_all_styles()
+
+ Return an iterable over all registered styles, yielding their names.
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.lexer
+
+Lexers
+======
+
+The base lexer class from which all lexers are derived is:
+
+.. class:: Lexer(**options)
+
+ The constructor takes a \*\*keywords dictionary of options.
+ Every subclass must first process its own options and then call
+ the `Lexer` constructor, since it processes the `stripnl`,
+ `stripall` and `tabsize` options.
+
+ An example looks like this:
+
+ .. sourcecode:: python
+
+ def __init__(self, **options):
+ self.compress = options.get('compress', '')
+ Lexer.__init__(self, **options)
+
+ As these options must all be specifiable as strings (due to the
+ command line usage), there are various utility functions
+ available to help with that, see `Option processing`_.
+
+ .. method:: get_tokens(text)
+
+ This method is the basic interface of a lexer. It is called by
+ the `highlight()` function. It must process the text and return an
+ iterable of ``(tokentype, value)`` pairs from `text`.
+
+ Normally, you don't need to override this method. The default
+ implementation processes the `stripnl`, `stripall` and `tabsize`
+ options and then yields all tokens from `get_tokens_unprocessed()`,
+ with the ``index`` dropped.
+
+ .. method:: get_tokens_unprocessed(text)
+
+ This method should process the text and return an iterable of
+ ``(index, tokentype, value)`` tuples where ``index`` is the starting
+ position of the token within the input text.
+
+ This method must be overridden by subclasses.
+
+ .. staticmethod:: analyse_text(text)
+
+ A static method which is called for lexer guessing. It should analyse
+ the text and return a float in the range from ``0.0`` to ``1.0``.
+ If it returns ``0.0``, the lexer will not be selected as the most
+ probable one, if it returns ``1.0``, it will be selected immediately.
+
+ .. note:: You don't have to add ``@staticmethod`` to the definition of
+      this method; this will be taken care of by the Lexer's metaclass.
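+
+      For instance, a hypothetical shell-script lexer might use::
+
+          def analyse_text(text):
+              # a shebang line is a strong signal
+              return 1.0 if text.startswith('#!') else 0.0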
+
+ For a list of known tokens have a look at the :doc:`tokens` page.
+
+ A lexer also can have the following attributes (in fact, they are mandatory
+ except `alias_filenames`) that are used by the builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the lexer, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the lexer from a list, e.g. using `get_lexer_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of `fnmatch` patterns that match filenames which contain
+ content for this lexer. The patterns in this list should be unique among
+ all lexers.
+
+ .. attribute:: alias_filenames
+
+ A list of `fnmatch` patterns that match filenames which may or may not
+ contain content for this lexer. This list is used by the
+ :func:`.guess_lexer_for_filename()` function, to determine which lexers
+ are then included in guessing the correct one. That means that
+ e.g. every lexer for HTML and a template language should include
+ ``\*.html`` in this list.
+
+ .. attribute:: mimetypes
+
+ A list of MIME types for content that can be lexed with this
+ lexer.
+
+
+.. module:: pygments.formatter
+
+Formatters
+==========
+
+A formatter is derived from this class:
+
+
+.. class:: Formatter(**options)
+
+ As with lexers, this constructor processes options and then must call the
+ base class :meth:`__init__`.
+
+ The :class:`Formatter` class recognizes the options `style`, `full` and
+ `title`. It is up to the formatter class whether it uses them.
+
+ .. method:: get_style_defs(arg='')
+
+ This method must return statements or declarations suitable to define
+ the current style for subsequent highlighted text (e.g. CSS classes
+ in the `HTMLFormatter`).
+
+ The optional argument `arg` can be used to modify the generation and
+ is formatter dependent (it is standardized because it can be given on
+ the command line).
+
+      This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
+ the `arg` is then given by the ``-a`` option.
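+
+      For example, with the HTML formatter::
+
+          from pygments.formatters import HtmlFormatter
+          print(HtmlFormatter().get_style_defs('.highlight'))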
+
+ .. method:: format(tokensource, outfile)
+
+ This method must format the tokens from the `tokensource` iterable and
+ write the formatted version to the file object `outfile`.
+
+ Formatter options can control how exactly the tokens are converted.
+
+ .. versionadded:: 0.7
+ A formatter must have the following attributes that are used by the
+ builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the formatter, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of :mod:`fnmatch` patterns that match filenames for which this
+ formatter can produce output. The patterns in this list should be unique
+ among all formatters.
+
+
+.. module:: pygments.util
+
+Option processing
+=================
+
+The :mod:`pygments.util` module has some utility functions usable for option
+processing:
+
+.. exception:: OptionError
+
+ This exception will be raised by all option processing functions if
+ the type or value of the argument is not correct.
+
+.. function:: get_bool_opt(options, optname, default=None)
+
+ Interpret the key `optname` from the dictionary `options` as a boolean and
+ return it. Return `default` if `optname` is not in `options`.
+
+ The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+ ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+ (matched case-insensitively).
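+
+    For example::
+
+        from pygments.util import get_bool_opt
+        options = {'stripall': 'yes'}
+        stripall = get_bool_opt(options, 'stripall', False)  # True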
+
+.. function:: get_int_opt(options, optname, default=None)
+
+ As :func:`get_bool_opt`, but interpret the value as an integer.
+
+.. function:: get_list_opt(options, optname, default=None)
+
+ If the key `optname` from the dictionary `options` is a string,
+ split it at whitespace and return it. If it is already a list
+ or a tuple, it is returned as a list.
+
+.. function:: get_choice_opt(options, optname, allowed, default=None)
+
+ If the key `optname` from the dictionary is not in the sequence
+ `allowed`, raise an error, otherwise return it.
+
+ .. versionadded:: 0.8
diff --git a/vendor/pygments/doc/docs/authors.rst b/vendor/pygments/doc/docs/authors.rst
new file mode 100644
index 0000000..f8373f0
--- /dev/null
+++ b/vendor/pygments/doc/docs/authors.rst
@@ -0,0 +1,4 @@
+Full contributor list
+=====================
+
+.. include:: ../../AUTHORS
diff --git a/vendor/pygments/doc/docs/changelog.rst b/vendor/pygments/doc/docs/changelog.rst
new file mode 100644
index 0000000..f264cab
--- /dev/null
+++ b/vendor/pygments/doc/docs/changelog.rst
@@ -0,0 +1 @@
+.. include:: ../../CHANGES
diff --git a/vendor/pygments/docs/src/cmdline.txt b/vendor/pygments/doc/docs/cmdline.rst
similarity index 86%
rename from vendor/pygments/docs/src/cmdline.txt
rename to vendor/pygments/doc/docs/cmdline.rst
index a48a5c2..165af96 100644
--- a/vendor/pygments/docs/src/cmdline.txt
+++ b/vendor/pygments/doc/docs/cmdline.rst
@@ -4,8 +4,8 @@
Command Line Interface
======================
-You can use Pygments from the shell, provided you installed the `pygmentize`
-script::
+You can use Pygments from the shell, provided you installed the
+:program:`pygmentize` script::
$ pygmentize test.py
print "Hello World"
@@ -28,7 +28,7 @@ written to stdout.
The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted
if an output file name is given and has a supported extension).
If no output file name is given and ``-f`` is omitted, the
-`TerminalFormatter` is used.
+:class:`.TerminalFormatter` is used.
The above command could therefore also be given as::
@@ -82,14 +82,15 @@ Usage is as follows::
generates a CSS style sheet (because you selected the HTML formatter) for
the "colorful" style prepending a ".syntax" selector to all style rules.
-For an explanation what ``-a`` means for `a particular formatter`_, look for
-the `arg` argument for the formatter's `get_style_defs()` method.
+For an explanation what ``-a`` means for :doc:`a particular formatter
+<formatters>`, look for the `arg` argument for the formatter's
+:meth:`.get_style_defs()` method.
Getting lexer names
-------------------
-*New in Pygments 1.0.*
+.. versionadded:: 1.0
The ``-N`` option guesses a lexer name for a given filename, so that ::
@@ -125,7 +126,7 @@ will print the help for the Python lexer, etc.
A note on encodings
-------------------
-*New in Pygments 0.9.*
+.. versionadded:: 0.9
Pygments tries to be smart regarding encodings in the formatting process:
@@ -135,13 +136,14 @@ Pygments tries to be smart regarding encodings in the formatting process:
* If you give an ``outencoding`` option, it will override ``encoding``
as the output encoding.
+* If you give an ``inencoding`` option, it will override ``encoding``
+ as the input encoding.
+
* If you don't give an encoding and have given an output file, the default
- encoding for lexer and formatter is ``latin1`` (which will pass through
- all non-ASCII characters).
+ encoding for lexer and formatter is the terminal encoding or the default
+ locale encoding of the system. As a last resort, ``latin1`` is used (which
+ will pass through all non-ASCII characters).
* If you don't give an encoding and haven't given an output file (that means
output is written to the console), the default encoding for lexer and
- formatter is the terminal encoding (`sys.stdout.encoding`).
-
-
-.. _a particular formatter: formatters.txt
+ formatter is the terminal encoding (``sys.stdout.encoding``).
diff --git a/vendor/pygments/docs/src/filterdevelopment.txt b/vendor/pygments/doc/docs/filterdevelopment.rst
similarity index 88%
rename from vendor/pygments/docs/src/filterdevelopment.txt
rename to vendor/pygments/doc/docs/filterdevelopment.rst
index c60e1e8..fbcd0a0 100644
--- a/vendor/pygments/docs/src/filterdevelopment.txt
+++ b/vendor/pygments/doc/docs/filterdevelopment.rst
@@ -4,11 +4,11 @@
Write your own filter
=====================
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
Writing own filters is very easy. All you have to do is to subclass
the `Filter` class and override the `filter` method. Additionally a
-filter is instanciated with some keyword arguments you can use to
+filter is instantiated with some keyword arguments you can use to
adjust the behavior of your filter.
@@ -58,7 +58,7 @@ You can also use the `simplefilter` decorator from the `pygments.filter` module:
@simplefilter
- def uncolor(lexer, stream, options):
+ def uncolor(self, lexer, stream, options):
class_too = get_bool_opt(options, 'classtoo')
for ttype, value in stream:
if ttype is Name.Function or (class_too and
@@ -67,4 +67,5 @@ You can also use the `simplefilter` decorator from the `pygments.filter` module:
yield ttype, value
The decorator automatically subclasses an internal filter class and uses the
-decorated function for filtering.
+decorated function as a method for filtering. (That's why there is a `self`
+argument that you probably won't end up using in the method.)
diff --git a/vendor/pygments/docs/src/filters.txt b/vendor/pygments/doc/docs/filters.rst
similarity index 85%
rename from vendor/pygments/docs/src/filters.txt
rename to vendor/pygments/doc/docs/filters.rst
index 522f633..ff2519a 100644
--- a/vendor/pygments/docs/src/filters.txt
+++ b/vendor/pygments/doc/docs/filters.rst
@@ -4,7 +4,7 @@
Filters
=======
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
You can filter token streams coming from lexers to improve or annotate the
output. For example, you can highlight special words in comments, convert
@@ -31,12 +31,11 @@ To get a list of all registered filters by name, you can use the
`get_all_filters()` function from the `pygments.filters` module that returns an
iterable for all known filters.
-If you want to write your own filter, have a look at `Write your own filter`_.
-
-.. _Write your own filter: filterdevelopment.txt
+If you want to write your own filter, have a look at :doc:`Write your own
+filter <filterdevelopment>`.
Builtin Filters
===============
-[builtin_filter_docs]
+.. pygmentsdoc:: filters
diff --git a/vendor/pygments/docs/src/formatterdevelopment.txt b/vendor/pygments/doc/docs/formatterdevelopment.rst
similarity index 98%
rename from vendor/pygments/docs/src/formatterdevelopment.txt
rename to vendor/pygments/doc/docs/formatterdevelopment.rst
index 83a13b6..2bfac05 100644
--- a/vendor/pygments/docs/src/formatterdevelopment.txt
+++ b/vendor/pygments/doc/docs/formatterdevelopment.rst
@@ -4,7 +4,7 @@
Write your own formatter
========================
-As well as creating `your own lexer <lexerdevelopment.txt>`_, writing a new
+As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new
formatter for Pygments is easy and straightforward.
A formatter is a class that is initialized with some keyword arguments (the
diff --git a/vendor/pygments/docs/src/formatters.txt b/vendor/pygments/doc/docs/formatters.rst
similarity index 90%
rename from vendor/pygments/docs/src/formatters.txt
rename to vendor/pygments/doc/docs/formatters.rst
index 7a59064..9e7074e 100644
--- a/vendor/pygments/docs/src/formatters.txt
+++ b/vendor/pygments/doc/docs/formatters.rst
@@ -12,8 +12,6 @@ Common options
All formatters support these options:
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This will
be used to convert the token strings (which are Unicode strings)
to byte strings in the output (default: ``None``).
@@ -30,19 +28,21 @@ All formatters support these options:
supports Unicode arguments to `write()`. Using a regular file object
wouldn't work.
-`outencoding`
- *New in Pygments 0.7.*
+ .. versionadded:: 0.6
+`outencoding`
When using Pygments from the command line, any `encoding` option given is
passed to the lexer and the formatter. This is sometimes not desirable,
for example if you want to set the input encoding to ``"guess"``.
Therefore, `outencoding` has been introduced which overrides `encoding`
for the formatter if given.
+ .. versionadded:: 0.7
+
Formatter classes
=================
-All these classes are importable from `pygments.formatters`.
+All these classes are importable from :mod:`pygments.formatters`.
-[builtin_formatter_docs]
+.. pygmentsdoc:: formatters
diff --git a/vendor/pygments/doc/docs/index.rst b/vendor/pygments/doc/docs/index.rst
new file mode 100644
index 0000000..30d5c08
--- /dev/null
+++ b/vendor/pygments/doc/docs/index.rst
@@ -0,0 +1,66 @@
+Pygments documentation
+======================
+
+**Starting with Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ ../download
+ quickstart
+ cmdline
+
+**Builtin components**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexers
+ filters
+ formatters
+ styles
+
+**Reference**
+
+.. toctree::
+ :maxdepth: 1
+
+ unicode
+ tokens
+ api
+
+**Hacking for Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexerdevelopment
+ formatterdevelopment
+ filterdevelopment
+ plugins
+
+**Hints and tricks**
+
+.. toctree::
+ :maxdepth: 1
+
+ rstdirective
+ moinmoin
+ java
+ integrate
+
+**About Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ changelog
+ authors
+
+
+If you find bugs or have suggestions for the documentation, please look
+:ref:`here ` for info on how to contact the team.
+
+.. XXX You can download an offline version of this documentation from the
+   :doc:`download page <../download>`.
+
diff --git a/vendor/pygments/docs/src/integrate.txt b/vendor/pygments/doc/docs/integrate.rst
similarity index 54%
rename from vendor/pygments/docs/src/integrate.txt
rename to vendor/pygments/doc/docs/integrate.rst
index 6f8c125..77daaa4 100644
--- a/vendor/pygments/docs/src/integrate.txt
+++ b/vendor/pygments/doc/docs/integrate.rst
@@ -4,27 +4,13 @@
Using Pygments in various scenarios
===================================
-PyGtk
------
-
-Armin has written a piece of sample code that shows how to create a Gtk
-`TextBuffer` object containing Pygments-highlighted text.
-
-See the article here: http://lucumr.pocoo.org/cogitations/2007/05/30/pygments-gtk-rendering/
-
-Wordpress
----------
-
-He also has a snippet that shows how to use Pygments in WordPress:
-
-http://lucumr.pocoo.org/cogitations/2007/05/30/pygments-in-wordpress/
-
Markdown
--------
Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code
-that uses Pygments to render source code in `external/markdown-processor.py`.
-You can copy and adapt it to your liking.
+that uses Pygments to render source code in
+:file:`external/markdown-processor.py`. You can copy and adapt it to your
+liking.
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
@@ -42,7 +28,13 @@ Bash completion
The source distribution contains a file ``external/pygments.bashcomp`` that
sets up completion for the ``pygmentize`` command in bash.
-Java
-----
+Wrappers for other languages
+----------------------------
-See the `Java quickstart <java.txt>`_ document.
+These libraries provide Pygments highlighting for users of other languages
+than Python:
+
+* `pygments.rb <https://github.com/tmm1/pygments.rb>`_, a pygments wrapper for Ruby
+* `Clygments <https://github.com/bfontaine/clygments>`_, a pygments wrapper for
+  Clojure
+* `PHPygments <https://github.com/capynet/PHPygments>`_, a pygments wrapper for PHP
diff --git a/vendor/pygments/docs/src/java.txt b/vendor/pygments/doc/docs/java.rst
similarity index 82%
rename from vendor/pygments/docs/src/java.txt
rename to vendor/pygments/doc/docs/java.rst
index 5eb6196..f553463 100644
--- a/vendor/pygments/docs/src/java.txt
+++ b/vendor/pygments/doc/docs/java.rst
@@ -2,18 +2,18 @@
Use Pygments in Java
=====================
-Thanks to `Jython <http://www.jython.org>`__ it is possible to use Pygments in
+Thanks to `Jython <http://www.jython.org>`_ it is possible to use Pygments in
Java.
-This page is a simple tutorial to get an idea of how this is working. You can
-then look at the `Jython documentation <http://www.jython.org/docs/>`__ for more
-advanced use.
+This page is a simple tutorial to get an idea of how this works. You can
+then look at the `Jython documentation <http://www.jython.org/docs/>`_ for more
+advanced uses.
Since version 1.5, Pygments is deployed on `Maven Central
-<http://repo1.maven.org/maven2/org/pygments/pygments/>`__ as a JAR so is Jython
-which makes it a lot easier to create the Java project.
+<http://repo1.maven.org/maven2/org/pygments/pygments/>`_ as a JAR, as is Jython
+which makes it a lot easier to create a Java project.
-Here is an example of a `Maven <http://maven.apache.org/>`__ ``pom.xml`` file for a
+Here is an example of a `Maven <http://maven.apache.org/>`_ ``pom.xml`` file for a
project running Pygments:
.. sourcecode:: xml
diff --git a/vendor/pygments/doc/docs/lexerdevelopment.rst b/vendor/pygments/doc/docs/lexerdevelopment.rst
new file mode 100644
index 0000000..2c86844
--- /dev/null
+++ b/vendor/pygments/doc/docs/lexerdevelopment.rst
@@ -0,0 +1,681 @@
+.. -*- mode: rst -*-
+
+.. highlight:: python
+
+====================
+Write your own lexer
+====================
+
+If a lexer for your favorite language is missing in the Pygments package, you
+can easily write your own and extend Pygments.
+
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation `, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to lex.
+
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
+containing tuples in the form ``(index, token, value)``. Normally you don't
+need to do this since there are base lexers that do most of the work and that
+you can subclass.
+
+
+RegexLexer
+==========
+
+The lexer base class used by almost all of Pygments' lexers is the
+:class:`RegexLexer`. This class allows you to define lexing rules in terms of
+*regular expressions* for different *states*.
+
+States are groups of regular expressions that are matched against the input
+string at the *current position*. If one of these expressions matches, a
+corresponding action is performed (such as yielding a token with a specific
+type, or changing state), the current position is set to where the last match
+ended and the matching process continues with the first regex of the current
+state.
+
+Lexer states are kept on a stack: each time a new state is entered, the new
+state is pushed onto the stack. The most basic lexers (like the `DiffLexer`)
+just need one state.
+
+Each state is defined as a list of tuples in the form (`regex`, `action`,
+`new_state`) where the last item is optional. In the most basic form, `action`
+is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a
+token with the match text and type `tokentype` and push `new_state` on the state
+stack. If the new state is ``'#pop'``, the topmost state is popped from the
+stack instead. To pop more than one state, use ``'#pop:2'`` and so on.
+``'#push'`` is a synonym for pushing the current state on the stack.
+
+The following example shows the `DiffLexer` from the builtin lexers. Note that
+it contains some additional attributes `name`, `aliases` and `filenames` which
+aren't required for a lexer. They are used by the builtin lexer lookup
+functions. ::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class DiffLexer(RegexLexer):
+ name = 'Diff'
+ aliases = ['diff']
+ filenames = ['*.diff']
+
+        tokens = {
+            'root': [
+                (r' .*\n', Text),
+                (r'\+.*\n', Generic.Inserted),
+                (r'-.*\n', Generic.Deleted),
+                (r'@.*\n', Generic.Subheading),
+                (r'Index.*\n', Generic.Heading),
+                (r'=.*\n', Generic.Heading),
+                (r'.*\n', Text),
+            ]
+        }
+
+As you can see this lexer only uses one state. When the lexer starts scanning
+the text, it first checks if the current character is a space. If this is true
+it scans everything until newline and returns the data as a `Text` token (which
+is the "no special highlighting" token).
+
+If this rule doesn't match, it checks if the current char is a plus sign. And
+so on.
+
+If no rule matches at the current position, the current char is emitted as an
+`Error` token that indicates a lexing error, and the position is increased by
+one.
+
+
+Adding and testing a new lexer
+==============================
+
+To make Pygments aware of your new lexer, you have to perform the following
+steps:
+
+First, change to the current directory containing the Pygments source code:
+
+.. code-block:: console
+
+ $ cd .../pygments-main
+
+Select a matching module under ``pygments/lexers``, or create a new module for
+your lexer class.
+
+Next, make sure the lexer is known from outside of the module. All modules in
+the ``pygments.lexers`` package specify ``__all__``. For example, ``esoteric.py`` sets::
+
+ __all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+Simply add the name of your lexer class to this list.
+
+Finally the lexer can be made publicly known by rebuilding the lexer mapping:
+
+.. code-block:: console
+
+ $ make mapfiles
+
+To test the new lexer, store an example file with the proper extension in
+``tests/examplefiles``. For example, to test your ``DiffLexer``, add a
+``tests/examplefiles/example.diff`` containing a sample diff output.
+
+Now you can use pygmentize to render your example to HTML:
+
+.. code-block:: console
+
+ $ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff
+
+Note that this explicitly calls the ``pygmentize`` in the current directory
+by preceding it with ``./``. This ensures your modifications are used.
+Otherwise a possibly already installed, unmodified version without your new
+lexer would have been called from the system search path (``$PATH``).
+
+To view the result, open ``/tmp/example.html`` in your browser.
+
+Once the example renders as expected, you should run the complete test suite:
+
+.. code-block:: console
+
+ $ make test
+
+It also tests that your lexer fulfills the lexer API and certain invariants,
+such as that the concatenation of all token text is the same as the input text.
+
+
+Regex Flags
+===========
+
+You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or
+globally by adding a `flags` attribute to your lexer class. If no attribute is
+defined, it defaults to `re.MULTILINE`. For more information about regular
+expression flags see the page about `regular expressions`_ in the Python
+documentation.
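+
+For instance, a lexer that wants case-insensitive matching, with ``.`` also
+matching newlines, might set (a minimal sketch)::
+
+    import re
+    from pygments.lexer import RegexLexer
+    from pygments.token import Text
+
+    class MyLexer(RegexLexer):
+        # applied to every rule regex in `tokens`
+        flags = re.IGNORECASE | re.DOTALL
+        tokens = {'root': [(r'.+', Text)]}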
+
+.. _regular expressions: http://docs.python.org/library/re.html#regular-expression-syntax
+
+
+Scanning multiple tokens at once
+================================
+
+So far, the `action` element in the rule tuple of regex, action and state has
+been a single token type. Now we look at the first of several other possible
+values.
+
+Here is a more complex lexer that highlights INI files. INI files consist of
+sections, comments and ``key = value`` pairs::
+
+ from pygments.lexer import RegexLexer, bygroups
+ from pygments.token import *
+
+ class IniLexer(RegexLexer):
+ name = 'INI'
+ aliases = ['ini', 'cfg']
+ filenames = ['*.ini', '*.cfg']
+
+        tokens = {
+            'root': [
+                (r'\s+', Text),
+                (r';.*?$', Comment),
+                (r'\[.*?\]$', Keyword),
+                (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+                 bygroups(Name.Attribute, Text, Operator, Text, String))
+            ]
+        }
+
+The lexer first looks for whitespace, comments and section names. Later it
+looks for a line that looks like a key, value pair, separated by an ``'='``
+sign, and optional whitespace.
+
+The `bygroups` helper yields each capturing group in the regex with a different
+token type. First the `Name.Attribute` token, then a `Text` token for the
+optional whitespace, after that an `Operator` token for the equals sign. Then a
+`Text` token for the whitespace again. The rest of the line is returned as
+`String`.
+
+Note that for this to work, every part of the match must be inside a capturing
+group (a ``(...)``), and there must not be any nested capturing groups. If you
+nevertheless need a group, use a non-capturing group defined using this syntax:
+``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis).
+
+If you find yourself needing a capturing group inside the regex which shouldn't
+be part of the output but is used in the regular expressions for backreferencing
+(eg: ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None` to the bygroups
+function and that group will be skipped in the output.
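+
+For example, the rule mentioned above could skip the backreferenced groups
+like this (a sketch)::
+
+    (r'(<(foo|bar)>)(.*?)(</\2>)',
+     bygroups(Name.Tag, None, Text, Name.Tag)),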
+
+
+Changing states
+===============
+
+Many lexers need multiple states to work as expected. For example, some
+languages allow multiline comments to be nested. Since this is a recursive
+pattern it's impossible to lex just using regular expressions.
+
+Here is a lexer that recognizes C++ style comments (multi-line with ``/* */``
+and single-line with ``//`` until end of line)::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class CppCommentLexer(RegexLexer):
+ name = 'Example Lexer with states'
+
+        tokens = {
+            'root': [
+                (r'[^/]+', Text),
+                (r'/\*', Comment.Multiline, 'comment'),
+                (r'//.*?$', Comment.Singleline),
+                (r'/', Text)
+            ],
+            'comment': [
+                (r'[^*/]', Comment.Multiline),
+                (r'/\*', Comment.Multiline, '#push'),
+                (r'\*/', Comment.Multiline, '#pop'),
+                (r'[*/]', Comment.Multiline)
+            ]
+        }
+
+This lexer starts lexing in the ``'root'`` state. It tries to match as much as
+possible until it finds a slash (``'/'``). If the next character after the slash
+is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the
+output stream marked as `Comment.Multiline` and continues lexing with the rules
+defined in the ``'comment'`` state.
+
+If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a
+Singleline comment (i.e. followed by a second slash). If this also wasn't the
+case it must be a single slash, which is not a comment starter (the separate
+regex for a single slash must also be given, else the slash would be marked as
+an error token).
+
+Inside the ``'comment'`` state, we do the same thing again. Scan until the
+lexer finds a star or slash. If it's the opening of a multiline comment, push
+the ``'comment'`` state on the stack and continue scanning, again in the
+``'comment'`` state. Else, check if it's the end of the multiline comment. If
+yes, pop one state from the stack.
+
+Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
+easy way to prevent this from happening: don't ``'#pop'`` in the root state).
+
+If the `RegexLexer` encounters a newline that is flagged as an error token, the
+stack is emptied and the lexer continues scanning in the ``'root'`` state. This
+can help produce error-tolerant highlighting for erroneous input, e.g. when a
+single-line string is not closed.
+
+
+Advanced state tricks
+=====================
+
+There are a few more things you can do with states:
+
+- You can push multiple states onto the stack if you give a tuple instead of a
+ simple string as the third item in a rule tuple. For example, if you want to
+ match a comment containing a directive, something like:
+
+ .. code-block:: text
+
+      /* <processing directive>    rest of comment */
+
+ you can use this rule::
+
+ tokens = {
+ 'root': [
+ (r'/\* <', Comment, ('comment', 'directive')),
+ ...
+ ],
+ 'directive': [
+ (r'[^>]*', Comment.Directive),
+ (r'>', Comment, '#pop'),
+ ],
+ 'comment': [
+ (r'[^*]+', Comment),
+ (r'\*/', Comment, '#pop'),
+ (r'\*', Comment),
+ ]
+ }
+
+ When this encounters the above sample, first ``'comment'`` and ``'directive'``
+ are pushed onto the stack, then the lexer continues in the directive state
+ until it finds the closing ``>``, then it continues in the comment state until
+ the closing ``*/``. Then, both states are popped from the stack again and
+ lexing continues in the root state.
+
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
+
+
+- You can include the rules of a state in the definition of another. This is
+ done by using `include` from `pygments.lexer`::
+
+ from pygments.lexer import RegexLexer, bygroups, include
+ from pygments.token import *
+
+ class ExampleLexer(RegexLexer):
+ tokens = {
+ 'comments': [
+ (r'/\*.*?\*/', Comment),
+ (r'//.*?\n', Comment),
+ ],
+ 'root': [
+ include('comments'),
+ (r'(function )(\w+)( {)',
+ bygroups(Keyword, Name, Keyword), 'function'),
+ (r'.', Text),
+ ],
+ 'function': [
+ (r'[^}/]+', Text),
+ include('comments'),
+ (r'/', Text),
+ (r'\}', Keyword, '#pop'),
+ ]
+ }
+
+  This is a hypothetical lexer for a language that consists of functions and
+ comments. Because comments can occur at toplevel and in functions, we need
+ rules for comments in both states. As you can see, the `include` helper saves
+ repeating rules that occur more than once (in this example, the state
+  ``'comments'`` will never be entered by the lexer, as it's only there to be
+ included in ``'root'`` and ``'function'``).
+
+- Sometimes, you may want to "combine" a state from existing ones. This is
+ possible with the `combined` helper from `pygments.lexer`.
+
+ If you, instead of a new state, write ``combined('state1', 'state2')`` as the
+ third item of a rule tuple, a new anonymous state will be formed from state1
+ and state2 and if the rule matches, the lexer will enter this state.
+
+ This is not used very often, but can be helpful in some cases, such as the
+ `PythonLexer`'s string literal processing.
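+
+  A sketch, with made-up state names::
+
+      (r'"', String.Double, combined('escapes', 'dqstring')),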
+
+- If you want your lexer to start lexing in a different state you can modify the
+ stack by overriding the `get_tokens_unprocessed()` method::
+
+ from pygments.lexer import RegexLexer
+
+ class ExampleLexer(RegexLexer):
+ tokens = {...}
+
+ def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
+            for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
+ yield item
+
+  Some lexers like the `PhpLexer` use this to make the leading ``<?php``
+  preamble optional.  Note that the lexer will only be started once with a
+  stack!
+
+
+Using multiple lexers
+=====================
+
+Using multiple lexers for the same input can be tricky.  One of the easiest
+combination techniques is shown here: you can replace the action entry in a
+rule tuple with a lexer class.  The matched text will then be lexed with that
+lexer, which must be a subclass of `Lexer`.
+
+For example, an HTML lexer can hand the contents of ``<script>`` tags over to
+a `JavascriptLexer` with the `using` helper (surrounding rules abbreviated
+here)::
+
+    from pygments.lexer import RegexLexer, bygroups, using
+    from pygments.lexers import JavascriptLexer
+    from pygments.token import *
+
+    class HtmlLexer(RegexLexer):
+        name = 'HTML'
+        aliases = ['html']
+        filenames = ['*.html', '*.htm']
+
+        tokens = {
+            'root': [
+                (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+                ...
+            ],
+            'script-content': [
+                (r'(.+?)(<\s*/\s*script\s*>)',
+                 bygroups(using(JavascriptLexer), Name.Tag),
+                 '#pop'),
+            ]
+        }
+
+Here the content of a ``<script>`` tag up to the ``</script>`` end tag is
+processed by the `JavascriptLexer`, while the end tag is yielded as a normal
+token with the `Name.Tag` type.
+
+Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
+Here, two states are pushed onto the state stack, ``'script-content'`` and
+``'tag'``. That means that first ``'tag'`` is processed, which will lex
+attributes and the closing ``>``, then the ``'tag'`` state is popped and the
+next state on top of the stack will be ``'script-content'``.
+
+Since you cannot refer to the class currently being defined, use `this`
+(imported from `pygments.lexer`) to refer to the current lexer class, i.e.
+``using(this)``. This construct may seem unnecessary, but this is often the
+most obvious way of lexing arbitrary syntax between fixed delimiters without
+introducing deeply nested states.
+
+The `using()` helper has a special keyword argument, `state`, which works as
+follows: if given, the lexer to use initially is not in the ``"root"`` state,
+but in the state given by this argument. This does not work with advanced
+`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below).
+
+Any other keyword arguments passed to `using()` are added to the keyword
+arguments used to create the lexer.
+
+
+Delegating Lexer
+================
+
+Another approach for nested lexers is the `DelegatingLexer` which is for example
+used for the template engine lexers. It takes two lexers as arguments on
+initialisation: a `root_lexer` and a `language_lexer`.
+
+The input is processed as follows: First, the whole text is lexed with the
+`language_lexer`. All tokens yielded with the special type of ``Other`` are
+then concatenated and given to the `root_lexer`. The language tokens of the
+`language_lexer` are then inserted into the `root_lexer`'s token stream at the
+appropriate positions. ::
+
+ from pygments.lexer import DelegatingLexer
+ from pygments.lexers.web import HtmlLexer, PhpLexer
+
+ class HtmlPhpLexer(DelegatingLexer):
+ def __init__(self, **options):
+ super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
+
+This procedure ensures that e.g. HTML with template tags in it is highlighted
+correctly even if the template tags are put into HTML tags or attributes.
+
+If you want to change the needle token ``Other`` to something else, you can give
+the lexer another token type as the third parameter::
+
+ DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
+
+
+Callbacks
+=========
+
+Sometimes the grammar of a language is so complex that a lexer would be unable
+to process it just by using regular expressions and stacks.
+
+For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
+of token types (`bygroups` and `using` are nothing else but preimplemented
+callbacks). The callback must be a function taking two arguments:
+
+* the lexer itself
+* the match object for the last matched rule
+
+The callback must then return an iterable of (or simply yield) ``(index,
+tokentype, value)`` tuples, which are then just passed through by
+`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
+the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
+and ``value`` the associated part of the input string.
+
+You can see an example here::
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import Generic
+
+ class HypotheticLexer(RegexLexer):
+
+ def headline_callback(lexer, match):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+If the regex for `headline_callback` matches, the function is called with the
+match object. Note that after the callback is done, processing continues
+normally, that is, after the end of the current match. The callback has no
+way to influence the lexer's position.
+
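+To see what the callback produces, you could feed the lexer a headline
+directly (an illustrative session, assuming the class above)::
+
+    >>> list(HypotheticLexer().get_tokens_unprocessed('== Title =='))
+    [(0, Token.Generic.Headline, '== Title ==')]
+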
+There are not many simple examples of lexer callbacks in the Pygments
+codebase itself, but you can see them in action e.g. in the `SMLLexer` class
+in `ml.py`_.
+
+.. _ml.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ml.py
+
+
+The ExtendedRegexLexer class
+============================
+
+The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
+the funky syntax rules of languages such as Ruby.
+
+But fear not; even then you don't have to abandon the regular expression
+approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
+All features known from RegexLexers are available here too, and the tokens are
+specified in exactly the same way, *except* for one detail:
+
+The `get_tokens_unprocessed()` method holds its internal state data not as local
+variables, but in an instance of the `pygments.lexer.LexerContext` class, and
+that instance is passed to callbacks as a third argument. This means that you
+can modify the lexer state in callbacks.
+
+The `LexerContext` class has the following members:
+
+* `text` -- the input text
+* `pos` -- the current starting position that is used for matching regexes
+* `stack` -- a list containing the state stack
+* `end` -- the maximum position up to which regexes are matched; this
+  defaults to the length of `text`
+
+Additionally, the `get_tokens_unprocessed()` method can be given a
+`LexerContext` instead of a string and will then process this context instead of
+creating a new one for the string argument.
+
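+For instance, lexing could be resumed from a saved position and state stack
+like this (a sketch; ``MyExtendedLexer`` stands in for any
+`ExtendedRegexLexer` subclass, and `LexerContext` takes ``(text, pos,
+stack=None, end=None)``)::
+
+    from pygments.lexer import LexerContext
+
+    ctx = LexerContext('some input text', 0, ['root'])
+    for index, token, value in \
+            MyExtendedLexer().get_tokens_unprocessed(context=ctx):
+        print(index, token, value)
+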
+Note that because you can set the current position to anything in the
+callback, it won't automatically be set by the caller after the callback is
+finished.
+For example, this is how the hypothetical lexer above would be written with the
+`ExtendedRegexLexer`::
+
+ from pygments.lexer import ExtendedRegexLexer
+ from pygments.token import Generic
+
+ class ExHypotheticLexer(ExtendedRegexLexer):
+
+ def headline_callback(lexer, match, ctx):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+ ctx.pos = match.end()
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+This might sound confusing (and it can be, especially at first), but it is
+needed; for a real-world example, look at the Ruby lexer in `ruby.py`_.
+
+.. _ruby.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ruby.py
+
+
+Handling Lists of Keywords
+==========================
+
+For a relatively short list (hundreds of entries) you can construct an
+optimized regular expression directly using ``words()`` (for longer lists,
+see the next section). This function handles a few things for you
+automatically, including escaping metacharacters and working around Python's
+first-match (rather than longest-match) behaviour in alternations. Feel free
+to put the lists themselves in ``pygments/lexers/_$lang_builtins.py`` (see
+examples there), generating them by code if possible.
+
+An example of using ``words()`` is something like::
+
+    from pygments.lexer import RegexLexer, words
+    from pygments.token import Name
+
+ class MyLexer(RegexLexer):
+
+ tokens = {
+ 'root': [
+ (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+ (r'\w+', Name),
+ ],
+ }
+
+As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed
+regex.
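+
+For instance, a hypothetical dotted-keyword syntax could anchor the words
+like this::
+
+    (words(('else', 'elseif'), prefix=r'\.', suffix=r'\b'), Name.Builtin),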
+
+
+Modifying Token Streams
+=======================
+
+Some languages ship a lot of builtin functions (for example PHP). The total
+number of those functions differs from system to system because not everybody
+has every extension installed. In the case of PHP there are over 3000 builtin
+functions, far more than you want to put into a regular expression.
+
+But because only `Name` tokens can be function names, this is solvable by
+overriding the ``get_tokens_unprocessed()`` method. The following lexer
+subclasses the `PythonLexer` so that it highlights some additional names as
+pseudo keywords::
+
+ from pygments.lexers.python import PythonLexer
+ from pygments.token import Name, Keyword
+
+ class MyPythonLexer(PythonLexer):
+ EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.EXTRA_KEYWORDS:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
+The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
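+
+A quick way to check such an override (an illustrative session)::
+
+    >>> from pygments.token import Keyword
+    >>> toks = MyPythonLexer().get_tokens_unprocessed('spam = eggs')
+    >>> [(t, v) for _, t, v in toks if t is Keyword.Pseudo]
+    [(Token.Keyword.Pseudo, 'spam'), (Token.Keyword.Pseudo, 'eggs')]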
diff --git a/vendor/pygments/docs/src/lexers.txt b/vendor/pygments/doc/docs/lexers.rst
similarity index 89%
rename from vendor/pygments/docs/src/lexers.txt
rename to vendor/pygments/doc/docs/lexers.rst
index 016de6c..9262efb 100644
--- a/vendor/pygments/docs/src/lexers.txt
+++ b/vendor/pygments/doc/docs/lexers.rst
@@ -18,35 +18,37 @@ Currently, **all lexers** support these options:
`ensurenl`
Make sure that the input ends with a newline (default: ``True``). This
is required for some lexers that consume input linewise.
- *New in Pygments 1.3.*
+
+ .. versionadded:: 1.3
`tabsize`
If given and greater than 0, expand tabs in the input (default: ``0``).
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This encoding
will be used to convert the input string to Unicode (if it is not already
- a Unicode string). The default is ``"latin1"``.
+ a Unicode string). The default is ``"guess"``.
If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1
detection is used, if it is set to ``"chardet"``, the
- `chardet library <http://chardet.feedparser.org/>`__ is used to
+ `chardet library <http://chardet.feedparser.org/>`_ is used to
guess the encoding of the input.
+ .. versionadded:: 0.6
+
The "Short Names" field lists the identifiers that can be used with the
`get_lexer_by_name()` function.
These lexers are builtin and can be imported from `pygments.lexers`:
-[builtin_lexer_docs]
+.. pygmentsdoc:: lexers
+
Iterating over all lexers
-------------------------
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
To get all lexers (both the builtin and the plugin ones), you can
use the `get_all_lexers()` function from the `pygments.lexers`
diff --git a/vendor/pygments/docs/src/moinmoin.txt b/vendor/pygments/doc/docs/moinmoin.rst
similarity index 100%
rename from vendor/pygments/docs/src/moinmoin.txt
rename to vendor/pygments/doc/docs/moinmoin.rst
diff --git a/vendor/pygments/docs/src/plugins.txt b/vendor/pygments/doc/docs/plugins.rst
similarity index 100%
rename from vendor/pygments/docs/src/plugins.txt
rename to vendor/pygments/doc/docs/plugins.rst
diff --git a/vendor/pygments/docs/src/quickstart.txt b/vendor/pygments/doc/docs/quickstart.rst
similarity index 80%
rename from vendor/pygments/docs/src/quickstart.txt
rename to vendor/pygments/doc/docs/quickstart.rst
index 4040910..dba7698 100644
--- a/vendor/pygments/docs/src/quickstart.txt
+++ b/vendor/pygments/doc/docs/quickstart.rst
@@ -58,8 +58,8 @@ can be produced by:
print HtmlFormatter().get_style_defs('.highlight')
-The argument to `get_style_defs` is used as an additional CSS selector: the output
-may look like this:
+The argument to :func:`get_style_defs` is used as an additional CSS selector:
+the output may look like this:
.. sourcecode:: css
@@ -71,9 +71,9 @@ may look like this:
Options
=======
-The `highlight()` function supports a fourth argument called `outfile`, it must be
-a file object if given. The formatted output will then be written to this file
-instead of being returned as a string.
+The :func:`highlight()` function supports a fourth argument called *outfile*, it
+must be a file object if given. The formatted output will then be written to
+this file instead of being returned as a string.
Lexers and formatters both support options. They are given to them as keyword
arguments either to the class or to the lookup method:
@@ -103,9 +103,9 @@ Important options include:
For an overview of builtin lexers and formatters and their options, visit the
-`lexer `_ and `formatters `_ lists.
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
-For a documentation on filters, see `this page `_.
+For documentation on filters, see :doc:`this page <filters>`.
Lexer and formatter lookup
@@ -131,9 +131,9 @@ one of the following methods:
All these functions accept keyword arguments; they will be passed to the lexer
as options.
-A similar API is available for formatters: use `get_formatter_by_name()` and
-`get_formatter_for_filename()` from the `pygments.formatters` module
-for this purpose.
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
Guessing lexers
@@ -153,16 +153,17 @@ or some template tags), use these functions:
>>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
-`guess_lexer()` passes the given content to the lexer classes' `analyse_text()`
-method and returns the one for which it returns the highest number.
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
All lexers have two different filename pattern lists: the primary and the
-secondary one. The `get_lexer_for_filename()` function only uses the primary
-list, whose entries are supposed to be unique among all lexers.
-`guess_lexer_for_filename()`, however, will first loop through all lexers and
-look at the primary and secondary filename patterns if the filename matches.
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers
+and look at the primary and secondary filename patterns if the filename matches.
If only one lexer matches, it is returned, else the guessing mechanism of
-`guess_lexer()` is used with the matching lexers.
+:func:`.guess_lexer()` is used with the matching lexers.
As usual, keyword arguments to these functions are given to the created lexer
as options.
@@ -171,7 +172,8 @@ as options.
Command line usage
==================
-You can use Pygments from the command line, using the `pygmentize` script::
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
$ pygmentize test.py
@@ -199,4 +201,5 @@ it can be created with::
where ``default`` is the style name.
-More options and tricks and be found in the `command line reference `_.
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
diff --git a/vendor/pygments/docs/src/rstdirective.txt b/vendor/pygments/doc/docs/rstdirective.rst
similarity index 100%
rename from vendor/pygments/docs/src/rstdirective.txt
rename to vendor/pygments/doc/docs/rstdirective.rst
diff --git a/vendor/pygments/docs/src/styles.txt b/vendor/pygments/doc/docs/styles.rst
similarity index 95%
rename from vendor/pygments/docs/src/styles.txt
rename to vendor/pygments/doc/docs/styles.rst
index e3e9cfb..d56db0d 100644
--- a/vendor/pygments/docs/src/styles.txt
+++ b/vendor/pygments/doc/docs/styles.rst
@@ -21,6 +21,7 @@ option in form of a string:
.. sourcecode:: pycon
>>> from pygments.styles import get_style_by_name
+ >>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style='colorful').style
@@ -30,6 +31,7 @@ Or you can also import your own style (which must be a subclass of
.. sourcecode:: pycon
>>> from yourapp.yourmodule import YourStyle
+ >>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style=YourStyle).style
@@ -68,7 +70,7 @@ they can be used for a variety of formatters.)
To make the style usable for Pygments, you must
-* either register it as a plugin (see `the plugin docs `_)
+* either register it as a plugin (see :doc:`the plugin docs <plugins>`)
* or drop it into the `styles` subpackage of your Pygments distribution. One style
class per style, where the file name is the style name and the class name is
`StylenameClass`. For example, if your style should be called
@@ -132,7 +134,7 @@ To get a list of known styles you can use this snippet:
Getting a list of available styles
==================================
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
Because it could be that a plugin registered a style, there is
a way to iterate over all styles:
diff --git a/vendor/pygments/docs/src/tokens.txt b/vendor/pygments/doc/docs/tokens.rst
similarity index 94%
rename from vendor/pygments/docs/src/tokens.txt
rename to vendor/pygments/doc/docs/tokens.rst
index 4900a9a..6455a50 100644
--- a/vendor/pygments/docs/src/tokens.txt
+++ b/vendor/pygments/doc/docs/tokens.rst
@@ -4,7 +4,9 @@
Builtin Tokens
==============
-Inside the `pygments.token` module, there is a special object called `Token`
+.. module:: pygments.token
+
+In the :mod:`pygments.token` module, there is a special object called `Token`
that is used to create token types.
You can create a new token type by accessing an attribute of `Token`:
@@ -30,8 +32,8 @@ As of Pygments 0.7 you can also use the ``in`` operator to perform set tests:
>>> Comment in Comment.Multi
False
-This can be useful in `filters`_ and if you write lexers on your own without
-using the base lexers.
+This can be useful in :doc:`filters <filters>` and if you write lexers on your
+own without using the base lexers.
You can also split a token type into a hierarchy, and get the parent of it:
@@ -55,7 +57,7 @@ For some tokens aliases are already defined:
>>> String
Token.Literal.String
-Inside the `pygments.token` module the following aliases are defined:
+Inside the :mod:`pygments.token` module the following aliases are defined:
============= ============================ ====================================
`Text` `Token.Text` for any type of text data
@@ -87,7 +89,7 @@ The `is_token_subtype()` function in the `pygments.token` module can be used to
test if a token type is a subtype of another (such as `Name.Tag` and `Name`).
(This is the same as ``Name.Tag in Name``. The overloaded `in` operator was newly
introduced in Pygments 0.7, the function still exists for backwards
-compatiblity.)
+compatibility.)
With Pygments 0.7, it's also possible to convert strings to token types (for example
if you want to supply a token from the command line):
@@ -158,7 +160,7 @@ Name Tokens
other languages constants are uppercase by definition (Ruby).
`Name.Decorator`
- Token type for decorators. Decorators are synatic elements in the Python
+ Token type for decorators. Decorators are syntactic elements in the Python
language. Similar syntax elements exist in C# and Java.
`Name.Entity`
@@ -251,6 +253,9 @@ Literals
`Number`
Token type for any number literal.
+`Number.Bin`
+ Token type for binary literals (e.g. ``0b101010``).
+
`Number.Float`
Token type for float literals (e.g. ``42.0``).
@@ -280,7 +285,7 @@ Operators
Punctuation
===========
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
`Punctuation`
For any punctuation which is not an operator (e.g. ``[``, ``(``...)
@@ -292,6 +297,10 @@ Comments
`Comment`
Token type for any comment.
+`Comment.Hashbang`
+ Token type for hashbang comments (i.e. first lines of files that start with
+ ``#!``).
+
`Comment.Multiline`
Token type for multiline comments.
@@ -345,5 +354,3 @@ highlight a programming language but a patch file.
`Generic.Traceback`
Marks the token value as a part of an error traceback.
-
-.. _filters: filters.txt
diff --git a/vendor/pygments/docs/src/unicode.txt b/vendor/pygments/doc/docs/unicode.rst
similarity index 53%
rename from vendor/pygments/docs/src/unicode.txt
rename to vendor/pygments/doc/docs/unicode.rst
index dc6394a..17853a3 100644
--- a/vendor/pygments/docs/src/unicode.txt
+++ b/vendor/pygments/doc/docs/unicode.rst
@@ -3,15 +3,23 @@ Unicode and Encodings
=====================
Since Pygments 0.6, all lexers use unicode strings internally. Because of that
-you might encounter the occasional `UnicodeDecodeError` if you pass strings with the
-wrong encoding.
+you might encounter the occasional :exc:`UnicodeDecodeError` if you pass strings
+with the wrong encoding.
-Per default all lexers have their input encoding set to `latin1`.
-If you pass a lexer a string object (not unicode), it tries to decode the data
-using this encoding.
-You can override the encoding using the `encoding` lexer option. If you have the
-`chardet`_ library installed and set the encoding to ``chardet`` if will ananlyse
-the text and use the encoding it thinks is the right one automatically:
+By default all lexers have their input encoding set to `guess`. This means
+that the following encodings are tried:
+
+* UTF-8 (including BOM handling)
+* The locale encoding (i.e. the result of `locale.getpreferredencoding()`)
+* As a last resort, `latin1`
+
+If you pass a lexer a byte string object (not unicode), it tries to decode the
+data using this encoding.
+
+You can override the encoding using the `encoding` or `inencoding` lexer
+options. If you have the `chardet`_ library installed and set the encoding to
+``chardet``, it will analyse the text and automatically use the encoding it
+thinks is the right one:
.. sourcecode:: python
@@ -39,11 +47,12 @@ Unicode string with this encoding before writing it. This is the case for
`sys.stdout`, for example. The other formatters don't have that behavior.
Another note: If you call Pygments via the command line (`pygmentize`),
-encoding is handled differently, see `the command line docs `_.
+encoding is handled differently, see :doc:`the command line docs <cmdline>`.
-*New in Pygments 0.7*: the formatters now also accept an `outencoding` option
-which will override the `encoding` option if given. This makes it possible to
-use a single options dict with lexers and formatters, and still have different
-input and output encodings.
+.. versionadded:: 0.7
+ The formatters now also accept an `outencoding` option which will override
+ the `encoding` option if given. This makes it possible to use a single
+ options dict with lexers and formatters, and still have different input and
+ output encodings.
.. _chardet: http://chardet.feedparser.org/
diff --git a/vendor/pygments/doc/download.rst b/vendor/pygments/doc/download.rst
new file mode 100644
index 0000000..cf32f48
--- /dev/null
+++ b/vendor/pygments/doc/download.rst
@@ -0,0 +1,41 @@
+Download and installation
+=========================
+
+The current release is version |version|.
+
+Packaged versions
+-----------------
+
+You can download it `from the Python Package Index
+<http://pypi.python.org/pypi/Pygments>`_. For installation of packages from
+PyPI, we recommend `Pip <http://www.pip-installer.org/>`_, which works on all
+major platforms.
+
+Under Linux, most distributions include a package for Pygments, usually called
+``pygments`` or ``python-pygments``. You can install it with the package
+manager as usual.
+
+Development sources
+-------------------
+
+We're using the `Mercurial <http://mercurial.selenic.com>`_ version control
+system. You can get the development source using this command::
+
+ hg clone http://bitbucket.org/birkenfeld/pygments-main pygments
+
+Development takes place at `Bitbucket
+<https://bitbucket.org/birkenfeld/pygments-main>`_; you can browse the source
+online `here <https://bitbucket.org/birkenfeld/pygments-main/src>`_.
+
+The latest changes in the development source code are listed in the `changelog
+<https://bitbucket.org/birkenfeld/pygments-main/src/tip/CHANGES>`_.
+
+.. Documentation
+ -------------
+
+.. XXX todo
+
+ You can download the documentation either as
+ a bunch of rst files from the Mercurial repository, see above, or
+ as a tar.gz containing rendered HTML files:
diff --git a/vendor/pygments/doc/faq.rst b/vendor/pygments/doc/faq.rst
new file mode 100644
index 0000000..f375828
--- /dev/null
+++ b/vendor/pygments/doc/faq.rst
@@ -0,0 +1,139 @@
+:orphan:
+
+Pygments FAQ
+=============
+
+What is Pygments?
+-----------------
+
+Pygments is a syntax highlighting engine written in Python. That means it
+will take source code (or other markup) in a supported language and output a
+processed version (in different formats) containing syntax highlighting
+markup.
+
+Its features include:
+
+* a wide range of common :doc:`languages and markup formats <languages>` is supported
+* new languages and formats are added easily
+* a number of output formats is available, including:
+
+ - HTML
+ - ANSI sequences (console output)
+ - LaTeX
+ - RTF
+
+* it is usable as a command-line tool and as a library
+* parsing and formatting is fast
+
+Pygments is licensed under the BSD license.
+
+Where does the name Pygments come from?
+---------------------------------------
+
+*Py* of course stands for Python, while *pigments* are used for coloring paint,
+and in this case, source code!
+
+What are the system requirements?
+---------------------------------
+
+Pygments only needs a standard Python install, version 2.6 or higher (or
+version 3.3 or higher for Python 3). No additional libraries are needed.
+
+How can I use Pygments?
+-----------------------
+
+Pygments is usable as a command-line tool as well as a library.
+
+From the command-line, usage looks like this (assuming the pygmentize script is
+properly installed)::
+
+ pygmentize -f html /path/to/file.py
+
+This will print an HTML-highlighted version of /path/to/file.py to standard output.
+
+For a complete help, please run ``pygmentize -h``.
+
+Usage as a library is thoroughly demonstrated in the Documentation section.
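+
+A minimal library sketch looks like this::
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    print(highlight('print("Hello")', PythonLexer(), HtmlFormatter()))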
+
+How do I make a new style?
+--------------------------
+
+Please see the :doc:`documentation on styles `.
+
+How can I report a bug or suggest a feature?
+--------------------------------------------
+
+Please report bugs and feature wishes in the tracker at Bitbucket.
+
+You can also e-mail the author or use IRC, see the contact details.
+
+I want support for this language!
+--------------------------------------
+
+Instead of waiting for others to include language support, why not write it
+yourself? All you have to know is :doc:`outlined in the docs
+`.
+
+Can I use Pygments for programming language processing?
+-------------------------------------------------------
+
+The Pygments lexing machinery is quite powerful and can be used to build
+lexers for basically all languages. However, parsing them is not possible,
+though some lexers go some steps in this direction in order to e.g. highlight
+function names differently.
+
+Also, error reporting is not within the scope of Pygments. It focuses on
+correctly highlighting syntactically valid documents, not on finding and
+compensating for errors.
+
+Who uses Pygments?
+------------------
+
+This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
+
+* `Wikipedia `_
+* `BitBucket `_, a Mercurial and Git hosting site
+* `The Sphinx documentation builder `_, for embedded source examples
+* `rst2pdf `_, a reStructuredText to PDF converter
+* `Codecov `_, a code coverage CI service
+* `Trac `_, the universal project management tool
+* `AsciiDoc `_, a text-based documentation generator
+* `ActiveState Code `_, the Python Cookbook successor
+* `ViewVC `_, a web-based version control repository browser
+* `BzrFruit `_, a Bazaar branch viewer
+* `QBzr `_, a cross-platform Qt-based GUI front end for Bazaar
+* `Review Board `_, a collaborative code reviewing tool
+* `Diamanda `_, a Django powered wiki system with support for Pygments
+* `Progopedia `_ (`English `_),
+ an encyclopedia of programming languages
+* `Bruce `_, a reStructuredText presentation tool
+* `PIDA `_, a universal IDE written in Python
+* `BPython `_, a curses-based intelligent Python shell
+* `PuDB `_, a console Python debugger
+* `XWiki `_, a wiki-based development framework in Java, using Jython
+* `roux `_, a script for running R scripts
+ and creating beautiful output including graphs
+* `hurl `_, a web service for making HTTP requests
+* `wxHTMLPygmentizer `_ is
+ a GUI utility, used to make code-colorization easier
+* `Postmarkup `_, a BBCode to XHTML generator
+* `WpPygments `_, and `WPygments
+ `_, highlighter plugins for WordPress
+* `Siafoo `_, a tool for sharing and storing useful code and programming experience
+* `D source `_, a community for the D programming language
+* `dpaste.com `_, another Django pastebin
+* `Django snippets `_, a pastebin for Django code
+* `Fayaa `_, a Chinese pastebin
+* `Incollo.com `_, a free collaborative debugging tool
+* `PasteBox `_, a pastebin focused on privacy
+* `hilite.me `_, a site to highlight code snippets
+* `patx.me `_, a pastebin
+* `Fluidic `_, an experiment in
+ integrating shells with a GUI
+* `pygments.rb `_, a pygments wrapper for Ruby
+* `Clygments `_, a pygments wrapper for
+ Clojure
+* `PHPygments `_, a pygments wrapper for PHP
+
+
+If you have a project or web site using Pygments, drop me a line, and I'll add a
+link here.
+
diff --git a/vendor/pygments/doc/index.rst b/vendor/pygments/doc/index.rst
new file mode 100644
index 0000000..2611404
--- /dev/null
+++ b/vendor/pygments/doc/index.rst
@@ -0,0 +1,54 @@
+Welcome!
+========
+
+This is the home of Pygments. It is a generic syntax highlighter suitable for
+use in code hosting, forums, wikis or other applications that need to prettify
+source code. Highlights are:
+
+* a wide range of over 300 languages and other text formats is supported
+* special attention is paid to details that increase highlighting quality
+* support for new languages and formats is added easily; most languages use a
+ simple regex-based lexing mechanism
+* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI
+ sequences
+* it is usable as a command-line tool and as a library
+* ... and it highlights even Perl 6!
+
+Read more in the :doc:`FAQ list <faq>` or the :doc:`documentation <docs/index>`,
+or `download the latest release <http://pypi.python.org/pypi/Pygments>`_.
+
+.. _contribute:
+
+Contribute
+----------
+
+Like every open-source project, we are always looking for volunteers to help us
+with programming. Python knowledge is required, but don't fear: Python is a very
+clear and easy-to-learn language.
+
+Development takes place on `Bitbucket
+<https://bitbucket.org/birkenfeld/pygments-main>`_, where the Mercurial
+repository, tickets and pull requests can be viewed.
+
+Our primary communication instrument is the IRC channel **#pocoo** on the
+Freenode network. To join it, let your IRC client connect to
+``irc.freenode.net`` and do ``/join #pocoo``.
+
+If you found a bug, just open a ticket in the Bitbucket tracker. Be sure to log
+in to be notified when the issue is fixed -- development is not fast-paced as
+the library is quite stable. You can also send an e-mail to the developers, see
+below.
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo `_ team and **Tim Hatch**.
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ docs/index
diff --git a/vendor/pygments/doc/languages.rst b/vendor/pygments/doc/languages.rst
new file mode 100644
index 0000000..a495d15
--- /dev/null
+++ b/vendor/pygments/doc/languages.rst
@@ -0,0 +1,152 @@
+:orphan:
+
+Supported languages
+===================
+
+Pygments supports an ever-growing range of languages. Watch this space...
+
+Programming languages
+---------------------
+
+* ActionScript
+* Ada
+* ANTLR
+* AppleScript
+* Assembly (various)
+* Asymptote
+* Awk
+* Befunge
+* Boo
+* BrainFuck
+* C, C++
+* C#
+* Clojure
+* CoffeeScript
+* ColdFusion
+* Common Lisp
+* Coq
+* Cryptol (incl. Literate Cryptol)
+* `Cython `_
+* `D `_
+* Dart
+* Delphi
+* Dylan
+* Erlang
+* `Ezhil `_ - A Tamil programming language
+* Factor
+* Fancy
+* Fortran
+* F#
+* GAP
+* Gherkin (Cucumber)
+* GL shaders
+* Groovy
+* `Haskell `_ (incl. Literate Haskell)
+* IDL
+* Io
+* Java
+* JavaScript
+* Lasso
+* LLVM
+* Logtalk
+* `Lua `_
+* Matlab
+* MiniD
+* Modelica
+* Modula-2
+* MuPad
+* Nemerle
+* Nimrod
+* Objective-C
+* Objective-J
+* Octave
+* OCaml
+* PHP
+* `Perl `_
+* PovRay
+* PostScript
+* PowerShell
+* Prolog
+* `Python `_ 2.x and 3.x (incl. console sessions and tracebacks)
+* `REBOL `_
+* `Red `_
+* Redcode
+* `Ruby `_ (incl. irb sessions)
+* Rust
+* S, S-Plus, R
+* Scala
+* Scheme
+* Scilab
+* Smalltalk
+* SNOBOL
+* Tcl
+* Vala
+* Verilog
+* VHDL
+* Visual Basic.NET
+* Visual FoxPro
+* XQuery
+* Zephir
+
+Template languages
+------------------
+
+* Cheetah templates
+* `Django `_ / `Jinja
+ `_ templates
+* ERB (Ruby templating)
+* `Genshi `_ (the Trac template language)
+* JSP (Java Server Pages)
+* `Myghty `_ (the HTML::Mason based framework)
+* `Mako `_ (the Myghty successor)
+* `Smarty `_ templates (PHP templating)
+* Tea
+
+Other markup
+------------
+
+* Apache config files
+* Bash shell scripts
+* BBCode
+* CMake
+* CSS
+* Debian control files
+* Diff files
+* DTD
+* Gettext catalogs
+* Gnuplot script
+* Groff markup
+* HTML
+* HTTP sessions
+* INI-style config files
+* IRC logs (irssi style)
+* Lighttpd config files
+* Makefiles
+* MoinMoin/Trac Wiki markup
+* MySQL
+* Nginx config files
+* POV-Ray scenes
+* Ragel
+* Redcode
+* ReST
+* Robot Framework
+* RPM spec files
+* SQL, also MySQL, SQLite
+* Squid configuration
+* TeX
+* tcsh
+* Vim Script
+* Windows batch files
+* XML
+* XSLT
+* YAML
+
+... that's all?
+---------------
+
+Well, why not write your own? Contributing to Pygments is easy and fun. Take
+a look at the :doc:`docs on lexer development <docs/lexerdevelopment>` and the
+:ref:`contact details <contribute>`.
+
+Note: the languages listed here are supported in the development version. The
+latest release may lack a few of them.
diff --git a/vendor/pygments/doc/make.bat b/vendor/pygments/doc/make.bat
new file mode 100644
index 0000000..8803c98
--- /dev/null
+++ b/vendor/pygments/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/vendor/pygments/docs/pygmentize.1 b/vendor/pygments/doc/pygmentize.1
similarity index 100%
rename from vendor/pygments/docs/pygmentize.1
rename to vendor/pygments/doc/pygmentize.1
diff --git a/vendor/pygments/docs/build/api.html b/vendor/pygments/docs/build/api.html
deleted file mode 100644
index 1225e28..0000000
--- a/vendor/pygments/docs/build/api.html
+++ /dev/null
@@ -1,458 +0,0 @@
-
-
-
- The full Pygments API — Pygments
-
-
-
-
-
- def lex(code, lexer):
-
Lex code with the lexer (must be a Lexer instance)
-and return an iterable of tokens. Currently, this only calls
-lexer.get_tokens().
-
def format(tokens, formatter, outfile=None):
-
Format a token stream (iterable of tokens) tokens with the
-formatter (must be a Formatter instance). The result is
-written to outfile, or if that is None, returned as a
-string.
- def highlight(code, lexer, formatter, outfile=None):
-
This is the most high-level highlighting function.
-It combines lex and format in one function.
-
-
Functions from pygments.lexers:
-
-
def get_lexer_by_name(alias, **options):
-
Return an instance of a Lexer subclass that has alias in its
-aliases list. The lexer is given the options at its
-instantiation.
-
Will raise pygments.util.ClassNotFound if no lexer with that alias is
-found.
-
-
def get_lexer_for_filename(fn, **options):
-
Return a Lexer subclass instance that has a filename pattern
-matching fn. The lexer is given the options at its
-instantiation.
-
Will raise pygments.util.ClassNotFound if no lexer for that filename is
-found.
-
-
def get_lexer_for_mimetype(mime, **options):
-
Return a Lexer subclass instance that has mime in its mimetype
-list. The lexer is given the options at its instantiation.
-
Will raise pygments.util.ClassNotFound if no lexer for that mimetype is
-found.
-
-
def guess_lexer(text, **options):
-
Return a Lexer subclass instance that's guessed from the text
-in text. For that, the analyse_text() method of every known
-lexer class is called with the text as argument, and the lexer
-which returned the highest value will be instantiated and returned.
-
pygments.util.ClassNotFound is raised if no lexer thinks it can handle the
-content.
- def guess_lexer_for_filename(fn, text, **options):
-
As guess_lexer(), but only lexers which have a pattern in filenames
-or alias_filenames that matches filename are taken into consideration.
-
pygments.util.ClassNotFound is raised if no lexer thinks it can handle the
-content.
-
-
def get_all_lexers():
-
Return an iterable over all registered lexers, yielding tuples in the
-format:
-
-(longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
-
-
New in Pygments 0.6.
-
-
-
Functions from pygments.formatters:
-
-
def get_formatter_by_name(alias, **options):
-
Return an instance of a Formatter subclass that has alias in its
-aliases list. The formatter is given the options at its
-instantiation.
-
Will raise pygments.util.ClassNotFound if no formatter with that alias is
-found.
-
-
def get_formatter_for_filename(fn, **options):
-
Return a Formatter subclass instance that has a filename pattern
-matching fn. The formatter is given the options at its
-instantiation.
-
Will raise pygments.util.ClassNotFound if no formatter for that filename
-is found.
-
-
-
Functions from pygments.styles:
-
-
def get_style_by_name(name):
-
Return a style class by its short name. The names of the builtin styles
-are listed in pygments.styles.STYLE_MAP.
-
Will raise pygments.util.ClassNotFound if no style of that name is found.
-
-
def get_all_styles():
-
Return an iterable over all registered styles, yielding their names.
-
New in Pygments 0.6.
-
-
-
-
-
Lexers
-
A lexer (derived from pygments.lexer.Lexer) has the following functions:
-
-
def __init__(self, **options):
-
The constructor. Takes a **keywords dictionary of options.
-Every subclass must first process its own options and then call
-the Lexer constructor, since it processes the stripnl,
-stripall and tabsize options.
As these options must all be specifiable as strings (due to the
-command line usage), there are various utility functions
-available to help with that, see Option processing.
-
-
def get_tokens(self, text):
-
This method is the basic interface of a lexer. It is called by
-the highlight() function. It must process the text and return an
-iterable of (tokentype, value) pairs from text.
-
Normally, you don't need to override this method. The default
-implementation processes the stripnl, stripall and tabsize
-options and then yields all tokens from get_tokens_unprocessed(),
-with the index dropped.
-
-
def get_tokens_unprocessed(self, text):
-
This method should process the text and return an iterable of
-(index, tokentype, value) tuples where index is the starting
-position of the token within the input text.
-
This method must be overridden by subclasses.
-
-
def analyse_text(text):
-
A static method which is called for lexer guessing. It should analyse
-the text and return a float in the range from 0.0 to 1.0.
-If it returns 0.0, the lexer will not be selected as the most
-probable one, if it returns 1.0, it will be selected immediately.
-
-
For a list of known tokens have a look at the Tokens page.
-
A lexer also can have the following attributes (in fact, they are mandatory
-except alias_filenames) that are used by the builtin lookup mechanism.
-
-
name
-
Full name for the lexer, in human-readable form.
-
aliases
-
A list of short, unique identifiers that can be used to lookup
-the lexer from a list, e.g. using get_lexer_by_name().
-
filenames
-
A list of fnmatch patterns that match filenames which contain
-content for this lexer. The patterns in this list should be unique among
-all lexers.
-
alias_filenames
-
A list of fnmatch patterns that match filenames which may or may not
-contain content for this lexer. This list is used by the
-guess_lexer_for_filename() function, to determine which lexers are
-then included in guessing the correct one. That means that e.g. every
-lexer for HTML and a template language should include \*.html in
-this list.
-
mimetypes
-
A list of MIME types for content that can be lexed with this
-lexer.
-
-
-
-
Formatters
-
A formatter (derived from pygments.formatter.Formatter) has the following
-functions:
-
-
def __init__(self, **options):
-
As with lexers, this constructor processes options and then must call
-the base class __init__.
-
The Formatter class recognizes the options style, full and
-title. It is up to the formatter class whether it uses them.
-
-
def get_style_defs(self, arg=''):
-
This method must return statements or declarations suitable to define
-the current style for subsequent highlighted text (e.g. CSS classes
-in the HTMLFormatter).
-
The optional argument arg can be used to modify the generation and
-is formatter dependent (it is standardized because it can be given on
-the command line).
-
This method is called by the -S command-line option; the arg
-is then given by the -a option.
-
-
def format(self, tokensource, outfile):
-
This method must format the tokens from the tokensource iterable and
-write the formatted version to the file object outfile.
-
Formatter options can control how exactly the tokens are converted.
-
-
-
A formatter must have the following attributes that are used by the
-builtin lookup mechanism. (New in Pygments 0.7.)
-
-
name
-
Full name for the formatter, in human-readable form.
-
aliases
-
A list of short, unique identifiers that can be used to lookup
-the formatter from a list, e.g. using get_formatter_by_name().
-
filenames
-
A list of fnmatch patterns that match filenames for which this formatter
-can produce output. The patterns in this list should be unique among
-all formatters.
-
-
-
-
Option processing
-
The pygments.util module has some utility functions usable for option
-processing:
-
-
class OptionError
-
This exception will be raised by all option processing functions if
-the type or value of the argument is not correct.
-
def get_bool_opt(options, optname, default=None):
-
Interpret the key optname from the dictionary options
-as a boolean and return it. Return default if optname
-is not in options.
-
The valid string values for True are 1, yes,
-true and on, the ones for False are 0,
-no, false and off (matched case-insensitively).
-
-
def get_int_opt(options, optname, default=None):
-
As get_bool_opt, but interpret the value as an integer.
-
def get_list_opt(options, optname, default=None):
-
If the key optname from the dictionary options is a string,
-split it at whitespace and return it. If it is already a list
-or a tuple, it is returned as a list.
The HTML formatter now supports linking to tags using CTags files, when the
-python-ctags package is installed (PR#87).
-
The HTML formatter now has a "linespans" option that wraps every line in a
-<span> tag with a specific id (PR#82).
-
When deriving a lexer from another lexer with token definitions, definitions
-for states not in the child lexer are now inherited. If you override a state
-in the child lexer, an "inherit" keyword has been added to insert the base
-state at that position (PR#141).
-
The C family lexers now inherit token definitions from a common base class,
-removing code duplication (PR#141).
-
Use "colorama" on Windows for console color output (PR#142).
-
Fix Template Haskell highlighting (PR#63).
-
Fix some S/R lexer errors (PR#91).
-
Fix a bug in the Prolog lexer with names that start with 'is' (#810).
-
Rewrite Dylan lexer, add Dylan LID lexer (PR#147).
-
Add a Java quickstart document (PR#146).
-
Add a "external/autopygmentize" file that can be used as .lessfilter (#802).
-
-
-
-
Version 1.5
-
(codename Zeitdilatation, released Mar 10, 2012)
-
-
Lexers added:
-
Awk (#630)
-
Fancy (#633)
-
PyPy Log
-
eC
-
Nimrod
-
Nemerle (#667)
-
F# (#353)
-
Groovy (#501)
-
PostgreSQL (#660)
-
DTD
-
Gosu (#634)
-
Octave (PR#22)
-
Standard ML (PR#14)
-
CFengine3 (#601)
-
Opa (PR#37)
-
HTTP sessions (PR#42)
-
JSON (PR#31)
-
SNOBOL (PR#30)
-
MoonScript (PR#43)
-
ECL (PR#29)
-
Urbiscript (PR#17)
-
OpenEdge ABL (PR#27)
-
SystemVerilog (PR#35)
-
Coq (#734)
-
PowerShell (#654)
-
Dart (#715)
-
Fantom (PR#36)
-
Bro (PR#5)
-
NewLISP (PR#26)
-
VHDL (PR#45)
-
Scilab (#740)
-
Elixir (PR#57)
-
Tea (PR#56)
-
Kotlin (PR#58)
-
-
-
Fix Python 3 terminal highlighting with pygmentize (#691).
-
In the LaTeX formatter, escape special &, < and > chars (#648).
-
In the LaTeX formatter, fix display problems for styles with token
-background colors (#670).
-
Enhancements to the Squid conf lexer (#664).
-
Several fixes to the reStructuredText lexer (#636).
-
Recognize methods in the ObjC lexer (#638).
-
Fix Lua "class" highlighting: it does not have classes (#665).
-
Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, 708).
-
Fix number pattern order in Ocaml lexer (#647).
-
Fix generic type highlighting in ActionScript 3 (#666).
-
Fixes to the Clojure lexer (PR#9).
-
Fix degenerate regex in Nemerle lexer (#706).
-
Fix infinite looping in CoffeeScript lexer (#729).
-
Fix crashes and analysis with ObjectiveC lexer (#693, #696).
-
Add some Fortran 2003 keywords.
-
Fix Boo string regexes (#679).
-
Add "rrt" style (#727).
-
Fix infinite looping in Darcs Patch lexer.
-
Lots of misc fixes to character-eating bugs and ordering problems in many
-different lexers.
-
-
-
-
Version 1.4
-
(codename Unschärfe, released Jan 03, 2011)
-
-
Lexers added:
-
Factor (#520)
-
PostScript (#486)
-
Verilog (#491)
-
BlitzMax Basic (#478)
-
Ioke (#465)
-
Java properties, split out of the INI lexer (#445)
-
Scss (#509)
-
Duel/JBST
-
XQuery (#617)
-
Mason (#615)
-
GoodData (#609)
-
SSP (#473)
-
Autohotkey (#417)
-
Google Protocol Buffers
-
Hybris (#506)
-
-
-
Do not fail in analyse_text methods (#618).
-
Performance improvements in the HTML formatter (#523).
-
With the noclasses option in the HTML formatter, some styles
-present in the stylesheet were not added as inline styles.
-
Four fixes to the Lua lexer (#480, #481, #482, #497).
-
More context-sensitive Gherkin lexer with support for more i18n translations.
-
Support new OO keywords in Matlab lexer (#521).
-
Small fix in the CoffeeScript lexer (#519).
-
A bugfix for backslashes in ocaml strings (#499).
-
Fix unicode/raw docstrings in the Python lexer (#489).
-
Allow PIL to work without PIL.pth (#502).
-
Allow seconds as a unit in CSS (#496).
-
Support application/javascript as a JavaScript mime type (#504).
-
Support Offload C++ Extensions as
-keywords in the C++ lexer (#484).
-
Escape more characters in LaTeX output (#505).
-
Update Haml/Sass lexers to version 3 (#509).
-
Small PHP lexer string escaping fix (#515).
-
Support comments before preprocessor directives, and unsigned/
-long long literals in C/C++ (#613, #616).
-
Support line continuations in the INI lexer (#494).
-
Fix lexing of Dylan string and char literals (#628).
-
Fix class/procedure name highlighting in VB.NET lexer (#624).
-
-
-
-
Version 1.3.1
-
(bugfix release, released Mar 05, 2010)
-
-
The pygmentize script was missing from the distribution.
-
-
-
-
Version 1.3
-
(codename Schneeglöckchen, released Mar 01, 2010)
-
-
Added the ensurenl lexer option, which can be used to suppress the
-automatic addition of a newline to the lexer input.
-
Lexers added:
-
Ada
-
Coldfusion
-
Modula-2
-
haXe
-
R console
-
Objective-J
-
Haml and Sass
-
CoffeeScript
-
-
-
Enhanced reStructuredText highlighting.
-
Added support for PHP 5.3 namespaces in the PHP lexer.
-
Added a bash completion script for pygmentize, to the external/
-directory (#466).
-
Fixed a bug in do_insertions() used for multi-lexer languages.
-
Fixed a Ruby regex highlighting bug (#476).
-
Fixed regex highlighting bugs in Perl lexer (#258).
-
Add small enhancements to the C lexer (#467) and Bash lexer (#469).
-
Small fixes for the Tcl, Debian control file, Nginx config,
-Smalltalk, Objective-C, Clojure, Lua lexers.
-
Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
-
-
-
-
Version 1.2.2
-
(bugfix release, released Jan 02, 2010)
-
-
Removed a backwards incompatibility in the LaTeX formatter that caused
-Sphinx to produce invalid commands when writing LaTeX output (#463).
-
Fixed a forever-backtracking regex in the BashLexer (#462).
-
-
-
-
Version 1.2.1
-
(bugfix release, released Jan 02, 2010)
-
-
Fixed mishandling of an ellipsis in place of the frames in a Python
-console traceback, resulting in clobbered output.
-
-
-
-
Version 1.2
-
(codename Neujahr, released Jan 01, 2010)
-
-
Dropped Python 2.3 compatibility.
-
Lexers added:
-
Asymptote
-
Go
-
Gherkin (Cucumber)
-
CMake
-
Ooc
-
Coldfusion
-
haXe
-
R console
-
-
-
Added options for rendering LaTeX in source code comments in the
-LaTeX formatter (#461).
-
Updated the Logtalk lexer.
-
Added line_number_start option to image formatter (#456).
-
Added hl_lines and hl_color options to image formatter (#457).
-
Fixed the HtmlFormatter's handling of noclasses=True to not output any
-classes (#427).
-
Added the Monokai style (#453).
-
Fixed LLVM lexer identifier syntax and added new keywords (#442).
-
Fixed the PythonTracebackLexer to handle non-traceback data in header or
-trailer, and support more partial tracebacks that start on line 2 (#437).
-
Fixed the CLexer to not highlight ternary statements as labels.
-
Fixed lexing of some Ruby quoting peculiarities (#460).
-
A few ASM lexer fixes (#450).
-
-
-
-
Version 1.1.1
-
(bugfix release, released Sep 15, 2009)
-
-
Fixed the BBCode lexer (#435).
-
Added support for new Jinja2 keywords.
-
Fixed test suite failures.
-
Added Gentoo-specific suffixes to Bash lexer.
-
-
-
-
Version 1.1
-
(codename Brillouin, released Sep 11, 2009)
-
-
Ported Pygments to Python 3. This needed a few changes in the way
-encodings are handled; they may affect corner cases when used with
-Python 2 as well.
-
Lexers added:
-
Antlr/Ragel, thanks to Ana Nelson
-
(Ba)sh shell
-
Erlang shell
-
GLSL
-
Prolog
-
Evoque
-
Modelica
-
Rebol
-
MXML
-
Cython
-
ABAP
-
ASP.net (VB/C#)
-
Vala
-
Newspeak
-
-
-
Fixed the LaTeX formatter's output so that output generated for one style
-can be used with the style definitions of another (#384).
-
Added "anchorlinenos" and "noclobber_cssfile" (#396) options to HTML
-formatter.
-
Support multiline strings in Lua lexer.
-
Rewrite of the JavaScript lexer by Pumbaa80 to better support regular
-expression literals (#403).
-
When pygmentize is asked to highlight a file for which multiple lexers
-match the filename, use the analyse_text guessing engine to determine the
-winner (#355).
-
Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378),
-the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387)
-and the AS3 lexer (#389).
-
Fixed three Perl heredoc lexing bugs (#379, #400, #422).
-
Fixed a bug in the image formatter which misdetected lines (#380).
-
Fixed bugs lexing extended Ruby strings and regexes.
-
Fixed a bug when lexing git diffs.
-
Fixed a bug lexing the empty commit in the PHP lexer (#405).
-
Fixed a bug causing Python numbers to be mishighlighted as floats (#397).
-
Fixed a bug when backslashes are used in odd locations in Python (#395).
-
Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410,
-#411, #413, #414) and fmarc (#419).
-
Fixed a bug in Haskell single-line comment detection (#426).
-
Added new-style reStructuredText directive for docutils 0.5+ (#428).
-
-
-
-
Version 1.0
-
(codename Dreiundzwanzig, released Nov 23, 2008)
-
-
Don't use join(splitlines()) when converting newlines to \n,
-because that doesn't keep all newlines at the end when the
-stripnl lexer option is False.
-
-
Added -N option to command-line interface to get a lexer name
-for a given filename.
-
-
Added Tango style, written by Andre Roberge for the Crunchy project.
-
-
Added Python3TracebackLexer and python3 option to
-PythonConsoleLexer.
-
-
Fixed a few bugs in the Haskell lexer.
-
-
Fixed PythonTracebackLexer to be able to recognize SyntaxError and
-KeyboardInterrupt (#360).
-
-
Provide one formatter class per image format, so that surprises like:
-
-pygmentize -f gif -o foo.gif foo.py
-
-
creating a PNG file are avoided.
-
-
Actually use the font_size option of the image formatter.
-
-
-Fixed the numpy lexer so that it doesn't listen for *.py any longer.
-
-
Fixed HTML formatter so that text options can be Unicode
-strings (#371).
-
-
Unified Diff lexer supports the "udiff" alias now.
-
-
Fixed a few issues in Scala lexer (#367).
-
-
RubyConsoleLexer now supports simple prompt mode (#363).
-
-
JavascriptLexer is smarter about what constitutes a regex (#356).
-
-
Add Applescript lexer, thanks to Andreas Amann (#330).
-
-
Make the codetags more strict about matching words (#368).
-
-
NginxConfLexer is a little more accurate on mimetypes and
-variables (#370).
-
-
-
-
-
Version 0.11.1
-
(released Aug 24, 2008)
-
-
Fixed a Jython compatibility issue in pygments.unistring (#358).
-
-
-
-
Version 0.11
-
(codename Straußenei, released Aug 23, 2008)
-
Many thanks go to Tim Hatch for writing or integrating most of the bug
-fixes and new features.
-
-
Lexers added:
-
Nasm-style assembly language, thanks to delroth
-
YAML, thanks to Kirill Simonov
-
ActionScript 3, thanks to Pierre Bourdon
-
Cheetah/Spitfire templates, thanks to Matt Good
-
Lighttpd config files
-
Nginx config files
-
Gnuplot plotting scripts
-
Clojure
-
POV-Ray scene files
-
Sqlite3 interactive console sessions
-
Scala source files, thanks to Krzysiek Goj
-
-
-
Lexers improved:
-
C lexer highlights standard library functions now and supports C99
-types.
-
Bash lexer now correctly highlights heredocs without preceding
-whitespace.
-
Vim lexer now highlights hex colors properly and knows a couple
-more keywords.
-
Irc logs lexer now handles xchat's default time format (#340) and
-correctly highlights lines ending in >.
-
Support more delimiters for perl regular expressions (#258).
-
ObjectiveC lexer now supports 2.0 features.
-
-
-
Added "Visual Studio" style.
-
Updated markdown processor to Markdown 1.7.
-
Support roman/sans/mono style defs and use them in the LaTeX
-formatter.
-
The RawTokenFormatter is no longer registered to *.raw and it's
-documented that tokenization with this lexer may raise exceptions.
-
New option hl_lines to HTML formatter, to highlight certain
-lines.
-
New option prestyles to HTML formatter.
-
New option -g to pygmentize, to allow lexer guessing based on
-filetext (can be slowish, so file extensions are still checked
-first).
-
guess_lexer() now makes its decision much faster due to a cache
-of whether data is xml-like (a check which is used in several
-versions of analyse_text()). Several lexers also have more
-accurate analyse_text() now.
-
-
-
-
Version 0.10
-
(codename Malzeug, released May 06, 2008)
-
-
Lexers added:
-
Io
-
Smalltalk
-
Darcs patches
-
Tcl
-
Matlab
-
Matlab sessions
-
FORTRAN
-
XSLT
-
tcsh
-
NumPy
-
Python 3
-
S, S-plus, R statistics languages
-
Logtalk
-
-
-
In the LatexFormatter, the commandprefix option is now by default
-'PY' instead of 'C', since the latter resulted in several collisions
-with other packages. Also, the special meaning of the arg
-argument to get_style_defs() was removed.
-
Added ImageFormatter, to format code as PNG, JPG, GIF or BMP.
-(Needs the Python Imaging Library.)
-
Support doc comments in the PHP lexer.
-
Handle format specifications in the Perl lexer.
-
Fix comment handling in the Batch lexer.
-
Add more file name extensions for the C++, INI and XML lexers.
-
Fixes in the IRC and MuPad lexers.
-
Fix function and interface name highlighting in the Java lexer.
-
Fix at-rule handling in the CSS lexer.
-
Handle KeyboardInterrupts gracefully in pygmentize.
-
Added BlackWhiteStyle.
-
Bash lexer now correctly highlights math, does not require
-whitespace after semicolons, and correctly highlights boolean
-operators.
-
Makefile lexer is now capable of handling BSD and GNU make syntax.
-
-
-
-
Version 0.9
-
(codename Herbstzeitlose, released Oct 14, 2007)
-
-
Lexers added:
-
Erlang
-
ActionScript
-
Literate Haskell
-
Common Lisp
-
Various assembly languages
-
Gettext catalogs
-
Squid configuration
-
Debian control files
-
MySQL-style SQL
-
MOOCode
-
-
-
Lexers improved:
-
Greatly improved the Haskell and OCaml lexers.
-
Improved the Bash lexer's handling of nested constructs.
-
The C# and Java lexers exhibited abysmal performance with some
-input code; this should now be fixed.
-
The IRC logs lexer is now able to colorize weechat logs too.
-
The Lua lexer now recognizes multi-line comments.
-
Fixed bugs in the D and MiniD lexer.
-
-
-
The encoding handling of the command line mode (pygmentize) was
-enhanced. You shouldn't get UnicodeErrors from it anymore if you
-don't give an encoding option.
-
Added a -P option to the command line mode which can be used to
-give options whose values contain commas or equals signs.
-
Added 256-color terminal formatter.
-
Added an experimental SVG formatter.
-
Added the lineanchors option to the HTML formatter, thanks to
-Ian Charnas for the idea.
-
Gave the line numbers table a CSS class in the HTML formatter.
-
Added a Vim 7-like style.
-
-
-
-
Version 0.8.1
-
(released Jun 27, 2007)
-
-
Fixed POD highlighting in the Ruby lexer.
-
Fixed Unicode class and namespace name highlighting in the C# lexer.
-
Fixed Unicode string prefix highlighting in the Python lexer.
-
Fixed a bug in the D and MiniD lexers.
-
Fixed the included MoinMoin parser.
-
-
-
-
Version 0.8
-
(codename Maikäfer, released May 30, 2007)
-
-
Lexers added:
-
Haskell, thanks to Adam Blinkinsop
-
Redcode, thanks to Adam Blinkinsop
-
D, thanks to Kirk McDonald
-
MuPad, thanks to Christopher Creutzig
-
MiniD, thanks to Jarrett Billingsley
-
Vim Script, by Tim Hatch
-
-
-
The HTML formatter now has a second line-numbers mode in which it
-will just integrate the numbers in the same <pre> tag as the
-code.
-
The CSharpLexer now is Unicode-aware, which means that it has an
-option that can be set so that it correctly lexes Unicode
-identifiers allowed by the C# specs.
-
Added a RaiseOnErrorTokenFilter that raises an exception when the
-lexer generates an error token, and a VisibleWhitespaceFilter that
-converts whitespace (spaces, tabs, newlines) into visible
-characters.
-
Fixed the do_insertions() helper function to yield correct
-indices.
-
The ReST lexer now automatically highlights source code blocks in
-".. sourcecode:: language" and ".. code:: language" directive
-blocks.
-
Improved the default style (thanks to Tiberius Teng). The old
-default is still available as the "emacs" style (which was an alias
-before).
-
The get_style_defs method of HTML formatters now uses the
-cssclass option as the default selector if it was given.
-
Improved the ReST and Bash lexers a bit.
-
Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim
-Hatch.
-
Fixed a bug in the command line code that disallowed -O options
-when using the -S option.
-
Fixed a bug in the RawTokenFormatter.
-
-
-
-
Version 0.7.1
-
(released Feb 15, 2007)
-
-
Fixed little highlighting bugs in the Python, Java, Scheme and
-Apache Config lexers.
-
Updated the included manpage.
-
Included a built version of the documentation in the source tarball.
-
-
-
-
Version 0.7
-
(codename Faschingskrapfn, released Feb 14, 2007)
-
-
Added a MoinMoin parser that uses Pygments. With it, you get
-Pygments highlighting in Moin Wiki pages.
-
Changed the exception raised if no suitable lexer, formatter etc. is
-found in one of the get_*_by_* functions to a custom exception,
-pygments.util.ClassNotFound. It is, however, a subclass of
-ValueError in order to retain backwards compatibility.
-
Added a -H command line option which can be used to get the
-docstring of a lexer, formatter or filter.
-
Made the handling of lexers and formatters more consistent. The
-aliases and filename patterns of formatters are now attributes on
-them.
-
Added an OCaml lexer, thanks to Adam Blinkinsop.
-
Made the HTML formatter more flexible, and easily subclassable in
-order to make it easy to implement custom wrappers, e.g. alternate
-line number markup. See the documentation.
-
Added an outencoding option to all formatters, making it possible
-to override the encoding (which is used by lexers and formatters)
-when using the command line interface. Also, if using the terminal
-formatter and the output file is a terminal and has an encoding
-attribute, use it if no encoding is given.
-
Made it possible to just drop style modules into the styles
-subpackage of the Pygments installation.
-
Added a "state" keyword argument to the using helper.
-
Added a commandprefix option to the LatexFormatter which allows
-to control how the command names are constructed.
-
Added quite a few new lexers, thanks to Tim Hatch:
-
Java Server Pages
-
Windows batch files
-
Trac Wiki markup
-
Python tracebacks
-
ReStructuredText
-
Dylan
-
and the Befunge esoteric programming language (yay!)
-
-
-
Added Mako lexers by Ben Bangert.
-
Added "fruity" style, another dark background originally vim-based
-theme.
-
Added sources.list lexer by Dennis Kaarsemaker.
-
Added token stream filters, and a pygmentize option to use them.
Added option for the HTML formatter to write the CSS to an external
-file in "full document" mode.
-
Added RTF formatter.
-
Added Bash and Apache configuration lexers (thanks to Tim Hatch).
-
Improved guessing methods for various lexers.
-
Added @media support to CSS lexer (thanks to Tim Hatch).
-
Added a Groff lexer (thanks to Tim Hatch).
-
License change to BSD.
-
Added lexers for the Myghty template language.
-
Added a Scheme lexer (thanks to Marek Kubica).
-
Added some functions to iterate over existing lexers, formatters and
-filters.
-
The HtmlFormatter's get_style_defs() can now take a list as an
-argument to generate CSS with multiple prefixes.
-
Support for guessing input encoding added.
-
Encoding support added: all processing is now done with Unicode
-strings, input and output are converted from and optionally to byte
-strings (see the encoding option of lexers and formatters).
-
Some improvements in the C(++) lexers handling comments and line
-continuations.
-
-
-
-
Version 0.5.1
-
(released Oct 30, 2006)
-
-
Fixed traceback in pygmentize -L (thanks to Piotr Ozarowski).
-
-
-
-
Version 0.5
-
(codename PyKleur, released Oct 30, 2006)
-
-
Initial public release.
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/cmdline.html b/vendor/pygments/docs/build/cmdline.html
deleted file mode 100644
index 51eeed5..0000000
--- a/vendor/pygments/docs/build/cmdline.html
+++ /dev/null
@@ -1,353 +0,0 @@
-
-
-
- Command Line Interface — Pygments
-
-
-
-
-
You can use Pygments from the shell, provided you installed the pygmentize
-script:
-
-$ pygmentize test.py
-print "Hello World"
-
-
will print the file test.py to standard output, using the Python lexer
-(inferred from the file name extension) and the terminal formatter (because
-you didn't give an explicit formatter name).
-
If you want HTML output:
-
-$ pygmentize -f html -l python -o test.html test.py
-
-
As you can see, the -l option explicitly selects a lexer. As seen above, if you
-give an input file name and it has an extension that Pygments recognizes, you can
-omit this option.
-
The -o option gives an output file name. If it is not given, output is
-written to stdout.
-
The -f option selects a formatter (as with -l, it can also be omitted
-if an output file name is given and has a supported extension).
-If no output file name is given and -f is omitted, the
-TerminalFormatter is used.
-
The above command could therefore also be given as:
-
-$ pygmentize -o test.html test.py
-
-
To create a full HTML document, including line numbers and stylesheet (using the
-"emacs" style), highlighting the Python file test.py to test.html:
Lexer and formatter options can be given using the -O option:
-
-$ pygmentize -f html -O style=colorful,linenos=1 -l python test.py
-
-
Be sure to enclose the option string in quotes if it contains any special shell
-characters, such as spaces or expansion wildcards like *. If an option
-expects a list value, separate the list entries with spaces (you'll have to
-quote the option value in this case too, so that the shell doesn't split it).
-
Since the -O option argument is split at commas and expects the split values
-to be of the form name=value, you can't give an option value that contains
-commas or equals signs. Therefore, an option -P is provided (as of Pygments
-0.9) that works like -O but can only pass one option per -P. Its value
-can then contain all characters:
-
-$ pygmentize -P "heading=Pygments, the Python highlighter" ...
-
-
Filters are added to the token stream using the -F option:
-
-$ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas
-
-
As you see, options for the filter are given after a colon. As for -O, the
-filter name and options must be one shell word, so there may not be any spaces
-around the colon.
-
-
-
Generating styles
-
Formatters normally don't output full style information. For example, the HTML
-formatter by default only outputs <span> tags with class attributes.
-Therefore, there's a special -S option for generating style definitions.
-Usage is as follows:
-
-$ pygmentize -f html -S colorful -a .syntax
-
-
generates a CSS style sheet (because you selected the HTML formatter) for
-the "colorful" style prepending a ".syntax" selector to all style rules.
-
For an explanation of what -a means for a particular formatter, look for
-the arg argument for the formatter's get_style_defs() method.
-
-
-
Getting lexer names
-
New in Pygments 1.0.
-
The -N option guesses a lexer name for a given filename, so that
-
-$ pygmentize -N setup.py
-
-
will print out python. It won't highlight anything yet. If no specific
-lexer is known for that filename, text is printed.
-
-
-
Getting help
-
The -L option lists lexers, formatters, styles and filters, along with
-their short names and supported file name extensions. If you want to see
-only one category, give it as an argument:
-
-$ pygmentize -L filters
-
-
will list only the installed filters.
-
The -H option will give you detailed information (the same that can be found
-in this documentation) about a lexer, formatter or filter. Usage is as follows:
-
-$ pygmentize -H formatter html
-
-
will print the help for the HTML formatter, while
-
-$ pygmentize -H lexer python
-
-
will print the help for the Python lexer, etc.
-
-
-
A note on encodings
-
New in Pygments 0.9.
-
Pygments tries to be smart regarding encodings in the formatting process:
-
-
If you give an encoding option, it will be used as the input and
-output encoding.
-
If you give an outencoding option, it will override encoding
-as the output encoding.
-
If you don't give an encoding and have given an output file, the default
-encoding for lexer and formatter is latin1 (which will pass through
-all non-ASCII characters).
-
If you don't give an encoding and haven't given an output file (that means
-output is written to the console), the default encoding for lexer and
-formatter is the terminal encoding (sys.stdout.encoding).
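
For example, to force UTF-8 for both input and output, a command along
these lines can be used:

$ pygmentize -f html -O encoding=utf-8 -o test.html test.py
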
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/filterdevelopment.html b/vendor/pygments/docs/build/filterdevelopment.html
deleted file mode 100644
index 25ffa59..0000000
--- a/vendor/pygments/docs/build/filterdevelopment.html
+++ /dev/null
@@ -1,282 +0,0 @@
-
-
-
- Write your own filter — Pygments
-
-
-
-
-
Writing your own filters is very easy. All you have to do is subclass
-the Filter class and override the filter method. Additionally, a
-filter is instantiated with some keyword arguments you can use to
-adjust the behavior of your filter.
-
-
Subclassing Filters
-
As an example, we write a filter that converts all Name.Function tokens
-to normal Name tokens to make the output less colorful.
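
With an extra classtoo option that also converts Name.Class tokens, the
filter looks like this:

    from pygments.util import get_bool_opt
    from pygments.token import Name
    from pygments.filter import Filter

    class UncolorFilter(Filter):

        def __init__(self, **options):
            Filter.__init__(self, **options)
            self.class_too = get_bool_opt(options, 'classtoo')

        def filter(self, lexer, stream):
            for ttype, value in stream:
                if ttype is Name.Function or (self.class_too and
                                              ttype is Name.Class):
                    ttype = Name
                yield ttype, value
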
Some notes on the lexer argument: that can be quite confusing since it doesn't
-need to be a lexer instance. If a filter was added by using the add_filter()
-function of lexers, that lexer is registered for the filter. In that case
-lexer will refer to the lexer that has registered the filter. It can be used
-to access options passed to a lexer. Because it could be None you always have
-to check for that case if you access it.
-
-
-
Using a decorator
-
You can also use the simplefilter decorator from the pygments.filter module:
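
Written with the decorator, the same filter looks like the sketch below.
Note that as of Pygments 2.0 the decorated function receives the generated
filter instance as its first argument:

    from pygments.util import get_bool_opt
    from pygments.token import Name
    from pygments.filter import simplefilter

    @simplefilter
    def uncolor(self, lexer, stream, options):
        # 'self' is the FunctionFilter instance created by the decorator
        class_too = get_bool_opt(options, 'classtoo')
        for ttype, value in stream:
            if ttype is Name.Function or (class_too and ttype is Name.Class):
                ttype = Name
            yield ttype, value
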
You can filter token streams coming from lexers to improve or annotate the
-output. For example, you can highlight special words in comments, convert
-keywords to upper or lowercase to enforce a style guide etc.
-
To apply a filter, you can use the add_filter() method of a lexer:
-
>>> from pygments.lexers import PythonLexer
->>> l = PythonLexer()
->>> # add a filter given by a string and options
->>> l.add_filter('codetagify', case='lower')
->>> l.filters
-[<pygments.filters.CodeTagFilter object at 0xb785decc>]
->>> from pygments.filters import KeywordCaseFilter
->>> # or give an instance
->>> l.add_filter(KeywordCaseFilter(case='lower'))
-
-
The add_filter() method takes keyword arguments which are forwarded to
-the constructor of the filter.
-
To get a list of all registered filters by name, you can use the
-get_all_filters() function from the pygments.filters module that returns an
-iterable for all known filters.
Raise an exception when the lexer generates an error token.
-
Options accepted:
-
-
excclass :Exception class
-
The exception class to raise.
-The default is pygments.filters.ErrorToken.
-
-
New in Pygments 0.8.
-
-
-
-
-
Name:
raiseonerror
-
-
-
-
-
VisibleWhitespaceFilter
-
-
Convert tabs, newlines and/or spaces to visible characters.
-
Options accepted:
-
-
spaces :string or bool
-
If this is a one-character string, spaces will be replaced by this string.
-If it is another true value, spaces will be replaced by · (unicode
-MIDDLE DOT). If it is a false value, spaces will not be replaced. The
-default is False.
-
tabs :string or bool
-
The same as for spaces, but the default replacement character is »
-(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
-is False. Note: this will not work if the tabsize option for the
-lexer is nonzero, as tabs will already have been expanded then.
-
tabsize :int
-
If tabs are to be replaced by this filter (see the tabs option), this
-is the total number of characters that a tab should be expanded to.
-The default is 8.
-
newlines :string or bool
-
The same as for spaces, but the default replacement character is ¶
-(unicode PILCROW SIGN). The default value is False.
-
wstokentype :bool
-
If true, give whitespace the special Whitespace token type. This allows
-styling the visible whitespace differently (e.g. greyed out), but it can
-disrupt background colors. The default is True.
-
-
New in Pygments 0.8.
-
-
-
-
-
Name:
whitespace
-
-
-
-
-
TokenMergeFilter
-
-
Merges consecutive tokens with the same token type in the output stream of a
-lexer.
-
New in Pygments 1.2.
-
-
-
-
-
Name:
tokenmerge
-
-
-
-
-
NameHighlightFilter
-
-
Highlight a normal Name token with a different token type.
-
-
GobbleFilter
-
-
This filter drops the first n characters off every line of code. This
-may be useful when the source code fed to the lexer is indented by a fixed
-amount of space that isn't desired in the output.
-
Options accepted:
-
-
n :int
-
The number of characters to gobble.
-
-
New in Pygments 1.2.
-
-
-
-
-
Name:
gobble
-
-
-
-
-
CodeTagFilter
-
-
Highlight special code tags in comments and docstrings.
-
Options accepted:
-
-
codetags :list of strings
-
A list of strings that are flagged as code tags. The default is to
-highlight XXX, TODO, BUG and NOTE.
-
-
-
-
-
-
Name:
codetagify
-
-
-
-
-
KeywordCaseFilter
-
-
Convert keywords to lowercase or uppercase or capitalize them, which
-means first letter uppercase, rest lowercase.
-
This can be useful e.g. if you highlight Pascal code and want to adapt the
-code to your style guide.
-
Options accepted:
-
-
case :string
-
The casing to convert keywords to. Must be one of 'lower',
-'upper' or 'capitalize'. The default is 'lower'.
-
-
-
-
-
-
Name:
keywordcase
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/formatterdevelopment.html b/vendor/pygments/docs/build/formatterdevelopment.html
deleted file mode 100644
index 1485dd1..0000000
--- a/vendor/pygments/docs/build/formatterdevelopment.html
+++ /dev/null
@@ -1,374 +0,0 @@
-
-
-
- Write your own formatter — Pygments
-
-
-
-
-
As well as creating your own lexer, writing a new
-formatter for Pygments is easy and straightforward.
-
A formatter is a class that is initialized with some keyword arguments (the
-formatter options) and that must provide a format() method.
-Additionally a formatter should provide a get_style_defs() method that
-returns the style definitions from the style in a form usable for the
-formatter's output format.
-
-
Quickstart
-
The most basic formatter shipped with Pygments is the NullFormatter. It just
-sends the value of a token to the output stream:
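
    from pygments.formatter import Formatter

    class NullFormatter(Formatter):
        # (the name/aliases/filenames attributes are omitted here)

        def format(self, tokensource, outfile):
            enc = self.encoding
            for ttype, value in tokensource:
                if enc:
                    outfile.write(value.encode(enc))
                else:
                    outfile.write(value)
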
As you can see, the format() method is passed two parameters: tokensource
-and outfile. The first is an iterable of (token_type, value) tuples,
-the latter a file-like object with a write() method.
-
Because this formatter is so basic, it doesn't override the get_style_defs()
-method.
-
-
-
Styles
-
Styles aren't instantiated but their metaclass provides some class functions
-so that you can access the style definitions easily.
-
Styles are iterable and yield tuples in the form (ttype, d) where ttype
-is a token and d is a dict with the following keys:
-
-
'color'
-
Hexadecimal color value (eg: 'ff0000' for red) or None if not
-defined.
-
'bold'
-
True if the value should be bold
-
'italic'
-
True if the value should be italic
-
'underline'
-
True if the value should be underlined
-
'bgcolor'
-
Hexadecimal color value for the background (eg: 'eeeeee' for light
-gray) or None if not defined.
-
'border'
-
Hexadecimal color value for the border (eg: '0000aa' for a dark
-blue) or None for no border.
-
-
Additional keys might appear in the future; formatters should ignore all keys
-they don't support.
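
For example, a small sketch that prints the color of every styled token
type (get_style_by_name is the regular style lookup helper):

    from pygments.styles import get_style_by_name

    style = get_style_by_name('default')
    for ttype, d in style:
        if d['color']:
            print('%s -> #%s' % (ttype, d['color']))
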
-
-
-
HTML 3.2 Formatter
-
For a more complex example, let's implement an HTML 3.2 formatter. We don't
-use CSS but inline markup (<u>, <font>, etc). Because this isn't good
-style this formatter isn't in the standard library ;-)
-
from pygments.formatter import Formatter
-
-class OldHtmlFormatter(Formatter):
-
-    def __init__(self, **options):
-        Formatter.__init__(self, **options)
-
-        # create a dict of (start, end) tuples that wrap the
-        # value of a token so that we can use it in the format
-        # method later
-        self.styles = {}
-
-        # we iterate over the `_styles` attribute of a style item
-        # that contains the parsed style values.
-        for token, style in self.style:
-            start = end = ''
-            # a style item is a tuple in the following form:
-            # colors are readily specified in hex: 'RRGGBB'
-            if style['color']:
-                start += '<font color="#%s">' % style['color']
-                end = '</font>' + end
-            if style['bold']:
-                start += '<b>'
-                end = '</b>' + end
-            if style['italic']:
-                start += '<i>'
-                end = '</i>' + end
-            if style['underline']:
-                start += '<u>'
-                end = '</u>' + end
-            self.styles[token] = (start, end)
-
-    def format(self, tokensource, outfile):
-        # lastval is a string we use for caching
-        # because it's possible that a lexer yields a number
-        # of consecutive tokens with the same token type.
-        # to minimize the size of the generated html markup we
-        # try to join the values of same-type tokens here
-        lastval = ''
-        lasttype = None
-
-        # wrap the whole output with <pre>
-        outfile.write('<pre>')
-
-        for ttype, value in tokensource:
-            # if the token type doesn't exist in the stylemap
-            # we try it with the parent of the token type
-            # eg: parent of Token.Literal.String.Double is
-            # Token.Literal.String
-            while ttype not in self.styles:
-                ttype = ttype.parent
-            if ttype == lasttype:
-                # the current token type is the same as in the last
-                # iteration. cache it
-                lastval += value
-            else:
-                # not the same token as last iteration, but we
-                # have some data in the buffer. wrap it with the
-                # defined style and write it to the output file
-                if lastval:
-                    stylebegin, styleend = self.styles[lasttype]
-                    outfile.write(stylebegin + lastval + styleend)
-                # set lastval/lasttype to current values
-                lastval = value
-                lasttype = ttype
-
-        # if something is left in the buffer, write it to the
-        # output file, then close the opened <pre> tag
-        if lastval:
-            stylebegin, styleend = self.styles[lasttype]
-            outfile.write(stylebegin + lastval + styleend)
-        outfile.write('</pre>\n')
-
-
The comments should explain it. Again, this formatter doesn't override the
-get_style_defs() method. If we had used CSS classes instead of
-inline HTML markup, we would need to generate the CSS first. For that
-purpose the get_style_defs() method exists.
-
-
-
Generating Style Definitions
-
Some formatters like the LatexFormatter and the HtmlFormatter don't
-output inline markup but reference either macros or css classes. Because
-the definitions of those are not part of the output, the get_style_defs()
-method exists. It is passed one parameter (if it's used and how it's used
-is up to the formatter) and has to return a string or None.
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/formatters.html b/vendor/pygments/docs/build/formatters.html
deleted file mode 100644
index 02bfa5c..0000000
--- a/vendor/pygments/docs/build/formatters.html
+++ /dev/null
@@ -1,927 +0,0 @@
-
-
-
- Available formatters — Pygments
-
-
-
-
-
If given, must be an encoding name (such as "utf-8"). This will
-be used to convert the token strings (which are Unicode strings)
-to byte strings in the output (default: None).
-It will also be written in an encoding declaration suitable for the
-document format if the full option is given (e.g. a meta
-content-type directive in HTML or an invocation of the inputenc
-package in LaTeX).
-
If this is "" or None, Unicode strings will be written
-to the output file, which most file-like objects do not support.
-For example, pygments.highlight() will return a Unicode string if
-called with no outfile argument and a formatter that has encoding
-set to None because it uses a StringIO.StringIO object that
-supports Unicode arguments to write(). Using a regular file object
-wouldn't work.
-
-
outencoding
-
New in Pygments 0.7.
-
When using Pygments from the command line, any encoding option given is
-passed to the lexer and the formatter. This is sometimes not desirable,
-for example if you want to set the input encoding to "guess".
-Therefore, outencoding has been introduced which overrides encoding
-for the formatter if given.
-
-
-
-
-
Formatter classes
-
All these classes are importable from pygments.formatters.
-
-
BBCodeFormatter
-
-
Format tokens with BBcodes. These formatting codes are used by many
-bulletin boards, so you can highlight your sourcecode with pygments before
-posting it there.
-
This formatter has no support for background colors and borders, as there
-are no common BBcode tags for that.
-
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
-so you can't use the highlighting together with that tag.
-Text in a [code] tag usually is shown with a monospace font (which this
-formatter can do with the monofont option) and no spaces (which you
-need for indentation) are removed.
-
Additional options accepted:
-
-
style
-
The style to use, can be a string or a Style subclass (default:
-'default').
-
codetag
-
If set to true, put the output into [code] tags (default:
-false)
-
monofont
-
If set to true, add a tag to show the code with a monospace font
-(default: false).
-
-
-
-
-
-
Short names:
bbcode, bb
-
-
Filename patterns:
None
-
-
-
-
-
-
-
BmpImageFormatter
-
-
Create a bitmap image from source code. This uses the Python Imaging Library to
-generate a pixmap from the source code.
-
New in Pygments 1.0. (You could create bitmap images before by passing a
-suitable image_format option to the ImageFormatter.)
-
-
-
-
-
Short names:
bmp, bitmap
-
-
Filename patterns:
*.bmp
-
-
-
-
-
-
-
GifImageFormatter
-
-
Create a GIF image from source code. This uses the Python Imaging Library to
-generate a pixmap from the source code.
-
New in Pygments 1.0. (You could create GIF images before by passing a
-suitable image_format option to the ImageFormatter.)
-
-
-
-
-
Short names:
gif
-
-
Filename patterns:
*.gif
-
-
-
-
-
-
-
HtmlFormatter
-
-
Format tokens as HTML 4 <span> tags within a <pre> tag, wrapped
-in a <div> tag. The <div>'s CSS class can be set by the cssclass
-option.
-
If the linenos option is set to "table", the <pre> is
-additionally wrapped inside a <table> which has one row and two
-cells: one containing the line numbers and one containing the code.
-
A list of lines can be specified using the hl_lines option to make these
-lines highlighted (as of Pygments 0.11).
-
With the full option, a complete HTML 4 document is output, including
-the style definitions inside a <style> tag, or in a separate file if
-the cssfile option is given.
-
When tagsfile is set to the path of a ctags index file, it is used to
-generate hyperlinks from names to their definition. You must enable
-anchorlines and run ctags with the -n option for this to work. The
-python-ctags module from PyPI must be installed to use this feature;
-otherwise a RuntimeError will be raised.
-
The get_style_defs(arg='') method of a HtmlFormatter returns a string
-containing CSS rules for the CSS classes used by the formatter. The
-argument arg can be used to specify additional CSS selectors that
-are prepended to the classes. A call fmter.get_style_defs('td .code')
-would prefix every rule in the generated CSS with the td .code selector.
nowrap
-
If set to True, don't wrap the tokens at all, not even inside a <pre>
-tag. This disables most other options (default: False).
-
full
-
Tells the formatter to output a "full" document, i.e. a complete
-self-contained document (default: False).
-
title
-
If full is true, the title that should be used to caption the
-document (default: '').
-
style
-
The style to use, can be a string or a Style subclass (default:
-'default'). This option has no effect if the cssfile
-and noclobber_cssfile option are given and the file specified in
-cssfile exists.
-
noclasses
-
If set to true, token <span> tags will not use CSS classes, but
-inline styles. This is not recommended for larger pieces of code since
-it increases output size by quite a bit (default: False).
-
classprefix
-
Since the token types use relatively short class names, they may clash
-with some of your own class names. In this case you can use the
-classprefix option to give a string to prepend to all Pygments-generated
-CSS class names for token types.
-Note that this option also affects the output of get_style_defs().
-
cssclass
-
CSS class for the wrapping <div> tag (default: 'highlight').
-If you set this option, the default selector for get_style_defs()
-will be this class.
-
New in Pygments 0.9: If you select the 'table' line numbers, the
-wrapping table will have a CSS class of this string plus 'table',
-the default is accordingly 'highlighttable'.
-
-
cssstyles
-
Inline CSS styles for the wrapping <div> tag (default: '').
-
prestyles
-
Inline CSS styles for the <pre> tag (default: ''). New in
-Pygments 0.11.
-
cssfile
-
If the full option is true and this option is given, it must be the
-name of an external file. If the filename does not include an absolute
-path, the file's path will be assumed to be relative to the main output
-file's path, if the latter can be found. The stylesheet is then written
-to this file instead of the HTML file. New in Pygments 0.6.
-
noclobber_cssfile
-
If cssfile is given and the specified file exists, the css file will
-not be overwritten. This allows the use of the full option in
-combination with a user specified css file. Default is False.
-New in Pygments 1.1.
-
linenos
-
If set to 'table', output line numbers as a table with two cells,
-one containing the line numbers, the other the whole code. This is
-copy-and-paste-friendly, but may cause alignment problems with some
-browsers or fonts. If set to 'inline', the line numbers will be
-integrated in the <pre> tag that contains the code (that setting
-is new in Pygments 0.8).
-
For compatibility with Pygments 0.7 and earlier, every true value
-except 'inline' means the same as 'table' (in particular, that
-means also True).
-
The default value is False, which means no line numbers at all.
-
Note: with the default ("table") line number mechanism, the line
-numbers and code can have different line heights in Internet Explorer
-unless you give the enclosing <pre> tags an explicit line-height
-CSS property (you get the default line spacing with line-height:
-125%).
-
-
hl_lines
-
Specify a list of lines to be highlighted. New in Pygments 0.11.
-
linenostart
-
The line number for the first line (default: 1).
-
linenostep
-
If set to a number n > 1, only every nth line number is printed.
-
linenospecial
-
If set to a number n > 0, every nth line number is given the CSS
-class "special" (default: 0).
-
nobackground
-
If set to True, the formatter won't output the background color
-for the wrapping element (this automatically defaults to False
-when there is no wrapping element [eg: no argument for the
-get_syntax_defs method given]) (default: False). New in
-Pygments 0.6.
-
lineseparator
-
This string is output between lines of code. It defaults to "\n",
-which is enough to break a line inside <pre> tags, but you can
-e.g. set it to "<br>" to get HTML line breaks. New in Pygments
-0.7.
-
lineanchors
-
If set to a nonempty string, e.g. foo, the formatter will wrap each
-output line in an anchor tag with a name of foo-linenumber.
-This allows easy linking to certain lines. New in Pygments 0.9.
-
linespans
-
If set to a nonempty string, e.g. foo, the formatter will wrap each
-output line in a span tag with an id of foo-linenumber.
-This allows easy access to lines via javascript. New in Pygments 1.6.
-
anchorlinenos
-
If set to True, will wrap line numbers in <a> tags. Used in
-combination with linenos and lineanchors.
-
tagsfile
-
If set to the path of a ctags file, wrap names in anchor tags that
-link to their definitions. lineanchors should be used, and the
-tags file should specify line numbers (see the -n option to ctags).
-New in Pygments 1.6.
-
tagurlformat
-
A string formatting pattern used to generate links to ctags definitions.
-Available variables are %(path)s, %(fname)s and %(fext)s.
-Defaults to an empty string, resulting in just #prefix-number links.
-New in Pygments 1.6.
-
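
As an illustration of how these options combine, a typical use of the
formatter from Python might look like this:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    formatter = HtmlFormatter(linenos='table', hl_lines=[2], cssclass='syntax')
    html = highlight('def f():\n    return 42\n', PythonLexer(), formatter)
    css = formatter.get_style_defs('.syntax')
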
-
Subclassing the HTML formatter
-
New in Pygments 0.7.
-
The HTML formatter is now built in a way that allows easy subclassing, thus
-customizing the output HTML code. The format() method calls
-self._format_lines() which returns a generator that yields tuples of (1,
-line), where the 1 indicates that the line is a line of the
-formatted source code.
-
If the nowrap option is set, the generator is then iterated over and the
-resulting HTML is output.
-
Otherwise, format() calls self.wrap(), which wraps the generator with
-other generators. These may add some HTML code to the one generated by
-_format_lines(), either by modifying the lines generated by the latter,
-then yielding them again with (1, line), and/or by yielding other HTML
-code before or after the lines, with (0, html). The distinction between
-source lines and other code makes it possible to wrap the generator multiple
-times.
-
The default wrap() implementation adds a <div> and a <pre> tag.
-
A custom HtmlFormatter subclass could look like this:
-
class CodeHtmlFormatter(HtmlFormatter):
-
-    def wrap(self, source, outfile):
-        return self._wrap_code(source)
-
-    def _wrap_code(self, source):
-        yield 0, '<code>'
-        for i, t in source:
-            if i == 1:
-                # it's a line of formatted code
-                t += '<br>'
-            yield i, t
-        yield 0, '</code>'
-
-
This results in wrapping the formatted lines with a <code> tag, where the
-source lines are broken using <br> tags.
-
After calling wrap(), the format() method also adds the "line numbers"
-and/or "full document" wrappers if the respective options are set. Then, all
-HTML yielded by the wrapped generator is output.
-
-
-
-
-
Short names:
html
-
-
Filename patterns:
*.html, *.htm
-
-
-
-
-
-
-
ImageFormatter
-
-
Create a PNG image from source code. This uses the Python Imaging Library to
-generate a pixmap from the source code.
-
New in Pygments 0.10.
-
Additional options accepted:
-
-
image_format
-
An image format to output to that is recognised by PIL; these include:
-
-
"PNG" (default)
-
"JPEG"
-
"BMP"
-
"GIF"
-
-
-
line_pad
-
The extra spacing (in pixels) between each line of text.
-
Default: 2
-
-
font_name
-
The font name to be used as the base font from which others, such as
-bold and italic fonts will be generated. This really should be a
-monospace font to look sane.
-
Default: "Bitstream Vera Sans Mono"
-
-
font_size
-
The font size in points to be used.
-
Default: 14
-
-
image_pad
-
The padding, in pixels to be used at each edge of the resulting image.
-
Default: 10
-
-
line_numbers
-
Whether line numbers should be shown: True/False
-
Default: True
-
-
line_number_start
-
The line number of the first line.
-
Default: 1
-
-
line_number_step
-
The step used when printing line numbers.
-
Default: 1
-
-
line_number_bg
-
The background colour (in "#123456" format) of the line number bar, or
-None to use the style background color.
-
Default: "#eed"
-
-
line_number_fg
-
The text color of the line numbers (in "#123456"-like format).
-
Default: "#886"
-
-
line_number_chars
-
The number of columns of line numbers allowable in the line number
-margin.
-
Default: 2
-
-
line_number_bold
-
Whether line numbers will be bold: True/False
-
Default: False
-
-
line_number_italic
-
Whether line numbers will be italicized: True/False
-
Default: False
-
-
line_number_separator
-
Whether a line will be drawn between the line number area and the
-source code area: True/False
-
Default: True
-
-
line_number_pad
-
The horizontal padding (in pixels) between the line number margin, and
-the source code area.
-
Default: 6
-
-
hl_lines
-
Specify a list of lines to be highlighted. New in Pygments 1.2.
-
Default: empty list
-
-
hl_color
-
Specify the color for highlighting lines. New in Pygments 1.2.
-
Default: highlight color of the selected style
-
-
-
-
-
-
-
Short names:
img, IMG, png
-
-
Filename patterns:
*.png
-
-
-
-
-
-
-
JpgImageFormatter
-
-
Create a JPEG image from source code. This uses the Python Imaging Library to
-generate a pixmap from the source code.
-
New in Pygments 1.0. (You could create JPEG images before by passing a
-suitable image_format option to the ImageFormatter.)
-
-
-
-
-
Short names:
jpg, jpeg
-
-
Filename patterns:
*.jpg
-
-
-
-
-
-
-
LatexFormatter
-
-
Format tokens as LaTeX code. This needs the fancyvrb and color
-standard packages.
-
Without the full option, code is formatted as one Verbatim
-environment, like this:
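
(output sketched for a small Python snippet; the exact escaping can differ
by version)

    \begin{Verbatim}[commandchars=\\\{\}]
    \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
        \PY{k}{pass}
    \end{Verbatim}
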
The special command used here (\PY) and all the other macros it needs
-are output by the get_style_defs method.
-
With the full option, a complete LaTeX document is output, including
-the command definitions in the preamble.
-
The get_style_defs() method of a LatexFormatter returns a string
-containing \def commands defining the macros needed inside the
-Verbatim environments.
-
Additional options accepted:
-
-
style
-
The style to use, can be a string or a Style subclass (default:
-'default').
-
full
-
Tells the formatter to output a "full" document, i.e. a complete
-self-contained document (default: False).
-
title
-
If full is true, the title that should be used to caption the
-document (default: '').
-
docclass
-
If the full option is enabled, this is the document class to use
-(default: 'article').
-
preamble
-
If the full option is enabled, this can be further preamble commands,
-e.g. \usepackage (default: '').
-
linenos
-
If set to True, output line numbers (default: False).
-
linenostart
-
The line number for the first line (default: 1).
-
linenostep
-
If set to a number n > 1, only every nth line number is printed.
-
verboptions
-
Additional options given to the Verbatim environment (see the fancyvrb
-docs for possible values) (default: '').
-
commandprefix
-
The LaTeX commands used to produce colored output are constructed
-using this prefix and some letters (default: 'PY').
-New in Pygments 0.7.
-
New in Pygments 0.10: the default is now 'PY' instead of 'C'.
-
-
texcomments
-
If set to True, enables LaTeX comment lines. That is, LaTex markup
-in comment tokens is not escaped so that LaTeX can render it (default:
-False). New in Pygments 1.2.
-
mathescape
-
If set to True, enables LaTeX math mode escape in comments. That
-is, '$...$' inside a comment will trigger math mode (default:
-False). New in Pygments 1.2.
-
-
-
-
-
-
Short names:
latex, tex
-
-
Filename patterns:
*.tex
-
-
-
-
-
-
-
NullFormatter
-
-
Output the text unchanged without any formatting.
-
-
-
-
-
Short names:
text, null
-
-
Filename patterns:
*.txt
-
-
-
-
-
-
-
RawTokenFormatter
-
-
Format tokens as a raw representation for storing token streams.
-
The format is tokentype<TAB>repr(tokenstring)\n. The output can later
-be converted to a token stream with the RawTokenLexer, described in the
-lexer list.
-
Only two options are accepted:
-
-
compress
-
If set to 'gz' or 'bz2', compress the output with the given
-compression algorithm after encoding (default: '').
-
error_color
-
If set to a color name, highlight error tokens using that color. If
-set but with no value, defaults to 'red'.
-New in Pygments 0.11.
-
-
-
-
-
-
Short names:
raw, tokens
-
-
Filename patterns:
*.raw
-
-
-
-
-
-
-
RtfFormatter
-
-
Format tokens as RTF markup. This formatter automatically outputs full RTF
-documents with color information and other useful stuff. Perfect for Copy and
-Paste into Microsoft® Word® documents.
-
New in Pygments 0.6.
-
Additional options accepted:
-
-
style
-
The style to use, can be a string or a Style subclass (default:
-'default').
-
fontface
-
The font family used, for example Bitstream Vera Sans. Defaults to
-some generic font which is supposed to have fixed width.
-
-
-
-
-
-
Short names:
rtf
-
-
Filename patterns:
*.rtf
-
-
-
-
-
-
-
SvgFormatter
-
-
Format tokens as an SVG graphics file. This formatter is still experimental.
-Each line of code is a <text> element with explicit x and y
-coordinates containing <tspan> elements with the individual token styles.
-
By default, this formatter outputs a full SVG document including doctype
-declaration and the <svg> root element.
-
New in Pygments 0.9.
-
Additional options accepted:
-
-
nowrap
-
Don't wrap the SVG <text> elements in <svg><g> elements and
-don't add an XML declaration and a doctype. If true, the fontfamily
-and fontsize options are ignored. Defaults to False.
-
fontfamily
-
The value to give the wrapping <g> element's font-family
-attribute, defaults to "monospace".
-
fontsize
-
The value to give the wrapping <g> element's font-size
-attribute, defaults to "14px".
-
xoffset
-
Starting offset in X direction, defaults to 0.
-
yoffset
-
Starting offset in Y direction, defaults to the font size if it is given
-in pixels, or 20 otherwise. (This is necessary since text coordinates
-refer to the text baseline, not the top edge.)
-
ystep
-
Offset to add to the Y coordinate for each subsequent line. This should
-roughly be the text size plus 5. It defaults to that value if the text
-size is given in pixels, or 25 otherwise.
-
spacehack
-
Convert spaces in the source to &#160; entities, which are non-breaking
-spaces. SVG provides the xml:space attribute to control how
-whitespace inside tags is handled, in theory, the preserve value
-could be used to keep all whitespace as-is. However, many current SVG
-viewers don't obey that rule, so this option is provided as a workaround
-and defaults to True.
-
-
-
-
-
-
Short names:
svg
-
-
Filename patterns:
*.svg
-
-
-
-
-
-
-
Terminal256Formatter
-
-
Format tokens with ANSI color sequences, for output in a 256-color
-terminal or console. Like in TerminalFormatter color sequences
-are terminated at newlines, so that paging the output works correctly.
-
The formatter takes colors from a style defined by the style option
-and converts them to nearest ANSI 256-color escape sequences. Bold and
-underline attributes from the style are preserved (and displayed).
-
New in Pygments 0.9.
-
Options accepted:
-
-
style
-
The style to use, can be a string or a Style subclass (default:
-'default').
-
-
-
-
-
-
Short names:
terminal256, console256, 256
-
-
Filename patterns:
None
-
-
-
-
-
-
-
TerminalFormatter
-
-
Format tokens with ANSI color sequences, for output in a text console.
-Color sequences are terminated at newlines, so that paging the output
-works correctly.
-
The get_style_defs() method doesn't do anything special since there is
-no support for common styles.
-
Options accepted:
-
-
bg
-
Set to "light" or "dark" depending on the terminal's background
-(default: "light").
-
colorscheme
-
A dictionary mapping token types to (lightbg, darkbg) color names or
-None (default: None = use builtin colorscheme).
-
-
-
-
-
-
Short names:
terminal, console
-
-
Filename patterns:
None
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/index.html b/vendor/pygments/docs/build/index.html
deleted file mode 100644
index 6ab5fd9..0000000
--- a/vendor/pygments/docs/build/index.html
+++ /dev/null
@@ -1,261 +0,0 @@
-
-
-
- Overview — Pygments
-
-
-
-
-
Pygments requires at least Python 2.4 to work correctly. Just to clarify:
-there won't ever be support for Python versions below 2.4. However, there
-are no other dependencies.
-
-
Installing a released version
-
-
As a Python egg (via easy_install)
-
You can install the most recent Pygments version using easy_install:
-
-sudo easy_install Pygments
-
-
This will install a Pygments egg in your Python installation's site-packages
-directory.
-
-
-
From the tarball release
-
-
Download the most recent tarball from the download page
-
Unpack the tarball
-
sudo python setup.py install
-
-
Note that the last command will automatically download and install
-setuptools if you don't already have it installed. This requires a working
-internet connection.
-
This will install Pygments into your Python installation's site-packages directory.
Since Pygments 0.9, the distribution ships Markdown preprocessor sample code
-that uses Pygments to render source code in external/markdown-processor.py.
-You can copy and adapt it to your liking.
-
-
-
TextMate
-
Antonio Cangiano has created a Pygments bundle for TextMate that allows you
-to colorize code via a simple menu option. It can be found here.
-
-
-
Bash completion
-
The source distribution contains a file external/pygments.bashcomp that
-sets up completion for the pygmentize command in bash.
PythonInterpreter interpreter = new PythonInterpreter();
-
-// Set a variable with the content you want to work with
-interpreter.set("code", code);
-
-// Simply use Pygments as you would in Python
-interpreter.exec("from pygments import highlight\n"
-    + "from pygments.lexers import PythonLexer\n"
-    + "from pygments.formatters import HtmlFormatter\n"
-    + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())");
-
-// Get the result that has been set in a variable
-System.out.println(interpreter.get("result", String.class));
-
If a lexer for your favorite language is missing in the Pygments package, you can
-easily write your own and extend Pygments.
-
All you need can be found inside the pygments.lexer module. As you can read in
-the API documentation, a lexer is a class that is initialized with
-some keyword arguments (the lexer options) and that provides a
-get_tokens_unprocessed() method which is given a string or unicode object with
-the data to parse.
-
The get_tokens_unprocessed() method must return an iterator or iterable
-containing tuples in the form (index, token, value). Normally you don't need
-to do this since there are numerous base lexers you can subclass.
-
-
RegexLexer
-
A very powerful (but quite easy to use) lexer is the RegexLexer. This lexer
-base class allows you to define lexing rules in terms of regular expressions
-for different states.
-
States are groups of regular expressions that are matched against the input
-string at the current position. If one of these expressions matches, a
-corresponding action is performed (normally yielding a token with a specific
-type), the current position is set to where the last match ended and the
-matching process continues with the first regex of the current state.
-
Lexer states are kept in a state stack: each time a new state is entered, the
-new state is pushed onto the stack. The most basic lexers (like the
-DiffLexer) just need one state.
-
Each state is defined as a list of tuples in the form (regex, action,
-new_state) where the last item is optional. In the most basic form, action
-is a token type (like Name.Builtin). That means: When regex matches, emit a
-token with the match text and type tokentype and push new_state on the state
-stack. If the new state is '#pop', the topmost state is popped from the
-stack instead. (To pop more than one state, use '#pop:2' and so on.)
-'#push' is a synonym for pushing the current state on the
-stack.
-
The following example shows the DiffLexer from the builtin lexers. Note that
-it contains some additional attributes name, aliases and filenames which
-aren't required for a lexer. They are used by the builtin lexer lookup
-functions.
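
    from pygments.lexer import RegexLexer
    from pygments.token import Text, Generic

    class DiffLexer(RegexLexer):
        name = 'Diff'
        aliases = ['diff']
        filenames = ['*.diff']

        tokens = {
            'root': [
                (r' .*\n', Text),
                (r'\+.*\n', Generic.Inserted),
                (r'-.*\n', Generic.Deleted),
                (r'@.*\n', Generic.Subheading),
                (r'Index.*\n', Generic.Heading),
                (r'=.*\n', Generic.Heading),
                (r'.*\n', Text),
            ]
        }
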
As you can see this lexer only uses one state. When the lexer starts scanning
-the text, it first checks if the current character is a space. If this is true
-it scans everything until newline and returns the parsed data as Text token.
-
If this rule doesn't match, it checks if the current char is a plus sign. And
-so on.
-
If no rule matches at the current position, the current char is emitted as an
-Error token that indicates a parsing error, and the position is increased by
-1.
-
-
-
Regex Flags
-
You can either define regex flags in the regex (r'(?x)foo bar') or by adding
-a flags attribute to your lexer class. If no attribute is defined, it defaults
-to re.MULTILINE. For more information about regular expression flags see the
-regular expressions help page in the python documentation.
-
-
-
Scanning multiple tokens at once
-
Here is a more complex lexer that highlights INI files. INI files consist of
-sections, comments and key = value pairs:
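
    from pygments.lexer import RegexLexer, bygroups
    from pygments.token import Text, Comment, Keyword, Name, Operator, String

    class IniLexer(RegexLexer):
        name = 'INI'
        aliases = ['ini', 'cfg']
        filenames = ['*.ini', '*.cfg']

        tokens = {
            'root': [
                (r'\s+', Text),
                (r';.*?$', Comment),
                (r'\[.*?\]$', Keyword),
                (r'(.*?)(\s*)(=)(\s*)(.*?)$',
                 bygroups(Name.Attribute, Text, Operator, Text, String))
            ]
        }
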
The lexer first looks for whitespace, comments and section names. And later it
-looks for a line that looks like a key, value pair, separated by an '='
-sign, and optional whitespace.
-
The bygroups helper makes sure that each group is yielded with a different
-token type. First the Name.Attribute token, then a Text token for the
-optional whitespace, after that a Operator token for the equals sign. Then a
-Text token for the whitespace again. The rest of the line is returned as
-String.
-
Note that for this to work, every part of the match must be inside a capturing
-group (a (...)), and there must not be any nested capturing groups. If you
-nevertheless need a group, use a non-capturing group defined using this syntax:
-r'(?:some|words|here)' (note the ?: after the beginning parenthesis).
-
If you find yourself needing a capturing group inside the regex which
-shouldn't be part of the output but is used in the regular expressions for
-backreferencing (eg: r'(<(foo|bar)>)(.*?)(</\2>)'), you can pass None
-to the bygroups function and that group will be skipped in the
-output.
-
-
-
Changing states
-
Many lexers need multiple states to work as expected. For example, some
-languages allow multiline comments to be nested. Since this is a recursive
-pattern it's impossible to lex just using regular expressions.
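
Consider this example lexer for a language with C-style comments that may
nest:

    from pygments.lexer import RegexLexer
    from pygments.token import Text, Comment

    class CppCommentLexer(RegexLexer):
        name = 'Example Lexer with states'

        tokens = {
            'root': [
                (r'[^/]+', Text),
                (r'/\*', Comment.Multiline, 'comment'),
                (r'//.*?$', Comment.Single),
                (r'/', Text)
            ],
            'comment': [
                (r'[^*/]+', Comment.Multiline),
                (r'/\*', Comment.Multiline, '#push'),
                (r'\*/', Comment.Multiline, '#pop'),
                (r'[*/]', Comment.Multiline)
            ]
        }
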
This lexer starts lexing in the 'root' state. It tries to match as much as
-possible until it finds a slash ('/'). If the next character after the slash
-is a star ('*') the RegexLexer sends those two characters to the output
-stream marked as Comment.Multiline and continues parsing with the rules
-defined in the 'comment' state.
-
If there wasn't a star after the slash, the RegexLexer checks if it's a
-singleline comment (eg: followed by a second slash). If this also wasn't the
-case it must be a single slash (the separate regex for a single slash must also
-be given, else the slash would be marked as an error token).
-
Inside the 'comment' state, we do the same thing again. Scan until the lexer
-finds a star or slash. If it's the opening of a multiline comment, push the
-'comment' state on the stack and continue scanning, again in the
-'comment' state. Else, check if it's the end of the multiline comment. If
-yes, pop one state from the stack.
-
Note: If you pop from an empty stack you'll get an IndexError. (There is an
-easy way to prevent this from happening: don't '#pop' in the root state).
-
If the RegexLexer encounters a newline that is flagged as an error token, the
-stack is emptied and the lexer continues scanning in the 'root' state. This
-helps producing error-tolerant highlighting for erroneous input, e.g. when a
-single-line string is not closed.
-
-
-
Advanced state tricks
-
There are a few more things you can do with states:
-
-
You can push multiple states onto the stack if you give a tuple instead of a
-simple string as the third item in a rule tuple. For example, if you want to
-match a comment containing a directive, something like:
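
    /* <processing directive>    rest of comment */

you can use a rule along these lines (the regexes here are illustrative):

    from pygments.lexer import RegexLexer
    from pygments.token import Comment, Text

    class ExampleLexer(RegexLexer):
        # hypothetical grammar, for illustration only
        tokens = {
            'root': [
                (r'/\* <', Comment, ('comment', 'directive')),
                (r'[^/]+', Text),
                (r'/', Text),
            ],
            'directive': [
                (r'[^>]*>', Comment.Directive, '#pop'),
            ],
            'comment': [
                (r'[^*]+', Comment),
                (r'\*/', Comment, '#pop'),
                (r'\*', Comment),
            ],
        }
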
When this encounters the above sample, first 'comment' and 'directive'
-are pushed onto the stack, then the lexer continues in the directive state
-until it finds the closing >, then it continues in the comment state until
-the closing */. Then, both states are popped from the stack again and
-lexing continues in the root state.
-
New in Pygments 0.9: The tuple can contain the special '#push' and
-'#pop' (but not '#pop:n') directives.
-
-
You can include the rules of a state in the definition of another. This is
-done by using include from pygments.lexer:
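
    from pygments.lexer import RegexLexer, include, bygroups
    from pygments.token import Comment, Keyword, Name, Punctuation, Text

    class ExampleLexer(RegexLexer):
        # invented mini-language: functions and comments only
        tokens = {
            'comment': [
                (r'/\*.*?\*/', Comment.Multiline),
                (r'//.*?\n', Comment.Single),
            ],
            'root': [
                include('comment'),
                (r'(function)(\s+)(\w+)',
                 bygroups(Keyword, Text, Name.Function), 'function'),
                (r'\s+', Text),
                (r'.', Text),
            ],
            'function': [
                include('comment'),
                (r'\{', Punctuation),
                (r'\}', Punctuation, '#pop'),
                (r'[^{}/]+', Text),
                (r'/', Text),
            ],
        }
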
This is a hypothetical lexer for a language that consists of functions and
-comments. Because comments can occur at toplevel and in functions, we need
-rules for comments in both states. As you can see, the include helper saves
-repeating rules that occur more than once (in this example, the state
-'comment' will never be entered by the lexer, as it's only there to be
-included in 'root' and 'function').
-
-
Sometimes, you may want to "combine" a state from existing ones. This is
-possible with the combined helper from pygments.lexer.
-
If you, instead of a new state, write combined('state1', 'state2') as the
-third item of a rule tuple, a new anonymous state will be formed from state1
-and state2 and if the rule matches, the lexer will enter this state.
-
This is not used very often, but can be helpful in some cases, such as the
-PythonLexer's string literal processing.
-
-
If you want your lexer to start lexing in a different state you can modify
-the stack by overloading the get_tokens_unprocessed() method:
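
    from pygments.lexer import RegexLexer
    from pygments.token import Comment, Text

    class MyLexer(RegexLexer):
        # 'MyLexer' and its states are hypothetical
        tokens = {
            'root': [
                (r'.+\n?', Text),
            ],
            'otherstate': [
                (r'#.*\n', Comment, '#pop'),
                (r'', Text, '#pop'),   # empty regex: fall back to 'root'
            ],
        }

        def get_tokens_unprocessed(self, text):
            # start with 'otherstate' on top of the stack instead of 'root'
            stack = ['root', 'otherstate']
            for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
                yield item
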
Some lexers like the PhpLexer use this to make the leading <?php
-preprocessor comments optional. Note that you can crash the lexer easily
-by putting values into the stack that don't exist in the token map. Also
-removing 'root' from the stack can result in strange errors!
-
-
An empty regex at the end of a state list, combined with '#pop', can
-act as a return point from a state that doesn't have a clear end marker.
-
-
-
-
-
Using multiple lexers
-
Using multiple lexers for the same input can be tricky. One of the easiest
-combination techniques is shown here: You can replace the token type entry in a
-rule tuple (the second item) with a lexer class. The matched text will then be
-lexed with that lexer, and the resulting tokens will be yielded.
-
For example, look at this stripped-down HTML lexer:
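
    import re

    from pygments.lexer import RegexLexer, bygroups, using
    from pygments.token import Text, Name
    from pygments.lexers import JavascriptLexer

    class HtmlLexer(RegexLexer):
        name = 'HTML'
        aliases = ['html']
        filenames = ['*.html', '*.htm']

        flags = re.IGNORECASE | re.DOTALL
        tokens = {
            'root': [
                (r'[^<&]+', Text),
                (r'&.*?;', Name.Entity),
                (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
                (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
                (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
            ],
            'tag': [
                (r'[^>]+', Name.Attribute),   # attribute handling simplified
                (r'>', Name.Tag, '#pop'),
            ],
            'script-content': [
                (r'(.+?)(<\s*/\s*script\s*>)',
                 bygroups(using(JavascriptLexer), Name.Tag), '#pop'),
            ],
        }
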
Here the content of a <script> tag is passed to a newly created instance of
-a JavascriptLexer and not processed by the HtmlLexer. This is done using the
-using helper that takes the other lexer class as its parameter.
-
Note the combination of bygroups and using. This makes sure that the content
-up to the </script> end tag is processed by the JavascriptLexer, while the
-end tag is yielded as a normal token with the Name.Tag type.
-
As an additional goodie, if the lexer class is replaced by this (imported from
-pygments.lexer), the "other" lexer will be the current one (because you cannot
-refer to the current class within the code that runs at class definition time).
-
Also note the (r'<\s*script\s*', Name.Tag, ('script-content','tag')) rule.
-Here, two states are pushed onto the state stack, 'script-content' and
-'tag'. That means that first 'tag' is processed, which will parse
-attributes and the closing >, then the 'tag' state is popped and the
-next state on top of the stack will be 'script-content'.
-
The using() helper has a special keyword argument, state, which works as
-follows: if given, the lexer to use initially is not in the "root" state,
-but in the state given by this argument. This only works with a RegexLexer.
-
Any other keyword arguments passed to using() are added to the keyword
-arguments used to create the lexer.
-
-
-
Delegating Lexer
-
Another approach for nested lexers is the DelegatingLexer which is for
-example used for the template engine lexers. It takes two lexers as
-arguments on initialisation: a root_lexer and a language_lexer.
-
The input is processed as follows: First, the whole text is lexed with the
-language_lexer. All tokens yielded with a type of Other are then
-concatenated and given to the root_lexer. The language tokens of the
-language_lexer are then inserted into the root_lexer's token stream
-at the appropriate positions.
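
For example, a lexer for PHP embedded in HTML can be defined like this:

    from pygments.lexer import DelegatingLexer
    from pygments.lexers import HtmlLexer, PhpLexer

    class HtmlPhpLexer(DelegatingLexer):
        def __init__(self, **options):
            super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
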
This procedure ensures that e.g. HTML with template tags in it is highlighted
-correctly even if the template tags are put into HTML tags or attributes.
-
If you want to change the needle token Other to something else, you can
-give the lexer another token type as the third parameter:
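
    from pygments.lexer import DelegatingLexer
    from pygments.lexers import HtmlLexer, PhpLexer
    from pygments.token import Comment

    class HypotheticalLexer(DelegatingLexer):
        def __init__(self, **options):
            # hypothetical: splice at Comment tokens instead of Other
            super(HypotheticalLexer, self).__init__(HtmlLexer, PhpLexer,
                                                    Comment, **options)
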
Sometimes the grammar of a language is so complex that a lexer would be unable
-to parse it just by using regular expressions and stacks.
-
For this, the RegexLexer allows callbacks to be given in rule tuples, instead
-of token types (bygroups and using are nothing else but preimplemented
-callbacks). The callback must be a function taking two arguments:
-
-
the lexer itself
-
the match object for the last matched rule
-
-
The callback must then return an iterable of (or simply yield) (index,
-tokentype, value) tuples, which are then just passed through by
-get_tokens_unprocessed(). The index here is the position of the token in
-the input string, tokentype is the normal token type (like Name.Builtin),
-and value the associated part of the input string.
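
For example, a callback that recognizes '=== headline ===' markup could
look like this:

    from pygments.lexer import RegexLexer
    from pygments.token import Generic

    class HypotheticLexer(RegexLexer):

        def headline_callback(lexer, match):
            equal_signs = match.group(1)
            text = match.group(2)
            yield match.start(), Generic.Headline, equal_signs + text + equal_signs

        tokens = {
            'root': [
                (r'(=+)(.*?)(\1)', headline_callback)
            ]
        }
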
If the regex for the headline_callback matches, the function is called with the
-match object. Note that after the callback is done, processing continues
-normally, that is, after the end of the previous match. The callback has no
-possibility to influence the position.
-
There are not really any simple examples for lexer callbacks, but you can see
-them in action e.g. in the compiled.py source code in the CLexer and
-JavaLexer classes.
-
-
-
The ExtendedRegexLexer class
-
The RegexLexer, even with callbacks, unfortunately isn't powerful enough for
-the funky syntax rules of some languages that will go unnamed, such as Ruby.
-
But fear not; even then you don't have to abandon the regular expression
-approach: Pygments has a subclass of RegexLexer, the ExtendedRegexLexer.
-All features known from RegexLexers are available here too, and the tokens are
-specified in exactly the same way, except for one detail:
-
The get_tokens_unprocessed() method holds its internal state data not as local
-variables, but in an instance of the pygments.lexer.LexerContext class, and
-that instance is passed to callbacks as a third argument. This means that you
-can modify the lexer state in callbacks.
-
The LexerContext class has the following members:
-
-
text -- the input text
-
pos -- the current starting position that is used for matching regexes
-
stack -- a list containing the state stack
-
end -- the maximum position to which regexes are matched, this defaults to
-the length of text
-
-
Additionally, the get_tokens_unprocessed() method can be given a
-LexerContext instead of a string and will then process this context instead of
-creating a new one for the string argument.
-
Note that because you can set the current position to anything in the callback,
-it won't automatically be set by the caller after the callback is finished.
-For example, this is how the hypothetical lexer above would be written with the
-ExtendedRegexLexer:
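
    from pygments.lexer import ExtendedRegexLexer
    from pygments.token import Generic

    class ExHypotheticLexer(ExtendedRegexLexer):

        def headline_callback(lexer, match, ctx):
            equal_signs = match.group(1)
            text = match.group(2)
            yield match.start(), Generic.Headline, equal_signs + text + equal_signs
            ctx.pos = match.end()   # advance the position manually

        tokens = {
            'root': [
                (r'(=+)(.*?)(\1)', headline_callback)
            ]
        }
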
This might sound confusing (and it can really be). But it is needed, and for an
-example look at the Ruby lexer in agile.py.
-
-
-
Filtering Token Streams
-
-Some languages ship a lot of builtin functions (for example PHP). The total
-number of those functions differs from system to system because not everybody
-has every extension installed. In the case of PHP there are over 3000 builtin
-functions. That's an incredibly large number of functions, far more than you
-can put into a regular expression.
-
-But because only Name tokens can be function names, this is solvable by overriding
-the get_tokens_unprocessed() method. The following lexer subclasses the
-PythonLexer so that it highlights some additional names as pseudo keywords:
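-
-(a sketch; the EXTRA_KEYWORDS set is made up for illustration)
-
-from pygments.lexers import PythonLexer
-from pygments.token import Name, Keyword
-
-class MyPythonLexer(PythonLexer):
-    EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))
-
-    def get_tokens_unprocessed(self, text):
-        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
-            if token is Name and value in self.EXTRA_KEYWORDS:
-                # re-tag the known names as pseudo keywords
-                yield index, Keyword.Pseudo, value
-            else:
-                yield index, token, value
-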
This page lists all available builtin lexers and the options they take.
-
Currently, all lexers support these options:
-
-
stripnl
-
Strip leading and trailing newlines from the input (default: True)
-
stripall
-
Strip all leading and trailing whitespace from the input (default:
-False).
-
ensurenl
-
Make sure that the input ends with a newline (default: True). This
-is required for some lexers that consume input linewise.
-New in Pygments 1.3.
-
tabsize
-
If given and greater than 0, expand tabs in the input (default: 0).
-
encoding
-
New in Pygments 0.6.
-
If given, must be an encoding name (such as "utf-8"). This encoding
-will be used to convert the input string to Unicode (if it is not already
-a Unicode string). The default is "latin1".
-
-If this option is set to "guess", a simple UTF-8 vs. Latin-1
-detection is used; if it is set to "chardet", the
-chardet library is used to
-guess the encoding of the input.
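-
-For example, these options can be passed when looking up a lexer (a sketch;
-the "chardet" value needs the chardet package installed):
-
-from pygments.lexers import get_lexer_by_name
-lexer = get_lexer_by_name('php', stripall=True, tabsize=4, encoding='chardet')
-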
-
-
-
The "Short Names" field lists the identifiers that can be used with the
-get_lexer_by_name() function.
-
These lexers are builtin and can be imported from pygments.lexers:
Fancy is a self-hosted, pure object-oriented, dynamic,
-class-based, concurrent general-purpose programming language
-running on Rubinius, the Ruby VM.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
fancy, fy
-
-
Filename patterns:
*.fy, *.fancypack
-
-
Mimetypes:
text/x-fancysrc
-
-
-
-
-
IoLexer
-
-
For Io (a small, prototype-based
-programming language) source.
Determines which Unicode characters this lexer allows for identifiers.
-The possible values are:
-
-
none -- only the ASCII letters and numbers are allowed. This
-is the fastest selection.
-
basic -- all Unicode characters from the specification except
-category Lo are allowed.
-
full -- all Unicode characters as specified in the C# specs
-are allowed. Note that this means a considerable slowdown since the
-Lo category has more than 40,000 characters in it!
-
-
The default value is basic.
-
-
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
nemerle
-
-
Filename patterns:
*.n
-
-
Mimetypes:
text/x-nemerle
-
-
-
-
-
VbNetAspxLexer
-
-
Lexer for highlighting Visual Basic.NET within ASP.NET pages.
For Literate Haskell (Bird-style or LaTeX) source.
-
Additional options accepted:
-
-
litstyle
-
If given, must be "bird" or "latex". If not given, the style
-is autodetected: if the first non-whitespace character in the source
-is a backslash or percent character, LaTeX is assumed, else Bird.
Lexer for Racket source code (formerly known as
-PLT Scheme).
-
New in Pygments 1.6.
-
-
-
-
-
Short names:
racket, rkt
-
-
Filename patterns:
*.rkt, *.rktl
-
-
Mimetypes:
text/x-racket, application/x-racket
-
-
-
-
-
SMLLexer
-
-
For the Standard ML language.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
sml
-
-
Filename patterns:
*.sml, *.sig, *.fun
-
-
Mimetypes:
text/x-standardml, application/x-standardml
-
-
-
-
-
SchemeLexer
-
-
A Scheme lexer, parsing a stream and outputting the tokens
-needed to highlight Scheme code.
-This lexer could most probably easily be subclassed to parse
-other Lisp dialects like Common Lisp, Emacs Lisp or AutoLisp.
-
This parser is checked with pastes from the LISP pastebin
-at http://paste.lisp.org/ to cover as much syntax as possible.
-
It supports the full Scheme syntax as defined in R5RS.
-
New in Pygments 0.6.
-
-
-
-
-
Short names:
scheme, scm
-
-
Filename patterns:
*.scm, *.ss
-
-
Mimetypes:
text/x-scheme, application/x-scheme
-
-
-
-
-
-
-
Lexers for hardware description languages
-
SystemVerilogLexer
-
-
-Extends the Verilog lexer to recognise all SystemVerilog keywords from the
-IEEE 1800-2009 standard.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
sv
-
-
Filename patterns:
*.sv, *.svh
-
-
Mimetypes:
text/x-systemverilog
-
-
-
-
-
VerilogLexer
-
-
-For Verilog source code with preprocessor directives.
A Python lexer recognizing Numerical Python builtins.
-
New in Pygments 0.10.
-
-
-
-
-
Short names:
numpy
-
-
Filename patterns:
None
-
-
Mimetypes:
None
-
-
-
-
-
OctaveLexer
-
-
For GNU Octave source code.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
octave
-
-
Filename patterns:
*.m
-
-
Mimetypes:
text/octave
-
-
-
-
-
RConsoleLexer
-
-
For R console transcripts or R CMD BATCH output files.
-
-
-
-
-
Short names:
rconsole, rout
-
-
Filename patterns:
*.Rout
-
-
Mimetypes:
None
-
-
-
-
-
RdLexer
-
-
-Pygments Lexer for R documentation (Rd) files.
-
This is a very minimal implementation, highlighting little more
-than the macros. A description of Rd syntax is found in Writing R
-Extensions
-and Parsing Rd files.
Recreate a token stream formatted with the RawTokenFormatter. This
-lexer raises exceptions during parsing if the token stream in the
-file is malformed.
-
Additional options accepted:
-
-
compress
-
If set to "gz" or "bz2", decompress the token stream with
-the given compression algorithm before lexing (default: "").
-
-
-
-
-
-
Short names:
raw
-
-
Filename patterns:
None
-
-
Mimetypes:
application/x-pygments-tokens
-
-
-
-
-
TextLexer
-
-
"Null" lexer, doesn't highlight anything.
-
-
-
-
-
Short names:
text
-
-
Filename patterns:
*.txt
-
-
Mimetypes:
text/plain
-
-
-
-
-
-
-
Lexers for various SQL dialects and related interactive sessions
-
MySqlLexer
-
-
Special lexer for MySQL.
-
-
-
-
-
Short names:
mysql
-
-
Filename patterns:
None
-
-
Mimetypes:
text/x-mysql
-
-
-
-
-
PlPgsqlLexer
-
-
Handle the extra syntax in Pl/pgSQL language.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
plpgsql
-
-
Filename patterns:
None
-
-
Mimetypes:
text/x-plpgsql
-
-
-
-
-
PostgresConsoleLexer
-
-
Lexer for psql sessions.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
psql, postgresql-console, postgres-console
-
-
Filename patterns:
None
-
-
Mimetypes:
text/x-postgresql-psql
-
-
-
-
-
PostgresLexer
-
-
Lexer for the PostgreSQL dialect of SQL.
-
New in Pygments 1.5.
-
-
-
-
-
Short names:
postgresql, postgres
-
-
Filename patterns:
None
-
-
Mimetypes:
text/x-postgresql
-
-
-
-
-
SqlLexer
-
-
Lexer for Structured Query Language. Currently, this lexer does
-not recognize any special syntax except ANSI SQL.
-
-
-
-
-
Short names:
sql
-
-
Filename patterns:
*.sql
-
-
Mimetypes:
text/x-sql
-
-
-
-
-
SqliteConsoleLexer
-
-
Lexer for example sessions using sqlite3.
-
New in Pygments 0.11.
-
-
-
-
-
Short names:
sqlite3
-
-
Filename patterns:
*.sqlite3-console
-
-
Mimetypes:
text/x-sqlite3-console
-
-
-
-
-
-
-
Lexers for various template engines' markup
-
CheetahHtmlLexer
-
-
-Subclass of the CheetahLexer that highlights unlexed data
-with the HtmlLexer.
-
-
-
-
-
Short names:
html+cheetah, html+spitfire
-
-
Filename patterns:
None
-
-
Mimetypes:
text/html+cheetah, text/html+spitfire
-
-
-
-
-
CheetahJavascriptLexer
-
-
-Subclass of the CheetahLexer that highlights unlexed data
-with the JavascriptLexer.
Generic Cheetah templates lexer. Code that isn't Cheetah
-markup is yielded as Token.Other. This also works for
-Spitfire templates, which use the same syntax.
-
-
-
-
-
Short names:
cheetah, spitfire
-
-
Filename patterns:
*.tmpl, *.spt
-
-
Mimetypes:
application/x-cheetah, application/x-spitfire
-
-
-
-
-
CheetahXmlLexer
-
-
-Subclass of the CheetahLexer that highlights unlexed data
-with the XmlLexer.
-
-
-
-
-
Short names:
xml+cheetah, xml+spitfire
-
-
Filename patterns:
None
-
-
Mimetypes:
application/xml+cheetah, application/xml+spitfire
-
-
-
-
-
ColdfusionHtmlLexer
-
-
-ColdFusion markup in HTML.
-
-
-
-
-
Short names:
cfm
-
-
Filename patterns:
*.cfm, *.cfml, *.cfc
-
-
Mimetypes:
application/x-coldfusion
-
-
-
-
-
ColdfusionLexer
-
-
-ColdFusion statements.
-
-
-
-
-
Short names:
cfs
-
-
Filename patterns:
None
-
-
Mimetypes:
None
-
-
-
-
-
CssDjangoLexer
-
-
Subclass of the DjangoLexer that highlights unlexed data with the
-CssLexer.
-
-
-
-
-
Short names:
css+django, css+jinja
-
-
Filename patterns:
None
-
-
Mimetypes:
text/css+django, text/css+jinja
-
-
-
-
-
CssErbLexer
-
-
Subclass of ErbLexer which highlights unlexed data with the CssLexer.
-
-
-
-
-
Short names:
css+erb, css+ruby
-
-
Filename patterns:
None
-
-
Mimetypes:
text/css+ruby
-
-
-
-
-
CssGenshiLexer
-
-
A lexer that highlights CSS definitions in genshi text templates.
-
-
-
-
-
Short names:
css+genshitext, css+genshi
-
-
Filename patterns:
None
-
-
Mimetypes:
text/css+genshi
-
-
-
-
-
CssPhpLexer
-
-
Subclass of PhpLexer which highlights unmatched data with the CssLexer.
-
-
-
-
-
Short names:
css+php
-
-
Filename patterns:
None
-
-
Mimetypes:
text/css+php
-
-
-
-
-
CssSmartyLexer
-
-
Subclass of the SmartyLexer that highlights unlexed data with the
-CssLexer.
DarcsPatchLexer is a lexer for the various versions of the darcs patch
-format. Examples of this format are derived by commands such as
-darcs annotate --patch and darcs send.
-
New in Pygments 0.10.
-
-
-
-
-
Short names:
dpatch
-
-
Filename patterns:
*.dpatch, *.darcspatch
-
-
Mimetypes:
None
-
-
-
-
-
DebianControlLexer
-
-
Lexer for Debian control files and apt-cache show <pkg> outputs.
-
New in Pygments 0.9.
-
-
-
-
-
Short names:
control
-
-
Filename patterns:
control
-
-
Mimetypes:
None
-
-
-
-
-
DiffLexer
-
-
Lexer for unified or context-style diffs or patches.
Highlight the contents of .. sourcecode:: language and
-.. code:: language directives with a lexer for the given
-language (default: True). New in Pygments 0.8.
For MXML markup.
-Nested AS3 in <script> tags is highlighted by the appropriate lexer.
-
New in Pygments 1.1.
-
-
-
-
-
Short names:
mxml
-
-
Filename patterns:
*.mxml
-
-
Mimetypes:
None
-
-
-
-
-
ObjectiveJLexer
-
-
For Objective-J source code with preprocessor directives.
-
New in Pygments 1.3.
-
-
-
-
-
Short names:
objective-j, objectivej, obj-j, objj
-
-
Filename patterns:
*.j
-
-
Mimetypes:
text/x-objective-j
-
-
-
-
-
PhpLexer
-
-
For PHP source code.
-For PHP embedded in HTML, use the HtmlPhpLexer.
-
Additional options accepted:
-
-
startinline
-
-If given and True, the lexer starts highlighting with
-PHP code (i.e. no starting <?php is required). The default
-is False.
-
funcnamehighlighting
-
If given and True, highlight builtin function names
-(default: True).
-
disabledmodules
-
If given, must be a list of module names whose function names
-should not be highlighted. By default all modules are highlighted
-except the special 'unknown' module that includes functions
-that are known to php but are undocumented.
-
-To get a list of allowed modules, have a look into the
-_phpbuiltins module.
From Pygments 0.7, the source distribution ships a Moin parser plugin that
-can be used to get Pygments highlighting in Moin wiki pages.
-
To use it, copy the file external/moin-parser.py from the Pygments
-distribution to the data/plugin/parser subdirectory of your Moin instance.
-Edit the options at the top of the file (currently ATTACHMENTS and
-INLINESTYLES) and rename the file to the name that the parser directive
-should have. For example, if you name the file code.py, you can get a
-highlighted Python code sample with this Wiki markup:
-
-{{{
-#!code python
-[...]
-}}}
-
-
where python is the Pygments name of the lexer to use.
-
Additionally, if you set the ATTACHMENTS option to True, Pygments will also
-be called for all attachments for whose filenames there is no other parser
-registered.
-
You are responsible for including CSS rules that will map the Pygments CSS
-classes to colors. You can output a stylesheet file with pygmentize, put it
-into the htdocs directory of your Moin instance and then include it in the
-stylesheets configuration option in the Moin config, e.g.:
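-
-A sketch of such an entry (the stylesheet path is illustrative; check your
-Moin version's documentation for the exact option name):
-
-stylesheets = [('screen', '/htdocs/pygments.css')]
-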
If you want to extend Pygments without hacking the sources, but want to
-use the lexer/formatter/style/filter lookup functions (lexers.get_lexer_by_name
-et al.), you can use setuptools entrypoints to add new lexers, formatters
-or styles as if they were in the Pygments core.
-
That means you can use your highlighter modules with the pygmentize script,
-which relies on the mentioned functions.
-
-
Entrypoints
-
Here is a list of setuptools entrypoints that Pygments understands:
-
pygments.lexers
-
-
This entrypoint is used for adding new lexers to the Pygments core.
-The names of the entrypoint values don't really matter; Pygments extracts
-the required metadata from the class definition:
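-
-A sketch of a corresponding setup.py (the module and class names are
-illustrative):
-
-from setuptools import setup
-
-setup(
-    name='yourlexer',
-    py_modules=['yourlexer'],
-    entry_points='''
-    [pygments.lexers]
-    yourlexer = yourlexer:YourLexer
-    ''',
-)
-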
Note that you have to define name, aliases and filename
-attributes so that you can use the highlighter from the command line:
-
-    class YourLexer(...):
-        name = 'Name Of Your Lexer'
-        aliases = ['alias']
-        filenames = ['*.ext']
-
-
-
pygments.formatters
-
-
You can use this entrypoint to add new formatters to Pygments. The
-name of an entrypoint item is the name of the formatter. If you
-prefix the name with a slash it's used as a filename pattern:
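-
-For example (the names and the pattern entry are illustrative):
-
-[pygments.formatters]
-yourformatter = yourformatter:YourFormatter
-/*.fmt = yourformatter:YourFormatter
-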
This documentation doesn't explain how to use those entrypoints because this is
-covered in the setuptools documentation. That page should cover everything
-you need to write a plugin.
-
-
-
Extending The Core
-
If you have written a Pygments plugin that is open source, please inform us
-about that. There is a high chance that we'll add it to the Pygments
-distribution.
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/quickstart.html b/vendor/pygments/docs/build/quickstart.html
deleted file mode 100644
index 234aa80..0000000
--- a/vendor/pygments/docs/build/quickstart.html
+++ /dev/null
@@ -1,390 +0,0 @@
-
-
-
- Introduction and Quickstart — Pygments
-
-
-
-
-
Welcome to Pygments! This document explains the basic concepts and terms and
-gives a few examples of how to use the library.
-
-
Architecture
-
There are four types of components that work together highlighting a piece of
-code:
-
-
A lexer splits the source into tokens, fragments of the source that
-have a token type that determines what the text represents semantically
-(e.g., keyword, string, or comment). There is a lexer for every language
-or markup format that Pygments supports.
-
The token stream can be piped through filters, which usually modify
-the token types or text fragments, e.g. uppercasing all keywords.
-
A formatter then takes the token stream and writes it to an output
-file, in a format such as HTML, LaTeX or RTF.
-
While writing the output, a style determines how to highlight all the
-different token types. It maps them to attributes like "red and bold".
-
-
-
-
Example
-
Here is a small example for highlighting Python code:
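-
-(a minimal version; the output is HTML wrapped in a div with class
-"highlight")
-
-from pygments import highlight
-from pygments.lexers import PythonLexer
-from pygments.formatters import HtmlFormatter
-
-code = 'print "Hello World"'
-print(highlight(code, PythonLexer(), HtmlFormatter()))
-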
As you can see, Pygments uses CSS classes (by default, but you can change that)
-instead of inline styles in order to avoid outputting redundant style information over
-and over. A CSS stylesheet that contains all CSS classes possibly used in the output
-can be produced by:
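-
-(via get_style_defs(), whose argument is used as a CSS selector prefix)
-
-print(HtmlFormatter().get_style_defs('.highlight'))
-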
The highlight() function supports a fourth argument called outfile; it must be
-a file object if given. The formatted output will then be written to this file
-instead of being returned as a string.
-
Lexers and formatters both support options. They are given to them as keyword
-arguments either to the class or to the lookup method:
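-
-(roughly like this, reusing the code string from the example above)
-
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-
-lexer = get_lexer_by_name('python', stripall=True)
-formatter = HtmlFormatter(linenos=True, cssclass='source')
-result = highlight(code, lexer, formatter)
-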
This makes the lexer strip all leading and trailing whitespace from the input
-(stripall option), lets the formatter output line numbers (linenos option),
-and sets the wrapping <div>'s class to source (instead of
-highlight).
-
Important options include:
-
-
encoding : for lexers and formatters
-
Since Pygments uses Unicode strings internally, this determines which
-encoding will be used to convert to or from byte strings.
-
style : for formatters
-
The name of the style to use when writing the output.
-
-
For an overview of builtin lexers and formatters and their options, visit the
-lexer and formatters lists.
All these functions accept keyword arguments; they will be passed to the lexer
-as options.
-
A similar API is available for formatters: use get_formatter_by_name() and
-get_formatter_for_filename() from the pygments.formatters module
-for this purpose.
-
-
-
Guessing lexers
-
If you don't know the content of the file, or you want to highlight a file
-whose extension is ambiguous, such as .html (which could contain plain HTML
-or some template tags), use these functions:
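-
-A sketch of an interactive session:
-
->>> from pygments.lexers import guess_lexer, guess_lexer_for_filename
->>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
-<pygments.lexers.PythonLexer>
->>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
-<pygments.lexers.PythonLexer>
-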
guess_lexer() passes the given content to the lexer classes' analyse_text()
-method and returns the one for which it returns the highest number.
-
All lexers have two different filename pattern lists: the primary and the
-secondary one. The get_lexer_for_filename() function only uses the primary
-list, whose entries are supposed to be unique among all lexers.
-guess_lexer_for_filename(), however, will first loop through all lexers and
-look at the primary and secondary filename patterns if the filename matches.
-If only one lexer matches, it is returned, else the guessing mechanism of
-guess_lexer() is used with the matching lexers.
-
As usual, keyword arguments to these functions are given to the created lexer
-as options.
-
-
-
Command line usage
-
You can use Pygments from the command line, using the pygmentize script:
-
-$ pygmentize test.py
-
-
will highlight the Python file test.py using ANSI escape sequences
-(a.k.a. terminal colors) and print the result to standard output.
-
To output HTML, use the -f option:
-
-$ pygmentize -f html -o test.html test.py
-
-
to write an HTML-highlighted version of test.py to the file test.html.
-Note that it will only be a snippet of HTML; if you want a full HTML document,
-use the "full" option:
-
-$ pygmentize -f html -O full -o test.html test.py
-
-
-This will produce a full HTML document with an included stylesheet.
-
A style can be selected with -Ostyle=<name>.
-
If you need a stylesheet for an existing HTML file using Pygments CSS classes,
-it can be created with:
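-
-For example, to dump the default style's CSS rules into a file:
-
-$ pygmentize -S default -f html > style.css
-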
Many Python people use ReST for documenting their sourcecode, programs,
-scripts et cetera. This also means that documentation often includes sourcecode
-samples or snippets.
-
You can easily enable Pygments support for your ReST texts using a custom
-directive -- this is also how this documentation displays source code.
-
From Pygments 0.9, the directive is shipped in the distribution as
-external/rst-directive.py. You can copy and adapt this code to your liking.
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/styles.html b/vendor/pygments/docs/build/styles.html
deleted file mode 100644
index 0a897de..0000000
--- a/vendor/pygments/docs/build/styles.html
+++ /dev/null
@@ -1,341 +0,0 @@
-
-
-
- Styles — Pygments
-
-
-
-
-
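-Creating an own style is a matter of subclassing pygments.style.Style and
-filling in a styles dictionary; a sketch (the color values are illustrative):
-
-from pygments.style import Style
-from pygments.token import Comment, Keyword, Name, String
-
-class YourStyle(Style):
-    default_style = ""
-    styles = {
-        Comment:        'italic #888',
-        Keyword:        'bold #005',
-        Name:           '#f00',
-        Name.Function:  '#0f0',
-        Name.Class:     'bold #0f0',
-        String:         'bg:#eee #111',
-    }
-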
That's it. There are just a few rules. When you define a style for Name,
-the style automatically also affects Name.Function and so on. If you
-define 'bold' and you don't want boldface for a subtoken, use 'nobold'.
-
(Philosophy: the styles aren't written in CSS syntax since this way
-they can be used for a variety of formatters.)
-
default_style is the style inherited by all token types.
To make the style usable for Pygments, you must either register it as a plugin
-(see the plugin docs) or drop it into the styles subpackage of your Pygments
-distribution, one style class per style, where the file name is the style name
-and the class name is StylenameClass. For example, if your style should be
-called "mondrian", name the class MondrianStyle, put it into the file
-mondrian.py and this file into the pygments.styles subpackage
-directory.
-
-
-
-
Style Rules
-
-Here is a small overview of all allowed styles:
-
-
bold
-
render text as bold
-
nobold
-
don't render text as bold (to prevent subtokens being highlighted bold)
-
italic
-
render text italic
-
noitalic
-
don't render text as italic
-
underline
-
render text underlined
-
nounderline
-
don't render text underlined
-
bg:
-
transparent background
-
bg:#000000
-
background color (black)
-
border:
-
no border
-
border:#ffffff
-
border color (white)
-
#ff0000
-
text color (red)
-
noinherit
-
don't inherit styles from supertoken
-
-
Note that there may not be a space between bg: and the color value
-since the style definition string is split at whitespace.
-Also, using named colors is not allowed since the supported color names
-vary for different formatters.
-
-Furthermore, not every formatter might support every style rule.
-
-
-
Builtin Styles
-
Pygments ships some builtin styles which are maintained by the Pygments team.
-
To get a list of known styles you can use this snippet:
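-
-(get_all_styles() yields the names of all registered styles, builtin and
-plugin styles alike.)
-
->>> from pygments.styles import get_all_styles
->>> styles = list(get_all_styles())
-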
In principle, you can create an unlimited number of token types, but nobody can
-guarantee that a style will define rules for every token type. Because of
-that, Pygments proposes some global token types defined in the
-pygments.token.STANDARD_TYPES dict.
Inside the pygments.token module the following aliases are defined:
-
-
-
-
-
-
-
-
Text
-
Token.Text
-
for any type of text data
-
-
Whitespace
-
Token.Text.Whitespace
-
for specially highlighted whitespace
-
-
Error
-
Token.Error
-
represents lexer errors
-
-
Other
-
Token.Other
-
special token for data not
-matched by a parser (e.g. HTML
-markup in PHP code)
-
-
Keyword
-
Token.Keyword
-
any kind of keywords
-
-
Name
-
Token.Name
-
variable/function names
-
-
Literal
-
Token.Literal
-
-for any literals
-
-
String
-
Token.Literal.String
-
string literals
-
-
Number
-
Token.Literal.Number
-
number literals
-
-
Operator
-
Token.Operator
-
operators (+, not...)
-
-
Punctuation
-
Token.Punctuation
-
punctuation ([, (...)
-
-
Comment
-
Token.Comment
-
any kind of comments
-
-
Generic
-
Token.Generic
-
generic tokens (have a look at
-the explanation below)
-
-
-
-
The Whitespace token type is new in Pygments 0.8. It is used only by the
-VisibleWhitespaceFilter currently.
-
-Normally you just create token types using the already defined aliases. For each
-of those token aliases, a number of subtypes exists (excluding the special tokens
-Token.Text, Token.Error and Token.Other).
-
The is_token_subtype() function in the pygments.token module can be used to
-test if a token type is a subtype of another (such as Name.Tag and Name).
-(This is the same as Name.Tag in Name. The overloaded in operator was newly
-introduced in Pygments 0.7; the function still exists for backwards
-compatibility.)
-
With Pygments 0.7, it's also possible to convert strings to token types (for example
-if you want to supply a token from the command line):
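-
-A sketch of an interactive session:
-
->>> from pygments.token import String, string_to_tokentype
->>> string_to_tokentype("String")
-Token.Literal.String
->>> string_to_tokentype("Token.Literal.String")
-Token.Literal.String
->>> string_to_tokentype(String)
-Token.Literal.String
-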
For any kind of keyword (especially if it doesn't match any of the
-subtypes of course).
-
Keyword.Constant
-
For keywords that are constants (e.g. None in future Python versions).
-
Keyword.Declaration
-
For keywords used for variable declaration (e.g. var in some programming
-languages like JavaScript).
-
Keyword.Namespace
-
-For keywords used for namespace declarations (e.g. import in Python and
-Java, and package in Java).
-
Keyword.Pseudo
-
For keywords that aren't really keywords (e.g. None in old Python
-versions).
-
Keyword.Reserved
-
For reserved keywords.
-
Keyword.Type
-
For builtin types that can't be used as identifiers (e.g. int,
-char etc. in C).
-
-
-
-
Name Tokens
-
-
Name
-
For any name (variable names, function names, classes).
-
Name.Attribute
-
For all attributes (e.g. in HTML tags).
-
Name.Builtin
-
Builtin names; names that are available in the global namespace.
-
Name.Builtin.Pseudo
-
Builtin names that are implicit (e.g. self in Ruby, this in Java).
-
Name.Class
-
-Class names. Because no lexer can know if a name is a class or a function
-or something else, this token is meant for class declarations.
-
Name.Constant
-
Token type for constants. In some languages you can recognise a token by the
-way it's defined (the value after a const keyword for example). In
-other languages constants are uppercase by definition (Ruby).
-
Name.Decorator
-
-Token type for decorators. Decorators are syntactic elements in the Python
-language. Similar syntax elements exist in C# and Java.
-
Name.Entity
-
-Token type for special entities (e.g. in HTML).
-
Name.Exception
-
Token type for exception names (e.g. RuntimeError in Python). Some languages
-define exceptions in the function signature (Java). You can highlight
-the name of that exception using this token then.
-
Name.Function
-
Token type for function names.
-
Name.Label
-
Token type for label names (e.g. in languages that support goto).
-
Name.Namespace
-
-Token type for namespaces (e.g. import paths in Java/Python), and for names
-following the module/namespace keyword in other languages.
-
Name.Other
-
Other names. Normally unused.
-
Name.Tag
-
Tag names (in HTML/XML markup or configuration files).
-
Name.Variable
-
Token type for variables. Some languages have prefixes for variable names
-(PHP, Ruby, Perl). You can highlight them using this token.
-
Name.Variable.Class
-
same as Name.Variable but for class variables (also static variables).
-
Name.Variable.Global
-
same as Name.Variable but for global variables (used in Ruby, for
-example).
-
Name.Variable.Instance
-
same as Name.Variable but for instance variables.
-
-
-
-
Literals
-
-
Literal
-
For any literal (if not further defined).
-
Literal.Date
-
for date literals (e.g. 42d in Boo).
-
String
-
For any string literal.
-
String.Backtick
-
Token type for strings enclosed in backticks.
-
String.Char
-
Token type for single characters (e.g. Java, C).
-
String.Doc
-
Token type for documentation strings (for example Python).
-
String.Double
-
Double quoted strings.
-
String.Escape
-
Token type for escape sequences in strings.
-
String.Heredoc
-
Token type for "heredoc" strings (e.g. in Ruby or Perl).
-
String.Interpol
-
Token type for interpolated parts in strings (e.g. #{foo} in Ruby).
-
String.Other
-
Token type for any other strings (for example %q{foo} string constructs
-in Ruby).
-
String.Regex
-
Token type for regular expression literals (e.g. /foo/ in JavaScript).
-
String.Single
-
Token type for single quoted strings.
-
String.Symbol
-
Token type for symbols (e.g. :foo in LISP or Ruby).
-
Number
-
Token type for any number literal.
-
Number.Float
-
Token type for float literals (e.g. 42.0).
-
Number.Hex
-
Token type for hexadecimal number literals (e.g. 0xdeadbeef).
-
Number.Integer
-
Token type for integer literals (e.g. 42).
-
Number.Integer.Long
-
Token type for long integer literals (e.g. 42L in Python).
-
Number.Oct
-
Token type for octal literals.
-
-
-
-
Operators
-
-
Operator
-
For any punctuation operator (e.g. +, -).
-
Operator.Word
-
For any operator that is a word (e.g. not).
-
-
-
-
Punctuation
-
New in Pygments 0.7.
-
-
Punctuation
-
For any punctuation which is not an operator (e.g. [, (...)
-
-
-
-
Comments
-
-
Comment
-
Token type for any comment.
-
Comment.Multiline
-
Token type for multiline comments.
-
Comment.Preproc
-
Token type for preprocessor comments (also <?php/<% constructs).
-
Comment.Single
-
Token type for comments that end at the end of a line (e.g. # foo).
-
Comment.Special
-
Special data in comments. For example code tags, author and license
-information, etc.
-
-
-
-
Generic Tokens
-
-Generic tokens are for special lexers like the DiffLexer, which doesn't really
-highlight a programming language but a patch file.
-
-
Generic
-
A generic, unstyled token. Normally you don't use this token type.
-
Generic.Deleted
-
Marks the token value as deleted.
-
Generic.Emph
-
Marks the token value as emphasized.
-
Generic.Error
-
Marks the token value as an error message.
-
Generic.Heading
-
Marks the token value as headline.
-
Generic.Inserted
-
Marks the token value as inserted.
-
Generic.Output
-
Marks the token value as program output (e.g. for python cli lexer).
-
Generic.Prompt
-
Marks the token value as command prompt (e.g. bash lexer).
-
Generic.Strong
-
Marks the token value as bold (e.g. for rst lexer).
-
Generic.Subheading
-
Marks the token value as subheadline.
-
Generic.Traceback
-
Marks the token value as a part of an error traceback.
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/build/unicode.html b/vendor/pygments/docs/build/unicode.html
deleted file mode 100644
index ba7784a..0000000
--- a/vendor/pygments/docs/build/unicode.html
+++ /dev/null
@@ -1,249 +0,0 @@
-
-
-
- Unicode and Encodings — Pygments
-
-
-
-
-
Since Pygments 0.6, all lexers use unicode strings internally. Because of that
-you might encounter the occasional UnicodeDecodeError if you pass strings with the
-wrong encoding.
-
-By default all lexers have their input encoding set to latin1.
-If you pass a lexer a string object (not unicode), it tries to decode the data
-using this encoding.
-You can override the encoding using the encoding lexer option. If you have the
-chardet library installed and set the encoding to chardet, it will analyse
-the text and automatically use the encoding it thinks is the right one:
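-
-A sketch (this requires the chardet package to be installed):
-
-from pygments.lexers import PythonLexer
-lexer = PythonLexer(encoding='chardet')
-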
The best way is to pass Pygments unicode objects. In that case you can't get
-unexpected output.
-
The formatters now send Unicode objects to the stream if you don't set the
-output encoding. You can do so by passing the formatters an encoding option:
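-
-For example (any codec name known to Python can be used):
-
-from pygments.formatters import HtmlFormatter
-f = HtmlFormatter(encoding='utf-8')
-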
You will have to set this option if you have non-ASCII characters in the
-source and the output stream does not accept Unicode written to it!
-This is the case for all regular files and for terminals.
-
Note: The Terminal formatter tries to be smart: if its output stream has an
-encoding attribute, and you haven't set the option, it will encode any
-Unicode string with this encoding before writing it. This is the case for
-sys.stdout, for example. The other formatters don't have that behavior.
-
Another note: If you call Pygments via the command line (pygmentize),
-encoding is handled differently, see the command line docs.
-
New in Pygments 0.7: the formatters now also accept an outencoding option
-which will override the encoding option if given. This makes it possible to
-use a single options dict with lexers and formatters, and still have different
-input and output encodings.
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/pygments/docs/generate.py b/vendor/pygments/docs/generate.py
deleted file mode 100755
index f540507..0000000
--- a/vendor/pygments/docs/generate.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Generate Pygments Documentation
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Generates a bunch of html files containing the documentation.
-
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import os
-import sys
-from datetime import datetime
-from cgi import escape
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-from docutils.core import publish_parts
-from docutils.writers import html4css1
-
-from jinja2 import Template
-
-# try to use the right Pygments to build the docs
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-
-from pygments import highlight, __version__
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-
-
-LEXERDOC = '''
-`%s`
-%s
- :Short names: %s
- :Filename patterns: %s
- :Mimetypes: %s
-
-'''
-
-def generate_lexer_docs():
- from pygments.lexers import LEXERS
-
- out = []
-
- modules = {}
- moduledocstrings = {}
- for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]):
- module = data[0]
- mod = __import__(module, None, None, [classname])
- cls = getattr(mod, classname)
- if not cls.__doc__:
- print "Warning: %s does not have a docstring." % classname
- modules.setdefault(module, []).append((
- classname,
- cls.__doc__,
- ', '.join(data[2]) or 'None',
- ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
- ', '.join(data[4]) or 'None'))
- if module not in moduledocstrings:
- moduledocstrings[module] = mod.__doc__
-
- for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]):
- heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
- out.append('\n' + heading + '\n' + '-'*len(heading) + '\n')
- for data in lexers:
- out.append(LEXERDOC % data)
- return ''.join(out).decode('utf-8')
-
-def generate_formatter_docs():
- from pygments.formatters import FORMATTERS
-
- out = []
- for cls, data in sorted(FORMATTERS.iteritems(),
- key=lambda x: x[0].__name__):
- heading = cls.__name__
- out.append('`' + heading + '`\n' + '-'*(2+len(heading)) + '\n')
- out.append(cls.__doc__)
- out.append('''
- :Short names: %s
- :Filename patterns: %s
-
-
-''' % (', '.join(data[1]) or 'None', ', '.join(data[2]).replace('*', '\\*') or 'None'))
- return ''.join(out).decode('utf-8')
-
-def generate_filter_docs():
- from pygments.filters import FILTERS
-
- out = []
- for name, cls in FILTERS.iteritems():
- out.append('''
-`%s`
-%s
- :Name: %s
-''' % (cls.__name__, cls.__doc__, name))
- return ''.join(out).decode('utf-8')
-
-def generate_changelog():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'CHANGES'))
- f = file(fn)
- result = []
- in_header = False
- header = True
- for line in f:
- if header:
- if not in_header and line.strip():
- in_header = True
- elif in_header and not line.strip():
- header = False
- else:
- result.append(line.rstrip())
- f.close()
- return '\n'.join(result).decode('utf-8')
-
-def generate_authors():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'AUTHORS'))
- f = file(fn)
- r = f.read().rstrip().decode('utf-8')
- f.close()
- return r
-
-LEXERDOCS = generate_lexer_docs()
-FORMATTERDOCS = generate_formatter_docs()
-FILTERDOCS = generate_filter_docs()
-CHANGELOG = generate_changelog()
-AUTHORS = generate_authors()
-
-
-PYGMENTS_FORMATTER = HtmlFormatter(style='pastie', cssclass='syntax')
-
-USAGE = '''\
-Usage: %s <mode> <destination> [<source.txt> ...]
-
-Generate either python or html files out of the documentation.
-
-Mode can either be python or html.\
-''' % sys.argv[0]
-
-TEMPLATE = '''\
-
-
-
- {{ title }} — Pygments
-
-
-
-
-
-
Pygments
-
{{ title }}
- {% if file_id != "index" %}
- « Back To Index
- {% endif %}
- {% if toc %}
-
-
-
-\
-'''
-
-STYLESHEET = '''\
-body {
- background-color: #f2f2f2;
- margin: 0;
- padding: 0;
- font-family: 'Georgia', serif;
- color: #111;
-}
-
-#content {
- background-color: white;
- padding: 20px;
- margin: 20px auto 20px auto;
- max-width: 800px;
- border: 4px solid #ddd;
-}
-
-h1 {
- font-weight: normal;
- font-size: 40px;
- color: #09839A;
-}
-
-h2 {
- font-weight: normal;
- font-size: 30px;
- color: #C73F00;
-}
-
-h1.heading {
- margin: 0 0 30px 0;
-}
-
-h2.subheading {
- margin: -30px 0 0 45px;
-}
-
-h3 {
- margin-top: 30px;
-}
-
-table.docutils {
- border-collapse: collapse;
- border: 2px solid #aaa;
- margin: 0.5em 1.5em 0.5em 1.5em;
-}
-
-table.docutils td {
- padding: 2px;
- border: 1px solid #ddd;
-}
-
-p, li, dd, dt, blockquote {
- font-size: 15px;
- color: #333;
-}
-
-p {
- line-height: 150%;
- margin-bottom: 0;
- margin-top: 10px;
-}
-
-hr {
- border-top: 1px solid #ccc;
- border-bottom: 0;
- border-right: 0;
- border-left: 0;
- margin-bottom: 10px;
- margin-top: 20px;
-}
-
-dl {
- margin-left: 10px;
-}
-
-li, dt {
- margin-top: 5px;
-}
-
-dt {
- font-weight: bold;
-}
-
-th {
- text-align: left;
-}
-
-a {
- color: #990000;
-}
-
-a:hover {
- color: #c73f00;
-}
-
-pre {
- background-color: #f9f9f9;
- border-top: 1px solid #ccc;
- border-bottom: 1px solid #ccc;
- padding: 5px;
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
-}
-
-tt {
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- color: black;
- padding: 1px 2px 1px 2px;
- background-color: #f0f0f0;
-}
-
-cite {
- /* abusing <cite>, it's generated by ReST for `x` */
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- font-weight: bold;
- font-style: normal;
-}
-
-#backlink {
- float: right;
- font-size: 11px;
- color: #888;
-}
-
-div.toc {
- margin: 0 0 10px 0;
-}
-
-div.toc h2 {
- font-size: 20px;
-}
-''' #'
-
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found
- lexer = get_lexer_by_name('text')
- parsed = highlight(u'\n'.join(content), lexer, PYGMENTS_FORMATTER)
- return [nodes.raw('', parsed, format="html")]
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-directives.register_directive('sourcecode', pygments_directive)
-
-
-def create_translator(link_style):
- class Translator(html4css1.HTMLTranslator):
- def visit_reference(self, node):
- refuri = node.get('refuri')
- if refuri is not None and '/' not in refuri and refuri.endswith('.txt'):
- node['refuri'] = link_style(refuri[:-4])
- html4css1.HTMLTranslator.visit_reference(self, node)
- return Translator
-
-
-class DocumentationWriter(html4css1.Writer):
-
- def __init__(self, link_style):
- html4css1.Writer.__init__(self)
- self.translator_class = create_translator(link_style)
-
- def translate(self):
- html4css1.Writer.translate(self)
- # generate table of contents
- contents = self.build_contents(self.document)
- contents_doc = self.document.copy()
- contents_doc.children = contents
- contents_visitor = self.translator_class(contents_doc)
- contents_doc.walkabout(contents_visitor)
- self.parts['toc'] = self._generated_toc
-
- def build_contents(self, node, level=0):
- sections = []
- i = len(node) - 1
- while i >= 0 and isinstance(node[i], nodes.section):
- sections.append(node[i])
- i -= 1
- sections.reverse()
- toc = []
- for section in sections:
- try:
- reference = nodes.reference('', '', refid=section['ids'][0], *section[0])
- except IndexError:
- continue
- ref_id = reference['refid']
- text = escape(reference.astext())
- toc.append((ref_id, text))
-
- self._generated_toc = [('#%s' % href, caption) for href, caption in toc]
- # no further processing
- return []
-
-
-def generate_documentation(data, link_style):
- writer = DocumentationWriter(link_style)
- data = data.replace('[builtin_lexer_docs]', LEXERDOCS).\
- replace('[builtin_formatter_docs]', FORMATTERDOCS).\
- replace('[builtin_filter_docs]', FILTERDOCS).\
- replace('[changelog]', CHANGELOG).\
- replace('[authors]', AUTHORS)
- parts = publish_parts(
- data,
- writer=writer,
- settings_overrides={
- 'initial_header_level': 3,
- 'field_name_limit': 50,
- }
- )
- return {
- 'title': parts['title'],
- 'body': parts['body'],
- 'toc': parts['toc']
- }
-
-
-def handle_python(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read()
- def urlize(href):
- # create links for the pygments webpage
- if href == 'index.txt':
- return '/docs/'
- else:
- return '/docs/%s/' % href
- parts = generate_documentation(content, urlize)
- result = file(os.path.join(dst, title + '.py'), 'w')
- result.write('# -*- coding: utf-8 -*-\n')
- result.write('"""\n Pygments Documentation - %s\n' % title)
- result.write(' %s\n\n' % ('~' * (24 + len(title))))
- result.write(' Generated on: %s\n"""\n\n' % now)
- result.write('import datetime\n')
- result.write('DATE = %r\n' % now)
- result.write('TITLE = %r\n' % parts['title'])
- result.write('TOC = %r\n' % parts['toc'])
- result.write('BODY = %r\n' % parts['body'])
- result.close()
-
-
-def handle_html(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read().decode('utf-8')
- c = generate_documentation(content, (lambda x: './%s.html' % x))
- result = file(os.path.join(dst, title + '.html'), 'w')
- c['style'] = STYLESHEET + PYGMENTS_FORMATTER.get_style_defs('.syntax')
- c['generation_date'] = now
- c['file_id'] = title
- t = Template(TEMPLATE)
- result.write(t.render(c).encode('utf-8'))
- result.close()
-
-
-def run(handle_file, dst, sources=()):
- path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
- if not sources:
- sources = [os.path.join(path, fn) for fn in os.listdir(path)]
- if not os.path.isdir(dst):
- os.makedirs(dst)
- print 'Making docs for Pygments %s in %s' % (__version__, dst)
- for fn in sources:
- if not os.path.isfile(fn):
- continue
- print 'Processing %s' % fn
- f = open(fn)
- try:
- handle_file(fn, f, dst)
- finally:
- f.close()
-
-
-def main(mode, dst='build/', *sources):
- try:
- handler = {
- 'html': handle_html,
- 'python': handle_python
- }[mode]
- except KeyError:
- print 'Error: unknown mode "%s"' % mode
- sys.exit(1)
- run(handler, os.path.realpath(dst), sources)
-
-
-if __name__ == '__main__':
- if len(sys.argv) == 1:
- print USAGE
- else:
- main(*sys.argv[1:])
diff --git a/vendor/pygments/docs/src/api.txt b/vendor/pygments/docs/src/api.txt
deleted file mode 100644
index 4276eea..0000000
--- a/vendor/pygments/docs/src/api.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-.. -*- mode: rst -*-
-
-=====================
-The full Pygments API
-=====================
-
-This page describes the Pygments API.
-
-High-level API
-==============
-
-Functions from the `pygments` module:
-
-def `lex(code, lexer):`
- Lex `code` with the `lexer` (must be a `Lexer` instance)
- and return an iterable of tokens. Currently, this only calls
- `lexer.get_tokens()`.
-
-def `format(tokens, formatter, outfile=None):`
- Format a token stream (iterable of tokens) `tokens` with the
- `formatter` (must be a `Formatter` instance). The result is
- written to `outfile`, or if that is ``None``, returned as a
- string.
-
-def `highlight(code, lexer, formatter, outfile=None):`
- This is the most high-level highlighting function.
- It combines `lex` and `format` in one function.
-
-
-Functions from `pygments.lexers`:
-
-def `get_lexer_by_name(alias, **options):`
- Return an instance of a `Lexer` subclass that has `alias` in its
- aliases list. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer with that alias is
- found.
-
-def `get_lexer_for_filename(fn, **options):`
- Return a `Lexer` subclass instance that has a filename pattern
- matching `fn`. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer for that filename is
- found.
-
-def `get_lexer_for_mimetype(mime, **options):`
- Return a `Lexer` subclass instance that has `mime` in its mimetype
- list. The lexer is given the `options` at its instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer for that mimetype is
- found.
-
-def `guess_lexer(text, **options):`
- Return a `Lexer` subclass instance that's guessed from the text
- in `text`. For that, the `analyse_text()` method of every known
- lexer class is called with the text as argument, and the lexer
- which returned the highest value will be instantiated and returned.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `guess_lexer_for_filename(filename, text, **options):`
- As `guess_lexer()`, but only lexers which have a pattern in `filenames`
- or `alias_filenames` that matches `filename` are taken into consideration.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `get_all_lexers():`
- Return an iterable over all registered lexers, yielding tuples in the
- format::
-
- (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
-
- *New in Pygments 0.6.*
-
-
-Functions from `pygments.formatters`:
-
-def `get_formatter_by_name(alias, **options):`
- Return an instance of a `Formatter` subclass that has `alias` in its
- aliases list. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter with that alias is
- found.
-
-def `get_formatter_for_filename(fn, **options):`
- Return a `Formatter` subclass instance that has a filename pattern
- matching `fn`. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter for that filename
- is found.
-
-
-Functions from `pygments.styles`:
-
-def `get_style_by_name(name):`
- Return a style class by its short name. The names of the builtin styles
- are listed in `pygments.styles.STYLE_MAP`.
-
- Will raise `pygments.util.ClassNotFound` if no style of that name is found.
-
-def `get_all_styles():`
- Return an iterable over all registered styles, yielding their names.
-
- *New in Pygments 0.6.*
-
-
-Lexers
-======
-
-A lexer (derived from `pygments.lexer.Lexer`) has the following functions:
-
-def `__init__(self, **options):`
- The constructor. Takes a \*\*keywords dictionary of options.
- Every subclass must first process its own options and then call
- the `Lexer` constructor, since it processes the `stripnl`,
- `stripall` and `tabsize` options.
-
- An example looks like this:
-
- .. sourcecode:: python
-
- def __init__(self, **options):
- self.compress = options.get('compress', '')
- Lexer.__init__(self, **options)
-
- As these options must all be specifiable as strings (due to the
- command line usage), there are various utility functions
- available to help with that, see `Option processing`_.
-
-def `get_tokens(self, text):`
- This method is the basic interface of a lexer. It is called by
- the `highlight()` function. It must process the text and return an
- iterable of ``(tokentype, value)`` pairs from `text`.
-
- Normally, you don't need to override this method. The default
- implementation processes the `stripnl`, `stripall` and `tabsize`
- options and then yields all tokens from `get_tokens_unprocessed()`,
- with the ``index`` dropped.
-
-def `get_tokens_unprocessed(self, text):`
- This method should process the text and return an iterable of
- ``(index, tokentype, value)`` tuples where ``index`` is the starting
- position of the token within the input text.
-
- This method must be overridden by subclasses.
-
-def `analyse_text(text):`
- A static method which is called for lexer guessing. It should analyse
- the text and return a float in the range from ``0.0`` to ``1.0``.
- If it returns ``0.0``, the lexer will not be selected as the most
- probable one, if it returns ``1.0``, it will be selected immediately.
-
-For a list of known tokens have a look at the `Tokens`_ page.
-
-A lexer can also have the following attributes (in fact, they are mandatory
-except `alias_filenames`) that are used by the builtin lookup mechanism.
-
-`name`
- Full name for the lexer, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the lexer from a list, e.g. using `get_lexer_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames which contain
- content for this lexer. The patterns in this list should be unique among
- all lexers.
-
-`alias_filenames`
- A list of `fnmatch` patterns that match filenames which may or may not
- contain content for this lexer. This list is used by the
- `guess_lexer_for_filename()` function, to determine which lexers are
- then included in guessing the correct one. That means that e.g. every
- lexer for HTML and a template language should include ``\*.html`` in
- this list.
-
-`mimetypes`
- A list of MIME types for content that can be lexed with this
- lexer.
-
-
-.. _Tokens: tokens.txt
-
-
-Formatters
-==========
-
-A formatter (derived from `pygments.formatter.Formatter`) has the following
-functions:
-
-def `__init__(self, **options):`
- As with lexers, this constructor processes options and then must call
- the base class `__init__`.
-
- The `Formatter` class recognizes the options `style`, `full` and
- `title`. It is up to the formatter class whether it uses them.
-
-def `get_style_defs(self, arg=''):`
- This method must return statements or declarations suitable to define
- the current style for subsequent highlighted text (e.g. CSS classes
- in the `HTMLFormatter`).
-
- The optional argument `arg` can be used to modify the generation and
- is formatter dependent (it is standardized because it can be given on
- the command line).
-
- This method is called by the ``-S`` `command-line option`_, the `arg`
- is then given by the ``-a`` option.
-
-def `format(self, tokensource, outfile):`
- This method must format the tokens from the `tokensource` iterable and
- write the formatted version to the file object `outfile`.
-
- Formatter options can control how exactly the tokens are converted.
-
-.. _command-line option: cmdline.txt
-
-A formatter must have the following attributes that are used by the
-builtin lookup mechanism. (*New in Pygments 0.7.*)
-
-`name`
- Full name for the formatter, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the formatter from a list, e.g. using `get_formatter_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames for which this formatter
- can produce output. The patterns in this list should be unique among
- all formatters.
-
-
-Option processing
-=================
-
-The `pygments.util` module has some utility functions usable for option
-processing:
-
-class `OptionError`
- This exception will be raised by all option processing functions if
- the type or value of the argument is not correct.
-
-def `get_bool_opt(options, optname, default=None):`
- Interpret the key `optname` from the dictionary `options`
- as a boolean and return it. Return `default` if `optname`
- is not in `options`.
-
- The valid string values for ``True`` are ``1``, ``yes``,
- ``true`` and ``on``, the ones for ``False`` are ``0``,
- ``no``, ``false`` and ``off`` (matched case-insensitively).
-
-def `get_int_opt(options, optname, default=None):`
- As `get_bool_opt`, but interpret the value as an integer.
-
-def `get_list_opt(options, optname, default=None):`
- If the key `optname` from the dictionary `options` is a string,
- split it at whitespace and return it. If it is already a list
- or a tuple, it is returned as a list.
-
-def `get_choice_opt(options, optname, allowed, default=None):`
- If the key `optname` from the dictionary is not in the sequence
- `allowed`, raise an error, otherwise return it. *New in Pygments 0.8.*
diff --git a/vendor/pygments/docs/src/authors.txt b/vendor/pygments/docs/src/authors.txt
deleted file mode 100644
index c8c532a..0000000
--- a/vendor/pygments/docs/src/authors.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=======
-Authors
-=======
-
-[authors]
diff --git a/vendor/pygments/docs/src/changelog.txt b/vendor/pygments/docs/src/changelog.txt
deleted file mode 100644
index 6caf0a3..0000000
--- a/vendor/pygments/docs/src/changelog.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=========
-Changelog
-=========
-
-[changelog]
diff --git a/vendor/pygments/docs/src/index.txt b/vendor/pygments/docs/src/index.txt
deleted file mode 100644
index b1e099c..0000000
--- a/vendor/pygments/docs/src/index.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-.. -*- mode: rst -*-
-
-========
-Overview
-========
-
-Welcome to the Pygments documentation.
-
-- Starting with Pygments
-
- - `Installation `_
-
- - `Introduction and Quickstart `_
-
- - `Command line interface `_
-
-- Builtin components
-
- - `Lexers `_
-
- - `Formatters `_
-
- - `Filters `_
-
- - `Styles `_
-
-- Reference
-
- - `Unicode and encodings `_
-
- - `Builtin tokens `_
-
- - `API documentation `_
-
-- Hacking for Pygments
-
- - `Write your own lexer `_
-
- - `Write your own formatter `_
-
- - `Write your own filter `_
-
- - `Register plugins `_
-
-- Hints and Tricks
-
- - `Using Pygments in ReST documents `_
-
- - `Using Pygments with MoinMoin `_
-
- - `Using Pygments in other contexts `_
-
-- About Pygments
-
- - `Changelog `_
-
- - `Authors `_
-
-
---------------
-
-If you find bugs or have suggestions for the documentation, please
-look `here`_ for info on how to contact the team.
-
-You can download an offline version of this documentation from the
-`download page`_.
-
-.. _here: http://pygments.org/contribute/
-.. _download page: http://pygments.org/download/
diff --git a/vendor/pygments/docs/src/installation.txt b/vendor/pygments/docs/src/installation.txt
deleted file mode 100644
index 17a9aad..0000000
--- a/vendor/pygments/docs/src/installation.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-.. -*- mode: rst -*-
-
-============
-Installation
-============
-
-Pygments requires at least Python 2.4 to work correctly. Just to clarify:
-there *won't* ever be support for Python versions below 2.4. However, there
-are no other dependencies.
-
-
-Installing a released version
-=============================
-
-As a Python egg (via easy_install)
-----------------------------------
-
-You can install the most recent Pygments version using `easy_install`_::
-
- sudo easy_install Pygments
-
-This will install a Pygments egg in your Python installation's site-packages
-directory.
-
-
-From the tarball release
--------------------------
-
-1. Download the most recent tarball from the `download page`_
-2. Unpack the tarball
-3. ``sudo python setup.py install``
-
-Note that the last command will automatically download and install
-`setuptools`_ if you don't already have it installed. This requires a working
-internet connection.
-
-This will install Pygments into your Python installation's site-packages directory.
-
-
-Installing the development version
-==================================
-
-If you want to play around with the code
-----------------------------------------
-
-1. Install `Mercurial`_
-2. ``hg clone http://bitbucket.org/birkenfeld/pygments-main pygments``
-3. ``cd pygments``
-4. ``ln -s pygments /usr/lib/python2.X/site-packages``
-5. ``ln -s pygmentize /usr/local/bin``
-
-As an alternative to steps 4 and 5 you can also do ``python setup.py develop``
-which will install the package via setuptools in development mode.
-
-..
- If you just want the latest features and use them
- -------------------------------------------------
-
- ::
-
- sudo easy_install Pygments==dev
-
- This will install a Pygments egg containing the latest Subversion trunk code
- in your Python installation's site-packages directory. Every time the command
- is run, the sources are updated from Subversion.
-
-
-.. _download page: http://pygments.org/download/
-.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
-.. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall
-.. _Mercurial: http://selenic.com/mercurial/
diff --git a/vendor/pygments/docs/src/lexerdevelopment.txt b/vendor/pygments/docs/src/lexerdevelopment.txt
deleted file mode 100644
index 6ffc4b7..0000000
--- a/vendor/pygments/docs/src/lexerdevelopment.txt
+++ /dev/null
@@ -1,551 +0,0 @@
-.. -*- mode: rst -*-
-
-====================
-Write your own lexer
-====================
-
-If a lexer for your favorite language is missing in the Pygments package, you can
-easily write your own and extend Pygments.
-
-All you need can be found inside the `pygments.lexer` module. As you can read in
-the `API documentation `_, a lexer is a class that is initialized with
-some keyword arguments (the lexer options) and that provides a
-`get_tokens_unprocessed()` method which is given a string or unicode object with
-the data to parse.
-
-The `get_tokens_unprocessed()` method must return an iterator or iterable
-containing tuples in the form ``(index, token, value)``. Normally you don't need
-to do this since there are numerous base lexers you can subclass.
-
-
-RegexLexer
-==========
-
-A very powerful (but quite easy to use) lexer is the `RegexLexer`. This lexer
-base class allows you to define lexing rules in terms of *regular expressions*
-for different *states*.
-
-States are groups of regular expressions that are matched against the input
-string at the *current position*. If one of these expressions matches, a
-corresponding action is performed (normally yielding a token with a specific
-type), the current position is set to where the last match ended and the
-matching process continues with the first regex of the current state.
-
-Lexer states are kept in a state stack: each time a new state is entered, the
-new state is pushed onto the stack. The most basic lexers (like the
-`DiffLexer`) just need one state.
-
-Each state is defined as a list of tuples in the form (`regex`, `action`,
-`new_state`) where the last item is optional. In the most basic form, `action`
-is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a
-token with the match text and type `tokentype` and push `new_state` on the state
-stack. If the new state is ``'#pop'``, the topmost state is popped from the
-stack instead. (To pop more than one state, use ``'#pop:2'`` and so on.)
-``'#push'`` is a synonym for pushing the current state on the
-stack.
-
-The following example shows the `DiffLexer` from the builtin lexers. Note that
-it contains some additional attributes `name`, `aliases` and `filenames` which
-aren't required for a lexer. They are used by the builtin lexer lookup
-functions.
-
-.. sourcecode:: python
-
- from pygments.lexer import RegexLexer
- from pygments.token import *
-
- class DiffLexer(RegexLexer):
- name = 'Diff'
- aliases = ['diff']
- filenames = ['*.diff']
-
- tokens = {
- 'root': [
- (r' .*\n', Text),
- (r'\+.*\n', Generic.Inserted),
- (r'-.*\n', Generic.Deleted),
- (r'@.*\n', Generic.Subheading),
- (r'Index.*\n', Generic.Heading),
- (r'=.*\n', Generic.Heading),
- (r'.*\n', Text),
- ]
- }
-
-As you can see this lexer only uses one state. When the lexer starts scanning
-the text, it first checks if the current character is a space. If this is true
-it scans everything until newline and returns the parsed data as a `Text` token.
-
-If this rule doesn't match, it checks if the current char is a plus sign. And
-so on.
-
-If no rule matches at the current position, the current char is emitted as an
-`Error` token that indicates a parsing error, and the position is increased by
-1.
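-
-For example, feeding the lexer a single inserted line shows the resulting
-``(tokentype, value)`` pairs (a sketch of an interactive session):
-
-.. sourcecode:: python
-
-    >>> from pygments.lexers import DiffLexer
-    >>> list(DiffLexer().get_tokens('+new line\n'))
-    [(Token.Generic.Inserted, u'+new line\n')]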
-
-
-Regex Flags
-===========
-
-You can either define regex flags in the regex (``r'(?x)foo bar'``) or by adding
-a `flags` attribute to your lexer class. If no attribute is defined, it defaults
-to `re.MULTILINE`. For more information about regular expression flags, see
-the `regular expressions`_ help page in the Python documentation.
-
-.. _regular expressions: http://docs.python.org/lib/re-syntax.html
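-
-For example, a hypothetical case-insensitive lexer could set the attribute
-like this (a sketch; the keyword list is made up):
-
-.. sourcecode:: python
-
-    import re
-    from pygments.lexer import RegexLexer
-    from pygments.token import Keyword, Text
-
-    class SqlishLexer(RegexLexer):
-        # this replaces the default re.MULTILINE, so re-add it if needed
-        flags = re.MULTILINE | re.IGNORECASE
-
-        tokens = {
-            'root': [
-                (r'\b(?:select|from|where)\b', Keyword),  # also SELECT, Select, ...
-                (r'[\w\W]', Text),  # anything else, one character at a time
-            ]
-        }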
-
-
-Scanning multiple tokens at once
-================================
-
-Here is a more complex lexer that highlights INI files. INI files consist of
-sections, comments and key = value pairs:
-
-.. sourcecode:: python
-
- from pygments.lexer import RegexLexer, bygroups
- from pygments.token import *
-
- class IniLexer(RegexLexer):
- name = 'INI'
- aliases = ['ini', 'cfg']
- filenames = ['*.ini', '*.cfg']
-
- tokens = {
- 'root': [
- (r'\s+', Text),
- (r';.*?$', Comment),
- (r'\[.*?\]$', Keyword),
- (r'(.*?)(\s*)(=)(\s*)(.*?)$',
- bygroups(Name.Attribute, Text, Operator, Text, String))
- ]
- }
-
-The lexer first looks for whitespace, comments and section names. Then it
-looks for a line that looks like a key/value pair, separated by an ``'='``
-sign and surrounded by optional whitespace.
-
-The `bygroups` helper makes sure that each group is yielded with a different
-token type. First the `Name.Attribute` token, then a `Text` token for the
-optional whitespace, after that an `Operator` token for the equals sign, then a
-`Text` token for the whitespace again. The rest of the line is returned as
-`String`.
-
-Note that for this to work, every part of the match must be inside a capturing
-group (a ``(...)``), and there must not be any nested capturing groups. If you
-nevertheless need a group, use a non-capturing group defined using this syntax:
-``r'(?:some|words|here)'`` (note the ``?:`` after the beginning parenthesis).
-
-If you find yourself needing a capturing group inside the regex which
-shouldn't be part of the output but is used in the regular expression for
-backreferencing (eg: ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None`
-to the `bygroups` function and that group will be skipped in the output.
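-
-A sketch of such a rule (the tag names are made up for illustration):
-
-.. sourcecode:: python
-
-    from pygments.lexer import RegexLexer, bygroups
-    from pygments.token import Name, Text
-
-    class TagLexer(RegexLexer):
-        tokens = {
-            'root': [
-                # group 2 exists only for the backreference ``\2``;
-                # passing None skips it in the output
-                (r'(<(foo|bar)>)(.*?)(</\2>)',
-                 bygroups(Name.Tag, None, Text, Name.Tag)),
-            ]
-        }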
-
-
-Changing states
-===============
-
-Many lexers need multiple states to work as expected. For example, some
-languages allow multiline comments to be nested. Since this is a recursive
-pattern it's impossible to lex just using regular expressions.
-
-Here is the solution:
-
-.. sourcecode:: python
-
- from pygments.lexer import RegexLexer
- from pygments.token import *
-
- class ExampleLexer(RegexLexer):
- name = 'Example Lexer with states'
-
- tokens = {
- 'root': [
- (r'[^/]+', Text),
- (r'/\*', Comment.Multiline, 'comment'),
- (r'//.*?$', Comment.Singleline),
- (r'/', Text)
- ],
- 'comment': [
- (r'[^*/]', Comment.Multiline),
- (r'/\*', Comment.Multiline, '#push'),
- (r'\*/', Comment.Multiline, '#pop'),
- (r'[*/]', Comment.Multiline)
- ]
- }
-
-This lexer starts lexing in the ``'root'`` state. It tries to match as much as
-possible until it finds a slash (``'/'``). If the next character after the slash
-is a star (``'*'``) the `RegexLexer` sends those two characters to the output
-stream marked as `Comment.Multiline` and continues parsing with the rules
-defined in the ``'comment'`` state.
-
-If there wasn't a star after the slash, the `RegexLexer` checks if it's a
-singleline comment (eg: followed by a second slash). If this also wasn't the
-case it must be a single slash (the separate regex for a single slash must also
-be given, else the slash would be marked as an error token).
-
-Inside the ``'comment'`` state, we do the same thing again. Scan until the lexer
-finds a star or slash. If it's the opening of a multiline comment, push the
-``'comment'`` state on the stack and continue scanning, again in the
-``'comment'`` state. Else, check if it's the end of the multiline comment. If
-yes, pop one state from the stack.
-
-Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
-easy way to prevent this from happening: don't ``'#pop'`` in the root state).
-
-If the `RegexLexer` encounters a newline that is flagged as an error token, the
-stack is emptied and the lexer continues scanning in the ``'root'`` state. This
-helps producing error-tolerant highlighting for erroneous input, e.g. when a
-single-line string is not closed.
-
-
-Advanced state tricks
-=====================
-
-There are a few more things you can do with states:
-
-- You can push multiple states onto the stack if you give a tuple instead of a
- simple string as the third item in a rule tuple. For example, if you want to
- match a comment containing a directive, something like::
-
-      /* <processing directive>    rest of comment */
-
- you can use this rule:
-
- .. sourcecode:: python
-
- tokens = {
- 'root': [
- (r'/\* <', Comment, ('comment', 'directive')),
- ...
- ],
- 'directive': [
- (r'[^>]*', Comment.Directive),
- (r'>', Comment, '#pop'),
- ],
- 'comment': [
- (r'[^*]+', Comment),
- (r'\*/', Comment, '#pop'),
- (r'\*', Comment),
- ]
- }
-
- When this encounters the above sample, first ``'comment'`` and ``'directive'``
- are pushed onto the stack, then the lexer continues in the directive state
- until it finds the closing ``>``, then it continues in the comment state until
- the closing ``*/``. Then, both states are popped from the stack again and
- lexing continues in the root state.
-
- *New in Pygments 0.9:* The tuple can contain the special ``'#push'`` and
- ``'#pop'`` (but not ``'#pop:n'``) directives.
-
-
-- You can include the rules of a state in the definition of another. This is
- done by using `include` from `pygments.lexer`:
-
- .. sourcecode:: python
-
- from pygments.lexer import RegexLexer, bygroups, include
- from pygments.token import *
-
- class ExampleLexer(RegexLexer):
- tokens = {
- 'comments': [
- (r'/\*.*?\*/', Comment),
- (r'//.*?\n', Comment),
- ],
- 'root': [
- include('comments'),
- (r'(function )(\w+)( {)',
- bygroups(Keyword, Name, Keyword), 'function'),
- (r'.', Text),
- ],
- 'function': [
- (r'[^}/]+', Text),
- include('comments'),
- (r'/', Text),
- (r'}', Keyword, '#pop'),
- ]
- }
-
-  This is a hypothetical lexer for a language that consists of functions and
-  comments. Because comments can occur at toplevel and in functions, we need
-  rules for comments in both states. As you can see, the `include` helper saves
-  repeating rules that occur more than once (in this example, the state
-  ``'comments'`` will never be entered by the lexer, as it's only there to be
-  included in ``'root'`` and ``'function'``).
-
-
-- Sometimes, you may want to "combine" a state from existing ones. This is
-  possible with the `combined` helper from `pygments.lexer`.
-
- If you, instead of a new state, write ``combined('state1', 'state2')`` as the
- third item of a rule tuple, a new anonymous state will be formed from state1
- and state2 and if the rule matches, the lexer will enter this state.
-
- This is not used very often, but can be helpful in some cases, such as the
- `PythonLexer`'s string literal processing.
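-
-  As a sketch (the state names are borrowed from the `PythonLexer`'s actual
-  string handling):
-
-  .. sourcecode:: python
-
-      from pygments.lexer import RegexLexer, combined
-      from pygments.token import String
-
-      class StringishLexer(RegexLexer):
-          tokens = {
-              'root': [
-                  # enter an anonymous state made of 'stringescape'
-                  # plus 'dqs' when a double quote is found
-                  (r'"', String, combined('stringescape', 'dqs')),
-              ],
-              'stringescape': [
-                  (r'\\.', String.Escape),
-              ],
-              'dqs': [
-                  (r'"', String, '#pop'),
-                  (r'[^\\"]+', String),
-              ],
-          }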
-
-- If you want your lexer to start lexing in a different state you can modify
-  the stack by overriding the `get_tokens_unprocessed()` method:
-
- .. sourcecode:: python
-
- from pygments.lexer import RegexLexer
-
- class MyLexer(RegexLexer):
- tokens = {...}
-
- def get_tokens_unprocessed(self, text):
- stack = ['root', 'otherstate']
-              for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
- yield item
-
-  Some lexers like the `PhpLexer` use this to make the leading ``<?php``
-  preprocessor comments optional. Note that you can crash the lexer easily
-  by putting values into the stack that don't exist in the token map. Also
-  removing ``'root'`` from the stack can result in strange errors!
-
-
-Using multiple lexers
-=====================
-
-Using multiple lexers at the same time can be tricky; the easiest way is the
-`DelegatingLexer` described in the next section. But you can also replace the
-token type in a rule tuple with the `using` helper, which lexes the matched
-text with another lexer and yields the resulting tokens. The following
-stripped-down HTML lexer runs the content of ``<script>`` tags through the
-`JavascriptLexer` (the ``'tag'`` state, which parses attributes and the
-closing ``>``, is not shown here):
-
-.. sourcecode:: python
-
-    import re
-    from pygments.lexer import RegexLexer, bygroups, using
-    from pygments.token import *
-    from pygments.lexers.web import JavascriptLexer
-
-    class HtmlLexer(RegexLexer):
-        name = 'HTML'
-        aliases = ['html']
-        filenames = ['*.html', '*.htm']
-
-        flags = re.IGNORECASE | re.DOTALL
-        tokens = {
-            'root': [
-                ('[^<&]+', Text),
-                ('&.*?;', Name.Entity),
-                (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
-                (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
-                (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
-            ],
-            'script-content': [
-                (r'(.+?)(<\s*/\s*script\s*>)',
-                 bygroups(using(JavascriptLexer), Name.Tag),
-                 '#pop'),
-            ]
-        }
-
-Here the content of a ``<script>`` tag up to the ``</script>`` end tag is
-processed by the `JavascriptLexer`, while the end tag is yielded as a normal
-token with the `Name.Tag` type.
-
-As an additional goodie, if the lexer class is replaced by `this` (imported from
-`pygments.lexer`), the "other" lexer will be the current one (because you cannot
-refer to the current class within the code that runs at class definition time).
-
-Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
-Here, two states are pushed onto the state stack, ``'script-content'`` and
-``'tag'``. That means that first ``'tag'`` is processed, which will parse
-attributes and the closing ``>``, then the ``'tag'`` state is popped and the
-next state on top of the stack will be ``'script-content'``.
-
-The `using()` helper has a special keyword argument, `state`, which works as
-follows: if given, the lexer to use initially is not in the ``"root"`` state,
-but in the state given by this argument. This *only* works with a `RegexLexer`.
-
-Any other keyword arguments passed to `using()` are added to the keyword
-arguments used to create the lexer.
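-
-As a sketch of both `using()` with the `state` keyword and `this` (the state
-names and token choices here are invented):
-
-.. sourcecode:: python
-
-    from pygments.lexer import RegexLexer, using, this
-    from pygments.token import String
-
-    class InterpLexer(RegexLexer):
-        tokens = {
-            'root': [
-                # re-lex the matched text with this same lexer, but
-                # starting in the 'interp' state instead of 'root'
-                (r'#\{[^}]*\}', using(this, state='interp')),
-                (r'[^#]+', String),
-                (r'#', String),
-            ],
-            'interp': [
-                (r'[^#{}]+', String.Interpol),
-                (r'[#{}]', String.Interpol),
-            ],
-        }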
-
-
-Delegating Lexer
-================
-
-Another approach for nested lexers is the `DelegatingLexer` which is for
-example used for the template engine lexers. It takes two lexers as
-arguments on initialisation: a `root_lexer` and a `language_lexer`.
-
-The input is processed as follows: First, the whole text is lexed with the
-`language_lexer`. All tokens yielded with a type of ``Other`` are then
-concatenated and given to the `root_lexer`. The language tokens of the
-`language_lexer` are then inserted into the `root_lexer`'s token stream
-at the appropriate positions.
-
-.. sourcecode:: python
-
- from pygments.lexer import DelegatingLexer
- from pygments.lexers.web import HtmlLexer, PhpLexer
-
- class HtmlPhpLexer(DelegatingLexer):
- def __init__(self, **options):
- super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
-
-This procedure ensures that e.g. HTML with template tags in it is highlighted
-correctly even if the template tags are put into HTML tags or attributes.
-
-If you want to change the needle token ``Other`` to something else, you can
-give the lexer another token type as the third parameter:
-
-.. sourcecode:: python
-
- DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
-
-
-Callbacks
-=========
-
-Sometimes the grammar of a language is so complex that a lexer would be unable
-to parse it just by using regular expressions and stacks.
-
-For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
-of token types (`bygroups` and `using` are nothing else but preimplemented
-callbacks). The callback must be a function taking two arguments:
-
-* the lexer itself
-* the match object for the last matched rule
-
-The callback must then return an iterable of (or simply yield) ``(index,
-tokentype, value)`` tuples, which are then just passed through by
-`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
-the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
-and ``value`` the associated part of the input string.
-
-You can see an example here:
-
-.. sourcecode:: python
-
- from pygments.lexer import RegexLexer
- from pygments.token import Generic
-
- class HypotheticLexer(RegexLexer):
-
- def headline_callback(lexer, match):
- equal_signs = match.group(1)
- text = match.group(2)
- yield match.start(), Generic.Headline, equal_signs + text + equal_signs
-
- tokens = {
- 'root': [
- (r'(=+)(.*?)(\1)', headline_callback)
- ]
- }
-
-If the regex for the `headline_callback` matches, the function is called with the
-match object. Note that after the callback is done, processing continues
-normally, that is, after the end of the previous match. The callback has no
-way to influence the current position.
-
-There are not really any simple examples for lexer callbacks, but you can see
-them in action e.g. in the `compiled.py`_ source code in the `CLexer` and
-`JavaLexer` classes.
-
-.. _compiled.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/compiled.py
-
-
-The ExtendedRegexLexer class
-============================
-
-The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
-the funky syntax rules of some languages that will go unnamed, such as Ruby.
-
-But fear not; even then you don't have to abandon the regular expression
-approach. For Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
-All features known from RegexLexers are available here too, and the tokens are
-specified in exactly the same way, *except* for one detail:
-
-The `get_tokens_unprocessed()` method holds its internal state data not as local
-variables, but in an instance of the `pygments.lexer.LexerContext` class, and
-that instance is passed to callbacks as a third argument. This means that you
-can modify the lexer state in callbacks.
-
-The `LexerContext` class has the following members:
-
-* `text` -- the input text
-* `pos` -- the current starting position that is used for matching regexes
-* `stack` -- a list containing the state stack
-* `end` -- the maximum position to which regexes are matched; this defaults
-  to the length of `text`
-
-Additionally, the `get_tokens_unprocessed()` method can be given a
-`LexerContext` instead of a string and will then process this context instead of
-creating a new one for the string argument.
-
-Note that because you can set the current position to anything in the callback,
-it won't automatically be set by the caller after the callback is finished.
-For example, this is how the hypothetical lexer above would be written with the
-`ExtendedRegexLexer`:
-
-.. sourcecode:: python
-
- from pygments.lexer import ExtendedRegexLexer
- from pygments.token import Generic
-
- class ExHypotheticLexer(ExtendedRegexLexer):
-
- def headline_callback(lexer, match, ctx):
- equal_signs = match.group(1)
- text = match.group(2)
- yield match.start(), Generic.Headline, equal_signs + text + equal_signs
- ctx.pos = match.end()
-
- tokens = {
- 'root': [
- (r'(=+)(.*?)(\1)', headline_callback)
- ]
- }
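-
-Running it with an explicit context could then look like this (a sketch):
-
-.. sourcecode:: python
-
-    from pygments.lexer import LexerContext
-
-    lexer = ExHypotheticLexer()
-    ctx = LexerContext('== headline ==', 0)  # pos 0, default 'root' stack
-    tokens = list(lexer.get_tokens_unprocessed(context=ctx))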
-
-This might sound confusing (and it really can be). But it is needed, and for an
-example look at the Ruby lexer in `agile.py`_.
-
-.. _agile.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/agile.py
-
-
-Filtering Token Streams
-=======================
-
-Some languages ship a lot of builtin functions (for example PHP). The total
-amount of those functions differs from system to system because not everybody
-has every extension installed. In the case of PHP there are over 3000 builtin
-functions. That's an incredibly large number of functions, far more than you
-can put into a regular expression.
-
-But because only `Name` tokens can be function names, this is solvable by overriding
-the ``get_tokens_unprocessed()`` method. The following lexer subclasses the
-`PythonLexer` so that it highlights some additional names as pseudo keywords:
-
-.. sourcecode:: python
-
- from pygments.lexers.agile import PythonLexer
- from pygments.token import Name, Keyword
-
- class MyPythonLexer(PythonLexer):
- EXTRA_KEYWORDS = ['foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs']
-
- def get_tokens_unprocessed(self, text):
- for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
- if token is Name and value in self.EXTRA_KEYWORDS:
- yield index, Keyword.Pseudo, value
- else:
- yield index, token, value
-
-The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
-
-**Note:** Do not confuse this with the `filter`_ system.
-
-.. _filter: filters.txt
diff --git a/vendor/pygments/external/autopygmentize b/vendor/pygments/external/autopygmentize
index 85c8dfd..d2f969a 100755
--- a/vendor/pygments/external/autopygmentize
+++ b/vendor/pygments/external/autopygmentize
@@ -1,64 +1,83 @@
-#!/bin/sh
+#!/bin/bash
# Best effort auto-pygmentization with transparent decompression
-# (c) Reuben Thomas 2012-2013
+# by Reuben Thomas 2008-2015
# This program is in the public domain.
# Strategy: first see if pygmentize can find a lexer; if not, ask file; if that finds nothing, fail
-# Set the environment variable PYGMENTIZE_OPTS to configure pygments.
+# Set the environment variable PYGMENTIZE_OPTS or pass options before the file path to configure pygments.
# This program can be used as a .lessfilter for the less pager to auto-color less's output
-lexer=`pygmentize -N "$1"`
-if [ "$lexer" = "text" ]; then
- file_common_opts="--brief --dereference --uncompress"
+file="${!#}" # last argument
+options=${@:1:$(($#-1))} # handle others args as options to pass to pygmentize
- unset lexer
- case `file --mime-type $file_common_opts "$1"` in
- application/xml|image/svg+xml) lexer=xml;;
- text/html) lexer=html;;
- text/troff) lexer=nroff;;
- text/x-asm) lexer=nasm;;
- text/x-awk) lexer=awk;;
- text/x-c) lexer=c;;
- text/x-c++) lexer=cpp;;
- text/x-diff) lexer=diff;;
- text/x-fortran) lexer=fortran;;
- text/x-gawk) lexer=gawk;;
- text/x-java) lexer=java;;
- text/x-lisp) lexer=common-lisp;;
- text/x-lua) lexer=lua;;
- text/x-makefile) lexer=make;;
- text/x-msdos-batch) lexer=bat;;
- text/x-nawk) lexer=nawk;;
- text/x-pascal) lexer=pascal;;
- text/x-perl) lexer=perl;;
- text/x-php) lexer=php;;
- text/x-po) lexer=po;;
- text/x-python) lexer=python;;
- text/x-ruby) lexer=ruby;;
- text/x-shellscript) lexer=sh;;
- text/x-tcl) lexer=tcl;;
- text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer
-
- # Types that file outputs which pygmentize didn't support as of file 5.11, pygments 1.6rc1
- # text/calendar
- # text/PGP
- # text/rtf
- # text/texmacs
- # text/x-bcpl
- # text/x-info
- # text/x-m4
- # text/x-vcard
- # text/x-xmcd
- esac
+file_common_opts="--brief --dereference"
+
+lexer=$(pygmentize -N "$file")
+if [[ "$lexer" == text ]]; then
+ unset lexer
+ case $(file --mime-type --uncompress $file_common_opts "$file") in
+ application/xml|image/svg+xml) lexer=xml;;
+ application/javascript) lexer=javascript;;
+ text/html) lexer=html;;
+ text/troff) lexer=nroff;;
+ text/x-asm) lexer=nasm;;
+ text/x-awk) lexer=awk;;
+ text/x-c) lexer=c;;
+ text/x-c++) lexer=cpp;;
+ text/x-diff) lexer=diff;;
+ text/x-fortran) lexer=fortran;;
+ text/x-gawk) lexer=gawk;;
+ text/x-java) lexer=java;;
+ text/x-lisp) lexer=common-lisp;;
+ text/x-lua) lexer=lua;;
+ text/x-makefile) lexer=make;;
+ text/x-msdos-batch) lexer=bat;;
+ text/x-nawk) lexer=nawk;;
+ text/x-pascal) lexer=pascal;;
+ text/x-perl) lexer=perl;;
+ text/x-php) lexer=php;;
+ text/x-po) lexer=po;;
+ text/x-python) lexer=python;;
+ text/x-ruby) lexer=ruby;;
+ text/x-shellscript) lexer=sh;;
+ text/x-tcl) lexer=tcl;;
+ text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer
+
+ # Types that file outputs which pygmentize didn't support as of file 5.20, pygments 2.0
+ # text/calendar
+ # text/inf
+ # text/PGP
+ # text/rtf
+ # text/texmacs
+ # text/vnd.graphviz
+ # text/x-bcpl
+ # text/x-info
+ # text/x-m4
+ # text/x-vcard
+ # text/x-xmcd
+
+ text/plain) # special filenames. TODO: insert more
+ case $(basename "$file") in
+ .zshrc) lexer=sh;;
+ esac
+ ;;
+ esac
fi
-encoding=`file --brief --mime-encoding $file_common_opts "$1"`
+encoding=$(file --mime-encoding --uncompress $file_common_opts "$file")
+if [[ $encoding == "us-asciibinarybinary" ]]; then
+ encoding="us-ascii"
+fi
-if [ -n "$lexer" ]; then
- # FIXME: Specify input encoding rather than output encoding https://bitbucket.org/birkenfeld/pygments-main/issue/800
- zcat "$1" | pygmentize -O encoding=$encoding,outencoding=UTF-8 $PYGMENTIZE_OPTS -l $lexer
- exit 0
+if [[ -n "$lexer" ]]; then
+ concat=cat
+ case $(file $file_common_opts --mime-type "$file") in
+ application/x-gzip) concat=zcat;;
+ application/x-bzip2) concat=bzcat;;
+ application/x-xz) concat=xzcat;;
+ esac
+ exec $concat "$file" | pygmentize -O inencoding=$encoding $PYGMENTIZE_OPTS $options -l $lexer
fi
exit 1
diff --git a/vendor/pygments/external/lasso-builtins-generator-9.lasso b/vendor/pygments/external/lasso-builtins-generator-9.lasso
index bea8b2a..0156299 100755
--- a/vendor/pygments/external/lasso-builtins-generator-9.lasso
+++ b/vendor/pygments/external/lasso-builtins-generator-9.lasso
@@ -4,14 +4,20 @@
Builtins Generator for Lasso 9
This is the shell script that was used to extract Lasso 9's built-in keywords
- and generate most of the _lassobuiltins.py file. When run, it creates a file
- named "lassobuiltins-9.py" containing the types, traits, and methods of the
- currently-installed version of Lasso 9.
+ and generate most of the _lasso_builtins.py file. When run, it creates a file
+ containing the types, traits, methods, and members of the currently-installed
+ version of Lasso 9.
- A partial list of keywords in Lasso 8 can be generated with this code:
+ A list of tags in Lasso 8 can be generated with this code:
insert(string_removeleading(#i, -pattern='_global_'));
/iterate;
@@ -23,98 +29,133 @@
*/
output("This output statement is required for a complete list of methods.")
-local(f) = file("lassobuiltins-9.py")
+local(f) = file("_lasso_builtins-9.py")
#f->doWithClose => {
-#f->openWrite
+#f->openTruncate
#f->writeString('# -*- coding: utf-8 -*-
"""
- pygments.lexers._lassobuiltins
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ pygments.lexers._lasso_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Built-in Lasso types, traits, and methods.
+ Built-in Lasso types, traits, methods, and members.
+
+ :copyright: Copyright 2006-'+date->year+' by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
"""
')
-lcapi_loadModules
+// Load and register contents of $LASSO9_MASTER_HOME/LassoModules/
+database_initialize
// Load all of the libraries from builtins and lassoserver
// This forces all possible available types and methods to be registered
local(srcs =
- tie(
- dir(sys_masterHomePath + 'LassoLibraries/builtins/')->eachFilePath,
- dir(sys_masterHomePath + 'LassoLibraries/lassoserver/')->eachFilePath
- )
+ (:
+ dir(sys_masterHomePath + '/LassoLibraries/builtins/')->eachFilePath,
+ dir(sys_masterHomePath + '/LassoLibraries/lassoserver/')->eachFilePath
+ )
)
-with topLevelDir in #srcs
-where !#topLevelDir->lastComponent->beginsWith('.')
+with topLevelDir in delve(#srcs)
+where not #topLevelDir->lastComponent->beginsWith('.')
do protect => {
- handle_error => {
+ handle_error => {
stdoutnl('Unable to load: ' + #topLevelDir + ' ' + error_msg)
}
library_thread_loader->loadLibrary(#topLevelDir)
stdoutnl('Loaded: ' + #topLevelDir)
}
+email_initialize
+log_initialize
+session_initialize
+
local(
- typesList = list(),
- traitsList = list(),
- methodsList = list()
+ typesList = set(),
+ traitsList = set(),
+ unboundMethodsList = set(),
+ memberMethodsList = set()
)
-// unbound methods
-with method in sys_listUnboundMethods
-where !#method->methodName->asString->endsWith('=')
-where #method->methodName->asString->isalpha(1)
-where #methodsList !>> #method->methodName->asString
-do #methodsList->insert(#method->methodName->asString)
-
-// traits
-with trait in sys_listTraits
-where !#trait->asString->beginsWith('$')
-where #traitsList !>> #trait->asString
-do {
- #traitsList->insert(#trait->asString)
- with tmethod in tie(#trait->getType->provides, #trait->getType->requires)
- where !#tmethod->methodName->asString->endsWith('=')
- where #tmethod->methodName->asString->isalpha(1)
- where #methodsList !>> #tmethod->methodName->asString
- do #methodsList->insert(#tmethod->methodName->asString)
-}
-
// types
with type in sys_listTypes
-where #typesList !>> #type->asString
+where not #type->asString->endsWith('$') // skip threads
do {
- #typesList->insert(#type->asString)
- with tmethod in #type->getType->listMethods
- where !#tmethod->methodName->asString->endsWith('=')
- where #tmethod->methodName->asString->isalpha(1)
- where #methodsList !>> #tmethod->methodName->asString
- do #methodsList->insert(#tmethod->methodName->asString)
+ #typesList->insert(#type)
}
-#f->writeString("BUILTINS = {
- 'Types': [
-")
-with t in #typesList
-do #f->writeString(" '"+string_lowercase(#t)+"',\n")
+// traits
+with trait in sys_listTraits
+where not #trait->asString->beginsWith('$') // skip combined traits
+do {
+ #traitsList->insert(#trait)
+}
-#f->writeString(" ],
- 'Traits': [
-")
-with t in #traitsList
-do #f->writeString(" '"+string_lowercase(#t)+"',\n")
+// member methods
+with type in #typesList
+do {
+ with method in #type->getType->listMethods
+ where #method->typeName == #type // skip inherited methods
+ let name = #method->methodName
+ where not #name->asString->endsWith('=') // skip setter methods
+ where #name->asString->isAlpha(1) // skip unpublished methods
+ do {
+ #memberMethodsList->insert(#name)
+ }
+}
+with trait in #traitsList
+do {
+ with method in #trait->getType->provides
+ where #method->typeName == #trait // skip inherited methods
+ let name = #method->methodName
+ where not #name->asString->endsWith('=') // skip setter methods
+ where #name->asString->isAlpha(1) // skip unpublished methods
+ do {
+ #memberMethodsList->insert(#name)
+ }
+}
-#f->writeString(" ],
- 'Methods': [
-")
-with t in #methodsList
-do #f->writeString(" '"+string_lowercase(#t)+"',\n")
+// unbound methods
+with method in sys_listUnboundMethods
+let name = #method->methodName
+where not #name->asString->endsWith('=') // skip setter methods
+where #name->asString->isAlpha(1) // skip unpublished methods
+where #typesList !>> #name
+where #traitsList !>> #name
+do {
+ #unboundMethodsList->insert(#name)
+}
-#f->writeString(" ],
+// write to file
+with i in (:
+ pair(#typesList, "BUILTINS = {
+ 'Types': (
+"),
+ pair(#traitsList, " ),
+ 'Traits': (
+"),
+ pair(#unboundMethodsList, " ),
+ 'Unbound Methods': (
+"),
+ pair(#memberMethodsList, " )
+}
+MEMBERS = {
+ 'Member Methods': (
+")
+)
+do {
+ #f->writeString(#i->second)
+ with t in (#i->first)
+ let ts = #t->asString
+ order by #ts
+ do {
+ #f->writeString(" '"+#ts->lowercase&asString+"',\n")
+ }
+}
+
+#f->writeString(" )
}
")
diff --git a/vendor/pygments/external/markdown-processor.py b/vendor/pygments/external/markdown-processor.py
index 12e6468..a3e178e 100644
--- a/vendor/pygments/external/markdown-processor.py
+++ b/vendor/pygments/external/markdown-processor.py
@@ -6,14 +6,9 @@
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
- from markdown import Markdown
+ import markdown
- md = Markdown()
- md.textPreprocessors.insert(0, CodeBlockPreprocessor())
- html = md.convert(someText)
-
- markdown is then a callable that can be passed to the context of
- a template and used in that template, for example.
+ html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
This uses CSS classes by default, so use
``pygmentize -S -f html > pygments.css``
@@ -25,9 +20,9 @@
some code
[/sourcecode]
- .. _Markdown: http://www.freewisdom.org/projects/python-markdown/
+ .. _Markdown: https://pypi.python.org/pypi/Markdown
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -40,17 +35,17 @@ INLINESTYLES = False
import re
-from markdown import TextPreprocessor
+from markdown.preprocessors import Preprocessor
+from markdown.extensions import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
-class CodeBlockPreprocessor(TextPreprocessor):
+class CodeBlockPreprocessor(Preprocessor):
- pattern = re.compile(
- r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
+ pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
@@ -63,5 +58,10 @@ class CodeBlockPreprocessor(TextPreprocessor):
code = highlight(m.group(2), lexer, self.formatter)
             code = code.replace('\n\n', '\n&nbsp;\n').replace('\n', '<br />')
             return '\n\n<div class="code">%s</div>\n\n' % code
- return self.pattern.sub(
- repl, lines)
+ joined_lines = "\n".join(lines)
+ joined_lines = self.pattern.sub(repl, joined_lines)
+ return joined_lines.split("\n")
+
+class CodeBlockExtension(Extension):
+ def extendMarkdown(self, md, md_globals):
+ md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
diff --git a/vendor/pygments/external/moin-parser.py b/vendor/pygments/external/moin-parser.py
index 6544da1..9cb082a 100644
--- a/vendor/pygments/external/moin-parser.py
+++ b/vendor/pygments/external/moin-parser.py
@@ -31,7 +31,7 @@
If you do not want to do that and are willing to accept larger HTML
output, you can set the INLINESTYLES option below to True.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/vendor/pygments/external/rst-directive-old.py b/vendor/pygments/external/rst-directive-old.py
deleted file mode 100644
index a074536..0000000
--- a/vendor/pygments/external/rst-directive-old.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- The Pygments reStructuredText directive
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- This fragment is a Docutils_ 0.4 directive that renders source code
- (to HTML only, currently) via Pygments.
-
- To use it, adjust the options below and copy the code into a module
- that you import on initialization. The code then automatically
- registers a ``sourcecode`` directive that you can use instead of
- normal code blocks like this::
-
- .. sourcecode:: python
-
- My code goes here.
-
- If you want to have different code styles, e.g. one with line numbers
- and one without, add formatters with their names in the VARIANTS dict
- below. You can invoke them instead of the DEFAULT one by using a
- directive option::
-
- .. sourcecode:: python
- :linenos:
-
- My code goes here.
-
- Look at the `directive documentation`_ to get all the gory details.
-
- .. _Docutils: http://docutils.sf.net/
- .. _directive documentation:
- http://docutils.sourceforge.net/docs/howto/rst-directives.html
-
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-# Options
-# ~~~~~~~
-
-# Set to True if you want inline CSS styles instead of classes
-INLINESTYLES = False
-
-from pygments.formatters import HtmlFormatter
-
-# The default formatter
-DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
-
-# Add name -> formatter pairs for every variant you want to use
-VARIANTS = {
- # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
-}
-
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name, TextLexer
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found - use the text one instead of an exception
- lexer = TextLexer()
- # take an arbitrary option if more than one is given
- formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
- parsed = highlight(u'\n'.join(content), lexer, formatter)
- return [nodes.raw('', parsed, format='html')]
-
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
-
-directives.register_directive('sourcecode', pygments_directive)
diff --git a/vendor/pygments/external/rst-directive.py b/vendor/pygments/external/rst-directive.py
index 5c04038..f81677b 100644
--- a/vendor/pygments/external/rst-directive.py
+++ b/vendor/pygments/external/rst-directive.py
@@ -31,7 +31,7 @@
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -75,9 +75,8 @@ class Pygments(Directive):
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
- formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
+ formatter = self.options and VARIANTS[list(self.options)[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
-
diff --git a/vendor/pygments/ez_setup.py b/vendor/pygments/ez_setup.py
old mode 100755
new mode 100644
index e33744b..9dc2c87
--- a/vendor/pygments/ez_setup.py
+++ b/vendor/pygments/ez_setup.py
@@ -13,264 +13,370 @@ the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
+import os
+import shutil
import sys
-DEFAULT_VERSION = "0.6c9"
-DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
+import tempfile
+import tarfile
+import optparse
+import subprocess
+import platform
-md5_data = {
- 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
- 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
- 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
- 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
- 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
- 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
- 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
- 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
- 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
- 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
- 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
- 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
- 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
- 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
- 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
- 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
- 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
- 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
- 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
- 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
- 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
- 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
- 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
- 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
- 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
- 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
- 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
- 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
- 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
- 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
- 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
- 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
- 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
- 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
-}
+from distutils import log
-import sys, os
-try: from hashlib import md5
-except ImportError: from md5 import md5
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
-def _validate_md5(egg_name, data):
- if egg_name in md5_data:
- digest = md5(data).hexdigest()
- if digest != md5_data[egg_name]:
- print >>sys.stderr, (
- "md5 validation of %s failed! (Possible download problem?)"
- % egg_name
- )
- sys.exit(2)
- return data
+DEFAULT_VERSION = "1.4.2"
+DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
-def use_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- download_delay=15
-):
- """Automatically find/download setuptools and make it available on sys.path
+def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end with
- a '/'). `to_dir` is the directory where setuptools will be downloaded, if
- it is not already available. If `download_delay` is specified, it should
- be the number of seconds that will be paused before initiating a download,
- should one be required. If an older version of setuptools is installed,
- this routine will print a message to ``sys.stderr`` and raise SystemExit in
- an attempt to abort the calling script.
- """
- was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
- def do_download():
- egg = download_setuptools(version, download_base, to_dir, download_delay)
- sys.path.insert(0, egg)
- import setuptools; setuptools.bootstrap_install_from = egg
+def _check_call_py24(cmd, *args, **kwargs):
+ res = subprocess.call(cmd, *args, **kwargs)
+ class CalledProcessError(Exception):
+ pass
+ if not res == 0:
+ msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
+ raise CalledProcessError(msg)
+vars(subprocess).setdefault('check_call', _check_call_py24)
+
+def _install(tarball, install_args=()):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # installing
+ log.warn('Installing Setuptools')
+ if not _python_cmd('setup.py', 'install', *install_args):
+ log.warn('Something went wrong during the installation.')
+ log.warn('See the error message above.')
+ # exitcode will be 2
+ return 2
+ finally:
+ os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
+
+
+def _build_egg(egg, tarball, to_dir):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # building an egg
+ log.warn('Building a Setuptools egg in %s', to_dir)
+ _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+ finally:
+ os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
+ # returning the result
+ log.warn(egg)
+ if not os.path.exists(egg):
+ raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+ egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
+ % (version, sys.version_info[0], sys.version_info[1]))
+ if not os.path.exists(egg):
+ tarball = download_setuptools(version, download_base,
+ to_dir, download_delay)
+ _build_egg(egg, tarball, to_dir)
+ sys.path.insert(0, egg)
+
+ # Remove previously-imported pkg_resources if present (see
+ # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
+ if 'pkg_resources' in sys.modules:
+ del sys.modules['pkg_resources']
+
+ import setuptools
+ setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, download_delay=15):
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ was_imported = 'pkg_resources' in sys.modules or \
+ 'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
- return do_download()
+ return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("setuptools>="+version); return
- except pkg_resources.VersionConflict, e:
+ pkg_resources.require("setuptools>=" + version)
+ return
+ except pkg_resources.VersionConflict:
+ e = sys.exc_info()[1]
if was_imported:
- print >>sys.stderr, (
- "The required version of setuptools (>=%s) is not available, and\n"
- "can't be installed while this script is running. Please install\n"
- " a more recent version first, using 'easy_install -U setuptools'."
- "\n\n(Currently using %r)"
- ) % (version, e.args[0])
+ sys.stderr.write(
+ "The required version of setuptools (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U setuptools'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return do_download()
+ return _do_download(version, download_base, to_dir,
+ download_delay)
except pkg_resources.DistributionNotFound:
- return do_download()
+ return _do_download(version, download_base, to_dir,
+ download_delay)
-def download_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- delay = 15
-):
+def _clean_check(cmd, target):
+ """
+ Run the command to download target. If the command fails, clean up before
+ re-raising the error.
+ """
+ try:
+ subprocess.check_call(cmd)
+ except subprocess.CalledProcessError:
+ if os.access(target, os.F_OK):
+ os.unlink(target)
+ raise
+
+def download_file_powershell(url, target):
+ """
+ Download the file at url to target using Powershell (which will validate
+ trust). Raise an exception if the command cannot complete.
+ """
+ target = os.path.abspath(target)
+ cmd = [
+ 'powershell',
+ '-Command',
+ "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
+ ]
+ _clean_check(cmd, target)
+
+def has_powershell():
+ if platform.system() != 'Windows':
+ return False
+ cmd = ['powershell', '-Command', 'echo test']
+ devnull = open(os.path.devnull, 'wb')
+ try:
+ try:
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
+
+download_file_powershell.viable = has_powershell
+
+def download_file_curl(url, target):
+ cmd = ['curl', url, '--silent', '--output', target]
+ _clean_check(cmd, target)
+
+def has_curl():
+ cmd = ['curl', '--version']
+ devnull = open(os.path.devnull, 'wb')
+ try:
+ try:
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
+
+download_file_curl.viable = has_curl
+
+def download_file_wget(url, target):
+ cmd = ['wget', url, '--quiet', '--output-document', target]
+ _clean_check(cmd, target)
+
+def has_wget():
+ cmd = ['wget', '--version']
+ devnull = open(os.path.devnull, 'wb')
+ try:
+ try:
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
+
+download_file_wget.viable = has_wget
+
+def download_file_insecure(url, target):
+ """
+ Use Python to download the file, even though it cannot authenticate the
+ connection.
+ """
+ try:
+ from urllib.request import urlopen
+ except ImportError:
+ from urllib2 import urlopen
+ src = dst = None
+ try:
+ src = urlopen(url)
+ # Read/write all in one block, so we don't create a corrupt file
+ # if the download is interrupted.
+ data = src.read()
+ dst = open(target, "wb")
+ dst.write(data)
+ finally:
+ if src:
+ src.close()
+ if dst:
+ dst.close()
+
+download_file_insecure.viable = lambda: True
+
+def get_best_downloader():
+ downloaders = [
+ download_file_powershell,
+ download_file_curl,
+ download_file_wget,
+ download_file_insecure,
+ ]
+
+ for dl in downloaders:
+ if dl.viable():
+ return dl
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, delay=15,
+ downloader_factory=get_best_downloader):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download attempt.
+ `delay` is the number of seconds to pause before an actual download
+ attempt.
+
+ ``downloader_factory`` should be a function taking no arguments and
+ returning a function for downloading a URL to a target.
"""
- import urllib2, shutil
- egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
- url = download_base + egg_name
- saveto = os.path.join(to_dir, egg_name)
- src = dst = None
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ tgz_name = "setuptools-%s.tar.gz" % version
+ url = download_base + tgz_name
+ saveto = os.path.join(to_dir, tgz_name)
if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- from distutils import log
- if delay:
- log.warn("""
----------------------------------------------------------------------------
-This script requires setuptools version %s to run (even to display
-help). I will attempt to download it for you (from
-%s), but
-you may need to enable firewall access for this script first.
-I will start the download in %d seconds.
-
-(Note: if this machine does not have network access, please obtain the file
-
- %s
-
-and place it in this directory before rerunning this script.)
----------------------------------------------------------------------------""",
- version, download_base, delay, url
- ); from time import sleep; sleep(delay)
- log.warn("Downloading %s", url)
- src = urllib2.urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = _validate_md5(egg_name, src.read())
- dst = open(saveto,"wb"); dst.write(data)
- finally:
- if src: src.close()
- if dst: dst.close()
+ log.warn("Downloading %s", url)
+ downloader = downloader_factory()
+ downloader(url, saveto)
return os.path.realpath(saveto)
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
+ else:
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+def _build_install_args(options):
+ """
+ Build the arguments to 'python setup.py install' on the setuptools package
+ """
+ install_args = []
+ if options.user_install:
+ if sys.version_info < (2, 6):
+ log.warn("--user requires Python 2.6 or later")
+ raise SystemExit(1)
+ install_args.append('--user')
+ return install_args
+def _parse_args():
+ """
+ Parse the command line for options
+ """
+ parser = optparse.OptionParser()
+ parser.add_option(
+ '--user', dest='user_install', action='store_true', default=False,
+ help='install in user site package (requires Python 2.6 or later)')
+ parser.add_option(
+ '--download-base', dest='download_base', metavar="URL",
+ default=DEFAULT_URL,
+ help='alternative URL from where to download the setuptools package')
+ parser.add_option(
+ '--insecure', dest='downloader_factory', action='store_const',
+ const=lambda: download_file_insecure, default=get_best_downloader,
+ help='Use internal, non-validating downloader'
+ )
+ options, args = parser.parse_args()
+ # positional arguments are ignored
+ return options
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def main(argv, version=DEFAULT_VERSION):
+def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
- try:
- import setuptools
- except ImportError:
- egg = None
- try:
- egg = download_setuptools(version, delay=0)
- sys.path.insert(0,egg)
- from setuptools.command.easy_install import main
- return main(list(argv)+[egg]) # we're done here
- finally:
- if egg and os.path.exists(egg):
- os.unlink(egg)
- else:
- if setuptools.__version__ == '0.0.1':
- print >>sys.stderr, (
- "You have an obsolete version of setuptools installed. Please\n"
- "remove it from your system entirely before rerunning this script."
- )
- sys.exit(2)
-
- req = "setuptools>="+version
- import pkg_resources
- try:
- pkg_resources.require(req)
- except pkg_resources.VersionConflict:
- try:
- from setuptools.command.easy_install import main
- except ImportError:
- from easy_install import main
- main(list(argv)+[download_setuptools(delay=0)])
- sys.exit(0) # try to force an exit
- else:
- if argv:
- from setuptools.command.easy_install import main
- main(argv)
- else:
- print "Setuptools version",version,"or greater has been installed."
- print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
-
-def update_md5(filenames):
- """Update our built-in md5 registry"""
-
- import re
-
- for name in filenames:
- base = os.path.basename(name)
- f = open(name,'rb')
- md5_data[base] = md5(f.read()).hexdigest()
- f.close()
-
- data = [" %r: %r,\n" % it for it in md5_data.items()]
- data.sort()
- repl = "".join(data)
-
- import inspect
- srcfile = inspect.getsourcefile(sys.modules[__name__])
- f = open(srcfile, 'rb'); src = f.read(); f.close()
-
- match = re.search("\nmd5_data = {\n([^}]+)}", src)
- if not match:
- print >>sys.stderr, "Internal error!"
- sys.exit(2)
-
- src = src[:match.start(1)] + repl + src[match.end(1):]
- f = open(srcfile,'w')
- f.write(src)
- f.close()
-
-
-if __name__=='__main__':
- if len(sys.argv)>2 and sys.argv[1]=='--md5update':
- update_md5(sys.argv[2:])
- else:
- main(sys.argv[1:])
-
-
-
-
-
+ options = _parse_args()
+ tarball = download_setuptools(download_base=options.download_base,
+ downloader_factory=options.downloader_factory)
+ return _install(tarball, _build_install_args(options))
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/vendor/pygments/pygmentize b/vendor/pygments/pygmentize
index e237919..aea3872 100755
--- a/vendor/pygments/pygmentize
+++ b/vendor/pygments/pygmentize
@@ -1,6 +1,7 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
-import sys, pygments.cmdline
+import sys
+import pygments.cmdline
try:
sys.exit(pygments.cmdline.main(sys.argv))
except KeyboardInterrupt:
diff --git a/vendor/pygments/pygments/__init__.py b/vendor/pygments/pygments/__init__.py
index 2bfd8ba..c623440 100644
--- a/vendor/pygments/pygments/__init__.py
+++ b/vendor/pygments/pygments/__init__.py
@@ -22,11 +22,11 @@
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-__version__ = '1.6'
+__version__ = '2.1.3'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
@@ -43,15 +43,16 @@ def lex(code, lexer):
"""
try:
return lexer.get_tokens(code)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
- 'unbound method get_tokens' in err.args[0]:
+ ('unbound method get_tokens' in err.args[0] or
+ 'missing 1 required positional argument' in err.args[0]):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
-def format(tokens, formatter, outfile=None):
+def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
@@ -61,15 +62,15 @@ def format(tokens, formatter, outfile=None):
"""
try:
if not outfile:
- #print formatter, 'using', formatter.encoding
- realoutfile = formatter.encoding and BytesIO() or StringIO()
+ realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
- 'unbound method format' in err.args[0]:
+ ('unbound method format' in err.args[0] or
+ 'missing 1 required positional argument' in err.args[0]):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
@@ -86,6 +87,6 @@ def highlight(code, lexer, formatter, outfile=None):
return format(lex(code, lexer), formatter, outfile)
-if __name__ == '__main__':
+if __name__ == '__main__': # pragma: no cover
from pygments.cmdline import main
sys.exit(main(sys.argv))
diff --git a/vendor/pygments/pygments/cmdline.py b/vendor/pygments/pygments/cmdline.py
index c25204b..00745ed 100644
--- a/vendor/pygments/pygments/cmdline.py
+++ b/vendor/pygments/pygments/cmdline.py
@@ -5,27 +5,33 @@
Command line interface.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+from __future__ import print_function
+
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
-from pygments.util import ClassNotFound, OptionError, docstring_headline
-from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
- find_lexer_class, guess_lexer, TextLexer
+from pygments.util import ClassNotFound, OptionError, docstring_headline, \
+ guess_decode, guess_decode_from_terminal, terminal_encoding
+from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
+ get_lexer_for_filename, find_lexer_class_for_filename
+from pygments.lexers.special import TextLexer
+from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
- get_formatter_for_filename, find_formatter_class, \
- TerminalFormatter # pylint:disable-msg=E0611
+ get_formatter_for_filename, find_formatter_class
+from pygments.formatters.terminal import TerminalFormatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P