author    Jharrod LaFon <jharrod.lafon@gmail.com>  2014-04-14 14:01:51 -0400
committer Jharrod LaFon <jharrod.lafon@gmail.com>  2014-04-14 14:01:51 -0400
commit    acd5cf2113bb179731a94984bb826528a31fcb06 (patch)
tree      66da33b35d68d2dc3ecbc6cfb9b401ac6351a6a9
parent    a88ed45d9bdc2e158fe7d69be8e01f798ded7b8e (diff)
parent    5d57fe78405ac06a306f5ed2dd1b630a909cbdfb (diff)
Merged head
-rw-r--r--  .hgignore | 1
-rw-r--r--  AUTHORS | 7
-rw-r--r--  CHANGES | 22
-rw-r--r--  MANIFEST.in | 2
-rw-r--r--  Makefile | 11
-rw-r--r--  doc/Makefile | 153
-rw-r--r--  doc/_static/favicon.ico | bin 0 -> 16958 bytes
-rw-r--r--  doc/_static/logo_new.png | bin 0 -> 40944 bytes
-rw-r--r--  doc/_static/logo_only.png | bin 0 -> 16424 bytes
-rw-r--r--  doc/_templates/docssidebar.html | 3
-rw-r--r--  doc/_templates/indexsidebar.html | 25
-rw-r--r--  doc/_themes/pygments14/layout.html | 98
-rw-r--r--  doc/_themes/pygments14/static/bodybg.png | bin 0 -> 51903 bytes
-rw-r--r--  doc/_themes/pygments14/static/docbg.png | bin 0 -> 61296 bytes
-rw-r--r--  doc/_themes/pygments14/static/listitem.png | bin 0 -> 207 bytes
-rw-r--r--  doc/_themes/pygments14/static/logo.png | bin 0 -> 26933 bytes
-rw-r--r--  doc/_themes/pygments14/static/pocoo.png | bin 0 -> 2154 bytes
-rw-r--r--  doc/_themes/pygments14/static/pygments14.css_t | 401
-rw-r--r--  doc/_themes/pygments14/theme.conf | 15
-rw-r--r--  doc/conf.py | 249
-rw-r--r--  doc/docs/api.rst | 316
-rw-r--r--  doc/docs/authors.rst | 4
-rw-r--r--  doc/docs/changelog.rst | 1
-rw-r--r--  doc/docs/cmdline.rst (renamed from docs/src/cmdline.txt) | 20
-rw-r--r--  doc/docs/filterdevelopment.rst (renamed from docs/src/filterdevelopment.txt) | 2
-rw-r--r--  doc/docs/filters.rst (renamed from docs/src/filters.txt) | 9
-rw-r--r--  doc/docs/formatterdevelopment.rst (renamed from docs/src/formatterdevelopment.txt) | 2
-rw-r--r--  doc/docs/formatters.rst (renamed from docs/src/formatters.txt) | 12
-rw-r--r--  doc/docs/index.rst | 66
-rw-r--r--  doc/docs/integrate.rst (renamed from docs/src/integrate.txt) | 10
-rw-r--r--  doc/docs/java.rst (renamed from docs/src/java.txt) | 0
-rw-r--r--  doc/docs/lexerdevelopment.rst (renamed from docs/src/lexerdevelopment.txt) | 27
-rw-r--r--  doc/docs/lexers.rst (renamed from docs/src/lexers.txt) | 12
-rw-r--r--  doc/docs/moinmoin.rst (renamed from docs/src/moinmoin.txt) | 0
-rw-r--r--  doc/docs/plugins.rst (renamed from docs/src/plugins.txt) | 0
-rw-r--r--  doc/docs/quickstart.rst (renamed from docs/src/quickstart.txt) | 41
-rw-r--r--  doc/docs/rstdirective.rst (renamed from docs/src/rstdirective.txt) | 0
-rw-r--r--  doc/docs/styles.rst (renamed from docs/src/styles.txt) | 4
-rw-r--r--  doc/docs/tokens.rst (renamed from docs/src/tokens.txt) | 14
-rw-r--r--  doc/docs/unicode.rst (renamed from docs/src/unicode.txt) | 15
-rw-r--r--  doc/download.rst | 41
-rw-r--r--  doc/faq.rst | 143
-rw-r--r--  doc/index.rst | 53
-rw-r--r--  doc/languages.rst | 149
-rw-r--r--  doc/make.bat | 190
-rw-r--r--  doc/pygmentize.1 (renamed from docs/pygmentize.1) | 0
-rwxr-xr-x  docs/generate.py | 472
-rw-r--r--  docs/src/api.txt | 270
-rw-r--r--  docs/src/authors.txt | 5
-rw-r--r--  docs/src/changelog.txt | 5
-rw-r--r--  docs/src/index.txt | 69
-rw-r--r--  docs/src/installation.txt | 71
-rw-r--r--  external/rst-directive-old.py | 77
-rw-r--r--  external/rst-directive.py | 3
-rw-r--r--  pygments/__init__.py | 6
-rw-r--r--  pygments/cmdline.py | 145
-rw-r--r--  pygments/filters/__init__.py | 18
-rw-r--r--  pygments/formatter.py | 4
-rwxr-xr-x  pygments/formatters/_mapping.py | 4
-rw-r--r--  pygments/formatters/html.py | 67
-rw-r--r--  pygments/formatters/img.py | 33
-rw-r--r--  pygments/formatters/latex.py | 112
-rw-r--r--  pygments/formatters/other.py | 9
-rw-r--r--  pygments/formatters/rtf.py | 13
-rw-r--r--  pygments/formatters/svg.py | 2
-rw-r--r--  pygments/formatters/terminal256.py | 2
-rw-r--r--  pygments/lexer.py | 42
-rw-r--r--  pygments/lexers/__init__.py | 19
-rw-r--r--  pygments/lexers/_cocoabuiltins.py | 14
-rw-r--r--  pygments/lexers/_luabuiltins.py | 20
-rw-r--r--  pygments/lexers/_mapping.py | 25
-rw-r--r--  pygments/lexers/_phpbuiltins.py | 14
-rw-r--r--  pygments/lexers/_postgres_builtins.py | 12
-rw-r--r--  pygments/lexers/_robotframeworklexer.py | 5
-rw-r--r--  pygments/lexers/_sourcemodbuiltins.py | 17
-rw-r--r--  pygments/lexers/agile.py | 99
-rw-r--r--  pygments/lexers/asm.py | 112
-rw-r--r--  pygments/lexers/compiled.py | 1302
-rw-r--r--  pygments/lexers/dalvik.py | 2
-rw-r--r--  pygments/lexers/dotnet.py | 18
-rw-r--r--  pygments/lexers/foxpro.py | 2
-rw-r--r--  pygments/lexers/functional.py | 183
-rw-r--r--  pygments/lexers/hdl.py | 6
-rw-r--r--  pygments/lexers/jvm.py | 164
-rw-r--r--  pygments/lexers/math.py | 88
-rw-r--r--  pygments/lexers/other.py | 208
-rw-r--r--  pygments/lexers/parsers.py | 40
-rw-r--r--  pygments/lexers/shell.py | 12
-rw-r--r--  pygments/lexers/special.py | 13
-rw-r--r--  pygments/lexers/sql.py | 24
-rw-r--r--  pygments/lexers/templates.py | 82
-rw-r--r--  pygments/lexers/text.py | 75
-rw-r--r--  pygments/lexers/web.py | 303
-rw-r--r--  pygments/sphinxext.py | 153
-rw-r--r--  pygments/style.py | 3
-rw-r--r--  pygments/token.py | 2
-rw-r--r--  pygments/unistring.py | 5
-rw-r--r--  pygments/util.py | 49
-rwxr-xr-x  scripts/check_sources.py | 45
-rw-r--r--  scripts/detect_missing_analyse_text.py | 9
-rwxr-xr-x  scripts/find_codetags.py | 44
-rwxr-xr-x  scripts/find_error.py | 27
-rw-r--r--  scripts/get_vimkw.py | 6
-rwxr-xr-x  scripts/reindent.py | 291
-rwxr-xr-x [-rw-r--r--]  scripts/vim2pygments.py | 16
-rwxr-xr-x  setup.py | 8
-rw-r--r--  tests/examplefiles/core.cljs | 52
-rw-r--r--  tests/examplefiles/example.gd | 23
-rw-r--r--  tests/examplefiles/example.gi | 64
-rw-r--r--  tests/examplefiles/example.i6t | 32
-rw-r--r--  tests/examplefiles/example.i7x | 45
-rw-r--r--  tests/examplefiles/example.inf | 374
-rw-r--r--  tests/examplefiles/example.mq4 | 187
-rw-r--r--  tests/examplefiles/example.mqh | 123
-rw-r--r--  tests/examplefiles/example.ni | 57
-rw-r--r--  tests/examplefiles/exampleScript.cfc | 241
-rw-r--r--  tests/examplefiles/exampleTag.cfc | 18
-rw-r--r--  tests/examplefiles/objc_example.m | 3
-rw-r--r--  tests/examplefiles/scope.cirru | 43
-rw-r--r--  tests/examplefiles/test.apl | 26
-rw-r--r--  tests/examplefiles/test.idr | 93
-rw-r--r--  tests/examplefiles/test.mask | 41
-rw-r--r--  tests/examplefiles/test.pig | 148
-rw-r--r--  tests/examplefiles/test.zep | 33
-rw-r--r--  tests/old_run.py | 138
-rw-r--r--  tests/run.py | 33
-rw-r--r--  tests/test_basic_api.py | 33
-rw-r--r--  tests/test_cmdline.py | 9
-rw-r--r--  tests/test_examplefiles.py | 20
-rw-r--r--  tests/test_html_formatter.py | 26
-rw-r--r--  tests/test_latex_formatter.py | 4
-rw-r--r--  tests/test_token.py | 4
132 files changed, 6644 insertions(+), 2290 deletions(-)
diff --git a/.hgignore b/.hgignore
index 0a93a87b..57aaeff5 100644
--- a/.hgignore
+++ b/.hgignore
@@ -4,6 +4,7 @@ syntax: glob
*.egg
build/*
dist/*
+doc/_build
Pygments.egg-info/*
.ropeproject
tests/examplefiles/output
diff --git a/AUTHORS b/AUTHORS
index ffae0944..83c0eaca 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -16,6 +16,7 @@ Other contributors, listed alphabetically, are:
* Stefan Matthias Aust -- Smalltalk lexer
* Ben Bangert -- Mako lexers
* Max Battcher -- Darcs patch lexer
+* Thomas Baruchel -- APL lexer
* Tim Baumann -- (Literate) Agda lexer
* Paul Baumgart, 280 North, Inc. -- Objective-J lexer
* Michael Bayer -- Myghty lexers
@@ -27,6 +28,7 @@ Other contributors, listed alphabetically, are:
* Pierre Bourdon -- bugfixes
* Hiram Chirino -- Scaml and Jade lexers
* Ian Cooper -- VGL lexer
+* David Corbett -- Inform lexers
* Leaf Corcoran -- MoonScript lexer
* Christopher Creutzig -- MuPAD lexer
* Daniël W. Crompton - Pike lexer
@@ -63,6 +65,7 @@ Other contributors, listed alphabetically, are:
* Rob Hoelz -- Perl 6 lexer
* Doug Hogan -- Mscgen lexer
* Ben Hollis -- Mason lexer
+* Max Horn -- GAP lexer
* Dustin Howett -- Logos lexer
* Alastair Houghton -- Lexer inheritance facility
* Tim Howard -- BlitzMax lexer
@@ -70,6 +73,7 @@ Other contributors, listed alphabetically, are:
* Brian R. Jackson -- Tea lexer
* Christian Jann -- ShellSession lexer
* Dennis Kaarsemaker -- sources.list lexer
+* Alexander Kit -- MaskJS lexer
* Igor Kalnitsky -- vhdl lexer
* Pekka Klärck -- Robot Framework lexer
* Eric Knibbe -- Lasso lexer
@@ -95,6 +99,7 @@ Other contributors, listed alphabetically, are:
* Brian McKenna -- F# lexer
* Charles McLaughlin -- Puppet lexer
* Lukas Meuser -- BBCode formatter, Lua lexer
+* Cat Miller -- Pig lexer
* Paul Miller -- LiveScript lexer
* Hong Minhee -- HTTP lexer
* Michael Mior -- Awk lexer
@@ -115,9 +120,11 @@ Other contributors, listed alphabetically, are:
* Benjamin Peterson -- Test suite refactoring
* Dominik Picheta -- Nimrod lexer
* Clément Prévost -- UrbiScript lexer
+* raichoo -- Idris lexer
* Kashif Rasul -- CUDA lexer
* Justin Reidy -- MXML lexer
* Norman Richards -- JSON lexer
+* Corey Richardson -- Rust lexer updates
* Lubomir Rintel -- GoodData MAQL and CL lexers
* Andre Roberge -- Tango style
* Konrad Rudolph -- LaTeX formatter enhancements
diff --git a/CHANGES b/CHANGES
index 271c447d..8459e05a 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,11 +6,14 @@ Issue numbers refer to the tracker at
pull request numbers to the requests at
<http://bitbucket.org/birkenfeld/pygments-main/pull-requests/merged>.
-Version 1.7
+Version 2.0
-----------
(under development)
-- Dropped Python 2.4 compatibility.
+- Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source
+ compatibility between Python 2.6, 2.7 and 3.3+.
+
+- New website and documentation based on Sphinx (finally!)
- Lexers added:
@@ -30,10 +33,20 @@ Version 1.7
* Chapel (PR#256)
* Kal (PR#233)
* Eiffel (PR#273)
+ * Cirru (PR#275)
+ * ColdFusion CFC (PR#283)
+ * Idris (PR#210)
+ * Intel objdump (PR#279)
+ * MaskJS (PR#280)
+ * Inform 6/7 (PR#281)
+ * MQL (PR#285)
+ * APL (#969)
- New styles: "xcode" and "igor", similar to the default highlighting of
the respective IDEs.
+- Updated the Makefile lexer to yield a little more useful highlighting.
+
- Lexer aliases passed to ``get_lexer_by_name()`` are now case-insensitive.
- File name matching in lexers and formatters will now use a regex cache
@@ -71,7 +84,8 @@ Version 1.7
- Prolog lexer: add different kinds of numeric literals (#864).
-- F# lexer: rewrite with newest spec for F# 3.0 (#842).
+- F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with
+ dotted chains (#948).
- Kotlin lexer: general update (PR#271).
@@ -91,6 +105,8 @@ Version 1.7
- C family lexers: fix parsing of indented preprocessor directives (#944).
+- Rust lexer: update to 0.9 language version (PR#270).
+
Version 1.6
-----------
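
[Editorial note, not part of the commit: the alias change noted above — "Lexer
aliases passed to ``get_lexer_by_name()`` are now case-insensitive" — can be
illustrated with a minimal sketch, assuming a Pygments version that includes
this change::

    from pygments.lexers import get_lexer_by_name

    # Aliases are matched case-insensitively, so both lookups
    # resolve to the same Python lexer class.
    assert type(get_lexer_by_name('python')) is type(get_lexer_by_name('PYTHON'))

]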
diff --git a/MANIFEST.in b/MANIFEST.in
index 312c1504..cfec4e94 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,5 +2,5 @@ include pygmentize
include external/*
include Makefile CHANGES LICENSE AUTHORS TODO ez_setup.py
recursive-include tests *
-recursive-include docs *
+recursive-include doc *
recursive-include scripts *
diff --git a/Makefile b/Makefile
index 2429d28d..e28c90c7 100644
--- a/Makefile
+++ b/Makefile
@@ -36,11 +36,8 @@ codetags:
@$(PYTHON) scripts/find_codetags.py -i tests/examplefiles -i scripts/pylintrc \
-i scripts/find_codetags.py -o codetags.html .
-docs: docs/build
-
-docs/build: docs/src/*.txt
- $(PYTHON) docs/generate.py html docs/build $?
- touch docs/build
+docs:
+ make -C doc html
mapfiles:
(cd pygments/lexers; $(PYTHON) _mapping.py)
@@ -53,7 +50,7 @@ reindent:
@$(PYTHON) scripts/reindent.py -r -B .
test:
- @$(PYTHON) tests/run.py $(TESTS)
+ @$(PYTHON) tests/run.py $(TEST)
test-coverage:
- @$(PYTHON) tests/run.py -C $(TESTS)
+ @$(PYTHON) tests/run.py -C $(TEST)
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..7fb75411
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = PYTHONPATH=.. sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pygments.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pygments.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Pygments"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pygments"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_static/favicon.ico b/doc/_static/favicon.ico
new file mode 100644
index 00000000..777f617d
--- /dev/null
+++ b/doc/_static/favicon.ico
Binary files differ
diff --git a/doc/_static/logo_new.png b/doc/_static/logo_new.png
new file mode 100644
index 00000000..0ae4b209
--- /dev/null
+++ b/doc/_static/logo_new.png
Binary files differ
diff --git a/doc/_static/logo_only.png b/doc/_static/logo_only.png
new file mode 100644
index 00000000..fdebcc47
--- /dev/null
+++ b/doc/_static/logo_only.png
Binary files differ
diff --git a/doc/_templates/docssidebar.html b/doc/_templates/docssidebar.html
new file mode 100644
index 00000000..913acaaf
--- /dev/null
+++ b/doc/_templates/docssidebar.html
@@ -0,0 +1,3 @@
+{% if pagename != 'docs/index' %}
+<strong>&laquo; <a href="{{ pathto('docs/index') }}">Back to docs index</a></strong>
+{% endif %}
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
new file mode 100644
index 00000000..29954554
--- /dev/null
+++ b/doc/_templates/indexsidebar.html
@@ -0,0 +1,25 @@
+<h3>Download</h3>
+{% if version.endswith('(hg)') %}
+<p>This documentation is for version <b>{{ version }}</b>, which is
+ not released yet.</p>
+<p>You can use it from the
+ <a href="http://bitbucket.org/birkenfeld/sphinx/">Mercurial repo</a> or look for
+ released versions in the <a href="http://pypi.python.org/pypi/Sphinx">Python
+ Package Index</a>.</p>
+{% else %}
+<p>Current version: <b>{{ version }}</b></p>
+<p>Get Pygments from the <a href="http://pypi.python.org/pypi/Pygments">Python Package
+Index</a>, or install it with:</p>
+<pre>pip install Pygments</pre>
+{% endif %}
+
+<h3>Questions? Suggestions?</h3>
+
+<p>Clone at <a href="https://bitbucket.org/birkenfeld/pygments-main">Bitbucket</a>
+or come to the <tt>#pocoo</tt> channel on FreeNode.</p>
+<p>You can also open an issue at the
+ <a href="https://www.bitbucket.org/birkenfeld/pygments-main/issues/">tracker</a>.</p>
+
+<p class="logo">A <a href="http://pocoo.org/">
+ <img src="{{ pathto("_static/pocoo.png", 1) }}" /></a> project</p>
+
diff --git a/doc/_themes/pygments14/layout.html b/doc/_themes/pygments14/layout.html
new file mode 100644
index 00000000..93a3119e
--- /dev/null
+++ b/doc/_themes/pygments14/layout.html
@@ -0,0 +1,98 @@
+{#
+ sphinxdoc/layout.html
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx layout template for the sphinxdoc theme.
+
+ :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{% block relbar1 %}{% endblock %}
+{% block relbar2 %}{% endblock %}
+
+{% block extrahead %}
+ <link href='http://fonts.googleapis.com/css?family={{ theme_font|replace(' ', '+') }}:300,400,700'
+ rel='stylesheet' type='text/css'>
+{{ super() }}
+{%- if not embedded %}
+ <style type="text/css">
+ table.right { float: right; margin-left: 20px; }
+ table.right td { border: 1px solid #ccc; }
+ {% if pagename == 'index' %}
+ .related { display: none; }
+ {% endif %}
+ </style>
+ <script type="text/javascript">
+ // intelligent scrolling of the sidebar content
+ $(window).scroll(function() {
+ var sb = $('.sphinxsidebarwrapper');
+ var win = $(window);
+ var sbh = sb.height();
+ var offset = $('.sphinxsidebar').position()['top'];
+ var wintop = win.scrollTop();
+ var winbot = wintop + win.innerHeight();
+ var curtop = sb.position()['top'];
+ var curbot = curtop + sbh;
+ // does sidebar fit in window?
+ if (sbh < win.innerHeight()) {
+ // yes: easy case -- always keep at the top
+ sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
+ $(document).height() - sbh - 200]));
+ } else {
+ // no: only scroll if top/bottom edge of sidebar is at
+ // top/bottom edge of window
+ if (curtop > wintop && curbot > winbot) {
+ sb.css('top', $u.max([wintop - offset - 10, 0]));
+ } else if (curtop < wintop && curbot < winbot) {
+ sb.css('top', $u.min([winbot - sbh - offset - 20,
+ $(document).height() - sbh - 200]));
+ }
+ }
+ });
+ </script>
+{%- endif %}
+{% endblock %}
+
+{% block header %}
+<div class="outerwrapper">
+<div class="pageheader">
+ <ul>
+ <li><a href="{{ pathto('index') }}">Home</a></li>
+ {% if demo_active %}
+ <li><a href="{{ pathto('demo') }}">Demo</a></li>
+ {% endif %}
+ <li><a href="{{ pathto('languages') }}">Languages</a></li>
+ <li><a href="{{ pathto('faq') }}">FAQ</a></li>
+ <li><a href="{{ pathto('download') }}">Get it</a></li>
+ <li><a href="{{ pathto('docs/index') }}">Docs</a></li>
+ </ul>
+ <div>
+ <a href="{{ pathto('index') }}">
+ <img src="{{ pathto('_static/logo.png', 1) }}" alt="Pygments logo" />
+ </a>
+ </div>
+</div>
+{% endblock %}
+
+{% block footer %}
+ <div class="footer" role="contentinfo">
+ &copy; Copyright 2014, Georg Brandl and Pygments contributors.
+ Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{
+ sphinx_version }}. <br/>
+ Pygments logo created by <a href="http://joelunger.com">Joel Unger</a>.
+ Backgrounds from <a href="http://subtlepatterns.com">subtlepatterns.com</a>.
+ </div>
+ </div> {# closes "outerwrapper" div #}
+{% endblock %}
+
+{% block sidebarrel %}
+{% endblock %}
+
+{% block sidebarsourcelink %}
+{% endblock %}
diff --git a/doc/_themes/pygments14/static/bodybg.png b/doc/_themes/pygments14/static/bodybg.png
new file mode 100644
index 00000000..46892b80
--- /dev/null
+++ b/doc/_themes/pygments14/static/bodybg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/docbg.png b/doc/_themes/pygments14/static/docbg.png
new file mode 100644
index 00000000..13e61f32
--- /dev/null
+++ b/doc/_themes/pygments14/static/docbg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/listitem.png b/doc/_themes/pygments14/static/listitem.png
new file mode 100644
index 00000000..e45715f9
--- /dev/null
+++ b/doc/_themes/pygments14/static/listitem.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/logo.png b/doc/_themes/pygments14/static/logo.png
new file mode 100644
index 00000000..2c1a24dc
--- /dev/null
+++ b/doc/_themes/pygments14/static/logo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pocoo.png b/doc/_themes/pygments14/static/pocoo.png
new file mode 100644
index 00000000..41741494
--- /dev/null
+++ b/doc/_themes/pygments14/static/pocoo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pygments14.css_t b/doc/_themes/pygments14/static/pygments14.css_t
new file mode 100644
index 00000000..838782b5
--- /dev/null
+++ b/doc/_themes/pygments14/static/pygments14.css_t
@@ -0,0 +1,401 @@
+/*
+ * pygments14.css
+ * ~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13.
+ *
+ * :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ text-align: center;
+ background-image: url(bodybg.png);
+ background-color: {{ theme_background }};
+ color: black;
+ padding: 0;
+ /*
+ border-right: 1px solid {{ theme_border }};
+ border-left: 1px solid {{ theme_border }};
+ */
+
+ margin: 0 auto;
+ min-width: 780px;
+ max-width: 1080px;
+}
+
+.outerwrapper {
+ background-image: url(docbg.png);
+ background-attachment: fixed;
+}
+
+.pageheader {
+ text-align: left;
+ padding: 10px 15px;
+}
+
+.pageheader ul {
+ float: right;
+ color: white;
+ list-style-type: none;
+ padding-left: 0;
+ margin-top: 40px;
+ margin-right: 10px;
+}
+
+.pageheader li {
+ float: left;
+ margin: 0 0 0 10px;
+}
+
+.pageheader li a {
+ border-radius: 3px;
+ padding: 8px 12px;
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 5px rgba(0, 0, 0, 0.2);
+}
+
+.pageheader li a:hover {
+ background-color: {{ theme_yellow }};
+ color: black;
+ text-shadow: none;
+}
+
+div.document {
+ text-align: left;
+ /*border-left: 1em solid {{ theme_lightyellow }};*/
+}
+
+div.bodywrapper {
+ margin: 0 12px 0 240px;
+ background-color: white;
+/* border-right: 1px solid {{ theme_border }}; */
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+ color: {{ theme_darkgray }};
+}
+
+div.related ul {
+ background-image: url(relbg.png);
+ background-repeat: repeat-y;
+ background-color: {{ theme_yellow }};
+ height: 1.9em;
+ /*
+ border-top: 1px solid {{ theme_border }};
+ border-bottom: 1px solid {{ theme_border }};
+ */
+}
+
+div.related ul li {
+ margin: 0 5px 0 0;
+ padding: 0;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: {{ theme_darkgray }};
+ /*text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);*/
+}
+
+div.related ul li a:hover {
+ text-decoration: underline;
+ text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
+}
+
+div.sphinxsidebarwrapper {
+ position: relative;
+ top: 0px;
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0 0px 15px 15px;
+ width: 210px;
+ float: left;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar .logo {
+ font-size: 1.8em;
+ color: #666;
+ font-weight: 300;
+ text-align: center;
+}
+
+div.sphinxsidebar .logo img {
+ vertical-align: middle;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #aaa;
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar h3 {
+ font-size: 1.5em;
+ /* border-top: 1px solid {{ theme_border }}; */
+ margin-top: 1em;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 1.2em;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-right: -15px;
+ margin-left: -15px;
+ padding-right: 14px;
+ padding-left: 14px;
+ color: #333;
+ font-weight: 300;
+ /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.5em;
+ border: none;
+}
+
+div.sphinxsidebar h3 a {
+ color: #333;
+}
+
+div.sphinxsidebar ul {
+ color: #444;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+ list-style-image: url(listitem.png);
+}
+
+div.footer {
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
+ padding: 2em;
+ text-align: center;
+ clear: both;
+ font-size: 0.8em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: {{ theme_darkgreen }};
+ text-decoration: none;
+}
+
+a:hover {
+ color: {{ theme_darkyellow }};
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 10px 0 0 0;
+ font-size: 2.4em;
+ color: {{ theme_darkgray }};
+ font-weight: 300;
+}
+
+h2 {
+ margin: 1.em 0 0.2em 0;
+ font-size: 1.5em;
+ font-weight: 300;
+ padding: 0;
+ color: {{ theme_darkgreen }};
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.3em;
+ font-weight: 300;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ text-decoration: none;
+}
+
+div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
+ color: {{ theme_darkgreen }} !important;
+ font-size: inherit !important;
+}
+
+a.headerlink {
+ color: {{ theme_green }} !important;
+ font-size: 12px;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none !important;
+ float: right;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 14px;
+ letter-spacing: -0.02em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border: 1px solid #ddd;
+ border-radius: 2px;
+ color: #333;
+ padding: 1px;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: {{ theme_darkgreen }};
+}
+
+a tt:hover {
+ color: {{ theme_darkyellow }};
+}
+
+pre {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 13px;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ border-radius: 2px;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 0px 7px;
+ border: 1px solid #ccc;
+ margin-left: 1em;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ border-radius: 2px;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin-top: 1em;
+ padding-top: 0.5em;
+ font-weight: bold;
+}
+
+div.warning {
+ border: 1px solid #940000;
+/* background-color: #FFCCCF;*/
+}
+
+div.warning p.admonition-title {
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+.viewcode-back {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
diff --git a/doc/_themes/pygments14/theme.conf b/doc/_themes/pygments14/theme.conf
new file mode 100644
index 00000000..fffe66d6
--- /dev/null
+++ b/doc/_themes/pygments14/theme.conf
@@ -0,0 +1,15 @@
+[theme]
+inherit = basic
+stylesheet = pygments14.css
+pygments_style = friendly
+
+[options]
+green = #66b55e
+darkgreen = #36852e
+darkgray = #666666
+border = #66b55e
+yellow = #f4cd00
+darkyellow = #d4ad00
+lightyellow = #fffbe3
+background = #f9f9f9
+font = PT Sans
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 00000000..864ec7a1
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+#
+# Pygments documentation build configuration file, created by
+# sphinx-quickstart on Sat Jan 18 17:07:37 2014.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+import pygments
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Pygments'
+copyright = u'2014, Georg Brandl'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = pygments.__version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+#pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'pygments14'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['_themes']
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': 'indexsidebar.html',
+ 'docs/*': 'docssidebar.html'}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Pygmentsdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'Pygments.tex', u'Pygments Documentation',
+ u'Georg Brandl', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'pygments', u'Pygments Documentation',
+ [u'Georg Brandl'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Pygments', u'Pygments Documentation',
+ u'Georg Brandl', 'Pygments', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/docs/api.rst b/doc/docs/api.rst
new file mode 100644
index 00000000..123a4643
--- /dev/null
+++ b/doc/docs/api.rst
@@ -0,0 +1,316 @@
+.. -*- mode: rst -*-
+
+=====================
+The full Pygments API
+=====================
+
+This page describes the Pygments API.
+
+High-level API
+==============
+
+.. module:: pygments
+
+Functions from the :mod:`pygments` module:
+
+.. function:: lex(code, lexer)
+
+ Lex `code` with the `lexer` (must be a `Lexer` instance)
+ and return an iterable of tokens. Currently, this only calls
+ `lexer.get_tokens()`.
+
+.. function:: format(tokens, formatter, outfile=None)
+
+ Format a token stream (iterable of tokens) `tokens` with the
+ `formatter` (must be a `Formatter` instance). The result is
+ written to `outfile`, or if that is ``None``, returned as a
+ string.
+
+.. function:: highlight(code, lexer, formatter, outfile=None)
+
+ This is the most high-level highlighting function.
+ It combines `lex` and `format` in one function.
+
+
+.. module:: pygments.lexers
+
+Functions from :mod:`pygments.lexers`:
+
+.. function:: get_lexer_by_name(alias, **options)
+
+ Return an instance of a `Lexer` subclass that has `alias` in its
+ aliases list. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
+
+.. function:: get_lexer_for_filename(fn, **options)
+
+ Return a `Lexer` subclass instance that has a filename pattern
+ matching `fn`. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
+ is found.
+
+.. function:: get_lexer_for_mimetype(mime, **options)
+
+ Return a `Lexer` subclass instance that has `mime` in its mimetype
+ list. The lexer is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+ is found.
+
+.. function:: guess_lexer(text, **options)
+
+ Return a `Lexer` subclass instance that's guessed from the text in
+ `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+ class is called with the text as argument, and the lexer which returned the
+ highest value will be instantiated and returned.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: guess_lexer_for_filename(filename, text, **options)
+
+ As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+ or `alias_filenames` that matches `filename` are taken into consideration.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: get_all_lexers()
+
+ Return an iterable over all registered lexers, yielding tuples in the
+ format::
+
+ (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.formatters
+
+Functions from :mod:`pygments.formatters`:
+
+.. function:: get_formatter_by_name(alias, **options)
+
+ Return an instance of a :class:`.Formatter` subclass that has `alias` in its
+ aliases list. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
+ alias is found.
+
+.. function:: get_formatter_for_filename(fn, **options)
+
+ Return a :class:`.Formatter` subclass instance that has a filename pattern
+ matching `fn`. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
+ is found.
+
+
+.. module:: pygments.styles
+
+Functions from :mod:`pygments.styles`:
+
+.. function:: get_style_by_name(name)
+
+ Return a style class by its short name. The names of the builtin styles
+ are listed in :data:`pygments.styles.STYLE_MAP`.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
+ found.
+
+.. function:: get_all_styles()
+
+ Return an iterable over all registered styles, yielding their names.
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.lexer
+
+Lexers
+======
+
+The base lexer class from which all lexers are derived is:
+
+.. class:: Lexer(**options)
+
+ The constructor takes a \*\*keywords dictionary of options.
+ Every subclass must first process its own options and then call
+ the `Lexer` constructor, since it processes the `stripnl`,
+ `stripall` and `tabsize` options.
+
+ An example looks like this:
+
+ .. sourcecode:: python
+
+ def __init__(self, **options):
+ self.compress = options.get('compress', '')
+ Lexer.__init__(self, **options)
+
+ As these options must all be specifiable as strings (due to the
+ command line usage), there are various utility functions
+ available to help with that, see `Option processing`_.
+
+ .. method:: get_tokens(text)
+
+ This method is the basic interface of a lexer. It is called by
+ the `highlight()` function. It must process the text and return an
+ iterable of ``(tokentype, value)`` pairs from `text`.
+
+ Normally, you don't need to override this method. The default
+ implementation processes the `stripnl`, `stripall` and `tabsize`
+ options and then yields all tokens from `get_tokens_unprocessed()`,
+ with the ``index`` dropped.
+
+ .. method:: get_tokens_unprocessed(text)
+
+ This method should process the text and return an iterable of
+ ``(index, tokentype, value)`` tuples where ``index`` is the starting
+ position of the token within the input text.
+
+ This method must be overridden by subclasses.
+
+ .. staticmethod:: analyse_text(text)
+
+ A static method which is called for lexer guessing. It should analyse
+ the text and return a float in the range from ``0.0`` to ``1.0``.
+ If it returns ``0.0``, the lexer will not be selected as the most
+ probable one, if it returns ``1.0``, it will be selected immediately.
+
+ .. note:: You don't have to add ``@staticmethod`` to the definition of
+ this method, this will be taken care of by the Lexer's metaclass.
+
+ For a list of known tokens have a look at the :doc:`tokens` page.
+
+ A lexer also can have the following attributes (in fact, they are mandatory
+ except `alias_filenames`) that are used by the builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the lexer, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to look up
+ the lexer from a list, e.g. using `get_lexer_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of `fnmatch` patterns that match filenames which contain
+ content for this lexer. The patterns in this list should be unique among
+ all lexers.
+
+ .. attribute:: alias_filenames
+
+ A list of `fnmatch` patterns that match filenames which may or may not
+ contain content for this lexer. This list is used by the
+ :func:`.guess_lexer_for_filename()` function, to determine which lexers
+ are then included in guessing the correct one. That means that
+ e.g. every lexer for HTML and a template language should include
+ ``\*.html`` in this list.
+
+ .. attribute:: mimetypes
+
+ A list of MIME types for content that can be lexed with this
+ lexer.
+
+
+.. module:: pygments.formatter
+
+Formatters
+==========
+
+A formatter is derived from this class:
+
+
+.. class:: Formatter(**options)
+
+ As with lexers, this constructor processes options and then must call the
+ base class :meth:`__init__`.
+
+ The :class:`Formatter` class recognizes the options `style`, `full` and
+ `title`. It is up to the formatter class whether it uses them.
+
+ .. method:: get_style_defs(arg='')
+
+ This method must return statements or declarations suitable to define
+ the current style for subsequent highlighted text (e.g. CSS classes
+ in the `HTMLFormatter`).
+
+ The optional argument `arg` can be used to modify the generation and
+ is formatter dependent (it is standardized because it can be given on
+ the command line).
+
+ This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
+ the `arg` is then given by the ``-a`` option.
+
+ .. method:: format(tokensource, outfile)
+
+ This method must format the tokens from the `tokensource` iterable and
+ write the formatted version to the file object `outfile`.
+
+ Formatter options can control how exactly the tokens are converted.
+
+ .. versionadded:: 0.7
+ A formatter must have the following attributes that are used by the
+ builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the formatter, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to look up
+ the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of :mod:`fnmatch` patterns that match filenames for which this
+ formatter can produce output. The patterns in this list should be unique
+ among all formatters.
+
+
+.. module:: pygments.util
+
+Option processing
+=================
+
+The :mod:`pygments.util` module has some utility functions usable for option
+processing:
+
+.. exception:: OptionError
+
+ This exception will be raised by all option processing functions if
+ the type or value of the argument is not correct.
+
+.. function:: get_bool_opt(options, optname, default=None)
+
+ Interpret the key `optname` from the dictionary `options` as a boolean and
+ return it. Return `default` if `optname` is not in `options`.
+
+ The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+ ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+ (matched case-insensitively).
+
+.. function:: get_int_opt(options, optname, default=None)
+
+ As :func:`get_bool_opt`, but interpret the value as an integer.
+
+.. function:: get_list_opt(options, optname, default=None)
+
+ If the key `optname` from the dictionary `options` is a string,
+ split it at whitespace and return it. If it is already a list
+ or a tuple, it is returned as a list.
+
+.. function:: get_choice_opt(options, optname, allowed, default=None)
+
+ If the key `optname` from the dictionary is not in the sequence
+ `allowed`, raise an error, otherwise return it.
+
+ .. versionadded:: 0.8
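
[Editorial note, not part of the commit: a minimal end-to-end sketch of the
high-level API documented in this new file, using only the entry points
described above::

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import get_formatter_by_name

    # Components are looked up by alias; options are passed at instantiation.
    lexer = get_lexer_by_name('python', stripall=True)
    formatter = get_formatter_by_name('html', linenos=True)

    # highlight() combines lex() and format(); with no outfile given,
    # it returns the formatted result as a string.
    print(highlight('print "Hello World"', lexer, formatter))

]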
diff --git a/doc/docs/authors.rst b/doc/docs/authors.rst
new file mode 100644
index 00000000..f8373f0a
--- /dev/null
+++ b/doc/docs/authors.rst
@@ -0,0 +1,4 @@
+Full contributor list
+=====================
+
+.. include:: ../../AUTHORS
diff --git a/doc/docs/changelog.rst b/doc/docs/changelog.rst
new file mode 100644
index 00000000..f264cab0
--- /dev/null
+++ b/doc/docs/changelog.rst
@@ -0,0 +1 @@
+.. include:: ../../CHANGES
diff --git a/docs/src/cmdline.txt b/doc/docs/cmdline.rst
index a48a5c27..bf0177a3 100644
--- a/docs/src/cmdline.txt
+++ b/doc/docs/cmdline.rst
@@ -4,8 +4,8 @@
Command Line Interface
======================
-You can use Pygments from the shell, provided you installed the `pygmentize`
-script::
+You can use Pygments from the shell, provided you installed the
+:program:`pygmentize` script::
$ pygmentize test.py
print "Hello World"
@@ -28,7 +28,7 @@ written to stdout.
The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted
if an output file name is given and has a supported extension).
If no output file name is given and ``-f`` is omitted, the
-`TerminalFormatter` is used.
+:class:`.TerminalFormatter` is used.
The above command could therefore also be given as::
@@ -82,14 +82,15 @@ Usage is as follows::
generates a CSS style sheet (because you selected the HTML formatter) for
the "colorful" style prepending a ".syntax" selector to all style rules.
-For an explanation what ``-a`` means for `a particular formatter`_, look for
-the `arg` argument for the formatter's `get_style_defs()` method.
+For an explanation of what ``-a`` means for :doc:`a particular formatter
+<formatters>`, look for the `arg` argument for the formatter's
+:meth:`.get_style_defs()` method.
Getting lexer names
-------------------
-*New in Pygments 1.0.*
+.. versionadded:: 1.0
The ``-N`` option guesses a lexer name for a given filename, so that ::
@@ -125,7 +126,7 @@ will print the help for the Python lexer, etc.
A note on encodings
-------------------
-*New in Pygments 0.9.*
+.. versionadded:: 0.9
Pygments tries to be smart regarding encodings in the formatting process:
@@ -141,7 +142,4 @@ Pygments tries to be smart regarding encodings in the formatting process:
* If you don't give an encoding and haven't given an output file (that means
output is written to the console), the default encoding for lexer and
- formatter is the terminal encoding (`sys.stdout.encoding`).
-
-
-.. _a particular formatter: formatters.txt
+ formatter is the terminal encoding (``sys.stdout.encoding``).
diff --git a/docs/src/filterdevelopment.txt b/doc/docs/filterdevelopment.rst
index c60e1e84..bc399a6f 100644
--- a/docs/src/filterdevelopment.txt
+++ b/doc/docs/filterdevelopment.rst
@@ -4,7 +4,7 @@
Write your own filter
=====================
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
Writing own filters is very easy. All you have to do is to subclass
the `Filter` class and override the `filter` method. Additionally a
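
[Editorial note, not part of the commit: to make the subclass-and-override
recipe above concrete, a minimal filter sketch (the class name and behavior
are hypothetical)::

    from pygments.filter import Filter
    from pygments.token import Comment

    class ShoutingCommentsFilter(Filter):
        """Hypothetical filter: uppercase the text of all comment tokens."""

        def filter(self, lexer, stream):
            for ttype, value in stream:
                if ttype in Comment:
                    value = value.upper()
                yield ttype, value

]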
diff --git a/docs/src/filters.txt b/doc/docs/filters.rst
index 522f6330..ff2519a3 100644
--- a/docs/src/filters.txt
+++ b/doc/docs/filters.rst
@@ -4,7 +4,7 @@
Filters
=======
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
You can filter token streams coming from lexers to improve or annotate the
output. For example, you can highlight special words in comments, convert
@@ -31,12 +31,11 @@ To get a list of all registered filters by name, you can use the
`get_all_filters()` function from the `pygments.filters` module that returns an
iterable for all known filters.
-If you want to write your own filter, have a look at `Write your own filter`_.
-
-.. _Write your own filter: filterdevelopment.txt
+If you want to write your own filter, have a look at :doc:`Write your own filter
+<filterdevelopment>`.
Builtin Filters
===============
-[builtin_filter_docs]
+.. pygmentsdoc:: filters
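
[Editorial note, not part of the commit: a usage sketch for the registered
filters described above; the ``keywordcase`` name and its ``case`` option are
assumed from the standard builtin filters::

    from pygments.lexers import get_lexer_by_name
    from pygments.filters import get_all_filters

    print(list(get_all_filters()))  # names of all registered filters

    lexer = get_lexer_by_name('python')
    # Attach a builtin filter by name; filter options are keyword arguments.
    lexer.add_filter('keywordcase', case='upper')

]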
diff --git a/docs/src/formatterdevelopment.txt b/doc/docs/formatterdevelopment.rst
index 83a13b6a..2bfac05c 100644
--- a/docs/src/formatterdevelopment.txt
+++ b/doc/docs/formatterdevelopment.rst
@@ -4,7 +4,7 @@
Write your own formatter
========================
-As well as creating `your own lexer <lexerdevelopment.txt>`_, writing a new
+As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new
formatter for Pygments is easy and straightforward.
A formatter is a class that is initialized with some keyword arguments (the
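
[Editorial note, not part of the commit: a skeletal formatter along the lines
described there — a sketch against the standard ``Formatter`` interface, with
encoding handling omitted for brevity::

    from pygments.formatter import Formatter

    class PlainTextFormatter(Formatter):
        """Hypothetical formatter: write token values with no markup."""
        name = 'Plain text example'
        aliases = ['plain-example']

        def format(self, tokensource, outfile):
            for ttype, value in tokensource:
                outfile.write(value)

]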
diff --git a/docs/src/formatters.txt b/doc/docs/formatters.rst
index 7a590648..9e7074e8 100644
--- a/docs/src/formatters.txt
+++ b/doc/docs/formatters.rst
@@ -12,8 +12,6 @@ Common options
All formatters support these options:
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This will
be used to convert the token strings (which are Unicode strings)
to byte strings in the output (default: ``None``).
@@ -30,19 +28,21 @@ All formatters support these options:
supports Unicode arguments to `write()`. Using a regular file object
wouldn't work.
-`outencoding`
- *New in Pygments 0.7.*
+ .. versionadded:: 0.6
+`outencoding`
When using Pygments from the command line, any `encoding` option given is
passed to the lexer and the formatter. This is sometimes not desirable,
for example if you want to set the input encoding to ``"guess"``.
Therefore, `outencoding` has been introduced which overrides `encoding`
for the formatter if given.
+ .. versionadded:: 0.7
+
Formatter classes
=================
-All these classes are importable from `pygments.formatters`.
+All these classes are importable from :mod:`pygments.formatters`.
-[builtin_formatter_docs]
+.. pygmentsdoc:: formatters
diff --git a/doc/docs/index.rst b/doc/docs/index.rst
new file mode 100644
index 00000000..30d5c085
--- /dev/null
+++ b/doc/docs/index.rst
@@ -0,0 +1,66 @@
+Pygments documentation
+======================
+
+**Starting with Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ ../download
+ quickstart
+ cmdline
+
+**Builtin components**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexers
+ filters
+ formatters
+ styles
+
+**Reference**
+
+.. toctree::
+ :maxdepth: 1
+
+ unicode
+ tokens
+ api
+
+**Hacking for Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexerdevelopment
+ formatterdevelopment
+ filterdevelopment
+ plugins
+
+**Hints and tricks**
+
+.. toctree::
+ :maxdepth: 1
+
+ rstdirective
+ moinmoin
+ java
+ integrate
+
+**About Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ changelog
+ authors
+
+
+If you find bugs or have suggestions for the documentation, please look
+:ref:`here <contribute>` for info on how to contact the team.
+
+.. XXX You can download an offline version of this documentation from the
+ :doc:`download page </download>`.
+
diff --git a/docs/src/integrate.txt b/doc/docs/integrate.rst
index 6f8c1253..03fc268f 100644
--- a/docs/src/integrate.txt
+++ b/doc/docs/integrate.rst
@@ -23,8 +23,9 @@ Markdown
--------
Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code
-that uses Pygments to render source code in `external/markdown-processor.py`.
-You can copy and adapt it to your liking.
+that uses Pygments to render source code in
+:file:`external/markdown-processor.py`. You can copy and adapt it to your
+liking.
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
@@ -41,8 +42,3 @@ Bash completion
The source distribution contains a file ``external/pygments.bashcomp`` that
sets up completion for the ``pygmentize`` command in bash.
-
-Java
-----
-
-See the `Java quickstart <java.txt>`_ document.
diff --git a/docs/src/java.txt b/doc/docs/java.rst
index 5eb6196a..5eb6196a 100644
--- a/docs/src/java.txt
+++ b/doc/docs/java.rst
diff --git a/docs/src/lexerdevelopment.txt b/doc/docs/lexerdevelopment.rst
index 730a08b2..eab1306a 100644
--- a/docs/src/lexerdevelopment.txt
+++ b/doc/docs/lexerdevelopment.rst
@@ -7,13 +7,13 @@ Write your own lexer
If a lexer for your favorite language is missing in the Pygments package, you can
easily write your own and extend Pygments.
-All you need can be found inside the `pygments.lexer` module. As you can read in
-the `API documentation <api.txt>`_, a lexer is a class that is initialized with
-some keyword arguments (the lexer options) and that provides a
-`get_tokens_unprocessed()` method which is given a string or unicode object with
-the data to parse.
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation <api>`, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to parse.
-The `get_tokens_unprocessed()` method must return an iterator or iterable
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
containing tuples in the form ``(index, token, value)``. Normally you don't need
to do this since there are numerous base lexers you can subclass.
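+
+A minimal, hypothetical sketch of such a lexer, which emits the whole input as
+a single token (in practice you would subclass one of the base lexers):
+
+.. sourcecode:: python
+
+    from pygments.lexer import Lexer
+    from pygments.token import Text
+
+    class NaiveLexer(Lexer):
+        name = 'Naive'
+        aliases = ['naive']
+
+        def get_tokens_unprocessed(self, text):
+            # one (index, tokentype, value) tuple covering the whole input
+            yield 0, Text, text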
@@ -21,9 +21,9 @@ to do this since there are numerous base lexers you can subclass.
RegexLexer
==========
-A very powerful (but quite easy to use) lexer is the `RegexLexer`. This lexer
-base class allows you to define lexing rules in terms of *regular expressions*
-for different *states*.
+A very powerful (but quite easy to use) lexer is the :class:`RegexLexer`. This
+lexer base class allows you to define lexing rules in terms of *regular
+expressions* for different *states*.
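+
+As a taste, a tiny hypothetical subclass (a full example follows later in this
+document):
+
+.. sourcecode:: python
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import Comment, Text
+
+    class TinyLexer(RegexLexer):
+        name = 'Tiny'
+        tokens = {
+            'root': [
+                (r'#.*$', Comment.Single),  # shell-style line comments
+                (r'[^#\n]+', Text),         # runs of anything else
+                (r'\n', Text),              # newlines
+            ],
+        }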
States are groups of regular expressions that are matched against the input
string at the *current position*. If one of these expressions matches, a
@@ -289,8 +289,9 @@ There are a few more things you can do with states:
the closing ``*/``. Then, both states are popped from the stack again and
lexing continues in the root state.
- *New in Pygments 0.9:* The tuple can contain the special ``'#push'`` and
- ``'#pop'`` (but not ``'#pop:n'``) directives.
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
- You can include the rules of a state in the definition of another. This is
@@ -598,6 +599,4 @@ the ``get_tokens_unprocessed()`` method. The following lexer subclasses the
The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
-**Note** Do not confuse this with the `filter`_ system.
-
-.. _filter: filters.txt
+.. note:: Do not confuse this with the :doc:`filter <filters>` system.
diff --git a/docs/src/lexers.txt b/doc/docs/lexers.rst
index 016de6ce..914b53ef 100644
--- a/docs/src/lexers.txt
+++ b/doc/docs/lexers.rst
@@ -18,14 +18,13 @@ Currently, **all lexers** support these options:
`ensurenl`
Make sure that the input ends with a newline (default: ``True``). This
is required for some lexers that consume input linewise.
- *New in Pygments 1.3.*
+
+ .. versionadded:: 1.3
`tabsize`
If given and greater than 0, expand tabs in the input (default: ``0``).
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This encoding
will be used to convert the input string to Unicode (if it is not already
a Unicode string). The default is ``"latin1"``.
@@ -35,18 +34,21 @@ Currently, **all lexers** support these options:
`chardet library <http://chardet.feedparser.org/>`__ is used to
guess the encoding of the input.
+ .. versionadded:: 0.6
+
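+For example, a short sketch passing two of these options:
+
+.. sourcecode:: python
+
+    from pygments.lexers import get_lexer_by_name
+
+    # expand tabs to 4 spaces and decode the input as UTF-8
+    lexer = get_lexer_by_name('python', tabsize=4, encoding='utf-8')
+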
The "Short Names" field lists the identifiers that can be used with the
`get_lexer_by_name()` function.
These lexers are builtin and can be imported from `pygments.lexers`:
-[builtin_lexer_docs]
+.. pygmentsdoc:: lexers
+
Iterating over all lexers
-------------------------
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
To get all lexers (both the builtin and the plugin ones), you can
use the `get_all_lexers()` function from the `pygments.lexers`
diff --git a/docs/src/moinmoin.txt b/doc/docs/moinmoin.rst
index 8b2216b3..8b2216b3 100644
--- a/docs/src/moinmoin.txt
+++ b/doc/docs/moinmoin.rst
diff --git a/docs/src/plugins.txt b/doc/docs/plugins.rst
index a6f8d7b0..a6f8d7b0 100644
--- a/docs/src/plugins.txt
+++ b/doc/docs/plugins.rst
diff --git a/docs/src/quickstart.txt b/doc/docs/quickstart.rst
index 40409104..dba7698a 100644
--- a/docs/src/quickstart.txt
+++ b/doc/docs/quickstart.rst
@@ -58,8 +58,8 @@ can be produced by:
print HtmlFormatter().get_style_defs('.highlight')
-The argument to `get_style_defs` is used as an additional CSS selector: the output
-may look like this:
+The argument to :func:`get_style_defs` is used as an additional CSS selector:
+the output may look like this:
.. sourcecode:: css
@@ -71,9 +71,9 @@ may look like this:
Options
=======
-The `highlight()` function supports a fourth argument called `outfile`, it must be
-a file object if given. The formatted output will then be written to this file
-instead of being returned as a string.
+The :func:`highlight()` function supports a fourth argument called *outfile*;
+it must be a file object if given. The formatted output will then be written
+to this file instead of being returned as a string.
Lexers and formatters both support options. They are given to them as keyword
arguments either to the class or to the lookup method:
@@ -103,9 +103,9 @@ Important options include:
For an overview of builtin lexers and formatters and their options, visit the
-`lexer <lexers.txt>`_ and `formatters <formatters.txt>`_ lists.
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
-For a documentation on filters, see `this page <filters.txt>`_.
+For documentation on filters, see :doc:`this page <filters>`.
Lexer and formatter lookup
@@ -131,9 +131,9 @@ one of the following methods:
All these functions accept keyword arguments; they will be passed to the lexer
as options.
-A similar API is available for formatters: use `get_formatter_by_name()` and
-`get_formatter_for_filename()` from the `pygments.formatters` module
-for this purpose.
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
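+
+A short sketch of both lookup APIs side by side:
+
+.. sourcecode:: python
+
+    from pygments.lexers import get_lexer_by_name
+    from pygments.formatters import get_formatter_by_name
+
+    lexer = get_lexer_by_name('python', stripall=True)
+    formatter = get_formatter_by_name('html', linenos=True)
+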
Guessing lexers
@@ -153,16 +153,17 @@ or some template tags), use these functions:
>>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
<pygments.lexers.PythonLexer>
-`guess_lexer()` passes the given content to the lexer classes' `analyse_text()`
-method and returns the one for which it returns the highest number.
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
All lexers have two different filename pattern lists: the primary and the
-secondary one. The `get_lexer_for_filename()` function only uses the primary
-list, whose entries are supposed to be unique among all lexers.
-`guess_lexer_for_filename()`, however, will first loop through all lexers and
-look at the primary and secondary filename patterns if the filename matches.
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, first loops through all lexers and
+checks whether the filename matches one of their primary or secondary patterns.
If only one lexer matches, it is returned, else the guessing mechanism of
-`guess_lexer()` is used with the matching lexers.
+:func:`.guess_lexer()` is used with the matching lexers.
As usual, keyword arguments to these functions are given to the created lexer
as options.
@@ -171,7 +172,8 @@ as options.
Command line usage
==================
-You can use Pygments from the command line, using the `pygmentize` script::
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
$ pygmentize test.py
@@ -199,4 +201,5 @@ it can be created with::
where ``default`` is the style name.
-More options and tricks and be found in the `command line reference <cmdline.txt>`_.
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
diff --git a/docs/src/rstdirective.txt b/doc/docs/rstdirective.rst
index c0d503b3..c0d503b3 100644
--- a/docs/src/rstdirective.txt
+++ b/doc/docs/rstdirective.rst
diff --git a/docs/src/styles.txt b/doc/docs/styles.rst
index e3e9cfb3..7ef4de1b 100644
--- a/docs/src/styles.txt
+++ b/doc/docs/styles.rst
@@ -68,7 +68,7 @@ they can be used for a variety of formatters.)
To make the style usable for Pygments, you must
-* either register it as a plugin (see `the plugin docs <plugins.txt>`_)
+* either register it as a plugin (see :doc:`the plugin docs <plugins>`)
* or drop it into the `styles` subpackage of your Pygments distribution one style
class per style, where the file name is the style name and the class name is
`StylenameClass`. For example, if your style should be called
@@ -132,7 +132,7 @@ To get a list of known styles you can use this snippet:
Getting a list of available styles
==================================
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
Because it could be that a plugin registered a style, there is
a way to iterate over all styles:
diff --git a/docs/src/tokens.txt b/doc/docs/tokens.rst
index 4900a9ab..ffd87ab7 100644
--- a/docs/src/tokens.txt
+++ b/doc/docs/tokens.rst
@@ -4,7 +4,9 @@
Builtin Tokens
==============
-Inside the `pygments.token` module, there is a special object called `Token`
+.. module:: pygments.token
+
+In the :mod:`pygments.token` module, there is a special object called `Token`
that is used to create token types.
You can create a new token type by accessing an attribute of `Token`:
@@ -30,8 +32,8 @@ As of Pygments 0.7 you can also use the ``in`` operator to perform set tests:
>>> Comment in Comment.Multi
False
-This can be useful in `filters`_ and if you write lexers on your own without
-using the base lexers.
+This can be useful in :doc:`filters <filters>` and if you write lexers on your
+own without using the base lexers.
You can also split a token type into a hierarchy, and get the parent of it:
@@ -55,7 +57,7 @@ For some tokens aliases are already defined:
>>> String
Token.Literal.String
-Inside the `pygments.token` module the following aliases are defined:
+Inside the :mod:`pygments.token` module the following aliases are defined:
============= ============================ ====================================
`Text` `Token.Text` for any type of text data
@@ -280,7 +282,7 @@ Operators
Punctuation
===========
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
`Punctuation`
For any punctuation which is not an operator (e.g. ``[``, ``(``...)
@@ -345,5 +347,3 @@ highlight a programming language but a patch file.
`Generic.Traceback`
Marks the token value as a part of an error traceback.
-
-.. _filters: filters.txt
diff --git a/docs/src/unicode.txt b/doc/docs/unicode.rst
index dc6394a9..e79b4bec 100644
--- a/docs/src/unicode.txt
+++ b/doc/docs/unicode.rst
@@ -3,8 +3,8 @@ Unicode and Encodings
=====================
Since Pygments 0.6, all lexers use unicode strings internally. Because of that
-you might encounter the occasional `UnicodeDecodeError` if you pass strings with the
-wrong encoding.
+you might encounter the occasional :exc:`UnicodeDecodeError` if you pass strings
+with the wrong encoding.
Per default all lexers have their input encoding set to `latin1`.
If you pass a lexer a string object (not unicode), it tries to decode the data
@@ -39,11 +39,12 @@ Unicode string with this encoding before writing it. This is the case for
`sys.stdout`, for example. The other formatters don't have that behavior.
Another note: If you call Pygments via the command line (`pygmentize`),
-encoding is handled differently, see `the command line docs <cmdline.txt>`_.
+encoding is handled differently; see :doc:`the command line docs <cmdline>`.
-*New in Pygments 0.7*: the formatters now also accept an `outencoding` option
-which will override the `encoding` option if given. This makes it possible to
-use a single options dict with lexers and formatters, and still have different
-input and output encodings.
+.. versionadded:: 0.7
+ The formatters now also accept an `outencoding` option which will override
+ the `encoding` option if given. This makes it possible to use a single
+ options dict with lexers and formatters, and still have different input and
+ output encodings.
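+
+A minimal sketch of that single-options-dict pattern:
+
+.. sourcecode:: python
+
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    options = dict(encoding='utf-8', outencoding='latin1')
+    lexer = PythonLexer(**options)        # input is decoded as UTF-8
+    formatter = HtmlFormatter(**options)  # outencoding overrides encoding
+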
.. _chardet: http://chardet.feedparser.org/
diff --git a/doc/download.rst b/doc/download.rst
new file mode 100644
index 00000000..cf32f481
--- /dev/null
+++ b/doc/download.rst
@@ -0,0 +1,41 @@
+Download and installation
+=========================
+
+The current release is version |version|.
+
+Packaged versions
+-----------------
+
+You can download it `from the Python Package Index
+<http://pypi.python.org/pypi/Pygments>`_. For installation of packages from
+PyPI, we recommend `Pip <http://www.pip-installer.org>`_, which works on all
+major platforms.
+
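+For example::
+
+    pip install Pygments
+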
+Under Linux, most distributions include a package for Pygments, usually called
+``pygments`` or ``python-pygments``. You can install it with the package
+manager as usual.
+
+Development sources
+-------------------
+
+We're using the `Mercurial <http://selenic.com/mercurial>`_ version control
+system. You can get the development source using this command::
+
+ hg clone http://bitbucket.org/birkenfeld/pygments-main pygments
+
+Development takes place at `Bitbucket
+<http://bitbucket.org/birkenfeld/pygments-main>`_, where you can browse the
+source online `here <http://bitbucket.org/birkenfeld/pygments-main/src>`_.
+
+The latest changes in the development source code are listed in the `changelog
+<http://bitbucket.org/birkenfeld/pygments-main/src/tip/CHANGES>`_.
+
+.. Documentation
+ -------------
+
+.. XXX todo
+
+ You can download the <a href="/docs/">documentation</a> either as
+ a bunch of rst files from the Mercurial repository, see above, or
+ as a tar.gz containing rendered HTML files:</p>
+ <p><a href="/docs/download/pygmentsdocs.tar.gz">pygmentsdocs.tar.gz</a></p>
diff --git a/doc/faq.rst b/doc/faq.rst
new file mode 100644
index 00000000..0f65b9fe
--- /dev/null
+++ b/doc/faq.rst
@@ -0,0 +1,143 @@
+:orphan:
+
+Pygments FAQ
+=============
+
+What is Pygments?
+-----------------
+
+Pygments is a syntax highlighting engine written in Python. That means it will
+take source code (or other markup) in a supported language and output a
+processed version (in different formats) containing syntax highlighting markup.
+
+Its features include:
+
+* a wide range of common languages and markup formats is supported (look here
+ for a list)
+* new languages and formats are added easily
+* a number of output formats are available, including:
+
+ - HTML
+ - ANSI sequences (console output)
+ - LaTeX
+ - RTF
+
+* it is usable as a command-line tool and as a library
+* parsing and formatting is fast
+
+Pygments is licensed under the BSD license.
+
+Where does the name Pygments come from?
+---------------------------------------
+
+*Py* of course stands for Python, while *pigments* are used for coloring paint,
+and in this case, source code!
+
+What are the system requirements?
+---------------------------------
+
+Pygments only needs a standard Python install: version 2.6 or higher, or
+version 3.3 or higher for Python 3. No additional libraries are needed.
+
+How can I use Pygments?
+-----------------------
+
+Pygments is usable as a command-line tool as well as a library.
+
+From the command line, usage looks like this (assuming the pygmentize script is
+properly installed)::
+
+ pygmentize -f html /path/to/file.py
+
+This will print an HTML-highlighted version of ``/path/to/file.py`` to
+standard output.
+
+For complete help, please run ``pygmentize -h``.
+
+Usage as a library is thoroughly demonstrated in the Documentation section.
+
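+A minimal library example (mirroring the quickstart)::
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    print highlight('print "Hello World!"', PythonLexer(), HtmlFormatter())
+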
+How do I make a new style?
+--------------------------
+
+Please see the documentation on styles.
+
+How can I report a bug or suggest a feature?
+--------------------------------------------
+
+Please report bugs and feature wishes in the tracker at Bitbucket.
+
+You can also e-mail the author or use IRC, see the contact details.
+
+I want support for this language!
+--------------------------------------
+
+Instead of waiting for others to include language support, why not write it
+yourself? All you have to know is :doc:`outlined in the docs
+<docs/lexerdevelopment>`.
+
+Can I use Pygments for programming language processing?
+-------------------------------------------------------
+
+The Pygments lexing machinery is quite powerful and can be used to build lexers
+for basically all languages. However, parsing them is not possible, though some
+lexers go some steps in this direction in order to e.g. highlight function names
+differently.
+
+Also, error reporting is outside the scope of Pygments. It focuses on correctly
+highlighting syntactically valid documents, not on finding and compensating for
+errors.
+
+Who uses Pygments?
+------------------
+
+This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
+
+* `Pygments API <http://pygments.appspot.com/>`_, an HTTP POST interface to Pygments
+* `The Sphinx documentation builder <http://sphinx.pocoo.org/>`_, for embedded source examples
+* `rst2pdf <http://code.google.com/p/rst2pdf/>`_, a reStructuredText to PDF converter
+* `Zine <http://zine.pocoo.org/>`_, a Python blogging system
+* `Trac <http://trac.edgewall.org/>`_, the universal project management tool
+* `Bruce <http://r1chardj0n3s.googlepages.com/bruce>`_, a reStructuredText presentation tool
+* `AsciiDoc <http://www.methods.co.nz/asciidoc/>`_, a text-based documentation generator
+* `ActiveState Code <http://code.activestate.com/>`_, the Python Cookbook successor
+* `ViewVC <http://viewvc.org/>`_, a web-based version control repository browser
+* `BzrFruit <http://repo.or.cz/w/bzrfruit.git>`_, a Bazaar branch viewer
+* `QBzr <http://bazaar-vcs.org/QBzr>`_, a cross-platform Qt-based GUI front end for Bazaar
+* `BitBucket <http://bitbucket.org/>`_, a Mercurial and Git hosting site
+* `GitHub <http://github.com/>`_, a site offering secure Git hosting and collaborative development
+* `Review Board <http://www.review-board.org/>`_, a collaborative code reviewing tool
+* `skeletonz <http://orangoo.com/skeletonz/>`_, a Python powered content management system
+* `Diamanda <http://code.google.com/p/diamanda/>`_, a Django powered wiki system with support for Pygments
+* `Progopedia <http://progopedia.ru/>`_ (`English <http://progopedia.com/>`_),
+ an encyclopedia of programming languages
+* `Postmarkup <http://code.google.com/p/postmarkup/>`_, a BBCode to XHTML generator
+* `Language Comparison <http://michaelsilver.us/lc>`_, a site that compares different programming languages
+* `BPython <http://www.noiseforfree.com/bpython/>`_, a curses-based intelligent Python shell
+* `Challenge-You! <http://challenge-you.appspot.com/>`_, a site offering programming challenges
+* `PIDA <http://pida.co.uk/>`_, a universal IDE written in Python
+* `PuDB <http://pypi.python.org/pypi/pudb>`_, a console Python debugger
+* `XWiki <http://www.xwiki.org/>`_, a wiki-based development framework in Java, using Jython
+* `roux <http://ananelson.com/software/roux/>`_, a script for running R scripts
+ and creating beautiful output including graphs
+* `hurl <http://hurl.it/>`_, a web service for making HTTP requests
+* `wxHTMLPygmentizer <http://colinbarnette.net/projects/wxHTMLPygmentizer>`_ is
+ a GUI utility, used to make code-colorization easier
+* `WpPygments <http://blog.mirotin.net/?page_id=49>`_, a highlighter plugin for WordPress
+* `LodgeIt <http://paste.pocoo.org/>`_, a pastebin with XMLRPC support and diffs
+* `SpammCan <http://chrisarndt.de/projects/spammcan/>`_, a pastebin (demo see
+ `here <http://paste.chrisarndt.de/>`_)
+* `WowAce.com pastes <http://www.wowace.com/paste/>`_, a pastebin
+* `Siafoo <http://siafoo.net>`_, a tool for sharing and storing useful code and programming experience
+* `D source <http://www.dsource.org/>`_, a community for the D programming language
+* `dumpz.org <http://dumpz.org/>`_, a pastebin
+* `dpaste.com <http://dpaste.com/>`_, another Django pastebin
+* `PylonsHQ Pasties <http://pylonshq.com/pasties/new>`_, a pastebin
+* `Django snippets <http://www.djangosnippets.org/>`_, a pastebin for Django code
+* `Fayaa <http://www.fayaa.com/code/>`_, a Chinese pastebin
+* `Incollo.com <http://incollo.com>`_, a free collaborative debugging tool
+* `PasteBox <http://p.boxnet.eu/>`_, a pastebin focused on privacy
+* `xinotes.org <http://www.xinotes.org/>`_, a site to share notes, code snippets etc.
+* `hilite.me <http://www.hilite.me/>`_, a site to highlight code snippets
+* `patx.me <http://patx.me/paste>`_, a pastebin
+
+If you have a project or web site using Pygments, drop me a line, and I'll add a
+link here.
+
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 00000000..a0e41210
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,53 @@
+Welcome!
+========
+
+This is the home of Pygments. It is a generic syntax highlighter for general use
+in all kinds of software such as forum systems, wikis or other applications that
+need to prettify source code. Highlights are:
+
+* a wide range of common languages and markup formats is supported
+* special attention is paid to details that increase highlighting quality
+* new languages and formats are added easily; most languages use a simple regex-based lexing mechanism
+* a number of output formats are available, among them HTML, RTF, LaTeX and ANSI sequences
+* it is usable as a command-line tool and as a library
+* ... and it highlights even Brainf*ck!
+
+Read more in the FAQ list or the documentation, or download the latest release.
+
+Though Pygments has not yet won an award, we trust that you will notice it's a top-quality product <wink>.
+
+.. _contribute:
+
+Contribute
+----------
+
+Like every open-source project, we are always looking for volunteers to help us
+with programming. Python knowledge is required, but don't fear: Python is a very
+clear and easy to learn language.
+
+Development takes place on `Bitbucket
+<https://bitbucket.org/birkenfeld/pygments-main>`_, where the Mercurial
+repository, tickets and pull requests can be viewed.
+
+Our primary communication instrument is the IRC channel **#pocoo** on the
+Freenode network. To join it, let your IRC client connect to
+``irc.freenode.net`` and do ``/join #pocoo``.
+
+If you found a bug, just open a ticket in the Bitbucket tracker. Be sure to log
+in to be notified when the issue is fixed -- development is not fast-paced as
+the library is quite stable. You can also send an e-mail to the developers, see
+below.
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo <http://dev.pocoo.org/>`_ team and **Tim Hatch**.
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ docs/index
diff --git a/doc/languages.rst b/doc/languages.rst
new file mode 100644
index 00000000..426a576b
--- /dev/null
+++ b/doc/languages.rst
@@ -0,0 +1,149 @@
+:orphan:
+
+Supported languages
+===================
+
+Pygments supports an ever-growing range of languages. Watch this space...
+
+Programming languages
+---------------------
+
+* ActionScript
+* Ada
+* ANTLR
+* AppleScript
+* Assembly (various)
+* Asymptote
+* Awk
+* Befunge
+* Boo
+* BrainFuck
+* C, C++
+* C#
+* Clojure
+* CoffeeScript
+* ColdFusion
+* Common Lisp
+* Coq
+* `Cython <http://cython.org>`_
+* `D <http://digitalmars.com/d>`_
+* Dart
+* Delphi
+* Dylan
+* Erlang
+* Factor
+* Fancy
+* Fortran
+* F#
+* GAP
+* Gherkin (Cucumber)
+* GL shaders
+* Groovy
+* `Haskell <http://www.haskell.org>`_ (incl. Literate Haskell)
+* IDL
+* Io
+* Java
+* JavaScript
+* LLVM
+* Logtalk
+* `Lua <http://www.lua.org>`_
+* Matlab
+* MiniD
+* Modelica
+* Modula-2
+* MuPad
+* Nemerle
+* Nimrod
+* Objective-C
+* Objective-J
+* Octave
+* OCaml
+* PHP
+* `Perl <http://perl.org>`_
+* PovRay
+* PostScript
+* PowerShell
+* Prolog
+* `Python <http://www.python.org>`_ 2.x and 3.x (incl. console sessions and tracebacks)
+* Rebol
+* Redcode
+* `Ruby <http://www.ruby-lang.org>`_ (incl. irb sessions)
+* Rust
+* S, S-Plus, R
+* Scala
+* Scheme
+* Scilab
+* Smalltalk
+* SNOBOL
+* Tcl
+* Vala
+* Verilog
+* VHDL
+* Visual Basic.NET
+* Visual FoxPro
+* XQuery
+* Zephir
+
+Template languages
+------------------
+
+* Cheetah templates
+* `Django <http://www.djangoproject.com>`_ / `Jinja
+ <http://jinja.pocoo.org/jinja>`_ templates
+* ERB (Ruby templating)
+* `Genshi <http://genshi.edgewall.org>`_ (the Trac template language)
+* JSP (Java Server Pages)
+* `Myghty <http://www.myghty.org>`_ (the HTML::Mason based framework)
+* `Mako <http://www.makotemplates.org/>`_ (the Myghty successor)
+* `Smarty <http://smarty.php.net>`_ templates (PHP templating)
+* Tea
+
+Other markup
+------------
+
+* Apache config files
+* Bash shell scripts
+* BBCode
+* CMake
+* CSS
+* Debian control files
+* Diff files
+* DTD
+* Gettext catalogs
+* Gnuplot script
+* Groff markup
+* HTML
+* HTTP sessions
+* INI-style config files
+* IRC logs (irssi style)
+* Lighttpd config files
+* Makefiles
+* MoinMoin/Trac Wiki markup
+* MySQL
+* Nginx config files
+* POV-Ray scenes
+* Ragel
+* Redcode
+* ReST
+* Robot Framework
+* RPM spec files
+* SQL, also MySQL, SQLite
+* Squid configuration
+* TeX
+* tcsh
+* Vim Script
+* Windows batch files
+* XML
+* XSLT
+* YAML
+
+... that's all?
+---------------
+
+Well, why not write your own? Contributing to Pygments is easy and fun. Look
+:doc:`here <docs/lexerdevelopment>` for the docs on lexer development and
+:ref:`here <contribute>` for contact details.
+
+Note: the languages listed here are supported in the development version. The
+latest release may lack a few of them.
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 00000000..8803c985
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.ghc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/docs/pygmentize.1 b/doc/pygmentize.1
index 71bb6f9c..71bb6f9c 100644
--- a/docs/pygmentize.1
+++ b/doc/pygmentize.1
diff --git a/docs/generate.py b/docs/generate.py
deleted file mode 100755
index c379cded..00000000
--- a/docs/generate.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Generate Pygments Documentation
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Generates a bunch of html files containing the documentation.
-
- :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import os
-import sys
-from datetime import datetime
-from cgi import escape
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-from docutils.core import publish_parts
-from docutils.writers import html4css1
-
-from jinja2 import Template
-
-# try to use the right Pygments to build the docs
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-
-from pygments import highlight, __version__
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-
-
-LEXERDOC = '''
-`%s`
-%s
- :Short names: %s
- :Filename patterns: %s
- :Mimetypes: %s
-
-'''
-
-def generate_lexer_docs():
- from pygments.lexers import LEXERS
-
- out = []
-
- modules = {}
- moduledocstrings = {}
- for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]):
- module = data[0]
- mod = __import__(module, None, None, [classname])
- cls = getattr(mod, classname)
- if not cls.__doc__:
- print "Warning: %s does not have a docstring." % classname
- modules.setdefault(module, []).append((
- classname,
- cls.__doc__,
- ', '.join(data[2]) or 'None',
- ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
- ', '.join(data[4]) or 'None'))
- if module not in moduledocstrings:
- moduledocstrings[module] = mod.__doc__
-
- for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]):
- heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
- out.append('\n' + heading + '\n' + '-'*len(heading) + '\n')
- for data in lexers:
- out.append(LEXERDOC % data)
- return ''.join(out).decode('utf-8')
-
-def generate_formatter_docs():
- from pygments.formatters import FORMATTERS
-
- out = []
- for cls, data in sorted(FORMATTERS.iteritems(),
- key=lambda x: x[0].__name__):
- heading = cls.__name__
- out.append('`' + heading + '`\n' + '-'*(2+len(heading)) + '\n')
- out.append(cls.__doc__)
- out.append('''
- :Short names: %s
- :Filename patterns: %s
-
-
-''' % (', '.join(data[1]) or 'None', ', '.join(data[2]).replace('*', '\\*') or 'None'))
- return ''.join(out).decode('utf-8')
-
-def generate_filter_docs():
- from pygments.filters import FILTERS
-
- out = []
- for name, cls in FILTERS.iteritems():
- out.append('''
-`%s`
-%s
- :Name: %s
-''' % (cls.__name__, cls.__doc__, name))
- return ''.join(out).decode('utf-8')
-
-def generate_changelog():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'CHANGES'))
- f = file(fn)
- result = []
- in_header = False
- header = True
- for line in f:
- if header:
- if not in_header and line.strip():
- in_header = True
- elif in_header and not line.strip():
- header = False
- else:
- result.append(line.rstrip())
- f.close()
- return '\n'.join(result).decode('utf-8')
-
-def generate_authors():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'AUTHORS'))
- f = file(fn)
- r = f.read().rstrip().decode('utf-8')
- f.close()
- return r
-
-LEXERDOCS = generate_lexer_docs()
-FORMATTERDOCS = generate_formatter_docs()
-FILTERDOCS = generate_filter_docs()
-CHANGELOG = generate_changelog()
-AUTHORS = generate_authors()
-
-
-PYGMENTS_FORMATTER = HtmlFormatter(style='pastie', cssclass='syntax')
-
-USAGE = '''\
-Usage: %s <mode> <destination> [<source.txt> ...]
-
-Generate either python or html files out of the documentation.
-
-Mode can either be python or html.\
-''' % sys.argv[0]
-
-TEMPLATE = '''\
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
- "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
- <title>{{ title }} &mdash; Pygments</title>
- <meta http-equiv="content-type" content="text/html; charset=utf-8">
- <style type="text/css">
- {{ style }}
- </style>
-</head>
-<body>
- <div id="content">
- <h1 class="heading">Pygments</h1>
- <h2 class="subheading">{{ title }}</h2>
- {% if file_id != "index" %}
- <a id="backlink" href="index.html">&laquo; Back To Index</a>
- {% endif %}
- {% if toc %}
- <div class="toc">
- <h2>Contents</h2>
- <ul class="contents">
- {% for key, value in toc %}
- <li><a href="{{ key }}">{{ value }}</a></li>
- {% endfor %}
- </ul>
- </div>
- {% endif %}
- {{ body }}
- </div>
-</body>
-<!-- generated on: {{ generation_date }}
- file id: {{ file_id }} -->
-</html>\
-'''
-
-STYLESHEET = '''\
-body {
- background-color: #f2f2f2;
- margin: 0;
- padding: 0;
- font-family: 'Georgia', serif;
- color: #111;
-}
-
-#content {
- background-color: white;
- padding: 20px;
- margin: 20px auto 20px auto;
- max-width: 800px;
- border: 4px solid #ddd;
-}
-
-h1 {
- font-weight: normal;
- font-size: 40px;
- color: #09839A;
-}
-
-h2 {
- font-weight: normal;
- font-size: 30px;
- color: #C73F00;
-}
-
-h1.heading {
- margin: 0 0 30px 0;
-}
-
-h2.subheading {
- margin: -30px 0 0 45px;
-}
-
-h3 {
- margin-top: 30px;
-}
-
-table.docutils {
- border-collapse: collapse;
- border: 2px solid #aaa;
- margin: 0.5em 1.5em 0.5em 1.5em;
-}
-
-table.docutils td {
- padding: 2px;
- border: 1px solid #ddd;
-}
-
-p, li, dd, dt, blockquote {
- font-size: 15px;
- color: #333;
-}
-
-p {
- line-height: 150%;
- margin-bottom: 0;
- margin-top: 10px;
-}
-
-hr {
- border-top: 1px solid #ccc;
- border-bottom: 0;
- border-right: 0;
- border-left: 0;
- margin-bottom: 10px;
- margin-top: 20px;
-}
-
-dl {
- margin-left: 10px;
-}
-
-li, dt {
- margin-top: 5px;
-}
-
-dt {
- font-weight: bold;
-}
-
-th {
- text-align: left;
-}
-
-a {
- color: #990000;
-}
-
-a:hover {
- color: #c73f00;
-}
-
-pre {
- background-color: #f9f9f9;
- border-top: 1px solid #ccc;
- border-bottom: 1px solid #ccc;
- padding: 5px;
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
-}
-
-tt {
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- color: black;
- padding: 1px 2px 1px 2px;
- background-color: #f0f0f0;
-}
-
-cite {
- /* abusing <cite>, it's generated by ReST for `x` */
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- font-weight: bold;
- font-style: normal;
-}
-
-#backlink {
- float: right;
- font-size: 11px;
- color: #888;
-}
-
-div.toc {
- margin: 0 0 10px 0;
-}
-
-div.toc h2 {
- font-size: 20px;
-}
-''' #'
-
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found
- lexer = get_lexer_by_name('text')
- parsed = highlight(u'\n'.join(content), lexer, PYGMENTS_FORMATTER)
- return [nodes.raw('', parsed, format="html")]
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-directives.register_directive('sourcecode', pygments_directive)
-
-
-def create_translator(link_style):
- class Translator(html4css1.HTMLTranslator):
- def visit_reference(self, node):
- refuri = node.get('refuri')
- if refuri is not None and '/' not in refuri and refuri.endswith('.txt'):
- node['refuri'] = link_style(refuri[:-4])
- html4css1.HTMLTranslator.visit_reference(self, node)
- return Translator
-
-
-class DocumentationWriter(html4css1.Writer):
-
- def __init__(self, link_style):
- html4css1.Writer.__init__(self)
- self.translator_class = create_translator(link_style)
-
- def translate(self):
- html4css1.Writer.translate(self)
- # generate table of contents
- contents = self.build_contents(self.document)
- contents_doc = self.document.copy()
- contents_doc.children = contents
- contents_visitor = self.translator_class(contents_doc)
- contents_doc.walkabout(contents_visitor)
- self.parts['toc'] = self._generated_toc
-
- def build_contents(self, node, level=0):
- sections = []
- i = len(node) - 1
- while i >= 0 and isinstance(node[i], nodes.section):
- sections.append(node[i])
- i -= 1
- sections.reverse()
- toc = []
- for section in sections:
- try:
- reference = nodes.reference('', '', refid=section['ids'][0], *section[0])
- except IndexError:
- continue
- ref_id = reference['refid']
- text = escape(reference.astext())
- toc.append((ref_id, text))
-
- self._generated_toc = [('#%s' % href, caption) for href, caption in toc]
- # no further processing
- return []
-
-
-def generate_documentation(data, link_style):
- writer = DocumentationWriter(link_style)
- data = data.replace('[builtin_lexer_docs]', LEXERDOCS).\
- replace('[builtin_formatter_docs]', FORMATTERDOCS).\
- replace('[builtin_filter_docs]', FILTERDOCS).\
- replace('[changelog]', CHANGELOG).\
- replace('[authors]', AUTHORS)
- parts = publish_parts(
- data,
- writer=writer,
- settings_overrides={
- 'initial_header_level': 3,
- 'field_name_limit': 50,
- }
- )
- return {
- 'title': parts['title'],
- 'body': parts['body'],
- 'toc': parts['toc']
- }
-
-
-def handle_python(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read()
- def urlize(href):
- # create links for the pygments webpage
- if href == 'index.txt':
- return '/docs/'
- else:
- return '/docs/%s/' % href
- parts = generate_documentation(content, urlize)
- result = file(os.path.join(dst, title + '.py'), 'w')
- result.write('# -*- coding: utf-8 -*-\n')
- result.write('"""\n Pygments Documentation - %s\n' % title)
- result.write(' %s\n\n' % ('~' * (24 + len(title))))
- result.write(' Generated on: %s\n"""\n\n' % now)
- result.write('import datetime\n')
- result.write('DATE = %r\n' % now)
- result.write('TITLE = %r\n' % parts['title'])
- result.write('TOC = %r\n' % parts['toc'])
- result.write('BODY = %r\n' % parts['body'])
- result.close()
-
-
-def handle_html(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read().decode('utf-8')
- c = generate_documentation(content, (lambda x: './%s.html' % x))
- result = file(os.path.join(dst, title + '.html'), 'w')
- c['style'] = STYLESHEET + PYGMENTS_FORMATTER.get_style_defs('.syntax')
- c['generation_date'] = now
- c['file_id'] = title
- t = Template(TEMPLATE)
- result.write(t.render(c).encode('utf-8'))
- result.close()
-
-
-def run(handle_file, dst, sources=()):
- path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
- if not sources:
- sources = [os.path.join(path, fn) for fn in os.listdir(path)]
- if not os.path.isdir(dst):
- os.makedirs(dst)
- print 'Making docs for Pygments %s in %s' % (__version__, dst)
- for fn in sources:
- if not os.path.isfile(fn):
- continue
- print 'Processing %s' % fn
- f = open(fn)
- try:
- handle_file(fn, f, dst)
- finally:
- f.close()
-
-
-def main(mode, dst='build/', *sources):
- try:
- handler = {
- 'html': handle_html,
- 'python': handle_python
- }[mode]
- except KeyError:
- print 'Error: unknown mode "%s"' % mode
- sys.exit(1)
- run(handler, os.path.realpath(dst), sources)
-
-
-if __name__ == '__main__':
- if len(sys.argv) == 1:
- print USAGE
- else:
- main(*sys.argv[1:])
diff --git a/docs/src/api.txt b/docs/src/api.txt
deleted file mode 100644
index 4276eea2..00000000
--- a/docs/src/api.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-.. -*- mode: rst -*-
-
-=====================
-The full Pygments API
-=====================
-
-This page describes the Pygments API.
-
-High-level API
-==============
-
-Functions from the `pygments` module:
-
-def `lex(code, lexer):`
- Lex `code` with the `lexer` (must be a `Lexer` instance)
- and return an iterable of tokens. Currently, this only calls
- `lexer.get_tokens()`.
-
-def `format(tokens, formatter, outfile=None):`
- Format a token stream (iterable of tokens) `tokens` with the
- `formatter` (must be a `Formatter` instance). The result is
- written to `outfile`, or if that is ``None``, returned as a
- string.
-
-def `highlight(code, lexer, formatter, outfile=None):`
- This is the most high-level highlighting function.
- It combines `lex` and `format` in one function.
-
-
-Functions from `pygments.lexers`:
-
-def `get_lexer_by_name(alias, **options):`
- Return an instance of a `Lexer` subclass that has `alias` in its
- aliases list. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer with that alias is
- found.
-
-def `get_lexer_for_filename(fn, **options):`
- Return a `Lexer` subclass instance that has a filename pattern
- matching `fn`. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer for that filename is
- found.
-
-def `get_lexer_for_mimetype(mime, **options):`
- Return a `Lexer` subclass instance that has `mime` in its mimetype
- list. The lexer is given the `options` at its instantiation.
-
- Will raise `pygments.util.ClassNotFound` if not lexer for that mimetype is
- found.
-
-def `guess_lexer(text, **options):`
- Return a `Lexer` subclass instance that's guessed from the text
- in `text`. For that, the `analyse_text()` method of every known
- lexer class is called with the text as argument, and the lexer
- which returned the highest value will be instantiated and returned.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `guess_lexer_for_filename(filename, text, **options):`
- As `guess_lexer()`, but only lexers which have a pattern in `filenames`
- or `alias_filenames` that matches `filename` are taken into consideration.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `get_all_lexers():`
- Return an iterable over all registered lexers, yielding tuples in the
- format::
-
- (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
-
- *New in Pygments 0.6.*
-
-
-Functions from `pygments.formatters`:
-
-def `get_formatter_by_name(alias, **options):`
- Return an instance of a `Formatter` subclass that has `alias` in its
- aliases list. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter with that alias is
- found.
-
-def `get_formatter_for_filename(fn, **options):`
- Return a `Formatter` subclass instance that has a filename pattern
- matching `fn`. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter for that filename
- is found.
-
-
-Functions from `pygments.styles`:
-
-def `get_style_by_name(name):`
- Return a style class by its short name. The names of the builtin styles
- are listed in `pygments.styles.STYLE_MAP`.
-
- Will raise `pygments.util.ClassNotFound` if no style of that name is found.
-
-def `get_all_styles():`
- Return an iterable over all registered styles, yielding their names.
-
- *New in Pygments 0.6.*
-
-
-Lexers
-======
-
-A lexer (derived from `pygments.lexer.Lexer`) has the following functions:
-
-def `__init__(self, **options):`
- The constructor. Takes a \*\*keywords dictionary of options.
- Every subclass must first process its own options and then call
- the `Lexer` constructor, since it processes the `stripnl`,
- `stripall` and `tabsize` options.
-
- An example looks like this:
-
- .. sourcecode:: python
-
- def __init__(self, **options):
- self.compress = options.get('compress', '')
- Lexer.__init__(self, **options)
-
- As these options must all be specifiable as strings (due to the
- command line usage), there are various utility functions
- available to help with that, see `Option processing`_.
-
-def `get_tokens(self, text):`
- This method is the basic interface of a lexer. It is called by
- the `highlight()` function. It must process the text and return an
- iterable of ``(tokentype, value)`` pairs from `text`.
-
- Normally, you don't need to override this method. The default
- implementation processes the `stripnl`, `stripall` and `tabsize`
- options and then yields all tokens from `get_tokens_unprocessed()`,
- with the ``index`` dropped.
-
-def `get_tokens_unprocessed(self, text):`
- This method should process the text and return an iterable of
- ``(index, tokentype, value)`` tuples where ``index`` is the starting
- position of the token within the input text.
-
- This method must be overridden by subclasses.
-
-def `analyse_text(text):`
- A static method which is called for lexer guessing. It should analyse
- the text and return a float in the range from ``0.0`` to ``1.0``.
- If it returns ``0.0``, the lexer will not be selected as the most
- probable one, if it returns ``1.0``, it will be selected immediately.
-
-For a list of known tokens have a look at the `Tokens`_ page.
-
-A lexer also can have the following attributes (in fact, they are mandatory
-except `alias_filenames`) that are used by the builtin lookup mechanism.
-
-`name`
- Full name for the lexer, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the lexer from a list, e.g. using `get_lexer_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames which contain
- content for this lexer. The patterns in this list should be unique among
- all lexers.
-
-`alias_filenames`
- A list of `fnmatch` patterns that match filenames which may or may not
- contain content for this lexer. This list is used by the
- `guess_lexer_for_filename()` function, to determine which lexers are
- then included in guessing the correct one. That means that e.g. every
- lexer for HTML and a template language should include ``\*.html`` in
- this list.
-
-`mimetypes`
- A list of MIME types for content that can be lexed with this
- lexer.
-
-
-.. _Tokens: tokens.txt
-
-
-Formatters
-==========
-
-A formatter (derived from `pygments.formatter.Formatter`) has the following
-functions:
-
-def `__init__(self, **options):`
- As with lexers, this constructor processes options and then must call
- the base class `__init__`.
-
- The `Formatter` class recognizes the options `style`, `full` and
- `title`. It is up to the formatter class whether it uses them.
-
-def `get_style_defs(self, arg=''):`
- This method must return statements or declarations suitable to define
- the current style for subsequent highlighted text (e.g. CSS classes
- in the `HTMLFormatter`).
-
- The optional argument `arg` can be used to modify the generation and
- is formatter dependent (it is standardized because it can be given on
- the command line).
-
- This method is called by the ``-S`` `command-line option`_, the `arg`
- is then given by the ``-a`` option.
-
-def `format(self, tokensource, outfile):`
- This method must format the tokens from the `tokensource` iterable and
- write the formatted version to the file object `outfile`.
-
- Formatter options can control how exactly the tokens are converted.
-
-.. _command-line option: cmdline.txt
-
-A formatter must have the following attributes that are used by the
-builtin lookup mechanism. (*New in Pygments 0.7.*)
-
-`name`
- Full name for the formatter, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the formatter from a list, e.g. using `get_formatter_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames for which this formatter
- can produce output. The patterns in this list should be unique among
- all formatters.
-
-
-Option processing
-=================
-
-The `pygments.util` module has some utility functions usable for option
-processing:
-
-class `OptionError`
- This exception will be raised by all option processing functions if
- the type or value of the argument is not correct.
-
-def `get_bool_opt(options, optname, default=None):`
- Interpret the key `optname` from the dictionary `options`
- as a boolean and return it. Return `default` if `optname`
- is not in `options`.
-
- The valid string values for ``True`` are ``1``, ``yes``,
- ``true`` and ``on``, the ones for ``False`` are ``0``,
- ``no``, ``false`` and ``off`` (matched case-insensitively).
-
-def `get_int_opt(options, optname, default=None):`
- As `get_bool_opt`, but interpret the value as an integer.
-
-def `get_list_opt(options, optname, default=None):`
- If the key `optname` from the dictionary `options` is a string,
- split it at whitespace and return it. If it is already a list
- or a tuple, it is returned as a list.
-
-def `get_choice_opt(options, optname, allowed, default=None):`
- If the key `optname` from the dictionary is not in the sequence
- `allowed`, raise an error, otherwise return it. *New in Pygments 0.8.*
diff --git a/docs/src/authors.txt b/docs/src/authors.txt
deleted file mode 100644
index c8c532aa..00000000
--- a/docs/src/authors.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=======
-Authors
-=======
-
-[authors]
diff --git a/docs/src/changelog.txt b/docs/src/changelog.txt
deleted file mode 100644
index 6caf0a32..00000000
--- a/docs/src/changelog.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=========
-Changelog
-=========
-
-[changelog]
diff --git a/docs/src/index.txt b/docs/src/index.txt
deleted file mode 100644
index d24785ac..00000000
--- a/docs/src/index.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-.. -*- mode: rst -*-
-
-========
-Overview
-========
-
-Welcome to the Pygments documentation.
-
-- Starting with Pygments
-
- - `Installation <installation.txt>`_
-
- - `Introduction and Quickstart <quickstart.txt>`_
-
- - `Command line interface <cmdline.txt>`_
-
-- Builtin components
-
- - `Lexers <lexers.txt>`_
-
- - `Filters <filters.txt>`_
-
- - `Formatters <formatters.txt>`_
-
- - `Styles <styles.txt>`_
-
-- Reference
-
- - `Unicode and encodings <unicode.txt>`_
-
- - `Builtin tokens <tokens.txt>`_
-
- - `API documentation <api.txt>`_
-
-- Hacking for Pygments
-
- - `Write your own lexer <lexerdevelopment.txt>`_
-
- - `Write your own formatter <formatterdevelopment.txt>`_
-
- - `Write your own filter <filterdevelopment.txt>`_
-
- - `Register plugins <plugins.txt>`_
-
-- Hints and Tricks
-
- - `Using Pygments in ReST documents <rstdirective.txt>`_
-
- - `Using Pygments with MoinMoin <moinmoin.txt>`_
-
- - `Using Pygments in other contexts <integrate.txt>`_
-
-- About Pygments
-
- - `Changelog <changelog.txt>`_
-
- - `Authors <authors.txt>`_
-
-
---------------
-
-If you find bugs or have suggestions for the documentation, please
-look `here`_ for info on how to contact the team.
-
-You can download an offline version of this documentation from the
-`download page`_.
-
-.. _here: http://pygments.org/contribute/
-.. _download page: http://pygments.org/download/
diff --git a/docs/src/installation.txt b/docs/src/installation.txt
deleted file mode 100644
index 17a9aad5..00000000
--- a/docs/src/installation.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-.. -*- mode: rst -*-
-
-============
-Installation
-============
-
-Pygments requires at least Python 2.4 to work correctly. Just to clarify:
-there *won't* ever be support for Python versions below 2.4. However, there
-are no other dependencies.
-
-
-Installing a released version
-=============================
-
-As a Python egg (via easy_install)
-----------------------------------
-
-You can install the most recent Pygments version using `easy_install`_::
-
- sudo easy_install Pygments
-
-This will install a Pygments egg in your Python installation's site-packages
-directory.
-
-
-From the tarball release
--------------------------
-
-1. Download the most recent tarball from the `download page`_
-2. Unpack the tarball
-3. ``sudo python setup.py install``
-
-Note that the last command will automatically download and install
-`setuptools`_ if you don't already have it installed. This requires a working
-internet connection.
-
-This will install Pygments into your Python installation's site-packages directory.
-
-
-Installing the development version
-==================================
-
-If you want to play around with the code
-----------------------------------------
-
-1. Install `Mercurial`_
-2. ``hg clone http://bitbucket.org/birkenfeld/pygments-main pygments``
-3. ``cd pygments``
-4. ``ln -s pygments /usr/lib/python2.X/site-packages``
-5. ``ln -s pygmentize /usr/local/bin``
-
-As an alternative to steps 4 and 5 you can also do ``python setup.py develop``
-which will install the package via setuptools in development mode.
-
-..
- If you just want the latest features and use them
- -------------------------------------------------
-
- ::
-
- sudo easy_install Pygments==dev
-
- This will install a Pygments egg containing the latest Subversion trunk code
- in your Python installation's site-packages directory. Every time the command
- is run, the sources are updated from Subversion.
-
-
-.. _download page: http://pygments.org/download/
-.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
-.. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall
-.. _Mercurial: http://selenic.com/mercurial/
diff --git a/external/rst-directive-old.py b/external/rst-directive-old.py
deleted file mode 100644
index 4965576c..00000000
--- a/external/rst-directive-old.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- The Pygments reStructuredText directive
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- This fragment is a Docutils_ 0.4 directive that renders source code
- (to HTML only, currently) via Pygments.
-
- To use it, adjust the options below and copy the code into a module
- that you import on initialization. The code then automatically
- registers a ``sourcecode`` directive that you can use instead of
- normal code blocks like this::
-
- .. sourcecode:: python
-
- My code goes here.
-
- If you want to have different code styles, e.g. one with line numbers
- and one without, add formatters with their names in the VARIANTS dict
- below. You can invoke them instead of the DEFAULT one by using a
- directive option::
-
- .. sourcecode:: python
- :linenos:
-
- My code goes here.
-
- Look at the `directive documentation`_ to get all the gory details.
-
- .. _Docutils: http://docutils.sf.net/
- .. _directive documentation:
- http://docutils.sourceforge.net/docs/howto/rst-directives.html
-
- :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-# Options
-# ~~~~~~~
-
-# Set to True if you want inline CSS styles instead of classes
-INLINESTYLES = False
-
-from pygments.formatters import HtmlFormatter
-
-# The default formatter
-DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
-
-# Add name -> formatter pairs for every variant you want to use
-VARIANTS = {
- # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
-}
-
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name, TextLexer
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found - use the text one instead of an exception
- lexer = TextLexer()
- # take an arbitrary option if more than one is given
- formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
- parsed = highlight(u'\n'.join(content), lexer, formatter)
- return [nodes.raw('', parsed, format='html')]
-
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
-
-directives.register_directive('sourcecode', pygments_directive)
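
Note: the deleted module above was the Docutils 0.4, function-style variant of the directive; the class-based ``external/rst-directive.py`` (patched just below) survives. As a hedged usage sketch, assuming docutils is installed and the interpreter is started from the repository root, the directive can be registered and exercised like this::

    from docutils.core import publish_string

    # Register the ``sourcecode`` directive. The hyphenated filename
    # cannot be imported as a module, so execute the file instead.
    exec(compile(open('external/rst-directive.py').read(),
                 'rst-directive.py', 'exec'))

    rst = """\
    .. sourcecode:: python

       print('hello')
    """
    # The rendered HTML contains Pygments-highlighted markup.
    print(publish_string(rst, writer_name='html')[:200])
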
diff --git a/external/rst-directive.py b/external/rst-directive.py
index f15e7dc8..8ce150c4 100644
--- a/external/rst-directive.py
+++ b/external/rst-directive.py
@@ -75,9 +75,8 @@ class Pygments(Directive):
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
- formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
+ formatter = self.options and VARIANTS[list(self.options)[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
-
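
Note: the one-line change above is a Python 3 fix. ``dict.keys()`` returns a non-indexable view object on Python 3, so ``self.options.keys()[0]`` raises ``TypeError`` there, while ``list(self.options)[0]`` works on both lines. A minimal sketch (the option name is illustrative only)::

    options = {'linenos': None}

    # Portable: materialize the keys before indexing.
    first = list(options)[0]        # 'linenos' on Python 2 and 3

    # Python 2 only: keys() is a list there, but a view on Python 3,
    # and views do not support indexing.
    # first = options.keys()[0]     # TypeError on Python 3
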
diff --git a/pygments/__init__.py b/pygments/__init__.py
index ce3312b1..a47f686e 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -26,7 +26,7 @@
:license: BSD, see LICENSE for details.
"""
-__version__ = '1.6'
+__version__ = '2.0pre'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
@@ -43,7 +43,7 @@ def lex(code, lexer):
"""
try:
return lexer.get_tokens(code)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
@@ -67,7 +67,7 @@ def format(tokens, formatter, outfile=None):
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
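
Note: the ``except TypeError as err`` spelling used above is the only form Python 3 accepts, and it is also valid from Python 2.6 on, which is what makes these mechanical rewrites safe. A minimal sketch::

    try:
        int('not a number')
    except ValueError as err:       # valid on Python 2.6+ and 3.x
        print('caught: %s' % err)

    # The removed form, ``except ValueError, err:``, is a
    # SyntaxError on Python 3.
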
diff --git a/pygments/cmdline.py b/pygments/cmdline.py
index 687cdad0..7c23ebee 100644
--- a/pygments/cmdline.py
+++ b/pygments/cmdline.py
@@ -8,6 +8,9 @@
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+from __future__ import print_function
+
import sys
import getopt
from textwrap import dedent
@@ -16,6 +19,7 @@ from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
+from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
@@ -119,25 +123,25 @@ def _print_help(what, name):
try:
if what == 'lexer':
cls = find_lexer_class(name)
- print "Help on the %s lexer:" % cls.name
- print dedent(cls.__doc__)
+ print("Help on the %s lexer:" % cls.name)
+ print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
- print "Help on the %s formatter:" % cls.name
- print dedent(cls.__doc__)
+ print("Help on the %s formatter:" % cls.name)
+ print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
- print "Help on the %s filter:" % name
- print dedent(cls.__doc__)
+ print("Help on the %s filter:" % name)
+ print(dedent(cls.__doc__))
except AttributeError:
- print >>sys.stderr, "%s not found!" % what
+ print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
if what == 'lexer':
- print
- print "Lexers:"
- print "~~~~~~~"
+ print()
+ print("Lexers:")
+ print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
@@ -146,12 +150,12 @@ def _print_list(what):
info.append(tup)
info.sort()
for i in info:
- print ('* %s\n %s %s') % i
+ print('* %s\n %s %s' % i)
elif what == 'formatter':
- print
- print "Formatters:"
- print "~~~~~~~~~~~"
+ print()
+ print("Formatters:")
+ print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
@@ -161,27 +165,27 @@ def _print_list(what):
info.append(tup)
info.sort()
for i in info:
- print ('* %s\n %s %s') % i
+ print('* %s\n %s %s' % i)
elif what == 'filter':
- print
- print "Filters:"
- print "~~~~~~~~"
+ print()
+ print("Filters:")
+ print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
- print "* " + name + ':'
- print " %s" % docstring_headline(cls)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
elif what == 'style':
- print
- print "Styles:"
- print "~~~~~~~"
+ print()
+ print("Styles:")
+ print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
- print "* " + name + ':'
- print " %s" % docstring_headline(cls)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
def main(args=sys.argv):
@@ -202,8 +206,8 @@ def main(args=sys.argv):
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
- except getopt.GetoptError, err:
- print >>sys.stderr, usage
+ except getopt.GetoptError:
+ print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
@@ -219,22 +223,22 @@ def main(args=sys.argv):
opts[opt] = arg
if not opts and not args:
- print usage
+ print(usage)
return 0
if opts.pop('-h', None) is not None:
- print usage
+ print(usage)
return 0
if opts.pop('-V', None) is not None:
- print 'Pygments version %s, (c) 2006-2014 by Georg Brandl.' % __version__
+ print('Pygments version %s, (c) 2006-2014 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
# print version
@@ -249,12 +253,12 @@ def main(args=sys.argv):
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
_print_help(what, name)
@@ -279,13 +283,13 @@ def main(args=sys.argv):
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
- except ClassNotFound, err:
+ except ClassNotFound as err:
lexer = TextLexer()
- except OptionError, err:
- print >>sys.stderr, 'Error:', err
+ except OptionError as err:
+ print('Error:', err, file=sys.stderr)
return 1
- print lexer.aliases[0]
+ print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
@@ -294,30 +298,30 @@ def main(args=sys.argv):
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
if opts or args:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
- except ClassNotFound, err:
- print >>sys.stderr, err
+ except ClassNotFound as err:
+ print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
- print fmter.get_style_defs(arg)
- except Exception, err:
- print >>sys.stderr, 'Error:', err
+ print(fmter.get_style_defs(arg))
+ except Exception as err:
+ print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
# parse -F options
@@ -330,21 +334,21 @@ def main(args=sys.argv):
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
- except Exception, err:
- print >>sys.stderr, 'Error: cannot open outfile:', err
+ except Exception as err:
+ print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
@@ -356,36 +360,36 @@ def main(args=sys.argv):
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
if args:
if len(args) > 1:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
infn = args[0]
try:
code = open(infn, 'rb').read()
- except Exception, err:
- print >>sys.stderr, 'Error: cannot read infile:', err
+ except Exception as err:
+ print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
- except ClassNotFound, err:
+ except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
- print >>sys.stderr, 'Error:', err
+ print('Error:', err, file=sys.stderr)
return 1
- except OptionError, err:
- print >>sys.stderr, 'Error:', err
+ except OptionError as err:
+ print('Error:', err, file=sys.stderr)
return 1
else:
@@ -396,12 +400,21 @@ def main(args=sys.argv):
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
elif not lexer:
- print >>sys.stderr, 'Error: no lexer name given and reading ' + \
- 'from stdin (try using -g or -l <lexer>)'
+ print('Error: no lexer name given and reading '
+ 'from stdin (try using -g or -l <lexer>)', file=sys.stderr)
return 2
else:
code = sys.stdin.read()
+ # When using the LaTeX formatter and the option `escapeinside` is
+ # specified, we need a special lexer which collects escaped text
+ # before running the chosen language lexer.
+ escapeinside = parsed_opts.get('escapeinside', '')
+ if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
+ left = escapeinside[0]
+ right = escapeinside[1]
+ lexer = LatexEmbeddedLexer(left, right, lexer)
+
# No encoding given? Use latin1 if output file given,
# stdin/stdout encoding otherwise.
# (This is a compromise, I'm not too happy with it...)
@@ -426,16 +439,16 @@ def main(args=sys.argv):
for fname, fopts in F_opts:
lexer.add_filter(fname, **fopts)
highlight(code, lexer, fmter, outfile)
- except Exception, err:
+ except Exception:
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
- print >>sys.stderr
- print >>sys.stderr, '*** Error while highlighting:'
- print >>sys.stderr, msg
+ print(file=sys.stderr)
+ print('*** Error while highlighting:', file=sys.stderr)
+ print(msg, file=sys.stderr)
return 1
return 0
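
Note: besides the print-function conversion, the hunk above wires the new ``escapeinside`` option into the command line: when the LaTeX formatter is selected and the option is exactly two characters long, the chosen lexer is wrapped in a ``LatexEmbeddedLexer``. A sketch of the equivalent library usage (the sample code string and the ``|`` delimiters are illustrative only)::

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters.latex import LatexFormatter, LatexEmbeddedLexer

    code = "x = 1  # |\\emph{rendered as LaTeX}|\n"

    fmter = LatexFormatter(escapeinside='||')            # left '|', right '|'
    lexer = LatexEmbeddedLexer('|', '|', PythonLexer())  # what the CLI wires up
    print(highlight(code, lexer, fmter))
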
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
index c33dac7e..2de661c7 100644
--- a/pygments/filters/__init__.py
+++ b/pygments/filters/__init__.py
@@ -16,7 +16,7 @@ from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
- get_choice_opt, ClassNotFound, OptionError
+ get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
@@ -117,7 +117,7 @@ class KeywordCaseFilter(Filter):
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
- self.convert = getattr(unicode, case)
+ self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
@@ -182,7 +182,7 @@ class RaiseOnErrorTokenFilter(Filter):
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
def __init__(self, **options):
@@ -230,14 +230,16 @@ class VisibleWhitespaceFilter(Filter):
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
- for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
+ for name, default in [('spaces', u'·'),
+ ('tabs', u'»'),
+ ('newlines', u'¶')]:
opt = options.get(name, False)
- if isinstance(opt, basestring) and len(opt) == 1:
+ if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
@@ -293,7 +295,7 @@ class GobbleFilter(Filter):
`n` : int
The number of characters to gobble.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
@@ -325,7 +327,7 @@ class TokenMergeFilter(Filter):
Merges consecutive tokens with the same token type in the output stream of a
lexer.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
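
Note: ``text_type`` and ``string_types``, imported above from ``pygments.util``, are the usual 2/3 compatibility aliases for ``unicode``/``str``. Their exact definitions are not shown in this diff, so the following is an assumption modeled on the common pattern::

    import sys

    if sys.version_info[0] >= 3:
        text_type = str                     # all text is unicode on 3.x
        string_types = (str,)
    else:
        text_type = unicode                 # Python 2 branch only
        string_types = (str, unicode)

    # getattr(text_type, 'upper') etc. then works on both lines,
    # which is what KeywordCaseFilter relies on above.
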
diff --git a/pygments/formatter.py b/pygments/formatter.py
index e5ad0d25..b16ffee8 100644
--- a/pygments/formatter.py
+++ b/pygments/formatter.py
@@ -11,14 +11,14 @@
import codecs
-from pygments.util import get_bool_opt
+from pygments.util import get_bool_opt, string_types
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
- if isinstance(style, basestring):
+ if isinstance(style, string_types):
return get_style_by_name(style)
return style
diff --git a/pygments/formatters/_mapping.py b/pygments/formatters/_mapping.py
index d2aabeca..79f592b3 100755
--- a/pygments/formatters/_mapping.py
+++ b/pygments/formatters/_mapping.py
@@ -13,6 +13,8 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
@@ -57,7 +59,7 @@ if __name__ == '__main__':
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters.%s' % filename[:-3]
- print module_name
+ print(module_name)
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
imports.append((module_name, formatter_name))
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 5e5f1e40..3bc60e8a 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -9,14 +9,16 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import sys
import os.path
-import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ StringIO, string_types, iteritems
try:
import ctags
@@ -218,29 +220,34 @@ class HtmlFormatter(Formatter):
If you set this option, the default selector for `get_style_defs()`
will be this class.
- *New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
- wrapping table will have a CSS class of this string plus ``'table'``,
- the default is accordingly ``'highlighttable'``.
+ .. versionadded:: 0.9
+ If you select the ``'table'`` line numbers, the wrapping table will
+ have a CSS class of this string plus ``'table'``, the default is
+ accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
- Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
- Pygments 0.11.*
+ Inline CSS styles for the ``<pre>`` tag (default: ``''``).
+
+ .. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
- to this file instead of the HTML file. *New in Pygments 0.6.*
+ to this file instead of the HTML file.
+
+ .. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
- *New in Pygments 1.1.*
+
+ .. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
@@ -263,7 +270,9 @@ class HtmlFormatter(Formatter):
125%``).
`hl_lines`
- Specify a list of lines to be highlighted. *New in Pygments 0.11.*
+ Specify a list of lines to be highlighted.
+
+ .. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
@@ -279,24 +288,30 @@ class HtmlFormatter(Formatter):
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
- `get_syntax_defs` method given]) (default: ``False``). *New in
- Pygments 0.6.*
+ `get_syntax_defs` method given]) (default: ``False``).
+
+ .. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
- e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
- 0.7.*
+ e.g. set it to ``"<br>"`` to get HTML line breaks.
+
+ .. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
- This allows easy linking to certain lines. *New in Pygments 0.9.*
+ This allows easy linking to certain lines.
+
+ .. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
- This allows easy access to lines via javascript. *New in Pygments 1.6.*
+ This allows easy access to lines via javascript.
+
+ .. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
@@ -306,18 +321,20 @@ class HtmlFormatter(Formatter):
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
- *New in Pygments 1.6.*
+
+ .. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
- *New in Pygments 1.6.*
+
+ .. versionadded:: 1.6
**Subclassing the HTML formatter**
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
@@ -453,7 +470,7 @@ class HtmlFormatter(Formatter):
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
- if isinstance(arg, basestring):
+ if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
@@ -467,7 +484,7 @@ class HtmlFormatter(Formatter):
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
- for cls, (style, ttype, level) in self.class2style.iteritems()
+ for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
@@ -505,8 +522,8 @@ class HtmlFormatter(Formatter):
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
- print >>sys.stderr, 'Note: Cannot determine output file name, ' \
- 'using current directory as base for the CSS file name'
+ print('Note: Cannot determine output file name, '
+ 'using current directory as base for the CSS file name', file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
@@ -515,7 +532,7 @@ class HtmlFormatter(Formatter):
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
- except IOError, err:
+ except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
@@ -534,7 +551,7 @@ class HtmlFormatter(Formatter):
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
- dummyoutfile = StringIO.StringIO()
+ dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
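
Note: the ``string_types`` check above lets ``get_style_defs()`` keep accepting either a single CSS selector or a list of them. A quick usage sketch (the ``.syntax`` selector is made up for the example)::

    from pygments.formatters import HtmlFormatter

    fmt = HtmlFormatter(style='default')

    css = fmt.get_style_defs('.highlight')                     # one prefix
    css_multi = fmt.get_style_defs(['.highlight', '.syntax'])  # several
    print(css.splitlines()[0])
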
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
index 615c722d..8e2b5f9e 100644
--- a/pygments/formatters/img.py
+++ b/pygments/formatters/img.py
@@ -12,8 +12,8 @@
import sys
from pygments.formatter import Formatter
-from pygments.util import get_bool_opt, get_int_opt, \
- get_list_opt, get_choice_opt
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ get_choice_opt, xrange
# Import this carefully
try:
@@ -25,7 +25,10 @@ except ImportError:
try:
import _winreg
except ImportError:
- _winreg = None
+ try:
+ import winreg as _winreg
+ except ImportError:
+ _winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
@@ -72,7 +75,10 @@ class FontManager(object):
self._create_nix()
def _get_nix_font_path(self, name, style):
- from commands import getstatusoutput
+ try:
+ from commands import getstatusoutput
+ except ImportError:
+ from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
@@ -169,7 +175,7 @@ class ImageFormatter(Formatter):
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
Additional options accepted:
@@ -258,12 +264,16 @@ class ImageFormatter(Formatter):
Default: 6
`hl_lines`
- Specify a list of lines to be highlighted. *New in Pygments 1.2.*
+ Specify a list of lines to be highlighted.
+
+ .. versionadded:: 1.2
Default: empty list
`hl_color`
- Specify the color for highlighting lines. *New in Pygments 1.2.*
+ Specify the color for highlighting lines.
+
+ .. versionadded:: 1.2
Default: highlight color of the selected style
"""
@@ -513,8 +523,7 @@ class GifImageFormatter(ImageFormatter):
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create GIF images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_gif'
@@ -528,8 +537,7 @@ class JpgImageFormatter(ImageFormatter):
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create JPEG images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_jpg'
@@ -543,8 +551,7 @@ class BmpImageFormatter(ImageFormatter):
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create bitmap images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_bmp'
diff --git a/pygments/formatters/latex.py b/pygments/formatters/latex.py
index 2cacca86..fee177c5 100644
--- a/pygments/formatters/latex.py
+++ b/pygments/formatters/latex.py
@@ -9,9 +9,13 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import division
+
from pygments.formatter import Formatter
+from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, StringIO
+from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
+ iteritems
__all__ = ['LatexFormatter']
@@ -205,19 +209,33 @@ class LatexFormatter(Formatter):
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
- *New in Pygments 0.7.*
- *New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
+ .. versionadded:: 0.7
+ .. versionchanged:: 0.10
+ The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
- ``False``). *New in Pygments 1.2.*
+ ``False``).
+
+ .. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
- ``False``). *New in Pygments 1.2.*
+ ``False``).
+
+ .. versionadded:: 1.2
+
+ `escapeinside`
+ If set to a string of length 2, enables escaping to LaTeX. Text
+ delimited by these 2 characters is read as LaTeX code and
+ typeset accordingly. It has no effect in string literals, nor in
+ comments if `texcomments` or `mathescape` is set
+ (default: ``''``).
+
+ .. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
@@ -235,6 +253,13 @@ class LatexFormatter(Formatter):
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
+ self.escapeinside = options.get('escapeinside', '')
+
+ if len(self.escapeinside) == 2:
+ self.left = self.escapeinside[0]
+ self.right = self.escapeinside[1]
+ else:
+ self.escapeinside = ''
self._create_stylesheet()
@@ -291,7 +316,7 @@ class LatexFormatter(Formatter):
"""
cp = self.commandprefix
styles = []
- for name, definition in self.cmd2def.iteritems():
+ for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
@@ -306,14 +331,14 @@ class LatexFormatter(Formatter):
realoutfile = outfile
outfile = StringIO()
- outfile.write(ur'\begin{Verbatim}[commandchars=\\\{\}')
+ outfile.write(u'\\begin{Verbatim}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
- if self.mathescape or self.texcomments:
- outfile.write(ur',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
+ if self.mathescape or self.texcomments or self.escapeinside:
+ outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
@@ -342,9 +367,22 @@ class LatexFormatter(Formatter):
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
+ elif self.escapeinside:
+ text = value
+ value = ''
+ while len(text) > 0:
+ a, sep1, text = text.partition(self.left)
+ if len(sep1) > 0:
+ b, sep2, text = text.partition(self.right)
+ if len(sep2) > 0:
+ value = value + escape_tex(a, self.commandprefix) + b
+ else:
+ value = value + escape_tex(a + sep1 + b, self.commandprefix)
+ else:
+ value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
- else:
+ elif ttype not in Token.Escape:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
@@ -376,3 +414,57 @@ class LatexFormatter(Formatter):
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
+
+
+class LatexEmbeddedLexer(Lexer):
+ r"""
+
+ This lexer takes one lexer as argument, the lexer for the language
+ being formatted, and the left and right delimiters for escaped text.
+
+ First, everything is scanned using the language lexer to obtain
+ strings and comments. All other consecutive tokens are merged and
+ the resulting text is scanned for escaped segments, which are given
+ the Token.Escape type. Finally, text that is not escaped is scanned
+ again with the language lexer.
+ """
+ def __init__(self, left, right, lang, **options):
+ self.left = left
+ self.right = right
+ self.lang = lang
+ Lexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ buf = ''
+ for i, t, v in self.lang.get_tokens_unprocessed(text):
+ if t in Token.Comment or t in Token.String:
+ if buf:
+ for x in self.get_tokens_aux(idx, buf):
+ yield x
+ buf = ''
+ yield i, t, v
+ else:
+ if not buf:
+ idx = i
+ buf += v
+ if buf:
+ for x in self.get_tokens_aux(idx, buf):
+ yield x
+
+ def get_tokens_aux(self, index, text):
+ while text:
+ a, sep1, text = text.partition(self.left)
+ if a:
+ for i, t, v in self.lang.get_tokens_unprocessed(a):
+ yield index + i, t, v
+ index += len(a)
+ if sep1:
+ b, sep2, text = text.partition(self.right)
+ if sep2:
+ yield index + len(sep1), Token.Escape, b
+ index += len(sep1) + len(b) + len(sep2)
+ else:
+ yield index, Token.Error, sep1
+ index += len(sep1)
+ text = b
+
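
Note: the docstring above describes the three scanning passes; in token-stream terms, text between the delimiters comes out as ``Token.Escape`` while everything else is lexed normally. A small sketch, assuming ``|`` delimiters::

    from pygments.formatters.latex import LatexEmbeddedLexer
    from pygments.lexers import PythonLexer
    from pygments.token import Token

    lexer = LatexEmbeddedLexer('|', '|', PythonLexer())
    for index, ttype, value in lexer.get_tokens_unprocessed('x = |\\alpha| + 1\n'):
        if ttype is Token.Escape:
            print(index, repr(value))    # the |...| payload, here '\\alpha'
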
diff --git a/pygments/formatters/other.py b/pygments/formatters/other.py
index 00fe8ba6..7368a642 100644
--- a/pygments/formatters/other.py
+++ b/pygments/formatters/other.py
@@ -10,7 +10,7 @@
"""
from pygments.formatter import Formatter
-from pygments.util import OptionError, get_choice_opt, b
+from pygments.util import OptionError, get_choice_opt
from pygments.token import Token
from pygments.console import colorize
@@ -40,7 +40,7 @@ class RawTokenFormatter(Formatter):
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
- `lexer list <lexers.txt>`_.
+ :doc:`lexer list <lexers>`.
Only two options are accepted:
@@ -50,7 +50,8 @@ class RawTokenFormatter(Formatter):
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
- *New in Pygments 0.11.*
+
+ .. versionadded:: 0.11
"""
name = 'Raw tokens'
@@ -79,7 +80,7 @@ class RawTokenFormatter(Formatter):
def format(self, tokensource, outfile):
try:
- outfile.write(b(''))
+ outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
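
Note: ``outfile.write(b'')`` above is a cheap probe: a text-mode file rejects a bytes write with ``TypeError``, so the formatter can fail early with a clear message. A minimal sketch of correct usage, with the output opened in binary mode::

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters.other import RawTokenFormatter

    with open('tokens.raw', 'wb') as f:      # binary mode is required
        highlight('x = 1\n', PythonLexer(), RawTokenFormatter(), f)
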
diff --git a/pygments/formatters/rtf.py b/pygments/formatters/rtf.py
index 45cb2d88..9d87e8f1 100644
--- a/pygments/formatters/rtf.py
+++ b/pygments/formatters/rtf.py
@@ -10,6 +10,7 @@
"""
from pygments.formatter import Formatter
+from pygments.util import get_int_opt
__all__ = ['RtfFormatter']
@@ -21,7 +22,7 @@ class RtfFormatter(Formatter):
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft® Word® documents.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
Additional options accepted:
@@ -32,6 +33,12 @@ class RtfFormatter(Formatter):
`fontface`
The font family used, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
+
+ `fontsize`
+ Size of the font used. Size is specified in half points. The
+ default is 24 half-points, giving a size 12 font.
+
+ .. versionadded:: 2.0
"""
name = 'RTF'
aliases = ['rtf']
@@ -49,9 +56,11 @@ class RtfFormatter(Formatter):
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
+
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
+ self.fontsize = get_int_opt(options, 'fontsize', 0)
def _escape(self, text):
return text.replace('\\', '\\\\') \
@@ -106,6 +115,8 @@ class RtfFormatter(Formatter):
))
offset += 1
outfile.write(r'}\f0')
+ if self.fontsize:
+ outfile.write(r'\fs%d' % (self.fontsize))
# highlight stream
for ttype, value in tokensource:
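
Note: since ``fontsize`` is measured in half-points, 24 half-points yields a 12 pt font, and the default of 0 simply omits the ``\fs`` control word. A minimal sketch::

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters.rtf import RtfFormatter

    # 24 half-points == 12 pt; fontsize=0 (the default) leaves size unset.
    rtf = highlight('x = 1\n', PythonLexer(), RtfFormatter(fontsize=24))
    print(rtf[:80])
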
diff --git a/pygments/formatters/svg.py b/pygments/formatters/svg.py
index b67b54a2..07636943 100644
--- a/pygments/formatters/svg.py
+++ b/pygments/formatters/svg.py
@@ -35,7 +35,7 @@ class SvgFormatter(Formatter):
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
Additional options accepted:
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
index b40bdd0d..60b698c9 100644
--- a/pygments/formatters/terminal256.py
+++ b/pygments/formatters/terminal256.py
@@ -76,7 +76,7 @@ class Terminal256Formatter(Formatter):
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
Options accepted:
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 2f191619..567e85f8 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -14,18 +14,18 @@ from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- make_analysator
+ make_analysator, text_type, add_metaclass, iteritems
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
-_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
- ('\xff\xfe\0\0', 'utf-32'),
- ('\0\0\xfe\xff', 'utf-32be'),
- ('\xff\xfe', 'utf-16'),
- ('\xfe\xff', 'utf-16be')]
+_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
+ (b'\xff\xfe\0\0', 'utf-32'),
+ (b'\0\0\xfe\xff', 'utf-32be'),
+ (b'\xff\xfe', 'utf-16'),
+ (b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
@@ -42,6 +42,7 @@ class LexerMeta(type):
return type.__new__(cls, name, bases, d)
+@add_metaclass(LexerMeta)
class Lexer(object):
"""
Lexer for a specific language.
@@ -55,7 +56,9 @@ class Lexer(object):
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
- *New in Pygments 1.3.*
+
+ .. versionadded:: 1.3
+
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
@@ -84,8 +87,6 @@ class Lexer(object):
#: Priority, should multiple lexers match and no content is provided
priority = 0
- __metaclass__ = LexerMeta
-
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
@@ -136,7 +137,7 @@ class Lexer(object):
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
- if not isinstance(text, unicode):
+ if not isinstance(text, text_type):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
@@ -155,14 +156,13 @@ class Lexer(object):
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
- decoded = unicode(text[len(bom):], encoding,
- errors='replace')
+ decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
- decoded = unicode(text, enc.get('encoding') or 'utf-8',
- errors='replace')
+ decoded = text.decode(enc.get('encoding') or 'utf-8',
+ 'replace')
text = decoded
else:
text = text.decode(self.encoding)
@@ -457,7 +457,7 @@ class RegexLexerMeta(LexerMeta):
try:
rex = cls._process_regex(tdef[0], rflags)
- except Exception, err:
+ except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
@@ -476,7 +476,7 @@ class RegexLexerMeta(LexerMeta):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
- for state in tokendefs.keys():
+ for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
@@ -497,7 +497,7 @@ class RegexLexerMeta(LexerMeta):
for c in itertools.chain((cls,), cls.__mro__):
toks = c.__dict__.get('tokens', {})
- for state, items in toks.iteritems():
+ for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
tokens[state] = items
@@ -537,13 +537,13 @@ class RegexLexerMeta(LexerMeta):
return type.__call__(cls, *args, **kwds)
+@add_metaclass(RegexLexerMeta)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
- __metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
@@ -722,7 +722,7 @@ def do_insertions(insertions, tokens):
"""
insertions = iter(insertions)
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
@@ -748,7 +748,7 @@ def do_insertions(insertions, tokens):
realpos += len(it_value)
oldi = index - i
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
@@ -763,7 +763,7 @@ def do_insertions(insertions, tokens):
yield realpos, t, v
realpos += len(v)
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
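
Note: ``@add_metaclass(LexerMeta)`` replaces the Python-2-only ``__metaclass__`` attribute, which Python 3 silently ignores. The decorator's definition lives in ``pygments.util`` and is not shown in this diff; the following sketch of the six-style pattern is therefore an assumption::

    def add_metaclass(metaclass):
        # Re-create the decorated class under ``metaclass``.
        def wrapper(cls):
            d = dict(cls.__dict__)
            d.pop('__dict__', None)      # let the new class get its own
            d.pop('__weakref__', None)
            return metaclass(cls.__name__, cls.__bases__, d)
        return wrapper

    class Meta(type):
        def __new__(mcs, name, bases, d):
            d.setdefault('tag', 'made by Meta')
            return type.__new__(mcs, name, bases, d)

    @add_metaclass(Meta)
    class Example(object):
        pass

    print(Example.tag)                   # same result on Python 2 and 3
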
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index 6334b54f..caedd479 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -18,11 +18,11 @@ from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
-from pygments.util import ClassNotFound, bytes
+from pygments.util import ClassNotFound, itervalues
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
- 'guess_lexer'] + LEXERS.keys()
+ 'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
@@ -55,7 +55,7 @@ def get_all_lexers():
Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
- for item in LEXERS.itervalues():
+ for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
@@ -68,7 +68,7 @@ def find_lexer_class(name):
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
- for module_name, lname, aliases, _, _ in LEXERS.itervalues():
+ for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
@@ -82,8 +82,11 @@ def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
+ if not _alias:
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
# lookup builtin lexers
- for module_name, name, aliases, _, _ in LEXERS.itervalues():
+ for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
@@ -98,12 +101,12 @@ def get_lexer_by_name(_alias, **options):
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
- pattern, use ``analyze_text()`` to figure out which one is more
+ pattern, use ``analyse_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
- for modname, name, _, filenames, _ in LEXERS.itervalues():
+ for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
@@ -141,7 +144,7 @@ def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
- for modname, name, _, _, mimetypes in LEXERS.itervalues():
+ for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
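
Note: the new guard at the top of ``get_lexer_by_name`` turns an empty or ``None`` alias into a ``ClassNotFound`` instead of an ``AttributeError`` from ``_alias.lower()``. A quick sketch::

    from pygments.lexers import get_lexer_by_name
    from pygments.util import ClassNotFound

    print(get_lexer_by_name('python').name)   # 'Python'

    try:
        get_lexer_by_name('')                 # rejected by the new guard
    except ClassNotFound as err:
        print('error:', err)
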
diff --git a/pygments/lexers/_cocoabuiltins.py b/pygments/lexers/_cocoabuiltins.py
index 312e28d0..1bfa0cdf 100644
--- a/pygments/lexers/_cocoabuiltins.py
+++ b/pygments/lexers/_cocoabuiltins.py
@@ -12,6 +12,8 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
COCOA_INTERFACES = set(['UITableViewCell', 'NSURLSessionDataTask', 'NSLinguisticTagger', 'NSStream', 'UIPrintInfo', 'SKPaymentTransaction', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'SKSpriteNode', 'JSContext', 'UICollectionReusableView', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UITextSelectionRect', 'MKRoute', 'MPVolumeView', 'UIKeyCommand', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'UIAccessibilityElement', 'NSShadow', 'NSAtomicStoreCacheNode', 'UIPushBehavior', 'CBCharacteristic', 'CBUUID', 'CMStepCounter', 'NSNetService', 'UICollectionView', 'UIViewPrintFormatter', 'CAShapeLayer', 'MCPeerID', 'NSFileVersion', 'CMGyroData', 'SKPhysicsJointSpring', 'CIFilter', 'UIView', 'MKMapItem', 'PKPass', 'MKPolygonRenderer', 'JSValue', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'MKMapView', 'CATransition', 'CLCircularRegion', 'MKTileOverlay', 'UICollisionBehavior', 'ACAccountCredential', 'SKPhysicsJointLimit', 'AVMediaSelectionGroup', 'NSIndexSet', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'UITableView', 'AVCaptureStillImageOutput', 'GCController', 'NSAssertionHandler', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NSPropertyListSerialization', 'AVPlayerItemAccessLogEvent', 'UISwipeGestureRecognizer', 'MKOverlayRenderer', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'AVCaptureMovieFileOutput', 'UIImagePickerController', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'GKLeaderboardViewController', 'MPMoviePlayerController', 'GKScore', 'NSURLConnection', 'ABUnknownPersonViewController', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 'NSKeyedArchiver', 'GKLeaderboardSet', 'NSSimpleCString', 'CBATTRequest', 'GKMatchRequest', 'AVMetadataObject', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SSReadingList', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'UIStepper', 'UIRefreshControl', 'GKTurnBasedParticipant', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UITabBarController', 'CMMotionActivity', 'SKAction', 'AVPlayerItemOutput', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'SKMutablePayment', 'UIStoryboardSegue', 'NSOrderedSet', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'NSEntityMigrationPolicy', 'NSLocale', 'NSURLSession', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'UIPasteboard', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSUserDefaults', 'SLRequest', 'AVPlayerLayer', 'NSPointerArray', 'AVAudioMix', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'GKMatch', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 
'NSFetchRequestExpression', 'NSPipe', 'AVComposition', 'ADBannerView', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UINavigationItem', 'CBPeripheralManager', 'UIStoryboardPopoverSegue', 'SKProductsRequest', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 'GLKEffectPropertyTexture', 'NSURLResponse', 'SKPaymentQueue', 'MKReverseGeocoder', 'GCControllerAxisInput', 'MKMapSnapshotter', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVAssetReaderOutput', 'EAGLContext', 'UICollectionViewController', 'AVAssetTrack', 'SKEmitterNode', 'AVCaptureDeviceInput', 'AVVideoCompositionCoreAnimationTool', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 'AVAsynchronousVideoCompositionRequest', 'CAGradientLayer', 'NSFormatter', 'CATransaction', 'MPMovieAccessLogEvent', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'MCBrowserViewController', 'NSRelationshipDescription', 'NSMutableAttributedString', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'MKETAResponse', 'CATextLayer', 'NSNotificationQueue', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'CAScrollLayer', 'NSTextCheckingResult', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'AVAssetResourceLoader', 'AVMutableVideoCompositionInstruction', 'MyClass', 'CTCall', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'ACAccountType', 'GKSession', 'SKVideoNode', 'GCExtendedGamepadSnapshot', 'GCExtendedGamepad', 'CAValueFunction', 'UIActivityIndicatorView', 'NSNotification', 'SKReceiptRefreshRequest', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIWebView', 'NSIncrementalStoreNode', 'EKEventStore', 'UISlider', 'AVAssetResourceLoadingRequest', 'AVCaptureInput', 'SKPhysicsBody', 'NSOperation', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 'CTCallCenter', 'CBMutableService', 'SKTransition', 'UIDynamicAnimator', 'NSMutableArray', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'UICollectionViewLayoutAttributes', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'MPMediaPickerController', 'NSFileCoordinator', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'MPMediaItem', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NSCompoundPredicate', 'MKMultiPoint', 'UIPrintFormatter', 'SKView', 'NSConstantString', 'UIPopoverController', 'AVMetadataFaceObject', 'EKEventViewController', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'UINib', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'UISplitViewController', 'AVAudioSession', 'CAEmitterLayer', 'NSNull', 'MKCircleView', 'UIColor', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 
'NSURLCache', 'GKPlayer', 'NSMappingModel', 'NSHTTPCookie', 'AVMutableVideoComposition', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'UIFontDescriptor', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'EKEventEditViewController', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'UIActivity', 'MKShape', 'NSMergeConflict', 'CIImage', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'NSSortDescriptor', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'GKTurnBasedEventHandler', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'AVURLAsset', 'CBPeripheral', 'AVAssetWriterInputGroup', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'CBService', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'NSNumberFormatter', 'UIPinchGestureRecognizer', 'UIMarkupTextPrintFormatter', 'MKRouteStep', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'CTSubscriber', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'CLHeading', 'NSFileWrapper', 'MKDirectionsResponse', 'UILocalNotification', 'UICollectionViewCell', 'UITextView', 'CMMagnetometerData', 'UIProgressView', 'GKInvite', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'ALAssetsFilter', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSDecimalNumberHandler', 'NSURLSessionConfiguration', 'EKCalendar', 'NSDictionary', 'CAPropertyAnimation', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 'NSExpressionDescription', 'UIViewController', 'NSURLAuthenticationChallenge', 'NSDirectoryEnumerator', 'MKDistanceFormatter', 'GCControllerElement', 'GKPeerPickerController', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'MFMessageComposeViewController', 'AVCaptureSession', 'NSDataDetector', 'AVCaptureVideoPreviewLayer', 'NSURLComponents', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'GLKTextureLoader', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'MKUserLocation', 'CIFeature', 'NSMachPort', 'ALAsset', 'NSURLSessionDownloadTask', 'MPMoviePlayerViewController', 'NSMutableOrderedSet', 'AVCaptureVideoDataOutput', 'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'NSPersistentStoreRequest', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'NSLock', 'UIDynamicBehavior', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'SKEffectNode', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'UIScrollView', 'EKRecurrenceDayOfWeek', 'ASIdentifierManager', 'UIScreen', 
'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'AVCaptureMetadataOutput', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKAlarm', 'NSMutableURLRequest', 'UIVideoEditorController', 'NSAtomicStore', 'UIResponder', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'EKEvent', 'NSDateFormatter', 'AVAssetWriterInputPixelBufferAdaptor', 'UICollectionViewFlowLayoutInvalidationContext', 'UITextField', 'CLPlacemark', 'AVCaptureOutput', 'NSPropertyDescription', 'GCGamepad', 'NSPersistentStoreCoordinator', 'GKMatchmaker', 'CIContext', 'NSThread', 'SKRequest', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 'GKGameCenterViewController', 'UIPrintPaper', 'UICollectionViewLayoutInvalidationContext', 'GLKEffectPropertyTransform', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CAEmitterCell', 'UIFont', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'SKNode', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'CTSubscriberInfo', 'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'AVSpeechSynthesisVoice', 'UIImage', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SKPayment', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 'NSArray', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'EASession', 'UIInputView', 'NSHTTPCookieStorage', 'NSPointerFunctions', 'AVMediaSelectionOption', 'NSRunLoop', 'CAAnimationGroup', 'MKCircle', 'NSMigrationManager', 'UICollectionViewUpdateItem', 'NSMutableData', 'NSMutableParagraphStyle', 'GLKEffectProperty', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'UIAccelerometer', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'NSPropertyMapping', 'GKChallenge', 'NSURLProtectionSpace', 'ACAccountStore', 'UITextRange', 'NSComparisonPredicate', 'NSOutputStream', 'PKAddPassesViewController', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'MCNearbyServiceAdvertiser', 'NSObject', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'GKAchievement', 'AVCaptureAudioFileOutput', 'TWRequest', 'SKLabelNode', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 'NSURLProtocol', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'CIColor', 'UIDictationPhrase'])
COCOA_PROTOCOLS = set(['SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSTextAttachmentContainer', 'NSDecimalNumberBehaviors', 'NSMutableCopying', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UITableViewDelegate', 'GKAchievementViewControllerDelegate', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'MFMailComposeViewControllerDelegate', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'NSPortDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'UIPageViewControllerDataSource', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'UIToolbarDelegate', 'UIViewControllerTransitionCoordinator', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'UITextInputTraits', 'NSLayoutManagerDelegate', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'NSDiscardableContent', 'UITextFieldDelegate', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'NSURLSessionDelegate', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'GKTurnBasedMatchmakerViewControllerDelegate', 'UIActionSheetDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'UIViewControllerContextTransitioning', 'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'AVVideoCompositing', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'MFMessageComposeViewControllerDelegate', 'UITextSelecting', 'NSURLProtocolClient', 'UIVideoEditorControllerDelegate', 'UITableViewDataSource', 'UIDynamicAnimatorDelegate', 'NSURLSessionDataDelegate', 'UICollisionBehaviorDelegate', 'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'GLKViewDelegate', 'EAAccessoryDelegate', 'NSKeyedUnarchiverDelegate', 'NSMachPortDelegate', 
'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'GKVoiceChatClient', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UISplitViewControllerDelegate', 'MKAnnotation', 'UIAccessibilityIdentification', 'ABNewPersonViewControllerDelegate', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'MKMapViewDelegate', 'UIKeyInput', 'UICollectionViewDataSource', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'EKCalendarChooserDelegate'])
COCOA_PRIMITIVES = set(['ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', 'CFStreamErrorHTTP', '__CFMachPort', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'KernVersion0Header', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'CFStreamSocketSecurityProtocol', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', 'BslnFormat0Part', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', '__CFDictionary', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'ALMXHeader', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 'JustPCDecompositionAction', 'KerxControlPointHeader', 'PKErrorCode', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__CFNull', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', '__CFString', 'AnchorPoint', 'JustTable', '__CFNetService', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'MIDIThruConnectionParams', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', '_GLKMatrix3', 'CGGradient', 'OpaqueMIDISetup', '_GLKMatrix2', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', '__CFBinaryHeap', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', '__CFBoolean', 'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'KernStateEntry', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'KerxControlPointEntry', '__CFCharacterSet', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'AnkrTable', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'AudioFile_SMPTE_Time', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', '__CFURL', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'CFHostInfoType', 'KernSimpleArrayHeader', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'OpaqueCMBlockBuffer', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 
'AudioUnitMeterClipping', '__CFNumber', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 'ExtendedNoteOnEvent', '__CFArray', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 'TrakTableData', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'PropTable', 'CGPDFScanner', 'OpaqueMusicEventIterator', '__CFFileSecurity', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'CGRectEdge', 'sfntFontDescriptor', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'SFNTLookupSegment', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', '__CFData', '__CFDate', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__CFWriteStream', '__CFAttributedString', '__CFStringTokenizer', 'JustWidthDeltaEntry', '__CFSet', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', '__CFTimeZone', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 'ScheduledAudioFileRegion', 'STEntryZero', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'CFStreamErrorHTTPAuthentication', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'sfntDescriptorHeader', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', '__SecCertificate', 'CGDataConsumerCallbacks', 'CGInterpolationQuality', 'CGLineCap', 'MIDIControlTransform', 'BslnFormat1Part', 'CGPDFArray', '__SecPolicy', 
'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', 'KerxSubtableHeader', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'OpaqueMIDIClient', '__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'OpaqueJSClass', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'CFNetServiceMonitorType', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'CFNetServicesError', 'KernOrderedListEntry', '__CFLocale', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'OpaqueAudioConverter', 'MIDIRawData', 'CFNetDiagnosticStatusValues', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', 'JustPCAction', 'CGPathElementType', '__CFRunLoopTimer', '__CFError', 'AudioFormatListItem', '__CFReadStream', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'CFNetworkErrors', 'sfntCMapHeader', '__CFURLEnumerator', '__CFCalendar', '__CFMessagePort', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader'])
@@ -61,11 +63,11 @@ if __name__ == '__main__':
all_primitives.add(r)
- print "ALL interfaces: \n"
- print all_interfaces
+ print("ALL interfaces: \n")
+ print(all_interfaces)
- print "\nALL protocols: \n"
- print all_protocols
+ print("\nALL protocols: \n")
+ print(all_protocols)
- print "\nALL primitives: \n"
- print all_primitives
+ print("\nALL primitives: \n")
+ print(all_primitives)
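The hunk above is the first instance of a Python 2/3 compatibility pattern repeated throughout this merge: statement-form print is rewritten as the print() function. A minimal sketch of the idiom, with placeholder data standing in for the real Cocoa sets:

    from __future__ import print_function   # makes print() a function on Python 2.6+

    all_interfaces = set(['NSObject', 'NSString'])   # hypothetical stand-in data

    print("ALL interfaces: \n")
    print(all_interfaces)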
diff --git a/pygments/lexers/_luabuiltins.py b/pygments/lexers/_luabuiltins.py
index 671dfeaa..40037357 100644
--- a/pygments/lexers/_luabuiltins.py
+++ b/pygments/lexers/_luabuiltins.py
@@ -13,6 +13,9 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
+
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
@@ -142,7 +145,10 @@ MODULES = {'basic': ['_G',
if __name__ == '__main__':
import re
- import urllib
+ try:
+ from urllib import urlopen
+ except ImportError:
+ from urllib.request import urlopen
import pprint
# you can't generally find out what module a function belongs to if you
@@ -188,7 +194,7 @@ if __name__ == '__main__':
def get_newest_version():
- f = urllib.urlopen('http://www.lua.org/manual/')
+ f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
@@ -196,7 +202,7 @@ if __name__ == '__main__':
return m.groups()[0]
def get_lua_functions(version):
- f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
+ f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
@@ -206,7 +212,7 @@ if __name__ == '__main__':
return functions
def get_function_module(name):
- for mod, cb in module_callbacks().iteritems():
+ for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
@@ -233,13 +239,13 @@ if __name__ == '__main__':
def run():
version = get_newest_version()
- print '> Downloading function index for Lua %s' % version
+ print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
- print '> %d functions found:' % len(functions)
+ print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
- print '>> %s' % full_function_name
+ print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
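The import shim introduced above is the standard way to paper over the urllib split: Python 2 exposes urlopen at the top level, Python 3 moved it into urllib.request. Pulled out on its own:

    try:
        from urllib import urlopen          # Python 2
    except ImportError:
        from urllib.request import urlopen  # Python 3

    f = urlopen('http://www.lua.org/manual/')  # same call on either version
    for line in f:
        pass  # response lines: str on Python 2, bytes on Python 3

One caveat the shim does not cover: on Python 3 the response iterates as bytes, so code matching str patterns against those lines would still need a decode step.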
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index e9380914..1a190d1c 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -13,8 +13,11 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
+ 'APLLexer': ('pygments.lexers.other', 'APL', ('apl',), ('*.apl',), ()),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
@@ -61,14 +64,17 @@ LEXERS = {
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
+ 'CirruLexer': ('pygments.lexers.web', 'Cirru', ('cirru',), ('*.cirru', '*.cr'), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
+ 'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
- 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
+ 'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
+ 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
- 'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
+ 'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp', 'elisp', 'emacs'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
@@ -114,6 +120,7 @@ LEXERS = {
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'GAPLexer': ('pygments.lexers.math', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
@@ -140,7 +147,11 @@ LEXERS = {
'HyLexer': ('pygments.lexers.agile', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
+ 'IdrisLexer': ('pygments.lexers.functional', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
+ 'Inform6Lexer': ('pygments.lexers.compiled', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
+ 'Inform6TemplateLexer': ('pygments.lexers.compiled', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
+ 'Inform7Lexer': ('pygments.lexers.compiled', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
@@ -170,6 +181,7 @@ LEXERS = {
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateIdrisLexer': ('pygments.lexers.functional', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
@@ -183,6 +195,7 @@ LEXERS = {
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
+ 'MaskLexer': ('pygments.lexers.web', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.math', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
@@ -193,6 +206,7 @@ LEXERS = {
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
+ 'MqlLexer': ('pygments.lexers.compiled', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
@@ -204,6 +218,7 @@ LEXERS = {
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+ 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
@@ -224,6 +239,7 @@ LEXERS = {
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
+ 'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.compiled', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
@@ -264,7 +280,7 @@ LEXERS = {
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
- 'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
+ 'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs',), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
@@ -315,6 +331,7 @@ LEXERS = {
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
+ 'ZephirLexer': ('pygments.lexers.web', 'Zephir', ('zephir',), ('*.zep',), ()),
}
if __name__ == '__main__':
@@ -327,7 +344,7 @@ if __name__ == '__main__':
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers.%s' % filename[:-3]
- print module_name
+ print(module_name)
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
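Each LEXERS entry maps a lexer class name to a 5-tuple of (module path, display name, aliases, filename globs, mimetypes), and the class is only imported when first requested. A hedged lookup sketch over a one-entry copy of the table (not the real pygments resolver; it assumes a Pygments checkout of this vintage on the path):

    LEXERS = {
        'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',),
                      ('*.rs',), ('text/x-rustsrc',)),
    }

    def find_lexer_class(alias):
        for name, (module, _, aliases, _, _) in LEXERS.items():
            if alias in aliases:
                mod = __import__(module, None, None, [name])  # lazy import
                return getattr(mod, name)

    print(find_lexer_class('rust'))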
diff --git a/pygments/lexers/_phpbuiltins.py b/pygments/lexers/_phpbuiltins.py
index 571f564a..2f5ec851 100644
--- a/pygments/lexers/_phpbuiltins.py
+++ b/pygments/lexers/_phpbuiltins.py
@@ -16,6 +16,7 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
MODULES = {'.NET': ['dotnet_load'],
'APC': ['apc_add',
@@ -3711,7 +3712,10 @@ if __name__ == '__main__':
import re
import shutil
import tarfile
- import urllib
+ try:
+ from urllib import urlretrieve
+ except ImportError:
+ from urllib.request import urlretrieve
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
@@ -3752,7 +3756,7 @@ if __name__ == '__main__':
return modules
def get_php_references():
- download = urllib.urlretrieve(PHP_MANUAL_URL)
+ download = urlretrieve(PHP_MANUAL_URL)
tar = tarfile.open(download[0])
tar.extractall()
tar.close()
@@ -3777,10 +3781,10 @@ if __name__ == '__main__':
f.close()
def run():
- print '>> Downloading Function Index'
+ print('>> Downloading Function Index')
modules = get_php_functions()
- total = sum(len(v) for v in modules.itervalues())
- print '%d functions found' % total
+ total = sum(len(v) for v in modules.values())
+ print('%d functions found' % total)
regenerate(__file__, modules)
shutil.rmtree(PHP_MANUAL_DIR)
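Besides the urlretrieve shim, note the itervalues() to values() change above: dict.itervalues() does not exist on Python 3, while .values() works on both (a list on 2, a view on 3), which is all a one-shot sum needs:

    modules = {'APC': ['apc_add'], '.NET': ['dotnet_load']}   # stand-in data

    total = sum(len(v) for v in modules.values())   # portable; itervalues() is 2.x-only
    print('%d functions found' % total)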
diff --git a/pygments/lexers/_postgres_builtins.py b/pygments/lexers/_postgres_builtins.py
index 32206e9b..11dc6dec 100644
--- a/pygments/lexers/_postgres_builtins.py
+++ b/pygments/lexers/_postgres_builtins.py
@@ -10,7 +10,10 @@
"""
import re
-import urllib
+try:
+ from urllib import urlopen
+except ImportError:
+ from urllib.request import urlopen
# One man's constant is another man's variable.
SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
@@ -18,11 +21,11 @@ KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'
def update_myself():
- data_file = list(fetch(DATATYPES_URL))
+ data_file = list(urlopen(DATATYPES_URL))
datatypes = parse_datatypes(data_file)
pseudos = parse_pseudos(data_file)
- keywords = parse_keywords(fetch(KEYWORDS_URL))
+ keywords = parse_keywords(urlopen(KEYWORDS_URL))
update_consts(__file__, 'DATATYPES', datatypes)
update_consts(__file__, 'PSEUDO_TYPES', pseudos)
update_consts(__file__, 'KEYWORDS', keywords)
@@ -96,9 +99,6 @@ def parse_pseudos(f):
return dt
-def fetch(url):
- return urllib.urlopen(url)
-
def update_consts(filename, constname, content):
f = open(filename)
lines = f.readlines()
diff --git a/pygments/lexers/_robotframeworklexer.py b/pygments/lexers/_robotframeworklexer.py
index e90918da..2889e1b8 100644
--- a/pygments/lexers/_robotframeworklexer.py
+++ b/pygments/lexers/_robotframeworklexer.py
@@ -27,6 +27,7 @@ import re
from pygments.lexer import Lexer
from pygments.token import Token
+from pygments.util import text_type
HEADING = Token.Generic.Heading
@@ -57,7 +58,7 @@ class RobotFrameworkLexer(Lexer):
Supports both space and pipe separated plain text formats.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'RobotFramework'
aliases = ['robotframework']
@@ -77,7 +78,7 @@ class RobotFrameworkLexer(Lexer):
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
- yield index, token, unicode(value)
+ yield index, token, text_type(value)
index += len(value)
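pygments.util.text_type is the merge's single spelling for the native text type: unicode on Python 2, str on Python 3. A self-contained equivalent, assuming pygments.util is unavailable:

    import sys

    if sys.version_info[0] < 3:
        text_type = unicode   # noqa: F821 -- only evaluated on Python 2
    else:
        text_type = str

    print(text_type(42))   # u'42' on Python 2, '42' on Python 3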
diff --git a/pygments/lexers/_sourcemodbuiltins.py b/pygments/lexers/_sourcemodbuiltins.py
index 03967055..eee84d0b 100644
--- a/pygments/lexers/_sourcemodbuiltins.py
+++ b/pygments/lexers/_sourcemodbuiltins.py
@@ -12,6 +12,8 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
FUNCTIONS = ['TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
@@ -1012,7 +1014,10 @@ if __name__ == '__main__':
import pprint
import re
import sys
- import urllib
+ try:
+ from urllib import urlopen
+ except ImportError:
+ from urllib.request import urlopen
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
@@ -1021,7 +1026,7 @@ if __name__ == '__main__':
del sys.path[i]
def get_version():
- f = urllib.urlopen('http://docs.sourcemod.net/api/index.php')
+ f = urlopen('http://docs.sourcemod.net/api/index.php')
r = re.compile(r'SourceMod v\.<b>([\d\.]+)</td>')
for line in f:
m = r.search(line)
@@ -1029,7 +1034,7 @@ if __name__ == '__main__':
return m.groups()[0]
def get_sm_functions():
- f = urllib.urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
+ f = urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
functions = []
for line in f:
@@ -1057,13 +1062,13 @@ if __name__ == '__main__':
def run():
version = get_version()
- print '> Downloading function index for SourceMod %s' % version
+ print('> Downloading function index for SourceMod %s' % version)
functions = get_sm_functions()
- print '> %d functions found:' % len(functions)
+ print('> %d functions found:' % len(functions))
functionlist = []
for full_function_name in functions:
- print '>> %s' % full_function_name
+ print('>> %s' % full_function_name)
functionlist.append(full_function_name)
regenerate(__file__, functionlist)
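All of these _*builtins modules share one more trick, visible in the call to regenerate(__file__, functionlist): the script rewrites its own source, splicing the freshly downloaded list between markers. A hedged sketch of the idea (the marker layout here is illustrative, not the exact one these files use):

    import pprint

    def regenerate(filename, natives):
        with open(filename) as f:
            content = f.read()
        # split around the generated list; assumes no ']' occurs inside it
        header, rest = content.split('FUNCTIONS = [', 1)
        _, footer = rest.split(']', 1)
        with open(filename, 'w') as f:
            f.write(header + 'FUNCTIONS = %s' % pprint.pformat(natives) + footer)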
diff --git a/pygments/lexers/agile.py b/pygments/lexers/agile.py
index 88f0d983..a49289dc 100644
--- a/pygments/lexers/agile.py
+++ b/pygments/lexers/agile.py
@@ -15,7 +15,7 @@ from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using, this
from pygments.token import Error, Text, Other, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
-from pygments.util import get_bool_opt, get_list_opt, shebang_matches
+from pygments.util import get_bool_opt, get_list_opt, shebang_matches, iteritems
from pygments import unistring as uni
@@ -194,7 +194,7 @@ class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Python 3'
@@ -308,7 +308,8 @@ class PythonConsoleLexer(Lexer):
`python3`
Use Python 3 lexer for code. Default is ``False``.
- *New in Pygments 1.0.*
+
+ .. versionadded:: 1.0
"""
name = 'Python console session'
aliases = ['pycon']
@@ -353,7 +354,7 @@ class PythonConsoleLexer(Lexer):
curcode = ''
insertions = []
if (line.startswith(u'Traceback (most recent call last):') or
- re.match(ur' File "[^"]+", line \d+\n$', line)):
+ re.match(u' File "[^"]+", line \\d+\\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
@@ -377,7 +378,7 @@ class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Python Traceback'
@@ -414,7 +415,7 @@ class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
- *New in Pygments 1.0.*
+ .. versionadded:: 1.0
"""
name = 'Python 3.0 Traceback'
@@ -1020,7 +1021,7 @@ class PerlLexer(RegexLexer):
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
- if 'my $' in text:
+ if re.search('(?:my|our)\s+[$@%(]', text):
return 0.9
@@ -1126,7 +1127,7 @@ class LuaLexer(RegexLexer):
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
- for mod, func in MODULES.iteritems():
+ for mod, func in iteritems(MODULES):
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
@@ -1151,7 +1152,7 @@ class MoonScriptLexer(LuaLexer):
"""
    For `MoonScript <http://moonscript.org>`_ source code.

- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "MoonScript"
@@ -1290,7 +1291,7 @@ class IoLexer(RegexLexer):
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Io'
filenames = ['*.io']
@@ -1336,7 +1337,7 @@ class TclLexer(RegexLexer):
"""
For Tcl source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
keyword_cmds_re = (
@@ -1466,7 +1467,7 @@ class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
@@ -1757,7 +1758,7 @@ class FancyLexer(RegexLexer):
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
@@ -1839,7 +1840,7 @@ class DgLexer(RegexLexer):
a functional and object-oriented programming language
running on the CPython 3 VM.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'dg'
aliases = ['dg']
@@ -1929,7 +1930,7 @@ class Perl6Lexer(ExtendedRegexLexer):
"""
For `Perl 6 <http://www.perl6.org>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Perl6'
@@ -1939,7 +1940,7 @@ class Perl6Lexer(ExtendedRegexLexer):
mimetypes = ['text/x-perl6', 'application/x-perl6']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
- PERL6_IDENTIFIER_RANGE = "['a-zA-Z0-9_:-]"
+ PERL6_IDENTIFIER_RANGE = "['a-zA-Z0-9_:-]" # if you alter this, search for a copy made of it below
PERL6_KEYWORDS = (
'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT',
@@ -2124,12 +2125,16 @@ class Perl6Lexer(ExtendedRegexLexer):
end_pos = next_close_pos
+ if end_pos < 0: # if we didn't find a closer, just highlight the
+ # rest of the text in this class
+ end_pos = len(text)
+
if adverbs is not None and re.search(r':to\b', adverbs):
heredoc_terminator = text[match.start('delimiter') + n_chars : end_pos]
- end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[ match.end('delimiter') : ], re.MULTILINE)
+ end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[ end_pos : ], re.MULTILINE)
if end_heredoc:
- end_pos = match.end('delimiter') + end_heredoc.end()
+ end_pos += end_heredoc.end()
else:
end_pos = len(text)
@@ -2177,7 +2182,7 @@ class Perl6Lexer(ExtendedRegexLexer):
# process the corresponding one!
tokens = {
'common' : [
- (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS.keys()) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)),
+ (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)),
(r'#[^\n]*$', Comment.Singleline),
(r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
(r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
@@ -2206,7 +2211,7 @@ class Perl6Lexer(ExtendedRegexLexer):
(r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'm\w+(?=\()', Name),
- (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])(?P=first_char)*)', brackets_callback(String.Regex)),
+ (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z_:\s])(?P=first_char)*)', brackets_callback(String.Regex)),
(r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', String.Regex),
(r'<[^\s=].*?\S>', String),
(_build_word_match(PERL6_OPERATORS), Operator),
@@ -2226,7 +2231,7 @@ class Perl6Lexer(ExtendedRegexLexer):
(r'.+?', Text),
],
'token-sym-brackets' : [
- (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS.keys()) + '])(?P=first_char)*)', brackets_callback(Name), ('#pop', 'pre-token')),
+ (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', brackets_callback(Name), ('#pop', 'pre-token')),
(r'', Name, ('#pop', 'pre-token')),
],
'token': [
@@ -2244,9 +2249,6 @@ class Perl6Lexer(ExtendedRegexLexer):
}
def analyse_text(text):
- # disabled for now; the lexer is not bug-free and will loop sometimes,
- # so let's be sure to use it only for "real" Perl 6 code.
- return False
def strip_pod(lines):
in_pod = False
stripped_lines = []
@@ -2261,30 +2263,41 @@ class Perl6Lexer(ExtendedRegexLexer):
return stripped_lines
+ # XXX handle block comments
lines = text.splitlines()
lines = strip_pod(lines)
text = '\n'.join(lines)
- if shebang_matches(text, r'perl6|rakudo|niecza'):
+ if shebang_matches(text, r'perl6|rakudo|niecza|pugs'):
return True
- if 'use v6' in text:
- return 0.91 # 0.01 greater than Perl says for 'my $'
- if re.search(r'[$@%]\*[A-Z]+', text): # Perl 6-style globals ($*OS)
- return 0.91
- if re.search(r'[$@%]\?[A-Z]+', text): # Perl 6 compiler variables ($?PACKAGE)
- return 0.91
- if re.search(r'[$@%][!.][A-Za-z0-9_-]+', text): # Perl 6 member variables
- return 0.91
-
- for line in text.splitlines():
- if re.match(r'\s*(?:my|our)?\s*module', line): # module declarations
- return 0.91
- if re.match(r'\s*(?:my|our)?\s*role', line): # role declarations
- return 0.91
- if re.match(r'\s*(?:my|our)?\s*class\b', line): # class declarations
- return 0.91
- return False
+ saw_perl_decl = False
+ rating = False
+
+ # check for my/our/has declarations
+ # copied PERL6_IDENTIFIER_RANGE from above; not happy about that
+ if re.search("(?:my|our|has)\s+(?:['a-zA-Z0-9_:-]+\s+)?[$@%&(]", text):
+ rating = 0.8
+ saw_perl_decl = True
+
+ for line in lines:
+ line = re.sub('#.*', '', line)
+ if re.match('^\s*$', line):
+ continue
+
+ # match v6; use v6; use v6.0; use v6.0.0;
+ if re.match('^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line):
+ return True
+ # match class, module, role, enum, grammar declarations
+ class_decl = re.match('^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line)
+ if class_decl:
+ if saw_perl_decl or class_decl.group('scope') is not None:
+ return True
+ rating = 0.05
+ continue
+ break
+
+ return rating
def __init__(self, **options):
super(Perl6Lexer, self).__init__(**options)
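The rewritten analyse_text above replaces a flat "return False" (the lexer used to loop on some inputs, so guessing was disabled) with graded scoring. The contract: return a float in [0.0, 1.0] or a bool, and the guesser picks the highest bidder. A toy version of the same ladder, using patterns lifted from the hunk:

    import re

    def analyse_text(text):
        # an explicit version pragma is conclusive
        if re.search(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', text, re.MULTILINE):
            return True
        # sigiled my/our/has declarations are strong evidence, not proof
        if re.search(r"(?:my|our|has)\s+(?:['a-zA-Z0-9_:-]+\s+)?[$@%&(]", text):
            return 0.8
        return False

    print(analyse_text('use v6;\nmy $x = 1;'))   # True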
@@ -2295,7 +2308,7 @@ class HyLexer(RegexLexer):
"""
Lexer for `Hy <http://hylang.org/>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Hy'
aliases = ['hylang']
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 655669bf..2727a55d 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -17,7 +17,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator
__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer',
- 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'Ca65Lexer']
+ 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer', 'Ca65Lexer']
class GasLexer(RegexLexer):
@@ -96,6 +96,55 @@ class GasLexer(RegexLexer):
return 0.1
+def _objdump_lexer_tokens(asm_lexer):
+ """
+ Common objdump lexer tokens to wrap an ASM lexer.
+ """
+ hex_re = r'[0-9A-Za-z]'
+ return {
+ 'root': [
+ # File name & format:
+ ('(.*?)(:)( +file format )(.*?)$',
+ bygroups(Name.Label, Punctuation, Text, String)),
+ # Section header
+ ('(Disassembly of section )(.*?)(:)$',
+ bygroups(Text, Name.Label, Punctuation)),
+ # Function labels
+ # (With offset)
+ ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
+ bygroups(Number.Hex, Text, Punctuation, Name.Function,
+ Punctuation, Number.Hex, Punctuation)),
+ # (Without offset)
+ ('('+hex_re+'+)( )(<)(.*?)(>:)$',
+ bygroups(Number.Hex, Text, Punctuation, Name.Function,
+ Punctuation)),
+ # Code line with disassembled instructions
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
+ bygroups(Text, Name.Label, Text, Number.Hex, Text,
+ using(asm_lexer))),
+ # Code line with ascii
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
+ bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
+ # Continued code line, only raw opcodes without disassembled
+ # instruction
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
+ bygroups(Text, Name.Label, Text, Number.Hex)),
+ # Skipped a few bytes
+ (r'\t\.\.\.$', Text),
+ # Relocation line
+ # (With offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
+ bygroups(Text, Name.Label, Text, Name.Property, Text,
+ Name.Constant, Punctuation, Number.Hex)),
+ # (Without offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
+ bygroups(Text, Name.Label, Text, Name.Property, Text,
+ Name.Constant)),
+ (r'[^\n]+\n', Other)
+ ]
+ }
+
+
class ObjdumpLexer(RegexLexer):
"""
For the output of 'objdump -dr'
@@ -105,50 +154,9 @@ class ObjdumpLexer(RegexLexer):
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
- hex = r'[0-9A-Za-z]'
- tokens = {
- 'root': [
- # File name & format:
- ('(.*?)(:)( +file format )(.*?)$',
- bygroups(Name.Label, Punctuation, Text, String)),
- # Section header
- ('(Disassembly of section )(.*?)(:)$',
- bygroups(Text, Name.Label, Punctuation)),
- # Function labels
- # (With offset)
- ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
- bygroups(Number.Hex, Text, Punctuation, Name.Function,
- Punctuation, Number.Hex, Punctuation)),
- # (Without offset)
- ('('+hex+'+)( )(<)(.*?)(>:)$',
- bygroups(Number.Hex, Text, Punctuation, Name.Function,
- Punctuation)),
- # Code line with disassembled instructions
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$',
- bygroups(Text, Name.Label, Text, Number.Hex, Text,
- using(GasLexer))),
- # Code line with ascii
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$',
- bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
- # Continued code line, only raw opcodes without disassembled
- # instruction
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$',
- bygroups(Text, Name.Label, Text, Number.Hex)),
- # Skipped a few bytes
- (r'\t\.\.\.$', Text),
- # Relocation line
- # (With offset)
- (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$',
- bygroups(Text, Name.Label, Text, Name.Property, Text,
- Name.Constant, Punctuation, Number.Hex)),
- # (Without offset)
- (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$',
- bygroups(Text, Name.Label, Text, Name.Property, Text,
- Name.Constant)),
- (r'[^\n]+\n', Other)
- ]
- }
+ tokens = _objdump_lexer_tokens(GasLexer)
+
class DObjdumpLexer(DelegatingLexer):
@@ -374,11 +382,25 @@ class NasmLexer(RegexLexer):
}
+class NasmObjdumpLexer(ObjdumpLexer):
+ """
+ For the output of 'objdump -d -M intel'.
+
+ .. versionadded:: 2.0
+ """
+ name = 'objdump-nasm'
+ aliases = ['objdump-nasm']
+ filenames = ['*.objdump-intel']
+ mimetypes = ['text/x-nasm-objdump']
+
+ tokens = _objdump_lexer_tokens(NasmLexer)
+
+
class Ca65Lexer(RegexLexer):
"""
For ca65 assembler sources.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'ca65'
aliases = ['ca65']
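The net effect of the asm.py refactor above: the objdump framing rules now live once in _objdump_lexer_tokens(), and each wrapper class just names the instruction-set lexer to delegate to. A hedged smoke test of the new NasmObjdumpLexer, assuming this merged tree is importable:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.asm import NasmObjdumpLexer

    dump = ('main.o:     file format elf64-x86-64\n'
            '\n'
            'Disassembly of section .text:\n'
            '\n'
            '0000000000000000 <main>:\n'
            '   0:\t55                   \tpush   rbp\n')

    print(highlight(dump, NasmObjdumpLexer(), TerminalFormatter()))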
diff --git a/pygments/lexers/compiled.py b/pygments/lexers/compiled.py
index 809b1db3..e6e10098 100644
--- a/pygments/lexers/compiled.py
+++ b/pygments/lexers/compiled.py
@@ -31,7 +31,8 @@ __all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
'FantomLexer', 'RustLexer', 'CudaLexer', 'MonkeyLexer', 'SwigLexer',
'DylanLidLexer', 'DylanConsoleLexer', 'CobolLexer',
'CobolFreeformatLexer', 'LogosLexer', 'ClayLexer', 'PikeLexer',
- 'ChapelLexer', 'EiffelLexer']
+ 'ChapelLexer', 'EiffelLexer', 'Inform6Lexer', 'Inform7Lexer',
+ 'Inform6TemplateLexer', 'MqlLexer']
class CFamilyLexer(RegexLexer):
@@ -240,7 +241,7 @@ class PikeLexer(CppLexer):
"""
For `Pike <http://pike.lysator.liu.se/>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Pike'
aliases = ['pike']
@@ -279,7 +280,7 @@ class SwigLexer(CppLexer):
"""
For `SWIG <http://www.swig.org/>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'SWIG'
aliases = ['swig']
@@ -336,7 +337,7 @@ class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
@@ -372,7 +373,7 @@ class NesCLexer(CLexer):
For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
directives.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'nesC'
aliases = ['nesc']
@@ -397,7 +398,7 @@ class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
@@ -448,7 +449,7 @@ class DLexer(RegexLexer):
"""
For D source.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'D'
filenames = ['*.d', '*.di']
@@ -468,20 +469,23 @@ class DLexer(RegexLexer):
(r'(abstract|alias|align|asm|assert|auto|body|break|case|cast'
r'|catch|class|const|continue|debug|default|delegate|delete'
r'|deprecated|do|else|enum|export|extern|finally|final'
- r'|foreach_reverse|foreach|for|function|goto|if|import|inout'
- r'|interface|invariant|in|is|lazy|mixin|module|new|nothrow|out'
+ r'|foreach_reverse|foreach|for|function|goto|if|immutable|import'
+ r'|interface|invariant|inout|in|is|lazy|mixin|module|new|nothrow|out'
r'|override|package|pragma|private|protected|public|pure|ref|return'
- r'|scope|static|struct|super|switch|synchronized|template|this'
+ r'|scope|shared|static|struct|super|switch|synchronized|template|this'
r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile'
- r'|while|with|__traits)\b', Keyword
+ r'|while|with|__gshared|__traits|__vector|__parameters)\b', Keyword
),
(r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float'
r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong'
r'|ushort|void|wchar)\b', Keyword.Type
),
(r'(false|true|null)\b', Keyword.Constant),
+ (r'(__FILE__|__MODULE__|__LINE__|__FUNCTION__|__PRETTY_FUNCTION__'
+ r'|__DATE__|__EOF__|__TIME__|__TIMESTAMP__|__VENDOR__|__VERSION__)\b',
+ Keyword.Pseudo),
(r'macro\b', Keyword.Reserved),
- (r'(string|wstring|dstring)\b', Name.Builtin),
+ (r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
# FloatLiteral
# -- HexFloat
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
@@ -527,6 +531,8 @@ class DLexer(RegexLexer):
(r'q"(.).*?\1"', String),
# -- TokenString
(r'q{', String, 'token_string'),
+ # Attributes
+ (r'@([a-zA-Z_]\w*)?', Name.Decorator),
# Tokens
(r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
@@ -534,6 +540,8 @@ class DLexer(RegexLexer):
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
+ # Line
+ (r'#line\s.*\n', Comment.Special),
],
'nested_comment': [
(r'[^+/]+', Comment.Multiline),
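The D hunks above add post-D1 keywords (immutable, shared, __gshared, ...), @attribute syntax as Name.Decorator, and #line directives as Comment.Special. A hedged spot-check, again assuming this merged tree is importable (compiled.py was later split into per-language modules):

    from pygments.lexers.compiled import DLexer

    code = '@safe immutable int x;\n#line 10 "gen.d"\n'
    for token, value in DLexer().get_tokens(code):
        print(token, repr(value))
    # expected highlights: @safe -> Name.Decorator, immutable -> Keyword,
    # the #line directive -> Comment.Special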
@@ -878,7 +886,7 @@ class DelphiLexer(Lexer):
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
- for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()):
+ for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
@@ -1082,7 +1090,7 @@ class DylanLexer(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Dylan'
@@ -1274,7 +1282,7 @@ class DylanLidLexer(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'DylanLID'
@@ -1312,7 +1320,7 @@ class DylanConsoleLexer(Lexer):
This is based on a copy of the RubyConsoleLexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Dylan session'
aliases = ['dylan-console', 'dylan-repl']
@@ -1422,7 +1430,7 @@ def objective(baselexer):
(r'^([-+])(\s*)' # method marker
r'(\(.*?\))?(\s*)' # return type
r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)', # begin of method name
- bygroups(Keyword, Text, using(this),
+ bygroups(Punctuation, Text, using(this),
Text, Name.Function),
'method'),
inherit,
@@ -1433,8 +1441,8 @@ def objective(baselexer):
# discussion in Issue 789
(r',', Punctuation),
(r'\.\.\.', Punctuation),
- (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
- Name.Variable)),
+ (r'(\(.*?\))(\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)',
+ bygroups(using(this), Text, Name.Variable)),
(r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
(';', Punctuation, '#pop'),
('{', Punctuation, 'function'),
@@ -1447,6 +1455,8 @@ def objective(baselexer):
return 1.0
elif '@"' in text: # strings
return 0.8
+ elif re.search('@[0-9]+', text):
+ return 0.7
elif _oc_message.search(text):
return 0.8
return 0
@@ -1495,7 +1505,7 @@ class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
@@ -1610,7 +1620,7 @@ class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
@@ -1722,7 +1732,7 @@ class CythonLexer(RegexLexer):
"""
For Pyrex and `Cython <http://cython.org>`_ source code.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Cython'
@@ -1888,7 +1898,7 @@ class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
@@ -1977,7 +1987,7 @@ class OocLexer(RegexLexer):
"""
For `Ooc <http://ooc-lang.org/>`_ source code
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Ooc'
aliases = ['ooc']
@@ -2116,7 +2126,7 @@ class FelixLexer(RegexLexer):
"""
For `Felix <http://www.felix-lang.org>`_ source code.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Felix'
@@ -2370,7 +2380,7 @@ class AdaLexer(RegexLexer):
"""
For Ada source code.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Ada'
@@ -2513,7 +2523,7 @@ class Modula2Lexer(RegexLexer):
`gm2ext`
Also highlight GNU extensions (default: False).
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
@@ -2695,7 +2705,7 @@ class BlitzMaxLexer(RegexLexer):
"""
For `BlitzMax <http://blitzbasic.com>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'BlitzMax'
@@ -2789,7 +2799,7 @@ class BlitzBasicLexer(RegexLexer):
"""
For `BlitzBasic <http://blitzbasic.com>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'BlitzBasic'
@@ -2871,7 +2881,7 @@ class NimrodLexer(RegexLexer):
"""
For `Nimrod <http://nimrod-code.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Nimrod'
@@ -3012,7 +3022,7 @@ class FantomLexer(RegexLexer):
"""
For Fantom source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Fantom'
aliases = ['fan']
@@ -3242,12 +3252,12 @@ class FantomLexer(RegexLexer):
class RustLexer(RegexLexer):
"""
- Lexer for Mozilla's Rust programming language.
+ Lexer for the Rust programming language (version 0.9).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Rust'
- filenames = ['*.rs', '*.rc']
+ filenames = ['*.rs']
aliases = ['rust']
mimetypes = ['text/x-rustsrc']
@@ -3256,18 +3266,55 @@ class RustLexer(RegexLexer):
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
+ (r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
# Keywords
- (r'(as|assert|break|const'
- r'|copy|do|else|enum|extern|fail'
- r'|false|fn|for|if|impl|let|log'
- r'|loop|match|mod|move|mut|once|priv|pub|pure'
- r'|ref|return|static|struct|trait|true|type|unsafe|use|while'
- r'|u8|u16|u32|u64|i8|i16|i32|i64|uint'
- r'|int|float|f32|f64|str)\b', Keyword),
-
+ (r'(as|box|break|continue'
+ r'|do|else|enum|extern'
+ r'|fn|for|if|impl|in'
+ r'|loop|match|mut|priv|proc|pub'
+ r'|ref|return|static|\'static|struct|trait|true|type'
+ r'|unsafe|while)\b',
+ Keyword),
+ (r'(alignof|be|const|offsetof|pure|sizeof|typeof|once|unsized'
+ r'|yield)\b', Keyword.Reserved),
+ (r'(mod|use)\b', Keyword.Namespace),
+ (r'(true|false)\b', Keyword.Constant),
+ (r'let\b', Keyword.Declaration),
+ (r'(u8|u16|u32|u64|i8|i16|i32|i64|uint|int|f32|f64'
+ r'|str|bool)\b', Keyword.Type),
+ (r'self\b', Name.Builtin.Pseudo),
+ # Prelude
+ (r'(Freeze|Pod|Send|Sized|Add|Sub|Mul|Div|Rem|Neg|Not|BitAnd'
+ r'|BitOr|BitXor|Drop|Shl|Shr|Index|Option|Some|None|Result'
+ r'|Ok|Err|from_str|range|print|println|Any|AnyOwnExt|AnyRefExt'
+             r'|AnyMutRefExt|Ascii|AsciiCast|OwnedAsciiCast|AsciiStr'
+ r'|IntoBytes|Bool|ToCStr|Char|Clone|DeepClone|Eq|ApproxEq'
+ r'|Ord|TotalEq|Ordering|Less|Equal|Greater|Equiv|Container'
+ r'|Mutable|Map|MutableMap|Set|MutableSet|Default|FromStr'
+ r'|Hash|FromIterator|Extendable|Iterator|DoubleEndedIterator'
+ r'|RandomAccessIterator|CloneableIterator|OrdIterator'
+ r'|MutableDoubleEndedIterator|ExactSize|Times|Algebraic'
+ r'|Trigonometric|Exponential|Hyperbolic|Bitwise|BitCount'
+ r'|Bounded|Integer|Fractional|Real|RealExt|Num|NumCast'
+ r'|CheckedAdd|CheckedSub|CheckedMul|Orderable|Signed'
+ r'|Unsigned|Round|Primitive|Int|Float|ToStrRadix'
+ r'|ToPrimitive|FromPrimitive|GenericPath|Path|PosixPath'
+ r'|WindowsPath|RawPtr|Buffer|Writer|Reader|Seek'
+ r'|SendStr|SendStrOwned|SendStrStatic|IntoSendStr|Str'
+ r'|StrVector|StrSlice|OwnedStr|IterBytes|ToStr|IntoStr'
+ r'|CopyableTuple|ImmutableTuple|ImmutableTuple\d+'
+ r'|Tuple\d+|ImmutableEqVector|ImmutableTotalOrdVector'
+ r'|ImmutableCopyableVector|OwnedVector|OwnedCopyableVector'
+ r'|OwnedEqVector|MutableVector|MutableTotalOrdVector'
+ r'|Vector|VectorVector|CopyableVector|ImmutableVector'
+ r'|Port|Chan|SharedChan|spawn|drop)\b', Name.Builtin),
+ # Borrowed pointer
+ (r'(&)(\'[A-Za-z_]\w*)?', bygroups(Operator, Name)),
+ # Labels
+ (r'\'[A-Za-z_]\w*:', Name.Label),
# Character Literal
(r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
@@ -3275,9 +3322,9 @@ class RustLexer(RegexLexer):
# Lifetime
(r"""'[a-zA-Z_][a-zA-Z0-9_]*""", Name.Label),
# Binary Literal
- (r'0[Bb][01_]+', Number, 'number_lit'),
+ (r'0b[01_]+', Number, 'number_lit'),
# Octal Literal
- (r'0[0-7_]+', Number.Oct, 'number_lit'),
+ (r'0o[0-7_]+', Number.Oct, 'number_lit'),
# Hexadecimal Literal
(r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
# Decimal Literal
@@ -3285,20 +3332,22 @@ class RustLexer(RegexLexer):
r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'),
# String Literal
(r'"', String, 'string'),
+ (r'r(#*)".*?"\1', String.Raw),
# Operators and Punctuation
(r'[{}()\[\],.;]', Punctuation),
(r'[+\-*/%&|<>^!~@=:?]', Operator),
# Identifier
- (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
+ (r'[a-zA-Z_]\w*', Name),
# Attributes
(r'#\[', Comment.Preproc, 'attribute['),
- (r'#\(', Comment.Preproc, 'attribute('),
# Macros
- (r'[A-Za-z_][A-Za-z0-9_]*!\[', Comment.Preproc, 'attribute['),
- (r'[A-Za-z_][A-Za-z0-9_]*!\(', Comment.Preproc, 'attribute('),
+ (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\s*\{',
+ bygroups(Comment.Preproc, Name), 'macro{'),
+ (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\(',
+ bygroups(Comment.Preproc, Name), 'macro('),
],
'number_lit': [
(r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'),
@@ -3310,6 +3359,14 @@ class RustLexer(RegexLexer):
(r'[^\\"]+', String),
(r'\\', String),
],
+ 'macro{': [
+ (r'\{', Operator, '#push'),
+ (r'\}', Operator, '#pop'),
+ ],
+ 'macro(': [
+ (r'\(', Operator, '#push'),
+ (r'\)', Operator, '#pop'),
+ ],
'attribute_common': [
(r'"', String, 'string'),
(r'\[', Comment.Preproc, 'attribute['),
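The new 'macro{' and 'macro(' states above rely on RegexLexer's #push/#pop pseudo-states to keep nested delimiters balanced inside a macro invocation. A stripped-down standalone lexer showing just that mechanism (not the full RustLexer):

    from pygments.lexer import RegexLexer
    from pygments.token import Comment, Operator, Text

    class TinyMacroLexer(RegexLexer):
        tokens = {
            'root': [
                (r'[A-Za-z_]\w*!\s*\{', Comment.Preproc, 'macro{'),
                (r'.', Text),
            ],
            'macro{': [
                (r'\{', Operator, '#push'),   # nested brace: recurse into same state
                (r'\}', Operator, '#pop'),    # matching close: unwind one level
                (r'[^{}]+', Text),
            ],
        }

    for token, value in TinyMacroLexer().get_tokens('vec!{ a { b } c }'):
        print(token, repr(value))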
@@ -3333,7 +3390,7 @@ class CudaLexer(CLexer):
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
@@ -3383,7 +3440,7 @@ class MonkeyLexer(RegexLexer):
`Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Monkey'
@@ -3511,7 +3568,7 @@ class CobolLexer(RegexLexer):
"""
Lexer for OpenCOBOL code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'COBOL'
aliases = ['cobol']
@@ -3705,7 +3762,7 @@ class CobolFreeformatLexer(CobolLexer):
"""
Lexer for Free format OpenCOBOL code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'COBOLFree'
aliases = ['cobolfree']
@@ -3724,7 +3781,7 @@ class LogosLexer(ObjectiveCppLexer):
"""
For Logos + Objective-C source code with preprocessor directives.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Logos'
@@ -3788,7 +3845,7 @@ class ChapelLexer(RegexLexer):
"""
For `Chapel <http://chapel.cray.com/>`_ source.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Chapel'
filenames = ['*.chpl']
@@ -3865,7 +3922,7 @@ class EiffelLexer(RegexLexer):
"""
For `Eiffel <http://www.eiffel.com>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Eiffel'
aliases = ['eiffel']
@@ -3904,3 +3961,1138 @@ class EiffelLexer(RegexLexer):
(r'[0-9]+', Number.Integer),
],
}
+
+
+class Inform6Lexer(RegexLexer):
+ """
+ For `Inform 6 <http://inform-fiction.org/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 6'
+ aliases = ['inform6', 'i6']
+ filenames = ['*.inf']
+
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ _name = r'[a-zA-Z_][a-zA-Z_0-9]*'
+
+ # Inform 7 maps these four character classes to their ASCII
+ # equivalents. To support Inform 6 inclusions within Inform 7,
+ # Inform6Lexer maps them too.
+ _dash = u'\\-\u2010-\u2014'
+ _dquote = u'"\u201c\u201d'
+ _squote = u"'\u2018\u2019"
+ _newline = u'\\n\u0085\u2028\u2029'
+
+ tokens = {
+ 'root': [
+ (r'(\A(!%%[^%s]*[%s])+)?' % (_newline, _newline), Comment.Preproc,
+ 'directive')
+ ],
+ '_whitespace': [
+ (r'\s+', Text),
+ (r'![^%s]*' % _newline, Comment.Single)
+ ],
+ 'default': [
+ include('_whitespace'),
+ (r'\[', Punctuation, 'many-values'), # Array initialization
+ (r':|(?=;)', Punctuation, '#pop'),
+ (r'<', Punctuation), # Second angle bracket in an action statement
+ (r'', Text, ('expression', '_expression'))
+ ],
+
+ # Expressions
+ '_expression': [
+ include('_whitespace'),
+ (r'(?=sp\b)', Text, '#pop'),
+ (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
+ ('#pop', 'value')),
+ (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
+ (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
+ ],
+ 'expression': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('expression', '_expression')),
+ (r'\)', Punctuation, '#pop'),
+ (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
+ (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
+ (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
+ (r',', Punctuation, '_expression'),
+ (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
+ Operator, '_expression'),
+ (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
+ '_expression'),
+ (r'sp\b', Name),
+ (r'\?~?', Name.Label, 'label?'),
+ (r'[@{]', Error),
+ (r'', Text, '#pop')
+ ],
+ '_assembly-expression': [
+ (r'\(', Punctuation, ('#push', '_expression')),
+ (r'[\[\]]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, '_expression'),
+ (r'sp\b', Keyword.Pseudo),
+ (r';', Punctuation, '#pop:3'),
+ include('expression')
+ ],
+ '_for-expression': [
+ (r'\)', Punctuation, '#pop:2'),
+ (r':', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_keyword-expression': [
+ (r'(from|near|to)\b', Keyword, '_expression'),
+ include('expression')
+ ],
+ '_list-expression': [
+ (r',', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_object-expression': [
+ (r'has\b', Keyword.Declaration, '#pop'),
+ include('_list-expression')
+ ],
+
+ # Values
+ 'value': [
+ include('_whitespace'),
+ # Strings
+ (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
+ (r'([%s])(@{[0-9a-fA-F]{1,4}})([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'([%s])(@..)([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
+ # Numbers
+ (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
+ Number.Float, '#pop'),
+ (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
+ (r'\$\$[01]+', Number, '#pop'), # Binary
+ (r'[0-9]+', Number.Integer, '#pop'),
+ # Values prefixed by hashes
+ (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
+ (r'(#g\$)(%s)' % _name,
+ bygroups(Operator, Name.Variable.Global), '#pop'),
+ (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
+ (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
+ (r'#', Name.Builtin, ('#pop', 'system-constant')),
+ # System functions
+ (r'(child|children|elder|eldest|glk|indirect|metaclass|parent|'
+ r'random|sibling|younger|youngest)\b', Name.Builtin, '#pop'),
+ # Metaclasses
+ (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
+ # Veneer routines
+ (r'(?i)(Box__Routine|CA__Pr|CDefArt|CInDefArt|Cl__Ms|'
+ r'Copy__Primitive|CP__Tab|DA__Pr|DB__Pr|DefArt|Dynam__String|'
+ r'EnglishNumber|Glk__Wrap|IA__Pr|IB__Pr|InDefArt|Main__|'
+ r'Meta__class|OB__Move|OB__Remove|OC__Cl|OP__Pr|Print__Addr|'
+ r'Print__PName|PrintShortName|RA__Pr|RA__Sc|RL__Pr|R_Process|'
+ r'RT__ChG|RT__ChGt|RT__ChLDB|RT__ChLDW|RT__ChPR|RT__ChPrintA|'
+ r'RT__ChPrintC|RT__ChPrintO|RT__ChPrintS|RT__ChPS|RT__ChR|'
+ r'RT__ChSTB|RT__ChSTW|RT__ChT|RT__Err|RT__TrPS|RV__Pr|'
+ r'Symb__Tab|Unsigned__Compare|WV__Pr|Z__Region)\b', Name.Builtin,
+ '#pop'),
+ # Other built-in symbols
+ (r'(?i)(call|copy|create|DEBUG|destroy|DICT_CHAR_SIZE|'
+ r'DICT_ENTRY_BYTES|DICT_IS_UNICODE|DICT_WORD_SIZE|false|'
+ r'FLOAT_INFINITY|FLOAT_NAN|FLOAT_NINFINITY|GOBJFIELD_CHAIN|'
+ r'GOBJFIELD_CHILD|GOBJFIELD_NAME|GOBJFIELD_PARENT|'
+ r'GOBJFIELD_PROPTAB|GOBJFIELD_SIBLING|GOBJ_EXT_START|'
+ r'GOBJ_TOTAL_LENGTH|Grammar__Version|INDIV_PROP_START|INFIX|'
+ r'infix__watching|MODULE_MODE|name|nothing|NUM_ATTR_BYTES|print|'
+ r'print_to_array|recreate|remaining|self|sender|STRICT_MODE|'
+ r'sw__var|sys__glob0|sys__glob1|sys__glob2|sys_statusline_flag|'
+ r'TARGET_GLULX|TARGET_ZCODE|temp__global2|temp__global3|'
+ r'temp__global4|temp_global|true|USE_MODULES|WORDSIZE)\b',
+ Name.Builtin, '#pop'),
+ # Other values
+ (_name, Name, '#pop')
+ ],
+ # Strings
+ 'dictionary-word': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _squote, String.Single),
+ (r'[({]', String.Single),
+ (r'@{[0-9a-fA-F]{,4}}', String.Escape),
+ (r'@..', String.Escape),
+ (r'[%s]' % _squote, String.Single, '#pop')
+ ],
+ 'string': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _dquote, String.Double),
+ (r'[({]', String.Double),
+ (r'\\', String.Escape),
+ (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
+ (_newline, _newline), String.Escape),
+ (r'@(\\\s*[%s]\s*)*{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
+ r'(\\\s*[%s]\s*)*}' % (_newline, _newline, _newline),
+ String.Escape),
+ (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
+ String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ 'plain-string': [
+ (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
+ (r'[~^({\[\]]', String.Double),
+ (r'\\', String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ # Names
+ '_constant': [
+ include('_whitespace'),
+ (_name, Name.Constant, '#pop'),
+ include('value')
+ ],
+ '_global': [
+ include('_whitespace'),
+ (_name, Name.Variable.Global, '#pop'),
+ include('value')
+ ],
+ 'label?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Label, '#pop')
+ ],
+ 'variable?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Variable, '#pop')
+ ],
+ # Values after hashes
+ 'obsolete-dictionary-word': [
+ (r'\S[a-zA-Z_0-9]*', String.Other, '#pop')
+ ],
+ 'system-constant': [
+ include('_whitespace'),
+ (_name, Name.Builtin, '#pop')
+ ],
+
+ # Directives
+ 'directive': [
+ include('_whitespace'),
+ (r'#', Punctuation),
+ (r';', Punctuation, '#pop'),
+ (r'\[', Punctuation,
+ ('default', 'statements', 'locals', 'routine-name?')),
+ (r'(?i)(abbreviate|endif|dictionary|ifdef|iffalse|ifndef|ifnot|'
+ r'iftrue|ifv3|ifv5|release|serial|switches|system_file|version)'
+ r'\b', Keyword, 'default'),
+ (r'(?i)(array|global)\b', Keyword,
+ ('default', 'directive-keyword?', '_global')),
+ (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
+ (r'(?i)class\b', Keyword,
+ ('object-body', 'duplicates', 'class-name')),
+ (r'(?i)(constant|default)\b', Keyword,
+ ('default', 'expression', '_constant')),
+ (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
+ (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
+ (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
+ (r'(?i)import\b', Keyword, 'manifest'),
+ (r'(?i)(include|link)\b', Keyword,
+ ('default', 'before-plain-string')),
+ (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
+ (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
+ (r'(?i)(nearby|object)\b', Keyword,
+ ('object-body', '_object-head')),
+ (r'(?i)property\b', Keyword,
+ ('default', 'alias?', '_constant', 'property-keyword*')),
+ (r'(?i)replace\b', Keyword,
+ ('default', 'routine-name?', 'routine-name?')),
+ (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
+ (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
+ (r'(?i)trace\b', Keyword,
+ ('default', 'trace-keyword?', 'trace-keyword?')),
+ (r'(?i)zcharacter\b', Keyword,
+ ('default', 'directive-keyword?', 'directive-keyword?')),
+ (_name, Name.Class, ('object-body', '_object-head'))
+ ],
+ # [, Replace, Stub
+ 'routine-name?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Function, '#pop')
+ ],
+ 'locals': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'\*', Punctuation),
+ (_name, Name.Variable)
+ ],
+ # Array
+ 'many-values': [
+ include('_whitespace'),
+ (r';', Punctuation),
+ (r'\]', Punctuation, '#pop'),
+ (r':', Error),
+ (r'', Text, ('expression', '_expression'))
+ ],
+ # Attribute, Property
+ 'alias?': [
+ include('_whitespace'),
+ (r'alias\b', Keyword, ('#pop', '_constant')),
+ (r'', Text, '#pop')
+ ],
+ # Class, Object, Nearby
+ 'class-name': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class, '#pop')
+ ],
+ 'duplicates': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
+ (r'', Text, '#pop')
+ ],
+ '_object-head': [
+ (r'[%s]>' % _dash, Punctuation),
+ (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
+ include('_global')
+ ],
+ 'object-body': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop:2'),
+ (r',', Punctuation),
+ (r'class\b', Keyword.Declaration, 'class-segment'),
+ (r'(has|private|with)\b', Keyword.Declaration),
+ (r':', Error),
+ (r'', Text, ('_object-expression', '_expression'))
+ ],
+ 'class-segment': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class),
+ (r'', Text, 'value')
+ ],
+ # Extend, Verb
+ 'grammar': [
+ include('_whitespace'),
+ (r'=', Punctuation, ('#pop', 'default')),
+ (r'\*', Punctuation, ('#pop', 'grammar-line')),
+ (r'', Text, '_directive-keyword')
+ ],
+ 'grammar-line': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'[/*]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, 'value'),
+ (r'(noun|scope)\b', Keyword, '=routine'),
+ (r'', Text, '_directive-keyword')
+ ],
+ '=routine': [
+ include('_whitespace'),
+ (r'=', Punctuation, 'routine-name?'),
+ (r'', Text, '#pop')
+ ],
+ # Import
+ 'manifest': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'(?i)(global\b)?', Keyword, '_global')
+ ],
+ # Include, Link, Message
+ 'diagnostic': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
+ (r'', Text, ('#pop', 'before-plain-string', 'directive-keyword?'))
+ ],
+ 'before-plain-string': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
+ ],
+ 'message-string': [
+ (r'[~^]+', String.Escape),
+ include('plain-string')
+ ],
+
+ # Keywords used in directives
+ '_directive-keyword!': [
+ include('_whitespace'),
+ (r'(additive|alias|buffer|class|creature|data|error|fatalerror|'
+ r'first|has|held|initial|initstr|last|long|meta|multi|'
+ r'multiexcept|multiheld|multiinside|noun|number|only|private|'
+ r'replace|reverse|scope|score|special|string|table|terminating|'
+ r'time|topic|warning|with)\b', Keyword, '#pop'),
+ (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
+ ],
+ '_directive-keyword': [
+ include('_directive-keyword!'),
+ include('value')
+ ],
+ 'directive-keyword?': [
+ include('_directive-keyword!'),
+ (r'', Text, '#pop')
+ ],
+ 'property-keyword*': [
+ include('_whitespace'),
+ (r'(additive|long)\b', Keyword),
+ (r'', Text, '#pop')
+ ],
+ 'trace-keyword?': [
+ include('_whitespace'),
+ (r'(assembly|dictionary|expressions|lines|linker|objects|off|on|'
+ r'symbols|tokens|verbs)\b', Keyword, '#pop'),
+ (r'', Text, '#pop')
+ ],
+
+ # Statements
+ 'statements': [
+ include('_whitespace'),
+ (r'\]', Punctuation, '#pop'),
+ (r'[;{}]', Punctuation),
+ (r'(box|break|continue|default|give|inversion|new_line|quit|read|'
+ r'remove|return|rfalse|rtrue|spaces|string|until)\b', Keyword,
+ 'default'),
+ (r'(do|else)\b', Keyword),
+ (r'(font|style)\b', Keyword,
+ ('default', 'miscellaneous-keyword?')),
+ (r'for\b', Keyword, ('for', '(?')),
+ (r'(if|switch|while)\b', Keyword,
+ ('expression', '_expression', '(?')),
+ (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
+ (r'objectloop\b', Keyword,
+ ('_keyword-expression', 'variable?', '(?')),
+ (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
+ (r'\.', Name.Label, 'label?'),
+ (r'@', Keyword, 'opcode'),
+ (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
+ (r'<', Punctuation, 'default'),
+ (r'(move\b)?', Keyword,
+ ('default', '_keyword-expression', '_expression'))
+ ],
+ 'miscellaneous-keyword?': [
+ include('_whitespace'),
+ (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
+ Keyword, '#pop'),
+ (r'(a|A|an|address|char|name|number|object|property|string|the|'
+ r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
+ '#pop'),
+ (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
+ '#pop'),
+ (r'', Text, '#pop')
+ ],
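+ # '(?' consumes an optional opening parenthesis, so 'if', 'while',
+ # 'for' and 'objectloop' lex with or without one.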
+ '(?': [
+ include('_whitespace'),
+ (r'\(?', Punctuation, '#pop')
+ ],
+ 'for': [
+ include('_whitespace'),
+ (r';?', Punctuation, ('_for-expression', '_expression'))
+ ],
+ 'print-list': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r':', Error),
+ (r'', Text,
+ ('_list-expression', '_expression', '_list-expression', 'form'))
+ ],
+ 'form': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
+ (r'', Text, '#pop')
+ ],
+
+ # Assembly
+ 'opcode': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
+ (_name, Keyword, 'operands')
+ ],
+ 'operands': [
+ (r':', Error),
+ (r'', Text, ('_assembly-expression', '_expression'))
+ ]
+ }
+
+ def get_tokens_unprocessed(self, text):
+ # 'in' is either a keyword or an operator.
+ # If the second token after 'in' (skipping comments and
+ # whitespace) is ')', 'in' is a keyword:
+ # objectloop(a in b)
+ # Otherwise, it is an operator:
+ # objectloop(a in b && true)
+ objectloop_queue = []
+ objectloop_token_count = -1
+ previous_token = None
+ for index, token, value in RegexLexer.get_tokens_unprocessed(self,
+ text):
+ if previous_token is Name.Variable and value == 'in':
+ objectloop_queue = [[index, token, value]]
+ objectloop_token_count = 2
+ elif objectloop_token_count > 0:
+ if token not in Comment and token not in Text:
+ objectloop_token_count -= 1
+ objectloop_queue.append((index, token, value))
+ else:
+ if objectloop_token_count == 0:
+ if objectloop_queue[-1][2] == ')':
+ objectloop_queue[0][1] = Keyword
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
+ objectloop_token_count = -1
+ yield index, token, value
+ if token not in Comment and token not in Text:
+ previous_token = token
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
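+
+ # A minimal usage sketch (the module path is assumed from this patch):
+ #
+ # from pygments.lexers.compiled import Inform6Lexer
+ # code = "[ Main; objectloop (x in y) print x; ];"
+ # for pos, tok, val in Inform6Lexer().get_tokens_unprocessed(code):
+ # print(pos, tok, val)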
+
+
+class Inform7Lexer(RegexLexer):
+ """
+ For `Inform 7 <http://inform7.com/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 7'
+ aliases = ['inform7', 'i7']
+ filenames = ['*.ni', '*.i7x']
+
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ _dash = Inform6Lexer._dash
+ _dquote = Inform6Lexer._dquote
+ _newline = Inform6Lexer._newline
+ _start = r'\A|(?<=[%s])' % _newline
+
+ # There are three lexer variants, differing in how they interpret
+ # at signs and braces in I6T. In top-level inclusions, at signs in
+ # the first column are inweb syntax. In phrase definitions and use
+ # options, tokens in braces are treated as I7. Use options
+ # also interpret "{N}".
+ tokens = {}
+ token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
+
+ for level in token_variants:
+ tokens[level] = {
+ '+i6-root': list(Inform6Lexer.tokens['root']),
+ '+i6t-root': [ # For Inform6TemplateLexer
+ (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
+ ('directive', '+p'))
+ ],
+ 'root': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]' % _dquote, Generic.Heading,
+ ('+main', '+titling', '+titling-string')),
+ (r'', Text, ('+main', '+heading?'))
+ ],
+ '+titling-string': [
+ (r'[^%s]+' % _dquote, Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '#pop')
+ ],
+ '+titling': [
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
+ (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
+ Text, ('#pop', '+heading?')),
+ (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
+ (r'[|%s]' % _newline, Generic.Heading)
+ ],
+ '+main': [
+ (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
+ (r'[%s]' % _dquote, String.Double, '+text'),
+ (r':', Text, '+phrase-definition'),
+ (r'(?i)\bas\b', Text, '+use-option'),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-not-inline'), Punctuation)),
+ (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
+ (_start, _dquote, _newline), Text, '+heading?'),
+ (r'(?i)[a(|%s]' % _newline, Text)
+ ],
+ '+phrase-definition': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive',
+ 'default', 'statements'),
+ i6t='+i6t-inline'), Punctuation), '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '+use-option': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-use-option'), Punctuation), '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '+comment': [
+ (r'[^\[\]]+', Comment.Multiline),
+ (r'\[', Comment.Multiline, '#push'),
+ (r'\]', Comment.Multiline, '#pop')
+ ],
+ '+text': [
+ (r'[^\[%s]+' % _dquote, String.Double),
+ (r'\[.*?\]', String.Interpol),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ '+heading?': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
+ (r'[%s]{1,3}' % _dash, Text),
+ (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
+ Generic.Heading, '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '+documentation-heading': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(?i)documentation\s+', Text, '+documentation-heading2'),
+ (r'', Text, '#pop')
+ ],
+ '+documentation-heading2': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s' % _dash, Text, '+documentation'),
+ (r'', Text, '#pop:2')
+ ],
+ '+documentation': [
+ (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
+ (_start, _newline), Generic.Heading),
+ (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
+ Generic.Subheading),
+ (r'((%s)\t.*?[%s])+' % (_start, _newline),
+ using(this, state='+main')),
+ (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ ],
+ '+i6t-not-inline': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p')
+ ],
+ '+i6t-use-option': [
+ include('+i6t-not-inline'),
+ (r'({)(N)(})', bygroups(Punctuation, Text, Punctuation))
+ ],
+ '+i6t-inline': [
+ (r'({)(\S[^}]*)?(})',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+i6t': [
+ (r'({[%s])(![^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Comment.Single, Punctuation)),
+ (r'({[%s])(lines)(:)([^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation), '+lines'),
+ (r'({[%s])([^:}]*)(:?)([^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation)),
+ (r'(\(\+)(.*?)(\+\)|\Z)',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+p': [
+ (r'[^@]+', Comment.Preproc),
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc, '#pop'),
+ (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading),
+ (r'@', Comment.Preproc)
+ ],
+ '+lines': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p'),
+ (r'(%s)@[a-zA-Z_0-9]*[ %s]' % (_start, _newline), Keyword),
+ (r'![^%s]*' % _newline, Comment.Single),
+ (r'({)([%s]endlines)(})' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation), '#pop'),
+ (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
+ ]
+ }
+ # Inform 7 can include snippets of Inform 6 template language,
+ # so all of Inform6Lexer's states are copied here, with
+ # modifications to account for template syntax. Inform7Lexer's
+ # own states begin with '+' to avoid name conflicts. Some of
+ # Inform6Lexer's states begin with '_': these are not modified.
+ # They deal with template syntax either by including modified
+ # states, or by matching r'' then pushing to modified states.
+ for token in Inform6Lexer.tokens:
+ if token == 'root':
+ continue
+ tokens[level][token] = list(Inform6Lexer.tokens[token])
+ if not token.startswith('_'):
+ tokens[level][token][:0] = [include('+i6t'), include(level)]
+
+ def __init__(self, **options):
+ level = options.get('i6t', '+i6t-not-inline')
+ if level not in self._all_tokens:
+ self._tokens = self.__class__.process_tokendef(level)
+ else:
+ self._tokens = self._all_tokens[level]
+ RegexLexer.__init__(self, **options)
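+
+ # Sketch: the 'i6t' option selects the token variant (module path
+ # assumed from this patch):
+ #
+ # from pygments.lexers.compiled import Inform7Lexer
+ # lexer = Inform7Lexer(i6t='+i6t-inline')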
+
+
+class Inform6TemplateLexer(Inform7Lexer):
+ """
+ For `Inform 6 template
+ <http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 6 template'
+ aliases = ['i6t']
+ filenames = ['*.i6t']
+
+ def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
+ return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
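+
+ # Sketch (assumed file name): lex a template file starting from the
+ # '+i6t-root' state:
+ #
+ # from pygments.lexers.compiled import Inform6TemplateLexer
+ # tokens = Inform6TemplateLexer().get_tokens(open('Output.i6t').read())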
+
+
+class MqlLexer(CppLexer):
+ """
+ For `MQL4 <http://docs.mql4.com/>`_ and
+ `MQL5 <http://www.mql5.com/en/docs>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'MQL'
+ aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
+ filenames = ['*.mq4', '*.mq5', '*.mqh']
+ mimetypes = ['text/x-mql']
+
+ tokens = {
+ 'statements': [
+ (r'(input|_Digits|_Point|_LastError|_Period|_RandomSeed|'
+ r'_StopFlag|_Symbol|_UninitReason|'
+ r'Ask|Bars|Bid|Close|Digits|High|Low|Open|Point|Time|Volume)\b',
+ Keyword),
+ (r'(void|char|uchar|bool|short|ushort|int|uint|color|long|ulong|datetime|'
+ r'float|double|string)\b',
+ Keyword.Type),
+ (r'(Alert|CheckPointer|Comment|DebugBreak|ExpertRemove|'
+ r'GetPointer|GetTickCount|MessageBox|PeriodSeconds|PlaySound|'
+ r'Print|PrintFormat|ResetLastError|ResourceCreate|ResourceFree|'
+ r'ResourceReadImage|ResourceSave|SendFTP|SendMail|SendNotification|'
+ r'Sleep|TerminalClose|TesterStatistics|ZeroMemory|'
+ r'ArrayBsearch|ArrayCopy|ArrayCompare|ArrayFree|ArrayGetAsSeries|'
+ r'ArrayInitialize|ArrayFill|ArrayIsSeries|ArrayIsDynamic|'
+ r'ArrayMaximum|ArrayMinimum|ArrayRange|ArrayResize|'
+ r'ArraySetAsSeries|ArraySize|ArraySort|ArrayCopyRates|'
+ r'ArrayCopySeries|ArrayDimension|'
+ r'CharToString|DoubleToString|EnumToString|NormalizeDouble|'
+ r'StringToDouble|StringToInteger|StringToTime|TimeToString|'
+ r'IntegerToString|ShortToString|ShortArrayToString|'
+ r'StringToShortArray|CharArrayToString|StringToCharArray|'
+ r'ColorToARGB|ColorToString|StringToColor|StringFormat|'
+ r'CharToStr|DoubleToStr|StrToDouble|StrToInteger|StrToTime|TimeToStr|'
+ r'MathAbs|MathArccos|MathArcsin|MathArctan|MathCeil|MathCos|MathExp|'
+ r'MathFloor|MathLog|MathMax|MathMin|MathMod|MathPow|MathRand|'
+ r'MathRound|MathSin|MathSqrt|MathSrand|MathTan|MathIsValidNumber|'
+ r'StringAdd|StringBufferLen|StringCompare|StringConcatenate|StringFill|'
+ r'StringFind|StringGetCharacter|StringInit|StringLen|StringReplace|'
+ r'StringSetCharacter|StringSplit|StringSubstr|StringToLower|StringToUpper|'
+ r'StringTrimLeft|StringTrimRight|StringGetChar|StringSetChar|'
+ r'TimeCurrent|TimeTradeServer|TimeLocal|TimeGMT|TimeDaylightSavings|'
+ r'TimeGMTOffset|TimeToStruct|StructToTime|Day|DayOfWeek|DayOfYear|'
+ r'Hour|Minute|Month|Seconds|TimeDay|TimeDayOfWeek|TimeDayOfYear|TimeHour|'
+ r'TimeMinute|TimeMonth|TimeSeconds|TimeYear|Year|'
+ r'AccountInfoDouble|AccountInfoInteger|AccountInfoString|AccountBalance|'
+ r'AccountCredit|AccountCompany|AccountCurrency|AccountEquity|'
+ r'AccountFreeMargin|AccountFreeMarginCheck|AccountFreeMarginMode|'
+ r'AccountLeverage|AccountMargin|AccountName|AccountNumber|AccountProfit|'
+ r'AccountServer|AccountStopoutLevel|AccountStopoutMode|'
+ r'GetLastError|IsStopped|UninitializeReason|MQLInfoInteger|MQLInfoString|'
+ r'Symbol|Period|Digits|Point|IsConnected|IsDemo|IsDllsAllowed|'
+ r'IsExpertEnabled|IsLibrariesAllowed|IsOptimization|IsTesting|'
+ r'IsTradeAllowed|'
+ r'IsTradeContextBusy|IsVisualMode|TerminalCompany|TerminalName|'
+ r'TerminalPath|'
+ r'SymbolsTotal|SymbolName|SymbolSelect|SymbolIsSynchronized|'
+ r'SymbolInfoDouble|'
+ r'SymbolInfoInteger|SymbolInfoString|SymbolInfoTick|'
+ r'SymbolInfoSessionQuote|'
+ r'SymbolInfoSessionTrade|MarketInfo|'
+ r'SeriesInfoInteger|CopyRates|CopyTime|CopyOpen|'
+ r'CopyHigh|CopyLow|CopyClose|'
+ r'CopyTickVolume|CopyRealVolume|CopySpread|iBars|iBarShift|iClose|'
+ r'iHigh|iHighest|iLow|iLowest|iOpen|iTime|iVolume|'
+ r'HideTestIndicators|Period|RefreshRates|Symbol|WindowBarsPerChart|'
+ r'WindowExpertName|WindowFind|WindowFirstVisibleBar|WindowHandle|'
+ r'WindowIsVisible|WindowOnDropped|WindowPriceMax|WindowPriceMin|'
+ r'WindowPriceOnDropped|WindowRedraw|WindowScreenShot|'
+ r'WindowTimeOnDropped|WindowsTotal|WindowXOnDropped|WindowYOnDropped|'
+ r'OrderClose|OrderCloseBy|OrderClosePrice|OrderCloseTime|OrderComment|'
+ r'OrderCommission|OrderDelete|OrderExpiration|OrderLots|OrderMagicNumber|'
+ r'OrderModify|OrderOpenPrice|OrderOpenTime|OrderPrint|OrderProfit|'
+ r'OrderSelect|OrderSend|OrdersHistoryTotal|OrderStopLoss|OrdersTotal|'
+ r'OrderSwap|OrderSymbol|OrderTakeProfit|OrderTicket|OrderType|'
+ r'GlobalVariableCheck|GlobalVariableTime|GlobalVariableDel|'
+ r'GlobalVariableGet|GlobalVariableName|GlobalVariableSet|'
+ r'GlobalVariablesFlush|GlobalVariableTemp|'
+ r'GlobalVariableSetOnCondition|GlobalVariablesDeleteAll|'
+ r'GlobalVariablesTotal|'
+ r'FileFindFirst|FileFindNext|FileFindClose|FileOpen|FileDelete|'
+ r'FileFlush|FileGetInteger|FileIsEnding|FileIsLineEnding|'
+ r'FileClose|FileIsExist|FileCopy|FileMove|FileReadArray|'
+ r'FileReadBool|FileReadDatetime|FileReadDouble|FileReadFloat|'
+ r'FileReadInteger|FileReadLong|FileReadNumber|FileReadString|'
+ r'FileReadStruct|FileSeek|FileSize|FileTell|FileWrite|'
+ r'FileWriteArray|FileWriteDouble|FileWriteFloat|FileWriteInteger|'
+ r'FileWriteLong|FileWriteString|FileWriteStruct|FolderCreate|'
+ r'FolderDelete|FolderClean|FileOpenHistory|'
+ r'IndicatorSetDouble|IndicatorSetInteger|IndicatorSetString|'
+ r'SetIndexBuffer|IndicatorBuffers|IndicatorCounted|IndicatorDigits|'
+ r'IndicatorShortName|SetIndexArrow|SetIndexDrawBegin|'
+ r'SetIndexEmptyValue|SetIndexLabel|SetIndexShift|'
+ r'SetIndexStyle|SetLevelStyle|SetLevelValue|'
+ r'ObjectCreate|ObjectName|ObjectDelete|ObjectsDeleteAll|'
+ r'ObjectFind|ObjectGetTimeByValue|ObjectGetValueByTime|'
+ r'ObjectMove|ObjectsTotal|ObjectGetDouble|ObjectGetInteger|'
+ r'ObjectGetString|ObjectSetDouble|ObjectSetInteger|'
+ r'ObjectSetString|TextSetFont|TextOut|TextGetSize|'
+ r'ObjectDescription|ObjectGet|ObjectGetFiboDescription|'
+ r'ObjectGetShiftByValue|ObjectGetValueByShift|ObjectSet|'
+ r'ObjectSetFiboDescription|ObjectSetText|ObjectType|'
+ r'iAC|iAD|iADX|iAlligator|iAO|iATR|iBearsPower|'
+ r'iBands|iBandsOnArray|iBullsPower|iCCI|iCCIOnArray|'
+ r'iCustom|iDeMarker|iEnvelopes|iEnvelopesOnArray|'
+ r'iForce|iFractals|iGator|iIchimoku|iBWMFI|iMomentum|'
+ r'iMomentumOnArray|iMFI|iMA|iMAOnArray|iOsMA|iMACD|'
+ r'iOBV|iSAR|iRSI|iRSIOnArray|iRVI|iStdDev|iStdDevOnArray|'
+ r'iStochastic|iWPR|'
+ r'EventSetMillisecondTimer|EventSetTimer|'
+ r'EventKillTimer|EventChartCustom)\b', Name.Function),
+ (r'(CHARTEVENT_KEYDOWN|CHARTEVENT_MOUSE_MOVE|'
+ r'CHARTEVENT_OBJECT_CREATE|'
+ r'CHARTEVENT_OBJECT_CHANGE|CHARTEVENT_OBJECT_DELETE|'
+ r'CHARTEVENT_CLICK|'
+ r'CHARTEVENT_OBJECT_CLICK|CHARTEVENT_OBJECT_DRAG|'
+ r'CHARTEVENT_OBJECT_ENDEDIT|'
+ r'CHARTEVENT_CHART_CHANGE|CHARTEVENT_CUSTOM|'
+ r'CHARTEVENT_CUSTOM_LAST|'
+ r'PERIOD_CURRENT|PERIOD_M1|PERIOD_M2|PERIOD_M3|'
+ r'PERIOD_M4|PERIOD_M5|'
+ r'PERIOD_M6|PERIOD_M10|PERIOD_M12|PERIOD_M15|'
+ r'PERIOD_M20|PERIOD_M30|'
+ r'PERIOD_H1|PERIOD_H2|PERIOD_H3|PERIOD_H4|'
+ r'PERIOD_H6|PERIOD_H8|'
+ r'PERIOD_H12|PERIOD_D1|PERIOD_W1|PERIOD_MN1|'
+ r'CHART_IS_OBJECT|CHART_BRING_TO_TOP|'
+ r'CHART_MOUSE_SCROLL|CHART_EVENT_MOUSE_MOVE|'
+ r'CHART_EVENT_OBJECT_CREATE|'
+ r'CHART_EVENT_OBJECT_DELETE|CHART_MODE|CHART_FOREGROUND|'
+ r'CHART_SHIFT|'
+ r'CHART_AUTOSCROLL|CHART_SCALE|CHART_SCALEFIX|'
+ r'CHART_SCALEFIX_11|'
+ r'CHART_SCALE_PT_PER_BAR|CHART_SHOW_OHLC|'
+ r'CHART_SHOW_BID_LINE|'
+ r'CHART_SHOW_ASK_LINE|CHART_SHOW_LAST_LINE|'
+ r'CHART_SHOW_PERIOD_SEP|'
+ r'CHART_SHOW_GRID|CHART_SHOW_VOLUMES|'
+ r'CHART_SHOW_OBJECT_DESCR|'
+ r'CHART_VISIBLE_BARS|CHART_WINDOWS_TOTAL|'
+ r'CHART_WINDOW_IS_VISIBLE|'
+ r'CHART_WINDOW_HANDLE|CHART_WINDOW_YDISTANCE|'
+ r'CHART_FIRST_VISIBLE_BAR|'
+ r'CHART_WIDTH_IN_BARS|CHART_WIDTH_IN_PIXELS|'
+ r'CHART_HEIGHT_IN_PIXELS|'
+ r'CHART_COLOR_BACKGROUND|CHART_COLOR_FOREGROUND|'
+ r'CHART_COLOR_GRID|'
+ r'CHART_COLOR_VOLUME|CHART_COLOR_CHART_UP|'
+ r'CHART_COLOR_CHART_DOWN|'
+ r'CHART_COLOR_CHART_LINE|CHART_COLOR_CANDLE_BULL|'
+ r'CHART_COLOR_CANDLE_BEAR|'
+ r'CHART_COLOR_BID|CHART_COLOR_ASK|CHART_COLOR_LAST|'
+ r'CHART_COLOR_STOP_LEVEL|'
+ r'CHART_SHOW_TRADE_LEVELS|CHART_DRAG_TRADE_LEVELS|'
+ r'CHART_SHOW_DATE_SCALE|'
+ r'CHART_SHOW_PRICE_SCALE|CHART_SHIFT_SIZE|'
+ r'CHART_FIXED_POSITION|'
+ r'CHART_FIXED_MAX|CHART_FIXED_MIN|CHART_POINTS_PER_BAR|'
+ r'CHART_PRICE_MIN|'
+ r'CHART_PRICE_MAX|CHART_COMMENT|CHART_BEGIN|'
+ r'CHART_CURRENT_POS|CHART_END|'
+ r'CHART_BARS|CHART_CANDLES|CHART_LINE|CHART_VOLUME_HIDE|'
+ r'CHART_VOLUME_TICK|CHART_VOLUME_REAL|'
+ r'OBJ_VLINE|OBJ_HLINE|OBJ_TREND|OBJ_TRENDBYANGLE|OBJ_CYCLES|'
+ r'OBJ_CHANNEL|OBJ_STDDEVCHANNEL|OBJ_REGRESSION|OBJ_PITCHFORK|'
+ r'OBJ_GANNLINE|OBJ_GANNFAN|OBJ_GANNGRID|OBJ_FIBO|'
+ r'OBJ_FIBOTIMES|OBJ_FIBOFAN|OBJ_FIBOARC|OBJ_FIBOCHANNEL|'
+ r'OBJ_EXPANSION|OBJ_RECTANGLE|OBJ_TRIANGLE|OBJ_ELLIPSE|'
+ r'OBJ_ARROW_THUMB_UP|OBJ_ARROW_THUMB_DOWN|'
+ r'OBJ_ARROW_UP|OBJ_ARROW_DOWN|'
+ r'OBJ_ARROW_STOP|OBJ_ARROW_CHECK|OBJ_ARROW_LEFT_PRICE|'
+ r'OBJ_ARROW_RIGHT_PRICE|OBJ_ARROW_BUY|OBJ_ARROW_SELL|'
+ r'OBJ_ARROW|'
+ r'OBJ_TEXT|OBJ_LABEL|OBJ_BUTTON|OBJ_BITMAP|'
+ r'OBJ_BITMAP_LABEL|'
+ r'OBJ_EDIT|OBJ_EVENT|OBJ_RECTANGLE_LABEL|'
+ r'OBJPROP_TIME1|OBJPROP_PRICE1|OBJPROP_TIME2|'
+ r'OBJPROP_PRICE2|OBJPROP_TIME3|'
+ r'OBJPROP_PRICE3|OBJPROP_COLOR|OBJPROP_STYLE|'
+ r'OBJPROP_WIDTH|'
+ r'OBJPROP_BACK|OBJPROP_RAY|OBJPROP_ELLIPSE|'
+ r'OBJPROP_SCALE|'
+ r'OBJPROP_ANGLE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|'
+ r'OBJPROP_DEVIATION|OBJPROP_FONTSIZE|OBJPROP_CORNER|'
+ r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_FIBOLEVELS|'
+ r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
+ r'OBJPROP_FIRSTLEVEL|OBJPROP_COLOR|OBJPROP_STYLE|OBJPROP_WIDTH|'
+ r'OBJPROP_BACK|OBJPROP_ZORDER|OBJPROP_FILL|OBJPROP_HIDDEN|'
+ r'OBJPROP_SELECTED|OBJPROP_READONLY|OBJPROP_TYPE|OBJPROP_TIME|'
+ r'OBJPROP_SELECTABLE|OBJPROP_CREATETIME|OBJPROP_LEVELS|'
+ r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
+ r'OBJPROP_ALIGN|OBJPROP_FONTSIZE|OBJPROP_RAY_RIGHT|OBJPROP_RAY|'
+ r'OBJPROP_ELLIPSE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|OBJPROP_ANCHOR|'
+ r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_DRAWLINES|OBJPROP_STATE|'
+ r'OBJPROP_CHART_ID|OBJPROP_XSIZE|OBJPROP_YSIZE|OBJPROP_XOFFSET|'
+ r'OBJPROP_YOFFSET|OBJPROP_PERIOD|OBJPROP_DATE_SCALE|OBJPROP_PRICE_SCALE|'
+ r'OBJPROP_CHART_SCALE|OBJPROP_BGCOLOR|OBJPROP_CORNER|OBJPROP_BORDER_TYPE|'
+ r'OBJPROP_BORDER_COLOR|OBJPROP_PRICE|OBJPROP_LEVELVALUE|OBJPROP_SCALE|'
+ r'OBJPROP_ANGLE|OBJPROP_DEVIATION|'
+ r'OBJPROP_NAME|OBJPROP_TEXT|OBJPROP_TOOLTIP|OBJPROP_LEVELTEXT|'
+ r'OBJPROP_FONT|OBJPROP_BMPFILE|OBJPROP_SYMBOL|'
+ r'BORDER_FLAT|BORDER_RAISED|BORDER_SUNKEN|ALIGN_LEFT|ALIGN_CENTER|'
+ r'ALIGN_RIGHT|ANCHOR_LEFT_UPPER|ANCHOR_LEFT|ANCHOR_LEFT_LOWER|'
+ r'ANCHOR_LOWER|ANCHOR_RIGHT_LOWER|ANCHOR_RIGHT|ANCHOR_RIGHT_UPPER|'
+ r'ANCHOR_UPPER|ANCHOR_CENTER|ANCHOR_TOP|ANCHOR_BOTTOM|'
+ r'CORNER_LEFT_UPPER|CORNER_LEFT_LOWER|CORNER_RIGHT_LOWER|'
+ r'CORNER_RIGHT_UPPER|'
+ r'OBJ_NO_PERIODS|EMPTY|OBJ_PERIOD_M1|OBJ_PERIOD_M5|OBJ_PERIOD_M15|'
+ r'OBJ_PERIOD_M30|OBJ_PERIOD_H1|OBJ_PERIOD_H4|OBJ_PERIOD_D1|'
+ r'OBJ_PERIOD_W1|OBJ_PERIOD_MN1|OBJ_ALL_PERIODS|'
+ r'GANN_UP_TREND|GANN_DOWN_TREND|'
+ r'((clr)?(Black|DarkGreen|DarkSlateGray|Olive|'
+ r'Green|Teal|Navy|Purple|'
+ r'Maroon|Indigo|MidnightBlue|DarkBlue|'
+ r'DarkOliveGreen|SaddleBrown|'
+ r'ForestGreen|OliveDrab|SeaGreen|'
+ r'DarkGoldenrod|DarkSlateBlue|'
+ r'Sienna|MediumBlue|Brown|DarkTurquoise|'
+ r'DimGray|LightSeaGreen|'
+ r'DarkViolet|FireBrick|MediumVioletRed|'
+ r'MediumSeaGreen|Chocolate|'
+ r'Crimson|SteelBlue|Goldenrod|MediumSpringGreen|'
+ r'LawnGreen|CadetBlue|'
+ r'DarkOrchid|YellowGreen|LimeGreen|OrangeRed|'
+ r'DarkOrange|Orange|'
+ r'Gold|Yellow|Chartreuse|Lime|SpringGreen|'
+ r'Aqua|DeepSkyBlue|Blue|'
+ r'Magenta|Red|Gray|SlateGray|Peru|BlueViolet|'
+ r'LightSlateGray|DeepPink|'
+ r'MediumTurquoise|DodgerBlue|Turquoise|RoyalBlue|'
+ r'SlateBlue|DarkKhaki|'
+ r'IndianRed|MediumOrchid|GreenYellow|'
+ r'MediumAquamarine|DarkSeaGreen|'
+ r'Tomato|RosyBrown|Orchid|MediumPurple|'
+ r'PaleVioletRed|Coral|CornflowerBlue|'
+ r'DarkGray|SandyBrown|MediumSlateBlue|'
+ r'Tan|DarkSalmon|BurlyWood|'
+ r'HotPink|Salmon|Violet|LightCoral|SkyBlue|'
+ r'LightSalmon|Plum|'
+ r'Khaki|LightGreen|Aquamarine|Silver|'
+ r'LightSkyBlue|LightSteelBlue|'
+ r'LightBlue|PaleGreen|Thistle|PowderBlue|'
+ r'PaleGoldenrod|PaleTurquoise|'
+ r'LightGray|Wheat|NavajoWhite|Moccasin|'
+ r'LightPink|Gainsboro|PeachPuff|'
+ r'Pink|Bisque|LightGoldenrod|BlanchedAlmond|'
+ r'LemonChiffon|Beige|'
+ r'AntiqueWhite|PapayaWhip|Cornsilk|'
+ r'LightYellow|LightCyan|Linen|'
+ r'Lavender|MistyRose|OldLace|WhiteSmoke|'
+ r'Seashell|Ivory|Honeydew|'
+ r'AliceBlue|LavenderBlush|MintCream|Snow|White))|'
+ r'SYMBOL_THUMBSUP|SYMBOL_THUMBSDOWN|'
+ r'SYMBOL_ARROWUP|SYMBOL_ARROWDOWN|'
+ r'SYMBOL_STOPSIGN|SYMBOL_CHECKSIGN|'
+ r'SYMBOL_LEFTPRICE|SYMBOL_RIGHTPRICE|'
+ r'PRICE_CLOSE|PRICE_OPEN|PRICE_HIGH|PRICE_LOW|'
+ r'PRICE_MEDIAN|PRICE_TYPICAL|PRICE_WEIGHTED|'
+ r'VOLUME_TICK|VOLUME_REAL|'
+ r'STO_LOWHIGH|STO_CLOSECLOSE|'
+ r'MODE_OPEN|MODE_LOW|MODE_HIGH|MODE_CLOSE|MODE_VOLUME|MODE_TIME|'
+ r'MODE_SMA|MODE_EMA|MODE_SMMA|MODE_LWMA|'
+ r'MODE_MAIN|MODE_SIGNAL|'
+ r'MODE_PLUSDI|MODE_MINUSDI|MODE_UPPER|'
+ r'MODE_LOWER|MODE_GATORJAW|MODE_GATORTEETH|'
+ r'MODE_GATORLIPS|MODE_TENKANSEN|'
+ r'MODE_KIJUNSEN|MODE_SENKOUSPANA|'
+ r'MODE_SENKOUSPANB|MODE_CHINKOUSPAN|'
+ r'DRAW_LINE|DRAW_SECTION|DRAW_HISTOGRAM|'
+ r'DRAW_ARROW|DRAW_ZIGZAG|DRAW_NONE|'
+ r'STYLE_SOLID|STYLE_DASH|STYLE_DOT|'
+ r'STYLE_DASHDOT|STYLE_DASHDOTDOT|'
+ r'DRAW_FILLING|'
+ r'INDICATOR_DATA|INDICATOR_COLOR_INDEX|'
+ r'INDICATOR_CALCULATIONS|INDICATOR_DIGITS|'
+ r'INDICATOR_HEIGHT|INDICATOR_LEVELS|'
+ r'INDICATOR_LEVELCOLOR|INDICATOR_LEVELSTYLE|'
+ r'INDICATOR_LEVELWIDTH|INDICATOR_MINIMUM|'
+ r'INDICATOR_MAXIMUM|INDICATOR_LEVELVALUE|'
+ r'INDICATOR_SHORTNAME|INDICATOR_LEVELTEXT|'
+ r'TERMINAL_BUILD|TERMINAL_CONNECTED|'
+ r'TERMINAL_DLLS_ALLOWED|TERMINAL_TRADE_ALLOWED|'
+ r'TERMINAL_EMAIL_ENABLED|'
+ r'TERMINAL_FTP_ENABLED|TERMINAL_MAXBARS|'
+ r'TERMINAL_CODEPAGE|TERMINAL_CPU_CORES|'
+ r'TERMINAL_DISK_SPACE|TERMINAL_MEMORY_PHYSICAL|'
+ r'TERMINAL_MEMORY_TOTAL|'
+ r'TERMINAL_MEMORY_AVAILABLE|TERMINAL_MEMORY_USED|'
+ r'TERMINAL_X64|'
+ r'TERMINAL_OPENCL_SUPPORT|TERMINAL_LANGUAGE|'
+ r'TERMINAL_COMPANY|TERMINAL_NAME|'
+ r'TERMINAL_PATH|TERMINAL_DATA_PATH|'
+ r'TERMINAL_COMMONDATA_PATH|'
+ r'MQL_PROGRAM_TYPE|MQL_DLLS_ALLOWED|'
+ r'MQL_TRADE_ALLOWED|MQL_DEBUG|'
+ r'MQL_PROFILER|MQL_TESTER|MQL_OPTIMIZATION|'
+ r'MQL_VISUAL_MODE|'
+ r'MQL_FRAME_MODE|MQL_LICENSE_TYPE|MQL_PROGRAM_NAME|'
+ r'MQL_PROGRAM_PATH|'
+ r'PROGRAM_SCRIPT|PROGRAM_EXPERT|'
+ r'PROGRAM_INDICATOR|LICENSE_FREE|'
+ r'LICENSE_DEMO|LICENSE_FULL|LICENSE_TIME|'
+ r'MODE_LOW|MODE_HIGH|MODE_TIME|MODE_BID|'
+ r'MODE_ASK|MODE_POINT|'
+ r'MODE_DIGITS|MODE_SPREAD|MODE_STOPLEVEL|'
+ r'MODE_LOTSIZE|MODE_TICKVALUE|'
+ r'MODE_TICKSIZE|MODE_SWAPLONG|'
+ r'MODE_SWAPSHORT|MODE_STARTING|'
+ r'MODE_EXPIRATION|MODE_TRADEALLOWED|'
+ r'MODE_MINLOT|MODE_LOTSTEP|MODE_MAXLOT|'
+ r'MODE_SWAPTYPE|MODE_PROFITCALCMODE|'
+ r'MODE_MARGINCALCMODE|MODE_MARGININIT|'
+ r'MODE_MARGINMAINTENANCE|MODE_MARGINHEDGED|'
+ r'MODE_MARGINREQUIRED|MODE_FREEZELEVEL|'
+ r'SUNDAY|MONDAY|TUESDAY|WEDNESDAY|THURSDAY|'
+ r'FRIDAY|SATURDAY|'
+ r'ACCOUNT_LOGIN|ACCOUNT_TRADE_MODE|'
+ r'ACCOUNT_LEVERAGE|'
+ r'ACCOUNT_LIMIT_ORDERS|ACCOUNT_MARGIN_SO_MODE|'
+ r'ACCOUNT_TRADE_ALLOWED|ACCOUNT_TRADE_EXPERT|'
+ r'ACCOUNT_BALANCE|'
+ r'ACCOUNT_CREDIT|ACCOUNT_PROFIT|ACCOUNT_EQUITY|'
+ r'ACCOUNT_MARGIN|'
+ r'ACCOUNT_FREEMARGIN|ACCOUNT_MARGIN_LEVEL|'
+ r'ACCOUNT_MARGIN_SO_CALL|'
+ r'ACCOUNT_MARGIN_SO_SO|ACCOUNT_NAME|'
+ r'ACCOUNT_SERVER|ACCOUNT_CURRENCY|'
+ r'ACCOUNT_COMPANY|ACCOUNT_TRADE_MODE_DEMO|'
+ r'ACCOUNT_TRADE_MODE_CONTEST|'
+ r'ACCOUNT_TRADE_MODE_REAL|ACCOUNT_STOPOUT_MODE_PERCENT|'
+ r'ACCOUNT_STOPOUT_MODE_MONEY|'
+ r'STAT_INITIAL_DEPOSIT|STAT_WITHDRAWAL|STAT_PROFIT|'
+ r'STAT_GROSS_PROFIT|'
+ r'STAT_GROSS_LOSS|STAT_MAX_PROFITTRADE|'
+ r'STAT_MAX_LOSSTRADE|STAT_CONPROFITMAX|'
+ r'STAT_CONPROFITMAX_TRADES|STAT_MAX_CONWINS|'
+ r'STAT_MAX_CONPROFIT_TRADES|'
+ r'STAT_CONLOSSMAX|STAT_CONLOSSMAX_TRADES|'
+ r'STAT_MAX_CONLOSSES|'
+ r'STAT_MAX_CONLOSS_TRADES|STAT_BALANCEMIN|'
+ r'STAT_BALANCE_DD|'
+ r'STAT_BALANCEDD_PERCENT|STAT_BALANCE_DDREL_PERCENT|'
+ r'STAT_BALANCE_DD_RELATIVE|STAT_EQUITYMIN|'
+ r'STAT_EQUITY_DD|'
+ r'STAT_EQUITYDD_PERCENT|STAT_EQUITY_DDREL_PERCENT|'
+ r'STAT_EQUITY_DD_RELATIVE|STAT_EXPECTED_PAYOFF|'
+ r'STAT_PROFIT_FACTOR|'
+ r'STAT_RECOVERY_FACTOR|STAT_SHARPE_RATIO|'
+ r'STAT_MIN_MARGINLEVEL|'
+ r'STAT_CUSTOM_ONTESTER|STAT_DEALS|STAT_TRADES|'
+ r'STAT_PROFIT_TRADES|'
+ r'STAT_LOSS_TRADES|STAT_SHORT_TRADES|STAT_LONG_TRADES|'
+ r'STAT_PROFIT_SHORTTRADES|STAT_PROFIT_LONGTRADES|'
+ r'STAT_PROFITTRADES_AVGCON|STAT_LOSSTRADES_AVGCON|'
+ r'SERIES_BARS_COUNT|SERIES_FIRSTDATE|SERIES_LASTBAR_DATE|'
+ r'SERIES_SERVER_FIRSTDATE|SERIES_TERMINAL_FIRSTDATE|'
+ r'SERIES_SYNCHRONIZED|'
+ r'OP_BUY|OP_SELL|OP_BUYLIMIT|OP_SELLLIMIT|'
+ r'OP_BUYSTOP|OP_SELLSTOP|'
+ r'TRADE_ACTION_DEAL|TRADE_ACTION_PENDING|'
+ r'TRADE_ACTION_SLTP|'
+ r'TRADE_ACTION_MODIFY|TRADE_ACTION_REMOVE|'
+ r'__DATE__|__DATETIME__|__LINE__|__FILE__|'
+ r'__PATH__|__FUNCTION__|'
+ r'__FUNCSIG__|__MQLBUILD__|__MQL4BUILD__|'
+ r'M_E|M_LOG2E|M_LOG10E|M_LN2|M_LN10|'
+ r'M_PI|M_PI_2|M_PI_4|M_1_PI|'
+ r'M_2_PI|M_2_SQRTPI|M_SQRT2|M_SQRT1_2|'
+ r'CHAR_MIN|CHAR_MAX|UCHAR_MAX|'
+ r'SHORT_MIN|SHORT_MAX|USHORT_MAX|'
+ r'INT_MIN|INT_MAX|UINT_MAX|'
+ r'LONG_MIN|LONG_MAX|ULONG_MAX|'
+ r'DBL_MIN|DBL_MAX|DBL_EPSILON|DBL_DIG|DBL_MANT_DIG|'
+ r'DBL_MAX_10_EXP|DBL_MAX_EXP|DBL_MIN_10_EXP|DBL_MIN_EXP|'
+ r'FLT_MIN|FLT_MAX|FLT_EPSILON|'
+ r'FLT_DIG|FLT_MANT_DIG|FLT_MAX_10_EXP|'
+ r'FLT_MAX_EXP|FLT_MIN_10_EXP|FLT_MIN_EXP|REASON_PROGRAM|'
+ r'REASON_REMOVE|REASON_RECOMPILE|'
+ r'REASON_CHARTCHANGE|REASON_CHARTCLOSE|'
+ r'REASON_PARAMETERS|REASON_ACCOUNT|'
+ r'REASON_TEMPLATE|REASON_INITFAILED|'
+ r'REASON_CLOSE|POINTER_INVALID|'
+ r'POINTER_DYNAMIC|POINTER_AUTOMATIC|'
+ r'NULL|EMPTY_VALUE|CLR_NONE|WHOLE_ARRAY|'
+ r'CHARTS_MAX|clrNONE|INVALID_HANDLE|'
+ r'IS_DEBUG_MODE|IS_PROFILE_MODE|WRONG_VALUE|'
+ r'ERR_NO_ERROR|ERR_NO_RESULT|ERR_COMMON_ERROR|'
+ r'ERR_INVALID_TRADE_PARAMETERS|'
+ r'ERR_SERVER_BUSY|ERR_OLD_VERSION|ERR_NO_CONNECTION|'
+ r'ERR_NOT_ENOUGH_RIGHTS|'
+ r'ERR_TOO_FREQUENT_REQUESTS|ERR_MALFUNCTIONAL_TRADE|'
+ r'ERR_ACCOUNT_DISABLED|'
+ r'ERR_INVALID_ACCOUNT|ERR_TRADE_TIMEOUT|'
+ r'ERR_INVALID_PRICE|ERR_INVALID_STOPS|'
+ r'ERR_INVALID_TRADE_VOLUME|ERR_MARKET_CLOSED|'
+ r'ERR_TRADE_DISABLED|'
+ r'ERR_NOT_ENOUGH_MONEY|ERR_PRICE_CHANGED|'
+ r'ERR_OFF_QUOTES|ERR_BROKER_BUSY|'
+ r'ERR_REQUOTE|ERR_ORDER_LOCKED|'
+ r'ERR_LONG_POSITIONS_ONLY_ALLOWED|ERR_TOO_MANY_REQUESTS|'
+ r'ERR_TRADE_MODIFY_DENIED|ERR_TRADE_CONTEXT_BUSY|'
+ r'ERR_TRADE_EXPIRATION_DENIED|'
+ r'ERR_TRADE_TOO_MANY_ORDERS|ERR_TRADE_HEDGE_PROHIBITED|'
+ r'ERR_TRADE_PROHIBITED_BY_FIFO|'
+ r'FILE_READ|FILE_WRITE|FILE_BIN|FILE_CSV|FILE_TXT|'
+ r'FILE_ANSI|FILE_UNICODE|'
+ r'FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_REWRITE|'
+ r'FILE_COMMON|FILE_EXISTS|'
+ r'FILE_CREATE_DATE|FILE_MODIFY_DATE|'
+ r'FILE_ACCESS_DATE|FILE_SIZE|FILE_POSITION|'
+ r'FILE_END|FILE_LINE_END|FILE_IS_COMMON|'
+ r'FILE_IS_TEXT|FILE_IS_BINARY|'
+ r'FILE_IS_CSV|FILE_IS_ANSI|FILE_IS_READABLE|FILE_IS_WRITABLE|'
+ r'SEEK_SET|SEEK_CUR|SEEK_END|CP_ACP|'
+ r'CP_OEMCP|CP_MACCP|CP_THREAD_ACP|'
+ r'CP_SYMBOL|CP_UTF7|CP_UTF8|IDOK|IDCANCEL|IDABORT|'
+ r'IDRETRY|IDIGNORE|IDYES|IDNO|IDTRYAGAIN|IDCONTINUE|'
+ r'MB_OK|MB_OKCANCEL|MB_ABORTRETRYIGNORE|MB_YESNOCANCEL|'
+ r'MB_YESNO|MB_RETRYCANCEL|'
+ r'MB_CANCELTRYCONTINUE|MB_ICONSTOP|MB_ICONERROR|'
+ r'MB_ICONHAND|MB_ICONQUESTION|'
+ r'MB_ICONEXCLAMATION|MB_ICONWARNING|'
+ r'MB_ICONINFORMATION|MB_ICONASTERISK|'
+ r'MB_DEFBUTTON1|MB_DEFBUTTON2|MB_DEFBUTTON3|MB_DEFBUTTON4)\b',
+ Name.Constant),
+ inherit,
+ ],
+ }
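+
+ # Minimal usage sketch (the sample code is illustrative only):
+ #
+ # from pygments import highlight
+ # from pygments.formatters import HtmlFormatter
+ # print(highlight('int start() { Print(Symbol()); return(0); }',
+ # MqlLexer(), HtmlFormatter()))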
diff --git a/pygments/lexers/dalvik.py b/pygments/lexers/dalvik.py
index 07d97eb6..901b7c5a 100644
--- a/pygments/lexers/dalvik.py
+++ b/pygments/lexers/dalvik.py
@@ -21,7 +21,7 @@ class SmaliLexer(RegexLexer):
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 5bda5c2d..0754ba02 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -14,7 +14,7 @@ from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
-from pygments.util import get_choice_opt
+from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
@@ -44,7 +44,7 @@ class CSharpLexer(RegexLexer):
The default value is ``basic``.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'C#'
@@ -71,7 +71,7 @@ class CSharpLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
@@ -126,7 +126,7 @@ class CSharpLexer(RegexLexer):
}
def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
+ level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
@@ -156,7 +156,7 @@ class NemerleLexer(RegexLexer):
The default value is ``basic``.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Nemerle'
@@ -183,7 +183,7 @@ class NemerleLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
@@ -284,7 +284,7 @@ class NemerleLexer(RegexLexer):
}
def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
+ level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
@@ -531,7 +531,7 @@ class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'FSharp'
@@ -638,6 +638,8 @@ class FSharpLexer(RegexLexer):
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
(r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
+ # e.g. dictionary index access
+ (r'', Text, '#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
diff --git a/pygments/lexers/foxpro.py b/pygments/lexers/foxpro.py
index bc6cc296..99a65ce7 100644
--- a/pygments/lexers/foxpro.py
+++ b/pygments/lexers/foxpro.py
@@ -24,7 +24,7 @@ class FoxProLexer(RegexLexer):
FoxPro syntax allows all keywords and function names to be shortened
to 4 characters. Shortened forms are not recognized by this lexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'FoxPro'
diff --git a/pygments/lexers/functional.py b/pygments/lexers/functional.py
index 9df88d97..122114fa 100644
--- a/pygments/lexers/functional.py
+++ b/pygments/lexers/functional.py
@@ -19,7 +19,7 @@ __all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer',
'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer',
'OpaLexer', 'CoqLexer', 'NewLispLexer', 'NixLexer', 'ElixirLexer',
- 'ElixirConsoleLexer', 'KokaLexer']
+ 'ElixirConsoleLexer', 'KokaLexer', 'IdrisLexer', 'LiterateIdrisLexer']
line_re = re.compile('.*?\n')
@@ -30,7 +30,7 @@ class RacketLexer(RegexLexer):
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly known as
PLT Scheme).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Racket'
@@ -599,7 +599,7 @@ class SchemeLexer(RegexLexer):
It supports the full Scheme syntax as defined in R5RS.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
@@ -720,10 +720,10 @@ class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Common Lisp'
- aliases = ['common-lisp', 'cl', 'lisp']
+ aliases = ['common-lisp', 'cl', 'lisp', 'elisp', 'emacs']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
@@ -898,7 +898,7 @@ class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
@@ -996,7 +996,7 @@ class HaskellLexer(RegexLexer):
],
'character': [
- # Allows multi-chars, incorrectly.
- (r"[^\\']", String.Char),
+ # A single non-escape character, then the closing quote.
+ (r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
@@ -1017,12 +1017,121 @@ class HaskellLexer(RegexLexer):
}
+class IdrisLexer(RegexLexer):
+ """
+ A lexer for the dependently typed programming language Idris.
+
+ Based on the Haskell and Agda lexers.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Idris'
+ aliases = ['idris', 'idr']
+ filenames = ['*.idr']
+ mimetypes = ['text/x-idris']
+
+ reserved = ['case','class','data','default','using','do','else',
+ 'if','in','infix[lr]?','instance','rewrite','auto',
+ 'namespace','codata','mutual','private','public','abstract',
+ 'total','partial',
+ 'let','proof','of','then','static','where','_','with',
+ 'pattern', 'term', 'syntax','prefix',
+ 'postulate','parameters','record','dsl','impossible','implicit',
+ 'tactics','intros','intro','compute','refine','exact','trivial']
+
+ ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
+ 'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
+ 'DC[1-4]','NAK','SYN','ETB','CAN',
+ 'EM','SUB','ESC','[FGRU]S','SP','DEL']
+
+ annotations = ['assert_total','lib','link','include','provide','access',
+ 'default']
+
+ tokens = {
+ 'root': [
+ # Declaration
+ (r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)',
+ bygroups(Text, Name.Function, Text, Operator.Word, Text)),
+ # Comments
+ (r'^(\s*)(%%(?:%s))' % '|'.join(annotations),
+ bygroups(Text, Keyword.Reserved)),
+ (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
+ (r'{-', Comment.Multiline, 'comment'),
+ # Identifiers
+ (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
+ (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
+ (r"('')?[A-Z][\w\']*", Keyword.Type),
+ (r'[a-z][A-Za-z0-9_\']*', Text),
+ # Special Symbols
+ (r'(<-|::|->|=>|=)', Operator.Word), # specials
+ (r'([\(\)\{\}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
+ # Numbers
+ (r'\d+[eE][+-]?\d+', Number.Float),
+ (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
+ (r'0[xX][\da-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ # Strings
+ (r"'", String.Char, 'character'),
+ (r'"', String, 'string'),
+ (r'[^\s\(\)\{\}]+', Text),
+ (r'\s+?', Text), # Whitespace
+ ],
+ 'module': [
+ (r'\s+', Text),
+ (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
+ bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
+ (r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
+ ],
+ 'funclist': [
+ (r'\s+', Text),
+ (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
+ (r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
+ (r'--.*$', Comment.Single),
+ (r'{-', Comment.Multiline, 'comment'),
+ (r',', Punctuation),
+ (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
+ # (HACK, but it makes sense to push two instances, believe me)
+ (r'\(', Punctuation, ('funclist', 'funclist')),
+ (r'\)', Punctuation, '#pop:2'),
+ ],
+ # NOTE: the next four states are shared in the AgdaLexer; make sure
+ # any change is compatible with Agda as well or copy over and change
+ 'comment': [
+ # Multiline Comments
+ (r'[^-{}]+', Comment.Multiline),
+ (r'{-', Comment.Multiline, '#push'),
+ (r'-}', Comment.Multiline, '#pop'),
+ (r'[-{}]', Comment.Multiline),
+ ],
+ 'character': [
+ # Allows multi-chars, incorrectly.
+ (r"[^\\']", String.Char),
+ (r"\\", String.Escape, 'escape'),
+ ("'", String.Char, '#pop'),
+ ],
+ 'string': [
+ (r'[^\\"]+', String),
+ (r"\\", String.Escape, 'escape'),
+ ('"', String, '#pop'),
+ ],
+ 'escape': [
+ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
+ (r'\^[][A-Z@\^_]', String.Escape, '#pop'),
+ ('|'.join(ascii), String.Escape, '#pop'),
+ (r'o[0-7]+', String.Escape, '#pop'),
+ (r'x[\da-fA-F]+', String.Escape, '#pop'),
+ (r'\d+', String.Escape, '#pop'),
+ (r'\s+\\', String.Escape, '#pop')
+ ],
+ }
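+
+ # Sketch: once registered in the lexer mapping, the lexer is
+ # reachable by alias:
+ #
+ # from pygments.lexers import get_lexer_by_name
+ # lexer = get_lexer_by_name('idris') # or 'idr'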
+
+
class AgdaLexer(RegexLexer):
"""
For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
dependently typed functional programming language and proof assistant.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Agda'
@@ -1049,12 +1158,12 @@ class AgdaLexer(RegexLexer):
(r'{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
- (ur'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
+ (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r'\b(Set|Prop)\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
- (ur'(\.{1,3}|\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word),
+ (u'(\\.{1,3}|\\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
@@ -1161,7 +1270,7 @@ class LiterateHaskellLexer(LiterateLexer):
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell', 'lhaskell']
@@ -1173,6 +1282,29 @@ class LiterateHaskellLexer(LiterateLexer):
LiterateLexer.__init__(self, hslexer, **options)
+class LiterateIdrisLexer(LiterateLexer):
+ """
+ For Literate Idris (Bird-style or LaTeX) source.
+
+ Additional options accepted:
+
+ `litstyle`
+ If given, must be ``"bird"`` or ``"latex"``. If not given, the style
+ is autodetected: if the first non-whitespace character in the source
+ is a backslash or percent character, LaTeX is assumed, else Bird.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Literate Idris'
+ aliases = ['lidr', 'literate-idris', 'lidris']
+ filenames = ['*.lidr']
+ mimetypes = ['text/x-literate-idris']
+
+ def __init__(self, **options):
+ hslexer = IdrisLexer(**options)
+ LiterateLexer.__init__(self, hslexer, **options)
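+
+ # Sketch: force Bird-style instead of autodetecting from the first
+ # non-whitespace character:
+ #
+ # lexer = LiterateIdrisLexer(litstyle='bird')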
+
+
class LiterateAgdaLexer(LiterateLexer):
"""
For Literate Agda source.
@@ -1184,7 +1316,7 @@ class LiterateAgdaLexer(LiterateLexer):
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Literate Agda'
aliases = ['lagda', 'literate-agda']
@@ -1200,7 +1332,7 @@ class SMLLexer(RegexLexer):
"""
For the Standard ML language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Standard ML'
@@ -1526,7 +1658,7 @@ class OcamlLexer(RegexLexer):
"""
For the OCaml language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'OCaml'
@@ -1620,7 +1752,7 @@ class ErlangLexer(RegexLexer):
Blame Jeremy Thurgood (http://jerith.za.net/).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Erlang'
@@ -1725,7 +1857,7 @@ class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Erlang erl session'
aliases = ['erl']
@@ -1768,7 +1900,7 @@ class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Opa'
@@ -2091,7 +2223,7 @@ class CoqLexer(RegexLexer):
"""
For the `Coq <http://coq.inria.fr/>`_ theorem prover.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Coq'
@@ -2232,7 +2364,7 @@ class NewLispLexer(RegexLexer):
"""
For `newLISP <http://www.newlisp.org/>`_ source code (version 10.3.0).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'NewLisp'
@@ -2363,7 +2495,7 @@ class NixLexer(RegexLexer):
"""
For the `Nix language <http://nixos.org/nix/>`_.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Nix'
@@ -2400,7 +2532,7 @@ class NixLexer(RegexLexer):
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
Name.Builtin),
- (r'\b(true|false)\b', Name.Constant),
+ (r'\b(true|false|null)\b', Name.Constant),
# operators
('(%s)' % '|'.join(re.escape(entry) for entry in operators),
@@ -2427,7 +2559,8 @@ class NixLexer(RegexLexer):
(r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9%/?:@&=+$,\\_.!~*\'-]+', Literal),
# names of variables
- (r'[a-zA-Z_][a-zA-Z0-9_\'-]*', String.Symbol),
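+ # a name directly before '=' is a binding; any other identifier is
+ # now plain text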
+ (r'[a-zA-Z0-9-_]+\s*=', String.Symbol),
+ (r'[a-zA-Z_][a-zA-Z0-9_\'-]*', Text),
],
'comment': [
@@ -2484,7 +2617,7 @@ class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Elixir'
@@ -2595,7 +2728,7 @@ class ElixirConsoleLexer(Lexer):
iex> length [head | tail]
3
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Elixir iex session'
@@ -2641,7 +2774,7 @@ class KokaLexer(RegexLexer):
Lexer for the `Koka <http://koka.codeplex.com>`_
language.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Koka'
diff --git a/pygments/lexers/hdl.py b/pygments/lexers/hdl.py
index 0ea9a7c5..1ebe4e5c 100644
--- a/pygments/lexers/hdl.py
+++ b/pygments/lexers/hdl.py
@@ -22,7 +22,7 @@ class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
@@ -134,7 +134,7 @@ class SystemVerilogLexer(RegexLexer):
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
@@ -274,7 +274,7 @@ class VhdlLexer(RegexLexer):
"""
For VHDL source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index c4029822..64c47b6e 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -19,8 +19,9 @@ from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
- 'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'KotlinLexer',
- 'XtendLexer', 'AspectJLexer', 'CeylonLexer']
+ 'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
+ 'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
+ 'PigLexer']
class JavaLexer(RegexLexer):
@@ -66,7 +67,7 @@ class JavaLexer(RegexLexer):
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
- (r'[0-9]+L?', Number.Integer),
+ (r'[0-9]+(_+[0-9]+)*L?', Number.Integer),
(r'\n', Text)
],
'class': [
@@ -82,7 +83,7 @@ class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'AspectJ'
@@ -242,25 +243,25 @@ class ScalaLexer(RegexLexer):
u'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
u'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
- idrest = ur'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
+ idrest = u'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
- (ur"'%s" % idrest, Text.Symbol),
+ (u"'%s" % idrest, Text.Symbol),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
- (ur'@%s' % idrest, Name.Decorator),
- (ur'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
- ur'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
- ur'lazy|match|new|override|pr(?:ivate|otected)'
- ur'|re(?:quires|turn)|s(?:ealed|uper)|'
- ur't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|'
+ (u'@%s' % idrest, Name.Decorator),
+ (u'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
+ u'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
+ u'lazy|match|new|override|pr(?:ivate|otected)'
+ u'|re(?:quires|turn)|s(?:ealed|uper)|'
+ u't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\\b|'
u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
- (ur':(?!%s)' % op, Keyword, 'type'),
- (ur'%s%s\b' % (upper, idrest), Name.Class),
+ (u':(?!%s)' % op, Keyword, 'type'),
+ (u'%s%s\\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
@@ -281,34 +282,34 @@ class ScalaLexer(RegexLexer):
(r'\n', Text)
],
'class': [
- (ur'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op),
+ (u'(%s|%s|`[^`]+`)(\\s*)(\\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
- (ur'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
+ (u'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
(u'<[%:]|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'),
(r'[\(\{]', Operator, '#push'),
- (ur'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' %
+ (u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)(\\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
- (ur'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)$' %
+ (u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
- (ur'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
+ (u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
(u'<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\]\)\}])', Operator, '#pop'),
(r'[\(\[\{]', Operator, '#push'),
- (ur'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
+ (u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
@@ -317,7 +318,7 @@ class ScalaLexer(RegexLexer):
(r'[*/]', Comment.Multiline)
],
'import': [
- (ur'(%s|\.)+' % idrest, Name.Namespace, '#pop')
+ (u'(%s|\\.)+' % idrest, Name.Namespace, '#pop')
],
}
@@ -326,7 +327,7 @@ class GosuLexer(RegexLexer):
"""
For Gosu source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Gosu'
@@ -405,7 +406,7 @@ class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Gosu Template'
@@ -424,7 +425,7 @@ class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Groovy'
@@ -486,7 +487,7 @@ class IokeLexer(RegexLexer):
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype-based programming language) source.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Ioke'
filenames = ['*.ik']
@@ -638,9 +639,9 @@ class IokeLexer(RegexLexer):
r'System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
- (ur'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
- ur'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
- ur'(?![a-zA-Z0-9!:_?])', Name.Function),
+ (u'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
+ u'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
+ u'(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
@@ -650,13 +651,13 @@ class IokeLexer(RegexLexer):
(r'#\(', Punctuation),
# Operators
- (ur'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
- ur'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
- ur'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
- ur'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
- ur'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
- ur'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
- ur'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
+ (r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
+ r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
+ r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
+ r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
+ r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
+ r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
+ u'\\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])',
Operator),
@@ -676,7 +677,7 @@ class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
@@ -813,12 +814,25 @@ class ClojureLexer(RegexLexer):
}
+class ClojureScriptLexer(ClojureLexer):
+ """
+ Lexer for `ClojureScript <http://clojure.org/clojurescript>`_
+ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'ClojureScript'
+ aliases = ['clojurescript', 'cljs']
+ filenames = ['*.cljs']
+ mimetypes = ['text/x-clojurescript', 'application/x-clojurescript']
+
+
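
ClojureScriptLexer adds no token rules of its own; it only registers new metadata on top of ClojureLexer. A quick way to confirm the wiring, assuming the lexer mapping in pygments/lexers/_mapping.py has been regenerated to include the new class:

    from pygments.lexers import get_lexer_for_filename

    lexer = get_lexer_for_filename('core.cljs')
    print(lexer.name)  # -> 'ClojureScript'
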
class TeaLangLexer(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
@@ -864,7 +878,7 @@ class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Ceylon'
@@ -945,7 +959,7 @@ class KotlinLexer(RegexLexer):
For `Kotlin <http://kotlin.jetbrains.org/>`_
source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Kotlin'
@@ -1007,7 +1021,7 @@ class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Xtend'
@@ -1043,7 +1057,7 @@ class XtendLexer(RegexLexer):
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
- (ur"(\u00BB)", String, 'template'),
+ (u'(\u00BB)', String, 'template'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
@@ -1062,7 +1076,73 @@ class XtendLexer(RegexLexer):
],
'template': [
(r"'''", String, '#pop'),
- (ur"\u00AB", String, '#pop'),
+ (u'\u00AB', String, '#pop'),
(r'.', String)
],
}
+
+class PigLexer(RegexLexer):
+ """
+ For `Pig Latin <https://pig.apache.org/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Pig'
+ aliases = ['pig']
+ filenames = ['*.pig']
+ mimetypes = ['text/x-pig']
+
+ flags = re.MULTILINE | re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r'--.*', Comment),
+ (r'/\*[\w\W]*?\*/', Comment.Multiline),
+ (r'\\\n', Text),
+ (r'\\', Text),
+ (r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
+ include('keywords'),
+ include('types'),
+ include('builtins'),
+ include('punct'),
+ include('operators'),
+ (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-f]+', Number.Hex),
+ (r'[0-9]+L?', Number.Integer),
+ (r'\n', Text),
+ (r'([a-z_][a-z0-9_]*)(\s*)(\()',
+ bygroups(Name.Function, Text, Punctuation)),
+ (r'[()#:]', Text),
+ (r'[^(:#\'\")\s]+', Text),
+ (r'\S+\s+', Text) # TODO: make tests pass without \s+
+ ],
+ 'keywords': [
+ (r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
+ r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
+ r'eval|exec|explain|filter|flatten|foreach|full|generate|group|'
+ r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
+ r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
+ r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
+ r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
+ r'stream|through|union|using|void)\b', Keyword)
+ ],
+ 'builtins': [
+ (r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
+ r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
+ r'TOKENIZE)\b', Name.Builtin)
+ ],
+ 'types': [
+ (r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
+ r'int|long|tuple)\b', Keyword.Type)
+ ],
+ 'punct': [
+ (r'[;(){}\[\]]', Punctuation),
+ ],
+ 'operators': [
+ (r'[#=,./%+\-?]', Operator),
+ (r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
+ (r'(==|<=|<|>=|>|!=)', Operator),
+ ],
+ }
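
A short usage sketch for the new Pig lexer; the two-line Pig Latin script is made up for illustration:

    from pygments import highlight
    from pygments.lexers.jvm import PigLexer
    from pygments.formatters import TerminalFormatter

    pig = "a = LOAD 'data' AS (f1:int, f2:chararray);\nDUMP a;\n"
    print(highlight(pig, PigLexer(), TerminalFormatter()))
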
diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py
index 93c7cbd6..e7a8948b 100644
--- a/pygments/lexers/math.py
+++ b/pygments/lexers/math.py
@@ -9,6 +9,8 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import re
from pygments.util import shebang_matches
@@ -24,14 +26,14 @@ from pygments.lexers import _stan_builtins
__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer',
'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer',
'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer',
- 'IDLLexer', 'RdLexer', 'IgorLexer', 'MathematicaLexer']
+ 'IDLLexer', 'RdLexer', 'IgorLexer', 'MathematicaLexer', 'GAPLexer']
class JuliaLexer(RegexLexer):
"""
For `Julia <http://julialang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Julia'
aliases = ['julia','jl']
@@ -151,7 +153,7 @@ class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Julia console'
aliases = ['jlcon']
@@ -200,7 +202,7 @@ class MuPADLexer(RegexLexer):
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'MuPAD'
aliases = ['mupad']
@@ -270,7 +272,7 @@ class MatlabLexer(RegexLexer):
"""
For Matlab source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Matlab'
aliases = ['matlab']
@@ -388,7 +390,7 @@ class MatlabSessionLexer(Lexer):
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Matlab session'
aliases = ['matlabsession']
@@ -431,7 +433,7 @@ class MatlabSessionLexer(Lexer):
yield match.start(), Generic.Output, line
- print insertions
+ print(insertions)
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
@@ -442,7 +444,7 @@ class OctaveLexer(RegexLexer):
"""
For GNU Octave source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Octave'
aliases = ['octave']
@@ -833,7 +835,7 @@ class ScilabLexer(RegexLexer):
"""
For Scilab source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Scilab'
aliases = ['scilab']
@@ -899,7 +901,7 @@ class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'NumPy'
@@ -1040,7 +1042,7 @@ class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'S'
@@ -1128,7 +1130,7 @@ class BugsLexer(RegexLexer):
Pygments Lexer for `OpenBugs <http://www.openbugs.info/w/>`_ and WinBugs
models.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'BUGS'
@@ -1223,7 +1225,7 @@ class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'JAGS'
@@ -1313,7 +1315,7 @@ class StanLexer(RegexLexer):
Modeling Language Manual* `pdf
<https://github.com/stan-dev/stan/releases/download/v2.0.1/stan-reference-2.0.1.pdf>`__
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Stan'
@@ -1386,7 +1388,7 @@ class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'IDL'
aliases = ['idl']
@@ -1632,7 +1634,7 @@ class RdLexer(RegexLexer):
Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
and `Parsing Rd files <http://developer.r-project.org/parseRd.pdf>`_.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Rd'
aliases = ['rd']
@@ -1667,7 +1669,7 @@ class IgorLexer(RegexLexer):
Pygments Lexer for Igor Pro procedure files (.ipf).
See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Igor'
@@ -1929,7 +1931,7 @@ class MathematicaLexer(RegexLexer):
"""
Lexer for `Mathematica <http://www.wolfram.com/mathematica/>`_ source code.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Mathematica'
aliases = ['mathematica', 'mma', 'nb']
@@ -1970,3 +1972,53 @@ class MathematicaLexer(RegexLexer):
(r'\s+', Text.Whitespace),
],
}
+
+class GAPLexer(RegexLexer):
+ """
+ For `GAP <http://www.gap-system.org>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'GAP'
+ aliases = ['gap']
+ filenames = ['*.g', '*.gd', '*.gi', '*.gap']
+
+ tokens = {
+ 'root' : [
+ (r'#.*$', Comment.Single),
+ (r'"(?:[^"\\]|\\.)*"', String),
+ (r'\(|\)|\[|\]|\{|\}', Punctuation),
+ (r'''(?x)\b(?:
+ if|then|elif|else|fi|
+ for|while|do|od|
+ repeat|until|
+ break|continue|
+ function|local|return|end|
+ rec|
+ quit|QUIT|
+ IsBound|Unbind|
+ TryNextMethod|
+ Info|Assert
+ )\b''', Keyword),
+ (r'''(?x)\b(?:
+ true|false|fail|infinity
+ )\b''',
+ Name.Constant),
+ (r'''(?x)\b(?:
+ (Declare|Install)([A-Z][A-Za-z]+)|
+ BindGlobal|BIND_GLOBAL
+ )\b''',
+ Name.Builtin),
+ (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
+ (r'''(?x)\b(?:
+ and|or|not|mod|in
+ )\b''',
+ Operator.Word),
+ (r'''(?x)
+ (?:[a-zA-Z_0-9]+|`[^`]*`)
+ (?:::[a-zA-Z_0-9]+|`[^`]*`)*''', Name.Variable),
+ (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
+ (r'\.[0-9]+(?:e[0-9]+)?', Number),
+ (r'.', Text)
+ ]
+ }
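
A corresponding sketch for the new GAP lexer; the sample function is invented, and get_tokens_unprocessed() is used to show the raw (index, token, value) triples:

    from pygments.lexers.math import GAPLexer

    gap = 'IsEvenInt := function(n) return n mod 2 = 0; end;;'
    for index, token, value in GAPLexer().get_tokens_unprocessed(gap):
        print(index, token, repr(value))
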
diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py
index dc16954a..ba777e28 100644
--- a/pygments/lexers/other.py
+++ b/pygments/lexers/other.py
@@ -36,7 +36,7 @@ __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer',
'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer',
'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer',
'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer',
- 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer']
+ 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer', 'APLLexer']
class ECLLexer(RegexLexer):
@@ -45,7 +45,7 @@ class ECLLexer(RegexLexer):
<http://hpccsystems.com/community/docs/ecl-language-reference/html>`_
language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'ECL'
@@ -176,7 +176,7 @@ class BefungeLexer(RegexLexer):
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Befunge'
aliases = ['befunge']
@@ -206,7 +206,7 @@ class RedcodeLexer(RegexLexer):
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'Redcode'
aliases = ['redcode']
@@ -242,7 +242,7 @@ class MOOCodeLexer(RegexLexer):
For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
language).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'MOOCode'
filenames = ['*.moo']
@@ -286,7 +286,7 @@ class SmalltalkLexer(RegexLexer):
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
@@ -405,7 +405,7 @@ class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Logtalk'
@@ -633,7 +633,7 @@ class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Gnuplot'
@@ -792,7 +792,7 @@ class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
@@ -1150,18 +1150,18 @@ class AppleScriptLexer(RegexLexer):
tokens = {
'root': [
(r'\s+', Text),
- (ur'¬\n', String.Escape),
+ (u'¬\\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[\(\){}!,.:]', Punctuation),
- (ur'(«)([^»]+)(»)',
+ (u'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
- (ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
+ (u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents[::-1]),
@@ -1198,7 +1198,7 @@ class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Modelica'
aliases = ['modelica']
@@ -1217,23 +1217,27 @@ class ModelicaLexer(RegexLexer):
],
'statements': [
(r'"', String, 'string'),
- (r'\'', Name, 'quoted_ident'),
(r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
- (r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
- (r'([a-zA-Z_][\w\[\]]*|\'[a-zA-Z_\+\-\*\/\^][\w]*\')'
- r'(\.([a-zA-Z_\][\w\[\]]*|\'[a-zA-Z_\+\-\*\/\^][\w]*\'))+', Name.Class),
+ (r'([a-zA-Z_][\w]*|[\'][^\']+[\'])'
+ r'([\[\d,:\]]*)'
+ r'(\.([a-zA-Z_][\w]*|[\'][^\']+[\']))+'
+ r'([\[\d,:\]]*)', Name.Class),
+ (r'([a-zA-Z_][\w]*|[\'][^\']+[\'])'
+ r'([\[\d,:\]]+)', Name.Class),
(r'(\'[\w\+\-\*\/\^]+\'|\w+)', Name),
+ (r'[()\[\]{},.;]', Punctuation),
+ (r'\'', Name, 'quoted_ident'),
],
'root': [
include('whitespace'),
include('keywords'),
+ include('classes'),
include('functions'),
include('operators'),
- include('classes'),
(r'("<html>|<html>)', Name.Tag, 'html-content'),
include('statements'),
],
@@ -1253,15 +1257,16 @@ class ModelicaLexer(RegexLexer):
r'tanh|zeros)\b', Name.Function),
],
'operators': [
- (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|'
- r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|'
- r'return|sample|smooth|spatialDistribution|subSample|terminal|'
+ (r'(actualStream|and|assert|backSample|cardinality|change|Clock|'
+ r'delay|der|edge|hold|homotopy|initial|inStream|noClock|noEvent|'
+ r'not|or|pre|previous|reinit|return|sample|smooth|'
+ r'spatialDistribution|shiftSample|subSample|superSample|terminal|'
r'terminate)\b', Name.Builtin),
],
'classes': [
- (r'(block|class|connector|end|function|model|package|'
+ (r'(operator)?(\s+)?(block|class|connector|end|function|model|operator|package|'
r'record|type)(\s+)((?!if|when|while)[A-Za-z_]\w*|[\'][^\']+[\'])([;]?)',
- bygroups(Keyword, Text, Name.Class, Text))
+ bygroups(Keyword, Text, Keyword, Text, Name.Class, Text))
],
'quoted_ident': [
(r'\'', Name, '#pop'),
@@ -1286,7 +1291,7 @@ class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
@@ -1514,7 +1519,7 @@ class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ABAP'
aliases = ['abap']
@@ -1762,17 +1767,17 @@ class GherkinLexer(RegexLexer):
"""
For `Gherkin <http://github.com/aslakhellesoy/gherkin/>`_ syntax.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Gherkin'
aliases = ['cucumber', 'gherkin']
filenames = ['*.feature']
mimetypes = ['text/x-gherkin']
- feature_keywords = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
- feature_element_keywords = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
- examples_keywords = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
- step_keywords = ur'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
+ feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
+ feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
+ examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
+ step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
tokens = {
'comments': [
@@ -1874,7 +1879,7 @@ class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
@@ -1995,7 +2000,7 @@ class PostScriptLexer(RegexLexer):
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
@@ -2083,7 +2088,7 @@ class AutohotkeyLexer(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'autohotkey'
aliases = ['ahk', 'autohotkey']
@@ -2263,7 +2268,7 @@ class MaqlLexer(RegexLexer):
<https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_
scripts.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'MAQL'
@@ -2322,7 +2327,7 @@ class GoodDataCLLexer(RegexLexer):
Lexer for `GoodData-CL <http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/com/gooddata/processor/COMMANDS.txt>`_
script files.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'GoodData-CL'
@@ -2367,7 +2372,7 @@ class ProtoBufLexer(RegexLexer):
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Protocol Buffer'
@@ -2419,7 +2424,7 @@ class HybrisLexer(RegexLexer):
"""
For `Hybris <http://www.hybris-lang.org>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Hybris'
@@ -2497,7 +2502,7 @@ class AwkLexer(RegexLexer):
"""
For Awk scripts.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Awk'
@@ -2523,11 +2528,11 @@ class AwkLexer(RegexLexer):
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
- (r'\+\+|--|\|\||&&|in|\$|!?~|'
+ (r'\+\+|--|\|\||&&|in\b|\$|!?~|'
r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
- (r'(break|continue|do|while|exit|for|if|'
+ (r'(break|continue|do|while|exit|for|if|else|'
r'return)\b', Keyword, 'slashstartsregex'),
(r'function\b', Keyword.Declaration, 'slashstartsregex'),
(r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
@@ -2551,7 +2556,7 @@ class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'CFEngine3'
@@ -2615,7 +2620,7 @@ class SnobolLexer(RegexLexer):
Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
Does not require spaces around binary operators.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "Snobol"
@@ -2679,7 +2684,7 @@ class UrbiscriptLexer(ExtendedRegexLexer):
"""
For UrbiScript source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'UrbiScript'
@@ -2786,7 +2791,7 @@ class OpenEdgeLexer(RegexLexer):
Lexer for `OpenEdge ABL (formerly Progress)
<http://web.progress.com/en/openedge/abl.html>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'OpenEdge ABL'
aliases = ['openedge', 'abl', 'progress']
@@ -2838,7 +2843,7 @@ class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
@@ -2916,7 +2921,7 @@ class CbmBasicV2Lexer(RegexLexer):
"""
For CBM BASIC V2 sources.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
@@ -2954,7 +2959,7 @@ class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
@@ -3015,7 +3020,7 @@ class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Kconfig'
@@ -3090,7 +3095,7 @@ class VGLLexer(RegexLexer):
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
@@ -3123,7 +3128,7 @@ class SourcePawnLexer(RegexLexer):
"""
For SourcePawn source code with preprocessor directives.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'SourcePawn'
aliases = ['sp']
@@ -3232,7 +3237,7 @@ class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
@@ -3313,7 +3318,7 @@ class NSISLexer(RegexLexer):
"""
For `NSIS <http://nsis.sourceforge.net/>`_ scripts.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'NSIS'
aliases = ['nsis', 'nsi', 'nsh']
@@ -3435,9 +3440,9 @@ class NSISLexer(RegexLexer):
class RPMSpecLexer(RegexLexer):
"""
- For RPM *.spec files
+ For RPM ``.spec`` files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'RPMSpec'
@@ -3513,7 +3518,7 @@ class AutoItLexer(RegexLexer):
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'AutoIt'
aliases = ['autoit']
@@ -3693,7 +3698,7 @@ class RexxLexer(RegexLexer):
systems. It is popular for I/O- and data-based tasks and can act as glue
language to bind different applications together.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Rexx'
aliases = ['rexx', 'arexx']
@@ -3736,9 +3741,9 @@ class RexxLexer(RegexLexer):
r'while)\b', Keyword.Reserved),
],
'operator': [
- (ur'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
- ur'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
- ur'¬>>|¬>|¬|\.|,)', Operator),
+ (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
+ r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
+ r'¬>>|¬>|¬|\.|,)', Operator),
],
'string_double': [
(r'[^"\n]+', String),
@@ -3794,3 +3799,88 @@ class RexxLexer(RegexLexer):
for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
if pattern.search(lowerText)) + 0.01
return min(result, 1.0)
+
+
+class APLLexer(RegexLexer):
+ """
+ A simple APL lexer.
+
+ .. versionadded:: 2.0
+ """
+ name = 'APL'
+ aliases = ['apl']
+ filenames = ['*.apl']
+
+ tokens = {
+ 'root': [
+ # Whitespace
+ # ==========
+ (r'\s+', Text),
+ #
+ # Comment
+ # =======
+ # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
+ (u'[⍝#].*$', Comment.Single),
+ #
+ # Strings
+ # =======
+ (r'\'((\'\')|[^\'])*\'', String.Single),
+ (r'"(("")|[^"])*"', String.Double), # supported by NGN APL
+ #
+ # Punctuation
+ # ===========
+ # This token type is used for the diamond and parentheses,
+ # but not for brackets and ';' (see below)
+ (u'[⋄◇()]', Punctuation),
+ #
+ # Array indexing
+ # ==============
+ # Since indexing is very important in APL, it is not lumped in with
+ # plain punctuation; String.Regex is reused as a visually distinct type
+ (r'[\[\];]', String.Regex),
+ #
+ # Distinguished names
+ # ===================
+ # following IBM APL2 standard
+ (u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
+ #
+ # Labels
+ # ======
+ # following IBM APL2 standard
+ # (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
+ #
+ # Variables
+ # =========
+ # following IBM APL2 standard
+ (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
+ #
+ # Numbers
+ # =======
+ (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
+ u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
+ Number),
+ #
+ # Operators
+ # ==========
+ (u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
+ (u'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
+ Operator),
+ #
+ # Constant
+ # ========
+ (u'⍬', Name.Constant),
+ #
+ # Quad symbol
+ # ===========
+ (u'[⎕⍞]', Name.Variable.Global),
+ #
+ # Arrows left/right
+ # =================
+ (u'[←→]', Keyword.Declaration),
+ #
+ # D-Fn
+ # ====
+ (u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
+ (r'[{}]', Keyword.Type),
+ ],
+ }
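
A usage sketch for the APL lexer; the one-liner is an invented d-fn computing a mean, exercising the arrow, operator, d-fn, and comment rules above:

    # -*- coding: utf-8 -*-
    from pygments.lexers.other import APLLexer

    apl = u'avg ← {(+/⍵)÷⍴⍵}  ⍝ arithmetic mean'
    for token, value in APLLexer().get_tokens(apl):
        print(token, repr(value))
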
diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py
index 80d52ac4..fc8cbb6f 100644
--- a/pygments/lexers/parsers.py
+++ b/pygments/lexers/parsers.py
@@ -38,7 +38,7 @@ class RagelLexer(RegexLexer):
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel'
@@ -132,7 +132,7 @@ class RagelEmbeddedLexer(RegexLexer):
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Embedded Ragel'
@@ -212,7 +212,7 @@ class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Ruby Host'
@@ -231,7 +231,7 @@ class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in C Host'
@@ -250,7 +250,7 @@ class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in D Host'
@@ -268,7 +268,7 @@ class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in CPP Host'
@@ -286,7 +286,7 @@ class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Objective C Host'
@@ -306,7 +306,7 @@ class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Java Host'
@@ -327,7 +327,7 @@ class AntlrLexer(RegexLexer):
Should not be called directly; instead,
use a DelegatingLexer for your target language.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
.. _ANTLR: http://www.antlr.org/
"""
@@ -524,7 +524,7 @@ class AntlrLexer(RegexLexer):
# """
# ANTLR with C Target
#
-# *New in Pygments 1.1*
+# .. versionadded:: 1.1
# """
#
# name = 'ANTLR With C Target'
@@ -541,7 +541,7 @@ class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With CPP Target'
@@ -560,7 +560,7 @@ class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With ObjectiveC Target'
@@ -580,7 +580,7 @@ class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With C# Target'
@@ -600,7 +600,7 @@ class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Python Target'
@@ -620,7 +620,7 @@ class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
- *New in Pygments 1.1*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Java Target'
@@ -640,7 +640,7 @@ class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Ruby Target'
@@ -660,7 +660,7 @@ class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Perl Target'
@@ -680,7 +680,7 @@ class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With ActionScript Target'
@@ -700,7 +700,7 @@ class TreetopBaseLexer(RegexLexer):
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
tokens = {
@@ -767,7 +767,7 @@ class TreetopLexer(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Treetop'
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index 4376611a..b069b375 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -27,7 +27,7 @@ class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Bash'
@@ -111,7 +111,7 @@ class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Bash Session'
@@ -162,7 +162,7 @@ class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Shell Session'
@@ -208,7 +208,7 @@ class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Batchfile'
aliases = ['bat', 'batch', 'dosbatch', 'winbatch']
@@ -264,7 +264,7 @@ class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Tcsh'
@@ -331,7 +331,7 @@ class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PowerShell'
aliases = ['powershell', 'posh', 'ps1', 'psm1']
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py
index d7fe6b53..9ea2e22c 100644
--- a/pygments/lexers/special.py
+++ b/pygments/lexers/special.py
@@ -10,11 +10,10 @@
"""
import re
-import cStringIO
from pygments.lexer import Lexer
from pygments.token import Token, Error, Text
-from pygments.util import get_choice_opt, b
+from pygments.util import get_choice_opt, text_type, BytesIO
__all__ = ['TextLexer', 'RawTokenLexer']
@@ -35,7 +34,7 @@ class TextLexer(Lexer):
_ttype_cache = {}
-line_re = re.compile(b('.*?\n'))
+line_re = re.compile(b'.*?\n')
class RawTokenLexer(Lexer):
"""
@@ -60,12 +59,12 @@ class RawTokenLexer(Lexer):
Lexer.__init__(self, **options)
def get_tokens(self, text):
- if isinstance(text, unicode):
+ if isinstance(text, text_type):
# raw token stream never has any non-ASCII characters
text = text.encode('ascii')
if self.compress == 'gz':
import gzip
- gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text))
+ gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
text = gzipfile.read()
elif self.compress == 'bz2':
import bz2
@@ -73,7 +72,7 @@ class RawTokenLexer(Lexer):
# do not call Lexer.get_tokens() because we do not want Unicode
# decoding to occur, and stripping is not optional.
- text = text.strip(b('\n')) + b('\n')
+ text = text.strip(b'\n') + b'\n'
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
@@ -81,7 +80,7 @@ class RawTokenLexer(Lexer):
length = 0
for match in line_re.finditer(text):
try:
- ttypestr, val = match.group().split(b('\t'), 1)
+ ttypestr, val = match.group().split(b'\t', 1)
except ValueError:
val = match.group().decode(self.encoding)
ttype = Error
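
The hunks above keep RawTokenLexer byte-oriented on both Python lines: text input is encoded to ASCII, and BytesIO plus b'' literals replace cStringIO and the old b() helper. A round-trip sketch under those assumptions:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.lexers.special import RawTokenLexer
    from pygments.formatters import RawTokenFormatter

    # RawTokenFormatter emits the bytes-based raw token dump...
    raw = highlight('print(1)', PythonLexer(), RawTokenFormatter())
    # ...which RawTokenLexer parses back into a token stream.
    tokens = list(RawTokenLexer().get_tokens(raw))
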
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
index 94a131ff..73180772 100644
--- a/pygments/lexers/sql.py
+++ b/pygments/lexers/sql.py
@@ -42,8 +42,9 @@ import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
from pygments.token import Punctuation, \
- Text, Comment, Operator, Keyword, Name, String, Number, Generic
+ Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
+from pygments.util import iteritems
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
@@ -124,7 +125,7 @@ class PostgresLexer(PostgresBase, RegexLexer):
"""
Lexer for the PostgreSQL dialect of SQL.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PostgreSQL SQL dialect'
@@ -169,14 +170,14 @@ class PlPgsqlLexer(PostgresBase, RegexLexer):
"""
Handle the extra syntax in the PL/pgSQL language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PL/pgSQL'
aliases = ['plpgsql']
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
- tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
+ tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
@@ -210,7 +211,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer):
aliases = [] # not public
flags = re.IGNORECASE
- tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
+ tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
tokens['root'].append(
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
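
Note that iteritems() here is the pygments.util compatibility shim, not a dict method: it dispatches to dict.iteritems() on Python 2 and dict.items() on Python 3, which is what lets the class-level tokens copy above run unchanged on both. A trivial sketch:

    from pygments.util import iteritems

    tokens = {'root': [(r'\s+', None)]}
    for state, rules in iteritems(tokens):  # items() on 3.x, iteritems() on 2.x
        print(state, rules)
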
@@ -244,19 +245,20 @@ class lookahead(object):
def send(self, i):
self._nextitem = i
return i
- def next(self):
+ def __next__(self):
if self._nextitem is not None:
ni = self._nextitem
self._nextitem = None
return ni
- return self.iter.next()
+ return next(self.iter)
+ next = __next__
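
Aliasing next = __next__ is the standard trick for writing one iterator wrapper that satisfies both protocols: Python 3's builtin next() looks for __next__(), while Python 2's looks for next(). A stripped-down illustration (the Peekable name is invented):

    class Peekable(object):
        def __init__(self, iterable):
            self.it = iter(iterable)

        def __next__(self):          # Python 3 spelling of the protocol
            return next(self.it)

        next = __next__              # same method under the Python 2 name

    p = Peekable('ab')
    print(next(p), next(p))          # prints 'a b' on either Python line
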
class PostgresConsoleLexer(Lexer):
"""
Lexer for psql sessions.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PostgreSQL console (psql)'
@@ -277,7 +279,7 @@ class PostgresConsoleLexer(Lexer):
insertions = []
while 1:
try:
- line = lines.next()
+ line = next(lines)
except StopIteration:
# allow the emission of partially collected items
# the repl loop will be broken below
@@ -314,7 +316,7 @@ class PostgresConsoleLexer(Lexer):
# Emit the output lines
out_token = Generic.Output
while 1:
- line = lines.next()
+ line = next(lines)
mprompt = re_prompt.match(line)
if mprompt is not None:
# push the line back to have it processed by the prompt
@@ -523,7 +525,7 @@ class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'sqlite3con'
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 987faee8..72f81d63 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -36,9 +36,10 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
- 'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer',
- 'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer',
- 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer']
+ 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
+ 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
+ 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
+ 'LassoCssLexer', 'LassoJavascriptLexer']
class ErbLexer(Lexer):
@@ -399,7 +400,7 @@ class MyghtyLexer(RegexLexer):
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
@@ -447,7 +448,7 @@ class MyghtyHtmlLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'HTML+Myghty'
@@ -464,7 +465,7 @@ class MyghtyXmlLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'XML+Myghty'
@@ -481,7 +482,7 @@ class MyghtyJavascriptLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
@@ -500,7 +501,7 @@ class MyghtyCssLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'CSS+Myghty'
@@ -519,7 +520,7 @@ class MasonLexer(RegexLexer):
.. _mason templates: http://www.masonhq.com/
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Mason'
aliases = ['mason']
@@ -572,7 +573,7 @@ class MakoLexer(RegexLexer):
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
.. _mako templates: http://www.makotemplates.org/
"""
@@ -640,7 +641,7 @@ class MakoHtmlLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'HTML+Mako'
@@ -656,7 +657,7 @@ class MakoXmlLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'XML+Mako'
@@ -672,7 +673,7 @@ class MakoJavascriptLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'JavaScript+Mako'
@@ -690,7 +691,7 @@ class MakoCssLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'CSS+Mako'
@@ -1343,7 +1344,7 @@ class JspRootLexer(RegexLexer):
Base for the `JspLexer`. Yields `Token.Other` for area outside of
JSP tags.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
tokens = {
@@ -1367,7 +1368,7 @@ class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Java Server Page'
aliases = ['jsp']
@@ -1390,7 +1391,7 @@ class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Evoque'
aliases = ['evoque']
@@ -1443,7 +1444,7 @@ class EvoqueHtmlLexer(DelegatingLexer):
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
@@ -1459,7 +1460,7 @@ class EvoqueXmlLexer(DelegatingLexer):
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
@@ -1478,23 +1479,28 @@ class ColdfusionLexer(RegexLexer):
aliases = ['cfs']
filenames = []
mimetypes = []
- flags = re.IGNORECASE | re.MULTILINE
+ flags = re.IGNORECASE
tokens = {
'root': [
- (r'//.*', Comment),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
- (r'<=|>=|<|>', Operator),
+ (r'<=|>=|<|>|==', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
+ (r'\?', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
- (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
+ (r'(if|else|len|var|case|default|break|switch|component|property|'
+  r'function|do|try|catch|in|continue|for|return|while)\b', Keyword),
+ (r'(required|any|array|binary|boolean|component|date|guid|numeric|'
+  r'query|string|struct|uuid|xml)\b', Keyword),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(application|session|client|cookie|super|this|variables|'
+  r'arguments)\b', Name.Constant),
(r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
@@ -1558,7 +1564,7 @@ class ColdfusionHtmlLexer(DelegatingLexer):
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
- filenames = ['*.cfm', '*.cfml', '*.cfc']
+ filenames = ['*.cfm', '*.cfml']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
@@ -1566,11 +1572,25 @@ class ColdfusionHtmlLexer(DelegatingLexer):
**options)
+class ColdfusionCFCLexer(DelegatingLexer):
+ """
+ Coldfusion markup/script components.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Coldfusion CFC'
+ aliases = ['cfc']
+ filenames = ['*.cfc']
+ mimetypes = []
+
+ def __init__(self, **options):
+ super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
+ **options)
+
+
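
Since ColdfusionCFCLexer is a DelegatingLexer, a .cfc file gets ColdfusionLexer for the script parts layered over ColdfusionHtmlLexer for the markup. A sketch, assuming the lexer mapping has been regenerated so *.cfc resolves to the new class:

    from pygments.lexers import get_lexer_for_filename

    lexer = get_lexer_for_filename('Widget.cfc')  # -> ColdfusionCFCLexer
    cfc = 'component { function init() { return this; } }'
    for token, value in lexer.get_tokens(cfc):
        print(token, repr(value))
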
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Scalate Server Page'
aliases = ['ssp']
@@ -1596,7 +1616,7 @@ class TeaTemplateRootLexer(RegexLexer):
Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
code blocks.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
tokens = {
@@ -1617,7 +1637,7 @@ class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Tea'
aliases = ['tea']
@@ -1644,7 +1664,7 @@ class LassoHtmlLexer(DelegatingLexer):
Nested JavaScript and CSS are also highlighted.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'HTML+Lasso'
@@ -1672,7 +1692,7 @@ class LassoXmlLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'XML+Lasso'
@@ -1696,7 +1716,7 @@ class LassoCssLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CSS+Lasso'
@@ -1722,7 +1742,7 @@ class LassoJavascriptLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
diff --git a/pygments/lexers/text.py b/pygments/lexers/text.py
index e4c5656b..82cc82b3 100644
--- a/pygments/lexers/text.py
+++ b/pygments/lexers/text.py
@@ -62,7 +62,7 @@ class RegeditLexer(RegexLexer):
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'reg'
@@ -103,7 +103,7 @@ class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Properties'
@@ -125,7 +125,7 @@ class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Debian Sourcelist'
@@ -218,7 +218,7 @@ class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Base Makefile'
@@ -228,8 +228,10 @@ class BaseMakefileLexer(RegexLexer):
tokens = {
'root': [
+ # recipes (need to allow spaces because of expandtabs)
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
- (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
+ # special variables
+ (r'\$[<@$+%?|*]', Keyword),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
@@ -244,7 +246,15 @@ class BaseMakefileLexer(RegexLexer):
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
- # TODO: add paren handling (grr)
+ # expansions
+ (r'\$\(', Keyword, 'expansion'),
+ ],
+ 'expansion': [
+ (r'[^$a-zA-Z_)]+', Text),
+ (r'[a-zA-Z_]+', Name.Variable),
+ (r'\$', Keyword),
+ (r'\(', Keyword, '#push'),
+ (r'\)', Keyword, '#pop'),
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
@@ -252,12 +262,13 @@ class BaseMakefileLexer(RegexLexer):
(r'\s+', Text),
],
'block-header': [
- (r'[^,\\\n#]+', Number),
- (r',', Punctuation),
- (r'#.*?\n', Comment),
+ (r'[,|]', Punctuation),
+ (r'#.*?\n', Comment, '#pop'),
(r'\\\n', Text), # line continuation
- (r'\\.', Text),
- (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
+ (r'\$\(', Keyword, 'expansion'),
+ (r'[a-zA-Z_]+', Name),
+ (r'\n', Text, '#pop'),
+ (r'.', Text),
],
}
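The rewritten 'block-header' and the new 'expansion' state give `BaseMakefileLexer` proper handling of nested `$(...)` references, replacing the old paren-handling TODO: `$(` enters the state, each further `(` pushes it again, and each `)` pops once. A quick sketch of the effect on a target line (illustrative input, not from the test suite):

    from __future__ import print_function

    from pygments import lex
    from pygments.lexers.text import BaseMakefileLexer
    from pygments.token import Keyword, Name

    # The nested $(SRCS) inside $(patsubst ...) is tokenized by the
    # 'expansion' state; parens push and pop it recursively.
    code = "all: $(patsubst %.c,%.o,$(SRCS))\n"
    for ttype, value in lex(code, BaseMakefileLexer()):
        if ttype in (Keyword, Name.Variable):
            print(ttype, repr(value))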
@@ -303,7 +314,7 @@ class DarcsPatchLexer(RegexLexer):
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
@@ -416,7 +427,7 @@ class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'BBCode'
@@ -507,7 +518,7 @@ class GroffLexer(RegexLexer):
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Groff'
@@ -562,7 +573,7 @@ class ApacheConfLexer(RegexLexer):
Lexer for configuration files following the Apache config file
format.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'ApacheConf'
@@ -601,7 +612,7 @@ class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'MoinMoin/Trac Wiki markup'
@@ -646,7 +657,7 @@ class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
Additional options accepted:
@@ -654,7 +665,9 @@ class RstLexer(RegexLexer):
Highlight the contents of ``.. sourcecode:: language``,
``.. code:: language`` and ``.. code-block:: language``
directives with a lexer for the given language (default:
- ``True``). *New in Pygments 0.8.*
+ ``True``).
+
+ .. versionadded:: 0.8
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
@@ -813,7 +826,7 @@ class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'VimL'
aliases = ['vim']
@@ -830,7 +843,7 @@ class VimLexer(RegexLexer):
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
- (r"'(\\\\|\\'|[^\n'])*'", String.Single),
+ (r"'(''|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
@@ -897,7 +910,7 @@ class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
@@ -925,7 +938,7 @@ class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'SquidConf'
@@ -1057,7 +1070,7 @@ class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Debian Control file'
aliases = ['control', 'debcontrol']
@@ -1127,7 +1140,7 @@ class YamlLexer(ExtendedRegexLexer):
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'YAML'
@@ -1529,7 +1542,7 @@ class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
@@ -1557,7 +1570,7 @@ class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
@@ -1603,7 +1616,7 @@ class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'CMake'
aliases = ['cmake']
@@ -1673,7 +1686,7 @@ class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'HTTP'
@@ -1742,7 +1755,7 @@ class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
@@ -1814,7 +1827,7 @@ class HxmlLexer(RegexLexer):
"""
Lexer for `haXe build <http://haxe.org/doc/compiler>`_ files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Hxml'
aliases = ['haxeml', 'hxml']
@@ -1857,7 +1870,7 @@ class EbnfLexer(RegexLexer):
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'EBNF'
diff --git a/pygments/lexers/web.py b/pygments/lexers/web.py
index 10522aa4..7d3073f1 100644
--- a/pygments/lexers/web.py
+++ b/pygments/lexers/web.py
@@ -13,11 +13,11 @@ import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
- include, this
+ include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Other, Punctuation, Literal
+ Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
- html_doctype_matches, unirange
+ html_doctype_matches, unirange, iteritems
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
@@ -28,7 +28,7 @@ __all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer',
- 'KalLexer']
+ 'KalLexer', 'CirruLexer', 'MaskLexer', 'ZephirLexer']
class JavascriptLexer(RegexLexer):
@@ -95,7 +95,7 @@ class JsonLexer(RegexLexer):
"""
For JSON data structures.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'JSON'
@@ -178,7 +178,7 @@ class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'ActionScript'
@@ -262,7 +262,7 @@ class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'ActionScript 3'
@@ -484,7 +484,7 @@ class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Objective-J'
@@ -891,7 +891,7 @@ class PhpLexer(RegexLexer):
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._phpbuiltins import MODULES
- for key, value in MODULES.iteritems():
+ for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
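`iteritems(MODULES)` uses the new compatibility helper from `pygments.util` (added further down in this commit): it resolves to `dict.iteritems` on Python 2 and `dict.items` on Python 3, so the lexer body stays single-source. A standalone sketch of the helper:

    from __future__ import print_function

    from pygments.util import iteritems

    # Hypothetical data for illustration only.
    modules = {'core': ['strlen', 'substr'], 'json': ['json_encode']}
    for name, funcs in iteritems(modules):
        print(name, funcs)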
@@ -921,7 +921,7 @@ class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
@@ -1056,7 +1056,7 @@ class XsltLexer(XmlLexer):
'''
A lexer for XSLT.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
'''
name = 'XSLT'
@@ -1094,7 +1094,7 @@ class MxmlLexer(RegexLexer):
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
@@ -1137,7 +1137,7 @@ class HaxeLexer(ExtendedRegexLexer):
"""
For Haxe source code (http://haxe.org/).
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Haxe'
@@ -2007,7 +2007,7 @@ class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Haml'
@@ -2284,7 +2284,7 @@ class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Sass'
@@ -2355,7 +2355,7 @@ class SassLexer(ExtendedRegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in common_sass_tokens.iteritems():
+ for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
@@ -2402,7 +2402,7 @@ class ScssLexer(RegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in common_sass_tokens.iteritems():
+ for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
@@ -2414,7 +2414,7 @@ class CoffeeScriptLexer(RegexLexer):
.. _CoffeeScript: http://coffeescript.org
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'CoffeeScript'
@@ -2521,7 +2521,7 @@ class KalLexer(RegexLexer):
.. _Kal: http://rzimmerman.github.io/kal
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Kal'
@@ -2748,7 +2748,7 @@ class DuelLexer(RegexLexer):
See http://duelengine.org/.
See http://jsonml.org/jbst/.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Duel'
@@ -2779,7 +2779,7 @@ class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Scaml'
@@ -2893,7 +2893,7 @@ class JadeLexer(ExtendedRegexLexer):
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Jade'
@@ -3001,7 +3001,7 @@ class XQueryLexer(ExtendedRegexLexer):
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
@@ -3406,7 +3406,7 @@ class XQueryLexer(ExtendedRegexLexer):
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
@@ -3416,12 +3416,12 @@ class XQueryLexer(ExtendedRegexLexer):
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
@@ -3490,7 +3490,7 @@ class XQueryLexer(ExtendedRegexLexer):
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
@@ -3663,7 +3663,7 @@ class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Dart'
@@ -3691,7 +3691,7 @@ class DartLexer(RegexLexer):
r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
(r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type),
(r'\b(false|null|true)\b', Keyword.Constant),
- (r'[~!%^&*+=|?:<>/-]|as', Operator),
+ (r'[~!%^&*+=|?:<>/-]|as\b', Operator),
(r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
(r'[(){}\[\],.;]', Punctuation),
@@ -3763,7 +3763,7 @@ class TypeScriptLexer(RegexLexer):
"""
For `TypeScript <http://typescriptlang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'TypeScript'
@@ -3850,7 +3850,7 @@ class LassoLexer(RegexLexer):
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Lasso'
@@ -4063,9 +4063,9 @@ class LassoLexer(RegexLexer):
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lassobuiltins import BUILTINS, MEMBERS
- for key, value in BUILTINS.iteritems():
+ for key, value in iteritems(BUILTINS):
self._builtins.update(value)
- for key, value in MEMBERS.iteritems():
+ for key, value in iteritems(MEMBERS):
self._members.update(value)
RegexLexer.__init__(self, **options)
@@ -4099,7 +4099,7 @@ class QmlLexer(RegexLexer):
"""
For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
# QML is based on javascript, so much of this is taken from the
@@ -4166,3 +4166,238 @@ class QmlLexer(RegexLexer):
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
+
+
+class CirruLexer(RegexLexer):
+ """
+ Syntax rules of Cirru can be found at:
+ http://grammar.cirru.org/
+
+ * using ``()`` to mark up blocks, limited to a single line
+ * using ``""`` to mark up strings, with ``\`` allowed as escape
+ * using ``$`` as a shorthand for ``()``, reaching to the end of the indentation or a closing ``)``
+ * using indentation to create nesting
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Cirru'
+ aliases = ['cirru']
+ filenames = ['*.cirru', '*.cr']
+ mimetypes = ['text/x-cirru']
+ flags = re.MULTILINE
+
+ tokens = {
+ 'string': [
+ (r'[^"\\\n]', String),
+ (r'\\', String.Escape, 'escape'),
+ (r'"', String, '#pop'),
+ ],
+ 'escape': [
+ (r'.', String.Escape, '#pop'),
+ ],
+ 'function': [
+ (r'[\w-][^\s\(\)\"]*', Name.Function, '#pop'),
+ (r'\)', Operator, '#pop'),
+ (r'(?=\n)', Text, '#pop'),
+ (r'\(', Operator, '#push'),
+ (r'"', String, ('#pop', 'string')),
+ (r'\s+', Text.Whitespace),
+ (r'\,', Operator, '#pop'),
+ ],
+ 'line': [
+ (r'^\B', Text.Whitespace, 'function'),
+ (r'\$', Operator, 'function'),
+ (r'\(', Operator, 'function'),
+ (r'\)', Operator),
+ (r'(?=\n)', Text, '#pop'),
+ (r'\n', Text, '#pop'),
+ (r'"', String, 'string'),
+ (r'\s+', Text.Whitespace),
+ (r'[\d\.]+', Number),
+ (r'[\w-][^\"\(\)\s]*', Name.Variable),
+ (r'--', Comment.Single)
+ ],
+ 'root': [
+ (r'^\s*', Text.Whitespace, ('line', 'function')),
+ (r'^\s+$', Text.Whitespace),
+ ]
+ }
+
+
+class MaskLexer(RegexLexer):
+ """
+ For `Mask <http://github.com/atmajs/MaskJS>`__ markup.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Mask'
+ aliases = ['mask']
+ filenames = ['*.mask']
+ mimetypes = ['text/x-mask']
+
+ flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'[\{\};>]', Punctuation),
+ (r"'''", String, 'string-trpl-single'),
+ (r'"""', String, 'string-trpl-double'),
+ (r"'", String, 'string-single'),
+ (r'"', String, 'string-double'),
+ (r'([\w-]+)', Name.Tag, 'node'),
+ (r'([^\.#;{>\s]+)', Name.Class, 'node'),
+ (r'(#[\w_-]+)', Name.Function, 'node'),
+ (r'(\.[\w_-]+)', Name.Variable.Class, 'node')
+ ],
+ 'string-base': [
+ (r'\\.', String.Escape),
+ (r'~\[', String.Interpol, 'interpolation'),
+ (r'.', String.Single),
+ ],
+ 'string-single':[
+ (r"'", String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-double':[
+ (r'"', String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-trpl-single':[
+ (r"'''", String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'string-trpl-double':[
+ (r'"""', String.Single, '#pop'),
+ include('string-base')
+ ],
+ 'interpolation': [
+ (r'\]', String.Interpol, '#pop'),
+ (r'\s*:', String.Interpol, 'expression'),
+ (r'\s*\w+:', Name.Other),
+ (r'[^\]]+', String.Interpol)
+ ],
+ 'expression': [
+ (r'[^\]]+', using(JavascriptLexer), '#pop')
+ ],
+ 'node': [
+ (r'\s+', Text),
+ (r'\.', Name.Variable.Class, 'node-class'),
+ (r'\#', Name.Function, 'node-id'),
+ (r'style[ \t]*=', Name.Attribute, 'node-attr-style-value'),
+ (r'[\w_:-]+[ \t]*=', Name.Attribute, 'node-attr-value'),
+ (r'[\w_:-]+', Name.Attribute),
+ (r'[>{;]', Punctuation, '#pop')
+ ],
+ 'node-class': [
+ (r'[\w-]+', Name.Variable.Class),
+ (r'~\[', String.Interpol, 'interpolation'),
+ (r'', Text, '#pop')
+ ],
+ 'node-id': [
+ (r'[\w-]+', Name.Function),
+ (r'~\[', String.Interpol, 'interpolation'),
+ (r'', Text, '#pop')
+ ],
+ 'node-attr-value':[
+ (r'\s+', Text),
+ (r'[\w_]+', Name.Variable, '#pop'),
+ (r"'", String, 'string-single-pop2'),
+ (r'"', String, 'string-double-pop2'),
+ (r'', Text, '#pop')
+ ],
+ 'node-attr-style-value':[
+ (r'\s+', Text),
+ (r"'", String.Single, 'css-single-end'),
+ (r'"', String.Single, 'css-double-end'),
+ include('node-attr-value')
+ ],
+ 'css-base': [
+ (r'\s+', Text),
+ (r"[;]", Punctuation),
+ (r"[\w\-_]+\s*:", Name.Builtin)
+ ],
+ 'css-single-end': [
+ include('css-base'),
+ (r"'", String.Single, '#pop:2'),
+ (r"[^;']+", Name.Entity)
+ ],
+ 'css-double-end': [
+ include('css-base'),
+ (r'"', String.Single, '#pop:2'),
+ (r"[^;\"]+", Name.Entity)
+ ],
+ 'string-single-pop2':[
+ (r"'", String.Single, '#pop:2'),
+ include('string-base')
+ ],
+ 'string-double-pop2':[
+ (r'"', String.Single, '#pop:2'),
+ include('string-base')
+ ],
+ }
+
+
+class ZephirLexer(RegexLexer):
+ """
+ For `Zephir language <http://zephir-lang.com/>`_ source code.
+
+ Zephir is a compiled high-level language aimed at the
+ creation of C extensions for PHP.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Zephir'
+ aliases = ['zephir']
+ filenames = ['*.zep']
+
+ zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
+ zephir_type = ['bit', 'bits', 'string']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Text),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline)
+ ],
+ 'slashstartsregex': [
+ include('commentsandwhitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex, '#pop'),
+ (r'', Text, '#pop')
+ ],
+ 'badregex': [
+ (r'\n', Text, '#pop')
+ ],
+ 'root': [
+ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+ include('commentsandwhitespace'),
+ (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?|!=?|->|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+ (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|require|inline|'
+ r'throw|try|catch|finally|new|delete|typeof|instanceof|void|namespace|use|extends|'
+ r'this|fetch|isset|unset|echo|likely|unlikely|empty)\b', Keyword, 'slashstartsregex'),
+ (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
+ (r'(abstract|boolean|bool|char|class|const|double|enum|export|'
+ r'extends|final|float|goto|implements|import|int|string|interface|long|ulong|uchar|native|unsigned|'
+ r'private|protected|public|short|static|self|throws|reverse|'
+ r'transient|volatile)\b', Keyword.Reserved),
+ (r'(true|false|null|undefined)\b', Keyword.Constant),
+ (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
+ r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
+ r'window)\b', Name.Builtin),
+ (r'[$a-zA-Z_][a-zA-Z0-9_\\]*', Name.Other),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ (r'"(\\\\|\\"|[^"])*"', String.Double),
+ (r"'(\\\\|\\'|[^'])*'", String.Single),
+ ]
+ }
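With `CirruLexer`, `MaskLexer` and `ZephirLexer` exported through `__all__`, the new languages plug into the normal highlighting API once the lexer mapping is regenerated. A minimal sketch, assuming the `zephir` alias declared above and illustrative sample code:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.web import ZephirLexer

    code = 'namespace Utils;\n\nclass Greeting\n{\n}\n'
    print(highlight(code, ZephirLexer(), TerminalFormatter()))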
diff --git a/pygments/sphinxext.py b/pygments/sphinxext.py
new file mode 100644
index 00000000..5ab8f060
--- /dev/null
+++ b/pygments/sphinxext.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.sphinxext
+ ~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension to generate automatic documentation of lexers,
+ formatters and filters.
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from sphinx.util.compat import Directive
+from sphinx.util.nodes import nested_parse_with_titles
+
+
+MODULEDOC = '''
+.. module:: %s
+
+%s
+%s
+'''
+
+LEXERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+ :MIME types: %s
+
+ %s
+
+'''
+
+FMTERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+
+ %s
+
+'''
+
+FILTERDOC = '''
+.. class:: %s
+
+ :Name: %s
+
+ %s
+
+'''
+
+class PygmentsDoc(Directive):
+ """
+ A directive to collect all lexers/formatters/filters and generate
+ autoclass directives for them.
+ """
+ has_content = False
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {}
+
+ def run(self):
+ self.filenames = set()
+ if self.arguments[0] == 'lexers':
+ out = self.document_lexers()
+ elif self.arguments[0] == 'formatters':
+ out = self.document_formatters()
+ elif self.arguments[0] == 'filters':
+ out = self.document_filters()
+ else:
+ raise Exception('invalid argument for "pygmentsdoc" directive')
+ node = nodes.compound()
+ vl = ViewList(out.split('\n'), source='')
+ nested_parse_with_titles(self.state, vl, node)
+ for fn in self.filenames:
+ self.state.document.settings.record_dependencies.add(fn)
+ return node.children
+
+ def document_lexers(self):
+ from pygments.lexers._mapping import LEXERS
+ out = []
+ modules = {}
+ moduledocstrings = {}
+ for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
+ module = data[0]
+ mod = __import__(module, None, None, [classname])
+ self.filenames.add(mod.__file__)
+ cls = getattr(mod, classname)
+ if not cls.__doc__:
+ print("Warning: %s does not have a docstring." % classname)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ modules.setdefault(module, []).append((
+ classname,
+ ', '.join(data[2]) or 'None',
+ ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
+ ', '.join(data[4]) or 'None',
+ docstring))
+ if module not in moduledocstrings:
+ moddoc = mod.__doc__
+ if isinstance(moddoc, bytes):
+ moddoc = moddoc.decode('utf8')
+ moduledocstrings[module] = moddoc
+
+ for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
+ heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
+ out.append(MODULEDOC % (module, heading, '-'*len(heading)))
+ for data in lexers:
+ out.append(LEXERDOC % data)
+
+ return ''.join(out)
+
+ def document_formatters(self):
+ from pygments.formatters import FORMATTERS
+
+ out = []
+ for cls, data in sorted(FORMATTERS.items(),
+ key=lambda x: x[0].__name__):
+ self.filenames.add(sys.modules[cls.__module__].__file__)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ heading = cls.__name__
+ out.append(FMTERDOC % (heading, ', '.join(data[1]) or 'None',
+ ', '.join(data[2]).replace('*', '\\*') or 'None',
+ docstring))
+ return ''.join(out)
+
+ def document_filters(self):
+ from pygments.filters import FILTERS
+
+ out = []
+ for name, cls in FILTERS.items():
+ self.filenames.add(sys.modules[cls.__module__].__file__)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ out.append(FILTERDOC % (cls.__name__, name, docstring))
+ return ''.join(out)
+
+
+def setup(app):
+ app.add_directive('pygmentsdoc', PygmentsDoc)
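`setup()` makes the extension loadable from a Sphinx project; the directive then takes exactly one of the three argument values handled in `run()`. A sketch of the wiring as a documentation build would consume it:

    # conf.py (Sphinx) -- enable the extension shipped with Pygments
    extensions = ['pygments.sphinxext']

    # Then, in an .rst source file, use one of:
    #
    #   .. pygmentsdoc:: lexers
    #   .. pygmentsdoc:: formatters
    #   .. pygmentsdoc:: filters
    #
    # Any other argument raises the Exception checked in run().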
diff --git a/pygments/style.py b/pygments/style.py
index d0fc26be..bb54377c 100644
--- a/pygments/style.py
+++ b/pygments/style.py
@@ -10,6 +10,7 @@
"""
from pygments.token import Token, STANDARD_TYPES
+from pygments.util import add_metaclass
class StyleMeta(type):
@@ -104,8 +105,8 @@ class StyleMeta(type):
return len(cls._styles)
+@add_metaclass(StyleMeta)
class Style(object):
- __metaclass__ = StyleMeta
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
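Swapping `__metaclass__` for the `add_metaclass` decorator keeps `StyleMeta` applied on both Python 2 and 3; user-defined styles are untouched and still subclass `Style` as before. A minimal sketch:

    from pygments.style import Style
    from pygments.token import Comment, Keyword

    class MinimalStyle(Style):
        background_color = '#ffffff'
        styles = {
            Keyword: 'bold #005588',
            Comment: 'italic #888888',
        }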
diff --git a/pygments/token.py b/pygments/token.py
index f6c3066d..c40ffd33 100644
--- a/pygments/token.py
+++ b/pygments/token.py
@@ -49,6 +49,7 @@ Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
+Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
@@ -116,6 +117,7 @@ STANDARD_TYPES = {
Text: '',
Whitespace: 'w',
+ Escape: 'esc',
Error: 'err',
Other: 'x',
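The new `Escape` token type maps to the short CSS class ``esc``, giving lexers a dedicated top-level type for escape sequences. Sketch:

    from pygments.token import Escape, STANDARD_TYPES

    # Token.Escape is now a first-class type with its own CSS class.
    assert STANDARD_TYPES[Escape] == 'esc'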
diff --git a/pygments/unistring.py b/pygments/unistring.py
index da87b6df..2752037f 100644
--- a/pygments/unistring.py
+++ b/pygments/unistring.py
@@ -11,7 +11,8 @@
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from pygments.util import u_prefix
+
+from pygments.util import u_prefix, unichr
Cc = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f'
@@ -133,7 +134,7 @@ except UnicodeDecodeError:
Cs = '' # Jython can't handle isolated surrogates\n\n""" % repr(val).lstrip('u'))
else:
f.write('%s = %r\n\n' % (cat, val))
- f.write('cats = %r\n\n' % sorted(categories.keys()))
+ f.write('cats = %r\n\n' % sorted(categories))
f.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
f.write(footer)
diff --git a/pygments/util.py b/pygments/util.py
index d40a88c8..c302900f 100644
--- a/pygments/util.py
+++ b/pygments/util.py
@@ -11,7 +11,6 @@
import re
import sys
-import codecs
split_path_re = re.compile(r'[/\\ ]')
@@ -52,7 +51,7 @@ def get_bool_opt(options, optname, default=None):
return string
elif isinstance(string, int):
return bool(string)
- elif not isinstance(string, basestring):
+ elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
@@ -82,7 +81,7 @@ def get_int_opt(options, optname, default=None):
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
@@ -253,25 +252,35 @@ def unirange(a, b):
# Python 2/3 compatibility
-if sys.version_info < (3,0):
- b = bytes = str
+if sys.version_info < (3, 0):
+ unichr = unichr
+ xrange = xrange
+ string_types = (str, unicode)
+ text_type = unicode
u_prefix = 'u'
+ iteritems = dict.iteritems
+ itervalues = dict.itervalues
import StringIO, cStringIO
- BytesIO = cStringIO.StringIO
+ # unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
- uni_open = codecs.open
+ BytesIO = cStringIO.StringIO
else:
- import builtins
- bytes = builtins.bytes
+ unichr = chr
+ xrange = range
+ string_types = (str,)
+ text_type = str
u_prefix = ''
- def b(s):
- if isinstance(s, str):
- return bytes(map(ord, s))
- elif isinstance(s, bytes):
- return s
- else:
- raise TypeError("Invalid argument %r for b()" % (s,))
- import io
- BytesIO = io.BytesIO
- StringIO = io.StringIO
- uni_open = builtins.open
+ iteritems = dict.items
+ itervalues = dict.values
+ from io import StringIO, BytesIO
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ for slots_var in orig_vars.get('__slots__', ()):
+ orig_vars.pop(slots_var)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
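`add_metaclass` follows the same approach as the `six` library: it recreates the decorated class through the metaclass, so one class body works on Python 2 and 3 alike, which is exactly how `Style` uses it above. A standalone sketch with a hypothetical metaclass:

    from pygments.util import add_metaclass

    class Registry(type):
        classes = []

        def __new__(mcs, name, bases, ns):
            cls = type.__new__(mcs, name, bases, ns)
            mcs.classes.append(cls)
            return cls

    @add_metaclass(Registry)
    class Plugin(object):
        pass

    assert Plugin in Registry.classes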
diff --git a/scripts/check_sources.py b/scripts/check_sources.py
index a0bebe27..71aff299 100755
--- a/scripts/check_sources.py
+++ b/scripts/check_sources.py
@@ -11,9 +11,13 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os, re
+from __future__ import print_function
+
+import io
+import os
+import re
+import sys
import getopt
-import cStringIO
from os.path import join, splitext, abspath
@@ -46,7 +50,7 @@ misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
- except SyntaxError, err:
+ except SyntaxError as err:
yield 0, "not compilable: %s" % err
@@ -67,9 +71,12 @@ def check_style_and_encoding(fn, lines):
encoding = co.group(1)
try:
line.decode(encoding)
- except UnicodeDecodeError, err:
+ except AttributeError:
+ # Python 3 - encoding was already checked
+ pass
+ except UnicodeDecodeError as err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
- except LookupError, err:
+ except LookupError as err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@@ -130,7 +137,7 @@ def check_fileheader(fn, lines):
yield 0, "no correct license info"
ci = -3
- copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
+ copyright = llist[ci:ci+1]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
@@ -165,7 +172,7 @@ def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+ print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
opts = {}
for opt, val in gopts:
@@ -178,20 +185,20 @@ def main(argv):
elif len(args) == 1:
path = args[0]
else:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+ print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
verbose = '-v' in opts
num = 0
- out = cStringIO.StringIO()
+ out = io.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
- if '.svn' in dirs:
- dirs.remove('.svn')
+ if '.hg' in dirs:
+ dirs.remove('.hg')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
@@ -212,13 +219,13 @@ def main(argv):
continue
if verbose:
- print "Checking %s..." % fn
+ print("Checking %s..." % fn)
try:
f = open(fn, 'r')
lines = list(f)
- except (IOError, OSError), err:
- print "%s: cannot open: %s" % (fn, err)
+ except (IOError, OSError) as err:
+ print("%s: cannot open: %s" % (fn, err))
num += 1
continue
@@ -226,15 +233,15 @@ def main(argv):
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
- print >>out, "%s:%d: %s" % (fn, lno, msg)
+ print(u"%s:%d: %s" % (fn, lno, msg), file=out)
num += 1
if verbose:
- print
+ print()
if num == 0:
- print "No errors found."
+ print("No errors found.")
else:
- print out.getvalue().rstrip('\n')
- print "%d error%s found." % (num, num > 1 and "s" or "")
+ print(out.getvalue().rstrip('\n'))
+ print("%d error%s found." % (num, num > 1 and "s" or ""))
return int(num > 0)
diff --git a/scripts/detect_missing_analyse_text.py b/scripts/detect_missing_analyse_text.py
index 1312648f..ab58558e 100644
--- a/scripts/detect_missing_analyse_text.py
+++ b/scripts/detect_missing_analyse_text.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import sys
from pygments.lexers import get_all_lexers, find_lexer_class
@@ -9,22 +10,22 @@ def main():
for name, aliases, filenames, mimetypes in get_all_lexers():
cls = find_lexer_class(name)
if not cls.aliases:
- print cls, "has no aliases"
+ print(cls, "has no aliases")
for f in filenames:
if f not in uses:
uses[f] = []
uses[f].append(cls)
ret = 0
- for k, v in uses.iteritems():
+ for k, v in uses.items():
if len(v) > 1:
#print "Multiple for", k, v
for i in v:
if i.analyse_text is None:
- print i, "has a None analyse_text"
+ print(i, "has a None analyse_text")
ret |= 1
elif Lexer.analyse_text.__doc__ == i.analyse_text.__doc__:
- print i, "needs analyse_text, multiple lexers for", k
+ print(i, "needs analyse_text, multiple lexers for", k)
ret |= 2
return ret
diff --git a/scripts/find_codetags.py b/scripts/find_codetags.py
index 44c30a41..f8204e6e 100755
--- a/scripts/find_codetags.py
+++ b/scripts/find_codetags.py
@@ -11,7 +11,11 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os, re
+from __future__ import print_function
+
+import os
+import re
+import sys
import getopt
from os.path import join, abspath, isdir, isfile
@@ -73,8 +77,8 @@ def main():
try:
gopts, args = getopt.getopt(sys.argv[1:], "vo:i:")
except getopt.GetoptError:
- print ("Usage: %s [-v] [-i ignoredir]* [-o reportfile.html] "
- "path ..." % sys.argv[0])
+ print(("Usage: %s [-v] [-i ignoredir]* [-o reportfile.html] "
+ "path ..." % sys.argv[0]))
return 2
opts = {}
for opt, val in gopts:
@@ -97,18 +101,18 @@ def main():
num = 0
for path in args:
- print "Searching for code tags in %s, please wait." % path
+ print("Searching for code tags in %s, please wait." % path)
if isfile(path):
gnum += 1
if process_file(store, path):
if verbose:
- print path + ": found %d tags" % \
- (path in store and len(store[path]) or 0)
+ print(path + ": found %d tags" % \
+ (path in store and len(store[path]) or 0))
num += 1
else:
if verbose:
- print path + ": binary or not readable"
+ print(path + ": binary or not readable")
continue
elif not isdir(path):
continue
@@ -117,11 +121,15 @@ def main():
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
- if '.svn' in dirs:
- dirs.remove('.svn')
+ if '.hg' in dirs:
+ dirs.remove('.hg')
+ if 'examplefiles' in dirs:
+ dirs.remove('examplefiles')
+ if 'dist' in dirs:
+ dirs.remove('dist')
for fn in files:
gnum += 1
- if gnum % 50 == 0 and not verbose:
+ if gnum % 25 == 0 and not verbose:
sys.stdout.write('.')
sys.stdout.flush()
@@ -137,16 +145,16 @@ def main():
if fn[:2] == './': fn = fn[2:]
if process_file(store, fn):
if verbose:
- print fn + ": found %d tags" % \
- (fn in store and len(store[fn]) or 0)
+ print(fn + ": found %d tags" % \
+ (fn in store and len(store[fn]) or 0))
num += 1
else:
if verbose:
- print fn + ": binary or not readable"
- print
+ print(fn + ": binary or not readable")
+ print()
- print "Processed %d of %d files. Found %d tags in %d files." % (
- num, gnum, sum(len(fitem) for fitem in store.itervalues()), len(store))
+ print("Processed %d of %d files. Found %d tags in %d files." % (
+ num, gnum, sum(len(fitem) for fitem in store.values()), len(store)))
if not store:
return 0
@@ -190,7 +198,7 @@ td { padding: 2px 5px 2px 5px;
'<td class="tag %%(tag)s">%%(tag)s</td>'
'<td class="who">%%(who)s</td><td class="what">%%(what)s</td></tr>')
- f = file(output, 'w')
+ f = open(output, 'w')
table = '\n'.join(TABLE % fname +
'\n'.join(TR % (no % 2,) % entry
for no, entry in enumerate(store[fname]))
@@ -198,7 +206,7 @@ td { padding: 2px 5px 2px 5px;
f.write(HTML % (', '.join(map(abspath, args)), table))
f.close()
- print "Report written to %s." % output
+ print("Report written to %s." % output)
return 0
if __name__ == '__main__':
diff --git a/scripts/find_error.py b/scripts/find_error.py
index 75f4b0ff..7aaa9bee 100755
--- a/scripts/find_error.py
+++ b/scripts/find_error.py
@@ -12,7 +12,10 @@
:license: BSD, see LICENSE for details.
"""
-import sys, os
+from __future__ import print_function
+
+import os
+import sys
# always prefer Pygments from source if exists
srcpath = os.path.join(os.path.dirname(__file__), '..')
@@ -104,36 +107,36 @@ def main(fn, lexer=None, options={}):
# already debugged before
debug_lexer = True
lno = 1
- text = file(fn, 'U').read()
+ text = open(fn, 'U').read()
text = text.strip('\n') + '\n'
tokens = []
states = []
def show_token(tok, state):
reprs = map(repr, tok)
- print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
+ print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
if debug_lexer:
- print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
- print
+ print(' ' + ' ' * (29-len(reprs[0])) + repr(state), end=' ')
+ print()
for type, val in lx.get_tokens(text):
lno += val.count('\n')
if type == Error:
- print 'Error parsing', fn, 'on line', lno
- print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
+ print('Error parsing', fn, 'on line', lno)
+ print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
if showall:
for tok, state in map(None, tokens, states):
show_token(tok, state)
else:
for i in range(max(len(tokens) - num, 0), len(tokens)):
show_token(tokens[i], states[i])
- print 'Error token:'
+ print('Error token:')
l = len(repr(val))
- print ' ' + repr(val),
+ print(' ' + repr(val), end=' ')
if debug_lexer and hasattr(lx, 'statestack'):
- print ' ' * (60-l) + repr(lx.statestack),
- print
- print
+ print(' ' * (60-l) + repr(lx.statestack), end=' ')
+ print()
+ print()
return 1
tokens.append((type, val))
if debug_lexer:
diff --git a/scripts/get_vimkw.py b/scripts/get_vimkw.py
index 153c88c3..4ea302f4 100644
--- a/scripts/get_vimkw.py
+++ b/scripts/get_vimkw.py
@@ -1,5 +1,5 @@
+from __future__ import print_function
import re
-from pprint import pprint
r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
@@ -31,12 +31,12 @@ def getkw(input, output):
for a, b in output_info.items():
b.sort()
- print >>out, '%s=[%s]' % (a, ','.join(b))
+ print('%s=[%s]' % (a, ','.join(b)), file=out)
def is_keyword(w, keywords):
for i in range(len(w), 0, -1):
if w[:i] in keywords:
- return signals[w[:i]][:len(w)] == w
+ return keywords[w[:i]][:len(w)] == w
return False
if __name__ == "__main__":
diff --git a/scripts/reindent.py b/scripts/reindent.py
deleted file mode 100755
index e6ee8287..00000000
--- a/scripts/reindent.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#! /usr/bin/env python
-
-# Released to the public domain, by Tim Peters, 03 October 2000.
-# -B option added by Georg Brandl, 2006.
-
-"""reindent [-d][-r][-v] [ path ... ]
-
--d (--dryrun) Dry run. Analyze, but don't make any changes to files.
--r (--recurse) Recurse. Search for all .py files in subdirectories too.
--B (--no-backup) Don't write .bak backup files.
--v (--verbose) Verbose. Print informative msgs; else only names of changed files.
--h (--help) Help. Print this usage information and exit.
-
-Change Python (.py) files to use 4-space indents and no hard tab characters.
-Also trim excess spaces and tabs from ends of lines, and remove empty lines
-at the end of files. Also ensure the last line ends with a newline.
-
-If no paths are given on the command line, reindent operates as a filter,
-reading a single source file from standard input and writing the transformed
-source to standard output. In this case, the -d, -r and -v flags are
-ignored.
-
-You can pass one or more file and/or directory paths. When a directory
-path, all .py files within the directory will be examined, and, if the -r
-option is given, likewise recursively for subdirectories.
-
-If output is not to standard output, reindent overwrites files in place,
-renaming the originals with a .bak extension. If it finds nothing to
-change, the file is left alone. If reindent does change a file, the changed
-file is a fixed-point for future runs (i.e., running reindent on the
-resulting .py file won't change it again).
-
-The hard part of reindenting is figuring out what to do with comment
-lines. So long as the input files get a clean bill of health from
-tabnanny.py, reindent should do a good job.
-"""
-
-__version__ = "1"
-
-import tokenize
-import os
-import sys
-
-verbose = 0
-recurse = 0
-dryrun = 0
-no_backup = 0
-
-def usage(msg=None):
- if msg is not None:
- print >> sys.stderr, msg
- print >> sys.stderr, __doc__
-
-def errprint(*args):
- sep = ""
- for arg in args:
- sys.stderr.write(sep + str(arg))
- sep = " "
- sys.stderr.write("\n")
-
-def main():
- import getopt
- global verbose, recurse, dryrun, no_backup
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], "drvhB",
- ["dryrun", "recurse", "verbose", "help",
- "no-backup"])
- except getopt.error, msg:
- usage(msg)
- return
- for o, a in opts:
- if o in ('-d', '--dryrun'):
- dryrun += 1
- elif o in ('-r', '--recurse'):
- recurse += 1
- elif o in ('-v', '--verbose'):
- verbose += 1
- elif o in ('-B', '--no-backup'):
- no_backup += 1
- elif o in ('-h', '--help'):
- usage()
- return
- if not args:
- r = Reindenter(sys.stdin)
- r.run()
- r.write(sys.stdout)
- return
- for arg in args:
- check(arg)
-
-def check(file):
- if os.path.isdir(file) and not os.path.islink(file):
- if verbose:
- print "listing directory", file
- names = os.listdir(file)
- for name in names:
- fullname = os.path.join(file, name)
- if ((recurse and os.path.isdir(fullname) and
- not os.path.islink(fullname))
- or name.lower().endswith(".py")):
- check(fullname)
- return
-
- if verbose:
- print "checking", file, "...",
- try:
- f = open(file)
- except IOError, msg:
- errprint("%s: I/O Error: %s" % (file, str(msg)))
- return
-
- r = Reindenter(f)
- f.close()
- if r.run():
- if verbose:
- print "changed."
- if dryrun:
- print "But this is a dry run, so leaving it alone."
- else:
- print "reindented", file, (dryrun and "(dry run => not really)" or "")
- if not dryrun:
- if not no_backup:
- bak = file + ".bak"
- if os.path.exists(bak):
- os.remove(bak)
- os.rename(file, bak)
- if verbose:
- print "renamed", file, "to", bak
- f = open(file, "w")
- r.write(f)
- f.close()
- if verbose:
- print "wrote new", file
- else:
- if verbose:
- print "unchanged."
-
-
-class Reindenter:
-
- def __init__(self, f):
- self.find_stmt = 1 # next token begins a fresh stmt?
- self.level = 0 # current indent level
-
- # Raw file lines.
- self.raw = f.readlines()
-
- # File lines, rstripped & tab-expanded. Dummy at start is so
- # that we can use tokenize's 1-based line numbering easily.
- # Note that a line is all-blank iff it's "\n".
- self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
- for line in self.raw]
- self.lines.insert(0, None)
- self.index = 1 # index into self.lines of next line
-
- # List of (lineno, indentlevel) pairs, one for each stmt and
- # comment line. indentlevel is -1 for comment lines, as a
- # signal that tokenize doesn't know what to do about them;
- # indeed, they're our headache!
- self.stats = []
-
- def run(self):
- tokenize.tokenize(self.getline, self.tokeneater)
- # Remove trailing empty lines.
- lines = self.lines
- while lines and lines[-1] == "\n":
- lines.pop()
- # Sentinel.
- stats = self.stats
- stats.append((len(lines), 0))
- # Map count of leading spaces to # we want.
- have2want = {}
- # Program after transformation.
- after = self.after = []
- # Copy over initial empty lines -- there's nothing to do until
- # we see a line with *something* on it.
- i = stats[0][0]
- after.extend(lines[1:i])
- for i in range(len(stats)-1):
- thisstmt, thislevel = stats[i]
- nextstmt = stats[i+1][0]
- have = getlspace(lines[thisstmt])
- want = thislevel * 4
- if want < 0:
- # A comment line.
- if have:
- # An indented comment line. If we saw the same
- # indentation before, reuse what it most recently
- # mapped to.
- want = have2want.get(have, -1)
- if want < 0:
- # Then it probably belongs to the next real stmt.
- for j in xrange(i+1, len(stats)-1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- if have == getlspace(lines[jline]):
- want = jlevel * 4
- break
- if want < 0: # Maybe it's a hanging
- # comment like this one,
- # in which case we should shift it like its base
- # line got shifted.
- for j in xrange(i-1, -1, -1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- want = have + getlspace(after[jline-1]) - \
- getlspace(lines[jline])
- break
- if want < 0:
- # Still no luck -- leave it alone.
- want = have
- else:
- want = 0
- assert want >= 0
- have2want[have] = want
- diff = want - have
- if diff == 0 or have == 0:
- after.extend(lines[thisstmt:nextstmt])
- else:
- for line in lines[thisstmt:nextstmt]:
- if diff > 0:
- if line == "\n":
- after.append(line)
- else:
- after.append(" " * diff + line)
- else:
- remove = min(getlspace(line), -diff)
- after.append(line[remove:])
- return self.raw != self.after
-
- def write(self, f):
- f.writelines(self.after)
-
- # Line-getter for tokenize.
- def getline(self):
- if self.index >= len(self.lines):
- line = ""
- else:
- line = self.lines[self.index]
- self.index += 1
- return line
-
- # Line-eater for tokenize.
- def tokeneater(self, type, token, (sline, scol), end, line,
- INDENT=tokenize.INDENT,
- DEDENT=tokenize.DEDENT,
- NEWLINE=tokenize.NEWLINE,
- COMMENT=tokenize.COMMENT,
- NL=tokenize.NL):
-
- if type == NEWLINE:
- # A program statement, or ENDMARKER, will eventually follow,
- # after some (possibly empty) run of tokens of the form
- # (NL | COMMENT)* (INDENT | DEDENT+)?
- self.find_stmt = 1
-
- elif type == INDENT:
- self.find_stmt = 1
- self.level += 1
-
- elif type == DEDENT:
- self.find_stmt = 1
- self.level -= 1
-
- elif type == COMMENT:
- if self.find_stmt:
- self.stats.append((sline, -1))
- # but we're still looking for a new stmt, so leave
- # find_stmt alone
-
- elif type == NL:
- pass
-
- elif self.find_stmt:
- # This is the first "real token" following a NEWLINE, so it
- # must be the first token of the next program statement, or an
- # ENDMARKER.
- self.find_stmt = 0
- if line: # not endmarker
- self.stats.append((sline, self.level))
-
-# Count number of leading blanks.
-def getlspace(line):
- i, n = 0, len(line)
- while i < n and line[i] == " ":
- i += 1
- return i
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/vim2pygments.py b/scripts/vim2pygments.py
index 80f0ada2..42af0bbe 100644..100755
--- a/scripts/vim2pygments.py
+++ b/scripts/vim2pygments.py
@@ -11,10 +11,12 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import sys
import re
from os import path
-from cStringIO import StringIO
+from io import StringIO
split_re = re.compile(r'(?<!\\)\s+')
@@ -765,7 +767,7 @@ TOKENS = {
}
TOKEN_TYPES = set()
-for token in TOKENS.itervalues():
+for token in TOKENS.values():
if not isinstance(token, tuple):
token = (token,)
for token in token:
@@ -836,7 +838,7 @@ def find_colors(code):
colors['Normal']['bgcolor'] = bg_color
color_map = {}
- for token, styles in colors.iteritems():
+ for token, styles in colors.items():
if token in TOKENS:
tmp = []
if styles.get('noinherit'):
@@ -879,7 +881,7 @@ class StyleWriter(object):
def write(self, out):
self.write_header(out)
default_token, tokens = find_colors(self.code)
- tokens = tokens.items()
+ tokens = list(tokens.items())
tokens.sort(lambda a, b: cmp(len(a[0]), len(a[1])))
bg_color = [x[3:] for x in default_token.split() if x.startswith('bg:')]
if bg_color:
@@ -916,14 +918,14 @@ def convert(filename, stream=None):
def main():
if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
- print 'Usage: %s <filename.vim>' % sys.argv[0]
+ print('Usage: %s <filename.vim>' % sys.argv[0])
return 2
if sys.argv[1] in ('-v', '--version'):
- print '%s %s' % (SCRIPT_NAME, SCRIPT_VERSION)
+ print('%s %s' % (SCRIPT_NAME, SCRIPT_VERSION))
return
filename = sys.argv[1]
if not (path.exists(filename) and path.isfile(filename)):
- print 'Error: %s not found' % filename
+ print('Error: %s not found' % filename)
return 1
convert(filename, sys.stdout)
sys.stdout.write('\n')
diff --git a/setup.py b/setup.py
index 45054508..a0b2e90b 100755
--- a/setup.py
+++ b/setup.py
@@ -48,11 +48,6 @@ except ImportError:
]
have_setuptools = False
-try:
- from distutils.command.build_py import build_py_2to3 as build_py
-except ImportError:
- from distutils.command.build_py import build_py
-
if have_setuptools:
add_keywords = dict(
entry_points = {
@@ -66,7 +61,7 @@ else:
setup(
name = 'Pygments',
- version = '1.6',
+ version = '2.0pre',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
@@ -91,6 +86,5 @@ setup(
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
- cmdclass = {'build_py': build_py},
**add_keywords
)
diff --git a/tests/examplefiles/core.cljs b/tests/examplefiles/core.cljs
new file mode 100644
index 00000000..f135b832
--- /dev/null
+++ b/tests/examplefiles/core.cljs
@@ -0,0 +1,52 @@
+
+(ns bounder.core
+ (:require [bounder.html :as html]
+ [domina :refer [value set-value! single-node]]
+ [domina.css :refer [sel]]
+ [lowline.functions :refer [debounce]]
+ [enfocus.core :refer [at]]
+ [cljs.reader :as reader]
+ [clojure.string :as s])
+ (:require-macros [enfocus.macros :as em]))
+
+(def filter-input
+ (single-node
+ (sel ".search input")))
+
+(defn project-matches [query project]
+ (let [words (cons (:name project)
+ (map name (:categories project)))
+ to-match (->> words
+ (s/join "")
+ (s/lower-case))]
+ (<= 0 (.indexOf to-match (s/lower-case query)))))
+
+(defn apply-filter-for [projects]
+ (let [query (value filter-input)]
+ (html/render-projects
+ (filter (partial project-matches query)
+ projects))))
+
+(defn filter-category [projects evt]
+ (let [target (.-currentTarget evt)]
+ (set-value! filter-input
+ (.-innerHTML target))
+ (apply-filter-for projects)))
+
+(defn init-listeners [projects]
+ (at js/document
+ ["input"] (em/listen
+ :keyup
+ (debounce
+ (partial apply-filter-for projects)
+ 500))
+ [".category-links li"] (em/listen
+ :click
+ (partial filter-category projects))))
+
+(defn init [projects-edn]
+ (let [projects (reader/read-string projects-edn)]
+ (init-listeners projects)
+ (html/render-projects projects)
+ (html/loaded)))
+
diff --git a/tests/examplefiles/example.gd b/tests/examplefiles/example.gd
new file mode 100644
index 00000000..c285ea32
--- /dev/null
+++ b/tests/examplefiles/example.gd
@@ -0,0 +1,23 @@
+#############################################################################
+##
+#W example.gd
+##
+## This file contains a sample of a GAP declaration file.
+##
+DeclareProperty( "SomeProperty", IsLeftModule );
+DeclareGlobalFunction( "SomeGlobalFunction" );
+
+
+#############################################################################
+##
+#C IsQuuxFrobnicator(<R>)
+##
+## <ManSection>
+## <Filt Name="IsQuuxFrobnicator" Arg='R' Type='Category'/>
+##
+## <Description>
+## Tests whether R is a quux frobnicator.
+## </Description>
+## </ManSection>
+##
+DeclareSynonym( "IsQuuxFrobnicator", IsField and IsGroup );
diff --git a/tests/examplefiles/example.gi b/tests/examplefiles/example.gi
new file mode 100644
index 00000000..c9c5e55d
--- /dev/null
+++ b/tests/examplefiles/example.gi
@@ -0,0 +1,64 @@
+#############################################################################
+##
+#W example.gd
+##
+## This file contains a sample of a GAP implementation file.
+##
+
+
+#############################################################################
+##
+#M SomeOperation( <val> )
+##
+## performs some operation on <val>
+##
+InstallMethod( SomeProperty,
+ "for left modules",
+ [ IsLeftModule ], 0,
+ function( M )
+ if IsFreeLeftModule( M ) and not IsTrivial( M ) then
+ return true;
+ fi;
+ TryNextMethod();
+ end );
+
+
+
+#############################################################################
+##
+#F SomeGlobalFunction( )
+##
+## A global variadic function.
+##
+InstallGlobalFunction( SomeGlobalFunction, function( arg )
+ if Length( arg ) = 3 then
+ return arg[1] + arg[2] * arg[3];
+ elif Length( arg ) = 2 then
+ return arg[1] - arg[2];
+ else
+ Error( "usage: SomeGlobalFunction( <x>, <y>[, <z>] )" );
+ fi;
+ end );
+
+
+#
+# A plain function.
+#
+SomeFunc := function(x, y)
+ local z, func, tmp, j;
+ z := x * 1.0;
+ y := 17^17 - y;
+ func := a -> a mod 5;
+ tmp := List( [1..50], func );
+ while y > 0 do
+ for j in tmp do
+ Print(j, "\n");
+ od;
+ repeat
+ y := y - 1;
+ until 0 < 1;
+ y := y -1;
+ od;
+ return z;
+end;
+ \ No newline at end of file
diff --git a/tests/examplefiles/example.i6t b/tests/examplefiles/example.i6t
new file mode 100644
index 00000000..0f41b425
--- /dev/null
+++ b/tests/examplefiles/example.i6t
@@ -0,0 +1,32 @@
+B/examt: Example Template.
+
+@Purpose: To show the syntax of I6T, specifically the parts relating to the
+inclusion of I7 and at signs in the first column.
+
+@-------------------------------------------------------------------------------
+
+@p Lines.
+
+@c
+{-lines:type}
+! This is a comment.
+{-endlines}
+
+@-This line begins with @-, so it is ignored.
+
+@p Paragraph.
+This is a paragraph.
+@p Another paragraph.
+So
+
+is
+
+this.
+
+@Purpose: This purpose line is ignored.
+
+@c At signs and (+ +).
+[ Foo i;
+print (+score [an I7 value]+), "^";
+@add sp 1 -> i; ! Assembly works even in the first column.
+];
diff --git a/tests/examplefiles/example.i7x b/tests/examplefiles/example.i7x
new file mode 100644
index 00000000..ab94ac69
--- /dev/null
+++ b/tests/examplefiles/example.i7x
@@ -0,0 +1,45 @@
+example by David Corbett begins here.
+
+"Implements testable examples."
+
+An example is a kind of thing. An example can be tested. An example is seldom tested.
+
+example ends here.
+
+----
+[The] documentation [starts here.]
+----
+
+This extension adds examples, which may be tested.
+
+Chapter: Usage
+
+To add an example to the story, we write:
+
+ The foobar is an example.
+
+To interact with it in Inform 6, we write something like:
+
+ To say (E - example): (-
+ print (object) {E};
+ -).
+ [The IDE's documentation viewer does not display the closing -). I don't know how to fix that.]
+
+Section: Testing
+
+We can make an example be tested using:
+
+ now the foobar is tested;
+
+Example: * Exempli Gratia - A simple example.
+
+ *: "Exempli Gratia"
+
+ Include example by David Corbett.
+
+ The Kitchen is a room. The egg is an example, here.
+
+ Before dropping the egg:
+ now the egg is tested.
+
+ Test me with "get egg / drop egg".
diff --git a/tests/examplefiles/example.inf b/tests/examplefiles/example.inf
new file mode 100644
index 00000000..73cdd087
--- /dev/null
+++ b/tests/examplefiles/example.inf
@@ -0,0 +1,374 @@
+!% $SMALL ! This is ICL, not a comment.
+!% -w
+
+!% A comprehensive test of Inform6Lexer.
+
+Switches d2SDq;
+
+Constant Story "Informal Testing";
+Constant Headline "^Not a game.^";!% This is a comment, not ICL.
+
+Release 2;
+Serial "140308";
+Version 5;
+
+Ifndef TARGET_ZCODE;
+Ifndef TARGET_GLULX;
+Ifndef WORDSIZE;
+Default WORDSIZE 2;
+Constant TARGET_ZCODE;
+Endif;
+Endif;
+Endif;
+
+Ifv3; Message "Compiling to version 3"; Endif;
+Ifv5; Message "Not compiling to version 3"; endif;
+ifdef TARGET_ZCODE;
+#IFTRUE (#version_number == 5);
+Message "Compiling to version 5";
+#ENDIF;
+endif ;
+
+Replace CreatureTest;
+
+Include "Parser";
+Include "VerbLib";
+
+# ! A hash is optional at the top level.
+Object kitchen "Kitchen"
+ with description "You are in a kitchen.",
+ arr 1 2 3 4,
+ has light;
+
+#[ Initialise;
+ location = kitchen;
+ print "v"; inversion; "^";
+];
+
+Ifdef VN_1633;
+Replace IsSeeThrough IsSeeThroughOrig;
+[ IsSeeThrough * o;
+ return o hasnt opaque || IsSeeThroughOrig(o);
+];
+Endif;
+
+Abbreviate "test";
+
+Array table buffer 260;
+
+Attribute reversed;
+Attribute opaque alias locked;
+Constant to reversed;
+
+Property long additive additive long alias;
+Property long long long wingspan alias alias;
+
+Class Flier with wingspan 5;
+Class Bird(10) has animate class Flier with wingspan 2;
+
+Constant Constant1;
+Constant Constant2 Constant1;
+Constant Constant3 = Constant2;
+Ifdef VN_1633; Undef Constant; Endif;
+
+Ifdef VN_1633;
+Dictionary 'word' 1 2;
+Ifnot;
+Dictionary dict_word "word";
+Endif;
+
+Fake_action NotReal;
+
+Global global1;
+Global global2 = 69105;
+
+Lowstring low_string "low string";
+
+Iftrue false;
+Message error "Uh-oh!^~false~ shouldn't be ~true~.";
+Endif;
+Iffalse true;
+Message fatalerror "Uh-oh!^~true~ shouldn't be ~false~.";
+Endif;
+
+Nearby person "person"
+ with name 'person',
+ description "This person is barely implemented.",
+ life [ * x y z;
+ Ask: print_ret (The) self, " says nothing.";
+ Answer: print (The) self, " didn't say anything.^"; rfalse;
+ ]
+ has has animate transparent;
+
+Object -> -> test_tube "test tube"
+ with name 'test' "tube" 'testtube',
+ has ~openable ~opaque container;
+
+Bird -> pigeon
+ with name 'pigeon',
+ description [;
+ "The pigeon has a wingspan of ", self.&wingspan-->0, " wing units.";
+ ];
+
+Object -> "thimble" with name 'thimble';
+
+Object -> pebble "pebble" with name 'pebble';
+
+Ifdef TARGET_ZCODE; Trace objects; Endif;
+
+Statusline score;
+
+Stub StubR 3;
+
+Ifdef TARGET_ZCODE;
+Zcharacter "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "123456789.,!?_#'0/@{005C}-:()";
+Zcharacter table '@!!' '@<<' '@'A';
+Zcharacter table + '@AE' '@{dc}' '@et' '@:y';
+Ifnot;
+Ifdef TARGET_GLULX;
+Message "Glulx doesn't use ~Zcharacter~.^Oh well."; ! '~' and '^' work here.
+Ifnot;
+Message warning "Uh-oh! ^~^"; ! They don't work in other Messages.
+Endif;
+Endif;
+
+Include "Grammar";
+
+Verb"acquire"'collect'='take';
+
+[ NounFilter; return noun ofclass Bird; ];
+
+[ ScopeFilter obj;
+ switch (scope_stage) {
+ 1: rtrue;
+ 2: objectloop (obj in compass) PlaceInScope(obj);
+ 3: "Nothing is in scope.";
+ }
+];
+
+Verb meta "t" 'test'
+ * 'held' held -> TestHeld
+ * number -> TestNumber
+ * reversed -> TestAttribute
+ * 'creature' creature -> TestCreature
+ * 'multiheld' multiheld -> TestMultiheld
+ * 'm' multiexcept 'into'/"in" noun -> TestMultiexcept
+ * 'm' multiinside 'from' noun -> TestMultiinside
+ * multi -> TestMulti
+ * 'filter'/'f' noun=NounFilter -> TestNounFilter
+ * 'filter'/'f' scope=ScopeFilter -> TestScopeFilter
+ * 'special' special -> TestSpecial
+ * topic -> TestTopic;
+
+Verb 'reverse' 'swap' 'exchange'
+ * held 'for' noun -> reverse
+ * noun 'with' noun -> reverse reverse;
+
+Extend "t" last * noun -> TestNoun;
+
+Extend 't' first * -> Test;
+
+Extend 'wave' replace * -> NewWave;
+
+Extend only 'feel' 'touch' replace * noun -> Feel;
+
+[ TestSub a b o;
+ string 25 low_string;
+ print "Test what?> ";
+ table->0 = 260;
+ parse->0 = 61;
+ #Ifdef TARGET_ZCODE;
+ read buffer parse;
+ #Ifnot; ! TARGET_GLULX
+ KeyboardPrimitive(buffer, parse);
+ #Endif; ! TARGET_
+ switch (parse-->1) {
+ 'save':
+ #Ifdef TARGET_ZCODE;
+ #Ifv3;
+ @save ?saved;
+ #Ifnot;
+ save saved;
+ #Endif;
+ #Endif;
+ print "Saving failed.^";
+ 'restore':
+ #Ifdef TARGET_ZCODE;
+ restore saved;
+ #Endif;
+ print "Restoring failed.^";
+ 'restart':
+ @restart;
+ 'quit', 'q//':
+ quit;
+ return 2; rtrue; rfalse; return;
+ 'print', 'p//':
+ print "Print:^",
+ " (string): ", (string) "xyzzy^",
+ " (number): ", (number) 123, "^",
+ " (char): ", (char) 'x', "^",
+ " (address): ", (address) 'plugh//p', "^",
+ " (The): ", (The) person, "^",
+ " (the): ", (the) person, "^",
+ " (A): ", (A) person, "^",
+ " (a): ", (a) person, "^",
+ " (an): ", (an) person, "^",
+ " (name): ", (name) person, "^",
+ " (object): ", (object) person, "^",
+ " (property): ", (property) alias, "^",
+ " (<routine>): ", (LanguageNumber) 123, "^",
+ " <expression>: ", a * 2 - 1, "^",
+ " (<expression>): ", (a + person), "^";
+ print "Escapes:^",
+ " by mnemonic: @!! @<< @'A @AE @et @:y^",
+ " by decimal value: @@64 @@126^",
+ " by Unicode value: @{DC}@{002b}^",
+ " by string variable: @25^";
+ 'font', 'style':
+ font off; print "font off^";
+ font on; print "font on^";
+ style reverse; print "style reverse^"; style roman;
+ style bold; print "style bold^";
+ style underline; print "style underline^";
+ style fixed; print "style fixed^";
+ style roman; print "style roman^";
+ 'statements':
+ spaces 8;
+ objectloop (o) {
+ print "objectloop (o): ", (the) o, "^";
+ }
+ objectloop (o in compass) { ! 'in' is a keyword
+ print "objectloop (o in compass): ", (the) o, "^";
+ }
+ objectloop (o in compass && true) { ! 'in' is an operator
+ print "objectloop (o in compass && true): ", (the) o, "^";
+ }
+ objectloop (o from se_obj) {
+ print "objectloop (o from se_obj): ", (the) o, "^";
+ }
+ objectloop (o near person) {
+ print "objectloop (o near person): ", (the) o, "^";
+ }
+ #Ifdef TARGET_ZCODE;
+ #Trace assembly on;
+@ ! This is assembly.
+ add -4 ($$1+$3)*2 -> b;
+ @get_sibling test_tube -> b ?saved;
+ @inc [b];
+ @je sp (1+3*0) ? equal;
+ @je 1 ((sp)) ?~ different;
+ .! This is a label:
+ equal;
+ print "sp == 1^";
+ jump label;
+ .different;
+ print "sp @@126= 1^";
+ .label;
+ #Trace off; #Endif; ! TARGET_ZCODE
+ a = random(10);
+ switch (a) {
+ 1, 9:
+ box "Testing oneself is best when done alone."
+ " -- Jimmy Carter";
+ 2, 6, to, 3 to 5, to to to:
+ <Take pigeon>;
+ #Ifdef VN_1633;
+ <Jump, person>;
+ #Endif;
+ a = ##Drop;
+ < ! The angle brackets may be separated by whitespace.
+ < (a) pigeon > >;
+ default:
+ do {
+ give person general ~general;
+ } until (person provides life && ~~false);
+ if (a == 7) a = 4;
+ else a = 5;
+ }
+ 'expressions':
+ a = 1+1-1*1/1%1&1|1&&1||1==(1~=(1>(1<(1>=(1<=1)))));
+ a++; ++a; a--; --a;
+ a = person.life;
+ a = kitchen.&arr;
+ a = kitchen.#arr;
+ a = Bird::wingspan;
+ a = kitchen has general;
+ a = kitchen hasnt general;
+ a = kitchen provides arr;
+ a = person in kitchen;
+ a = person notin kitchen;
+ a = person ofclass Bird;
+ a = a == 0 or 1;
+ a = StubR();
+ a = StubR(a);
+ a = StubR(, a);
+ a = "string";
+ a = 'word';
+ a = '''; ! character
+ a = $09afAF;
+ a = $$01;
+ a = ##Eat; a = #a$Eat;
+ a = #g$self;
+ a = #n$!word;
+ a = #r$StubR;
+ a = #dict_par1;
+ default:
+ for (a = 2, b = a; (a < buffer->1 + 2) && (Bird::wingspan): ++a, b--) {
+ print (char) buffer->a;
+ }
+ new_line;
+ for (::) break;
+ }
+ .saved;;
+];
+
+[ TestNumberSub;
+ print_ret parsed_number, " is ", (number) parsed_number, ".";
+];
+
+[ TestAttributeSub; print_ret (The) noun, " has been reversed."; ];
+
+[ CreatureTest obj; return obj has animate; ];
+
+[ TestCreatureSub; print_ret (The) noun, " is a creature."; ];
+
+[ TestMultiheldSub; print_ret "You are holding ", (the) noun, "."; ];
+
+[ TestMultiexceptSub; "You test ", (the) noun, " with ", (the) second, "."; ];
+
+[ TestMultiinsideSub; "You test ", (the) noun, " from ", (the) second, "."; ];
+
+[ TestMultiSub; print_ret (The) noun, " is a thing."; ];
+
+[ TestNounFilterSub; print_ret (The) noun, " is a bird."; ];
+
+[ TestScopeFilterSub; print_ret (The) noun, " is a direction."; ];
+
+[ TestSpecialSub; "Your lucky number is ", parsed_number, "."; ];
+
+[ TestTopicSub; "You discuss a topic."; ];
+
+[ TestNounSub; "That is ", (a) noun, "."; ];
+
+[ TestHeldSub; "You are holding ", (a) noun, "."; ];
+
+[ NewWaveSub; "That would be foolish."; ];
+
+[ FeelSub; print_ret (The) noun, " feels normal."; ];
+
+[ ReverseSub from;
+ from = parent(noun);
+ move noun to parent(second);
+ if (from == to)
+ move second to to;
+ else
+ move second to from;
+ give noun to;
+ from = to;
+ give second from;
+ "You swap ", (the) noun, " and ", (the) second, ".";
+];
+
+End: The End directive ends the source code.
diff --git a/tests/examplefiles/example.mq4 b/tests/examplefiles/example.mq4
new file mode 100644
index 00000000..54a5fa60
--- /dev/null
+++ b/tests/examplefiles/example.mq4
@@ -0,0 +1,187 @@
+//+------------------------------------------------------------------+
+//| PeriodConverter.mq4 |
+//| Copyright 2006-2014, MetaQuotes Software Corp. |
+//| http://www.metaquotes.net |
+//+------------------------------------------------------------------+
+#property copyright "2006-2014, MetaQuotes Software Corp."
+#property link "http://www.mql4.com"
+#property description "Period Converter to updated format of history base"
+#property strict
+#property show_inputs
+#include <WinUser32.mqh>
+
+input int InpPeriodMultiplier=3; // Period multiplier factor
+int ExtHandle=-1;
+//+------------------------------------------------------------------+
+//| script program start function |
+//+------------------------------------------------------------------+
+void OnStart()
+ {
+ datetime time0;
+ ulong last_fpos=0;
+ long last_volume=0;
+ int i,start_pos,periodseconds;
+ int hwnd=0,cnt=0;
+//---- History header
+ int file_version=401;
+ string c_copyright;
+ string c_symbol=Symbol();
+ int i_period=Period()*InpPeriodMultiplier;
+ int i_digits=Digits;
+ int i_unused[13];
+ MqlRates rate;
+//---
+ ExtHandle=FileOpenHistory(c_symbol+(string)i_period+".hst",FILE_BIN|FILE_WRITE|FILE_SHARE_WRITE|FILE_SHARE_READ|FILE_ANSI);
+ if(ExtHandle<0)
+ return;
+ c_copyright="(C)opyright 2003, MetaQuotes Software Corp.";
+ ArrayInitialize(i_unused,0);
+//--- write history file header
+ FileWriteInteger(ExtHandle,file_version,LONG_VALUE);
+ FileWriteString(ExtHandle,c_copyright,64);
+ FileWriteString(ExtHandle,c_symbol,12);
+ FileWriteInteger(ExtHandle,i_period,LONG_VALUE);
+ FileWriteInteger(ExtHandle,i_digits,LONG_VALUE);
+ FileWriteInteger(ExtHandle,0,LONG_VALUE);
+ FileWriteInteger(ExtHandle,0,LONG_VALUE);
+ FileWriteArray(ExtHandle,i_unused,0,13);
+//--- write history file
+ periodseconds=i_period*60;
+ start_pos=Bars-1;
+ rate.open=Open[start_pos];
+ rate.low=Low[start_pos];
+ rate.high=High[start_pos];
+ rate.tick_volume=(long)Volume[start_pos];
+ rate.spread=0;
+ rate.real_volume=0;
+ //--- normalize open time
+ rate.time=Time[start_pos]/periodseconds;
+ rate.time*=periodseconds;
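+   //--- (integer division floors the time down to the start of its i_period bar;
+   //--- multiplying back yields a bar-aligned timestamp)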
+ for(i=start_pos-1; i>=0; i--)
+ {
+ if(IsStopped())
+ break;
+ time0=Time[i];
+ //--- history may be updated
+ if(i==0)
+ {
+ //--- modify index if history was updated
+ if(RefreshRates())
+ i=iBarShift(NULL,0,time0);
+ }
+ //---
+ if(time0>=rate.time+periodseconds || i==0)
+ {
+ if(i==0 && time0<rate.time+periodseconds)
+ {
+ rate.tick_volume+=(long)Volume[0];
+ if(rate.low>Low[0])
+ rate.low=Low[0];
+ if(rate.high<High[0])
+ rate.high=High[0];
+ rate.close=Close[0];
+ }
+ last_fpos=FileTell(ExtHandle);
+ last_volume=(long)Volume[i];
+ FileWriteStruct(ExtHandle,rate);
+ cnt++;
+ if(time0>=rate.time+periodseconds)
+ {
+ rate.time=time0/periodseconds;
+ rate.time*=periodseconds;
+ rate.open=Open[i];
+ rate.low=Low[i];
+ rate.high=High[i];
+ rate.close=Close[i];
+ rate.tick_volume=last_volume;
+ }
+ }
+ else
+ {
+ rate.tick_volume+=(long)Volume[i];
+ if(rate.low>Low[i])
+ rate.low=Low[i];
+ if(rate.high<High[i])
+ rate.high=High[i];
+ rate.close=Close[i];
+ }
+ }
+ FileFlush(ExtHandle);
+ Print(cnt," record(s) written");
+//--- collect incoming ticks
+ datetime last_time=LocalTime()-5;
+ while(!IsStopped())
+ {
+ datetime cur_time=LocalTime();
+ //--- check for new rates
+ if(RefreshRates())
+ {
+ time0=Time[0];
+ FileSeek(ExtHandle,last_fpos,SEEK_SET);
+      //--- is there a current bar?
+ if(time0<rate.time+periodseconds)
+ {
+ rate.tick_volume+=(long)Volume[0]-last_volume;
+ last_volume=(long)Volume[0];
+ if(rate.low>Low[0])
+ rate.low=Low[0];
+ if(rate.high<High[0])
+ rate.high=High[0];
+ rate.close=Close[0];
+ }
+ else
+ {
+         //--- no, there is a new bar
+ rate.tick_volume+=(long)Volume[1]-last_volume;
+ if(rate.low>Low[1])
+ rate.low=Low[1];
+ if(rate.high<High[1])
+ rate.high=High[1];
+         //--- write out what remains of the previous bar
+ FileWriteStruct(ExtHandle,rate);
+ last_fpos=FileTell(ExtHandle);
+ //----
+ rate.time=time0/periodseconds;
+ rate.time*=periodseconds;
+ rate.open=Open[0];
+ rate.low=Low[0];
+ rate.high=High[0];
+ rate.close=Close[0];
+ rate.tick_volume=(long)Volume[0];
+ last_volume=rate.tick_volume;
+ }
+ //----
+ FileWriteStruct(ExtHandle,rate);
+ FileFlush(ExtHandle);
+ //---
+ if(hwnd==0)
+ {
+ hwnd=WindowHandle(Symbol(),i_period);
+ if(hwnd!=0)
+ Print("Chart window detected");
+ }
+      //--- refresh the window no more than once every 2 seconds
+ if(hwnd!=0 && cur_time-last_time>=2)
+ {
+ PostMessageA(hwnd,WM_COMMAND,33324,0);
+ last_time=cur_time;
+ }
+ }
+ Sleep(50);
+ }
+//---
+ }
+//+------------------------------------------------------------------+
+//| |
+//+------------------------------------------------------------------+
+void OnDeinit(const int reason)
+ {
+//---
+ if(ExtHandle>=0)
+ {
+ FileClose(ExtHandle);
+ ExtHandle=-1;
+ }
+//---
+ }
+//+------------------------------------------------------------------+ \ No newline at end of file
diff --git a/tests/examplefiles/example.mqh b/tests/examplefiles/example.mqh
new file mode 100644
index 00000000..ee80ed52
--- /dev/null
+++ b/tests/examplefiles/example.mqh
@@ -0,0 +1,123 @@
+//+------------------------------------------------------------------+
+//| Array.mqh |
+//| Copyright 2009-2013, MetaQuotes Software Corp. |
+//| http://www.mql4.com |
+//+------------------------------------------------------------------+
+#include <Object.mqh>
+//+------------------------------------------------------------------+
+//| Class CArray |
+//| Purpose: Base class of dynamic arrays. |
+//| Derives from class CObject. |
+//+------------------------------------------------------------------+
+class CArray : public CObject
+ {
+protected:
+ int m_step_resize; // increment size of the array
+ int m_data_total; // number of elements
+   int               m_data_max;           // maximum size of the array without memory reallocation
+ int m_sort_mode; // mode of array sorting
+
+public:
+ CArray(void);
+ ~CArray(void);
+ //--- methods of access to protected data
+ int Step(void) const { return(m_step_resize); }
+ bool Step(const int step);
+ int Total(void) const { return(m_data_total); }
+ int Available(void) const { return(m_data_max-m_data_total); }
+ int Max(void) const { return(m_data_max); }
+ bool IsSorted(const int mode=0) const { return(m_sort_mode==mode); }
+ int SortMode(void) const { return(m_sort_mode); }
+ //--- cleaning method
+ void Clear(void) { m_data_total=0; }
+ //--- methods for working with files
+ virtual bool Save(const int file_handle);
+ virtual bool Load(const int file_handle);
+ //--- sorting method
+ void Sort(const int mode=0);
+
+protected:
+ virtual void QuickSort(int beg,int end,const int mode=0) { }
+ };
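+//+------------------------------------------------------------------+
+//| Usage sketch (hypothetical): a concrete subclass, say CIntArray, |
+//| would override QuickSort; then:                                  |
+//|   CIntArray arr; arr.Step(32); arr.Sort(0);                      |
+//|   if(arr.IsSorted(0)) Print(arr.Total()," sorted elements");     |
+//+------------------------------------------------------------------+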
+//+------------------------------------------------------------------+
+//| Constructor |
+//+------------------------------------------------------------------+
+CArray::CArray(void) : m_step_resize(16),
+ m_data_total(0),
+ m_data_max(0),
+ m_sort_mode(-1)
+ {
+ }
+//+------------------------------------------------------------------+
+//| Destructor |
+//+------------------------------------------------------------------+
+CArray::~CArray(void)
+ {
+ }
+//+------------------------------------------------------------------+
+//| Method Set for variable m_step_resize |
+//+------------------------------------------------------------------+
+bool CArray::Step(const int step)
+ {
+//--- check
+ if(step>0)
+ {
+ m_step_resize=step;
+ return(true);
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
+//| Sorting an array in ascending order |
+//+------------------------------------------------------------------+
+void CArray::Sort(const int mode)
+ {
+//--- check
+ if(IsSorted(mode))
+ return;
+ m_sort_mode=mode;
+ if(m_data_total<=1)
+ return;
+//--- sort
+ QuickSort(0,m_data_total-1,mode);
+ }
+//+------------------------------------------------------------------+
+//| Writing header of array to file |
+//+------------------------------------------------------------------+
+bool CArray::Save(const int file_handle)
+ {
+//--- check handle
+ if(file_handle!=INVALID_HANDLE)
+ {
+ //--- write start marker - 0xFFFFFFFFFFFFFFFF
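+      //--- (-1 as a signed long is 0xFFFFFFFFFFFFFFFF; FileWriteLong returns
+      //--- the number of bytes written, so sizeof(long) means a complete write)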
+ if(FileWriteLong(file_handle,-1)==sizeof(long))
+ {
+ //--- write array type
+ if(FileWriteInteger(file_handle,Type(),INT_VALUE)==INT_VALUE)
+ return(true);
+ }
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
+//| Reading header of array from file |
+//+------------------------------------------------------------------+
+bool CArray::Load(const int file_handle)
+ {
+//--- check handle
+ if(file_handle!=INVALID_HANDLE)
+ {
+ //--- read and check start marker - 0xFFFFFFFFFFFFFFFF
+ if(FileReadLong(file_handle)==-1)
+ {
+ //--- read and check array type
+ if(FileReadInteger(file_handle,INT_VALUE)==Type())
+ return(true);
+ }
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
diff --git a/tests/examplefiles/example.ni b/tests/examplefiles/example.ni
new file mode 100644
index 00000000..32279e80
--- /dev/null
+++ b/tests/examplefiles/example.ni
@@ -0,0 +1,57 @@
+ | | |
+"Informal by Nature"
+[ * * * ]
+by
+[ * * * ]
+David Corbett
+
+[This is a [nested] comment.]
+
+Section 1 - Use option translation
+
+Use maximum tests of at least 100 translates as (-
+@c
+Constant MAX_TESTS = {N}; —). | Section 2
+
+A room has a number called size.
+
+The Kitchen is a room. "A nondescript kitchen.“ The Kitchen has size 2.
+
+When play begins:
+ say "Testing:[line break]";
+ test 0.
+
+To test (N — number): (—
+ if (Test({N}) == (+size of the Kitchen [this should succeed]+)) {-open—brace}
+ print ”Success.^”;
+ {-close-brace} else {
+ print “Failure.^";
+ }
+]; ! You shouldn't end a routine within a phrase definition, but it works.
+[ Unused;
+ #Include "\
+@p \
+"; ! At signs hold no power here.
+! Of course, the file "@p .h" must exist.
+-).
+
+Include (-!% This is not ICL.
+
+[ Test x;
+ if (x) {x++;}
+ {–! Single line comment.}
+@inc x;
+@p At signs.
+...
+@Purpose: ...
+...
+@-...
+@c ...
+@inc x;
+@c
+@c
+ return x;
+];
+@Purpose: ...
+@-------------------------------------------------------------------------------
+-).
diff --git a/tests/examplefiles/exampleScript.cfc b/tests/examplefiles/exampleScript.cfc
new file mode 100644
index 00000000..002acbcd
--- /dev/null
+++ b/tests/examplefiles/exampleScript.cfc
@@ -0,0 +1,241 @@
+<cfscript>
+/**
+********************************************************************************
+ContentBox - A Modular Content Platform
+Copyright 2012 by Luis Majano and Ortus Solutions, Corp
+www.gocontentbox.org | www.luismajano.com | www.ortussolutions.com
+********************************************************************************
+Apache License, Version 2.0
+
+Copyright Since [2012] [Luis Majano and Ortus Solutions,Corp]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+********************************************************************************
+* A generic content service for content objects
+*/
+component extends="coldbox.system.orm.hibernate.VirtualEntityService" singleton{
+
+ // DI
+ property name="settingService" inject="id:settingService@cb";
+ property name="cacheBox" inject="cachebox";
+ property name="log" inject="logbox:logger:{this}";
+ property name="customFieldService" inject="customFieldService@cb";
+ property name="categoryService" inject="categoryService@cb";
+ property name="commentService" inject="commentService@cb";
+ property name="contentVersionService" inject="contentVersionService@cb";
+ property name="authorService" inject="authorService@cb";
+ property name="populator" inject="wirebox:populator";
+ property name="systemUtil" inject="SystemUtil@cb";
+
+ /*
+ * Constructor
+ * @entityName.hint The content entity name to bind this service to.
+ */
+ ContentService function init(entityName="cbContent"){
+ // init it
+ super.init(entityName=arguments.entityName, useQueryCaching=true);
+
+ // Test scope coloring in pygments
+ this.colorTestVar = "Just for testing pygments!";
+ cookie.colorTestVar = "";
+		client.colorTestVar = "";
+ session.colorTestVar = "";
+ application.colorTestVar = "";
+
+ return this;
+ }
+
+ /**
+ * Clear all content caches
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearAllCaches(boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content",async=arguments.async);
+ return this;
+ }
+
+ /**
+ * Clear all page wrapper caches
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearAllPageWrapperCaches(boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content-pagewrapper",async=arguments.async);
+ return this;
+ }
+
+ /**
+	* Clear page wrapper caches matching a slug partial
+ * @slug.hint The slug partial to clean on
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearPageWrapperCaches(required any slug, boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content-pagewrapper-#arguments.slug#",async=arguments.async);
+ return this;
+ }
+
+ /**
+ * Clear a page wrapper cache
+ * @slug.hint The slug to clean
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearPageWrapper(required any slug, boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ }
+
+ /**
+	* Searches published content with cool parameters; remember, published content only
+ * @searchTerm.hint The search term to search
+ * @max.hint The maximum number of records to paginate
+ * @offset.hint The offset in the pagination
+ * @asQuery.hint Return as query or array of objects, defaults to array of objects
+ * @sortOrder.hint The sorting of the search results, defaults to publishedDate DESC
+ * @isPublished.hint Search for published, non-published or both content objects [true, false, 'all']
+ * @searchActiveContent.hint Search only content titles or both title and active content. Defaults to both.
+ */
+ function searchContent(
+ any searchTerm="",
+ numeric max=0,
+ numeric offset=0,
+ boolean asQuery=false,
+ any sortOrder="publishedDate DESC",
+ any isPublished=true,
+ boolean searchActiveContent=true){
+
+ var results = {};
+ var c = newCriteria();
+
+ // only published content
+ if( isBoolean( arguments.isPublished ) ){
+ // Published bit
+ c.isEq( "isPublished", javaCast( "Boolean", arguments.isPublished ) );
+			// Published eq true, so evaluate the other params
+ if( arguments.isPublished ){
+ c.isLt("publishedDate", now() )
+ .$or( c.restrictions.isNull("expireDate"), c.restrictions.isGT("expireDate", now() ) )
+ .isEq("passwordProtection","");
+ }
+ }
+
+ // Search Criteria
+ if( len( arguments.searchTerm ) ){
+ // like disjunctions
+ c.createAlias("activeContent","ac");
+ // Do we search title and active content or just title?
+ if( arguments.searchActiveContent ){
+ c.$or( c.restrictions.like("title","%#arguments.searchTerm#%"),
+ c.restrictions.like("ac.content", "%#arguments.searchTerm#%") );
+ }
+ else{
+ c.like( "title", "%#arguments.searchTerm#%" );
+ }
+ }
+
+ // run criteria query and projections count
+ results.count = c.count( "contentID" );
+ results.content = c.resultTransformer( c.DISTINCT_ROOT_ENTITY )
+ .list(offset=arguments.offset, max=arguments.max, sortOrder=arguments.sortOrder, asQuery=arguments.asQuery);
+
+ return results;
+ }
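+
+	// Usage sketch (hypothetical wiring; "contentService" stands in for an
+	// injected instance of this component, which this file does not define):
+	//   var found = contentService.searchContent( searchTerm="cache", max=10 );
+	//   writeOutput( "#found.count# matches" );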
+
+/********************************************* PRIVATE *********************************************/
+
+
+ /**
+ * Update the content hits
+ * @contentID.hint The content id to update
+ */
+ private function syncUpdateHits(required contentID){
+ var q = new Query(sql="UPDATE cb_content SET hits = hits + 1 WHERE contentID = #arguments.contentID#").execute();
+ return this;
+ }
+
+
+ private function closureTest(){
+ methodCall(
+ param1,
+ function( arg1, required arg2 ){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ },
+ param1
+ );
+ }
+
+ private function StructliteralTest(){
+ return {
+ foo = bar,
+ brad = 'Wood',
+ func = function( arg1, required arg2 ){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ },
+ array = [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 'test',
+ 'testing',
+ 'testerton',
+ {
+ foo = true,
+ brad = false,
+ wood = null
+ }
+ ],
+ last = "final"
+ };
+ }
+
+ private function arrayliteralTest(){
+ return [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 'test',
+ 'testing',
+ 'testerton',
+ {
+ foo = true,
+ brad = false,
+ wood = null
+ },
+ 'testy-von-testavich'
+ ];
+ }
+
+}
+</cfscript> \ No newline at end of file
diff --git a/tests/examplefiles/exampleTag.cfc b/tests/examplefiles/exampleTag.cfc
new file mode 100644
index 00000000..753bb826
--- /dev/null
+++ b/tests/examplefiles/exampleTag.cfc
@@ -0,0 +1,18 @@
+<cfcomponent>
+
+ <cffunction name="init" access="public" returntype="any">
+ <cfargument name="arg1" type="any" required="true">
+ <cfset this.myVariable = arguments.arg1>
+
+ <cfreturn this>
+ </cffunction>
+
+ <cffunction name="testFunc" access="private" returntype="void">
+ <cfargument name="arg1" type="any" required="false">
+
+ <cfif structKeyExists(arguments, "arg1")>
+ <cfset writeoutput("Argument exists")>
+ </cfif>
+ </cffunction>
+
+</cfcomponent> \ No newline at end of file
diff --git a/tests/examplefiles/objc_example.m b/tests/examplefiles/objc_example.m
index 67b33022..f4f27170 100644
--- a/tests/examplefiles/objc_example.m
+++ b/tests/examplefiles/objc_example.m
@@ -30,3 +30,6 @@ NSDictionary *d = @{ @"key": @"value" };
NSNumber *n1 = @( 1 );
NSNumber *n2 = @( [a length] );
+
++ (void)f1:(NSString *)s1;
++ (void)f2:(NSString *) s2;
diff --git a/tests/examplefiles/scope.cirru b/tests/examplefiles/scope.cirru
new file mode 100644
index 00000000..728bcabf
--- /dev/null
+++ b/tests/examplefiles/scope.cirru
@@ -0,0 +1,43 @@
+
+-- https://github.com/Cirru/cirru-gopher/blob/master/code/scope.cr
+
+set a (int 2)
+
+print (self)
+
+set c (child)
+
+under c
+ under parent
+ print a
+
+print $ get c a
+
+set c x (int 3)
+print $ get c x
+
+set just-print $ code
+ print a
+
+print just-print
+
+eval (self) just-print
+eval just-print
+
+print (string "string with space")
+print (string "escapes \n \"\\")
+
+brackets ((((()))))
+
+"eval" $ string "eval"
+
+print (add $ (int 1) (int 2))
+
+print $ unwrap $
+ map (a $ int 1) (b $ int 2)
+
+print a
+ int 1
+ , b c
+ int 2
+ , d \ No newline at end of file
diff --git a/tests/examplefiles/test.apl b/tests/examplefiles/test.apl
new file mode 100644
index 00000000..26ecf971
--- /dev/null
+++ b/tests/examplefiles/test.apl
@@ -0,0 +1,26 @@
+∇ R←M COMBIN N;D;E;F;G;P
+ ⍝ Returns a matrix of every possible
+ ⍝ combination of M elements from the
+ ⍝ vector ⍳N. That is, returns a
+  ⍝ matrix with M!N rows and M columns.
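+  ⍝ e.g. (with ⎕IO=1) 2 COMBIN 3 yields the 3×2 matrix
+  ⍝ whose rows are 1 2, 1 3 and 2 3.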
+ ⍝
+ E←(⍳P←N-R←M-1)-⎕IO
+ D←R+⍳P
+ R←(P,1)⍴D
+ P←P⍴1
+ L1:→(⎕IO>1↑D←D-1)⍴0
+ P←+\P
+ G←+\¯1↓0,F←⌽P
+ E←F/E-G
+ R←(F/D),R[E+⍳⍴E;]
+ E←G
+ →L1
+∇
+
+∇ R←M QUICKEXP N
+ ⍝ Matrix exponentiation
+ B ← ⌊ 1 + 2 ⍟ N
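+  ⍝ (binary powering: e.g. N=5 gives V = 1 0 1, so R is the
+  ⍝ +.×-product of the 4th and 1st powers of M)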
+ V ← (B ⍴ 2) ⊤ N
+ L ← ⊂ M
+ R ← ⊃ +.× / V / L ⊣ { L ← (⊂ A +.× A ← ↑L) , L }¨ ⍳ B-1
+∇
diff --git a/tests/examplefiles/test.idr b/tests/examplefiles/test.idr
new file mode 100644
index 00000000..f0e96d88
--- /dev/null
+++ b/tests/examplefiles/test.idr
@@ -0,0 +1,93 @@
+module Main
+
+data Ty = TyInt | TyBool | TyFun Ty Ty
+
+interpTy : Ty -> Type
+interpTy TyInt = Int
+interpTy TyBool = Bool
+interpTy (TyFun s t) = interpTy s -> interpTy t
+
+using (G : Vect n Ty)
+
+ data Env : Vect n Ty -> Type where
+ Nil : Env Nil
+ (::) : interpTy a -> Env G -> Env (a :: G)
+
+ data HasType : (i : Fin n) -> Vect n Ty -> Ty -> Type where
+ stop : HasType fZ (t :: G) t
+ pop : HasType k G t -> HasType (fS k) (u :: G) t
+
+ lookup : HasType i G t -> Env G -> interpTy t
+ lookup stop (x :: xs) = x
+ lookup (pop k) (x :: xs) = lookup k xs
+
+ data Expr : Vect n Ty -> Ty -> Type where
+ Var : HasType i G t -> Expr G t
+ Val : (x : Int) -> Expr G TyInt
+ Lam : Expr (a :: G) t -> Expr G (TyFun a t)
+ App : Expr G (TyFun a t) -> Expr G a -> Expr G t
+ Op : (interpTy a -> interpTy b -> interpTy c) -> Expr G a -> Expr G b ->
+ Expr G c
+ If : Expr G TyBool -> Expr G a -> Expr G a -> Expr G a
+ Bind : Expr G a -> (interpTy a -> Expr G b) -> Expr G b
+
+ dsl expr
+ lambda = Lam
+ variable = Var
+ index_first = stop
+ index_next = pop
+
+ (<$>) : |(f : Expr G (TyFun a t)) -> Expr G a -> Expr G t
+ (<$>) = \f, a => App f a
+
+ pure : Expr G a -> Expr G a
+ pure = id
+
+ syntax IF [x] THEN [t] ELSE [e] = If x t e
+
+ (==) : Expr G TyInt -> Expr G TyInt -> Expr G TyBool
+ (==) = Op (==)
+
+ (<) : Expr G TyInt -> Expr G TyInt -> Expr G TyBool
+ (<) = Op (<)
+
+ instance Num (Expr G TyInt) where
+ (+) x y = Op (+) x y
+ (-) x y = Op (-) x y
+ (*) x y = Op (*) x y
+
+ abs x = IF (x < 0) THEN (-x) ELSE x
+
+ fromInteger = Val . fromInteger
+
+ interp : Env G -> {static} Expr G t -> interpTy t
+ interp env (Var i) = lookup i env
+ interp env (Val x) = x
+ interp env (Lam sc) = \x => interp (x :: env) sc
+ interp env (App f s) = (interp env f) (interp env s)
+ interp env (Op op x y) = op (interp env x) (interp env y)
+ interp env (If x t e) = if (interp env x) then (interp env t) else (interp env e)
+ interp env (Bind v f) = interp env (f (interp env v))
+
+ eId : Expr G (TyFun TyInt TyInt)
+ eId = expr (\x => x)
+
+ eTEST : Expr G (TyFun TyInt (TyFun TyInt TyInt))
+ eTEST = expr (\x, y => y)
+
+ eAdd : Expr G (TyFun TyInt (TyFun TyInt TyInt))
+ eAdd = expr (\x, y => Op (+) x y)
+
+ eDouble : Expr G (TyFun TyInt TyInt)
+ eDouble = expr (\x => App (App eAdd x) (Var stop))
+
+ eFac : Expr G (TyFun TyInt TyInt)
+ eFac = expr (\x => IF x == 0 THEN 1 ELSE [| eFac (x - 1) |] * x)
+
+testFac : Int
+testFac = interp [] eFac 4
+
+main : IO ()
+main = print testFac
+
+
diff --git a/tests/examplefiles/test.mask b/tests/examplefiles/test.mask
new file mode 100644
index 00000000..39134d74
--- /dev/null
+++ b/tests/examplefiles/test.mask
@@ -0,0 +1,41 @@
+
+// comment
+h4.class-1#id.class-2.other checked='true' disabled name = x param > 'Enter ..'
+input placeholder=Password type=password >
+	:dualbind x-signal='dom:create' value=user.password;
+% each='flowers' >
+ div style='
+ position: absolute;
+ display: inline-block;
+ background: url("image.png") center center no-repeat;
+ ';
+#skippedDiv.other {
+ img src='~[url]';
+ div style="text-align:center;" {
+ '~[: $obj.foo("username", name) + 2]'
+ "~[Localize: stringId]"
+ }
+
+ p > """
+
+ Hello "world"
+ """
+
+ p > '
+ Hello "world"
+ '
+
+ p > "Hello 'world'"
+
+ :customComponent x-value='tt';
+ /* footer > '(c) 2014' */
+}
+
+.skippedDiv >
+ span >
+ #skipped >
+ table >
+ td >
+ tr > ';)'
+
+br; \ No newline at end of file
diff --git a/tests/examplefiles/test.pig b/tests/examplefiles/test.pig
new file mode 100644
index 00000000..f67b0268
--- /dev/null
+++ b/tests/examplefiles/test.pig
@@ -0,0 +1,148 @@
+/**
+ * This script is an example recommender (using made up data) showing how you might modify item-item links
+ * by defining similar relations between items in a dataset and customizing the change in weighting.
+ * This example creates metadata by using the genre field as the metadata_field. Items with
+ * the same genre have their weight cut in half in order to boost the signals of movies that do not share a genre.
+ * This technique requires a customization of the standard GetItemItemRecommendations macro
+ */
+import 'recommenders.pig';
+
+
+
+%default INPUT_PATH_PURCHASES '../data/retail/purchases.json'
+%default INPUT_PATH_WISHLIST '../data/retail/wishlists.json'
+%default INPUT_PATH_INVENTORY '../data/retail/inventory.json'
+%default OUTPUT_PATH '../data/retail/out/modify_item_item'
+
+
+/******** Custom GetItemItemRecommendations *********/
+define recsys__GetItemItemRecommendations_ModifyCustom(user_item_signals, metadata) returns item_item_recs {
+
+ -- Convert user_item_signals to an item_item_graph
+ ii_links_raw, item_weights = recsys__BuildItemItemGraph(
+ $user_item_signals,
+ $LOGISTIC_PARAM,
+ $MIN_LINK_WEIGHT,
+ $MAX_LINKS_PER_USER
+ );
+ -- NOTE this function is added in order to combine metadata with item-item links
+    -- See macro for a more detailed explanation
+ ii_links_metadata = recsys__AddMetadataToItemItemLinks(
+ ii_links_raw,
+ $metadata
+ );
+
+ /********* Custom Code starts here ********/
+
+ --The code here should adjust the weights based on an item-item link and the equality of metadata.
+ -- In this case, if the metadata is the same, the weight is reduced. Otherwise the weight is left alone.
+ ii_links_adjusted = foreach ii_links_metadata generate item_A, item_B,
+    -- the amount of weight adjustment is dependent on the domain of the data and what is expected
+    -- It is always best to adjust the weight by multiplying it by a factor rather than adding a constant
+ (metadata_B == metadata_A ? (weight * 0.5): weight) as weight;
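+    -- e.g. (hypothetical values): a link (item_A 'Alien', item_B 'Aliens', weight 1.0)
+    -- whose items share the genre metadata comes out with weight 0.5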
+
+
+ /******** Custom Code stops here *********/
+
+    -- remove negative weights just in case
+ ii_links_adjusted_filt = foreach ii_links_adjusted generate item_A, item_B,
+ (weight <= 0 ? 0: weight) as weight;
+ -- Adjust the weights of the graph to improve recommendations.
+ ii_links = recsys__AdjustItemItemGraphWeight(
+ ii_links_adjusted_filt,
+ item_weights,
+ $BAYESIAN_PRIOR
+ );
+
+ -- Use the item-item graph to create item-item recommendations.
+ $item_item_recs = recsys__BuildItemItemRecommendationsFromGraph(
+ ii_links,
+ $NUM_RECS_PER_ITEM,
+ $NUM_RECS_PER_ITEM
+ );
+};
+
+
+/******* Load Data **********/
+
+--Get purchase signals
+purchase_input = load '$INPUT_PATH_PURCHASES' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'row_id: int,
+ movie_id: chararray,
+ movie_name: chararray,
+ user_id: chararray,
+ purchase_price: int');
+
+--Get wishlist signals
+wishlist_input = load '$INPUT_PATH_WISHLIST' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'row_id: int,
+ movie_id: chararray,
+ movie_name: chararray,
+ user_id: chararray');
+
+
+/******* Convert Data to Signals **********/
+
+-- Start with choosing 1 as max weight for a signal.
+purchase_signals = foreach purchase_input generate
+ user_id as user,
+ movie_name as item,
+ 1.0 as weight;
+
+
+-- Start with choosing 0.5 as weight for wishlist items because that is a weaker signal than
+-- purchasing an item.
+wishlist_signals = foreach wishlist_input generate
+ user_id as user,
+ movie_name as item,
+ 0.5 as weight;
+
+user_signals = union purchase_signals, wishlist_signals;
+
+
+/******** Changes for Modifying item-item links ******/
+inventory_input = load '$INPUT_PATH_INVENTORY' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'movie_title: chararray,
+ genres: bag{tuple(content:chararray)}');
+
+
+metadata = foreach inventory_input generate
+ FLATTEN(genres) as metadata_field,
+ movie_title as item;
+-- requires the macro to be written separately
+-- NOTE: this macro is defined within this file for clarity
+item_item_recs = recsys__GetItemItemRecommendations_ModifyCustom(user_signals, metadata);
+/******* No more changes ********/
+
+
+user_item_recs = recsys__GetUserItemRecommendations(user_signals, item_item_recs);
+
+--Completely unrelated code stuck in the middle
+data = LOAD 's3n://my-s3-bucket/path/to/responses'
+ USING org.apache.pig.piggybank.storage.JsonLoader();
+responses = FOREACH data GENERATE object#'response' AS response: map[];
+out = FOREACH responses
+ GENERATE response#'id' AS id: int, response#'thread' AS thread: chararray,
+ response#'comments' AS comments: {t: (comment: chararray)};
+STORE out INTO 's3n://path/to/output' USING PigStorage('|');
+
+
+/******* Store recommendations **********/
+
+-- If your output folder exists already, hadoop will refuse to write data to it.
+
+rmf $OUTPUT_PATH/item_item_recs;
+rmf $OUTPUT_PATH/user_item_recs;
+
+store item_item_recs into '$OUTPUT_PATH/item_item_recs' using PigStorage();
+store user_item_recs into '$OUTPUT_PATH/user_item_recs' using PigStorage();
+
+-- STORE the item_item_recs into dynamo
+STORE item_item_recs
+ INTO '$OUTPUT_PATH/unused-ii-table-data'
+USING com.mortardata.pig.storage.DynamoDBStorage('$II_TABLE', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');
+
+-- STORE the user_item_recs into dynamo
+STORE user_item_recs
+ INTO '$OUTPUT_PATH/unused-ui-table-data'
+USING com.mortardata.pig.storage.DynamoDBStorage('$UI_TABLE', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');
diff --git a/tests/examplefiles/test.zep b/tests/examplefiles/test.zep
new file mode 100644
index 00000000..4724d4c4
--- /dev/null
+++ b/tests/examplefiles/test.zep
@@ -0,0 +1,33 @@
+namespace Test;
+
+use Test\Foo;
+
+class Bar
+{
+ protected a;
+ private b;
+ public c {set, get};
+
+ public function __construct(string str, boolean bool)
+ {
+ let this->c = str;
+ this->setC(bool);
+ let this->b = [];
+ }
+
+ public function sayHello(string name)
+ {
+ echo "Hello " . name;
+ }
+
+ protected function loops()
+ {
+ for a in b {
+ echo a;
+ }
+ loop {
+ return "boo!";
+ }
+ }
+
+} \ No newline at end of file
diff --git a/tests/old_run.py b/tests/old_run.py
deleted file mode 100644
index 66044955..00000000
--- a/tests/old_run.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- Pygments unit tests
- ~~~~~~~~~~~~~~~~~~
-
- Usage::
-
- python run.py [testfile ...]
-
-
- :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys, os
-import unittest
-
-from os.path import dirname, basename, join, abspath
-
-import pygments
-
-try:
- import coverage
-except ImportError:
- coverage = None
-
-testdir = abspath(dirname(__file__))
-
-failed = []
-total_test_count = 0
-error_test_count = 0
-
-
-def err(file, what, exc):
- print >>sys.stderr, file, 'failed %s:' % what,
- print >>sys.stderr, exc
- failed.append(file[:-3])
-
-
-class QuietTestRunner(object):
- """Customized test runner for relatively quiet output"""
-
- def __init__(self, testname, stream=sys.stderr):
- self.testname = testname
- self.stream = unittest._WritelnDecorator(stream)
-
- def run(self, test):
- global total_test_count
- global error_test_count
- result = unittest._TextTestResult(self.stream, True, 1)
- test(result)
- if not result.wasSuccessful():
- self.stream.write(' FAIL:')
- result.printErrors()
- failed.append(self.testname)
- else:
- self.stream.write(' ok\n')
- total_test_count += result.testsRun
- error_test_count += len(result.errors) + len(result.failures)
- return result
-
-
-def run_tests(with_coverage=False):
- # needed to avoid confusion involving atexit handlers
- import logging
-
- if sys.argv[1:]:
- # test only files given on cmdline
- files = [entry + '.py' for entry in sys.argv[1:] if entry.startswith('test_')]
- else:
- files = [entry for entry in os.listdir(testdir)
- if (entry.startswith('test_') and entry.endswith('.py'))]
- files.sort()
-
- WIDTH = 85
-
- print >>sys.stderr, \
- ('Pygments %s Test Suite running%s, stand by...' %
- (pygments.__version__,
- with_coverage and " with coverage analysis" or "")).center(WIDTH)
- print >>sys.stderr, ('(using Python %s)' % sys.version.split()[0]).center(WIDTH)
- print >>sys.stderr, '='*WIDTH
-
- if with_coverage:
- coverage.erase()
- coverage.start()
-
- for testfile in files:
- globs = {'__file__': join(testdir, testfile)}
- try:
- execfile(join(testdir, testfile), globs)
- except Exception, exc:
- raise
- err(testfile, 'execfile', exc)
- continue
- sys.stderr.write(testfile[:-3] + ': ')
- try:
- runner = QuietTestRunner(testfile[:-3])
- # make a test suite of all TestCases in the file
- tests = []
- for name, thing in globs.iteritems():
- if name.endswith('Test'):
- tests.append((name, unittest.makeSuite(thing)))
- tests.sort()
- suite = unittest.TestSuite()
- suite.addTests([x[1] for x in tests])
- runner.run(suite)
- except Exception, exc:
- err(testfile, 'running test', exc)
-
- print >>sys.stderr, '='*WIDTH
- if failed:
- print >>sys.stderr, '%d of %d tests failed.' % \
- (error_test_count, total_test_count)
- print >>sys.stderr, 'Tests failed in:', ', '.join(failed)
- ret = 1
- else:
- if total_test_count == 1:
- print >>sys.stderr, '1 test happy.'
- else:
- print >>sys.stderr, 'All %d tests happy.' % total_test_count
- ret = 0
-
- if with_coverage:
- coverage.stop()
- modules = [mod for name, mod in sys.modules.iteritems()
- if name.startswith('pygments.') and mod]
- coverage.report(modules)
-
- return ret
-
-
-if __name__ == '__main__':
- with_coverage = False
- if sys.argv[1:2] == ['-C']:
- with_coverage = bool(coverage)
- del sys.argv[1]
- sys.exit(run_tests(with_coverage))
diff --git a/tests/run.py b/tests/run.py
index 6c22fdc8..e87837e5 100644
--- a/tests/run.py
+++ b/tests/run.py
@@ -12,42 +12,33 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import sys, os
-if sys.version_info >= (3,):
- # copy test suite over to "build/lib" and convert it
- print ('Copying and converting sources to build/lib/test...')
- from distutils.util import copydir_run_2to3
- testroot = os.path.dirname(__file__)
- newroot = os.path.join(testroot, '..', 'build/lib/test')
- copydir_run_2to3(testroot, newroot)
- # make nose believe that we run from the converted dir
- os.chdir(newroot)
-else:
- # only find tests in this directory
- if os.path.dirname(__file__):
- os.chdir(os.path.dirname(__file__))
+# only find tests in this directory
+if os.path.dirname(__file__):
+ os.chdir(os.path.dirname(__file__))
try:
import nose
except ImportError:
- print ('nose is required to run the Pygments test suite')
+ print('nose is required to run the Pygments test suite')
sys.exit(1)
try:
# make sure the current source is first on sys.path
sys.path.insert(0, '..')
import pygments
-except SyntaxError:
- print ('Syntax error: %s' % sys.exc_info()[1])
- print ('Please run setup.py build before make test on Python 3')
+except SyntaxError as err:
+ print('Syntax error: %s' % err)
sys.exit(1)
-except ImportError:
- print ('Cannot find Pygments to test: %s' % sys.exc_info()[1])
+except ImportError as err:
+ print('Cannot find Pygments to test: %s' % err)
sys.exit(1)
else:
- print ('Pygments %s test suite running (Python %s)...' %
- (pygments.__version__, sys.version.split()[0]))
+ print('Pygments %s test suite running (Python %s)...' %
+ (pygments.__version__, sys.version.split()[0]))
nose.main()
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 1c6933a7..be7a4747 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -7,7 +7,8 @@
:license: BSD, see LICENSE for details.
"""
-import os
+from __future__ import print_function
+
import random
import unittest
@@ -15,7 +16,7 @@ from pygments import lexers, formatters, filters, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
-from pygments.util import BytesIO, StringIO, bytes, b
+from pygments.util import text_type, StringIO, xrange, ClassNotFound
import support
@@ -28,7 +29,7 @@ test_content = ''.join(test_content) + '\n'
def test_lexer_import_all():
# instantiate every lexer, to see if the token type defs are correct
- for x in lexers.LEXERS.keys():
+ for x in lexers.LEXERS:
c = getattr(lexers, x)()
@@ -71,8 +72,8 @@ def test_lexer_classes():
assert isinstance(token, tuple)
assert isinstance(token[0], _TokenType)
if isinstance(token[1], str):
- print repr(token[1])
- assert isinstance(token[1], unicode)
+ print(repr(token[1]))
+ assert isinstance(token[1], text_type)
txt += token[1]
assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
(cls.name, test_content, txt)
@@ -99,7 +100,8 @@ def test_lexer_options():
'SqliteConsoleLexer', 'MatlabSessionLexer', 'ErlangShellLexer',
'BashSessionLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
'PostgresConsoleLexer', 'ElixirConsoleLexer', 'JuliaConsoleLexer',
- 'RobotFrameworkLexer', 'DylanConsoleLexer', 'ShellSessionLexer'):
+ 'RobotFrameworkLexer', 'DylanConsoleLexer', 'ShellSessionLexer',
+ 'LiterateIdrisLexer'):
inst = cls(ensurenl=False)
ensure(inst.get_tokens('a\nb'), 'a\nb')
inst = cls(ensurenl=False, stripall=True)
@@ -127,7 +129,7 @@ def test_get_lexers():
]:
yield verify, func, args
- for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.iteritems():
+ for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items():
assert cls == lexers.find_lexer_class(lname).__name__
for alias in aliases:
@@ -136,6 +138,13 @@ def test_get_lexers():
for mimetype in mimetypes:
assert cls == lexers.get_lexer_for_mimetype(mimetype).__class__.__name__
+ try:
+ lexers.get_lexer_by_name(None)
+ except ClassNotFound:
+ pass
+ else:
+ raise Exception
+
def test_formatter_public_api():
ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
@@ -162,7 +171,7 @@ def test_formatter_public_api():
pass
inst.format(ts, out)
- for formatter, info in formatters.FORMATTERS.iteritems():
+ for formatter, info in formatters.FORMATTERS.items():
yield verify, formatter, info
def test_formatter_encodings():
@@ -172,7 +181,7 @@ def test_formatter_encodings():
fmt = HtmlFormatter()
tokens = [(Text, u"ä")]
out = format(tokens, fmt)
- assert type(out) is unicode
+ assert type(out) is text_type
assert u"ä" in out
# encoding option
@@ -201,7 +210,7 @@ def test_formatter_unicode_handling():
if formatter.name != 'Raw tokens':
out = format(tokens, inst)
if formatter.unicodeoutput:
- assert type(out) is unicode
+ assert type(out) is text_type
inst = formatter(encoding='utf-8')
out = format(tokens, inst)
@@ -213,7 +222,7 @@ def test_formatter_unicode_handling():
out = format(tokens, inst)
assert type(out) is bytes, '%s: %r' % (formatter, out)
- for formatter, info in formatters.FORMATTERS.iteritems():
+ for formatter, info in formatters.FORMATTERS.items():
yield verify, formatter
@@ -241,7 +250,7 @@ class FiltersTest(unittest.TestCase):
'whitespace': {'spaces': True, 'tabs': True, 'newlines': True},
'highlight': {'names': ['isinstance', 'lexers', 'x']},
}
- for x in filters.FILTERS.keys():
+ for x in filters.FILTERS:
lx = lexers.PythonLexer()
lx.add_filter(x, **filter_args.get(x, {}))
fp = open(TESTFILE, 'rb')
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index cbb05db7..ef14661c 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -9,11 +9,12 @@
# Test the command line interface
-import sys, os
+import io
+import sys
import unittest
-import StringIO
from pygments import highlight
+from pygments.util import StringIO
from pygments.cmdline import main as cmdline_main
import support
@@ -24,8 +25,8 @@ TESTFILE, TESTDIR = support.location(__file__)
def run_cmdline(*args):
saved_stdout = sys.stdout
saved_stderr = sys.stderr
- new_stdout = sys.stdout = StringIO.StringIO()
- new_stderr = sys.stderr = StringIO.StringIO()
+ new_stdout = sys.stdout = StringIO()
+ new_stderr = sys.stderr = StringIO()
try:
ret = cmdline_main(["pygmentize"] + list(args))
finally:
diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py
index 79b854ad..0547ffd3 100644
--- a/tests/test_examplefiles.py
+++ b/tests/test_examplefiles.py
@@ -7,14 +7,16 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import pprint
import difflib
-import cPickle as pickle
+import pickle
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error
-from pygments.util import ClassNotFound, b
+from pygments.util import ClassNotFound
STORE_OUTPUT = False
@@ -32,7 +34,7 @@ def test_example_files():
if not os.path.isfile(absfn):
continue
- print absfn
+ print(absfn)
code = open(absfn, 'rb').read()
try:
code = code.decode('utf-8')
@@ -63,8 +65,8 @@ def check_lexer(lx, absfn, outfn):
text = fp.read()
finally:
fp.close()
- text = text.replace(b('\r\n'), b('\n'))
- text = text.strip(b('\n')) + b('\n')
+ text = text.replace(b'\r\n', b'\n')
+ text = text.strip(b'\n') + b'\n'
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
@@ -80,8 +82,8 @@ def check_lexer(lx, absfn, outfn):
(lx, absfn, val, len(u''.join(ntext)))
tokens.append((type, val))
if u''.join(ntext) != text:
- print '\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
- text.splitlines()))
+ print('\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
+ text.splitlines())))
raise AssertionError('round trip failed for ' + absfn)
# check output against previous run if enabled
@@ -103,6 +105,6 @@ def check_lexer(lx, absfn, outfn):
if stored_tokens != tokens:
f1 = pprint.pformat(stored_tokens)
f2 = pprint.pformat(tokens)
- print '\n'.join(difflib.unified_diff(f1.splitlines(),
- f2.splitlines()))
+ print('\n'.join(difflib.unified_diff(f1.splitlines(),
+ f2.splitlines())))
assert False, absfn
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 5dd2e127..91225cd3 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -7,23 +7,25 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
+import io
import os
import re
import unittest
-import StringIO
import tempfile
from os.path import join, dirname, isfile
+from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
-from pygments.util import uni_open
import support
TESTFILE, TESTDIR = support.location(__file__)
-fp = uni_open(TESTFILE, encoding='utf-8')
+fp = io.open(TESTFILE, encoding='utf-8')
try:
tokensource = list(PythonLexer().get_tokens(fp.read()))
finally:
@@ -33,11 +35,11 @@ finally:
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
- houtfile = StringIO.StringIO()
+ houtfile = StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
- noutfile = StringIO.StringIO()
+ noutfile = StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
@@ -74,13 +76,13 @@ class HtmlFormatterTest(unittest.TestCase):
dict(linenos=True, full=True),
dict(linenos=True, full=True, noclasses=True)]:
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
def test_linenos(self):
optdict = dict(linenos=True)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -88,7 +90,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_linenos_with_startnum(self):
optdict = dict(linenos=True, linenostart=5)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -96,7 +98,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_lineanchors(self):
optdict = dict(lineanchors="foo")
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -104,7 +106,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -132,7 +134,7 @@ class HtmlFormatterTest(unittest.TestCase):
pass
else:
if ret:
- print output
+ print(output)
self.assertFalse(ret, 'nsgmls run reported errors')
os.unlink(pathname)
@@ -172,7 +174,7 @@ class HtmlFormatterTest(unittest.TestCase):
# anymore in the actual source
fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
tagurlformat='%(fname)s%(fext)s')
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt.format(tokensource, outfile)
self.assertTrue('<a href="test_html_formatter.py#L-165">test_ctags</a>'
in outfile.getvalue())
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
index 1156cc51..13ae87cd 100644
--- a/tests/test_latex_formatter.py
+++ b/tests/test_latex_formatter.py
@@ -7,6 +7,8 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import unittest
import tempfile
@@ -48,7 +50,7 @@ class LatexFormatterTest(unittest.TestCase):
pass
else:
if ret:
- print output
+ print(output)
self.assertFalse(ret, 'latex run reported errors')
os.unlink(pathname)
diff --git a/tests/test_token.py b/tests/test_token.py
index 26cc772e..c5cc4990 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -36,11 +36,11 @@ class TokenTest(unittest.TestCase):
stp = token.STANDARD_TYPES.copy()
stp[token.Token] = '---' # Token and Text do conflict, that is okay
t = {}
- for k, v in stp.iteritems():
+ for k, v in stp.items():
t.setdefault(v, []).append(k)
if len(t) == len(stp):
return # Okay
- for k, v in t.iteritems():
+ for k, v in t.items():
if len(v) > 1:
self.fail("%r has more than one key: %r" % (k, v))