summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTim Hatch <tim@timhatch.com>2014-04-14 13:03:29 -0400
committerTim Hatch <tim@timhatch.com>2014-04-14 13:03:29 -0400
commite0efb984487bb2e99bd2684689fee6ded477a478 (patch)
treebca99bafdaa9aa26b42a309157d973c253b49927
parentae6b1507776410421b97faf0054111e34526dcbc (diff)
parent68296591d500a2106c8fe2b7dccd144b58a2d6b7 (diff)
downloadpygments-e0efb984487bb2e99bd2684689fee6ded477a478.tar.gz
Merged in brodie/pygments (pull request #191)
Conflicts: pygments/lexers/jvm.py
-rw-r--r--.hgignore2
-rw-r--r--AUTHORS29
-rw-r--r--CHANGES89
-rw-r--r--LICENSE2
-rw-r--r--MANIFEST.in2
-rw-r--r--Makefile13
-rw-r--r--doc/Makefile153
-rw-r--r--doc/_static/favicon.icobin0 -> 16958 bytes
-rw-r--r--doc/_static/logo_new.pngbin0 -> 40944 bytes
-rw-r--r--doc/_static/logo_only.pngbin0 -> 16424 bytes
-rw-r--r--doc/_templates/docssidebar.html3
-rw-r--r--doc/_templates/indexsidebar.html25
-rw-r--r--doc/_themes/pygments14/layout.html98
-rw-r--r--doc/_themes/pygments14/static/bodybg.pngbin0 -> 51903 bytes
-rw-r--r--doc/_themes/pygments14/static/docbg.pngbin0 -> 61296 bytes
-rw-r--r--doc/_themes/pygments14/static/listitem.pngbin0 -> 207 bytes
-rw-r--r--doc/_themes/pygments14/static/logo.pngbin0 -> 26933 bytes
-rw-r--r--doc/_themes/pygments14/static/pocoo.pngbin0 -> 2154 bytes
-rw-r--r--doc/_themes/pygments14/static/pygments14.css_t401
-rw-r--r--doc/_themes/pygments14/theme.conf15
-rw-r--r--doc/conf.py249
-rw-r--r--doc/docs/api.rst316
-rw-r--r--doc/docs/authors.rst4
-rw-r--r--doc/docs/changelog.rst1
-rw-r--r--doc/docs/cmdline.rst (renamed from docs/src/cmdline.txt)20
-rw-r--r--doc/docs/filterdevelopment.rst (renamed from docs/src/filterdevelopment.txt)2
-rw-r--r--doc/docs/filters.rst (renamed from docs/src/filters.txt)9
-rw-r--r--doc/docs/formatterdevelopment.rst (renamed from docs/src/formatterdevelopment.txt)2
-rw-r--r--doc/docs/formatters.rst (renamed from docs/src/formatters.txt)12
-rw-r--r--doc/docs/index.rst66
-rw-r--r--doc/docs/integrate.rst (renamed from docs/src/integrate.txt)10
-rw-r--r--doc/docs/java.rst (renamed from docs/src/java.txt)0
-rw-r--r--doc/docs/lexerdevelopment.rst (renamed from docs/src/lexerdevelopment.txt)27
-rw-r--r--doc/docs/lexers.rst (renamed from docs/src/lexers.txt)12
-rw-r--r--doc/docs/moinmoin.rst (renamed from docs/src/moinmoin.txt)0
-rw-r--r--doc/docs/plugins.rst (renamed from docs/src/plugins.txt)0
-rw-r--r--doc/docs/quickstart.rst (renamed from docs/src/quickstart.txt)41
-rw-r--r--doc/docs/rstdirective.rst (renamed from docs/src/rstdirective.txt)0
-rw-r--r--doc/docs/styles.rst (renamed from docs/src/styles.txt)4
-rw-r--r--doc/docs/tokens.rst (renamed from docs/src/tokens.txt)14
-rw-r--r--doc/docs/unicode.rst (renamed from docs/src/unicode.txt)15
-rw-r--r--doc/download.rst41
-rw-r--r--doc/faq.rst143
-rw-r--r--doc/index.rst53
-rw-r--r--doc/languages.rst148
-rw-r--r--doc/make.bat190
-rw-r--r--doc/pygmentize.1 (renamed from docs/pygmentize.1)0
-rwxr-xr-xdocs/generate.py472
-rw-r--r--docs/src/api.txt270
-rw-r--r--docs/src/authors.txt5
-rw-r--r--docs/src/changelog.txt5
-rw-r--r--docs/src/index.txt69
-rw-r--r--docs/src/installation.txt71
-rwxr-xr-xexternal/autopygmentize9
-rw-r--r--external/markdown-processor.py30
-rw-r--r--external/moin-parser.py2
-rw-r--r--external/rst-directive-old.py77
-rw-r--r--external/rst-directive.py5
-rw-r--r--[-rwxr-xr-x]ez_setup.py574
-rwxr-xr-xpygmentize2
-rw-r--r--pygments/__init__.py8
-rw-r--r--pygments/cmdline.py149
-rw-r--r--pygments/console.py2
-rw-r--r--pygments/filter.py2
-rw-r--r--pygments/filters/__init__.py24
-rw-r--r--pygments/formatter.py9
-rw-r--r--pygments/formatters/__init__.py8
-rwxr-xr-xpygments/formatters/_mapping.py10
-rw-r--r--pygments/formatters/bbcode.py2
-rw-r--r--pygments/formatters/html.py69
-rw-r--r--pygments/formatters/img.py35
-rw-r--r--pygments/formatters/latex.py114
-rw-r--r--pygments/formatters/other.py11
-rw-r--r--pygments/formatters/rtf.py4
-rw-r--r--pygments/formatters/svg.py4
-rw-r--r--pygments/formatters/terminal.py2
-rw-r--r--pygments/formatters/terminal256.py4
-rw-r--r--pygments/lexer.py52
-rw-r--r--pygments/lexers/__init__.py64
-rw-r--r--pygments/lexers/_asybuiltins.py2
-rw-r--r--pygments/lexers/_clbuiltins.py30
-rw-r--r--pygments/lexers/_cocoabuiltins.py73
-rw-r--r--pygments/lexers/_lassobuiltins.py12
-rw-r--r--pygments/lexers/_luabuiltins.py22
-rw-r--r--pygments/lexers/_mapping.py109
-rw-r--r--pygments/lexers/_openedgebuiltins.py2
-rw-r--r--pygments/lexers/_phpbuiltins.py16
-rw-r--r--pygments/lexers/_postgres_builtins.py14
-rw-r--r--pygments/lexers/_robotframeworklexer.py9
-rw-r--r--pygments/lexers/_scilab_builtins.py2
-rw-r--r--pygments/lexers/_sourcemodbuiltins.py19
-rw-r--r--pygments/lexers/_stan_builtins.py123
-rw-r--r--pygments/lexers/_vimbuiltins.py2
-rw-r--r--pygments/lexers/agile.py607
-rw-r--r--pygments/lexers/asm.py136
-rw-r--r--pygments/lexers/compiled.py1672
-rw-r--r--pygments/lexers/dalvik.py4
-rw-r--r--pygments/lexers/dotnet.py155
-rw-r--r--pygments/lexers/foxpro.py6
-rw-r--r--pygments/lexers/functional.py540
-rw-r--r--pygments/lexers/hdl.py14
-rw-r--r--pygments/lexers/jvm.py313
-rw-r--r--pygments/lexers/math.py443
-rw-r--r--pygments/lexers/other.py385
-rw-r--r--pygments/lexers/parsers.py44
-rw-r--r--pygments/lexers/shell.py72
-rw-r--r--pygments/lexers/special.py15
-rw-r--r--pygments/lexers/sql.py28
-rw-r--r--pygments/lexers/templates.py92
-rw-r--r--pygments/lexers/text.py162
-rw-r--r--pygments/lexers/web.py1439
-rw-r--r--pygments/modeline.py40
-rw-r--r--pygments/plugin.py2
-rw-r--r--pygments/scanner.py2
-rw-r--r--pygments/sphinxext.py153
-rw-r--r--pygments/style.py5
-rw-r--r--pygments/styles/__init__.py4
-rw-r--r--pygments/styles/autumn.py2
-rw-r--r--pygments/styles/borland.py2
-rw-r--r--pygments/styles/bw.py2
-rw-r--r--pygments/styles/colorful.py2
-rw-r--r--pygments/styles/default.py2
-rw-r--r--pygments/styles/emacs.py2
-rw-r--r--pygments/styles/friendly.py2
-rw-r--r--pygments/styles/fruity.py2
-rw-r--r--pygments/styles/igor.py29
-rw-r--r--pygments/styles/manni.py2
-rw-r--r--pygments/styles/monokai.py8
-rw-r--r--pygments/styles/murphy.py2
-rw-r--r--pygments/styles/native.py2
-rw-r--r--pygments/styles/pastie.py2
-rw-r--r--pygments/styles/perldoc.py2
-rw-r--r--pygments/styles/rrt.py2
-rw-r--r--pygments/styles/tango.py2
-rw-r--r--pygments/styles/trac.py2
-rw-r--r--pygments/styles/vim.py2
-rw-r--r--pygments/styles/vs.py2
-rw-r--r--pygments/styles/xcode.py50
-rw-r--r--pygments/token.py4
-rw-r--r--pygments/unistring.py7
-rw-r--r--pygments/util.py51
-rwxr-xr-xscripts/check_sources.py49
-rw-r--r--scripts/detect_missing_analyse_text.py9
-rwxr-xr-xscripts/find_codetags.py46
-rwxr-xr-xscripts/find_error.py29
-rw-r--r--scripts/get_vimkw.py6
-rwxr-xr-xscripts/reindent.py291
-rwxr-xr-x[-rw-r--r--]scripts/vim2pygments.py16
-rwxr-xr-xsetup.py38
-rw-r--r--tests/examplefiles/99_bottles_of_beer.chpl118
-rwxr-xr-xtests/examplefiles/Deflate.fs578
-rw-r--r--tests/examplefiles/Error.pmod38
-rw-r--r--tests/examplefiles/FakeFile.pike360
-rw-r--r--tests/examplefiles/Get-CommandDefinitionHtml.ps166
-rw-r--r--tests/examplefiles/IPDispatchC.nc104
-rw-r--r--tests/examplefiles/IPDispatchP.nc671
-rw-r--r--tests/examplefiles/RoleQ.pm623
-rw-r--r--tests/examplefiles/antlr_ANTLRv3.g (renamed from tests/examplefiles/ANTLRv3.g)0
-rw-r--r--tests/examplefiles/example.e124
-rw-r--r--tests/examplefiles/example.gd23
-rw-r--r--tests/examplefiles/example.gi64
-rw-r--r--tests/examplefiles/example.hx142
-rw-r--r--tests/examplefiles/example.i6t32
-rw-r--r--tests/examplefiles/example.i7x45
-rw-r--r--tests/examplefiles/example.inf374
-rw-r--r--tests/examplefiles/example.kal75
-rw-r--r--tests/examplefiles/example.lagda19
-rw-r--r--tests/examplefiles/example.ma8
-rw-r--r--tests/examplefiles/example.mq4187
-rw-r--r--tests/examplefiles/example.mqh123
-rw-r--r--tests/examplefiles/example.ni57
-rw-r--r--tests/examplefiles/example.nix80
-rw-r--r--tests/examplefiles/example.rexx50
-rw-r--r--tests/examplefiles/example.stan2
-rw-r--r--tests/examplefiles/exampleScript.cfc241
-rw-r--r--tests/examplefiles/exampleTag.cfc18
-rw-r--r--tests/examplefiles/example_elixir.ex4
-rw-r--r--tests/examplefiles/function_arrows.coffee11
-rw-r--r--tests/examplefiles/garcia-wachs.kk70
-rw-r--r--tests/examplefiles/grammar-test.p622
-rw-r--r--tests/examplefiles/hash_syntax.rb5
-rw-r--r--tests/examplefiles/hybris_File.hy (renamed from tests/examplefiles/File.hy)0
-rw-r--r--tests/examplefiles/idl_sample.pro (renamed from tests/examplefiles/mg_sample.pro)0
-rw-r--r--tests/examplefiles/inet_pton6.dg48
-rw-r--r--tests/examplefiles/language.hy165
-rw-r--r--tests/examplefiles/livescript-demo.ls2
-rw-r--r--tests/examplefiles/objc_example.m10
-rw-r--r--tests/examplefiles/py3tb_test.py3tb4
-rw-r--r--tests/examplefiles/robotframework_test.txt (renamed from tests/examplefiles/robotframework.txt)0
-rw-r--r--tests/examplefiles/scope.cirru43
-rw-r--r--tests/examplefiles/swig_java.swg1329
-rw-r--r--tests/examplefiles/swig_std_vector.i225
-rw-r--r--tests/examplefiles/test.agda102
-rw-r--r--tests/examplefiles/test.apl26
-rw-r--r--tests/examplefiles/test.bb95
-rw-r--r--tests/examplefiles/test.ebnf31
-rw-r--r--tests/examplefiles/test.idr93
-rw-r--r--tests/examplefiles/test.mask41
-rw-r--r--tests/examplefiles/test.p6252
-rw-r--r--tests/examplefiles/test.pig148
-rw-r--r--tests/examplefiles/type.lisp16
-rw-r--r--tests/examplefiles/vbnet_test.bas (renamed from tests/examplefiles/test.bas)0
-rw-r--r--tests/old_run.py138
-rw-r--r--tests/run.py33
-rw-r--r--tests/test_basic_api.py46
-rw-r--r--tests/test_clexer.py2
-rw-r--r--tests/test_cmdline.py11
-rw-r--r--tests/test_examplefiles.py49
-rw-r--r--tests/test_html_formatter.py28
-rw-r--r--tests/test_latex_formatter.py6
-rw-r--r--tests/test_lexers_other.py68
-rw-r--r--tests/test_perllexer.py2
-rw-r--r--tests/test_regexlexer.py2
-rw-r--r--tests/test_token.py6
-rw-r--r--tests/test_using_api.py2
-rw-r--r--tests/test_util.py2
216 files changed, 15237 insertions, 3334 deletions
diff --git a/.hgignore b/.hgignore
index f5d9f0c2..57aaeff5 100644
--- a/.hgignore
+++ b/.hgignore
@@ -4,6 +4,8 @@ syntax: glob
*.egg
build/*
dist/*
+doc/_build
Pygments.egg-info/*
.ropeproject
tests/examplefiles/output
+.idea/
diff --git a/AUTHORS b/AUTHORS
index 34b40db4..83c0eaca 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -6,8 +6,9 @@ Major developers are Tim Hatch <tim@timhatch.com> and Armin Ronacher
Other contributors, listed alphabetically, are:
* Sam Aaron -- Ioke lexer
-* Kumar Appaiah -- Debian control lexer
* Ali Afshar -- image formatter
+* Thomas Aglassinger -- Rexx lexer
+* Kumar Appaiah -- Debian control lexer
* Andreas Amann -- AppleScript lexer
* Timothy Armstrong -- Dart lexer fixes
* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers
@@ -15,6 +16,8 @@ Other contributors, listed alphabetically, are:
* Stefan Matthias Aust -- Smalltalk lexer
* Ben Bangert -- Mako lexers
* Max Battcher -- Darcs patch lexer
+* Thomas Baruchel -- APL lexer
+* Tim Baumann -- (Literate) Agda lexer
* Paul Baumgart, 280 North, Inc. -- Objective-J lexer
* Michael Bayer -- Myghty lexers
* John Benediktsson -- Factor lexer
@@ -25,24 +28,30 @@ Other contributors, listed alphabetically, are:
* Pierre Bourdon -- bugfixes
* Hiram Chirino -- Scaml and Jade lexers
* Ian Cooper -- VGL lexer
+* David Corbett -- Inform lexers
* Leaf Corcoran -- MoonScript lexer
-* Christian Jann -- ShellSession lexer
* Christopher Creutzig -- MuPAD lexer
+* Daniël W. Crompton - Pike lexer
* Pete Curry -- bugfixes
-* Owen Durni -- haXe lexer
+* Bryan Davis -- EBNF lexer
+* Owen Durni -- Haxe lexer
* Nick Efford -- Python 3 lexer
* Sven Efftinge -- Xtend lexer
* Artem Egorkine -- terminal256 formatter
* James H. Fisher -- PostScript lexer
+* William S. Fulton -- SWIG lexer
* Carlos Galdino -- Elixir and Elixir Console lexers
* Michael Galloy -- IDL lexer
* Naveen Garg -- Autohotkey lexer
* Laurent Gautier -- R/S lexer
* Alex Gaynor -- PyPy log lexer
+* Richard Gerkin -- Igor Pro lexer
* Alain Gilbert -- TypeScript lexer
+* Alex Gilding -- BlitzBasic lexer
* Bertrand Goetzmann -- Groovy lexer
* Krzysiek Goj -- Scala lexer
* Matt Good -- Genshi, Cheetah lexers
+* Michał Górny -- vim modeline support
* Patrick Gotthardt -- PHP namespaces support
* Olivier Guibe -- Asymptote lexer
* Jordi Gutiérrez Hermoso -- Octave lexer
@@ -53,20 +62,25 @@ Other contributors, listed alphabetically, are:
* Greg Hendershott -- Racket lexer
* David Hess, Fish Software, Inc. -- Objective-J lexer
* Varun Hiremath -- Debian control lexer
+* Rob Hoelz -- Perl 6 lexer
* Doug Hogan -- Mscgen lexer
* Ben Hollis -- Mason lexer
+* Max Horn -- GAP lexer
* Dustin Howett -- Logos lexer
* Alastair Houghton -- Lexer inheritance facility
* Tim Howard -- BlitzMax lexer
* Ivan Inozemtsev -- Fantom lexer
* Brian R. Jackson -- Tea lexer
+* Christian Jann -- ShellSession lexer
* Dennis Kaarsemaker -- sources.list lexer
+* Alexander Kit -- MaskJS lexer
* Igor Kalnitsky -- vhdl lexer
* Pekka Klärck -- Robot Framework lexer
* Eric Knibbe -- Lasso lexer
* Stepan Koltsov -- Clay lexer
* Adam Koprowski -- Opa lexer
* Benjamin Kowarsch -- Modula-2 lexer
+* Domen Kožar -- Nix lexer
* Alexander Kriegisch -- Kconfig and AspectJ lexers
* Marek Kubica -- Scheme lexer
* Jochen Kupperschmidt -- Markdown processor
@@ -77,6 +91,7 @@ Other contributors, listed alphabetically, are:
* Mark Lee -- Vala lexer
* Ben Mabey -- Gherkin lexer
* Angus MacArthur -- QML lexer
+* Louis Marchand -- Eiffel lexer
* Simone Margaritelli -- Hybris lexer
* Kirk McDonald -- D lexer
* Gordon McGregor -- SystemVerilog lexer
@@ -84,6 +99,7 @@ Other contributors, listed alphabetically, are:
* Brian McKenna -- F# lexer
* Charles McLaughlin -- Puppet lexer
* Lukas Meuser -- BBCode formatter, Lua lexer
+* Cat Miller -- Pig lexer
* Paul Miller -- LiveScript lexer
* Hong Minhee -- HTTP lexer
* Michael Mior -- Awk lexer
@@ -98,14 +114,17 @@ Other contributors, listed alphabetically, are:
* Mike Nolta -- Julia lexer
* Jonas Obrist -- BBCode lexer
* David Oliva -- Rebol lexer
+* Pat Pannuto -- nesC lexer
* Jon Parise -- Protocol buffers lexer
* Ronny Pfannschmidt -- BBCode lexer
* Benjamin Peterson -- Test suite refactoring
* Dominik Picheta -- Nimrod lexer
* Clément Prévost -- UrbiScript lexer
+* raichoo -- Idris lexer
* Kashif Rasul -- CUDA lexer
* Justin Reidy -- MXML lexer
* Norman Richards -- JSON lexer
+* Corey Richardson -- Rust lexer updates
* Lubomir Rintel -- GoodData MAQL and CL lexers
* Andre Roberge -- Tango style
* Konrad Rudolph -- LaTeX formatter enhancements
@@ -127,7 +146,10 @@ Other contributors, listed alphabetically, are:
* Tiberius Teng -- default style overhaul
* Jeremy Thurgood -- Erlang, Squid config lexers
* Brian Tiffin -- OpenCOBOL lexer
+* Bob Tolbert -- Hy lexer
* Erick Tryzelaar -- Felix lexer
+* Alexander Udalov -- Kotlin lexer improvements
+* Thomas Van Doren -- Chapel lexer
* Daniele Varrazzo -- PostgreSQL lexers
* Abe Voelker -- OpenEdge ABL lexer
* Pepijn de Vos -- HTML formatter CTags support
@@ -140,5 +162,6 @@ Other contributors, listed alphabetically, are:
* Diego Zamboni -- CFengine3 lexer
* Enrique Zamudio -- Ceylon lexer
* Alex Zimin -- Nemerle lexer
+* Rob Zimmerman -- Kal lexer
Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
index a422db88..8459e05a 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,13 +6,56 @@ Issue numbers refer to the tracker at
pull request numbers to the requests at
<http://bitbucket.org/birkenfeld/pygments-main/pull-requests/merged>.
-Version 1.7
+Version 2.0
-----------
(under development)
+- Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source
+ compatibility between Python 2.6, 2.7 and 3.3+.
+
+- New website and documentation based on Sphinx (finally!)
+
- Lexers added:
* Clay (PR#184)
+ * Perl 6 (PR#181)
+ * Swig (PR#168)
+ * nesC (PR#166)
+ * BlitzBasic (PR#197)
+ * EBNF (PR#193)
+ * Igor Pro (PR#172)
+ * Rexx (PR#199)
+ * Agda and Literate Agda (PR#203)
+ * Mathematica (PR#245)
+ * Nix (PR#267)
+ * Pike (PR#237)
+ * Hy (PR#238)
+ * Chapel (PR#256)
+ * Kal (PR#233)
+ * Eiffel (PR#273)
+ * Cirru (PR#275)
+ * ColdFusion CFC (PR#283)
+ * Idris (PR#210)
+ * Intel objdump (PR#279)
+ * MaskJS (PR#280)
+ * Inform 6/7 (PR#281)
+ * MQL (PR#285)
+ * APL (#969)
+
+- New styles: "xcode" and "igor", similar to the default highlighting of
+ the respective IDEs.
+
+- Updated the Makefile lexer to yield a little more useful highlighting.
+
+- Lexer aliases passed to ``get_lexer_by_name()`` are now case-insensitive.
+
+- File name matching in lexers and formatters will now use a regex cache
+ for speed (PR#205).
+
+- Pygments will now recognize "vim" modelines when guessing the lexer for
+ a file based on content (PR#118).
+
+- The NameHighlightFilter now works with any Name.* token type (#790).
- Python 3 lexer: add new exceptions from PEP 3151.
@@ -24,6 +67,46 @@ Version 1.7
- Lasso lexer: fix method highlighting, update builtins. Fix
guessing so that plain XML isn't always taken as Lasso (PR#163).
+- Objective C/C++ lexers: allow "@" prefixing any expression (#871).
+
+- Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols
+ in hashes (#873).
+
+- Stan lexer: update for version 1.3.0 of the language (PR#162).
+
+- JavaScript lexer: add the "yield" keyword (PR#196).
+
+- HTTP lexer: support for PATCH method (PR#190).
+
+- Koka lexer: update to newest language spec (PR#201).
+
+- Haxe lexer: rewrite and support for Haxe 3 (PR#174).
+
+- Prolog lexer: add different kinds of numeric literals (#864).
+
+- F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with
+ dotted chains (#948).
+
+- Kotlin lexer: general update (PR#271).
+
+- Rebol lexer: fix comment detection and analyse_text (PR#261).
+
+- Stan lexer: update to v2.0.1 (PR#255).
+
+- LLVM lexer: update keywords to v3.4 (PR#258).
+
+- PHP lexer: add new keywords and binary literals (PR#222).
+
+- external/markdown-processor.py updated to newest python-markdown (PR#221).
+
+- CSS lexer: some highlighting order fixes (PR#231).
+
+- Ceylon lexer: fix parsing of nested multiline comments (#915).
+
+- C family lexers: fix parsing of indented preprocessor directives (#944).
+
+- Rust lexer: update to 0.9 language version (PR#270).
+
Version 1.6
-----------
@@ -278,7 +361,7 @@ Version 1.3
* Ada
* Coldfusion
* Modula-2
- * haXe
+ * Haxe
* R console
* Objective-J
* Haml and Sass
@@ -337,7 +420,7 @@ Version 1.2
* CMake
* Ooc
* Coldfusion
- * haXe
+ * Haxe
* R console
- Added options for rendering LaTeX in source code comments in the
diff --git a/LICENSE b/LICENSE
index 1e091194..641c8e8e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2006-2013 by the respective authors (see AUTHORS file).
+Copyright (c) 2006-2014 by the respective authors (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/MANIFEST.in b/MANIFEST.in
index 312c1504..cfec4e94 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,5 +2,5 @@ include pygmentize
include external/*
include Makefile CHANGES LICENSE AUTHORS TODO ez_setup.py
recursive-include tests *
-recursive-include docs *
+recursive-include doc *
recursive-include scripts *
diff --git a/Makefile b/Makefile
index f24dd084..e28c90c7 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@
#
# Combines scripts for common tasks.
#
-# :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+# :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
# :license: BSD, see LICENSE for details.
#
@@ -36,11 +36,8 @@ codetags:
@$(PYTHON) scripts/find_codetags.py -i tests/examplefiles -i scripts/pylintrc \
-i scripts/find_codetags.py -o codetags.html .
-docs: docs/build
-
-docs/build: docs/src/*.txt
- $(PYTHON) docs/generate.py html docs/build $?
- touch docs/build
+docs:
+ make -C doc html
mapfiles:
(cd pygments/lexers; $(PYTHON) _mapping.py)
@@ -53,7 +50,7 @@ reindent:
@$(PYTHON) scripts/reindent.py -r -B .
test:
- @$(PYTHON) tests/run.py $(TESTS)
+ @$(PYTHON) tests/run.py $(TEST)
test-coverage:
- @$(PYTHON) tests/run.py -C $(TESTS)
+ @$(PYTHON) tests/run.py -C $(TEST)
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..7fb75411
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = PYTHONPATH=.. sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pygments.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pygments.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Pygments"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pygments"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_static/favicon.ico b/doc/_static/favicon.ico
new file mode 100644
index 00000000..777f617d
--- /dev/null
+++ b/doc/_static/favicon.ico
Binary files differ
diff --git a/doc/_static/logo_new.png b/doc/_static/logo_new.png
new file mode 100644
index 00000000..0ae4b209
--- /dev/null
+++ b/doc/_static/logo_new.png
Binary files differ
diff --git a/doc/_static/logo_only.png b/doc/_static/logo_only.png
new file mode 100644
index 00000000..fdebcc47
--- /dev/null
+++ b/doc/_static/logo_only.png
Binary files differ
diff --git a/doc/_templates/docssidebar.html b/doc/_templates/docssidebar.html
new file mode 100644
index 00000000..913acaaf
--- /dev/null
+++ b/doc/_templates/docssidebar.html
@@ -0,0 +1,3 @@
+{% if pagename != 'docs/index' %}
+<strong>&laquo; <a href="{{ pathto('docs/index') }}">Back to docs index</a></strong>
+{% endif %}
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
new file mode 100644
index 00000000..29954554
--- /dev/null
+++ b/doc/_templates/indexsidebar.html
@@ -0,0 +1,25 @@
+<h3>Download</h3>
+{% if version.endswith('(hg)') %}
+<p>This documentation is for version <b>{{ version }}</b>, which is
+ not released yet.</p>
+<p>You can use it from the
+ <a href="http://bitbucket.org/birkenfeld/sphinx/">Mercurial repo</a> or look for
+ released versions in the <a href="http://pypi.python.org/pypi/Sphinx">Python
+ Package Index</a>.</p>
+{% else %}
+<p>Current version: <b>{{ version }}</b></p>
+<p>Get Pygments from the <a href="http://pypi.python.org/pypi/Pygments">Python Package
+Index</a>, or install it with:</p>
+<pre>pip install Pygments</pre>
+{% endif %}
+
+<h3>Questions? Suggestions?</h3>
+
+<p>Clone at <a href="https://bitbucket.org/birkenfeld/pygments-main">Bitbucket</a>
+or come to the <tt>#pocoo</tt> channel on FreeNode.</p>
+<p>You can also open an issue at the
+ <a href="https://www.bitbucket.org/birkenfeld/pygments-main/issues/">tracker</a>.</p>
+
+<p class="logo">A <a href="http://pocoo.org/">
+ <img src="{{ pathto("_static/pocoo.png", 1) }}" /></a> project</a></p>
+
diff --git a/doc/_themes/pygments14/layout.html b/doc/_themes/pygments14/layout.html
new file mode 100644
index 00000000..93a3119e
--- /dev/null
+++ b/doc/_themes/pygments14/layout.html
@@ -0,0 +1,98 @@
+{#
+ sphinxdoc/layout.html
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx layout template for the sphinxdoc theme.
+
+ :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{% block relbar1 %}{% endblock %}
+{% block relbar2 %}{% endblock %}
+
+{% block extrahead %}
+ <link href='http://fonts.googleapis.com/css?family={{ theme_font|replace(' ', '+') }}:300,400,700'
+ rel='stylesheet' type='text/css'>
+{{ super() }}
+{%- if not embedded %}
+ <style type="text/css">
+ table.right { float: right; margin-left: 20px; }
+ table.right td { border: 1px solid #ccc; }
+ {% if pagename == 'index' %}
+ .related { display: none; }
+ {% endif %}
+ </style>
+ <script type="text/javascript">
+ // intelligent scrolling of the sidebar content
+ $(window).scroll(function() {
+ var sb = $('.sphinxsidebarwrapper');
+ var win = $(window);
+ var sbh = sb.height();
+ var offset = $('.sphinxsidebar').position()['top'];
+ var wintop = win.scrollTop();
+ var winbot = wintop + win.innerHeight();
+ var curtop = sb.position()['top'];
+ var curbot = curtop + sbh;
+ // does sidebar fit in window?
+ if (sbh < win.innerHeight()) {
+ // yes: easy case -- always keep at the top
+ sb.css('top', $u.min([$u.max([0, wintop - offset - 10]),
+ $(document).height() - sbh - 200]));
+ } else {
+ // no: only scroll if top/bottom edge of sidebar is at
+ // top/bottom edge of window
+ if (curtop > wintop && curbot > winbot) {
+ sb.css('top', $u.max([wintop - offset - 10, 0]));
+ } else if (curtop < wintop && curbot < winbot) {
+ sb.css('top', $u.min([winbot - sbh - offset - 20,
+ $(document).height() - sbh - 200]));
+ }
+ }
+ });
+ </script>
+{%- endif %}
+{% endblock %}
+
+{% block header %}
+<div class="outerwrapper">
+<div class="pageheader">
+ <ul>
+ <li><a href="{{ pathto('index') }}">Home</a></li>
+ {% if demo_active %}
+ <li><a href="{{ pathto('demo') }}">Demo</a></li>
+ {% endif %}
+ <li><a href="{{ pathto('languages') }}">Languages</a></li>
+ <li><a href="{{ pathto('faq') }}">FAQ</a></li>
+ <li><a href="{{ pathto('download') }}">Get it</a></li>
+ <li><a href="{{ pathto('docs/index') }}">Docs</a></li>
+ </ul>
+ <div>
+ <a href="{{ pathto('index') }}">
+ <img src="{{ pathto('_static/logo.png', 1) }}" alt="Pygments logo" />
+ </a>
+ </div>
+</div>
+{% endblock %}
+
+{% block footer %}
+ <div class="footer" role="contentinfo">
+ &copy; Copyright 2014, Georg Brandl and Pygments contributors.
+ Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{
+ sphinx_version }}. <br/>
+ Pygments logo created by <a href="http://joelunger.com">Joel Unger</a>.
+ Backgrounds from <a href="http://subtlepatterns.com">subtlepatterns.com</a>.
+ </div>
+ </div> {# closes "outerwrapper" div #}
+{% endblock %}
+
+{% block sidebarrel %}
+{% endblock %}
+
+{% block sidebarsourcelink %}
+{% endblock %}
diff --git a/doc/_themes/pygments14/static/bodybg.png b/doc/_themes/pygments14/static/bodybg.png
new file mode 100644
index 00000000..46892b80
--- /dev/null
+++ b/doc/_themes/pygments14/static/bodybg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/docbg.png b/doc/_themes/pygments14/static/docbg.png
new file mode 100644
index 00000000..13e61f32
--- /dev/null
+++ b/doc/_themes/pygments14/static/docbg.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/listitem.png b/doc/_themes/pygments14/static/listitem.png
new file mode 100644
index 00000000..e45715f9
--- /dev/null
+++ b/doc/_themes/pygments14/static/listitem.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/logo.png b/doc/_themes/pygments14/static/logo.png
new file mode 100644
index 00000000..2c1a24dc
--- /dev/null
+++ b/doc/_themes/pygments14/static/logo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pocoo.png b/doc/_themes/pygments14/static/pocoo.png
new file mode 100644
index 00000000..41741494
--- /dev/null
+++ b/doc/_themes/pygments14/static/pocoo.png
Binary files differ
diff --git a/doc/_themes/pygments14/static/pygments14.css_t b/doc/_themes/pygments14/static/pygments14.css_t
new file mode 100644
index 00000000..838782b5
--- /dev/null
+++ b/doc/_themes/pygments14/static/pygments14.css_t
@@ -0,0 +1,401 @@
+/*
+ * pygments14.css
+ * ~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13.
+ *
+ * :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 14px;
+ text-align: center;
+ background-image: url(bodybg.png);
+ background-color: {{ theme_background }};
+ color: black;
+ padding: 0;
+ /*
+ border-right: 1px solid {{ theme_border }};
+ border-left: 1px solid {{ theme_border }};
+ */
+
+ margin: 0 auto;
+ min-width: 780px;
+ max-width: 1080px;
+}
+
+.outerwrapper {
+ background-image: url(docbg.png);
+ background-attachment: fixed;
+}
+
+.pageheader {
+ text-align: left;
+ padding: 10px 15px;
+}
+
+.pageheader ul {
+ float: right;
+ color: white;
+ list-style-type: none;
+ padding-left: 0;
+ margin-top: 40px;
+ margin-right: 10px;
+}
+
+.pageheader li {
+ float: left;
+ margin: 0 0 0 10px;
+}
+
+.pageheader li a {
+ border-radius: 3px;
+ padding: 8px 12px;
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 5px rgba(0, 0, 0, 0.2);
+}
+
+.pageheader li a:hover {
+ background-color: {{ theme_yellow }};
+ color: black;
+ text-shadow: none;
+}
+
+div.document {
+ text-align: left;
+ /*border-left: 1em solid {{ theme_lightyellow }};*/
+}
+
+div.bodywrapper {
+ margin: 0 12px 0 240px;
+ background-color: white;
+/* border-right: 1px solid {{ theme_border }}; */
+}
+
+div.body {
+ margin: 0;
+ padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+ font-size: 1em;
+ color: {{ theme_darkgray }};
+}
+
+div.related ul {
+ background-image: url(relbg.png);
+ background-repeat: repeat-y;
+ background-color: {{ theme_yellow }};
+ height: 1.9em;
+ /*
+ border-top: 1px solid {{ theme_border }};
+ border-bottom: 1px solid {{ theme_border }};
+ */
+}
+
+div.related ul li {
+ margin: 0 5px 0 0;
+ padding: 0;
+ float: left;
+}
+
+div.related ul li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+div.related ul li a {
+ margin: 0;
+ padding: 0 5px 0 5px;
+ line-height: 1.75em;
+ color: {{ theme_darkgray }};
+ /*text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);*/
+}
+
+div.related ul li a:hover {
+ text-decoration: underline;
+ text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5);
+}
+
+div.sphinxsidebarwrapper {
+ position: relative;
+ top: 0px;
+ padding: 0;
+}
+
+div.sphinxsidebar {
+ margin: 0;
+ padding: 0 0px 15px 15px;
+ width: 210px;
+ float: left;
+ font-size: 1em;
+ text-align: left;
+}
+
+div.sphinxsidebar .logo {
+ font-size: 1.8em;
+ color: #666;
+ font-weight: 300;
+ text-align: center;
+}
+
+div.sphinxsidebar .logo img {
+ vertical-align: middle;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #aaa;
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar h3 {
+ font-size: 1.5em;
+ /* border-top: 1px solid {{ theme_border }}; */
+ margin-top: 1em;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 1.2em;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+ margin-right: -15px;
+ margin-left: -15px;
+ padding-right: 14px;
+ padding-left: 14px;
+ color: #333;
+ font-weight: 300;
+ /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/
+}
+
+div.sphinxsidebarwrapper > h3:first-child {
+ margin-top: 0.5em;
+ border: none;
+}
+
+div.sphinxsidebar h3 a {
+ color: #333;
+}
+
+div.sphinxsidebar ul {
+ color: #444;
+ margin-top: 7px;
+ padding: 0;
+ line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+ margin-left: 20px;
+ list-style-image: url(listitem.png);
+}
+
+div.footer {
+ color: {{ theme_darkgray }};
+ text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8);
+ padding: 2em;
+ text-align: center;
+ clear: both;
+ font-size: 0.8em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+ margin: 0.8em 0 0.5em 0;
+}
+
+a {
+ color: {{ theme_darkgreen }};
+ text-decoration: none;
+}
+
+a:hover {
+ color: {{ theme_darkyellow }};
+}
+
+div.body a {
+ text-decoration: underline;
+}
+
+h1 {
+ margin: 10px 0 0 0;
+ font-size: 2.4em;
+ color: {{ theme_darkgray }};
+ font-weight: 300;
+}
+
+h2 {
+ margin: 1em 0 0.2em 0;
+ font-size: 1.5em;
+ font-weight: 300;
+ padding: 0;
+ color: {{ theme_darkgreen }};
+}
+
+h3 {
+ margin: 1em 0 -0.3em 0;
+ font-size: 1.3em;
+ font-weight: 300;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+ text-decoration: none;
+}
+
+div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt {
+ color: {{ theme_darkgreen }} !important;
+ font-size: inherit !important;
+}
+
+a.headerlink {
+ color: {{ theme_green }} !important;
+ font-size: 12px;
+ margin-left: 6px;
+ padding: 0 4px 0 4px;
+ text-decoration: none !important;
+ float: right;
+}
+
+a.headerlink:hover {
+ background-color: #ccc;
+ color: white!important;
+}
+
+cite, code, tt {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 14px;
+ letter-spacing: -0.02em;
+}
+
+tt {
+ background-color: #f2f2f2;
+ border: 1px solid #ddd;
+ border-radius: 2px;
+ color: #333;
+ padding: 1px;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+ border: 0;
+}
+
+hr {
+ border: 1px solid #abc;
+ margin: 2em;
+}
+
+a tt {
+ border: 0;
+ color: {{ theme_darkgreen }};
+}
+
+a tt:hover {
+ color: {{ theme_darkyellow }};
+}
+
+pre {
+ font-family: 'Consolas', 'DejaVu Sans Mono',
+ 'Bitstream Vera Sans Mono', monospace;
+ font-size: 13px;
+ letter-spacing: 0.015em;
+ line-height: 120%;
+ padding: 0.5em;
+ border: 1px solid #ccc;
+ border-radius: 2px;
+ background-color: #f8f8f8;
+}
+
+pre a {
+ color: inherit;
+ text-decoration: underline;
+}
+
+td.linenos pre {
+ padding: 0.5em 0;
+}
+
+div.quotebar {
+ background-color: #f8f8f8;
+ max-width: 250px;
+ float: right;
+ padding: 0px 7px;
+ border: 1px solid #ccc;
+ margin-left: 1em;
+}
+
+div.topic {
+ background-color: #f8f8f8;
+}
+
+table {
+ border-collapse: collapse;
+ margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+ padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+ font-size: 0.9em;
+ margin: 1em 0 1em 0;
+ border: 1px solid #86989B;
+ border-radius: 2px;
+ background-color: #f7f7f7;
+ padding: 0;
+}
+
+div.admonition p, div.warning p {
+ margin: 0.5em 1em 0.5em 1em;
+ padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+ margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+ margin-top: 1em;
+ padding-top: 0.5em;
+ font-weight: bold;
+}
+
+div.warning {
+ border: 1px solid #940000;
+/* background-color: #FFCCCF;*/
+}
+
+div.warning p.admonition-title {
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+ margin: 0.1em 0.5em 0.5em 3em;
+ padding: 0;
+}
+
+.viewcode-back {
+ font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+ 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+ background-color: #f4debf;
+ border-top: 1px solid #ac9;
+ border-bottom: 1px solid #ac9;
+}
diff --git a/doc/_themes/pygments14/theme.conf b/doc/_themes/pygments14/theme.conf
new file mode 100644
index 00000000..fffe66d6
--- /dev/null
+++ b/doc/_themes/pygments14/theme.conf
@@ -0,0 +1,15 @@
+[theme]
+inherit = basic
+stylesheet = pygments14.css
+pygments_style = friendly
+
+[options]
+green = #66b55e
+darkgreen = #36852e
+darkgray = #666666
+border = #66b55e
+yellow = #f4cd00
+darkyellow = #d4ad00
+lightyellow = #fffbe3
+background = #f9f9f9
+font = PT Sans
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 00000000..864ec7a1
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+#
+# Pygments documentation build configuration file, created by
+# sphinx-quickstart on Sat Jan 18 17:07:37 2014.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+import pygments
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Pygments'
+copyright = u'2014, Georg Brandl'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = pygments.__version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+#pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'pygments14'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['_themes']
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': 'indexsidebar.html',
+ 'docs/*': 'docssidebar.html'}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Pygmentsdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'Pygments.tex', u'Pygments Documentation',
+ u'Georg Brandl', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'pygments', u'Pygments Documentation',
+ [u'Georg Brandl'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Pygments', u'Pygments Documentation',
+ u'Georg Brandl', 'Pygments', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/docs/api.rst b/doc/docs/api.rst
new file mode 100644
index 00000000..123a4643
--- /dev/null
+++ b/doc/docs/api.rst
@@ -0,0 +1,316 @@
+.. -*- mode: rst -*-
+
+=====================
+The full Pygments API
+=====================
+
+This page describes the Pygments API.
+
+High-level API
+==============
+
+.. module:: pygments
+
+Functions from the :mod:`pygments` module:
+
+.. function:: lex(code, lexer)
+
+ Lex `code` with the `lexer` (must be a `Lexer` instance)
+ and return an iterable of tokens. Currently, this only calls
+ `lexer.get_tokens()`.
+
+.. function:: format(tokens, formatter, outfile=None)
+
+ Format a token stream (iterable of tokens) `tokens` with the
+ `formatter` (must be a `Formatter` instance). The result is
+ written to `outfile`, or if that is ``None``, returned as a
+ string.
+
+.. function:: highlight(code, lexer, formatter, outfile=None)
+
+ This is the most high-level highlighting function.
+ It combines `lex` and `format` in one function.
+
+
+.. module:: pygments.lexers
+
+Functions from :mod:`pygments.lexers`:
+
+.. function:: get_lexer_by_name(alias, **options)
+
+ Return an instance of a `Lexer` subclass that has `alias` in its
+ aliases list. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
+
+.. function:: get_lexer_for_filename(fn, **options)
+
+ Return a `Lexer` subclass instance that has a filename pattern
+ matching `fn`. The lexer is given the `options` at its
+ instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
+ is found.
+
+.. function:: get_lexer_for_mimetype(mime, **options)
+
+ Return a `Lexer` subclass instance that has `mime` in its mimetype
+ list. The lexer is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+ is found.
+
+.. function:: guess_lexer(text, **options)
+
+ Return a `Lexer` subclass instance that's guessed from the text in
+ `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+ class is called with the text as argument, and the lexer which returned the
+ highest value will be instantiated and returned.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: guess_lexer_for_filename(filename, text, **options)
+
+ As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+ or `alias_filenames` that matches `filename` are taken into consideration.
+
+ :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+ handle the content.
+
+.. function:: get_all_lexers()
+
+ Return an iterable over all registered lexers, yielding tuples in the
+ format::
+
+ (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.formatters
+
+Functions from :mod:`pygments.formatters`:
+
+.. function:: get_formatter_by_name(alias, **options)
+
+ Return an instance of a :class:`.Formatter` subclass that has `alias` in its
+ aliases list. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
+ alias is found.
+
+.. function:: get_formatter_for_filename(fn, **options)
+
+ Return a :class:`.Formatter` subclass instance that has a filename pattern
+ matching `fn`. The formatter is given the `options` at its instantiation.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
+ is found.
+
+
+.. module:: pygments.styles
+
+Functions from :mod:`pygments.styles`:
+
+.. function:: get_style_by_name(name)
+
+ Return a style class by its short name. The names of the builtin styles
+ are listed in :data:`pygments.styles.STYLE_MAP`.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
+ found.
+
+.. function:: get_all_styles()
+
+ Return an iterable over all registered styles, yielding their names.
+
+ .. versionadded:: 0.6
+
+
+.. module:: pygments.lexer
+
+Lexers
+======
+
+The base lexer class from which all lexers are derived is:
+
+.. class:: Lexer(**options)
+
+ The constructor takes a \*\*keywords dictionary of options.
+ Every subclass must first process its own options and then call
+ the `Lexer` constructor, since it processes the `stripnl`,
+ `stripall` and `tabsize` options.
+
+ An example looks like this:
+
+ .. sourcecode:: python
+
+ def __init__(self, **options):
+ self.compress = options.get('compress', '')
+ Lexer.__init__(self, **options)
+
+ As these options must all be specifiable as strings (due to the
+ command line usage), there are various utility functions
+ available to help with that, see `Option processing`_.
+
+ .. method:: get_tokens(text)
+
+ This method is the basic interface of a lexer. It is called by
+ the `highlight()` function. It must process the text and return an
+ iterable of ``(tokentype, value)`` pairs from `text`.
+
+ Normally, you don't need to override this method. The default
+ implementation processes the `stripnl`, `stripall` and `tabsize`
+ options and then yields all tokens from `get_tokens_unprocessed()`,
+ with the ``index`` dropped.
+
+ .. method:: get_tokens_unprocessed(text)
+
+ This method should process the text and return an iterable of
+ ``(index, tokentype, value)`` tuples where ``index`` is the starting
+ position of the token within the input text.
+
+ This method must be overridden by subclasses.
+
+ .. staticmethod:: analyse_text(text)
+
+ A static method which is called for lexer guessing. It should analyse
+ the text and return a float in the range from ``0.0`` to ``1.0``.
+ If it returns ``0.0``, the lexer will not be selected as the most
+ probable one, if it returns ``1.0``, it will be selected immediately.
+
+ .. note:: You don't have to add ``@staticmethod`` to the definition of
+ this method, this will be taken care of by the Lexer's metaclass.
+
+ For a list of known tokens have a look at the :doc:`tokens` page.
+
+ A lexer also can have the following attributes (in fact, they are mandatory
+ except `alias_filenames`) that are used by the builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the lexer, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the lexer from a list, e.g. using `get_lexer_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of `fnmatch` patterns that match filenames which contain
+ content for this lexer. The patterns in this list should be unique among
+ all lexers.
+
+ .. attribute:: alias_filenames
+
+ A list of `fnmatch` patterns that match filenames which may or may not
+ contain content for this lexer. This list is used by the
+ :func:`.guess_lexer_for_filename()` function, to determine which lexers
+ are then included in guessing the correct one. That means that
+ e.g. every lexer for HTML and a template language should include
+ ``\*.html`` in this list.
+
+ .. attribute:: mimetypes
+
+ A list of MIME types for content that can be lexed with this
+ lexer.
+
+
+.. module:: pygments.formatter
+
+Formatters
+==========
+
+A formatter is derived from this class:
+
+
+.. class:: Formatter(**options)
+
+ As with lexers, this constructor processes options and then must call the
+ base class :meth:`__init__`.
+
+ The :class:`Formatter` class recognizes the options `style`, `full` and
+ `title`. It is up to the formatter class whether it uses them.
+
+ .. method:: get_style_defs(arg='')
+
+ This method must return statements or declarations suitable to define
+ the current style for subsequent highlighted text (e.g. CSS classes
+ in the `HTMLFormatter`).
+
+ The optional argument `arg` can be used to modify the generation and
+ is formatter dependent (it is standardized because it can be given on
+ the command line).
+
+ This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
+ the `arg` is then given by the ``-a`` option.
+
+ .. method:: format(tokensource, outfile)
+
+ This method must format the tokens from the `tokensource` iterable and
+ write the formatted version to the file object `outfile`.
+
+ Formatter options can control how exactly the tokens are converted.
+
+ .. versionadded:: 0.7
+ A formatter must have the following attributes that are used by the
+ builtin lookup mechanism.
+
+ .. attribute:: name
+
+ Full name for the formatter, in human-readable form.
+
+ .. attribute:: aliases
+
+ A list of short, unique identifiers that can be used to lookup
+ the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
+
+ .. attribute:: filenames
+
+ A list of :mod:`fnmatch` patterns that match filenames for which this
+ formatter can produce output. The patterns in this list should be unique
+ among all formatters.
+
+
+.. module:: pygments.util
+
+Option processing
+=================
+
+The :mod:`pygments.util` module has some utility functions usable for option
+processing:
+
+.. exception:: OptionError
+
+ This exception will be raised by all option processing functions if
+ the type or value of the argument is not correct.
+
+.. function:: get_bool_opt(options, optname, default=None)
+
+ Interpret the key `optname` from the dictionary `options` as a boolean and
+ return it. Return `default` if `optname` is not in `options`.
+
+ The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+ ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+ (matched case-insensitively).
+
+.. function:: get_int_opt(options, optname, default=None)
+
+ As :func:`get_bool_opt`, but interpret the value as an integer.
+
+.. function:: get_list_opt(options, optname, default=None)
+
+ If the key `optname` from the dictionary `options` is a string,
+ split it at whitespace and return it. If it is already a list
+ or a tuple, it is returned as a list.
+
+.. function:: get_choice_opt(options, optname, allowed, default=None)
+
+ If the key `optname` from the dictionary is not in the sequence
+ `allowed`, raise an error, otherwise return it.
+
+ .. versionadded:: 0.8
diff --git a/doc/docs/authors.rst b/doc/docs/authors.rst
new file mode 100644
index 00000000..f8373f0a
--- /dev/null
+++ b/doc/docs/authors.rst
@@ -0,0 +1,4 @@
+Full contributor list
+=====================
+
+.. include:: ../../AUTHORS
diff --git a/doc/docs/changelog.rst b/doc/docs/changelog.rst
new file mode 100644
index 00000000..f264cab0
--- /dev/null
+++ b/doc/docs/changelog.rst
@@ -0,0 +1 @@
+.. include:: ../../CHANGES
diff --git a/docs/src/cmdline.txt b/doc/docs/cmdline.rst
index a48a5c27..bf0177a3 100644
--- a/docs/src/cmdline.txt
+++ b/doc/docs/cmdline.rst
@@ -4,8 +4,8 @@
Command Line Interface
======================
-You can use Pygments from the shell, provided you installed the `pygmentize`
-script::
+You can use Pygments from the shell, provided you installed the
+:program:`pygmentize` script::
$ pygmentize test.py
print "Hello World"
@@ -28,7 +28,7 @@ written to stdout.
The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted
if an output file name is given and has a supported extension).
If no output file name is given and ``-f`` is omitted, the
-`TerminalFormatter` is used.
+:class:`.TerminalFormatter` is used.
The above command could therefore also be given as::
@@ -82,14 +82,15 @@ Usage is as follows::
generates a CSS style sheet (because you selected the HTML formatter) for
the "colorful" style prepending a ".syntax" selector to all style rules.
-For an explanation what ``-a`` means for `a particular formatter`_, look for
-the `arg` argument for the formatter's `get_style_defs()` method.
+For an explanation what ``-a`` means for :doc:`a particular formatter
+<formatters>`, look for the `arg` argument for the formatter's
+:meth:`.get_style_defs()` method.
Getting lexer names
-------------------
-*New in Pygments 1.0.*
+.. versionadded:: 1.0
The ``-N`` option guesses a lexer name for a given filename, so that ::
@@ -125,7 +126,7 @@ will print the help for the Python lexer, etc.
A note on encodings
-------------------
-*New in Pygments 0.9.*
+.. versionadded:: 0.9
Pygments tries to be smart regarding encodings in the formatting process:
@@ -141,7 +142,4 @@ Pygments tries to be smart regarding encodings in the formatting process:
* If you don't give an encoding and haven't given an output file (that means
output is written to the console), the default encoding for lexer and
- formatter is the terminal encoding (`sys.stdout.encoding`).
-
-
-.. _a particular formatter: formatters.txt
+ formatter is the terminal encoding (``sys.stdout.encoding``).
diff --git a/docs/src/filterdevelopment.txt b/doc/docs/filterdevelopment.rst
index c60e1e84..bc399a6f 100644
--- a/docs/src/filterdevelopment.txt
+++ b/doc/docs/filterdevelopment.rst
@@ -4,7 +4,7 @@
Write your own filter
=====================
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
Writing own filters is very easy. All you have to do is to subclass
the `Filter` class and override the `filter` method. Additionally a
diff --git a/docs/src/filters.txt b/doc/docs/filters.rst
index 522f6330..ff2519a3 100644
--- a/docs/src/filters.txt
+++ b/doc/docs/filters.rst
@@ -4,7 +4,7 @@
Filters
=======
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
You can filter token streams coming from lexers to improve or annotate the
output. For example, you can highlight special words in comments, convert
@@ -31,12 +31,11 @@ To get a list of all registered filters by name, you can use the
`get_all_filters()` function from the `pygments.filters` module that returns an
iterable for all known filters.
-If you want to write your own filter, have a look at `Write your own filter`_.
-
-.. _Write your own filter: filterdevelopment.txt
+If you want to write your own filter, have a look at :doc:`Write your own filter
+<filterdevelopment>`.
Builtin Filters
===============
-[builtin_filter_docs]
+.. pygmentsdoc:: filters
diff --git a/docs/src/formatterdevelopment.txt b/doc/docs/formatterdevelopment.rst
index 83a13b6a..2bfac05c 100644
--- a/docs/src/formatterdevelopment.txt
+++ b/doc/docs/formatterdevelopment.rst
@@ -4,7 +4,7 @@
Write your own formatter
========================
-As well as creating `your own lexer <lexerdevelopment.txt>`_, writing a new
+As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new
formatter for Pygments is easy and straightforward.
A formatter is a class that is initialized with some keyword arguments (the
diff --git a/docs/src/formatters.txt b/doc/docs/formatters.rst
index 7a590648..9e7074e8 100644
--- a/docs/src/formatters.txt
+++ b/doc/docs/formatters.rst
@@ -12,8 +12,6 @@ Common options
All formatters support these options:
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This will
be used to convert the token strings (which are Unicode strings)
to byte strings in the output (default: ``None``).
@@ -30,19 +28,21 @@ All formatters support these options:
supports Unicode arguments to `write()`. Using a regular file object
wouldn't work.
-`outencoding`
- *New in Pygments 0.7.*
+ .. versionadded:: 0.6
+`outencoding`
When using Pygments from the command line, any `encoding` option given is
passed to the lexer and the formatter. This is sometimes not desirable,
for example if you want to set the input encoding to ``"guess"``.
Therefore, `outencoding` has been introduced which overrides `encoding`
for the formatter if given.
+ .. versionadded:: 0.7
+
Formatter classes
=================
-All these classes are importable from `pygments.formatters`.
+All these classes are importable from :mod:`pygments.formatters`.
-[builtin_formatter_docs]
+.. pygmentsdoc:: formatters
diff --git a/doc/docs/index.rst b/doc/docs/index.rst
new file mode 100644
index 00000000..30d5c085
--- /dev/null
+++ b/doc/docs/index.rst
@@ -0,0 +1,66 @@
+Pygments documentation
+======================
+
+**Starting with Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ ../download
+ quickstart
+ cmdline
+
+**Builtin components**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexers
+ filters
+ formatters
+ styles
+
+**Reference**
+
+.. toctree::
+ :maxdepth: 1
+
+ unicode
+ tokens
+ api
+
+**Hacking for Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ lexerdevelopment
+ formatterdevelopment
+ filterdevelopment
+ plugins
+
+**Hints and tricks**
+
+.. toctree::
+ :maxdepth: 1
+
+ rstdirective
+ moinmoin
+ java
+ integrate
+
+**About Pygments**
+
+.. toctree::
+ :maxdepth: 1
+
+ changelog
+ authors
+
+
+If you find bugs or have suggestions for the documentation, please look
+:ref:`here <contribute>` for info on how to contact the team.
+
+.. XXX You can download an offline version of this documentation from the
+ :doc:`download page </download>`.
+
diff --git a/docs/src/integrate.txt b/doc/docs/integrate.rst
index 6f8c1253..03fc268f 100644
--- a/docs/src/integrate.txt
+++ b/doc/docs/integrate.rst
@@ -23,8 +23,9 @@ Markdown
--------
Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code
-that uses Pygments to render source code in `external/markdown-processor.py`.
-You can copy and adapt it to your liking.
+that uses Pygments to render source code in
+:file:`external/markdown-processor.py`. You can copy and adapt it to your
+liking.
.. _Markdown: http://www.freewisdom.org/projects/python-markdown/
@@ -41,8 +42,3 @@ Bash completion
The source distribution contains a file ``external/pygments.bashcomp`` that
sets up completion for the ``pygmentize`` command in bash.
-
-Java
-----
-
-See the `Java quickstart <java.txt>`_ document.
diff --git a/docs/src/java.txt b/doc/docs/java.rst
index 5eb6196a..5eb6196a 100644
--- a/docs/src/java.txt
+++ b/doc/docs/java.rst
diff --git a/docs/src/lexerdevelopment.txt b/doc/docs/lexerdevelopment.rst
index 730a08b2..eab1306a 100644
--- a/docs/src/lexerdevelopment.txt
+++ b/doc/docs/lexerdevelopment.rst
@@ -7,13 +7,13 @@ Write your own lexer
If a lexer for your favorite language is missing in the Pygments package, you can
easily write your own and extend Pygments.
-All you need can be found inside the `pygments.lexer` module. As you can read in
-the `API documentation <api.txt>`_, a lexer is a class that is initialized with
-some keyword arguments (the lexer options) and that provides a
-`get_tokens_unprocessed()` method which is given a string or unicode object with
-the data to parse.
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation <api>`, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to parse.
-The `get_tokens_unprocessed()` method must return an iterator or iterable
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
containing tuples in the form ``(index, token, value)``. Normally you don't need
to do this since there are numerous base lexers you can subclass.
@@ -21,9 +21,9 @@ to do this since there are numerous base lexers you can subclass.
RegexLexer
==========
-A very powerful (but quite easy to use) lexer is the `RegexLexer`. This lexer
-base class allows you to define lexing rules in terms of *regular expressions*
-for different *states*.
+A very powerful (but quite easy to use) lexer is the :class:`RegexLexer`. This
+lexer base class allows you to define lexing rules in terms of *regular
+expressions* for different *states*.
States are groups of regular expressions that are matched against the input
string at the *current position*. If one of these expressions matches, a
@@ -289,8 +289,9 @@ There are a few more things you can do with states:
the closing ``*/``. Then, both states are popped from the stack again and
lexing continues in the root state.
- *New in Pygments 0.9:* The tuple can contain the special ``'#push'`` and
- ``'#pop'`` (but not ``'#pop:n'``) directives.
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
- You can include the rules of a state in the definition of another. This is
@@ -598,6 +599,4 @@ the ``get_tokens_unprocessed()`` method. The following lexer subclasses the
The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
-**Note** Do not confuse this with the `filter`_ system.
-
-.. _filter: filters.txt
+.. note:: Do not confuse this with the :doc:`filter <filters>` system.
diff --git a/docs/src/lexers.txt b/doc/docs/lexers.rst
index 016de6ce..914b53ef 100644
--- a/docs/src/lexers.txt
+++ b/doc/docs/lexers.rst
@@ -18,14 +18,13 @@ Currently, **all lexers** support these options:
`ensurenl`
Make sure that the input ends with a newline (default: ``True``). This
is required for some lexers that consume input linewise.
- *New in Pygments 1.3.*
+
+ .. versionadded:: 1.3
`tabsize`
If given and greater than 0, expand tabs in the input (default: ``0``).
`encoding`
- *New in Pygments 0.6.*
-
If given, must be an encoding name (such as ``"utf-8"``). This encoding
will be used to convert the input string to Unicode (if it is not already
a Unicode string). The default is ``"latin1"``.
@@ -35,18 +34,21 @@ Currently, **all lexers** support these options:
`chardet library <http://chardet.feedparser.org/>`__ is used to
guess the encoding of the input.
+ .. versionadded:: 0.6
+
The "Short Names" field lists the identifiers that can be used with the
`get_lexer_by_name()` function.
These lexers are builtin and can be imported from `pygments.lexers`:
-[builtin_lexer_docs]
+.. pygmentsdoc:: lexers
+
Iterating over all lexers
-------------------------
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
To get all lexers (both the builtin and the plugin ones), you can
use the `get_all_lexers()` function from the `pygments.lexers`
diff --git a/docs/src/moinmoin.txt b/doc/docs/moinmoin.rst
index 8b2216b3..8b2216b3 100644
--- a/docs/src/moinmoin.txt
+++ b/doc/docs/moinmoin.rst
diff --git a/docs/src/plugins.txt b/doc/docs/plugins.rst
index a6f8d7b0..a6f8d7b0 100644
--- a/docs/src/plugins.txt
+++ b/doc/docs/plugins.rst
diff --git a/docs/src/quickstart.txt b/doc/docs/quickstart.rst
index 40409104..dba7698a 100644
--- a/docs/src/quickstart.txt
+++ b/doc/docs/quickstart.rst
@@ -58,8 +58,8 @@ can be produced by:
print HtmlFormatter().get_style_defs('.highlight')
-The argument to `get_style_defs` is used as an additional CSS selector: the output
-may look like this:
+The argument to :func:`get_style_defs` is used as an additional CSS selector:
+the output may look like this:
.. sourcecode:: css
@@ -71,9 +71,9 @@ may look like this:
Options
=======
-The `highlight()` function supports a fourth argument called `outfile`, it must be
-a file object if given. The formatted output will then be written to this file
-instead of being returned as a string.
+The :func:`highlight()` function supports a fourth argument called *outfile*, it
+must be a file object if given. The formatted output will then be written to
+this file instead of being returned as a string.
Lexers and formatters both support options. They are given to them as keyword
arguments either to the class or to the lookup method:
@@ -103,9 +103,9 @@ Important options include:
For an overview of builtin lexers and formatters and their options, visit the
-`lexer <lexers.txt>`_ and `formatters <formatters.txt>`_ lists.
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
-For a documentation on filters, see `this page <filters.txt>`_.
+For a documentation on filters, see :doc:`this page <filters>`.
Lexer and formatter lookup
@@ -131,9 +131,9 @@ one of the following methods:
All these functions accept keyword arguments; they will be passed to the lexer
as options.
-A similar API is available for formatters: use `get_formatter_by_name()` and
-`get_formatter_for_filename()` from the `pygments.formatters` module
-for this purpose.
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
Guessing lexers
@@ -153,16 +153,17 @@ or some template tags), use these functions:
>>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
<pygments.lexers.PythonLexer>
-`guess_lexer()` passes the given content to the lexer classes' `analyse_text()`
-method and returns the one for which it returns the highest number.
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
All lexers have two different filename pattern lists: the primary and the
-secondary one. The `get_lexer_for_filename()` function only uses the primary
-list, whose entries are supposed to be unique among all lexers.
-`guess_lexer_for_filename()`, however, will first loop through all lexers and
-look at the primary and secondary filename patterns if the filename matches.
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers
+and look at the primary and secondary filename patterns if the filename matches.
If only one lexer matches, it is returned, else the guessing mechanism of
-`guess_lexer()` is used with the matching lexers.
+:func:`.guess_lexer()` is used with the matching lexers.
As usual, keyword arguments to these functions are given to the created lexer
as options.
@@ -171,7 +172,8 @@ as options.
Command line usage
==================
-You can use Pygments from the command line, using the `pygmentize` script::
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
$ pygmentize test.py
@@ -199,4 +201,5 @@ it can be created with::
where ``default`` is the style name.
-More options and tricks and be found in the `command line reference <cmdline.txt>`_.
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
diff --git a/docs/src/rstdirective.txt b/doc/docs/rstdirective.rst
index c0d503b3..c0d503b3 100644
--- a/docs/src/rstdirective.txt
+++ b/doc/docs/rstdirective.rst
diff --git a/docs/src/styles.txt b/doc/docs/styles.rst
index e3e9cfb3..7ef4de1b 100644
--- a/docs/src/styles.txt
+++ b/doc/docs/styles.rst
@@ -68,7 +68,7 @@ they can be used for a variety of formatters.)
To make the style usable for Pygments, you must
-* either register it as a plugin (see `the plugin docs <plugins.txt>`_)
+* either register it as a plugin (see :doc:`the plugin docs <plugins>`)
* or drop it into the `styles` subpackage of your Pygments distribution one style
class per style, where the file name is the style name and the class name is
`StylenameClass`. For example, if your style should be called
@@ -132,7 +132,7 @@ To get a list of known styles you can use this snippet:
Getting a list of available styles
==================================
-*New in Pygments 0.6.*
+.. versionadded:: 0.6
Because it could be that a plugin registered a style, there is
a way to iterate over all styles:
diff --git a/docs/src/tokens.txt b/doc/docs/tokens.rst
index 4900a9ab..ffd87ab7 100644
--- a/docs/src/tokens.txt
+++ b/doc/docs/tokens.rst
@@ -4,7 +4,9 @@
Builtin Tokens
==============
-Inside the `pygments.token` module, there is a special object called `Token`
+.. module:: pygments.token
+
+In the :mod:`pygments.token` module, there is a special object called `Token`
that is used to create token types.
You can create a new token type by accessing an attribute of `Token`:
@@ -30,8 +32,8 @@ As of Pygments 0.7 you can also use the ``in`` operator to perform set tests:
>>> Comment in Comment.Multi
False
-This can be useful in `filters`_ and if you write lexers on your own without
-using the base lexers.
+This can be useful in :doc:`filters <filters>` and if you write lexers on your
+own without using the base lexers.
You can also split a token type into a hierarchy, and get the parent of it:
@@ -55,7 +57,7 @@ For some tokens aliases are already defined:
>>> String
Token.Literal.String
-Inside the `pygments.token` module the following aliases are defined:
+Inside the :mod:`pygments.token` module the following aliases are defined:
============= ============================ ====================================
`Text` `Token.Text` for any type of text data
@@ -280,7 +282,7 @@ Operators
Punctuation
===========
-*New in Pygments 0.7.*
+.. versionadded:: 0.7
`Punctuation`
For any punctuation which is not an operator (e.g. ``[``, ``(``...)
@@ -345,5 +347,3 @@ highlight a programming language but a patch file.
`Generic.Traceback`
Marks the token value as a part of an error traceback.
-
-.. _filters: filters.txt
diff --git a/docs/src/unicode.txt b/doc/docs/unicode.rst
index dc6394a9..e79b4bec 100644
--- a/docs/src/unicode.txt
+++ b/doc/docs/unicode.rst
@@ -3,8 +3,8 @@ Unicode and Encodings
=====================
Since Pygments 0.6, all lexers use unicode strings internally. Because of that
-you might encounter the occasional `UnicodeDecodeError` if you pass strings with the
-wrong encoding.
+you might encounter the occasional :exc:`UnicodeDecodeError` if you pass strings
+with the wrong encoding.
Per default all lexers have their input encoding set to `latin1`.
If you pass a lexer a string object (not unicode), it tries to decode the data
@@ -39,11 +39,12 @@ Unicode string with this encoding before writing it. This is the case for
`sys.stdout`, for example. The other formatters don't have that behavior.
Another note: If you call Pygments via the command line (`pygmentize`),
-encoding is handled differently, see `the command line docs <cmdline.txt>`_.
+encoding is handled differently, see :doc:`the command line docs <cmdline>`.
-*New in Pygments 0.7*: the formatters now also accept an `outencoding` option
-which will override the `encoding` option if given. This makes it possible to
-use a single options dict with lexers and formatters, and still have different
-input and output encodings.
+.. versionadded:: 0.7
+ The formatters now also accept an `outencoding` option which will override
+ the `encoding` option if given. This makes it possible to use a single
+ options dict with lexers and formatters, and still have different input and
+ output encodings.
.. _chardet: http://chardet.feedparser.org/
diff --git a/doc/download.rst b/doc/download.rst
new file mode 100644
index 00000000..cf32f481
--- /dev/null
+++ b/doc/download.rst
@@ -0,0 +1,41 @@
+Download and installation
+=========================
+
+The current release is version |version|.
+
+Packaged versions
+-----------------
+
+You can download it `from the Python Package Index
+<http://pypi.python.org/pypi/Pygments>`_. For installation of packages from
+PyPI, we recommend `Pip <http://www.pip-installer.org>`_, which works on all
+major platforms.
+
+Under Linux, most distributions include a package for Pygments, usually called
+``pygments`` or ``python-pygments``. You can install it with the package
+manager as usual.
+
+Development sources
+-------------------
+
+We're using the `Mercurial <http://selenic.com/mercurial>`_ version control
+system. You can get the development source using this command::
+
+ hg clone http://bitbucket.org/birkenfeld/pygments-main pygments
+
+Development takes place at `Bitbucket
+<http://bitbucket.org/birkenfeld/pygments-main>`_, you can browse the source
+online `here <http://bitbucket.org/birkenfeld/pygments-main/src>`_.
+
+The latest changes in the development source code are listed in the `changelog
+<http://bitbucket.org/birkenfeld/pygments-main/src/tip/CHANGES>`_.
+
+.. Documentation
+ -------------
+
+.. XXX todo
+
+ You can download the <a href="/docs/">documentation</a> either as
+ a bunch of rst files from the Mercurial repository, see above, or
+ as a tar.gz containing rendered HTML files:</p>
+ <p><a href="/docs/download/pygmentsdocs.tar.gz">pygmentsdocs.tar.gz</a></p>
diff --git a/doc/faq.rst b/doc/faq.rst
new file mode 100644
index 00000000..0f65b9fe
--- /dev/null
+++ b/doc/faq.rst
@@ -0,0 +1,143 @@
+:orphan:
+
+Pygments FAQ
+=============
+
+What is Pygments?
+-----------------
+
+Pygments is a syntax highlighting engine written in Python. That means, it will
+take source code (or other markup) in a supported language and output a
+processed version (in different formats) containing syntax highlighting markup.
+
+Its features include:
+
+* a wide range of common languages and markup formats is supported (look here
+ for a list)
+* new languages and formats are added easily
+* a number of output formats is available, including:
+
+ - HTML
+ - ANSI sequences (console output)
+ - LaTeX
+ - RTF
+
+* it is usable as a command-line tool and as a library
+* parsing and formatting is fast
+
+Pygments is licensed under the BSD license.
+
+Where does the name Pygments come from?
+---------------------------------------
+
+*Py* of course stands for Python, while *pigments* are used for coloring paint,
+and in this case, source code!
+
+What are the system requirements?
+---------------------------------
+
+Pygments only needs a standard Python install, version 2.6 or higher or version
+3.3 or higher for Python 3. No additional libraries are needed.
+
+How can I use Pygments?
+-----------------------
+
+Pygments is usable as a command-line tool as well as a library.
+
+From the command-line, usage looks like this (assuming the pygmentize script is
+properly installed)::
+
+ pygmentize -f html /path/to/file.py
+
+This will print an HTML-highlighted version of /path/to/file.py to standard output.
+
+For a complete help, please run ``pygmentize -h``.
+
+Usage as a library is thoroughly demonstrated in the Documentation section.
+
+How do I make a new style?
+--------------------------
+
+Please see the documentation on styles.
+
+How can I report a bug or suggest a feature?
+--------------------------------------------
+
+Please report bugs and feature wishes in the tracker at Bitbucket.
+
+You can also e-mail the author or use IRC, see the contact details.
+
+I want support for this language!
+--------------------------------------
+
+Instead of waiting for others to include language support, why not write it
+yourself? All you have to know is :doc:`outlined in the docs
+<docs/lexerdevelopment>`.
+
+Can I use Pygments for programming language processing?
+-------------------------------------------------------
+
+The Pygments lexing machinery is quite powerful and can be used to build lexers for
+basically all languages. However, parsing them is not possible, though some
+lexers go some steps in this direction in order to e.g. highlight function names
+differently.
+
+Also, error reporting is not the scope of Pygments. It focuses on correctly
+highlighting syntactically valid documents, not finding and compensating errors.
+
+Who uses Pygments?
+------------------
+
+This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
+
+* `Pygments API <http://pygments.appspot.com/>`_, a HTTP POST interface to Pygments
+* `The Sphinx documentation builder <http://sphinx.pocoo.org/>`_, for embedded source examples
+* `rst2pdf <http://code.google.com/p/rst2pdf/>`_, a reStructuredText to PDF converter
+* `Zine <http://zine.pocoo.org/>`_, a Python blogging system
+* `Trac <http://trac.edgewall.org/>`_, the universal project management tool
+* `Bruce <http://r1chardj0n3s.googlepages.com/bruce>`_, a reStructuredText presentation tool
+* `AsciiDoc <http://www.methods.co.nz/asciidoc/>`_, a text-based documentation generator
+* `ActiveState Code <http://code.activestate.com/>`_, the Python Cookbook successor
+* `ViewVC <http://viewvc.org/>`_, a web-based version control repository browser
+* `BzrFruit <http://repo.or.cz/w/bzrfruit.git>`_, a Bazaar branch viewer
+* `QBzr <http://bazaar-vcs.org/QBzr>`_, a cross-platform Qt-based GUI front end for Bazaar
+* `BitBucket <http://bitbucket.org/>`_, a Mercurial and Git hosting site
+* `GitHub <http://github.com/>`_, a site offering secure Git hosting and collaborative development
+* `Review Board <http://www.review-board.org/>`_, a collaborative code reviewing tool
+* `skeletonz <http://orangoo.com/skeletonz/>`_, a Python powered content management system
+* `Diamanda <http://code.google.com/p/diamanda/>`_, a Django powered wiki system with support for Pygments
+* `Progopedia <http://progopedia.ru/>`_ (`English <http://progopedia.com/>`_),
+ an encyclopedia of programming languages
+* `Postmarkup <http://code.google.com/p/postmarkup/>`_, a BBCode to XHTML generator
+* `Language Comparison <http://michaelsilver.us/lc>`_, a site that compares different programming languages
+* `BPython <http://www.noiseforfree.com/bpython/>`_, a curses-based intelligent Python shell
+* `Challenge-You! <http://challenge-you.appspot.com/>`_, a site offering programming challenges
+* `PIDA <http://pida.co.uk/>`_, a universal IDE written in Python
+* `PuDB <http://pypi.python.org/pypi/pudb>`_, a console Python debugger
+* `XWiki <http://www.xwiki.org/>`_, a wiki-based development framework in Java, using Jython
+* `roux <http://ananelson.com/software/roux/>`_, a script for running R scripts
+ and creating beautiful output including graphs
+* `hurl <http://hurl.it/>`_, a web service for making HTTP requests
+* `wxHTMLPygmentizer <http://colinbarnette.net/projects/wxHTMLPygmentizer>`_ is
+ a GUI utility, used to make code-colorization easier
+* `WpPygments <http://blog.mirotin.net/?page_id=49>`_, a highlighter plugin for WordPress
+* `LodgeIt <http://paste.pocoo.org/>`_, a pastebin with XMLRPC support and diffs
+* `SpammCan <http://chrisarndt.de/projects/spammcan/>`_, a pastebin (demo see
+ `here <http://paste.chrisarndt.de/>`_)
+* `WowAce.com pastes <http://www.wowace.com/paste/>`_, a pastebin
+* `Siafoo <http://siafoo.net>`_, a tool for sharing and storing useful code and programming experience
+* `D source <http://www.dsource.org/>`_, a community for the D programming language
+* `dumpz.org <http://dumpz.org/>`_, a pastebin
+* `dpaste.com <http://dpaste.com/>`_, another Django pastebin
+* `PylonsHQ Pasties <http://pylonshq.com/pasties/new>`_, a pastebin
+* `Django snippets <http://www.djangosnippets.org/>`_, a pastebin for Django code
+* `Fayaa <http://www.fayaa.com/code/>`_, a Chinese pastebin
+* `Incollo.com <http://incollo.com>`_, a free collaborative debugging tool
+* `PasteBox <http://p.boxnet.eu/>`_, a pastebin focused on privacy
+* `xinotes.org <http://www.xinotes.org/>`_, a site to share notes, code snippets etc.
+* `hilite.me <http://www.hilite.me/>`_, a site to highlight code snippets
+* `patx.me <http://patx.me/paste>`_, a pastebin
+
+If you have a project or web site using Pygments, drop me a line, and I'll add a
+link here.
+
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 00000000..a0e41210
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,53 @@
+Welcome!
+========
+
+This is the home of Pygments. It is a generic syntax highlighter for general use
+in all kinds of software such as forum systems, wikis or other applications that
+need to prettify source code. Highlights are:
+
+* a wide range of common languages and markup formats is supported
+* special attention is paid to details that increase highlighting quality
+* support for new languages and formats is added easily; most languages use a simple regex-based lexing mechanism
+* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI sequences
+* it is usable as a command-line tool and as a library
+* ... and it highlights even Brainf*ck!
+
+Read more in the FAQ list or the documentation, or download the latest release.
+
+Though Pygments has not yet won an award, we trust that you will notice it's a top quality product <wink>.
+
+.. _contribute:
+
+Contribute
+----------
+
+Like every open-source project, we are always looking for volunteers to help us
+with programming. Python knowledge is required, but don't fear: Python is a very
+clear and easy to learn language.
+
+Development takes place on `Bitbucket
+<https://bitbucket.org/birkenfeld/pygments-main>`_, where the Mercurial
+repository, tickets and pull requests can be viewed.
+
+Our primary communication instrument is the IRC channel **#pocoo** on the
+Freenode network. To join it, let your IRC client connect to
+``irc.freenode.net`` and do ``/join #pocoo``.
+
+If you found a bug, just open a ticket in the Bitbucket tracker. Be sure to log
+in to be notified when the issue is fixed -- development is not fast-paced as
+the library is quite stable. You can also send an e-mail to the developers, see
+below.
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo <http://dev.pocoo.org/>`_ team and **Tim Hatch**.
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ docs/index
diff --git a/doc/languages.rst b/doc/languages.rst
new file mode 100644
index 00000000..2e79a35e
--- /dev/null
+++ b/doc/languages.rst
@@ -0,0 +1,148 @@
+:orphan:
+
+Supported languages
+===================
+
+Pygments supports an ever-growing range of languages. Watch this space...
+
+Programming languages
+---------------------
+
+* ActionScript
+* Ada
+* ANTLR
+* AppleScript
+* Assembly (various)
+* Asymptote
+* Awk
+* Befunge
+* Boo
+* BrainFuck
+* C, C++
+* C#
+* Clojure
+* CoffeeScript
+* ColdFusion
+* Common Lisp
+* Coq
+* `Cython <http://cython.org>`_
+* `D <http://digitalmars.com/d>`_
+* Dart
+* Delphi
+* Dylan
+* Erlang
+* Factor
+* Fancy
+* Fortran
+* F#
+* GAP
+* Gherkin (Cucumber)
+* GL shaders
+* Groovy
+* `Haskell <http://www.haskell.org>`_ (incl. Literate Haskell)
+* IDL
+* Io
+* Java
+* JavaScript
+* LLVM
+* Logtalk
+* `Lua <http://www.lua.org>`_
+* Matlab
+* MiniD
+* Modelica
+* Modula-2
+* MuPad
+* Nemerle
+* Nimrod
+* Objective-C
+* Objective-J
+* Octave
+* OCaml
+* PHP
+* `Perl <http://perl.org>`_
+* PovRay
+* PostScript
+* PowerShell
+* Prolog
+* `Python <http://www.python.org>`_ 2.x and 3.x (incl. console sessions and tracebacks)
+* Rebol
+* Redcode
+* `Ruby <http://www.ruby-lang.org>`_ (incl. irb sessions)
+* Rust
+* S, S-Plus, R
+* Scala
+* Scheme
+* Scilab
+* Smalltalk
+* SNOBOL
+* Tcl
+* Vala
+* Verilog
+* VHDL
+* Visual Basic.NET
+* Visual FoxPro
+* XQuery
+
+Template languages
+------------------
+
+* Cheetah templates
+* `Django <http://www.djangoproject.com>`_ / `Jinja
+ <http://jinja.pocoo.org/jinja>`_ templates
+* ERB (Ruby templating)
+* `Genshi <http://genshi.edgewall.org>`_ (the Trac template language)
+* JSP (Java Server Pages)
+* `Myghty <http://www.myghty.org>`_ (the HTML::Mason based framework)
+* `Mako <http://www.makotemplates.org/>`_ (the Myghty successor)
+* `Smarty <http://smarty.php.net>`_ templates (PHP templating)
+* Tea
+
+Other markup
+------------
+
+* Apache config files
+* Bash shell scripts
+* BBCode
+* CMake
+* CSS
+* Debian control files
+* Diff files
+* DTD
+* Gettext catalogs
+* Gnuplot script
+* Groff markup
+* HTML
+* HTTP sessions
+* INI-style config files
+* IRC logs (irssi style)
+* Lighttpd config files
+* Makefiles
+* MoinMoin/Trac Wiki markup
+* MySQL
+* Nginx config files
+* POV-Ray scenes
+* Ragel
+* Redcode
+* ReST
+* Robot Framework
+* RPM spec files
+* SQL, also MySQL, SQLite
+* Squid configuration
+* TeX
+* tcsh
+* Vim Script
+* Windows batch files
+* XML
+* XSLT
+* YAML
+
+... that's all?
+---------------
+
+Well, why not write your own? Contributing to Pygments is easy and fun. Look
+:doc:`here <docs/lexerdevelopment>` for the docs on lexer development and
+:ref:`here <contribute>` for contact details.
+
+Note: the languages listed here are supported in the development version. The
+latest release may lack a few of them.
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 00000000..8803c985
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.ghc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/docs/pygmentize.1 b/doc/pygmentize.1
index 71bb6f9c..71bb6f9c 100644
--- a/docs/pygmentize.1
+++ b/doc/pygmentize.1
diff --git a/docs/generate.py b/docs/generate.py
deleted file mode 100755
index f5405074..00000000
--- a/docs/generate.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Generate Pygments Documentation
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Generates a bunch of html files containing the documentation.
-
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import os
-import sys
-from datetime import datetime
-from cgi import escape
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-from docutils.core import publish_parts
-from docutils.writers import html4css1
-
-from jinja2 import Template
-
-# try to use the right Pygments to build the docs
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
-
-from pygments import highlight, __version__
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-
-
-LEXERDOC = '''
-`%s`
-%s
- :Short names: %s
- :Filename patterns: %s
- :Mimetypes: %s
-
-'''
-
-def generate_lexer_docs():
- from pygments.lexers import LEXERS
-
- out = []
-
- modules = {}
- moduledocstrings = {}
- for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]):
- module = data[0]
- mod = __import__(module, None, None, [classname])
- cls = getattr(mod, classname)
- if not cls.__doc__:
- print "Warning: %s does not have a docstring." % classname
- modules.setdefault(module, []).append((
- classname,
- cls.__doc__,
- ', '.join(data[2]) or 'None',
- ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
- ', '.join(data[4]) or 'None'))
- if module not in moduledocstrings:
- moduledocstrings[module] = mod.__doc__
-
- for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]):
- heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
- out.append('\n' + heading + '\n' + '-'*len(heading) + '\n')
- for data in lexers:
- out.append(LEXERDOC % data)
- return ''.join(out).decode('utf-8')
-
-def generate_formatter_docs():
- from pygments.formatters import FORMATTERS
-
- out = []
- for cls, data in sorted(FORMATTERS.iteritems(),
- key=lambda x: x[0].__name__):
- heading = cls.__name__
- out.append('`' + heading + '`\n' + '-'*(2+len(heading)) + '\n')
- out.append(cls.__doc__)
- out.append('''
- :Short names: %s
- :Filename patterns: %s
-
-
-''' % (', '.join(data[1]) or 'None', ', '.join(data[2]).replace('*', '\\*') or 'None'))
- return ''.join(out).decode('utf-8')
-
-def generate_filter_docs():
- from pygments.filters import FILTERS
-
- out = []
- for name, cls in FILTERS.iteritems():
- out.append('''
-`%s`
-%s
- :Name: %s
-''' % (cls.__name__, cls.__doc__, name))
- return ''.join(out).decode('utf-8')
-
-def generate_changelog():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'CHANGES'))
- f = file(fn)
- result = []
- in_header = False
- header = True
- for line in f:
- if header:
- if not in_header and line.strip():
- in_header = True
- elif in_header and not line.strip():
- header = False
- else:
- result.append(line.rstrip())
- f.close()
- return '\n'.join(result).decode('utf-8')
-
-def generate_authors():
- fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
- 'AUTHORS'))
- f = file(fn)
- r = f.read().rstrip().decode('utf-8')
- f.close()
- return r
-
-LEXERDOCS = generate_lexer_docs()
-FORMATTERDOCS = generate_formatter_docs()
-FILTERDOCS = generate_filter_docs()
-CHANGELOG = generate_changelog()
-AUTHORS = generate_authors()
-
-
-PYGMENTS_FORMATTER = HtmlFormatter(style='pastie', cssclass='syntax')
-
-USAGE = '''\
-Usage: %s <mode> <destination> [<source.txt> ...]
-
-Generate either python or html files out of the documentation.
-
-Mode can either be python or html.\
-''' % sys.argv[0]
-
-TEMPLATE = '''\
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
- "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
- <title>{{ title }} &mdash; Pygments</title>
- <meta http-equiv="content-type" content="text/html; charset=utf-8">
- <style type="text/css">
- {{ style }}
- </style>
-</head>
-<body>
- <div id="content">
- <h1 class="heading">Pygments</h1>
- <h2 class="subheading">{{ title }}</h2>
- {% if file_id != "index" %}
- <a id="backlink" href="index.html">&laquo; Back To Index</a>
- {% endif %}
- {% if toc %}
- <div class="toc">
- <h2>Contents</h2>
- <ul class="contents">
- {% for key, value in toc %}
- <li><a href="{{ key }}">{{ value }}</a></li>
- {% endfor %}
- </ul>
- </div>
- {% endif %}
- {{ body }}
- </div>
-</body>
-<!-- generated on: {{ generation_date }}
- file id: {{ file_id }} -->
-</html>\
-'''
-
-STYLESHEET = '''\
-body {
- background-color: #f2f2f2;
- margin: 0;
- padding: 0;
- font-family: 'Georgia', serif;
- color: #111;
-}
-
-#content {
- background-color: white;
- padding: 20px;
- margin: 20px auto 20px auto;
- max-width: 800px;
- border: 4px solid #ddd;
-}
-
-h1 {
- font-weight: normal;
- font-size: 40px;
- color: #09839A;
-}
-
-h2 {
- font-weight: normal;
- font-size: 30px;
- color: #C73F00;
-}
-
-h1.heading {
- margin: 0 0 30px 0;
-}
-
-h2.subheading {
- margin: -30px 0 0 45px;
-}
-
-h3 {
- margin-top: 30px;
-}
-
-table.docutils {
- border-collapse: collapse;
- border: 2px solid #aaa;
- margin: 0.5em 1.5em 0.5em 1.5em;
-}
-
-table.docutils td {
- padding: 2px;
- border: 1px solid #ddd;
-}
-
-p, li, dd, dt, blockquote {
- font-size: 15px;
- color: #333;
-}
-
-p {
- line-height: 150%;
- margin-bottom: 0;
- margin-top: 10px;
-}
-
-hr {
- border-top: 1px solid #ccc;
- border-bottom: 0;
- border-right: 0;
- border-left: 0;
- margin-bottom: 10px;
- margin-top: 20px;
-}
-
-dl {
- margin-left: 10px;
-}
-
-li, dt {
- margin-top: 5px;
-}
-
-dt {
- font-weight: bold;
-}
-
-th {
- text-align: left;
-}
-
-a {
- color: #990000;
-}
-
-a:hover {
- color: #c73f00;
-}
-
-pre {
- background-color: #f9f9f9;
- border-top: 1px solid #ccc;
- border-bottom: 1px solid #ccc;
- padding: 5px;
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
-}
-
-tt {
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- color: black;
- padding: 1px 2px 1px 2px;
- background-color: #f0f0f0;
-}
-
-cite {
- /* abusing <cite>, it's generated by ReST for `x` */
- font-size: 13px;
- font-family: Bitstream Vera Sans Mono,monospace;
- font-weight: bold;
- font-style: normal;
-}
-
-#backlink {
- float: right;
- font-size: 11px;
- color: #888;
-}
-
-div.toc {
- margin: 0 0 10px 0;
-}
-
-div.toc h2 {
- font-size: 20px;
-}
-''' #'
-
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found
- lexer = get_lexer_by_name('text')
- parsed = highlight(u'\n'.join(content), lexer, PYGMENTS_FORMATTER)
- return [nodes.raw('', parsed, format="html")]
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-directives.register_directive('sourcecode', pygments_directive)
-
-
-def create_translator(link_style):
- class Translator(html4css1.HTMLTranslator):
- def visit_reference(self, node):
- refuri = node.get('refuri')
- if refuri is not None and '/' not in refuri and refuri.endswith('.txt'):
- node['refuri'] = link_style(refuri[:-4])
- html4css1.HTMLTranslator.visit_reference(self, node)
- return Translator
-
-
-class DocumentationWriter(html4css1.Writer):
-
- def __init__(self, link_style):
- html4css1.Writer.__init__(self)
- self.translator_class = create_translator(link_style)
-
- def translate(self):
- html4css1.Writer.translate(self)
- # generate table of contents
- contents = self.build_contents(self.document)
- contents_doc = self.document.copy()
- contents_doc.children = contents
- contents_visitor = self.translator_class(contents_doc)
- contents_doc.walkabout(contents_visitor)
- self.parts['toc'] = self._generated_toc
-
- def build_contents(self, node, level=0):
- sections = []
- i = len(node) - 1
- while i >= 0 and isinstance(node[i], nodes.section):
- sections.append(node[i])
- i -= 1
- sections.reverse()
- toc = []
- for section in sections:
- try:
- reference = nodes.reference('', '', refid=section['ids'][0], *section[0])
- except IndexError:
- continue
- ref_id = reference['refid']
- text = escape(reference.astext())
- toc.append((ref_id, text))
-
- self._generated_toc = [('#%s' % href, caption) for href, caption in toc]
- # no further processing
- return []
-
-
-def generate_documentation(data, link_style):
- writer = DocumentationWriter(link_style)
- data = data.replace('[builtin_lexer_docs]', LEXERDOCS).\
- replace('[builtin_formatter_docs]', FORMATTERDOCS).\
- replace('[builtin_filter_docs]', FILTERDOCS).\
- replace('[changelog]', CHANGELOG).\
- replace('[authors]', AUTHORS)
- parts = publish_parts(
- data,
- writer=writer,
- settings_overrides={
- 'initial_header_level': 3,
- 'field_name_limit': 50,
- }
- )
- return {
- 'title': parts['title'],
- 'body': parts['body'],
- 'toc': parts['toc']
- }
-
-
-def handle_python(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read()
- def urlize(href):
- # create links for the pygments webpage
- if href == 'index.txt':
- return '/docs/'
- else:
- return '/docs/%s/' % href
- parts = generate_documentation(content, urlize)
- result = file(os.path.join(dst, title + '.py'), 'w')
- result.write('# -*- coding: utf-8 -*-\n')
- result.write('"""\n Pygments Documentation - %s\n' % title)
- result.write(' %s\n\n' % ('~' * (24 + len(title))))
- result.write(' Generated on: %s\n"""\n\n' % now)
- result.write('import datetime\n')
- result.write('DATE = %r\n' % now)
- result.write('TITLE = %r\n' % parts['title'])
- result.write('TOC = %r\n' % parts['toc'])
- result.write('BODY = %r\n' % parts['body'])
- result.close()
-
-
-def handle_html(filename, fp, dst):
- now = datetime.now()
- title = os.path.basename(filename)[:-4]
- content = fp.read().decode('utf-8')
- c = generate_documentation(content, (lambda x: './%s.html' % x))
- result = file(os.path.join(dst, title + '.html'), 'w')
- c['style'] = STYLESHEET + PYGMENTS_FORMATTER.get_style_defs('.syntax')
- c['generation_date'] = now
- c['file_id'] = title
- t = Template(TEMPLATE)
- result.write(t.render(c).encode('utf-8'))
- result.close()
-
-
-def run(handle_file, dst, sources=()):
- path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
- if not sources:
- sources = [os.path.join(path, fn) for fn in os.listdir(path)]
- if not os.path.isdir(dst):
- os.makedirs(dst)
- print 'Making docs for Pygments %s in %s' % (__version__, dst)
- for fn in sources:
- if not os.path.isfile(fn):
- continue
- print 'Processing %s' % fn
- f = open(fn)
- try:
- handle_file(fn, f, dst)
- finally:
- f.close()
-
-
-def main(mode, dst='build/', *sources):
- try:
- handler = {
- 'html': handle_html,
- 'python': handle_python
- }[mode]
- except KeyError:
- print 'Error: unknown mode "%s"' % mode
- sys.exit(1)
- run(handler, os.path.realpath(dst), sources)
-
-
-if __name__ == '__main__':
- if len(sys.argv) == 1:
- print USAGE
- else:
- main(*sys.argv[1:])
diff --git a/docs/src/api.txt b/docs/src/api.txt
deleted file mode 100644
index 4276eea2..00000000
--- a/docs/src/api.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-.. -*- mode: rst -*-
-
-=====================
-The full Pygments API
-=====================
-
-This page describes the Pygments API.
-
-High-level API
-==============
-
-Functions from the `pygments` module:
-
-def `lex(code, lexer):`
- Lex `code` with the `lexer` (must be a `Lexer` instance)
- and return an iterable of tokens. Currently, this only calls
- `lexer.get_tokens()`.
-
-def `format(tokens, formatter, outfile=None):`
- Format a token stream (iterable of tokens) `tokens` with the
- `formatter` (must be a `Formatter` instance). The result is
- written to `outfile`, or if that is ``None``, returned as a
- string.
-
-def `highlight(code, lexer, formatter, outfile=None):`
- This is the most high-level highlighting function.
- It combines `lex` and `format` in one function.
-
-
-Functions from `pygments.lexers`:
-
-def `get_lexer_by_name(alias, **options):`
- Return an instance of a `Lexer` subclass that has `alias` in its
- aliases list. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer with that alias is
- found.
-
-def `get_lexer_for_filename(fn, **options):`
- Return a `Lexer` subclass instance that has a filename pattern
- matching `fn`. The lexer is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no lexer for that filename is
- found.
-
-def `get_lexer_for_mimetype(mime, **options):`
- Return a `Lexer` subclass instance that has `mime` in its mimetype
- list. The lexer is given the `options` at its instantiation.
-
- Will raise `pygments.util.ClassNotFound` if not lexer for that mimetype is
- found.
-
-def `guess_lexer(text, **options):`
- Return a `Lexer` subclass instance that's guessed from the text
- in `text`. For that, the `analyse_text()` method of every known
- lexer class is called with the text as argument, and the lexer
- which returned the highest value will be instantiated and returned.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `guess_lexer_for_filename(filename, text, **options):`
- As `guess_lexer()`, but only lexers which have a pattern in `filenames`
- or `alias_filenames` that matches `filename` are taken into consideration.
-
- `pygments.util.ClassNotFound` is raised if no lexer thinks it can handle the
- content.
-
-def `get_all_lexers():`
- Return an iterable over all registered lexers, yielding tuples in the
- format::
-
- (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
-
- *New in Pygments 0.6.*
-
-
-Functions from `pygments.formatters`:
-
-def `get_formatter_by_name(alias, **options):`
- Return an instance of a `Formatter` subclass that has `alias` in its
- aliases list. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter with that alias is
- found.
-
-def `get_formatter_for_filename(fn, **options):`
- Return a `Formatter` subclass instance that has a filename pattern
- matching `fn`. The formatter is given the `options` at its
- instantiation.
-
- Will raise `pygments.util.ClassNotFound` if no formatter for that filename
- is found.
-
-
-Functions from `pygments.styles`:
-
-def `get_style_by_name(name):`
- Return a style class by its short name. The names of the builtin styles
- are listed in `pygments.styles.STYLE_MAP`.
-
- Will raise `pygments.util.ClassNotFound` if no style of that name is found.
-
-def `get_all_styles():`
- Return an iterable over all registered styles, yielding their names.
-
- *New in Pygments 0.6.*
-
-
-Lexers
-======
-
-A lexer (derived from `pygments.lexer.Lexer`) has the following functions:
-
-def `__init__(self, **options):`
- The constructor. Takes a \*\*keywords dictionary of options.
- Every subclass must first process its own options and then call
- the `Lexer` constructor, since it processes the `stripnl`,
- `stripall` and `tabsize` options.
-
- An example looks like this:
-
- .. sourcecode:: python
-
- def __init__(self, **options):
- self.compress = options.get('compress', '')
- Lexer.__init__(self, **options)
-
- As these options must all be specifiable as strings (due to the
- command line usage), there are various utility functions
- available to help with that, see `Option processing`_.
-
-def `get_tokens(self, text):`
- This method is the basic interface of a lexer. It is called by
- the `highlight()` function. It must process the text and return an
- iterable of ``(tokentype, value)`` pairs from `text`.
-
- Normally, you don't need to override this method. The default
- implementation processes the `stripnl`, `stripall` and `tabsize`
- options and then yields all tokens from `get_tokens_unprocessed()`,
- with the ``index`` dropped.
-
-def `get_tokens_unprocessed(self, text):`
- This method should process the text and return an iterable of
- ``(index, tokentype, value)`` tuples where ``index`` is the starting
- position of the token within the input text.
-
- This method must be overridden by subclasses.
-
-def `analyse_text(text):`
- A static method which is called for lexer guessing. It should analyse
- the text and return a float in the range from ``0.0`` to ``1.0``.
- If it returns ``0.0``, the lexer will not be selected as the most
- probable one, if it returns ``1.0``, it will be selected immediately.
-
-For a list of known tokens have a look at the `Tokens`_ page.
-
-A lexer also can have the following attributes (in fact, they are mandatory
-except `alias_filenames`) that are used by the builtin lookup mechanism.
-
-`name`
- Full name for the lexer, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the lexer from a list, e.g. using `get_lexer_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames which contain
- content for this lexer. The patterns in this list should be unique among
- all lexers.
-
-`alias_filenames`
- A list of `fnmatch` patterns that match filenames which may or may not
- contain content for this lexer. This list is used by the
- `guess_lexer_for_filename()` function, to determine which lexers are
- then included in guessing the correct one. That means that e.g. every
- lexer for HTML and a template language should include ``\*.html`` in
- this list.
-
-`mimetypes`
- A list of MIME types for content that can be lexed with this
- lexer.
-
-
-.. _Tokens: tokens.txt
-
-
-Formatters
-==========
-
-A formatter (derived from `pygments.formatter.Formatter`) has the following
-functions:
-
-def `__init__(self, **options):`
- As with lexers, this constructor processes options and then must call
- the base class `__init__`.
-
- The `Formatter` class recognizes the options `style`, `full` and
- `title`. It is up to the formatter class whether it uses them.
-
-def `get_style_defs(self, arg=''):`
- This method must return statements or declarations suitable to define
- the current style for subsequent highlighted text (e.g. CSS classes
- in the `HTMLFormatter`).
-
- The optional argument `arg` can be used to modify the generation and
- is formatter dependent (it is standardized because it can be given on
- the command line).
-
- This method is called by the ``-S`` `command-line option`_, the `arg`
- is then given by the ``-a`` option.
-
-def `format(self, tokensource, outfile):`
- This method must format the tokens from the `tokensource` iterable and
- write the formatted version to the file object `outfile`.
-
- Formatter options can control how exactly the tokens are converted.
-
-.. _command-line option: cmdline.txt
-
-A formatter must have the following attributes that are used by the
-builtin lookup mechanism. (*New in Pygments 0.7.*)
-
-`name`
- Full name for the formatter, in human-readable form.
-
-`aliases`
- A list of short, unique identifiers that can be used to lookup
- the formatter from a list, e.g. using `get_formatter_by_name()`.
-
-`filenames`
- A list of `fnmatch` patterns that match filenames for which this formatter
- can produce output. The patterns in this list should be unique among
- all formatters.
-
-
-Option processing
-=================
-
-The `pygments.util` module has some utility functions usable for option
-processing:
-
-class `OptionError`
- This exception will be raised by all option processing functions if
- the type or value of the argument is not correct.
-
-def `get_bool_opt(options, optname, default=None):`
- Interpret the key `optname` from the dictionary `options`
- as a boolean and return it. Return `default` if `optname`
- is not in `options`.
-
- The valid string values for ``True`` are ``1``, ``yes``,
- ``true`` and ``on``, the ones for ``False`` are ``0``,
- ``no``, ``false`` and ``off`` (matched case-insensitively).
-
-def `get_int_opt(options, optname, default=None):`
- As `get_bool_opt`, but interpret the value as an integer.
-
-def `get_list_opt(options, optname, default=None):`
- If the key `optname` from the dictionary `options` is a string,
- split it at whitespace and return it. If it is already a list
- or a tuple, it is returned as a list.
-
-def `get_choice_opt(options, optname, allowed, default=None):`
- If the key `optname` from the dictionary is not in the sequence
- `allowed`, raise an error, otherwise return it. *New in Pygments 0.8.*
diff --git a/docs/src/authors.txt b/docs/src/authors.txt
deleted file mode 100644
index c8c532aa..00000000
--- a/docs/src/authors.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=======
-Authors
-=======
-
-[authors]
diff --git a/docs/src/changelog.txt b/docs/src/changelog.txt
deleted file mode 100644
index 6caf0a32..00000000
--- a/docs/src/changelog.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-=========
-Changelog
-=========
-
-[changelog]
diff --git a/docs/src/index.txt b/docs/src/index.txt
deleted file mode 100644
index b1e099c7..00000000
--- a/docs/src/index.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-.. -*- mode: rst -*-
-
-========
-Overview
-========
-
-Welcome to the Pygments documentation.
-
-- Starting with Pygments
-
- - `Installation <installation.txt>`_
-
- - `Introduction and Quickstart <quickstart.txt>`_
-
- - `Command line interface <cmdline.txt>`_
-
-- Builtin components
-
- - `Lexers <lexers.txt>`_
-
- - `Formatters <formatters.txt>`_
-
- - `Filters <filters.txt>`_
-
- - `Styles <styles.txt>`_
-
-- Reference
-
- - `Unicode and encodings <unicode.txt>`_
-
- - `Builtin tokens <tokens.txt>`_
-
- - `API documentation <api.txt>`_
-
-- Hacking for Pygments
-
- - `Write your own lexer <lexerdevelopment.txt>`_
-
- - `Write your own formatter <formatterdevelopment.txt>`_
-
- - `Write your own filter <filterdevelopment.txt>`_
-
- - `Register plugins <plugins.txt>`_
-
-- Hints and Tricks
-
- - `Using Pygments in ReST documents <rstdirective.txt>`_
-
- - `Using Pygments with MoinMoin <moinmoin.txt>`_
-
- - `Using Pygments in other contexts <integrate.txt>`_
-
-- About Pygments
-
- - `Changelog <changelog.txt>`_
-
- - `Authors <authors.txt>`_
-
-
---------------
-
-If you find bugs or have suggestions for the documentation, please
-look `here`_ for info on how to contact the team.
-
-You can download an offline version of this documentation from the
-`download page`_.
-
-.. _here: http://pygments.org/contribute/
-.. _download page: http://pygments.org/download/
diff --git a/docs/src/installation.txt b/docs/src/installation.txt
deleted file mode 100644
index 17a9aad5..00000000
--- a/docs/src/installation.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-.. -*- mode: rst -*-
-
-============
-Installation
-============
-
-Pygments requires at least Python 2.4 to work correctly. Just to clarify:
-there *won't* ever be support for Python versions below 2.4. However, there
-are no other dependencies.
-
-
-Installing a released version
-=============================
-
-As a Python egg (via easy_install)
-----------------------------------
-
-You can install the most recent Pygments version using `easy_install`_::
-
- sudo easy_install Pygments
-
-This will install a Pygments egg in your Python installation's site-packages
-directory.
-
-
-From the tarball release
--------------------------
-
-1. Download the most recent tarball from the `download page`_
-2. Unpack the tarball
-3. ``sudo python setup.py install``
-
-Note that the last command will automatically download and install
-`setuptools`_ if you don't already have it installed. This requires a working
-internet connection.
-
-This will install Pygments into your Python installation's site-packages directory.
-
-
-Installing the development version
-==================================
-
-If you want to play around with the code
-----------------------------------------
-
-1. Install `Mercurial`_
-2. ``hg clone http://bitbucket.org/birkenfeld/pygments-main pygments``
-3. ``cd pygments``
-4. ``ln -s pygments /usr/lib/python2.X/site-packages``
-5. ``ln -s pygmentize /usr/local/bin``
-
-As an alternative to steps 4 and 5 you can also do ``python setup.py develop``
-which will install the package via setuptools in development mode.
-
-..
- If you just want the latest features and use them
- -------------------------------------------------
-
- ::
-
- sudo easy_install Pygments==dev
-
- This will install a Pygments egg containing the latest Subversion trunk code
- in your Python installation's site-packages directory. Every time the command
- is run, the sources are updated from Subversion.
-
-
-.. _download page: http://pygments.org/download/
-.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
-.. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall
-.. _Mercurial: http://selenic.com/mercurial/
diff --git a/external/autopygmentize b/external/autopygmentize
index 85c8dfd2..2df6d469 100755
--- a/external/autopygmentize
+++ b/external/autopygmentize
@@ -9,9 +9,8 @@
# This program can be used as a .lessfilter for the less pager to auto-color less's output
lexer=`pygmentize -N "$1"`
+file_common_opts="--brief --dereference --uncompress"
if [ "$lexer" = "text" ]; then
- file_common_opts="--brief --dereference --uncompress"
-
unset lexer
case `file --mime-type $file_common_opts "$1"` in
application/xml|image/svg+xml) lexer=xml;;
@@ -40,11 +39,13 @@ if [ "$lexer" = "text" ]; then
text/x-tcl) lexer=tcl;;
text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer
- # Types that file outputs which pygmentize didn't support as of file 5.11, pygments 1.6rc1
+ # Types that file outputs which pygmentize didn't support as of file 5.14, pygments 1.6
# text/calendar
+ # text/inf
# text/PGP
# text/rtf
# text/texmacs
+ # text/vnd.graphviz
# text/x-bcpl
# text/x-info
# text/x-m4
@@ -53,7 +54,7 @@ if [ "$lexer" = "text" ]; then
esac
fi
-encoding=`file --brief --mime-encoding $file_common_opts "$1"`
+encoding=`file --mime-encoding $file_common_opts "$1"`
if [ -n "$lexer" ]; then
# FIXME: Specify input encoding rather than output encoding https://bitbucket.org/birkenfeld/pygments-main/issue/800
diff --git a/external/markdown-processor.py b/external/markdown-processor.py
index 12e64680..2a92a40e 100644
--- a/external/markdown-processor.py
+++ b/external/markdown-processor.py
@@ -6,14 +6,9 @@
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
- from markdown import Markdown
+ import markdown
- md = Markdown()
- md.textPreprocessors.insert(0, CodeBlockPreprocessor())
- html = md.convert(someText)
-
- markdown is then a callable that can be passed to the context of
- a template and used in that template, for example.
+ html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
@@ -25,9 +20,9 @@
some code
[/sourcecode]
- .. _Markdown: http://www.freewisdom.org/projects/python-markdown/
+ .. _Markdown: https://pypi.python.org/pypi/Markdown
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -40,17 +35,17 @@ INLINESTYLES = False
import re
-from markdown import TextPreprocessor
+from markdown.preprocessors import Preprocessor
+from markdown.extensions import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
-class CodeBlockPreprocessor(TextPreprocessor):
+class CodeBlockPreprocessor(Preprocessor):
- pattern = re.compile(
- r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
+ pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
@@ -63,5 +58,10 @@ class CodeBlockPreprocessor(TextPreprocessor):
code = highlight(m.group(2), lexer, self.formatter)
code = code.replace('\n\n', '\n&nbsp;\n').replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
- return self.pattern.sub(
- repl, lines)
+ joined_lines = "\n".join(lines)
+ joined_lines = self.pattern.sub(repl, joined_lines)
+ return joined_lines.split("\n")
+
+class CodeBlockExtension(Extension):
+ def extendMarkdown(self, md, md_globals):
+ md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
diff --git a/external/moin-parser.py b/external/moin-parser.py
index 6544da1b..41131185 100644
--- a/external/moin-parser.py
+++ b/external/moin-parser.py
@@ -31,7 +31,7 @@
If you do not want to do that and are willing to accept larger HTML
output, you can set the INLINESTYLES option below to True.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/external/rst-directive-old.py b/external/rst-directive-old.py
deleted file mode 100644
index a074536f..00000000
--- a/external/rst-directive-old.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- The Pygments reStructuredText directive
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- This fragment is a Docutils_ 0.4 directive that renders source code
- (to HTML only, currently) via Pygments.
-
- To use it, adjust the options below and copy the code into a module
- that you import on initialization. The code then automatically
- registers a ``sourcecode`` directive that you can use instead of
- normal code blocks like this::
-
- .. sourcecode:: python
-
- My code goes here.
-
- If you want to have different code styles, e.g. one with line numbers
- and one without, add formatters with their names in the VARIANTS dict
- below. You can invoke them instead of the DEFAULT one by using a
- directive option::
-
- .. sourcecode:: python
- :linenos:
-
- My code goes here.
-
- Look at the `directive documentation`_ to get all the gory details.
-
- .. _Docutils: http://docutils.sf.net/
- .. _directive documentation:
- http://docutils.sourceforge.net/docs/howto/rst-directives.html
-
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-# Options
-# ~~~~~~~
-
-# Set to True if you want inline CSS styles instead of classes
-INLINESTYLES = False
-
-from pygments.formatters import HtmlFormatter
-
-# The default formatter
-DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
-
-# Add name -> formatter pairs for every variant you want to use
-VARIANTS = {
- # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
-}
-
-
-from docutils import nodes
-from docutils.parsers.rst import directives
-
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name, TextLexer
-
-def pygments_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- try:
- lexer = get_lexer_by_name(arguments[0])
- except ValueError:
- # no lexer found - use the text one instead of an exception
- lexer = TextLexer()
- # take an arbitrary option if more than one is given
- formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
- parsed = highlight(u'\n'.join(content), lexer, formatter)
- return [nodes.raw('', parsed, format='html')]
-
-pygments_directive.arguments = (1, 0, 1)
-pygments_directive.content = 1
-pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
-
-directives.register_directive('sourcecode', pygments_directive)
diff --git a/external/rst-directive.py b/external/rst-directive.py
index 5c04038d..8ce150c4 100644
--- a/external/rst-directive.py
+++ b/external/rst-directive.py
@@ -31,7 +31,7 @@
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -75,9 +75,8 @@ class Pygments(Directive):
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
- formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
+ formatter = self.options and VARIANTS[list(self.options)[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
-
diff --git a/ez_setup.py b/ez_setup.py
index e33744ba..9dc2c872 100755..100644
--- a/ez_setup.py
+++ b/ez_setup.py
@@ -13,264 +13,370 @@ the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
+import os
+import shutil
import sys
-DEFAULT_VERSION = "0.6c9"
-DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
-
-md5_data = {
- 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
- 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
- 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
- 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
- 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
- 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
- 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
- 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
- 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
- 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
- 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
- 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
- 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
- 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
- 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
- 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
- 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
- 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
- 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
- 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
- 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
- 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
- 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
- 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
- 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
- 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
- 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
- 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
- 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
- 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
- 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
- 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
- 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
- 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
-}
-
-import sys, os
-try: from hashlib import md5
-except ImportError: from md5 import md5
-
-def _validate_md5(egg_name, data):
- if egg_name in md5_data:
- digest = md5(data).hexdigest()
- if digest != md5_data[egg_name]:
- print >>sys.stderr, (
- "md5 validation of %s failed! (Possible download problem?)"
- % egg_name
- )
- sys.exit(2)
- return data
-
-def use_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- download_delay=15
-):
- """Automatically find/download setuptools and make it available on sys.path
-
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end with
- a '/'). `to_dir` is the directory where setuptools will be downloaded, if
- it is not already available. If `download_delay` is specified, it should
- be the number of seconds that will be paused before initiating a download,
- should one be required. If an older version of setuptools is installed,
- this routine will print a message to ``sys.stderr`` and raise SystemExit in
- an attempt to abort the calling script.
- """
- was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
- def do_download():
- egg = download_setuptools(version, download_base, to_dir, download_delay)
- sys.path.insert(0, egg)
- import setuptools; setuptools.bootstrap_install_from = egg
+import tempfile
+import tarfile
+import optparse
+import subprocess
+import platform
+
+from distutils import log
+
+try:
+ from site import USER_SITE
+except ImportError:
+ USER_SITE = None
+
+DEFAULT_VERSION = "1.4.2"
+DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
+
+def _python_cmd(*args):
+ args = (sys.executable,) + args
+ return subprocess.call(args) == 0
+
+def _check_call_py24(cmd, *args, **kwargs):
+ res = subprocess.call(cmd, *args, **kwargs)
+ class CalledProcessError(Exception):
+ pass
+ if not res == 0:
+ msg = "Command '%s' return non-zero exit status %d" % (cmd, res)
+ raise CalledProcessError(msg)
+vars(subprocess).setdefault('check_call', _check_call_py24)
+
+def _install(tarball, install_args=()):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # installing
+ log.warn('Installing Setuptools')
+ if not _python_cmd('setup.py', 'install', *install_args):
+ log.warn('Something went wrong during the installation.')
+ log.warn('See the error message above.')
+ # exitcode will be 2
+ return 2
+ finally:
+ os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
+
+
+def _build_egg(egg, tarball, to_dir):
+ # extracting the tarball
+ tmpdir = tempfile.mkdtemp()
+ log.warn('Extracting in %s', tmpdir)
+ old_wd = os.getcwd()
+ try:
+ os.chdir(tmpdir)
+ tar = tarfile.open(tarball)
+ _extractall(tar)
+ tar.close()
+
+ # going in the directory
+ subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+ os.chdir(subdir)
+ log.warn('Now working in %s', subdir)
+
+ # building an egg
+ log.warn('Building a Setuptools egg in %s', to_dir)
+ _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+
+ finally:
+ os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
+ # returning the result
+ log.warn(egg)
+ if not os.path.exists(egg):
+ raise IOError('Could not build the egg.')
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+ egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
+ % (version, sys.version_info[0], sys.version_info[1]))
+ if not os.path.exists(egg):
+ tarball = download_setuptools(version, download_base,
+ to_dir, download_delay)
+ _build_egg(egg, tarball, to_dir)
+ sys.path.insert(0, egg)
+
+ # Remove previously-imported pkg_resources if present (see
+ # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
+ if 'pkg_resources' in sys.modules:
+ del sys.modules['pkg_resources']
+
+ import setuptools
+ setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, download_delay=15):
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ was_imported = 'pkg_resources' in sys.modules or \
+ 'setuptools' in sys.modules
try:
import pkg_resources
except ImportError:
- return do_download()
+ return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("setuptools>="+version); return
- except pkg_resources.VersionConflict, e:
+ pkg_resources.require("setuptools>=" + version)
+ return
+ except pkg_resources.VersionConflict:
+ e = sys.exc_info()[1]
if was_imported:
- print >>sys.stderr, (
- "The required version of setuptools (>=%s) is not available, and\n"
- "can't be installed while this script is running. Please install\n"
- " a more recent version first, using 'easy_install -U setuptools'."
- "\n\n(Currently using %r)"
- ) % (version, e.args[0])
+ sys.stderr.write(
+ "The required version of setuptools (>=%s) is not available,\n"
+ "and can't be installed while this script is running. Please\n"
+ "install a more recent version first, using\n"
+ "'easy_install -U setuptools'."
+ "\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return do_download()
+ return _do_download(version, download_base, to_dir,
+ download_delay)
except pkg_resources.DistributionNotFound:
- return do_download()
+ return _do_download(version, download_base, to_dir,
+ download_delay)
-def download_setuptools(
- version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
- delay = 15
-):
- """Download setuptools from a specified location and return its filename
+def _clean_check(cmd, target):
+ """
+ Run the command to download target. If the command fails, clean up before
+ re-raising the error.
+ """
+ try:
+ subprocess.check_call(cmd)
+ except subprocess.CalledProcessError:
+ if os.access(target, os.F_OK):
+ os.unlink(target)
+ raise
- `version` should be a valid setuptools version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download attempt.
+def download_file_powershell(url, target):
"""
- import urllib2, shutil
- egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
- url = download_base + egg_name
- saveto = os.path.join(to_dir, egg_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
+ Download the file at url to target using Powershell (which will validate
+ trust). Raise an exception if the command cannot complete.
+ """
+ target = os.path.abspath(target)
+ cmd = [
+ 'powershell',
+ '-Command',
+ "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
+ ]
+ _clean_check(cmd, target)
+
+def has_powershell():
+ if platform.system() != 'Windows':
+ return False
+ cmd = ['powershell', '-Command', 'echo test']
+ devnull = open(os.path.devnull, 'wb')
+ try:
try:
- from distutils import log
- if delay:
- log.warn("""
----------------------------------------------------------------------------
-This script requires setuptools version %s to run (even to display
-help). I will attempt to download it for you (from
-%s), but
-you may need to enable firewall access for this script first.
-I will start the download in %d seconds.
-
-(Note: if this machine does not have network access, please obtain the file
-
- %s
-
-and place it in this directory before rerunning this script.)
----------------------------------------------------------------------------""",
- version, download_base, delay, url
- ); from time import sleep; sleep(delay)
- log.warn("Downloading %s", url)
- src = urllib2.urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = _validate_md5(egg_name, src.read())
- dst = open(saveto,"wb"); dst.write(data)
- finally:
- if src: src.close()
- if dst: dst.close()
- return os.path.realpath(saveto)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def main(argv, version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
+
+download_file_powershell.viable = has_powershell
+
+def download_file_curl(url, target):
+ cmd = ['curl', url, '--silent', '--output', target]
+ _clean_check(cmd, target)
+
+def has_curl():
+ cmd = ['curl', '--version']
+ devnull = open(os.path.devnull, 'wb')
try:
- import setuptools
- except ImportError:
- egg = None
try:
- egg = download_setuptools(version, delay=0)
- sys.path.insert(0,egg)
- from setuptools.command.easy_install import main
- return main(list(argv)+[egg]) # we're done here
- finally:
- if egg and os.path.exists(egg):
- os.unlink(egg)
- else:
- if setuptools.__version__ == '0.0.1':
- print >>sys.stderr, (
- "You have an obsolete version of setuptools installed. Please\n"
- "remove it from your system entirely before rerunning this script."
- )
- sys.exit(2)
-
- req = "setuptools>="+version
- import pkg_resources
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
+
+download_file_curl.viable = has_curl
+
+def download_file_wget(url, target):
+ cmd = ['wget', url, '--quiet', '--output-document', target]
+ _clean_check(cmd, target)
+
+def has_wget():
+ cmd = ['wget', '--version']
+ devnull = open(os.path.devnull, 'wb')
try:
- pkg_resources.require(req)
- except pkg_resources.VersionConflict:
try:
- from setuptools.command.easy_install import main
- except ImportError:
- from easy_install import main
- main(list(argv)+[download_setuptools(delay=0)])
- sys.exit(0) # try to force an exit
- else:
- if argv:
- from setuptools.command.easy_install import main
- main(argv)
- else:
- print "Setuptools version",version,"or greater has been installed."
- print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
+ subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+ except:
+ return False
+ finally:
+ devnull.close()
+ return True
-def update_md5(filenames):
- """Update our built-in md5 registry"""
+download_file_wget.viable = has_wget
- import re
-
- for name in filenames:
- base = os.path.basename(name)
- f = open(name,'rb')
- md5_data[base] = md5(f.read()).hexdigest()
- f.close()
-
- data = [" %r: %r,\n" % it for it in md5_data.items()]
- data.sort()
- repl = "".join(data)
-
- import inspect
- srcfile = inspect.getsourcefile(sys.modules[__name__])
- f = open(srcfile, 'rb'); src = f.read(); f.close()
+def download_file_insecure(url, target):
+ """
+ Use Python to download the file, even though it cannot authenticate the
+ connection.
+ """
+ try:
+ from urllib.request import urlopen
+ except ImportError:
+ from urllib2 import urlopen
+ src = dst = None
+ try:
+ src = urlopen(url)
+ # Read/write all in one block, so we don't create a corrupt file
+ # if the download is interrupted.
+ data = src.read()
+ dst = open(target, "wb")
+ dst.write(data)
+ finally:
+ if src:
+ src.close()
+ if dst:
+ dst.close()
+
+download_file_insecure.viable = lambda: True
+
+def get_best_downloader():
+ downloaders = [
+ download_file_powershell,
+ download_file_curl,
+ download_file_wget,
+ download_file_insecure,
+ ]
+
+ for dl in downloaders:
+ if dl.viable():
+ return dl
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+ to_dir=os.curdir, delay=15,
+ downloader_factory=get_best_downloader):
+ """Download setuptools from a specified location and return its filename
- match = re.search("\nmd5_data = {\n([^}]+)}", src)
- if not match:
- print >>sys.stderr, "Internal error!"
- sys.exit(2)
+ `version` should be a valid setuptools version number that is available
+ as an egg for download under the `download_base` URL (which should end
+ with a '/'). `to_dir` is the directory where the egg will be downloaded.
+ `delay` is the number of seconds to pause before an actual download
+ attempt.
- src = src[:match.start(1)] + repl + src[match.end(1):]
- f = open(srcfile,'w')
- f.write(src)
- f.close()
+ ``downloader_factory`` should be a function taking no arguments and
+ returning a function for downloading a URL to a target.
+ """
+ # making sure we use the absolute path
+ to_dir = os.path.abspath(to_dir)
+ tgz_name = "setuptools-%s.tar.gz" % version
+ url = download_base + tgz_name
+ saveto = os.path.join(to_dir, tgz_name)
+ if not os.path.exists(saveto): # Avoid repeated downloads
+ log.warn("Downloading %s", url)
+ downloader = downloader_factory()
+ downloader(url, saveto)
+ return os.path.realpath(saveto)
-if __name__=='__main__':
- if len(sys.argv)>2 and sys.argv[1]=='--md5update':
- update_md5(sys.argv[2:])
+def _extractall(self, path=".", members=None):
+ """Extract all members from the archive to the current working
+ directory and set owner, modification time and permissions on
+ directories afterwards. `path' specifies a different directory
+ to extract to. `members' is optional and must be a subset of the
+ list returned by getmembers().
+ """
+ import copy
+ import operator
+ from tarfile import ExtractError
+ directories = []
+
+ if members is None:
+ members = self
+
+ for tarinfo in members:
+ if tarinfo.isdir():
+ # Extract directories with a safe mode.
+ directories.append(tarinfo)
+ tarinfo = copy.copy(tarinfo)
+ tarinfo.mode = 448 # decimal for oct 0700
+ self.extract(tarinfo, path)
+
+ # Reverse sort directories.
+ if sys.version_info < (2, 4):
+ def sorter(dir1, dir2):
+ return cmp(dir1.name, dir2.name)
+ directories.sort(sorter)
+ directories.reverse()
else:
- main(sys.argv[1:])
-
-
-
-
+ directories.sort(key=operator.attrgetter('name'), reverse=True)
+ # Set correct owner, mtime and filemode on directories.
+ for tarinfo in directories:
+ dirpath = os.path.join(path, tarinfo.name)
+ try:
+ self.chown(tarinfo, dirpath)
+ self.utime(tarinfo, dirpath)
+ self.chmod(tarinfo, dirpath)
+ except ExtractError:
+ e = sys.exc_info()[1]
+ if self.errorlevel > 1:
+ raise
+ else:
+ self._dbg(1, "tarfile: %s" % e)
+
+
+def _build_install_args(options):
+ """
+ Build the arguments to 'python setup.py install' on the setuptools package
+ """
+ install_args = []
+ if options.user_install:
+ if sys.version_info < (2, 6):
+ log.warn("--user requires Python 2.6 or later")
+ raise SystemExit(1)
+ install_args.append('--user')
+ return install_args
+
+def _parse_args():
+ """
+ Parse the command line for options
+ """
+ parser = optparse.OptionParser()
+ parser.add_option(
+ '--user', dest='user_install', action='store_true', default=False,
+ help='install in user site package (requires Python 2.6 or later)')
+ parser.add_option(
+ '--download-base', dest='download_base', metavar="URL",
+ default=DEFAULT_URL,
+ help='alternative URL from where to download the setuptools package')
+ parser.add_option(
+ '--insecure', dest='downloader_factory', action='store_const',
+ const=lambda: download_file_insecure, default=get_best_downloader,
+ help='Use internal, non-validating downloader'
+ )
+ options, args = parser.parse_args()
+ # positional arguments are ignored
+ return options
+
+def main(version=DEFAULT_VERSION):
+ """Install or upgrade setuptools and EasyInstall"""
+ options = _parse_args()
+ tarball = download_setuptools(download_base=options.download_base,
+ downloader_factory=options.downloader_factory)
+ return _install(tarball, _build_install_args(options))
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/pygmentize b/pygmentize
index e2379199..8b3b2067 100755
--- a/pygmentize
+++ b/pygmentize
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
import sys, pygments.cmdline
try:
diff --git a/pygments/__init__.py b/pygments/__init__.py
index 2bfd8ba5..a47f686e 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -22,11 +22,11 @@
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-__version__ = '1.6'
+__version__ = '2.0pre'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
@@ -43,7 +43,7 @@ def lex(code, lexer):
"""
try:
return lexer.get_tokens(code)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
@@ -67,7 +67,7 @@ def format(tokens, formatter, outfile=None):
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
- except TypeError, err:
+ except TypeError as err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
diff --git a/pygments/cmdline.py b/pygments/cmdline.py
index c25204bf..a4ed83fa 100644
--- a/pygments/cmdline.py
+++ b/pygments/cmdline.py
@@ -5,9 +5,12 @@
Command line interface.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
+from __future__ import print_function
+
import sys
import getopt
from textwrap import dedent
@@ -16,6 +19,7 @@ from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
+from pygments.formatters.latex import LatexEmbededLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
@@ -92,7 +96,7 @@ def _parse_options(o_strs):
for o_arg in o_args:
o_arg = o_arg.strip()
try:
- o_key, o_val = o_arg.split('=')
+ o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
@@ -119,25 +123,25 @@ def _print_help(what, name):
try:
if what == 'lexer':
cls = find_lexer_class(name)
- print "Help on the %s lexer:" % cls.name
- print dedent(cls.__doc__)
+ print("Help on the %s lexer:" % cls.name)
+ print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
- print "Help on the %s formatter:" % cls.name
- print dedent(cls.__doc__)
+ print("Help on the %s formatter:" % cls.name)
+ print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
- print "Help on the %s filter:" % name
- print dedent(cls.__doc__)
+ print("Help on the %s filter:" % name)
+ print(dedent(cls.__doc__))
except AttributeError:
- print >>sys.stderr, "%s not found!" % what
+ print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
if what == 'lexer':
- print
- print "Lexers:"
- print "~~~~~~~"
+ print()
+ print("Lexers:")
+ print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
@@ -146,12 +150,12 @@ def _print_list(what):
info.append(tup)
info.sort()
for i in info:
- print ('* %s\n %s %s') % i
+ print(('* %s\n %s %s') % i)
elif what == 'formatter':
- print
- print "Formatters:"
- print "~~~~~~~~~~~"
+ print()
+ print("Formatters:")
+ print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
@@ -161,27 +165,27 @@ def _print_list(what):
info.append(tup)
info.sort()
for i in info:
- print ('* %s\n %s %s') % i
+ print(('* %s\n %s %s') % i)
elif what == 'filter':
- print
- print "Filters:"
- print "~~~~~~~~"
+ print()
+ print("Filters:")
+ print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
- print "* " + name + ':'
- print " %s" % docstring_headline(cls)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
elif what == 'style':
- print
- print "Styles:"
- print "~~~~~~~"
+ print()
+ print("Styles:")
+ print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
- print "* " + name + ':'
- print " %s" % docstring_headline(cls)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
def main(args=sys.argv):
@@ -202,8 +206,8 @@ def main(args=sys.argv):
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
- except getopt.GetoptError, err:
- print >>sys.stderr, usage
+ except getopt.GetoptError:
+ print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
@@ -219,22 +223,22 @@ def main(args=sys.argv):
opts[opt] = arg
if not opts and not args:
- print usage
+ print(usage)
return 0
if opts.pop('-h', None) is not None:
- print usage
+ print(usage)
return 0
if opts.pop('-V', None) is not None:
- print 'Pygments version %s, (c) 2006-2013 by Georg Brandl.' % __version__
+ print('Pygments version %s, (c) 2006-2014 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
# print version
@@ -249,12 +253,12 @@ def main(args=sys.argv):
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
_print_help(what, name)
@@ -279,13 +283,13 @@ def main(args=sys.argv):
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
- except ClassNotFound, err:
+ except ClassNotFound as err:
lexer = TextLexer()
- except OptionError, err:
- print >>sys.stderr, 'Error:', err
+ except OptionError as err:
+ print('Error:', err, file=sys.stderr)
return 1
- print lexer.aliases[0]
+ print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
@@ -294,30 +298,30 @@ def main(args=sys.argv):
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
if opts or args:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
- except ClassNotFound, err:
- print >>sys.stderr, err
+ except ClassNotFound as err:
+ print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
- print fmter.get_style_defs(arg)
- except Exception, err:
- print >>sys.stderr, 'Error:', err
+ print(fmter.get_style_defs(arg))
+ except Exception as err:
+ print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
# parse -F options
@@ -330,21 +334,21 @@ def main(args=sys.argv):
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
- except Exception, err:
- print >>sys.stderr, 'Error: cannot open outfile:', err
+ except Exception as err:
+ print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
@@ -356,36 +360,36 @@ def main(args=sys.argv):
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
- except (OptionError, ClassNotFound), err:
- print >>sys.stderr, 'Error:', err
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
return 1
if args:
if len(args) > 1:
- print >>sys.stderr, usage
+ print(usage, file=sys.stderr)
return 2
infn = args[0]
try:
code = open(infn, 'rb').read()
- except Exception, err:
- print >>sys.stderr, 'Error: cannot read infile:', err
+ except Exception as err:
+ print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
- except ClassNotFound, err:
+ except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
- print >>sys.stderr, 'Error:', err
+ print('Error:', err, file=sys.stderr)
return 1
- except OptionError, err:
- print >>sys.stderr, 'Error:', err
+ except OptionError as err:
+ print('Error:', err, file=sys.stderr)
return 1
else:
@@ -396,12 +400,21 @@ def main(args=sys.argv):
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
elif not lexer:
- print >>sys.stderr, 'Error: no lexer name given and reading ' + \
- 'from stdin (try using -g or -l <lexer>)'
+ print('Error: no lexer name given and reading ' + \
+ 'from stdin (try using -g or -l <lexer>)', file=sys.stderr)
return 2
else:
code = sys.stdin.read()
+ # When using the LaTeX formatter and the option `escapeinside` is
+ # specified, we need a special lexer which collects escaped text
+ # before running the chosen language lexer.
+ escapeinside = parsed_opts.get('escapeinside', '')
+ if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
+ left = escapeinside[0]
+ right = escapeinside[1]
+ lexer = LatexEmbededLexer(left, right, lexer)
+
# No encoding given? Use latin1 if output file given,
# stdin/stdout encoding otherwise.
# (This is a compromise, I'm not too happy with it...)
@@ -426,16 +439,16 @@ def main(args=sys.argv):
for fname, fopts in F_opts:
lexer.add_filter(fname, **fopts)
highlight(code, lexer, fmter, outfile)
- except Exception, err:
+ except Exception:
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
- print >>sys.stderr
- print >>sys.stderr, '*** Error while highlighting:'
- print >>sys.stderr, msg
+ print(file=sys.stderr)
+ print('*** Error while highlighting:', file=sys.stderr)
+ print(msg, file=sys.stderr)
return 1
return 0
diff --git a/pygments/console.py b/pygments/console.py
index c8dfbd1f..d93f6a9f 100644
--- a/pygments/console.py
+++ b/pygments/console.py
@@ -5,7 +5,7 @@
Format colored console output.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/filter.py b/pygments/filter.py
index 0b9224f2..092ef331 100644
--- a/pygments/filter.py
+++ b/pygments/filter.py
@@ -5,7 +5,7 @@
Module that implements the default filter.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
index f12d025c..2de661c7 100644
--- a/pygments/filters/__init__.py
+++ b/pygments/filters/__init__.py
@@ -6,7 +6,7 @@
Module containing filter lookup functions and default
filters.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -16,7 +16,7 @@ from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
- get_choice_opt, ClassNotFound, OptionError
+ get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
@@ -117,7 +117,7 @@ class KeywordCaseFilter(Filter):
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
- self.convert = getattr(unicode, case)
+ self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
@@ -129,7 +129,7 @@ class KeywordCaseFilter(Filter):
class NameHighlightFilter(Filter):
"""
- Highlight a normal Name token with a different token type.
+ Highlight a normal Name (and Name.*) token with a different token type.
Example::
@@ -163,7 +163,7 @@ class NameHighlightFilter(Filter):
def filter(self, lexer, stream):
for ttype, value in stream:
- if ttype is Name and value in self.names:
+ if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
@@ -182,7 +182,7 @@ class RaiseOnErrorTokenFilter(Filter):
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
def __init__(self, **options):
@@ -230,14 +230,16 @@ class VisibleWhitespaceFilter(Filter):
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
- for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
+ for name, default in [('spaces', u'·'),
+ ('tabs', u'»'),
+ ('newlines', u'¶')]:
opt = options.get(name, False)
- if isinstance(opt, basestring) and len(opt) == 1:
+ if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
@@ -293,7 +295,7 @@ class GobbleFilter(Filter):
`n` : int
The number of characters to gobble.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
@@ -325,7 +327,7 @@ class TokenMergeFilter(Filter):
Merges consecutive tokens with the same token type in the output stream of a
lexer.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
diff --git a/pygments/formatter.py b/pygments/formatter.py
index 4b69f2a4..b16ffee8 100644
--- a/pygments/formatter.py
+++ b/pygments/formatter.py
@@ -5,20 +5,20 @@
Base formatter class.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
-from pygments.util import get_bool_opt
+from pygments.util import get_bool_opt, string_types
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
- if isinstance(style, basestring):
+ if isinstance(style, string_types):
return get_style_by_name(style)
return style
@@ -68,6 +68,9 @@ class Formatter(object):
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
+ if self.encoding == 'guess':
+ # can happen for pygmentize -O encoding=guess
+ self.encoding = 'utf-8'
self.encoding = options.get('outencoding', None) or self.encoding
self.options = options
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py
index d842b96b..f0c5dc41 100644
--- a/pygments/formatters/__init__.py
+++ b/pygments/formatters/__init__.py
@@ -5,11 +5,12 @@
Pygments formatters.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os.path
import fnmatch
+import re
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
@@ -34,7 +35,8 @@ def _init_formatter_cache():
for alias in cls.aliases:
_formatter_alias_cache[alias] = cls
for fn in cls.filenames:
- _formatter_filename_cache.append((fn, cls))
+ _formatter_filename_cache.append((
+ re.compile(fnmatch.translate(fn)), cls))
def find_formatter_class(name):
@@ -55,7 +57,7 @@ def get_formatter_for_filename(fn, **options):
_init_formatter_cache()
fn = os.path.basename(fn)
for pattern, cls in _formatter_filename_cache:
- if fnmatch.fnmatch(fn, pattern):
+ if pattern.match(fn):
return cls(**options)
raise ClassNotFound("No formatter found for file name %r" % fn)
diff --git a/pygments/formatters/_mapping.py b/pygments/formatters/_mapping.py
index a423ba50..79f592b3 100755
--- a/pygments/formatters/_mapping.py
+++ b/pygments/formatters/_mapping.py
@@ -3,16 +3,18 @@
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Formatter mapping defintions. This file is generated by itself. Everytime
- you change something on a builtin formatter defintion, run this script from
+ Formatter mapping definitions. This file is generated by itself. Everytime
+ you change something on a builtin formatter definition, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
@@ -57,7 +59,7 @@ if __name__ == '__main__':
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters.%s' % filename[:-3]
- print module_name
+ print(module_name)
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
imports.append((module_name, formatter_name))
diff --git a/pygments/formatters/bbcode.py b/pygments/formatters/bbcode.py
index 15faff6c..21525f1c 100644
--- a/pygments/formatters/bbcode.py
+++ b/pygments/formatters/bbcode.py
@@ -5,7 +5,7 @@
BBcode formatter.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 06096930..3bc60e8a 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -5,18 +5,20 @@
Formatter for HTML output.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import sys
import os.path
-import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ StringIO, string_types, iteritems
try:
import ctags
@@ -218,29 +220,34 @@ class HtmlFormatter(Formatter):
If you set this option, the default selector for `get_style_defs()`
will be this class.
- *New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
- wrapping table will have a CSS class of this string plus ``'table'``,
- the default is accordingly ``'highlighttable'``.
+ .. versionadded:: 0.9
+ If you select the ``'table'`` line numbers, the wrapping table will
+ have a CSS class of this string plus ``'table'``, the default is
+ accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
- Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
- Pygments 0.11.*
+ Inline CSS styles for the ``<pre>`` tag (default: ``''``).
+
+ .. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
- to this file instead of the HTML file. *New in Pygments 0.6.*
+ to this file instead of the HTML file.
+
+ .. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
- *New in Pygments 1.1.*
+
+ .. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
@@ -263,7 +270,9 @@ class HtmlFormatter(Formatter):
125%``).
`hl_lines`
- Specify a list of lines to be highlighted. *New in Pygments 0.11.*
+ Specify a list of lines to be highlighted.
+
+ .. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
@@ -279,24 +288,30 @@ class HtmlFormatter(Formatter):
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
- `get_syntax_defs` method given]) (default: ``False``). *New in
- Pygments 0.6.*
+ `get_syntax_defs` method given]) (default: ``False``).
+
+ .. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
- e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
- 0.7.*
+ e.g. set it to ``"<br>"`` to get HTML line breaks.
+
+ .. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
- This allows easy linking to certain lines. *New in Pygments 0.9.*
+ This allows easy linking to certain lines.
+
+ .. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
- This allows easy access to lines via javascript. *New in Pygments 1.6.*
+ This allows easy access to lines via javascript.
+
+ .. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
@@ -306,18 +321,20 @@ class HtmlFormatter(Formatter):
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
- *New in Pygments 1.6.*
+
+ .. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
- *New in Pygments 1.6.*
+
+ .. versionadded:: 1.6
**Subclassing the HTML formatter**
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
@@ -453,7 +470,7 @@ class HtmlFormatter(Formatter):
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
- if isinstance(arg, basestring):
+ if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
@@ -467,7 +484,7 @@ class HtmlFormatter(Formatter):
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
- for cls, (style, ttype, level) in self.class2style.iteritems()
+ for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
@@ -505,8 +522,8 @@ class HtmlFormatter(Formatter):
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
- print >>sys.stderr, 'Note: Cannot determine output file name, ' \
- 'using current directory as base for the CSS file name'
+ print('Note: Cannot determine output file name, ' \
+ 'using current directory as base for the CSS file name', file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
@@ -515,7 +532,7 @@ class HtmlFormatter(Formatter):
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
- except IOError, err:
+ except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
@@ -534,7 +551,7 @@ class HtmlFormatter(Formatter):
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
- dummyoutfile = StringIO.StringIO()
+ dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
index 394c3b6a..8e2b5f9e 100644
--- a/pygments/formatters/img.py
+++ b/pygments/formatters/img.py
@@ -5,15 +5,15 @@
Formatter for Pixmap output.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
-from pygments.util import get_bool_opt, get_int_opt, \
- get_list_opt, get_choice_opt
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ get_choice_opt, xrange
# Import this carefully
try:
@@ -25,7 +25,10 @@ except ImportError:
try:
import _winreg
except ImportError:
- _winreg = None
+ try:
+ import winreg as _winreg
+ except ImportError:
+ _winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
@@ -72,7 +75,10 @@ class FontManager(object):
self._create_nix()
def _get_nix_font_path(self, name, style):
- from commands import getstatusoutput
+ try:
+ from commands import getstatusoutput
+ except ImportError:
+ from subprocess import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
@@ -169,7 +175,7 @@ class ImageFormatter(Formatter):
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
Additional options accepted:
@@ -258,12 +264,16 @@ class ImageFormatter(Formatter):
Default: 6
`hl_lines`
- Specify a list of lines to be highlighted. *New in Pygments 1.2.*
+ Specify a list of lines to be highlighted.
+
+ .. versionadded:: 1.2
Default: empty list
`hl_color`
- Specify the color for highlighting lines. *New in Pygments 1.2.*
+ Specify the color for highlighting lines.
+
+ .. versionadded:: 1.2
Default: highlight color of the selected style
"""
@@ -513,8 +523,7 @@ class GifImageFormatter(ImageFormatter):
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create GIF images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_gif'
@@ -528,8 +537,7 @@ class JpgImageFormatter(ImageFormatter):
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create JPEG images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_jpg'
@@ -543,8 +551,7 @@ class BmpImageFormatter(ImageFormatter):
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
- *New in Pygments 1.0.* (You could create bitmap images before by passing a
- suitable `image_format` option to the `ImageFormatter`.)
+ .. versionadded:: 1.0
"""
name = 'img_bmp'
diff --git a/pygments/formatters/latex.py b/pygments/formatters/latex.py
index 47fd1239..9968aa4c 100644
--- a/pygments/formatters/latex.py
+++ b/pygments/formatters/latex.py
@@ -5,13 +5,17 @@
Formatter for LaTeX fancyvrb output.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import division
+
from pygments.formatter import Formatter
+from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, StringIO
+from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
+ iteritems
__all__ = ['LatexFormatter']
@@ -152,7 +156,7 @@ class LatexFormatter(Formatter):
.. sourcecode:: latex
- \begin{Verbatim}[commandchars=\\{\}]
+ \begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
@@ -205,19 +209,33 @@ class LatexFormatter(Formatter):
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
- *New in Pygments 0.7.*
- *New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
+ .. versionadded:: 0.7
+ .. versionchanged:: 0.10
+ The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
in comment tokens is not escaped so that LaTeX can render it (default:
- ``False``). *New in Pygments 1.2.*
+ ``False``).
+
+ .. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
- ``False``). *New in Pygments 1.2.*
+ ``False``).
+
+ .. versionadded:: 1.2
+
+ `escapeinside`
+ If set to a string of length 2, enables escaping to LaTeX. Text
+ delimited by these 2 characters is read as LaTeX code and
+ typeset accordingly. It has no effect in string literals. It has
+ no effect in comments if `texcomments` or `mathescape` is
+ set. (default: ``''``).
+
+ .. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
@@ -235,6 +253,13 @@ class LatexFormatter(Formatter):
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
+ self.escapeinside = options.get('escapeinside', '')
+
+ if len(self.escapeinside) == 2:
+ self.left = self.escapeinside[0]
+ self.right = self.escapeinside[1]
+ else:
+ self.escapeinside = ''
self._create_stylesheet()
@@ -291,7 +316,7 @@ class LatexFormatter(Formatter):
"""
cp = self.commandprefix
styles = []
- for name, definition in self.cmd2def.iteritems():
+ for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
@@ -306,13 +331,13 @@ class LatexFormatter(Formatter):
realoutfile = outfile
outfile = StringIO()
- outfile.write(ur'\begin{Verbatim}[commandchars=\\\{\}')
+ outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
- if self.mathescape or self.texcomments:
+ if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(ur',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
@@ -342,9 +367,22 @@ class LatexFormatter(Formatter):
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
+ elif self.escapeinside:
+ text = value
+ value = ''
+ while len(text) > 0:
+ a,sep1,text = text.partition(self.left)
+ if len(sep1) > 0:
+ b,sep2,text = text.partition(self.right)
+ if len(sep2) > 0:
+ value = value + escape_tex(a, self.commandprefix) + b
+ else:
+ value = value + escape_tex(a + sep1 + b, self.commandprefix)
+ else:
+ value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
- else:
+ elif not (ttype in Token.Escape):
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
@@ -376,3 +414,57 @@ class LatexFormatter(Formatter):
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
+
+
+class LatexEmbededLexer(Lexer):
+ r"""
+
+ This lexer takes one lexer as argument, the lexer for the language
+ being formatted, and the left and right delimiters for escaped text.
+
+ First everything is scanned using the language lexer to obtain
+ strings and comments. All other consecutive tokens are merged and
+ the resulting text is scanned for escaped segments, which are given
+ the Token.Escape type. Finally text that is not escaped is scanned
+ again with the language lexer.
+ """
+ def __init__(self, left, right, lang, **options):
+ self.left = left
+ self.right = right
+ self.lang = lang
+ Lexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ buf = ''
+ for i, t, v in self.lang.get_tokens_unprocessed(text):
+ if t in Token.Comment or t in Token.String:
+ if buf:
+ for x in self.get_tokens_aux(idx, buf):
+ yield x
+ buf = ''
+ yield i, t, v
+ else:
+ if not buf:
+ idx = i;
+ buf += v
+ if buf:
+ for x in self.get_tokens_aux(idx, buf):
+ yield x
+
+ def get_tokens_aux(self, index, text):
+ while text:
+ a, sep1, text = text.partition(self.left)
+ if a:
+ for i, t, v in self.lang.get_tokens_unprocessed(a):
+ yield index + i, t, v
+ index += len(a)
+ if sep1:
+ b, sep2, text = text.partition(self.right)
+ if sep2:
+ yield index + len(sep1), Token.Escape, b
+ index += len(sep1) + len(b) + len(sep2)
+ else:
+ yield index, Token.Error, sep1
+ index += len(sep1)
+ text = b
+
diff --git a/pygments/formatters/other.py b/pygments/formatters/other.py
index 1029a7a7..7368a642 100644
--- a/pygments/formatters/other.py
+++ b/pygments/formatters/other.py
@@ -5,12 +5,12 @@
Other formatters: NullFormatter, RawTokenFormatter.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
-from pygments.util import OptionError, get_choice_opt, b
+from pygments.util import OptionError, get_choice_opt
from pygments.token import Token
from pygments.console import colorize
@@ -40,7 +40,7 @@ class RawTokenFormatter(Formatter):
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
- `lexer list <lexers.txt>`_.
+ :doc:`lexer list <lexers>`.
Only two options are accepted:
@@ -50,7 +50,8 @@ class RawTokenFormatter(Formatter):
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
- *New in Pygments 0.11.*
+
+ .. versionadded:: 0.11
"""
name = 'Raw tokens'
@@ -79,7 +80,7 @@ class RawTokenFormatter(Formatter):
def format(self, tokensource, outfile):
try:
- outfile.write(b(''))
+ outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
diff --git a/pygments/formatters/rtf.py b/pygments/formatters/rtf.py
index 3efda284..59d97742 100644
--- a/pygments/formatters/rtf.py
+++ b/pygments/formatters/rtf.py
@@ -5,7 +5,7 @@
A formatter that generates RTF files.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -21,7 +21,7 @@ class RtfFormatter(Formatter):
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft® Word® documents.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
Additional options accepted:
diff --git a/pygments/formatters/svg.py b/pygments/formatters/svg.py
index 271f22a7..07636943 100644
--- a/pygments/formatters/svg.py
+++ b/pygments/formatters/svg.py
@@ -5,7 +5,7 @@
Formatter for SVG output.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -35,7 +35,7 @@ class SvgFormatter(Formatter):
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
Additional options accepted:
diff --git a/pygments/formatters/terminal.py b/pygments/formatters/terminal.py
index 94e078f2..539b0be9 100644
--- a/pygments/formatters/terminal.py
+++ b/pygments/formatters/terminal.py
@@ -5,7 +5,7 @@
Formatter for terminal output with ANSI sequences.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
index 772ed423..60b698c9 100644
--- a/pygments/formatters/terminal256.py
+++ b/pygments/formatters/terminal256.py
@@ -11,7 +11,7 @@
Formatter version 1.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -76,7 +76,7 @@ class Terminal256Formatter(Formatter):
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
Options accepted:
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 8f88dfda..567e85f8 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -5,7 +5,7 @@
Base lexer classes.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re, itertools
@@ -14,18 +14,18 @@ from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- make_analysator
+ make_analysator, text_type, add_metaclass, iteritems
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
-_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
- ('\xff\xfe\0\0', 'utf-32'),
- ('\0\0\xfe\xff', 'utf-32be'),
- ('\xff\xfe', 'utf-16'),
- ('\xfe\xff', 'utf-16be')]
+_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
+ (b'\xff\xfe\0\0', 'utf-32'),
+ (b'\0\0\xfe\xff', 'utf-32be'),
+ (b'\xff\xfe', 'utf-16'),
+ (b'\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
@@ -42,6 +42,7 @@ class LexerMeta(type):
return type.__new__(cls, name, bases, d)
+@add_metaclass(LexerMeta)
class Lexer(object):
"""
Lexer for a specific language.
@@ -55,7 +56,9 @@ class Lexer(object):
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
- *New in Pygments 1.3.*
+
+ .. versionadded:: 1.3
+
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
@@ -84,8 +87,6 @@ class Lexer(object):
#: Priority, should multiple lexers match and no content is provided
priority = 0
- __metaclass__ = LexerMeta
-
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
@@ -136,7 +137,7 @@ class Lexer(object):
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
- if not isinstance(text, unicode):
+ if not isinstance(text, text_type):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
@@ -155,17 +156,18 @@ class Lexer(object):
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
- decoded = unicode(text[len(bom):], encoding,
- errors='replace')
+ decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
- decoded = unicode(text, enc.get('encoding') or 'utf-8',
- errors='replace')
+ decoded = text.decode(enc.get('encoding') or 'utf-8',
+ 'replace')
text = decoded
else:
text = text.decode(self.encoding)
+ if text.startswith(u'\ufeff'):
+ text = text[len(u'\ufeff'):]
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
@@ -192,7 +194,9 @@ class Lexer(object):
def get_tokens_unprocessed(self, text):
"""
- Return an iterable of (tokentype, value) pairs.
+ Return an iterable of (index, tokentype, value) pairs where "index"
+ is the starting position of the token within the input text.
+
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
@@ -453,7 +457,7 @@ class RegexLexerMeta(LexerMeta):
try:
rex = cls._process_regex(tdef[0], rflags)
- except Exception, err:
+ except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
@@ -472,7 +476,7 @@ class RegexLexerMeta(LexerMeta):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
- for state in tokendefs.keys():
+ for state in list(tokendefs):
cls._process_state(tokendefs, processed, state)
return processed
@@ -493,7 +497,7 @@ class RegexLexerMeta(LexerMeta):
for c in itertools.chain((cls,), cls.__mro__):
toks = c.__dict__.get('tokens', {})
- for state, items in toks.iteritems():
+ for state, items in iteritems(toks):
curitems = tokens.get(state)
if curitems is None:
tokens[state] = items
@@ -533,13 +537,13 @@ class RegexLexerMeta(LexerMeta):
return type.__call__(cls, *args, **kwds)
+@add_metaclass(RegexLexerMeta)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
- __metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
@@ -673,7 +677,7 @@ class ExtendedRegexLexer(RegexLexer):
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
- ctx.stack.append(statestack[-1])
+ ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
@@ -718,7 +722,7 @@ def do_insertions(insertions, tokens):
"""
insertions = iter(insertions)
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
@@ -744,7 +748,7 @@ def do_insertions(insertions, tokens):
realpos += len(it_value)
oldi = index - i
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
@@ -759,7 +763,7 @@ def do_insertions(insertions, tokens):
yield realpos, t, v
realpos += len(v)
try:
- index, itokens = insertions.next()
+ index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index 9af6ce68..caedd479 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -5,24 +5,39 @@
Pygments lexers.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
+import re
from os.path import basename
from pygments.lexers._mapping import LEXERS
+from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
-from pygments.util import ClassNotFound, bytes
+from pygments.util import ClassNotFound, itervalues
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
- 'guess_lexer'] + LEXERS.keys()
+ 'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
+_pattern_cache = {}
+
+def _fn_matches(fn, glob):
+ """
+ Return whether the supplied file name fn matches pattern filename
+ """
+ if glob not in _pattern_cache:
+ pattern = re.compile(fnmatch.translate(glob))
+ _pattern_cache[glob] = pattern
+ else:
+ pattern = _pattern_cache[glob]
+
+ return pattern.match(fn)
def _load_lexers(module_name):
@@ -40,7 +55,7 @@ def get_all_lexers():
Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all know lexers.
"""
- for item in LEXERS.itervalues():
+ for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
@@ -53,7 +68,7 @@ def find_lexer_class(name):
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
- for module_name, lname, aliases, _, _ in LEXERS.itervalues():
+ for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
@@ -67,9 +82,12 @@ def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
+ if not _alias:
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
# lookup builtin lexers
- for module_name, name, aliases, _, _ in LEXERS.itervalues():
- if _alias in aliases:
+ for module_name, name, aliases, _, _ in itervalues(LEXERS):
+ if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
@@ -83,20 +101,20 @@ def get_lexer_by_name(_alias, **options):
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
- pattern, use ``analyze_text()`` to figure out which one is more
+ pattern, use ``analyse_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
- for modname, name, _, filenames, _ in LEXERS.itervalues():
+ for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
- if fnmatch.fnmatch(fn, filename):
+ if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
- if fnmatch.fnmatch(fn, filename):
+ if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
@@ -126,7 +144,7 @@ def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
- for modname, name, _, _, mimetypes in LEXERS.itervalues():
+ for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
@@ -171,11 +189,11 @@ def guess_lexer_for_filename(_fn, _text, **options):
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
- if fnmatch.fnmatch(fn, filename):
+ if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary = lexer
for filename in lexer.alias_filenames:
- if fnmatch.fnmatch(fn, filename):
+ if _fn_matches(fn, filename):
matching_lexers.add(lexer)
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
@@ -187,7 +205,13 @@ def guess_lexer_for_filename(_fn, _text, **options):
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
- result.sort()
+
+ # since py3 can no longer sort by class name by default, here is the
+ # sorting function that works in both
+ def type_sort(type_):
+ return (type_[0], type_[1].__name__)
+ result.sort(key=type_sort)
+
if not result[-1][0] and primary is not None:
return primary(**options)
return result[-1][1](**options)
@@ -197,6 +221,16 @@ def guess_lexer(_text, **options):
"""
Guess a lexer by strong distinctions in the text (eg, shebang).
"""
+
+ # try to get a vim modeline first
+ ft = get_filetype_from_buffer(_text)
+
+ if ft is not None:
+ try:
+ return get_lexer_by_name(ft, **options)
+ except ClassNotFound:
+ pass
+
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
diff --git a/pygments/lexers/_asybuiltins.py b/pygments/lexers/_asybuiltins.py
index 108fa199..5472cb63 100644
--- a/pygments/lexers/_asybuiltins.py
+++ b/pygments/lexers/_asybuiltins.py
@@ -10,7 +10,7 @@
TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
for function and variable names.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/lexers/_clbuiltins.py b/pygments/lexers/_clbuiltins.py
index 59f948ba..3f9adf2f 100644
--- a/pygments/lexers/_clbuiltins.py
+++ b/pygments/lexers/_clbuiltins.py
@@ -5,11 +5,11 @@
ANSI Common Lisp builtins.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-BUILTIN_FUNCTIONS = [ # 638 functions
+BUILTIN_FUNCTIONS = set([ # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
@@ -157,17 +157,17 @@ BUILTIN_FUNCTIONS = [ # 638 functions
'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line',
'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p',
'y-or-n-p', 'zerop',
-]
+])
-SPECIAL_FORMS = [
+SPECIAL_FORMS = set([
'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if',
'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet',
'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote',
'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw',
'unwind-protect',
-]
+])
-MACROS = [
+MACROS = set([
'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond',
'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric',
'define-compiler-macro', 'define-condition', 'define-method-combination',
@@ -188,19 +188,19 @@ MACROS = [
'with-input-from-string', 'with-open-file', 'with-open-stream',
'with-output-to-string', 'with-package-iterator', 'with-simple-restart',
'with-slots', 'with-standard-io-syntax',
-]
+])
-LAMBDA_LIST_KEYWORDS = [
+LAMBDA_LIST_KEYWORDS = set([
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
-]
+])
-DECLARATIONS = [
+DECLARATIONS = set([
'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special',
'ignorable', 'notinline', 'type',
-]
+])
-BUILTIN_TYPES = [
+BUILTIN_TYPES = set([
'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit',
'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil',
'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float',
@@ -217,9 +217,9 @@ BUILTIN_TYPES = [
'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition',
'style-warning', 'type-error', 'unbound-variable', 'unbound-slot',
'undefined-function', 'warning',
-]
+])
-BUILTIN_CLASSES = [
+BUILTIN_CLASSES = set([
'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character',
'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream',
'file-stream', 'float', 'function', 'generic-function', 'hash-table',
@@ -229,4 +229,4 @@ BUILTIN_CLASSES = [
'standard-generic-function', 'standard-method', 'standard-object',
'string-stream', 'stream', 'string', 'structure-class', 'structure-object',
'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector',
-]
+])
diff --git a/pygments/lexers/_cocoabuiltins.py b/pygments/lexers/_cocoabuiltins.py
new file mode 100644
index 00000000..1bfa0cdf
--- /dev/null
+++ b/pygments/lexers/_cocoabuiltins.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers._cocoabuiltins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This file defines a set of types used across Cocoa frameworks from Apple.
+ It contains a list of @interfaces, @protocols, and some other types (structs, unions).
+
+ This file may also be used as a standalone generator for the lists above.
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+COCOA_INTERFACES = set(['UITableViewCell', 'NSURLSessionDataTask', 'NSLinguisticTagger', 'NSStream', 'UIPrintInfo', 'SKPaymentTransaction', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'SKSpriteNode', 'JSContext', 'UICollectionReusableView', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UITextSelectionRect', 'MKRoute', 'MPVolumeView', 'UIKeyCommand', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'UIAccessibilityElement', 'NSShadow', 'NSAtomicStoreCacheNode', 'UIPushBehavior', 'CBCharacteristic', 'CBUUID', 'CMStepCounter', 'NSNetService', 'UICollectionView', 'UIViewPrintFormatter', 'CAShapeLayer', 'MCPeerID', 'NSFileVersion', 'CMGyroData', 'SKPhysicsJointSpring', 'CIFilter', 'UIView', 'MKMapItem', 'PKPass', 'MKPolygonRenderer', 'JSValue', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'MKMapView', 'CATransition', 'CLCircularRegion', 'MKTileOverlay', 'UICollisionBehavior', 'ACAccountCredential', 'SKPhysicsJointLimit', 'AVMediaSelectionGroup', 'NSIndexSet', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'UITableView', 'AVCaptureStillImageOutput', 'GCController', 'NSAssertionHandler', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NSPropertyListSerialization', 'AVPlayerItemAccessLogEvent', 'UISwipeGestureRecognizer', 'MKOverlayRenderer', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'AVCaptureMovieFileOutput', 'UIImagePickerController', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'GKLeaderboardViewController', 'MPMoviePlayerController', 'GKScore', 'NSURLConnection', 'ABUnknownPersonViewController', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 'NSKeyedArchiver', 'GKLeaderboardSet', 
'NSSimpleCString', 'CBATTRequest', 'GKMatchRequest', 'AVMetadataObject', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SSReadingList', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'UIStepper', 'UIRefreshControl', 'GKTurnBasedParticipant', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UITabBarController', 'CMMotionActivity', 'SKAction', 'AVPlayerItemOutput', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'SKMutablePayment', 'UIStoryboardSegue', 'NSOrderedSet', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'NSEntityMigrationPolicy', 'NSLocale', 'NSURLSession', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'UIPasteboard', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSUserDefaults', 'SLRequest', 'AVPlayerLayer', 'NSPointerArray', 'AVAudioMix', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'GKMatch', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 'NSFetchRequestExpression', 'NSPipe', 'AVComposition', 'ADBannerView', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UINavigationItem', 'CBPeripheralManager', 'UIStoryboardPopoverSegue', 'SKProductsRequest', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 
'GLKEffectPropertyTexture', 'NSURLResponse', 'SKPaymentQueue', 'MKReverseGeocoder', 'GCControllerAxisInput', 'MKMapSnapshotter', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVAssetReaderOutput', 'EAGLContext', 'UICollectionViewController', 'AVAssetTrack', 'SKEmitterNode', 'AVCaptureDeviceInput', 'AVVideoCompositionCoreAnimationTool', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 'AVAsynchronousVideoCompositionRequest', 'CAGradientLayer', 'NSFormatter', 'CATransaction', 'MPMovieAccessLogEvent', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'MCBrowserViewController', 'NSRelationshipDescription', 'NSMutableAttributedString', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'MKETAResponse', 'CATextLayer', 'NSNotificationQueue', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'CAScrollLayer', 'NSTextCheckingResult', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'AVAssetResourceLoader', 'AVMutableVideoCompositionInstruction', 'MyClass', 'CTCall', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'ACAccountType', 'GKSession', 'SKVideoNode', 'GCExtendedGamepadSnapshot', 'GCExtendedGamepad', 'CAValueFunction', 'UIActivityIndicatorView', 'NSNotification', 'SKReceiptRefreshRequest', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIWebView', 'NSIncrementalStoreNode', 'EKEventStore', 'UISlider', 'AVAssetResourceLoadingRequest', 'AVCaptureInput', 'SKPhysicsBody', 'NSOperation', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 
'CTCallCenter', 'CBMutableService', 'SKTransition', 'UIDynamicAnimator', 'NSMutableArray', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'UICollectionViewLayoutAttributes', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'MPMediaPickerController', 'NSFileCoordinator', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'MPMediaItem', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NSCompoundPredicate', 'MKMultiPoint', 'UIPrintFormatter', 'SKView', 'NSConstantString', 'UIPopoverController', 'AVMetadataFaceObject', 'EKEventViewController', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'UINib', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'UISplitViewController', 'AVAudioSession', 'CAEmitterLayer', 'NSNull', 'MKCircleView', 'UIColor', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 'NSURLCache', 'GKPlayer', 'NSMappingModel', 'NSHTTPCookie', 'AVMutableVideoComposition', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'UIFontDescriptor', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'EKEventEditViewController', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'UIActivity', 'MKShape', 'NSMergeConflict', 'CIImage', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'NSSortDescriptor', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'GKTurnBasedEventHandler', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 
'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'AVURLAsset', 'CBPeripheral', 'AVAssetWriterInputGroup', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'CBService', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'NSNumberFormatter', 'UIPinchGestureRecognizer', 'UIMarkupTextPrintFormatter', 'MKRouteStep', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'CTSubscriber', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'CLHeading', 'NSFileWrapper', 'MKDirectionsResponse', 'UILocalNotification', 'UICollectionViewCell', 'UITextView', 'CMMagnetometerData', 'UIProgressView', 'GKInvite', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'ALAssetsFilter', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSDecimalNumberHandler', 'NSURLSessionConfiguration', 'EKCalendar', 'NSDictionary', 'CAPropertyAnimation', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 'NSExpressionDescription', 'UIViewController', 'NSURLAuthenticationChallenge', 'NSDirectoryEnumerator', 'MKDistanceFormatter', 'GCControllerElement', 'GKPeerPickerController', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'MFMessageComposeViewController', 'AVCaptureSession', 'NSDataDetector', 'AVCaptureVideoPreviewLayer', 'NSURLComponents', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'GLKTextureLoader', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'MKUserLocation', 'CIFeature', 'NSMachPort', 'ALAsset', 'NSURLSessionDownloadTask', 'MPMoviePlayerViewController', 
'NSMutableOrderedSet', 'AVCaptureVideoDataOutput', 'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'NSPersistentStoreRequest', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'NSLock', 'UIDynamicBehavior', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'SKEffectNode', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'UIScrollView', 'EKRecurrenceDayOfWeek', 'ASIdentifierManager', 'UIScreen', 'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'AVCaptureMetadataOutput', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKAlarm', 'NSMutableURLRequest', 'UIVideoEditorController', 'NSAtomicStore', 'UIResponder', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'EKEvent', 'NSDateFormatter', 'AVAssetWriterInputPixelBufferAdaptor', 'UICollectionViewFlowLayoutInvalidationContext', 'UITextField', 'CLPlacemark', 'AVCaptureOutput', 'NSPropertyDescription', 'GCGamepad', 'NSPersistentStoreCoordinator', 'GKMatchmaker', 'CIContext', 'NSThread', 'SKRequest', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 'GKGameCenterViewController', 'UIPrintPaper', 'UICollectionViewLayoutInvalidationContext', 'GLKEffectPropertyTransform', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CAEmitterCell', 'UIFont', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'SKNode', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'CTSubscriberInfo', 
'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'AVSpeechSynthesisVoice', 'UIImage', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SKPayment', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 'NSArray', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'EASession', 'UIInputView', 'NSHTTPCookieStorage', 'NSPointerFunctions', 'AVMediaSelectionOption', 'NSRunLoop', 'CAAnimationGroup', 'MKCircle', 'NSMigrationManager', 'UICollectionViewUpdateItem', 'NSMutableData', 'NSMutableParagraphStyle', 'GLKEffectProperty', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'UIAccelerometer', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'NSPropertyMapping', 'GKChallenge', 'NSURLProtectionSpace', 'ACAccountStore', 'UITextRange', 'NSComparisonPredicate', 'NSOutputStream', 'PKAddPassesViewController', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'MCNearbyServiceAdvertiser', 'NSObject', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'GKAchievement', 'AVCaptureAudioFileOutput', 'TWRequest', 'SKLabelNode', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 'NSURLProtocol', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'CIColor', 'UIDictationPhrase'])
+COCOA_PROTOCOLS = set(['SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSTextAttachmentContainer', 'NSDecimalNumberBehaviors', 'NSMutableCopying', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UITableViewDelegate', 'GKAchievementViewControllerDelegate', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'MFMailComposeViewControllerDelegate', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'NSPortDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'UIPageViewControllerDataSource', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'UIToolbarDelegate', 'UIViewControllerTransitionCoordinator', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'UITextInputTraits', 'NSLayoutManagerDelegate', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 
'NSDiscardableContent', 'UITextFieldDelegate', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'NSURLSessionDelegate', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'GKTurnBasedMatchmakerViewControllerDelegate', 'UIActionSheetDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'UIViewControllerContextTransitioning', 'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'AVVideoCompositing', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'MFMessageComposeViewControllerDelegate', 'UITextSelecting', 'NSURLProtocolClient', 'UIVideoEditorControllerDelegate', 'UITableViewDataSource', 'UIDynamicAnimatorDelegate', 'NSURLSessionDataDelegate', 'UICollisionBehaviorDelegate', 'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'GLKViewDelegate', 'EAAccessoryDelegate', 'NSKeyedUnarchiverDelegate', 'NSMachPortDelegate', 'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'GKVoiceChatClient', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 
'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UISplitViewControllerDelegate', 'MKAnnotation', 'UIAccessibilityIdentification', 'ABNewPersonViewControllerDelegate', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'MKMapViewDelegate', 'UIKeyInput', 'UICollectionViewDataSource', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'EKCalendarChooserDelegate'])
+COCOA_PRIMITIVES = set(['ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', 'CFStreamErrorHTTP', '__CFMachPort', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'KernVersion0Header', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'CFStreamSocketSecurityProtocol', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', 'BslnFormat0Part', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', '__CFDictionary', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'ALMXHeader', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 'JustPCDecompositionAction', 'KerxControlPointHeader', 'PKErrorCode', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__CFNull', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', '__CFString', 'AnchorPoint', 'JustTable', '__CFNetService', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'MIDIThruConnectionParams', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', '_GLKMatrix3', 'CGGradient', 'OpaqueMIDISetup', '_GLKMatrix2', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', '__CFBinaryHeap', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', '__CFBoolean', 
'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'KernStateEntry', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'KerxControlPointEntry', '__CFCharacterSet', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'AnkrTable', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'AudioFile_SMPTE_Time', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', '__CFURL', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'CFHostInfoType', 'KernSimpleArrayHeader', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'OpaqueCMBlockBuffer', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 'AudioUnitMeterClipping', '__CFNumber', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 'ExtendedNoteOnEvent', '__CFArray', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 
'TrakTableData', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'PropTable', 'CGPDFScanner', 'OpaqueMusicEventIterator', '__CFFileSecurity', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'CGRectEdge', 'sfntFontDescriptor', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'SFNTLookupSegment', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', '__CFData', '__CFDate', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__CFWriteStream', '__CFAttributedString', '__CFStringTokenizer', 'JustWidthDeltaEntry', '__CFSet', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', '__CFTimeZone', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 
'ScheduledAudioFileRegion', 'STEntryZero', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'CFStreamErrorHTTPAuthentication', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'sfntDescriptorHeader', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', '__SecCertificate', 'CGDataConsumerCallbacks', 'CGInterpolationQuality', 'CGLineCap', 'MIDIControlTransform', 'BslnFormat1Part', 'CGPDFArray', '__SecPolicy', 'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 
'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', 'KerxSubtableHeader', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'OpaqueMIDIClient', '__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'OpaqueJSClass', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'CFNetServiceMonitorType', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'CFNetServicesError', 'KernOrderedListEntry', '__CFLocale', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'OpaqueAudioConverter', 'MIDIRawData', 'CFNetDiagnosticStatusValues', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', 'JustPCAction', 'CGPathElementType', '__CFRunLoopTimer', '__CFError', 'AudioFormatListItem', '__CFReadStream', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'CFNetworkErrors', 'sfntCMapHeader', '__CFURLEnumerator', '__CFCalendar', '__CFMessagePort', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader'])
+
+
+if __name__ == '__main__':
+ import os
+ import re
+
+ FRAMEWORKS_PATH = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS7.0.sdk/System/Library/Frameworks/'
+ frameworks = os.listdir(FRAMEWORKS_PATH)
+
+ all_interfaces = set()
+ all_protocols = set()
+ all_primitives = set()
+ for framework in frameworks:
+ frameworkHeadersDir = FRAMEWORKS_PATH + framework + '/Headers/'
+ if not os.path.exists(frameworkHeadersDir):
+ continue
+
+ headerFilenames = os.listdir(frameworkHeadersDir)
+
+ for f in headerFilenames:
+ if not f.endswith('.h'):
+ continue
+
+ headerFilePath = frameworkHeadersDir + f
+ content = open(headerFilePath).read()
+ res = re.findall('(?<=@interface )\w+', content)
+ for r in res:
+ all_interfaces.add(r)
+
+ res = re.findall('(?<=@protocol )\w+', content)
+ for r in res:
+ all_protocols.add(r)
+
+ res = re.findall('(?<=typedef enum )\w+', content)
+ for r in res:
+ all_primitives.add(r)
+
+ res = re.findall('(?<=typedef struct )\w+', content)
+ for r in res:
+ all_primitives.add(r)
+
+ res = re.findall('(?<=typedef const struct )\w+', content)
+ for r in res:
+ all_primitives.add(r)
+
+
+ print("ALL interfaces: \n")
+ print(all_interfaces)
+
+ print("\nALL protocols: \n")
+ print(all_protocols)
+
+ print("\nALL primitives: \n")
+ print(all_primitives)
diff --git a/pygments/lexers/_lassobuiltins.py b/pygments/lexers/_lassobuiltins.py
index f3e5147e..9a0a89da 100644
--- a/pygments/lexers/_lassobuiltins.py
+++ b/pygments/lexers/_lassobuiltins.py
@@ -5,7 +5,7 @@
Built-in Lasso types, traits, methods, and members.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -136,8 +136,10 @@ BUILTINS = {
'timeonly',
'net_tcp',
'net_tcpssl',
+ 'net_tcp_ssl',
'net_named_pipe',
'net_udppacket',
+ 'net_udp_packet',
'net_udp',
'pdf_typebase',
'pdf_doc',
@@ -1275,6 +1277,7 @@ BUILTINS = {
'lcapi_datasourcesortascending',
'lcapi_datasourcesortdescending',
'lcapi_datasourcesortcustom',
+ 'lcapi_updatedatasourceslist',
'lcapi_loadmodules',
'lasso_version',
'lasso_uniqueid',
@@ -4024,6 +4027,10 @@ MEMBERS = {
'statuscode',
'raw',
'version',
+ 'download',
+ 'upload',
+ 'ftpdeletefile',
+ 'ftpgetlisting',
'perform',
'performonce',
's',
@@ -4114,8 +4121,11 @@ MEMBERS = {
'foreachaccept',
'writeobjecttcp',
'readobjecttcp',
+ 'beginssl',
+ 'endssl',
'begintls',
'endtls',
+ 'acceptnossl',
'loadcerts',
'sslerrfail',
'fromname',
diff --git a/pygments/lexers/_luabuiltins.py b/pygments/lexers/_luabuiltins.py
index 069c44fd..40037357 100644
--- a/pygments/lexers/_luabuiltins.py
+++ b/pygments/lexers/_luabuiltins.py
@@ -9,10 +9,13 @@
Do not edit the MODULES dict by hand.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
+
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
@@ -142,7 +145,10 @@ MODULES = {'basic': ['_G',
if __name__ == '__main__':
import re
- import urllib
+ try:
+ from urllib import urlopen
+ except ImportError:
+ from urllib.request import urlopen
import pprint
# you can't generally find out what module a function belongs to if you
@@ -188,7 +194,7 @@ if __name__ == '__main__':
def get_newest_version():
- f = urllib.urlopen('http://www.lua.org/manual/')
+ f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
@@ -196,7 +202,7 @@ if __name__ == '__main__':
return m.groups()[0]
def get_lua_functions(version):
- f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
+ f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
@@ -206,7 +212,7 @@ if __name__ == '__main__':
return functions
def get_function_module(name):
- for mod, cb in module_callbacks().iteritems():
+ for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
@@ -233,13 +239,13 @@ if __name__ == '__main__':
def run():
version = get_newest_version()
- print '> Downloading function index for Lua %s' % version
+ print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
- print '> %d functions found:' % len(functions)
+ print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
- print '>> %s' % full_function_name
+ print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index e972ace8..eaaed7dc 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -3,21 +3,25 @@
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
- Lexer mapping defintions. This file is generated by itself. Everytime
- you change something on a builtin lexer defintion, run this script from
+ Lexer mapping definitions. This file is generated by itself. Everytime
+ you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
+ 'APLLexer': ('pygments.lexers.other', 'APL', ('apl',), ('*.apl',), ()),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+ 'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
@@ -31,15 +35,16 @@ LEXERS = {
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
- 'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
- 'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk',), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
+ 'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
+ 'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
- 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
+ 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
- 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat',), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
+ 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
+ 'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
@@ -54,19 +59,22 @@ LEXERS = {
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
- 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire'), (), ('text/html+cheetah', 'text/html+spitfire')),
+ 'ChapelLexer': ('pygments.lexers.compiled', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
+ 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
+ 'CirruLexer': ('pygments.lexers.web', 'Cirru', ('cirru',), ('*.cirru', '*.cr'), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
- 'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript'), ('*.coffee',), ('text/coffeescript',)),
- 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
+ 'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
+ 'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
+ 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
- 'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
+ 'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp', 'elisp', 'emacs'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
@@ -78,23 +86,25 @@ LEXERS = {
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
- 'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
+ 'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
- 'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control',), ('control',), ()),
+ 'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
- 'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
+ 'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
+ 'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
+ 'EiffelLexer': ('pygments.lexers.compiled', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
@@ -109,13 +119,14 @@ LEXERS = {
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
- 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
+ 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'GAPLexer': ('pygments.lexers.math', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
- 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas',), ('*.s', '*.S'), ('text/x-gas',)),
+ 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
- 'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
+ 'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
@@ -123,23 +134,29 @@ LEXERS = {
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
- 'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
+ 'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
- 'HaxeLexer': ('pygments.lexers.web', 'haXe', ('hx', 'haXe'), ('*.hx',), ('text/haxe',)),
- 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja'), (), ('text/html+django', 'text/html+jinja')),
+ 'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
+ 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
+ 'HyLexer': ('pygments.lexers.agile', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
- 'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg'), ('*.ini', '*.cfg'), ('text/x-ini',)),
+ 'IdrisLexer': ('pygments.lexers.functional', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
+ 'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
+ 'Inform6Lexer': ('pygments.lexers.compiled', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
+ 'Inform6TemplateLexer': ('pygments.lexers.compiled', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
+ 'Inform7Lexer': ('pygments.lexers.compiled', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
+ 'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
- 'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
+ 'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
@@ -152,6 +169,7 @@ LEXERS = {
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
+ 'KalLexer': ('pygments.lexers.web', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
@@ -161,21 +179,25 @@ LEXERS = {
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
- 'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
+ 'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateIdrisLexer': ('pygments.lexers.functional', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
- 'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode',), ('*.moo',), ('text/x-moocode',)),
- 'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
+ 'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
+ 'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
+ 'MaskLexer': ('pygments.lexers.web', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
+ 'MathematicaLexer': ('pygments.lexers.math', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
@@ -184,6 +206,7 @@ LEXERS = {
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
+ 'MqlLexer': ('pygments.lexers.compiled', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
@@ -195,11 +218,14 @@ LEXERS = {
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+ 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
+ 'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
+ 'NixLexer': ('pygments.lexers.functional', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
@@ -210,17 +236,20 @@ LEXERS = {
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
- 'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
+ 'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
+ 'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
+ 'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
+ 'PikeLexer': ('pygments.lexers.compiled', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
- 'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript',), ('*.ps', '*.eps'), ('application/postscript',)),
+ 'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
- 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1'), ('*.ps1',), ('text/x-powershell',)),
+ 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
- 'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties',), ('*.properties',), ('text/x-java-properties',)),
- 'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf',), ('*.proto',), ()),
+ 'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
+ 'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
@@ -228,7 +257,7 @@ LEXERS = {
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
- 'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
+ 'QmlLexer': ('pygments.lexers.web', 'QML', ('qml',), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
@@ -245,32 +274,34 @@ LEXERS = {
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
+ 'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
- 'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
+ 'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
- 'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
+ 'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs',), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
- 'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
+ 'SassLexer': ('pygments.lexers.web', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
- 'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
+ 'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
- 'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak'), ('*.st',), ('text/x-smalltalk',)),
+ 'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
- 'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list'), ('sources.list',), ()),
+ 'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
+ 'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
@@ -293,7 +324,7 @@ LEXERS = {
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
- 'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
+ 'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
@@ -311,7 +342,7 @@ if __name__ == '__main__':
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers.%s' % filename[:-3]
- print module_name
+ print(module_name)
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
diff --git a/pygments/lexers/_openedgebuiltins.py b/pygments/lexers/_openedgebuiltins.py
index 4561b07b..4750e80e 100644
--- a/pygments/lexers/_openedgebuiltins.py
+++ b/pygments/lexers/_openedgebuiltins.py
@@ -5,7 +5,7 @@
Builtin list for the OpenEdgeLexer.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/lexers/_phpbuiltins.py b/pygments/lexers/_phpbuiltins.py
index 08eaaf2e..2f5ec851 100644
--- a/pygments/lexers/_phpbuiltins.py
+++ b/pygments/lexers/_phpbuiltins.py
@@ -12,10 +12,11 @@
internet connection. don't run that at home, use
a server ;-)
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
MODULES = {'.NET': ['dotnet_load'],
'APC': ['apc_add',
@@ -3711,7 +3712,10 @@ if __name__ == '__main__':
import re
import shutil
import tarfile
- import urllib
+ try:
+ from urllib import urlretrieve
+ except ImportError:
+ from urllib.request import urlretrieve
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
@@ -3752,7 +3756,7 @@ if __name__ == '__main__':
return modules
def get_php_references():
- download = urllib.urlretrieve(PHP_MANUAL_URL)
+ download = urlretrieve(PHP_MANUAL_URL)
tar = tarfile.open(download[0])
tar.extractall()
tar.close()
@@ -3777,10 +3781,10 @@ if __name__ == '__main__':
f.close()
def run():
- print '>> Downloading Function Index'
+ print('>> Downloading Function Index')
modules = get_php_functions()
- total = sum(len(v) for v in modules.itervalues())
- print '%d functions found' % total
+ total = sum(len(v) for v in modules.values())
+ print('%d functions found' % total)
regenerate(__file__, modules)
shutil.rmtree(PHP_MANUAL_DIR)
diff --git a/pygments/lexers/_postgres_builtins.py b/pygments/lexers/_postgres_builtins.py
index b2322137..11dc6dec 100644
--- a/pygments/lexers/_postgres_builtins.py
+++ b/pygments/lexers/_postgres_builtins.py
@@ -5,12 +5,15 @@
Self-updating data files for PostgreSQL lexer.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
-import urllib
+try:
+ from urllib import urlopen
+except ImportError:
+ from urllib.request import urlopen
# One man's constant is another man's variable.
SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
@@ -18,11 +21,11 @@ KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'
def update_myself():
- data_file = list(fetch(DATATYPES_URL))
+ data_file = list(urlopen(DATATYPES_URL))
datatypes = parse_datatypes(data_file)
pseudos = parse_pseudos(data_file)
- keywords = parse_keywords(fetch(KEYWORDS_URL))
+ keywords = parse_keywords(urlopen(KEYWORDS_URL))
update_consts(__file__, 'DATATYPES', datatypes)
update_consts(__file__, 'PSEUDO_TYPES', pseudos)
update_consts(__file__, 'KEYWORDS', keywords)
@@ -96,9 +99,6 @@ def parse_pseudos(f):
return dt
-def fetch(url):
- return urllib.urlopen(url)
-
def update_consts(filename, constname, content):
f = open(filename)
lines = f.readlines()
diff --git a/pygments/lexers/_robotframeworklexer.py b/pygments/lexers/_robotframeworklexer.py
index bc64e12b..2889e1b8 100644
--- a/pygments/lexers/_robotframeworklexer.py
+++ b/pygments/lexers/_robotframeworklexer.py
@@ -5,7 +5,7 @@
Lexer for Robot Framework.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -27,6 +27,7 @@ import re
from pygments.lexer import Lexer
from pygments.token import Token
+from pygments.util import text_type
HEADING = Token.Generic.Heading
@@ -57,10 +58,10 @@ class RobotFrameworkLexer(Lexer):
Supports both space and pipe separated plain text formats.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'RobotFramework'
- aliases = ['RobotFramework', 'robotframework']
+ aliases = ['robotframework']
filenames = ['*.txt', '*.robot']
mimetypes = ['text/x-robotframework']
@@ -77,7 +78,7 @@ class RobotFrameworkLexer(Lexer):
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
- yield index, token, unicode(value)
+ yield index, token, text_type(value)
index += len(value)
diff --git a/pygments/lexers/_scilab_builtins.py b/pygments/lexers/_scilab_builtins.py
index ed0dc819..7b27daab 100644
--- a/pygments/lexers/_scilab_builtins.py
+++ b/pygments/lexers/_scilab_builtins.py
@@ -5,7 +5,7 @@
Builtin list for the ScilabLexer.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/lexers/_sourcemodbuiltins.py b/pygments/lexers/_sourcemodbuiltins.py
index 0f6b4770..eee84d0b 100644
--- a/pygments/lexers/_sourcemodbuiltins.py
+++ b/pygments/lexers/_sourcemodbuiltins.py
@@ -8,10 +8,12 @@
Do not edit the FUNCTIONS list by hand.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
FUNCTIONS = ['TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
@@ -1012,7 +1014,10 @@ if __name__ == '__main__':
import pprint
import re
import sys
- import urllib
+ try:
+ from urllib import urlopen
+ except ImportError:
+ from urllib.request import urlopen
# urllib ends up wanting to import a module called 'math' -- if
# pygments/lexers is in the path, this ends badly.
@@ -1021,7 +1026,7 @@ if __name__ == '__main__':
del sys.path[i]
def get_version():
- f = urllib.urlopen('http://docs.sourcemod.net/api/index.php')
+ f = urlopen('http://docs.sourcemod.net/api/index.php')
r = re.compile(r'SourceMod v\.<b>([\d\.]+)</td>')
for line in f:
m = r.search(line)
@@ -1029,7 +1034,7 @@ if __name__ == '__main__':
return m.groups()[0]
def get_sm_functions():
- f = urllib.urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
+ f = urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
functions = []
for line in f:
@@ -1057,13 +1062,13 @@ if __name__ == '__main__':
def run():
version = get_version()
- print '> Downloading function index for SourceMod %s' % version
+ print('> Downloading function index for SourceMod %s' % version)
functions = get_sm_functions()
- print '> %d functions found:' % len(functions)
+ print('> %d functions found:' % len(functions))
functionlist = []
for full_function_name in functions:
- print '>> %s' % full_function_name
+ print('>> %s' % full_function_name)
functionlist.append(full_function_name)
regenerate(__file__, functionlist)
diff --git a/pygments/lexers/_stan_builtins.py b/pygments/lexers/_stan_builtins.py
index 637072e4..fa45738d 100644
--- a/pygments/lexers/_stan_builtins.py
+++ b/pygments/lexers/_stan_builtins.py
@@ -1,18 +1,19 @@
# -*- coding: utf-8 -*-
"""
-pygments.lexers._stan_builtins
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ pygments.lexers._stan_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This file contains the names of functions for Stan used by
-``pygments.lexers.math.StanLexer.
+ This file contains the names of functions for Stan used by
+ `pygments.lexers.math.StanLexer`.
-:copyright: Copyright 2013 by the Pygments team, see AUTHORS.
-:license: BSD, see LICENSE for details.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
"""
-KEYWORDS = ['else', 'for', 'if', 'in', 'lower', 'lp__', 'print', 'upper', 'while']
+KEYWORDS = ['else', 'for', 'if', 'in', 'lp__', 'print', 'while']
-TYPES = [ 'corr_matrix',
+TYPES = [ 'cholesky_factor_cov',
+ 'corr_matrix',
'cov_matrix',
'int',
'matrix',
@@ -34,35 +35,53 @@ FUNCTIONS = [ 'Phi',
'atan',
'atan2',
'atanh',
+ 'bernoulli_ccdf_log',
'bernoulli_cdf',
+ 'bernoulli_cdf_log',
'bernoulli_log',
'bernoulli_logit_log',
'bernoulli_rng',
+ 'bessel_first_kind',
+ 'bessel_second_kind',
+ 'beta_binomial_ccdf_log',
'beta_binomial_cdf',
+ 'beta_binomial_cdf_log',
'beta_binomial_log',
'beta_binomial_rng',
+ 'beta_ccdf_log',
'beta_cdf',
+ 'beta_cdf_log',
'beta_log',
'beta_rng',
'binary_log_loss',
+ 'binomial_ccdf_log',
'binomial_cdf',
+ 'binomial_cdf_log',
'binomial_coefficient_log',
'binomial_log',
'binomial_logit_log',
'binomial_rng',
'block',
'categorical_log',
+ 'categorical_logit_log',
'categorical_rng',
+ 'cauchy_ccdf_log',
'cauchy_cdf',
+ 'cauchy_cdf_log',
'cauchy_log',
'cauchy_rng',
'cbrt',
'ceil',
+ 'chi_square_ccdf_log',
+ 'chi_square_cdf',
+ 'chi_square_cdf_log',
'chi_square_log',
'chi_square_rng',
'cholesky_decompose',
'col',
'cols',
+ 'columns_dot_product',
+ 'columns_dot_self',
'cos',
'cosh',
'crossprod',
@@ -77,55 +96,78 @@ FUNCTIONS = [ 'Phi',
'dirichlet_rng',
'dot_product',
'dot_self',
+ 'double_exponential_ccdf_log',
+ 'double_exponential_cdf',
+ 'double_exponential_cdf_log',
'double_exponential_log',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
- 'epsilon',
'erf',
'erfc',
'exp',
'exp2',
+ 'exp_mod_normal_ccdf_log',
'exp_mod_normal_cdf',
+ 'exp_mod_normal_cdf_log',
'exp_mod_normal_log',
'exp_mod_normal_rng',
'expm1',
+ 'exponential_ccdf_log',
'exponential_cdf',
+ 'exponential_cdf_log',
'exponential_log',
'exponential_rng',
'fabs',
+ 'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
+ 'gamma_ccdf_log',
+ 'gamma_cdf',
+ 'gamma_cdf_log',
'gamma_log',
+ 'gamma_p',
+ 'gamma_q',
'gamma_rng',
+ 'gaussian_dlm_obs_log',
+ 'gumbel_ccdf_log',
'gumbel_cdf',
+ 'gumbel_cdf_log',
'gumbel_log',
'gumbel_rng',
+ 'head',
'hypergeometric_log',
'hypergeometric_rng',
'hypot',
'if_else',
+ 'increment_log_prob',
'int_step',
+ 'inv',
+ 'inv_chi_square_ccdf_log',
'inv_chi_square_cdf',
+ 'inv_chi_square_cdf_log',
'inv_chi_square_log',
'inv_chi_square_rng',
'inv_cloglog',
+ 'inv_gamma_ccdf_log',
'inv_gamma_cdf',
+ 'inv_gamma_cdf_log',
'inv_gamma_log',
'inv_gamma_rng',
'inv_logit',
+ 'inv_sqrt',
+ 'inv_square',
'inv_wishart_log',
'inv_wishart_rng',
'inverse',
+ 'inverse_spd',
'lbeta',
'lgamma',
- 'lkj_corr_cholesky_log',
- 'lkj_corr_cholesky_rng',
'lkj_corr_log',
'lkj_corr_rng',
'lkj_cov_log',
@@ -133,96 +175,145 @@ FUNCTIONS = [ 'Phi',
'log',
'log10',
'log1m',
+ 'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
+ 'log_diff_exp',
+ 'log_falling_factorial',
'log_inv_logit',
+ 'log_rising_factorial',
+ 'log_softmax',
'log_sum_exp',
+ 'logistic_ccdf_log',
'logistic_cdf',
+ 'logistic_cdf_log',
'logistic_log',
'logistic_rng',
'logit',
+ 'lognormal_ccdf_log',
'lognormal_cdf',
+ 'lognormal_cdf_log',
'lognormal_log',
'lognormal_rng',
+ 'machine_precision',
'max',
'mdivide_left_tri_low',
'mdivide_right_tri_low',
'mean',
'min',
+ 'modified_bessel_first_kind',
+ 'modified_bessel_second_kind',
'multi_normal_cholesky_log',
'multi_normal_log',
'multi_normal_prec_log',
'multi_normal_rng',
'multi_student_t_log',
'multi_student_t_rng',
- 'multinomial_cdf',
'multinomial_log',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
+ 'neg_binomial_ccdf_log',
'neg_binomial_cdf',
+ 'neg_binomial_cdf_log',
'neg_binomial_log',
'neg_binomial_rng',
- 'negative_epsilon',
'negative_infinity',
+ 'normal_ccdf_log',
'normal_cdf',
+ 'normal_cdf_log',
'normal_log',
'normal_rng',
'not_a_number',
'ordered_logistic_log',
'ordered_logistic_rng',
'owens_t',
+ 'pareto_ccdf_log',
'pareto_cdf',
+ 'pareto_cdf_log',
'pareto_log',
'pareto_rng',
'pi',
+ 'poisson_ccdf_log',
'poisson_cdf',
+ 'poisson_cdf_log',
'poisson_log',
'poisson_log_log',
'poisson_rng',
'positive_infinity',
'pow',
'prod',
+ 'quad_form',
+ 'rank',
+ 'rayleigh_ccdf_log',
+ 'rayleigh_cdf',
+ 'rayleigh_cdf_log',
+ 'rayleigh_log',
+ 'rayleigh_rng',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
+ 'rising_factorial',
'round',
'row',
'rows',
+ 'rows_dot_product',
+ 'rows_dot_self',
+ 'scaled_inv_chi_square_ccdf_log',
'scaled_inv_chi_square_cdf',
+ 'scaled_inv_chi_square_cdf_log',
'scaled_inv_chi_square_log',
'scaled_inv_chi_square_rng',
'sd',
+ 'segment',
'sin',
'singular_values',
'sinh',
'size',
+ 'skew_normal_ccdf_log',
'skew_normal_cdf',
+ 'skew_normal_cdf_log',
'skew_normal_log',
'skew_normal_rng',
'softmax',
+ 'sort_asc',
+ 'sort_desc',
'sqrt',
'sqrt2',
'square',
'step',
+ 'student_t_ccdf_log',
'student_t_cdf',
+ 'student_t_cdf_log',
'student_t_log',
'student_t_rng',
+ 'sub_col',
+ 'sub_row',
'sum',
+ 'tail',
'tan',
'tanh',
'tcrossprod',
'tgamma',
+ 'to_vector',
'trace',
+ 'trace_gen_quad_form',
+ 'trace_quad_form',
'trunc',
+ 'uniform_ccdf_log',
+ 'uniform_cdf',
+ 'uniform_cdf_log',
'uniform_log',
'uniform_rng',
'variance',
+ 'von_mises_log',
+ 'weibull_ccdf_log',
'weibull_cdf',
+ 'weibull_cdf_log',
'weibull_log',
'weibull_rng',
'wishart_log',
@@ -236,6 +327,7 @@ DISTRIBUTIONS = [ 'bernoulli',
'binomial_coefficient',
'binomial_logit',
'categorical',
+ 'categorical_logit',
'cauchy',
'chi_square',
'dirichlet',
@@ -243,13 +335,13 @@ DISTRIBUTIONS = [ 'bernoulli',
'exp_mod_normal',
'exponential',
'gamma',
+ 'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
- 'lkj_corr_cholesky',
'lkj_cov',
'logistic',
'lognormal',
@@ -265,10 +357,12 @@ DISTRIBUTIONS = [ 'bernoulli',
'pareto',
'poisson',
'poisson_log',
+ 'rayleigh',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
+ 'von_mises',
'weibull',
'wishart']
@@ -357,4 +451,3 @@ RESERVED = [ 'alignas',
'wchar_t',
'xor',
'xor_eq']
-
diff --git a/pygments/lexers/_vimbuiltins.py b/pygments/lexers/_vimbuiltins.py
index 9fc1b15e..e95a8ec5 100644
--- a/pygments/lexers/_vimbuiltins.py
+++ b/pygments/lexers/_vimbuiltins.py
@@ -4,7 +4,7 @@
def _getauto():
return [('BufAdd','BufAdd'),('BufCreate','BufCreate'),('BufDelete','BufDelete'),('BufEnter','BufEnter'),('BufFilePost','BufFilePost'),('BufFilePre','BufFilePre'),('BufHidden','BufHidden'),('BufLeave','BufLeave'),('BufNew','BufNew'),('BufNewFile','BufNewFile'),('BufRead','BufRead'),('BufReadCmd','BufReadCmd'),('BufReadPost','BufReadPost'),('BufReadPre','BufReadPre'),('BufUnload','BufUnload'),('BufWinEnter','BufWinEnter'),('BufWinLeave','BufWinLeave'),('BufWipeout','BufWipeout'),('BufWrite','BufWrite'),('BufWriteCmd','BufWriteCmd'),('BufWritePost','BufWritePost'),('BufWritePre','BufWritePre'),('Cmd','Cmd'),('CmdwinEnter','CmdwinEnter'),('CmdwinLeave','CmdwinLeave'),('ColorScheme','ColorScheme'),('CursorHold','CursorHold'),('CursorHoldI','CursorHoldI'),('CursorMoved','CursorMoved'),('CursorMovedI','CursorMovedI'),('EncodingChanged','EncodingChanged'),('FileAppendCmd','FileAppendCmd'),('FileAppendPost','FileAppendPost'),('FileAppendPre','FileAppendPre'),('FileChangedRO','FileChangedRO'),('FileChangedShell','FileChangedShell'),('FileChangedShellPost','FileChangedShellPost'),('FileEncoding','FileEncoding'),('FileReadCmd','FileReadCmd'),('FileReadPost','FileReadPost'),('FileReadPre','FileReadPre'),('FileType','FileType'),('FileWriteCmd','FileWriteCmd'),('FileWritePost','FileWritePost'),('FileWritePre','FileWritePre'),('FilterReadPost','FilterReadPost'),('FilterReadPre','FilterReadPre'),('FilterWritePost','FilterWritePost'),('FilterWritePre','FilterWritePre'),('FocusGained','FocusGained'),('FocusLost','FocusLost'),('FuncUndefined','FuncUndefined'),('GUIEnter','GUIEnter'),('GUIFailed','GUIFailed'),('InsertChange','InsertChange'),('InsertCharPre','InsertCharPre'),('InsertEnter','InsertEnter'),('InsertLeave','InsertLeave'),('MenuPopup','MenuPopup'),('QuickFixCmdPost','QuickFixCmdPost'),('QuickFixCmdPre','QuickFixCmdPre'),('RemoteReply','RemoteReply'),('SessionLoadPost','SessionLoadPost'),('ShellCmdPost','ShellCmdPost'),('ShellFilterPost','ShellFilterPost'),('SourceCmd','Source
Cmd'),('SourcePre','SourcePre'),('SpellFileMissing','SpellFileMissing'),('StdinReadPost','StdinReadPost'),('StdinReadPre','StdinReadPre'),('SwapExists','SwapExists'),('Syntax','Syntax'),('TabEnter','TabEnter'),('TabLeave','TabLeave'),('TermChanged','TermChanged'),('TermResponse','TermResponse'),('User','User'),('UserGettingBored','UserGettingBored'),('VimEnter','VimEnter'),('VimLeave','VimLeave'),('VimLeavePre','VimLeavePre'),('VimResized','VimResized'),('WinEnter','WinEnter'),('WinLeave','WinLeave'),('event','event')]
# NOTE(review): this span is a unified-diff hunk (leading "-"/"+" markers)
# over an auto-generated table of Vim ex-command abbreviations, and the long
# list literals are hard-wrapped mid-token by the diff viewer -- it is not
# valid Python as it stands.  This data should be regenerated by the
# generator script rather than hand-edited.  Code below is byte-identical to
# the original; only comment lines were added, and only at statement
# boundaries (never between wrapped continuation lines).
def _getcommand():
# Removed body: [(abbreviation, full_command_name), ...] pairs for Vim ex
# commands, sorted by abbreviation.
- return [('Allargs','Allargs'),('DiffOrig','DiffOrig'),('Error','Error'),('Man','Man'),('MyCommand','MyCommand'),('Mycmd','Mycmd'),('N','N'),('N','Next'),('P','P'),('P','Print'),('Ren','Ren'),('Rena','Rena'),('Renu','Renu'),('TOhtml','TOhtml'),('X','X'),('XMLent','XMLent'),('XMLns','XMLns'),('a','a'),('ab','ab'),('abc','abclear'),('abo','aboveleft'),('al','all'),('ar','ar'),('ar','args'),('arga','argadd'),('argd','argdelete'),('argdo','argdo'),('arge','argedit'),('argg','argglobal'),('argl','arglocal'),('argu','argument'),('as','ascii'),('au','au'),('b','buffer'),('bN','bNext'),('ba','ball'),('bad','badd'),('bar','bar'),('bd','bdelete'),('bel','belowright'),('bf','bfirst'),('bl','blast'),('bm','bmodified'),('bn','bnext'),('bo','botright'),('bp','bprevious'),('br','br'),('br','brewind'),('brea','break'),('breaka','breakadd'),('breakd','breakdel'),('breakl','breaklist'),('bro','browse'),('browseset','browseset'),('bu','bu'),('buf','buf'),('bufdo','bufdo'),('buffers','buffers'),('bun','bunload'),('bw','bwipeout'),('c','c'),('c','change'),('cN','cN'),('cN','cNext'),('cNf','cNf'),('cNf','cNfile'),('cabc','cabclear'),('cad','cad'),('cad','caddexpr'),('caddb','caddbuffer'),('caddf','caddfile'),('cal','call'),('cat','catch'),('cb','cbuffer'),('cc','cc'),('ccl','cclose'),('cd','cd'),('ce','center'),('cex','cexpr'),('cf','cfile'),('cfir','cfirst'),('cg','cgetfile'),('cgetb','cgetbuffer'),('cgete','cgetexpr'),('changes','changes'),('chd','chdir'),('che','checkpath'),('checkt','checktime'),('cl','cl'),('cl','clist'),('cla','clast'),('clo','close'),('cmapc','cmapclear'),('cmdname','cmdname'),('cn','cn'),('cn','cnext'),('cnew','cnewer'),('cnf','cnf'),('cnf','cnfile'),('co','copy'),('col','colder'),('colo','colorscheme'),('com','com'),('comc','comclear'),('comment','comment'),('comp','compiler'),('con','con'),('con','continue'),('conf','confirm'),('cope','copen'),('count','count'),('cp','cprevious'),('cpf','cpfile'),('cq','cquit'),('cr','crewind'),('cs','cs'),('cscope','cscope'),
('cstag','cstag'),('cuna','cunabbrev'),('cw','cwindow'),('d','d'),('d','delete'),('de','de'),('debug','debug'),('debugg','debuggreedy'),('del','del'),('delc','delcommand'),('delf','delf'),('delf','delfunction'),('delm','delmarks'),('di','di'),('di','display'),('diffg','diffget'),('diffo','diffo'),('diffoff','diffoff'),('diffp','diffp'),('diffpatch','diffpatch'),('diffpu','diffput'),('diffsplit','diffsplit'),('difft','difft'),('diffthis','diffthis'),('diffu','diffupdate'),('dig','dig'),('dig','digraphs'),('dj','djump'),('dl','dlist'),('do','do'),('doau','doau'),('dr','drop'),('ds','dsearch'),('dsp','dsplit'),('dwim','dwim'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','edit'),('ea','ea'),('earlier','earlier'),('ec','ec'),('echoe','echoerr'),('echom','echomsg'),('echon','echon'),('el','else'),('elsei','elseif'),('em','emenu'),('emenu','emenu'),('en','en'),('en','endif'),('endf','endf'),('endf','endfunction'),('endfo','endfor'),('endfun','endfun'),('endt','endtry'),('endw','endwhile'),('ene','enew'),('ex','ex'),('exi','exit'),('exu','exusage'),('f','f'),('f','file'),('filename','filename'),('files','files'),('filet','filet'),('filetype','filetype'),('fin','fin'),('fin','find'),('fina','finally'),('fini','finish'),('fir','first'),('fix','fixdel'),('fo','fold'),('foldc','foldclose'),('foldd','folddoopen'),('folddoc','folddoclosed'),('foldo','foldopen'),('for','for'),('fu','fu'),('fu','function'),('fun','fun'),('g','g'),('get','get'),('go','goto'),('gr','grep'),('grepa','grepadd'),('gs','gs'),('gs','gs'),('gui','gui'),('gvim','gvim'),('h','h'),('h','h'),('h','h'),('h','h'),('h','help'),('ha','hardcopy'),('helpf','helpfind'),('helpg','helpgrep'),('helpt','helptags'),('hi','hi'),('hid','hide'),('his','history'),('i','i'),('ia','ia'),('iabc','iabclear'),('if','if'),('ij','ijump'),('il','ilist'),('imapc','imapclear'),('in','in'),('index','index'),('intro','intro'),('is','isearch'),('isp','isplit'),('iuna','iunabbrev'),('j','jo
in'),('ju','jumps'),('k','k'),('kee','keepmarks'),('keepa','keepa'),('keepalt','keepalt'),('keepj','keepjumps'),('l','l'),('l','list'),('lN','lN'),('lN','lNext'),('lNf','lNf'),('lNf','lNfile'),('la','la'),('la','last'),('lad','lad'),('lad','laddexpr'),('laddb','laddbuffer'),('laddf','laddfile'),('lan','lan'),('lan','language'),('lat','lat'),('later','later'),('lb','lbuffer'),('lc','lcd'),('lch','lchdir'),('lcl','lclose'),('lcs','lcs'),('lcscope','lcscope'),('le','left'),('lefta','leftabove'),('let','let'),('lex','lexpr'),('lf','lfile'),('lfir','lfirst'),('lg','lgetfile'),('lgetb','lgetbuffer'),('lgete','lgetexpr'),('lgr','lgrep'),('lgrepa','lgrepadd'),('lh','lhelpgrep'),('ll','ll'),('lla','llast'),('lli','llist'),('lmak','lmake'),('lmapc','lmapclear'),('lne','lne'),('lne','lnext'),('lnew','lnewer'),('lnf','lnf'),('lnf','lnfile'),('lo','lo'),('lo','loadview'),('loadk','loadk'),('loadkeymap','loadkeymap'),('loc','lockmarks'),('locale','locale'),('lockv','lockvar'),('lol','lolder'),('lop','lopen'),('lp','lprevious'),('lpf','lpfile'),('lr','lrewind'),('ls','ls'),('lt','ltag'),('lua','lua'),('luado','luado'),('luafile','luafile'),('lv','lvimgrep'),('lvimgrepa','lvimgrepadd'),('lw','lwindow'),('m','move'),('ma','ma'),('ma','mark'),('main','main'),('main','main'),('mak','make'),('marks','marks'),('mat','match'),('menut','menut'),('menut','menutranslate'),('mes','mes'),('messages','messages'),('mk','mk'),('mk','mkexrc'),('mkdir','mkdir'),('mks','mksession'),('mksp','mkspell'),('mkv','mkv'),('mkv','mkvimrc'),('mkvie','mkview'),('mo','mo'),('mod','mode'),('mv','mv'),('mz','mz'),('mz','mzscheme'),('mzf','mzfile'),('n','n'),('n','n'),('n','next'),('nb','nbkey'),('nbc','nbclose'),('nbs','nbstart'),('ne','ne'),('new','new'),('nkf','nkf'),('nmapc','nmapclear'),('noa','noa'),('noautocmd','noautocmd'),('noh','nohlsearch'),('nu','number'),('o','o'),('o','open'),('ol','oldfiles'),('omapc','omapclear'),('on','only'),('opt','options'),('ownsyntax','ownsyntax'),('p','p'),('p','p'),('p','
p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','print'),('pat','pat'),('pat','pat'),('pc','pclose'),('pe','pe'),('pe','perl'),('ped','pedit'),('perld','perldo'),('po','pop'),('popu','popu'),('popu','popup'),('pp','ppop'),('pr','pr'),('pre','preserve'),('prev','previous'),('pro','pro'),('prof','profile'),('profd','profdel'),('promptf','promptfind'),('promptr','promptrepl'),('ps','psearch'),('ptN','ptN'),('ptN','ptNext'),('pta','ptag'),('ptf','ptfirst'),('ptj','ptjump'),('ptl','ptlast'),('ptn','ptn'),('ptn','ptnext'),('ptp','ptprevious'),('ptr','ptrewind'),('pts','ptselect'),('pu','put'),('pw','pwd'),('py','py'),('py','python'),('py3','py3'),('py3','py3'),('py3file','py3file'),('pyf','pyfile'),('python3','python3'),('q','q'),('q','quit'),('qa','qall'),('quita','quitall'),('quote','quote'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','read'),('re','re'),('rec','recover'),('red','red'),('red','redo'),('redi','redir'),('redr','redraw'),('redraws','redrawstatus'),('reg','registers'),('res','resize'),('ret','retab'),('retu','return'),('rew','rewind'),('ri','right'),('rightb','rightbelow'),('ru','ru'),('ru','runtime'),('rub','ruby'),('rubyd','rubydo'),('rubyf','rubyfile'),('rundo','rundo'),('rv','rviminfo'),('s','s'),('s','s'),('s','s'),('s','s'),('sN','sNext'),('sa','sargument'),('sal','sall'),('san','sandbox'),('sav','saveas'),('sb','sbuffer'),('sbN','sbNext'),('sba','sball'),('sbf','sbfirst'),('sbl','sblast'),('sbm','sbmodified'),('sbn','sbnext'),('sbp','sbprevious'),('sbr','sbrewind'),('scrip','scrip'),('scrip','scriptnames'),('scripte','scriptencoding'),('scs','scs'),('scscope','scscope'),('se','set'),('setf','setfiletype'),('setg','setglobal'),('setl','setlocal'),('sf','sfind'),('sfir','sfirst'),('sh','shell'),('si','si'),('sig','sig'),('sign','sign'),('sil','silent'),('sim','simalt'),('sl','sl'),('sl','sleep'),('sla','slast'),('sm','smagic'),('sm','smap'),('sme','sme'),('smenu
','smenu'),('sn','snext'),('sni','sniff'),('sno','snomagic'),('snoreme','snoreme'),('snoremenu','snoremenu'),('so','so'),('so','source'),('sor','sort'),('sp','split'),('spe','spe'),('spe','spellgood'),('spelld','spelldump'),('spelli','spellinfo'),('spellr','spellrepall'),('spellu','spellundo'),('spellw','spellwrong'),('spr','sprevious'),('sre','srewind'),('st','st'),('st','stop'),('sta','stag'),('star','star'),('star','startinsert'),('start','start'),('startg','startgreplace'),('startr','startreplace'),('stj','stjump'),('stopi','stopinsert'),('sts','stselect'),('sub','sub'),('sub','sub'),('sun','sunhide'),('sunme','sunme'),('sunmenu','sunmenu'),('sus','suspend'),('sv','sview'),('sw','swapname'),('sy','sy'),('syn','syn'),('sync','sync'),('syncbind','syncbind'),('synlist','synlist'),('t','t'),('t','t'),('t','t'),('tN','tN'),('tN','tNext'),('ta','ta'),('ta','tag'),('tab','tab'),('tabN','tabN'),('tabN','tabNext'),('tabc','tabclose'),('tabd','tabdo'),('tabe','tabedit'),('tabf','tabfind'),('tabfir','tabfirst'),('tabl','tablast'),('tabm','tabmove'),('tabn','tabnext'),('tabnew','tabnew'),('tabo','tabonly'),('tabp','tabprevious'),('tabr','tabrewind'),('tabs','tabs'),('tags','tags'),('tc','tcl'),('tcld','tcldo'),('tclf','tclfile'),('te','tearoff'),('tf','tfirst'),('th','throw'),('tj','tjump'),('tl','tlast'),('tm','tm'),('tm','tmenu'),('tn','tn'),('tn','tnext'),('to','topleft'),('tp','tprevious'),('tr','tr'),('tr','trewind'),('try','try'),('ts','tselect'),('tu','tu'),('tu','tunmenu'),('u','u'),('u','undo'),('un','un'),('una','unabbreviate'),('undoj','undojoin'),('undol','undolist'),('unh','unhide'),('unl','unl'),('unlo','unlockvar'),('uns','unsilent'),('up','update'),('v','v'),('ve','ve'),('ve','version'),('verb','verbose'),('version','version'),('version','version'),('vert','vertical'),('vi','vi'),('vi','visual'),('vie','view'),('vim','vimgrep'),('vimgrepa','vimgrepadd'),('viu','viusage'),('vmapc','vmapclear'),('vne','vnew'),('vs','vsplit'),('w','w'),('w','write'),('wN','wNex
t'),('wa','wall'),('wh','while'),('win','win'),('win','winsize'),('winc','wincmd'),('windo','windo'),('winp','winpos'),('wn','wnext'),('wp','wprevious'),('wq','wq'),('wqa','wqall'),('ws','wsverb'),('wundo','wundo'),('wv','wviminfo'),('x','x'),('x','xit'),('xa','xall'),('xmapc','xmapclear'),('xme','xme'),('xmenu','xmenu'),('xnoreme','xnoreme'),('xnoremenu','xnoremenu'),('xterm','xterm'),('xunme','xunme'),('xunmenu','xunmenu'),('xwininfo','xwininfo'),('y','yank')]
# Added body: appears identical to the removed one except that ('au','au')
# becomes ('au','autocmd'), i.e. the ":au" abbreviation now maps to its full
# "autocmd" command name -- TODO(review): confirm no other entries changed
# (wrap points shift only because of the extra characters).
+ return [('Allargs','Allargs'),('DiffOrig','DiffOrig'),('Error','Error'),('Man','Man'),('MyCommand','MyCommand'),('Mycmd','Mycmd'),('N','N'),('N','Next'),('P','P'),('P','Print'),('Ren','Ren'),('Rena','Rena'),('Renu','Renu'),('TOhtml','TOhtml'),('X','X'),('XMLent','XMLent'),('XMLns','XMLns'),('a','a'),('ab','ab'),('abc','abclear'),('abo','aboveleft'),('al','all'),('ar','ar'),('ar','args'),('arga','argadd'),('argd','argdelete'),('argdo','argdo'),('arge','argedit'),('argg','argglobal'),('argl','arglocal'),('argu','argument'),('as','ascii'),('au','autocmd'),('b','buffer'),('bN','bNext'),('ba','ball'),('bad','badd'),('bar','bar'),('bd','bdelete'),('bel','belowright'),('bf','bfirst'),('bl','blast'),('bm','bmodified'),('bn','bnext'),('bo','botright'),('bp','bprevious'),('br','br'),('br','brewind'),('brea','break'),('breaka','breakadd'),('breakd','breakdel'),('breakl','breaklist'),('bro','browse'),('browseset','browseset'),('bu','bu'),('buf','buf'),('bufdo','bufdo'),('buffers','buffers'),('bun','bunload'),('bw','bwipeout'),('c','c'),('c','change'),('cN','cN'),('cN','cNext'),('cNf','cNf'),('cNf','cNfile'),('cabc','cabclear'),('cad','cad'),('cad','caddexpr'),('caddb','caddbuffer'),('caddf','caddfile'),('cal','call'),('cat','catch'),('cb','cbuffer'),('cc','cc'),('ccl','cclose'),('cd','cd'),('ce','center'),('cex','cexpr'),('cf','cfile'),('cfir','cfirst'),('cg','cgetfile'),('cgetb','cgetbuffer'),('cgete','cgetexpr'),('changes','changes'),('chd','chdir'),('che','checkpath'),('checkt','checktime'),('cl','cl'),('cl','clist'),('cla','clast'),('clo','close'),('cmapc','cmapclear'),('cmdname','cmdname'),('cn','cn'),('cn','cnext'),('cnew','cnewer'),('cnf','cnf'),('cnf','cnfile'),('co','copy'),('col','colder'),('colo','colorscheme'),('com','com'),('comc','comclear'),('comment','comment'),('comp','compiler'),('con','con'),('con','continue'),('conf','confirm'),('cope','copen'),('count','count'),('cp','cprevious'),('cpf','cpfile'),('cq','cquit'),('cr','crewind'),('cs','cs'),('cscope','csco
pe'),('cstag','cstag'),('cuna','cunabbrev'),('cw','cwindow'),('d','d'),('d','delete'),('de','de'),('debug','debug'),('debugg','debuggreedy'),('del','del'),('delc','delcommand'),('delf','delf'),('delf','delfunction'),('delm','delmarks'),('di','di'),('di','display'),('diffg','diffget'),('diffo','diffo'),('diffoff','diffoff'),('diffp','diffp'),('diffpatch','diffpatch'),('diffpu','diffput'),('diffsplit','diffsplit'),('difft','difft'),('diffthis','diffthis'),('diffu','diffupdate'),('dig','dig'),('dig','digraphs'),('dj','djump'),('dl','dlist'),('do','do'),('doau','doau'),('dr','drop'),('ds','dsearch'),('dsp','dsplit'),('dwim','dwim'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','e'),('e','edit'),('ea','ea'),('earlier','earlier'),('ec','ec'),('echoe','echoerr'),('echom','echomsg'),('echon','echon'),('el','else'),('elsei','elseif'),('em','emenu'),('emenu','emenu'),('en','en'),('en','endif'),('endf','endf'),('endf','endfunction'),('endfo','endfor'),('endfun','endfun'),('endt','endtry'),('endw','endwhile'),('ene','enew'),('ex','ex'),('exi','exit'),('exu','exusage'),('f','f'),('f','file'),('filename','filename'),('files','files'),('filet','filet'),('filetype','filetype'),('fin','fin'),('fin','find'),('fina','finally'),('fini','finish'),('fir','first'),('fix','fixdel'),('fo','fold'),('foldc','foldclose'),('foldd','folddoopen'),('folddoc','folddoclosed'),('foldo','foldopen'),('for','for'),('fu','fu'),('fu','function'),('fun','fun'),('g','g'),('get','get'),('go','goto'),('gr','grep'),('grepa','grepadd'),('gs','gs'),('gs','gs'),('gui','gui'),('gvim','gvim'),('h','h'),('h','h'),('h','h'),('h','h'),('h','help'),('ha','hardcopy'),('helpf','helpfind'),('helpg','helpgrep'),('helpt','helptags'),('hi','hi'),('hid','hide'),('his','history'),('i','i'),('ia','ia'),('iabc','iabclear'),('if','if'),('ij','ijump'),('il','ilist'),('imapc','imapclear'),('in','in'),('index','index'),('intro','intro'),('is','isearch'),('isp','isplit'),('iuna','iunabbrev'),('j
','join'),('ju','jumps'),('k','k'),('kee','keepmarks'),('keepa','keepa'),('keepalt','keepalt'),('keepj','keepjumps'),('l','l'),('l','list'),('lN','lN'),('lN','lNext'),('lNf','lNf'),('lNf','lNfile'),('la','la'),('la','last'),('lad','lad'),('lad','laddexpr'),('laddb','laddbuffer'),('laddf','laddfile'),('lan','lan'),('lan','language'),('lat','lat'),('later','later'),('lb','lbuffer'),('lc','lcd'),('lch','lchdir'),('lcl','lclose'),('lcs','lcs'),('lcscope','lcscope'),('le','left'),('lefta','leftabove'),('let','let'),('lex','lexpr'),('lf','lfile'),('lfir','lfirst'),('lg','lgetfile'),('lgetb','lgetbuffer'),('lgete','lgetexpr'),('lgr','lgrep'),('lgrepa','lgrepadd'),('lh','lhelpgrep'),('ll','ll'),('lla','llast'),('lli','llist'),('lmak','lmake'),('lmapc','lmapclear'),('lne','lne'),('lne','lnext'),('lnew','lnewer'),('lnf','lnf'),('lnf','lnfile'),('lo','lo'),('lo','loadview'),('loadk','loadk'),('loadkeymap','loadkeymap'),('loc','lockmarks'),('locale','locale'),('lockv','lockvar'),('lol','lolder'),('lop','lopen'),('lp','lprevious'),('lpf','lpfile'),('lr','lrewind'),('ls','ls'),('lt','ltag'),('lua','lua'),('luado','luado'),('luafile','luafile'),('lv','lvimgrep'),('lvimgrepa','lvimgrepadd'),('lw','lwindow'),('m','move'),('ma','ma'),('ma','mark'),('main','main'),('main','main'),('mak','make'),('marks','marks'),('mat','match'),('menut','menut'),('menut','menutranslate'),('mes','mes'),('messages','messages'),('mk','mk'),('mk','mkexrc'),('mkdir','mkdir'),('mks','mksession'),('mksp','mkspell'),('mkv','mkv'),('mkv','mkvimrc'),('mkvie','mkview'),('mo','mo'),('mod','mode'),('mv','mv'),('mz','mz'),('mz','mzscheme'),('mzf','mzfile'),('n','n'),('n','n'),('n','next'),('nb','nbkey'),('nbc','nbclose'),('nbs','nbstart'),('ne','ne'),('new','new'),('nkf','nkf'),('nmapc','nmapclear'),('noa','noa'),('noautocmd','noautocmd'),('noh','nohlsearch'),('nu','number'),('o','o'),('o','open'),('ol','oldfiles'),('omapc','omapclear'),('on','only'),('opt','options'),('ownsyntax','ownsyntax'),('p','p'),('p','p'),(
'p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','p'),('p','print'),('pat','pat'),('pat','pat'),('pc','pclose'),('pe','pe'),('pe','perl'),('ped','pedit'),('perld','perldo'),('po','pop'),('popu','popu'),('popu','popup'),('pp','ppop'),('pr','pr'),('pre','preserve'),('prev','previous'),('pro','pro'),('prof','profile'),('profd','profdel'),('promptf','promptfind'),('promptr','promptrepl'),('ps','psearch'),('ptN','ptN'),('ptN','ptNext'),('pta','ptag'),('ptf','ptfirst'),('ptj','ptjump'),('ptl','ptlast'),('ptn','ptn'),('ptn','ptnext'),('ptp','ptprevious'),('ptr','ptrewind'),('pts','ptselect'),('pu','put'),('pw','pwd'),('py','py'),('py','python'),('py3','py3'),('py3','py3'),('py3file','py3file'),('pyf','pyfile'),('python3','python3'),('q','q'),('q','quit'),('qa','qall'),('quita','quitall'),('quote','quote'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','r'),('r','read'),('re','re'),('rec','recover'),('red','red'),('red','redo'),('redi','redir'),('redr','redraw'),('redraws','redrawstatus'),('reg','registers'),('res','resize'),('ret','retab'),('retu','return'),('rew','rewind'),('ri','right'),('rightb','rightbelow'),('ru','ru'),('ru','runtime'),('rub','ruby'),('rubyd','rubydo'),('rubyf','rubyfile'),('rundo','rundo'),('rv','rviminfo'),('s','s'),('s','s'),('s','s'),('s','s'),('sN','sNext'),('sa','sargument'),('sal','sall'),('san','sandbox'),('sav','saveas'),('sb','sbuffer'),('sbN','sbNext'),('sba','sball'),('sbf','sbfirst'),('sbl','sblast'),('sbm','sbmodified'),('sbn','sbnext'),('sbp','sbprevious'),('sbr','sbrewind'),('scrip','scrip'),('scrip','scriptnames'),('scripte','scriptencoding'),('scs','scs'),('scscope','scscope'),('se','set'),('setf','setfiletype'),('setg','setglobal'),('setl','setlocal'),('sf','sfind'),('sfir','sfirst'),('sh','shell'),('si','si'),('sig','sig'),('sign','sign'),('sil','silent'),('sim','simalt'),('sl','sl'),('sl','sleep'),('sla','slast'),('sm','smagic'),('sm','smap'),('sme','sme'),('
smenu','smenu'),('sn','snext'),('sni','sniff'),('sno','snomagic'),('snoreme','snoreme'),('snoremenu','snoremenu'),('so','so'),('so','source'),('sor','sort'),('sp','split'),('spe','spe'),('spe','spellgood'),('spelld','spelldump'),('spelli','spellinfo'),('spellr','spellrepall'),('spellu','spellundo'),('spellw','spellwrong'),('spr','sprevious'),('sre','srewind'),('st','st'),('st','stop'),('sta','stag'),('star','star'),('star','startinsert'),('start','start'),('startg','startgreplace'),('startr','startreplace'),('stj','stjump'),('stopi','stopinsert'),('sts','stselect'),('sub','sub'),('sub','sub'),('sun','sunhide'),('sunme','sunme'),('sunmenu','sunmenu'),('sus','suspend'),('sv','sview'),('sw','swapname'),('sy','sy'),('syn','syn'),('sync','sync'),('syncbind','syncbind'),('synlist','synlist'),('t','t'),('t','t'),('t','t'),('tN','tN'),('tN','tNext'),('ta','ta'),('ta','tag'),('tab','tab'),('tabN','tabN'),('tabN','tabNext'),('tabc','tabclose'),('tabd','tabdo'),('tabe','tabedit'),('tabf','tabfind'),('tabfir','tabfirst'),('tabl','tablast'),('tabm','tabmove'),('tabn','tabnext'),('tabnew','tabnew'),('tabo','tabonly'),('tabp','tabprevious'),('tabr','tabrewind'),('tabs','tabs'),('tags','tags'),('tc','tcl'),('tcld','tcldo'),('tclf','tclfile'),('te','tearoff'),('tf','tfirst'),('th','throw'),('tj','tjump'),('tl','tlast'),('tm','tm'),('tm','tmenu'),('tn','tn'),('tn','tnext'),('to','topleft'),('tp','tprevious'),('tr','tr'),('tr','trewind'),('try','try'),('ts','tselect'),('tu','tu'),('tu','tunmenu'),('u','u'),('u','undo'),('un','un'),('una','unabbreviate'),('undoj','undojoin'),('undol','undolist'),('unh','unhide'),('unl','unl'),('unlo','unlockvar'),('uns','unsilent'),('up','update'),('v','v'),('ve','ve'),('ve','version'),('verb','verbose'),('version','version'),('version','version'),('vert','vertical'),('vi','vi'),('vi','visual'),('vie','view'),('vim','vimgrep'),('vimgrepa','vimgrepadd'),('viu','viusage'),('vmapc','vmapclear'),('vne','vnew'),('vs','vsplit'),('w','w'),('w','write'),('wN',
'wNext'),('wa','wall'),('wh','while'),('win','win'),('win','winsize'),('winc','wincmd'),('windo','windo'),('winp','winpos'),('wn','wnext'),('wp','wprevious'),('wq','wq'),('wqa','wqall'),('ws','wsverb'),('wundo','wundo'),('wv','wviminfo'),('x','x'),('x','xit'),('xa','xall'),('xmapc','xmapclear'),('xme','xme'),('xmenu','xmenu'),('xnoreme','xnoreme'),('xnoremenu','xnoremenu'),('xterm','xterm'),('xunme','xunme'),('xunmenu','xunmenu'),('xwininfo','xwininfo'),('y','yank')]
def _getoption():
    """Return the table of Vim option names as a list of 2-tuples of str.

    Auto-generated data (do not edit by hand): in this table both members
    of every pair are identical, e.g. ``('acd', 'acd')``.  The pair shape
    presumably mirrors the sibling command table's (abbreviation,
    full-name) entries such as ``('sp', 'split')`` -- TODO confirm against
    the generator script.  Covers boolean options with their ``no*`` and
    ``inv*`` negated/toggled spellings and the ``t_xx`` terminal codes.
    """
    # NOTE(review): the generator emits this list on a single physical
    # line; the SOURCE copy was hard-wrapped mid-string by extraction.
    # Re-wrapped here at tuple boundaries only -- the data is unchanged.
    return [
        ('acd','acd'),('ai','ai'),('akm','akm'),('al','al'),('aleph','aleph'),('allowrevins','allowrevins'),('altkeymap','altkeymap'),('ambiwidth','ambiwidth'),('ambw','ambw'),('anti','anti'),('antialias','antialias'),('ar','ar'),('arab','arab'),('arabic','arabic'),('arabicshape','arabicshape'),('ari','ari'),('arshape','arshape'),('autochdir','autochdir'),('autoindent','autoindent'),('autoread','autoread'),('autowrite','autowrite'),('autowriteall','autowriteall'),('aw','aw'),('awa','awa'),('background','background'),('backspace','backspace'),('backup','backup'),('backupcopy','backupcopy'),('backupdir','backupdir'),('backupext','backupext'),('backupskip','backupskip'),('balloondelay','balloondelay'),('ballooneval','ballooneval'),('balloonexpr','balloonexpr'),('bdir','bdir'),('bdlay','bdlay'),('beval','beval'),('bex','bex'),('bexpr','bexpr'),('bg','bg'),('bh','bh'),('bin','bin'),('binary','binary'),('biosk','biosk'),('bioskey','bioskey'),('bk','bk'),('bkc','bkc'),('bl','bl'),('bomb','bomb'),('breakat','breakat'),('brk','brk'),('browsedir','browsedir'),('bs','bs'),('bsdir','bsdir'),('bsk','bsk'),('bt','bt'),('bufhidden','bufhidden'),('buflisted','buflisted'),('buftype','buftype'),('casemap','casemap'),('cb','cb'),('cc','cc'),('ccv','ccv'),('cd','cd'),('cdpath','cdpath'),('cedit','cedit'),('cf','cf'),('cfu','cfu'),('ch','ch'),('charconvert','charconvert'),('ci','ci'),('cin','cin'),('cindent','cindent'),('cink','cink'),('cinkeys','cinkeys'),('cino','cino'),('cinoptions','cinoptions'),('cinw','cinw'),('cinwords','cinwords'),('clipboard','clipboard'),('cmdheight','cmdheight'),('cmdwinheight','cmdwinheight'),('cmp','cmp'),('cms','cms'),('co','co'),('cocu','cocu'),('cole','cole'),('colorcolumn','colorcolumn'),('columns','columns'),('com','com'),('comments','comments'),('commentstring','commentstring'),('compatible','compatible'),('complete','complete'),('completefunc','completefunc'),('completeopt','completeopt'),('concealcursor','concealcursor'),('conceallevel','conceallevel'),
        ('confirm','confirm'),('consk','consk'),('conskey','conskey'),('copyindent','copyindent'),('cot','cot'),('cp','cp'),('cpo','cpo'),('cpoptions','cpoptions'),('cpt','cpt'),('crb','crb'),('cryptmethod','cryptmethod'),('cscopepathcomp','cscopepathcomp'),('cscopeprg','cscopeprg'),('cscopequickfix','cscopequickfix'),('cscoperelative','cscoperelative'),('cscopetag','cscopetag'),('cscopetagorder','cscopetagorder'),('cscopeverbose','cscopeverbose'),('cspc','cspc'),('csprg','csprg'),('csqf','csqf'),('csre','csre'),('cst','cst'),('csto','csto'),('csverb','csverb'),('cuc','cuc'),('cul','cul'),('cursorbind','cursorbind'),('cursorcolumn','cursorcolumn'),('cursorline','cursorline'),('cwh','cwh'),('debug','debug'),('deco','deco'),('def','def'),('define','define'),('delcombine','delcombine'),('dex','dex'),('dg','dg'),('dict','dict'),('dictionary','dictionary'),('diff','diff'),('diffexpr','diffexpr'),('diffopt','diffopt'),('digraph','digraph'),('dip','dip'),('dir','dir'),('directory','directory'),('display','display'),('dy','dy'),('ea','ea'),('ead','ead'),('eadirection','eadirection'),('eb','eb'),('ed','ed'),('edcompatible','edcompatible'),('ef','ef'),('efm','efm'),('ei','ei'),('ek','ek'),('enc','enc'),('encoding','encoding'),('endofline','endofline'),('eol','eol'),('ep','ep'),('equalalways','equalalways'),('equalprg','equalprg'),('errorbells','errorbells'),('errorfile','errorfile'),('errorformat','errorformat'),('esckeys','esckeys'),('et','et'),('eventignore','eventignore'),('ex','ex'),('expandtab','expandtab'),('exrc','exrc'),('fcl','fcl'),('fcs','fcs'),('fdc','fdc'),('fde','fde'),('fdi','fdi'),('fdl','fdl'),('fdls','fdls'),('fdm','fdm'),('fdn','fdn'),('fdo','fdo'),('fdt','fdt'),('fen','fen'),('fenc','fenc'),('fencs','fencs'),('fex','fex'),('ff','ff'),('ffs','ffs'),('fileencoding','fileencoding'),('fileencodings','fileencodings'),('fileformat','fileformat'),('fileformats','fileformats'),('filetype','filetype'),('fillchars','fillchars'),('fk','fk'),('fkmap','fkmap'),('flp','flp'),
        ('fml','fml'),('fmr','fmr'),('fo','fo'),('foldclose','foldclose'),('foldcolumn','foldcolumn'),('foldenable','foldenable'),('foldexpr','foldexpr'),('foldignore','foldignore'),('foldlevel','foldlevel'),('foldlevelstart','foldlevelstart'),('foldmarker','foldmarker'),('foldmethod','foldmethod'),('foldminlines','foldminlines'),('foldnestmax','foldnestmax'),('foldopen','foldopen'),('foldtext','foldtext'),('formatexpr','formatexpr'),('formatlistpat','formatlistpat'),('formatoptions','formatoptions'),('formatprg','formatprg'),('fp','fp'),('fs','fs'),('fsync','fsync'),('ft','ft'),('gcr','gcr'),('gd','gd'),('gdefault','gdefault'),('gfm','gfm'),('gfn','gfn'),('gfs','gfs'),('gfw','gfw'),('ghr','ghr'),('go','go'),('gp','gp'),('grepformat','grepformat'),('grepprg','grepprg'),('gtl','gtl'),('gtt','gtt'),('guicursor','guicursor'),('guifont','guifont'),('guifontset','guifontset'),('guifontwide','guifontwide'),('guiheadroom','guiheadroom'),('guioptions','guioptions'),('guipty','guipty'),('guitablabel','guitablabel'),('guitabtooltip','guitabtooltip'),('helpfile','helpfile'),('helpheight','helpheight'),('helplang','helplang'),('hf','hf'),('hh','hh'),('hi','hi'),('hid','hid'),('hidden','hidden'),('highlight','highlight'),('history','history'),('hk','hk'),('hkmap','hkmap'),('hkmapp','hkmapp'),('hkp','hkp'),('hl','hl'),('hlg','hlg'),('hls','hls'),('hlsearch','hlsearch'),('ic','ic'),('icon','icon'),('iconstring','iconstring'),('ignorecase','ignorecase'),('im','im'),('imactivatekey','imactivatekey'),('imak','imak'),('imc','imc'),('imcmdline','imcmdline'),('imd','imd'),('imdisable','imdisable'),('imi','imi'),('iminsert','iminsert'),('ims','ims'),('imsearch','imsearch'),('inc','inc'),('include','include'),('includeexpr','includeexpr'),('incsearch','incsearch'),('inde','inde'),('indentexpr','indentexpr'),('indentkeys','indentkeys'),('indk','indk'),('inex','inex'),('inf','inf'),('infercase','infercase'),('inoremap','inoremap'),('insertmode','insertmode'),('invacd','invacd'),('invai','invai'),
        # inv* spellings toggle boolean options
        ('invakm','invakm'),('invallowrevins','invallowrevins'),('invaltkeymap','invaltkeymap'),('invanti','invanti'),('invantialias','invantialias'),('invar','invar'),('invarab','invarab'),('invarabic','invarabic'),('invarabicshape','invarabicshape'),('invari','invari'),('invarshape','invarshape'),('invautochdir','invautochdir'),('invautoindent','invautoindent'),('invautoread','invautoread'),('invautowrite','invautowrite'),('invautowriteall','invautowriteall'),('invaw','invaw'),('invawa','invawa'),('invbackup','invbackup'),('invballooneval','invballooneval'),('invbeval','invbeval'),('invbin','invbin'),('invbinary','invbinary'),('invbiosk','invbiosk'),('invbioskey','invbioskey'),('invbk','invbk'),('invbl','invbl'),('invbomb','invbomb'),('invbuflisted','invbuflisted'),('invcf','invcf'),('invci','invci'),('invcin','invcin'),('invcindent','invcindent'),('invcompatible','invcompatible'),('invconfirm','invconfirm'),('invconsk','invconsk'),('invconskey','invconskey'),('invcopyindent','invcopyindent'),('invcp','invcp'),('invcrb','invcrb'),('invcscopetag','invcscopetag'),('invcscopeverbose','invcscopeverbose'),('invcst','invcst'),('invcsverb','invcsverb'),('invcuc','invcuc'),('invcul','invcul'),('invcursorbind','invcursorbind'),('invcursorcolumn','invcursorcolumn'),('invcursorline','invcursorline'),('invdeco','invdeco'),('invdelcombine','invdelcombine'),('invdg','invdg'),('invdiff','invdiff'),('invdigraph','invdigraph'),('invea','invea'),('inveb','inveb'),('inved','inved'),('invedcompatible','invedcompatible'),('invek','invek'),('invendofline','invendofline'),('inveol','inveol'),('invequalalways','invequalalways'),('inverrorbells','inverrorbells'),('invesckeys','invesckeys'),('invet','invet'),('invex','invex'),('invexpandtab','invexpandtab'),('invexrc','invexrc'),('invfen','invfen'),('invfk','invfk'),('invfkmap','invfkmap'),('invfoldenable','invfoldenable'),('invgd','invgd'),('invgdefault','invgdefault'),('invguipty','invguipty'),('invhid','invhid'),('invhidden','invhidden'),('invhk','invhk'),
        ('invhkmap','invhkmap'),('invhkmapp','invhkmapp'),('invhkp','invhkp'),('invhls','invhls'),('invhlsearch','invhlsearch'),('invic','invic'),('invicon','invicon'),('invignorecase','invignorecase'),('invim','invim'),('invimc','invimc'),('invimcmdline','invimcmdline'),('invimd','invimd'),('invimdisable','invimdisable'),('invincsearch','invincsearch'),('invinf','invinf'),('invinfercase','invinfercase'),('invinsertmode','invinsertmode'),('invis','invis'),('invjoinspaces','invjoinspaces'),('invjs','invjs'),('invlazyredraw','invlazyredraw'),('invlbr','invlbr'),('invlinebreak','invlinebreak'),('invlisp','invlisp'),('invlist','invlist'),('invloadplugins','invloadplugins'),('invlpl','invlpl'),('invlz','invlz'),('invma','invma'),('invmacatsui','invmacatsui'),('invmagic','invmagic'),('invmh','invmh'),('invml','invml'),('invmod','invmod'),('invmodeline','invmodeline'),('invmodifiable','invmodifiable'),('invmodified','invmodified'),('invmore','invmore'),('invmousef','invmousef'),('invmousefocus','invmousefocus'),('invmousehide','invmousehide'),('invnu','invnu'),('invnumber','invnumber'),('invodev','invodev'),('invopendevice','invopendevice'),('invpaste','invpaste'),('invpi','invpi'),('invpreserveindent','invpreserveindent'),('invpreviewwindow','invpreviewwindow'),('invprompt','invprompt'),('invpvw','invpvw'),('invreadonly','invreadonly'),('invrelativenumber','invrelativenumber'),('invremap','invremap'),('invrestorescreen','invrestorescreen'),('invrevins','invrevins'),('invri','invri'),('invrightleft','invrightleft'),('invrl','invrl'),('invrnu','invrnu'),('invro','invro'),('invrs','invrs'),('invru','invru'),('invruler','invruler'),('invsb','invsb'),('invsc','invsc'),('invscb','invscb'),('invscrollbind','invscrollbind'),('invscs','invscs'),('invsecure','invsecure'),('invsft','invsft'),('invshellslash','invshellslash'),('invshelltemp','invshelltemp'),('invshiftround','invshiftround'),('invshortname','invshortname'),('invshowcmd','invshowcmd'),('invshowfulltag','invshowfulltag'),
        ('invshowmatch','invshowmatch'),('invshowmode','invshowmode'),('invsi','invsi'),('invsm','invsm'),('invsmartcase','invsmartcase'),('invsmartindent','invsmartindent'),('invsmarttab','invsmarttab'),('invsmd','invsmd'),('invsn','invsn'),('invsol','invsol'),('invspell','invspell'),('invsplitbelow','invsplitbelow'),('invsplitright','invsplitright'),('invspr','invspr'),('invsr','invsr'),('invssl','invssl'),('invsta','invsta'),('invstartofline','invstartofline'),('invstmp','invstmp'),('invswapfile','invswapfile'),('invswf','invswf'),('invta','invta'),('invtagbsearch','invtagbsearch'),('invtagrelative','invtagrelative'),('invtagstack','invtagstack'),('invtbi','invtbi'),('invtbidi','invtbidi'),('invtbs','invtbs'),('invtermbidi','invtermbidi'),('invterse','invterse'),('invtextauto','invtextauto'),('invtextmode','invtextmode'),('invtf','invtf'),('invtgst','invtgst'),('invtildeop','invtildeop'),('invtimeout','invtimeout'),('invtitle','invtitle'),('invto','invto'),('invtop','invtop'),('invtr','invtr'),('invttimeout','invttimeout'),('invttybuiltin','invttybuiltin'),('invttyfast','invttyfast'),('invtx','invtx'),('invvb','invvb'),('invvisualbell','invvisualbell'),('invwa','invwa'),('invwarn','invwarn'),('invwb','invwb'),('invweirdinvert','invweirdinvert'),('invwfh','invwfh'),('invwfw','invwfw'),('invwildignorecase','invwildignorecase'),('invwildmenu','invwildmenu'),('invwinfixheight','invwinfixheight'),('invwinfixwidth','invwinfixwidth'),('invwiv','invwiv'),('invwmnu','invwmnu'),('invwrap','invwrap'),('invwrapscan','invwrapscan'),('invwrite','invwrite'),('invwriteany','invwriteany'),('invwritebackup','invwritebackup'),('invws','invws'),('is','is'),('isf','isf'),('isfname','isfname'),('isi','isi'),('isident','isident'),('isk','isk'),('iskeyword','iskeyword'),('isp','isp'),('isprint','isprint'),('joinspaces','joinspaces'),('js','js'),('key','key'),('keymap','keymap'),('keymodel','keymodel'),('keywordprg','keywordprg'),('km','km'),('kmp','kmp'),('kp','kp'),('langmap','langmap'),
        ('langmenu','langmenu'),('laststatus','laststatus'),('lazyredraw','lazyredraw'),('lbr','lbr'),('lcs','lcs'),('linebreak','linebreak'),('lines','lines'),('linespace','linespace'),('lisp','lisp'),('lispwords','lispwords'),('list','list'),('listchars','listchars'),('lm','lm'),('lmap','lmap'),('loadplugins','loadplugins'),('lpl','lpl'),('ls','ls'),('lsp','lsp'),('lw','lw'),('lz','lz'),('ma','ma'),('macatsui','macatsui'),('magic','magic'),('makeef','makeef'),('makeprg','makeprg'),('mat','mat'),('matchpairs','matchpairs'),('matchtime','matchtime'),('maxcombine','maxcombine'),('maxfuncdepth','maxfuncdepth'),('maxmapdepth','maxmapdepth'),('maxmem','maxmem'),('maxmempattern','maxmempattern'),('maxmemtot','maxmemtot'),('mco','mco'),('mef','mef'),('menuitems','menuitems'),('mfd','mfd'),('mh','mh'),('mis','mis'),('mkspellmem','mkspellmem'),('ml','ml'),('mls','mls'),('mm','mm'),('mmd','mmd'),('mmp','mmp'),('mmt','mmt'),('mod','mod'),('modeline','modeline'),('modelines','modelines'),('modifiable','modifiable'),('modified','modified'),('more','more'),('mouse','mouse'),('mousef','mousef'),('mousefocus','mousefocus'),('mousehide','mousehide'),('mousem','mousem'),('mousemodel','mousemodel'),('mouses','mouses'),('mouseshape','mouseshape'),('mouset','mouset'),('mousetime','mousetime'),('mp','mp'),('mps','mps'),('msm','msm'),('mzq','mzq'),('mzquantum','mzquantum'),('nf','nf'),('nnoremap','nnoremap'),('noacd','noacd'),('noai','noai'),('noakm','noakm'),('noallowrevins','noallowrevins'),('noaltkeymap','noaltkeymap'),('noanti','noanti'),('noantialias','noantialias'),('noar','noar'),('noarab','noarab'),('noarabic','noarabic'),('noarabicshape','noarabicshape'),('noari','noari'),('noarshape','noarshape'),('noautochdir','noautochdir'),('noautoindent','noautoindent'),('noautoread','noautoread'),('noautowrite','noautowrite'),('noautowriteall','noautowriteall'),('noaw','noaw'),('noawa','noawa'),('nobackup','nobackup'),('noballooneval','noballooneval'),('nobeval','nobeval'),('nobin','nobin'),('nobinary','nobinary'),
        # no* spellings disable boolean options
        ('nobiosk','nobiosk'),('nobioskey','nobioskey'),('nobk','nobk'),('nobl','nobl'),('nobomb','nobomb'),('nobuflisted','nobuflisted'),('nocf','nocf'),('noci','noci'),('nocin','nocin'),('nocindent','nocindent'),('nocompatible','nocompatible'),('noconfirm','noconfirm'),('noconsk','noconsk'),('noconskey','noconskey'),('nocopyindent','nocopyindent'),('nocp','nocp'),('nocrb','nocrb'),('nocscopetag','nocscopetag'),('nocscopeverbose','nocscopeverbose'),('nocst','nocst'),('nocsverb','nocsverb'),('nocuc','nocuc'),('nocul','nocul'),('nocursorbind','nocursorbind'),('nocursorcolumn','nocursorcolumn'),('nocursorline','nocursorline'),('nodeco','nodeco'),('nodelcombine','nodelcombine'),('nodg','nodg'),('nodiff','nodiff'),('nodigraph','nodigraph'),('noea','noea'),('noeb','noeb'),('noed','noed'),('noedcompatible','noedcompatible'),('noek','noek'),('noendofline','noendofline'),('noeol','noeol'),('noequalalways','noequalalways'),('noerrorbells','noerrorbells'),('noesckeys','noesckeys'),('noet','noet'),('noex','noex'),('noexpandtab','noexpandtab'),('noexrc','noexrc'),('nofen','nofen'),('nofk','nofk'),('nofkmap','nofkmap'),('nofoldenable','nofoldenable'),('nogd','nogd'),('nogdefault','nogdefault'),('noguipty','noguipty'),('nohid','nohid'),('nohidden','nohidden'),('nohk','nohk'),('nohkmap','nohkmap'),('nohkmapp','nohkmapp'),('nohkp','nohkp'),('nohls','nohls'),('nohlsearch','nohlsearch'),('noic','noic'),('noicon','noicon'),('noignorecase','noignorecase'),('noim','noim'),('noimc','noimc'),('noimcmdline','noimcmdline'),('noimd','noimd'),('noimdisable','noimdisable'),('noincsearch','noincsearch'),('noinf','noinf'),('noinfercase','noinfercase'),('noinsertmode','noinsertmode'),('nois','nois'),('nojoinspaces','nojoinspaces'),('nojs','nojs'),('nolazyredraw','nolazyredraw'),('nolbr','nolbr'),('nolinebreak','nolinebreak'),('nolisp','nolisp'),('nolist','nolist'),('noloadplugins','noloadplugins'),('nolpl','nolpl'),('nolz','nolz'),('noma','noma'),('nomacatsui','nomacatsui'),('nomagic','nomagic'),
        ('nomh','nomh'),('noml','noml'),('nomod','nomod'),('nomodeline','nomodeline'),('nomodifiable','nomodifiable'),('nomodified','nomodified'),('nomore','nomore'),('nomousef','nomousef'),('nomousefocus','nomousefocus'),('nomousehide','nomousehide'),('nonu','nonu'),('nonumber','nonumber'),('noodev','noodev'),('noopendevice','noopendevice'),('nopaste','nopaste'),('nopi','nopi'),('nopreserveindent','nopreserveindent'),('nopreviewwindow','nopreviewwindow'),('noprompt','noprompt'),('nopvw','nopvw'),('noreadonly','noreadonly'),('norelativenumber','norelativenumber'),('noremap','noremap'),('norestorescreen','norestorescreen'),('norevins','norevins'),('nori','nori'),('norightleft','norightleft'),('norl','norl'),('nornu','nornu'),('noro','noro'),('nors','nors'),('noru','noru'),('noruler','noruler'),('nosb','nosb'),('nosc','nosc'),('noscb','noscb'),('noscrollbind','noscrollbind'),('noscs','noscs'),('nosecure','nosecure'),('nosft','nosft'),('noshellslash','noshellslash'),('noshelltemp','noshelltemp'),('noshiftround','noshiftround'),('noshortname','noshortname'),('noshowcmd','noshowcmd'),('noshowfulltag','noshowfulltag'),('noshowmatch','noshowmatch'),('noshowmode','noshowmode'),('nosi','nosi'),('nosm','nosm'),('nosmartcase','nosmartcase'),('nosmartindent','nosmartindent'),('nosmarttab','nosmarttab'),('nosmd','nosmd'),('nosn','nosn'),('nosol','nosol'),('nospell','nospell'),('nosplitbelow','nosplitbelow'),('nosplitright','nosplitright'),('nospr','nospr'),('nosr','nosr'),('nossl','nossl'),('nosta','nosta'),('nostartofline','nostartofline'),('nostmp','nostmp'),('noswapfile','noswapfile'),('noswf','noswf'),('nota','nota'),('notagbsearch','notagbsearch'),('notagrelative','notagrelative'),('notagstack','notagstack'),('notbi','notbi'),('notbidi','notbidi'),('notbs','notbs'),('notermbidi','notermbidi'),('noterse','noterse'),('notextauto','notextauto'),('notextmode','notextmode'),('notf','notf'),('notgst','notgst'),('notildeop','notildeop'),('notimeout','notimeout'),('notitle','notitle'),
        ('noto','noto'),('notop','notop'),('notr','notr'),('nottimeout','nottimeout'),('nottybuiltin','nottybuiltin'),('nottyfast','nottyfast'),('notx','notx'),('novb','novb'),('novisualbell','novisualbell'),('nowa','nowa'),('nowarn','nowarn'),('nowb','nowb'),('noweirdinvert','noweirdinvert'),('nowfh','nowfh'),('nowfw','nowfw'),('nowildignorecase','nowildignorecase'),('nowildmenu','nowildmenu'),('nowinfixheight','nowinfixheight'),('nowinfixwidth','nowinfixwidth'),('nowiv','nowiv'),('nowmnu','nowmnu'),('nowrap','nowrap'),('nowrapscan','nowrapscan'),('nowrite','nowrite'),('nowriteany','nowriteany'),('nowritebackup','nowritebackup'),('nows','nows'),('nrformats','nrformats'),('nu','nu'),('number','number'),('numberwidth','numberwidth'),('nuw','nuw'),('odev','odev'),('oft','oft'),('ofu','ofu'),('omnifunc','omnifunc'),('opendevice','opendevice'),('operatorfunc','operatorfunc'),('opfunc','opfunc'),('osfiletype','osfiletype'),('pa','pa'),('para','para'),('paragraphs','paragraphs'),('paste','paste'),('pastetoggle','pastetoggle'),('patchexpr','patchexpr'),('patchmode','patchmode'),('path','path'),('pdev','pdev'),('penc','penc'),('pex','pex'),('pexpr','pexpr'),('pfn','pfn'),('ph','ph'),('pheader','pheader'),('pi','pi'),('pm','pm'),('pmbcs','pmbcs'),('pmbfn','pmbfn'),('popt','popt'),('preserveindent','preserveindent'),('previewheight','previewheight'),('previewwindow','previewwindow'),('printdevice','printdevice'),('printencoding','printencoding'),('printexpr','printexpr'),('printfont','printfont'),('printheader','printheader'),('printmbcharset','printmbcharset'),('printmbfont','printmbfont'),('printoptions','printoptions'),('prompt','prompt'),('pt','pt'),('pumheight','pumheight'),('pvh','pvh'),('pvw','pvw'),('qe','qe'),('quoteescape','quoteescape'),('rdt','rdt'),('readonly','readonly'),('redrawtime','redrawtime'),('relativenumber','relativenumber'),('remap','remap'),('report','report'),('restorescreen','restorescreen'),('revins','revins'),('ri','ri'),('rightleft','rightleft'),('rightleftcmd','rightleftcmd'),
        ('rl','rl'),('rlc','rlc'),('rnu','rnu'),('ro','ro'),('rs','rs'),('rtp','rtp'),('ru','ru'),('ruf','ruf'),('ruler','ruler'),('rulerformat','rulerformat'),('runtimepath','runtimepath'),('sb','sb'),('sbo','sbo'),('sbr','sbr'),('sc','sc'),('scb','scb'),('scr','scr'),('scroll','scroll'),('scrollbind','scrollbind'),('scrolljump','scrolljump'),('scrolloff','scrolloff'),('scrollopt','scrollopt'),('scs','scs'),('sect','sect'),('sections','sections'),('secure','secure'),('sel','sel'),('selection','selection'),('selectmode','selectmode'),('sessionoptions','sessionoptions'),('sft','sft'),('sh','sh'),('shcf','shcf'),('shell','shell'),('shellcmdflag','shellcmdflag'),('shellpipe','shellpipe'),('shellquote','shellquote'),('shellredir','shellredir'),('shellslash','shellslash'),('shelltemp','shelltemp'),('shelltype','shelltype'),('shellxquote','shellxquote'),('shiftround','shiftround'),('shiftwidth','shiftwidth'),('shm','shm'),('shortmess','shortmess'),('shortname','shortname'),('showbreak','showbreak'),('showcmd','showcmd'),('showfulltag','showfulltag'),('showmatch','showmatch'),('showmode','showmode'),('showtabline','showtabline'),('shq','shq'),('si','si'),('sidescroll','sidescroll'),('sidescrolloff','sidescrolloff'),('siso','siso'),('sj','sj'),('slm','slm'),('sm','sm'),('smartcase','smartcase'),('smartindent','smartindent'),('smarttab','smarttab'),('smc','smc'),('smd','smd'),('sn','sn'),('so','so'),('softtabstop','softtabstop'),('sol','sol'),('sp','sp'),('spc','spc'),('spell','spell'),('spellcapcheck','spellcapcheck'),('spellfile','spellfile'),('spelllang','spelllang'),('spellsuggest','spellsuggest'),('spf','spf'),('spl','spl'),('splitbelow','splitbelow'),('splitright','splitright'),('spr','spr'),('sps','sps'),('sr','sr'),('srr','srr'),('ss','ss'),('ssl','ssl'),('ssop','ssop'),('st','st'),('sta','sta'),('stal','stal'),('startofline','startofline'),('statusline','statusline'),('stl','stl'),('stmp','stmp'),('sts','sts'),('su','su'),('sua','sua'),('suffixes','suffixes'),
        # t_xx entries are terminal capability codes
        ('suffixesadd','suffixesadd'),('sw','sw'),('swapfile','swapfile'),('swapsync','swapsync'),('swb','swb'),('swf','swf'),('switchbuf','switchbuf'),('sws','sws'),('sxq','sxq'),('syn','syn'),('synmaxcol','synmaxcol'),('syntax','syntax'),('t_AB','t_AB'),('t_AF','t_AF'),('t_AL','t_AL'),('t_CS','t_CS'),('t_CV','t_CV'),('t_Ce','t_Ce'),('t_Co','t_Co'),('t_Cs','t_Cs'),('t_DL','t_DL'),('t_EI','t_EI'),('t_F1','t_F1'),('t_F2','t_F2'),('t_F3','t_F3'),('t_F4','t_F4'),('t_F5','t_F5'),('t_F6','t_F6'),('t_F7','t_F7'),('t_F8','t_F8'),('t_F9','t_F9'),('t_IE','t_IE'),('t_IS','t_IS'),('t_K1','t_K1'),('t_K3','t_K3'),('t_K4','t_K4'),('t_K5','t_K5'),('t_K6','t_K6'),('t_K7','t_K7'),('t_K8','t_K8'),('t_K9','t_K9'),('t_KA','t_KA'),('t_KB','t_KB'),('t_KC','t_KC'),('t_KD','t_KD'),('t_KE','t_KE'),('t_KF','t_KF'),('t_KG','t_KG'),('t_KH','t_KH'),('t_KI','t_KI'),('t_KJ','t_KJ'),('t_KK','t_KK'),('t_KL','t_KL'),('t_RI','t_RI'),('t_RV','t_RV'),('t_SI','t_SI'),('t_Sb','t_Sb'),('t_Sf','t_Sf'),('t_WP','t_WP'),('t_WS','t_WS'),('t_ZH','t_ZH'),('t_ZR','t_ZR'),('t_al','t_al'),('t_bc','t_bc'),('t_cd','t_cd'),('t_ce','t_ce'),('t_cl','t_cl'),('t_cm','t_cm'),('t_cs','t_cs'),('t_da','t_da'),('t_db','t_db'),('t_dl','t_dl'),('t_fs','t_fs'),('t_k1','t_k1'),('t_k2','t_k2'),('t_k3','t_k3'),('t_k4','t_k4'),('t_k5','t_k5'),('t_k6','t_k6'),('t_k7','t_k7'),('t_k8','t_k8'),('t_k9','t_k9'),('t_kB','t_kB'),('t_kD','t_kD'),('t_kI','t_kI'),('t_kN','t_kN'),('t_kP','t_kP'),('t_kb','t_kb'),('t_kd','t_kd'),('t_ke','t_ke'),('t_kh','t_kh'),('t_kl','t_kl'),('t_kr','t_kr'),('t_ks','t_ks'),('t_ku','t_ku'),('t_le','t_le'),('t_mb','t_mb'),('t_md','t_md'),('t_me','t_me'),('t_mr','t_mr'),('t_ms','t_ms'),('t_nd','t_nd'),('t_op','t_op'),('t_se','t_se'),('t_so','t_so'),('t_sr','t_sr'),('t_te','t_te'),('t_ti','t_ti'),('t_ts','t_ts'),('t_ue','t_ue'),('t_us','t_us'),('t_ut','t_ut'),('t_vb','t_vb'),('t_ve','t_ve'),('t_vi','t_vi'),('t_vs','t_vs'),('t_xs','t_xs'),('ta','ta'),('tabline','tabline'),('tabpagemax','tabpagemax'),('tabstop','tabstop'),
        ('tag','tag'),('tagbsearch','tagbsearch'),('taglength','taglength'),('tagrelative','tagrelative'),('tags','tags'),('tagstack','tagstack'),('tal','tal'),('tb','tb'),('tbi','tbi'),('tbidi','tbidi'),('tbis','tbis'),('tbs','tbs'),('tenc','tenc'),('term','term'),('termbidi','termbidi'),('termencoding','termencoding'),('terse','terse'),('textauto','textauto'),('textmode','textmode'),('textwidth','textwidth'),('tf','tf'),('tgst','tgst'),('thesaurus','thesaurus'),('tildeop','tildeop'),('timeout','timeout'),('timeoutlen','timeoutlen'),('title','title'),('titlelen','titlelen'),('titleold','titleold'),('titlestring','titlestring'),('tl','tl'),('tm','tm'),('to','to'),('toolbar','toolbar'),('toolbariconsize','toolbariconsize'),('top','top'),('tpm','tpm'),('tr','tr'),('ts','ts'),('tsl','tsl'),('tsr','tsr'),('ttimeout','ttimeout'),('ttimeoutlen','ttimeoutlen'),('ttm','ttm'),('tty','tty'),('ttybuiltin','ttybuiltin'),('ttyfast','ttyfast'),('ttym','ttym'),('ttymouse','ttymouse'),('ttyscroll','ttyscroll'),('ttytype','ttytype'),('tw','tw'),('tx','tx'),('uc','uc'),('udf','udf'),('udir','udir'),('ul','ul'),('undodir','undodir'),('undofile','undofile'),('undolevels','undolevels'),('undoreload','undoreload'),('updatecount','updatecount'),('updatetime','updatetime'),('ur','ur'),('ut','ut'),('vb','vb'),('vbs','vbs'),('vdir','vdir'),('ve','ve'),('verbose','verbose'),('verbosefile','verbosefile'),('vfile','vfile'),('vi','vi'),('viewdir','viewdir'),('viewoptions','viewoptions'),('viminfo','viminfo'),('virtualedit','virtualedit'),('visualbell','visualbell'),('vnoremap','vnoremap'),('vop','vop'),('wa','wa'),('wak','wak'),('warn','warn'),('wb','wb'),('wc','wc'),('wcm','wcm'),('wd','wd'),('weirdinvert','weirdinvert'),('wfh','wfh'),('wfw','wfw'),('wh','wh'),('whichwrap','whichwrap'),('wi','wi'),('wic','wic'),('wig','wig'),('wildchar','wildchar'),('wildcharm','wildcharm'),('wildignore','wildignore'),('wildignorecase','wildignorecase'),('wildmenu','wildmenu'),('wildmode','wildmode'),('wildoptions','wildoptions'),
        ('wim','wim'),('winaltkeys','winaltkeys'),('window','window'),('winfixheight','winfixheight'),('winfixwidth','winfixwidth'),('winheight','winheight'),('winminheight','winminheight'),('winminwidth','winminwidth'),('winwidth','winwidth'),('wiv','wiv'),('wiw','wiw'),('wm','wm'),('wmh','wmh'),('wmnu','wmnu'),('wmw','wmw'),('wop','wop'),('wrap','wrap'),('wrapmargin','wrapmargin'),('wrapscan','wrapscan'),('write','write'),('writeany','writeany'),('writebackup','writebackup'),('writedelay','writedelay'),('ws','ws'),('ww','ww')]
diff --git a/pygments/lexers/agile.py b/pygments/lexers/agile.py
index 3c1525d0..a49289dc 100644
--- a/pygments/lexers/agile.py
+++ b/pygments/lexers/agile.py
@@ -5,17 +5,17 @@
Lexers for agile languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
- LexerContext, include, combined, do_insertions, bygroups, using
+ LexerContext, include, combined, do_insertions, bygroups, using, this
from pygments.token import Error, Text, Other, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
-from pygments.util import get_bool_opt, get_list_opt, shebang_matches
+from pygments.util import get_bool_opt, get_list_opt, shebang_matches, iteritems
from pygments import unistring as uni
@@ -23,7 +23,7 @@ __all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'Python3Lexer', 'Python3TracebackLexer', 'RubyLexer',
'RubyConsoleLexer', 'PerlLexer', 'LuaLexer', 'MoonScriptLexer',
'CrocLexer', 'MiniDLexer', 'IoLexer', 'TclLexer', 'FactorLexer',
- 'FancyLexer', 'DgLexer']
+ 'FancyLexer', 'DgLexer', 'Perl6Lexer', 'HyLexer']
# b/w compatibility
from pygments.lexers.functional import SchemeLexer
@@ -109,6 +109,7 @@ class PythonLexer(RegexLexer):
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
+ (r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
@@ -185,14 +186,15 @@ class PythonLexer(RegexLexer):
}
def analyse_text(text):
- return shebang_matches(text, r'pythonw?(2(\.\d)?)?')
+ return shebang_matches(text, r'pythonw?(2(\.\d)?)?') or \
+ 'import ' in text[:1000]
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Python 3'
@@ -306,7 +308,8 @@ class PythonConsoleLexer(Lexer):
`python3`
Use Python 3 lexer for code. Default is ``False``.
- *New in Pygments 1.0.*
+
+ .. versionadded:: 1.0
"""
name = 'Python console session'
aliases = ['pycon']
@@ -351,7 +354,7 @@ class PythonConsoleLexer(Lexer):
curcode = ''
insertions = []
if (line.startswith(u'Traceback (most recent call last):') or
- re.match(ur' File "[^"]+", line \d+\n$', line)):
+ re.match(u' File "[^"]+", line \\d+\\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
@@ -375,7 +378,7 @@ class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Python Traceback'
@@ -412,7 +415,7 @@ class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
- *New in Pygments 1.0.*
+ .. versionadded:: 1.0
"""
name = 'Python 3.0 Traceback'
@@ -428,10 +431,13 @@ class Python3TracebackLexer(RegexLexer):
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
+ (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
+ (r'^( File )("[^"]+")(, line )(\d+)(\n)',
+ bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
@@ -528,7 +534,7 @@ class RubyLexer(ExtendedRegexLexer):
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
- (r'([a-zA-Z_][a-zA-Z0-9]*)(:)',
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(:)(?!:)',
bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
@@ -844,7 +850,7 @@ class PerlLexer(RegexLexer):
name = 'Perl'
aliases = ['perl', 'pl']
- filenames = ['*.pl', '*.pm']
+ filenames = ['*.pl', '*.pm', '*.t']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
@@ -1015,9 +1021,8 @@ class PerlLexer(RegexLexer):
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
- if 'my $' in text:
+ if re.search('(?:my|our)\s+[$@%(]', text):
return 0.9
- return 0.1 # who knows, might still be perl!
class LuaLexer(RegexLexer):
@@ -1122,7 +1127,7 @@ class LuaLexer(RegexLexer):
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
- for mod, func in MODULES.iteritems():
+ for mod, func in iteritems(MODULES):
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
@@ -1147,7 +1152,7 @@ class MoonScriptLexer(LuaLexer):
"""
For `MoonScript <http://moonscript.org.org>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "MoonScript"
@@ -1286,7 +1291,7 @@ class IoLexer(RegexLexer):
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Io'
filenames = ['*.io']
@@ -1332,7 +1337,7 @@ class TclLexer(RegexLexer):
"""
For Tcl source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
keyword_cmds_re = (
@@ -1462,7 +1467,7 @@ class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
@@ -1753,7 +1758,7 @@ class FancyLexer(RegexLexer):
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
@@ -1835,7 +1840,7 @@ class DgLexer(RegexLexer):
a functional and object-oriented programming language
running on the CPython 3 VM.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'dg'
aliases = ['dg']
@@ -1844,56 +1849,53 @@ class DgLexer(RegexLexer):
tokens = {
'root': [
- # Whitespace:
(r'\s+', Text),
(r'#.*?$', Comment.Single),
- # Lexemes:
- # Numbers
- (r'0[bB][01]+', Number.Bin),
- (r'0[oO][0-7]+', Number.Oct),
- (r'0[xX][\da-fA-F]+', Number.Hex),
- (r'[+-]?\d+\.\d+([eE][+-]?\d+)?[jJ]?', Number.Float),
- (r'[+-]?\d+[eE][+-]?\d+[jJ]?', Number.Float),
- (r'[+-]?\d+[jJ]?', Number.Integer),
- # Character/String Literals
- (r"[br]*'''", String, combined('stringescape', 'tsqs', 'string')),
- (r'[br]*"""', String, combined('stringescape', 'tdqs', 'string')),
- (r"[br]*'", String, combined('stringescape', 'sqs', 'string')),
- (r'[br]*"', String, combined('stringescape', 'dqs', 'string')),
- # Operators
- (r"`\w+'*`", Operator), # Infix links
- # Reserved infix links
- (r'\b(or|and|if|else|where|is|in)\b', Operator.Word),
+
+ (r'(?i)0b[01]+', Number.Bin),
+ (r'(?i)0o[0-7]+', Number.Oct),
+ (r'(?i)0x[0-9a-f]+', Number.Hex),
+ (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
+ (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
+ (r'(?i)[+-]?[0-9]+j?', Number.Integer),
+
+ (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
+ (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
+ (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
+ (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),
+
+ (r"`\w+'*`", Operator),
+ (r'\b(and|in|is|or|where)\b', Operator.Word),
(r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),
- # Identifiers
- # Python 3 types
+
(r"(?<!\.)(bool|bytearray|bytes|classmethod|complex|dict'?|"
r"float|frozenset|int|list'?|memoryview|object|property|range|"
r"set'?|slice|staticmethod|str|super|tuple'?|type)"
r"(?!['\w])", Name.Builtin),
- # Python 3 builtins + some more
(r'(?<!\.)(__import__|abs|all|any|bin|bind|chr|cmp|compile|complex|'
- r'delattr|dir|divmod|drop|dropwhile|enumerate|eval|filter|flip|'
- r'foldl1?|format|fst|getattr|globals|hasattr|hash|head|hex|id|'
- r'init|input|isinstance|issubclass|iter|iterate|last|len|locals|'
- r'map|max|min|next|oct|open|ord|pow|print|repr|reversed|round|'
- r'setattr|scanl1?|snd|sorted|sum|tail|take|takewhile|vars|zip)'
- r"(?!['\w])", Name.Builtin),
+ r'delattr|dir|divmod|drop|dropwhile|enumerate|eval|exhaust|'
+ r'filter|flip|foldl1?|format|fst|getattr|globals|hasattr|hash|'
+ r'head|hex|id|init|input|isinstance|issubclass|iter|iterate|last|'
+ r'len|locals|map|max|min|next|oct|open|ord|pow|print|repr|'
+ r'reversed|round|setattr|scanl1?|snd|sorted|sum|tail|take|'
+ r"takewhile|vars|zip)(?!['\w])", Name.Builtin),
(r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
Name.Builtin.Pseudo),
+
(r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
Name.Exception),
- (r"(?<!\.)(KeyboardInterrupt|SystemExit|StopIteration|"
- r"GeneratorExit)(?!['\w])", Name.Exception),
- # Compiler-defined identifiers
- (r"(?<![\.\w])(import|inherit|for|while|switch|not|raise|unsafe|"
- r"yield|with)(?!['\w])", Keyword.Reserved),
- # Other links
- (r"[A-Z_']+\b", Name),
- (r"[A-Z][\w']*\b", Keyword.Type),
+ (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
+ r"SystemExit)(?!['\w])", Name.Exception),
+
+ (r"(?<![\.\w])(except|finally|for|if|import|not|otherwise|raise|"
+ r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),
+
+ (r"[A-Z_]+'*(?!['\w])", Name),
+ (r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
(r"\w+'*", Name),
- # Blocks
+
(r'[()]', Punctuation),
+ (r'.', Error),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
@@ -1922,3 +1924,494 @@ class DgLexer(RegexLexer):
(r"'''", String, '#pop')
],
}
+
+
+class Perl6Lexer(ExtendedRegexLexer):
+ """
+ For `Perl 6 <http://www.perl6.org>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Perl6'
+ aliases = ['perl6', 'pl6']
+ filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
+ '*.6pm', '*.p6m', '*.pm6', '*.t']
+ mimetypes = ['text/x-perl6', 'application/x-perl6']
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ PERL6_IDENTIFIER_RANGE = "['a-zA-Z0-9_:-]" # if you alter this, search for a copy made of it below
+
+ PERL6_KEYWORDS = (
+ 'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT',
+ 'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP',
+ 'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but',
+ 'cached', 'category', 'class', 'constant', 'contend', 'continue',
+ 'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else',
+ 'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for',
+ 'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline',
+ 'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro',
+ 'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of',
+ 'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec',
+ 'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat',
+ 'require', 'required', 'return', 'returns', 'role', 'rule', 'rw',
+ 'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede',
+ 'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary',
+ 'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will',
+ )
+
+ PERL6_BUILTINS = (
+ 'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH',
+ 'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh',
+ 'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh',
+ 'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by',
+ 'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat',
+ 'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot',
+ 'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes',
+ 'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech',
+ 'cosh', 'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag',
+ 'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval',
+ 'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists',
+ 'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo',
+ 'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw',
+ 'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix',
+ 'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator',
+ 'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst',
+ 'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map',
+ 'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc',
+ 'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not',
+ 'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord',
+ 'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi',
+ 'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix',
+ 'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi',
+ 'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce',
+ 'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round',
+ 'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say',
+ 'sec', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature',
+ 'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice',
+ 'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ',
+ 'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to',
+ 'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc',
+ 'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack',
+ 'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec',
+ 'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip',
+ )
+
+ PERL6_BUILTIN_CLASSES = (
+ 'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit',
+ 'Blob', 'Block', 'Bool', 'Buf', 'Byte', 'Callable', 'Capture', 'Char', 'Class',
+ 'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception',
+ 'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing',
+ 'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet',
+ 'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method',
+ 'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair',
+ 'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex',
+ 'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen',
+ 'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void',
+ 'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32',
+ 'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2',
+ 'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2',
+ 'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2',
+ 'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8',
+ )
+
+ PERL6_OPERATORS = (
+ 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div',
+ 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm',
+ 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx',
+ '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^',
+ '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&',
+ 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^',
+ '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^',
+ '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv',
+ '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so',
+ 'not', '<==', '==>', '<<==', '==>>',
+ )
+
+ # Perl 6 has a *lot* of possible bracketing characters
+ # this list was lifted from STD.pm6 (https://github.com/perl6/std)
+ PERL6_BRACKETS = {
+ u'\u0028' : u'\u0029', u'\u003c' : u'\u003e', u'\u005b' : u'\u005d', u'\u007b' : u'\u007d',
+ u'\u00ab' : u'\u00bb', u'\u0f3a' : u'\u0f3b', u'\u0f3c' : u'\u0f3d', u'\u169b' : u'\u169c',
+ u'\u2018' : u'\u2019', u'\u201a' : u'\u2019', u'\u201b' : u'\u2019', u'\u201c' : u'\u201d',
+ u'\u201e' : u'\u201d', u'\u201f' : u'\u201d', u'\u2039' : u'\u203a', u'\u2045' : u'\u2046',
+ u'\u207d' : u'\u207e', u'\u208d' : u'\u208e', u'\u2208' : u'\u220b', u'\u2209' : u'\u220c',
+ u'\u220a' : u'\u220d', u'\u2215' : u'\u29f5', u'\u223c' : u'\u223d', u'\u2243' : u'\u22cd',
+ u'\u2252' : u'\u2253', u'\u2254' : u'\u2255', u'\u2264' : u'\u2265', u'\u2266' : u'\u2267',
+ u'\u2268' : u'\u2269', u'\u226a' : u'\u226b', u'\u226e' : u'\u226f', u'\u2270' : u'\u2271',
+ u'\u2272' : u'\u2273', u'\u2274' : u'\u2275', u'\u2276' : u'\u2277', u'\u2278' : u'\u2279',
+ u'\u227a' : u'\u227b', u'\u227c' : u'\u227d', u'\u227e' : u'\u227f', u'\u2280' : u'\u2281',
+ u'\u2282' : u'\u2283', u'\u2284' : u'\u2285', u'\u2286' : u'\u2287', u'\u2288' : u'\u2289',
+ u'\u228a' : u'\u228b', u'\u228f' : u'\u2290', u'\u2291' : u'\u2292', u'\u2298' : u'\u29b8',
+ u'\u22a2' : u'\u22a3', u'\u22a6' : u'\u2ade', u'\u22a8' : u'\u2ae4', u'\u22a9' : u'\u2ae3',
+ u'\u22ab' : u'\u2ae5', u'\u22b0' : u'\u22b1', u'\u22b2' : u'\u22b3', u'\u22b4' : u'\u22b5',
+ u'\u22b6' : u'\u22b7', u'\u22c9' : u'\u22ca', u'\u22cb' : u'\u22cc', u'\u22d0' : u'\u22d1',
+ u'\u22d6' : u'\u22d7', u'\u22d8' : u'\u22d9', u'\u22da' : u'\u22db', u'\u22dc' : u'\u22dd',
+ u'\u22de' : u'\u22df', u'\u22e0' : u'\u22e1', u'\u22e2' : u'\u22e3', u'\u22e4' : u'\u22e5',
+ u'\u22e6' : u'\u22e7', u'\u22e8' : u'\u22e9', u'\u22ea' : u'\u22eb', u'\u22ec' : u'\u22ed',
+ u'\u22f0' : u'\u22f1', u'\u22f2' : u'\u22fa', u'\u22f3' : u'\u22fb', u'\u22f4' : u'\u22fc',
+ u'\u22f6' : u'\u22fd', u'\u22f7' : u'\u22fe', u'\u2308' : u'\u2309', u'\u230a' : u'\u230b',
+ u'\u2329' : u'\u232a', u'\u23b4' : u'\u23b5', u'\u2768' : u'\u2769', u'\u276a' : u'\u276b',
+ u'\u276c' : u'\u276d', u'\u276e' : u'\u276f', u'\u2770' : u'\u2771', u'\u2772' : u'\u2773',
+ u'\u2774' : u'\u2775', u'\u27c3' : u'\u27c4', u'\u27c5' : u'\u27c6', u'\u27d5' : u'\u27d6',
+ u'\u27dd' : u'\u27de', u'\u27e2' : u'\u27e3', u'\u27e4' : u'\u27e5', u'\u27e6' : u'\u27e7',
+ u'\u27e8' : u'\u27e9', u'\u27ea' : u'\u27eb', u'\u2983' : u'\u2984', u'\u2985' : u'\u2986',
+ u'\u2987' : u'\u2988', u'\u2989' : u'\u298a', u'\u298b' : u'\u298c', u'\u298d' : u'\u298e',
+ u'\u298f' : u'\u2990', u'\u2991' : u'\u2992', u'\u2993' : u'\u2994', u'\u2995' : u'\u2996',
+ u'\u2997' : u'\u2998', u'\u29c0' : u'\u29c1', u'\u29c4' : u'\u29c5', u'\u29cf' : u'\u29d0',
+ u'\u29d1' : u'\u29d2', u'\u29d4' : u'\u29d5', u'\u29d8' : u'\u29d9', u'\u29da' : u'\u29db',
+ u'\u29f8' : u'\u29f9', u'\u29fc' : u'\u29fd', u'\u2a2b' : u'\u2a2c', u'\u2a2d' : u'\u2a2e',
+ u'\u2a34' : u'\u2a35', u'\u2a3c' : u'\u2a3d', u'\u2a64' : u'\u2a65', u'\u2a79' : u'\u2a7a',
+ u'\u2a7d' : u'\u2a7e', u'\u2a7f' : u'\u2a80', u'\u2a81' : u'\u2a82', u'\u2a83' : u'\u2a84',
+ u'\u2a8b' : u'\u2a8c', u'\u2a91' : u'\u2a92', u'\u2a93' : u'\u2a94', u'\u2a95' : u'\u2a96',
+ u'\u2a97' : u'\u2a98', u'\u2a99' : u'\u2a9a', u'\u2a9b' : u'\u2a9c', u'\u2aa1' : u'\u2aa2',
+ u'\u2aa6' : u'\u2aa7', u'\u2aa8' : u'\u2aa9', u'\u2aaa' : u'\u2aab', u'\u2aac' : u'\u2aad',
+ u'\u2aaf' : u'\u2ab0', u'\u2ab3' : u'\u2ab4', u'\u2abb' : u'\u2abc', u'\u2abd' : u'\u2abe',
+ u'\u2abf' : u'\u2ac0', u'\u2ac1' : u'\u2ac2', u'\u2ac3' : u'\u2ac4', u'\u2ac5' : u'\u2ac6',
+ u'\u2acd' : u'\u2ace', u'\u2acf' : u'\u2ad0', u'\u2ad1' : u'\u2ad2', u'\u2ad3' : u'\u2ad4',
+ u'\u2ad5' : u'\u2ad6', u'\u2aec' : u'\u2aed', u'\u2af7' : u'\u2af8', u'\u2af9' : u'\u2afa',
+ u'\u2e02' : u'\u2e03', u'\u2e04' : u'\u2e05', u'\u2e09' : u'\u2e0a', u'\u2e0c' : u'\u2e0d',
+ u'\u2e1c' : u'\u2e1d', u'\u2e20' : u'\u2e21', u'\u3008' : u'\u3009', u'\u300a' : u'\u300b',
+ u'\u300c' : u'\u300d', u'\u300e' : u'\u300f', u'\u3010' : u'\u3011', u'\u3014' : u'\u3015',
+ u'\u3016' : u'\u3017', u'\u3018' : u'\u3019', u'\u301a' : u'\u301b', u'\u301d' : u'\u301e',
+ u'\ufd3e' : u'\ufd3f', u'\ufe17' : u'\ufe18', u'\ufe35' : u'\ufe36', u'\ufe37' : u'\ufe38',
+ u'\ufe39' : u'\ufe3a', u'\ufe3b' : u'\ufe3c', u'\ufe3d' : u'\ufe3e', u'\ufe3f' : u'\ufe40',
+ u'\ufe41' : u'\ufe42', u'\ufe43' : u'\ufe44', u'\ufe47' : u'\ufe48', u'\ufe59' : u'\ufe5a',
+ u'\ufe5b' : u'\ufe5c', u'\ufe5d' : u'\ufe5e', u'\uff08' : u'\uff09', u'\uff1c' : u'\uff1e',
+ u'\uff3b' : u'\uff3d', u'\uff5b' : u'\uff5d', u'\uff5f' : u'\uff60', u'\uff62' : u'\uff63',
+ }
+
+ def _build_word_match(words, boundary_regex_fragment = None, prefix = '', suffix = ''):
+ if boundary_regex_fragment is None:
+ return r'\b(' + prefix + r'|'.join([ re.escape(x) for x in words]) + suffix + r')\b'
+ else:
+ return r'(?<!' + boundary_regex_fragment + ')' + prefix + '(' + \
+ r'|'.join([ re.escape(x) for x in words]) + r')' + suffix + '(?!' + boundary_regex_fragment + ')'
+
+ def brackets_callback(token_class):
+ def callback(lexer, match, context):
+ groups = match.groupdict()
+ opening_chars = groups['delimiter']
+ n_chars = len(opening_chars)
+ adverbs = groups.get('adverbs')
+
+ closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0])
+ text = context.text
+
+ if closer is None: # it's not a mirrored character, which means we
+ # just need to look for the next occurrence
+
+ end_pos = text.find(opening_chars, match.start('delimiter') + n_chars)
+ else: # we need to look for the corresponding closing character,
+ # keep nesting in mind
+ closing_chars = closer * n_chars
+ nesting_level = 1
+
+ search_pos = match.start('delimiter')
+
+ while nesting_level > 0:
+ next_open_pos = text.find(opening_chars, search_pos + n_chars)
+ next_close_pos = text.find(closing_chars, search_pos + n_chars)
+
+ if next_close_pos == -1:
+ next_close_pos = len(text)
+ nesting_level = 0
+ elif next_open_pos != -1 and next_open_pos < next_close_pos:
+ nesting_level += 1
+ search_pos = next_open_pos
+ else: # next_close_pos < next_open_pos
+ nesting_level -= 1
+ search_pos = next_close_pos
+
+ end_pos = next_close_pos
+
+ if end_pos < 0: # if we didn't find a closer, just highlight the
+ # rest of the text in this class
+ end_pos = len(text)
+
+ if adverbs is not None and re.search(r':to\b', adverbs):
+ heredoc_terminator = text[match.start('delimiter') + n_chars : end_pos]
+ end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + r'\s*$', text[ end_pos : ], re.MULTILINE)
+
+ if end_heredoc:
+ end_pos += end_heredoc.end()
+ else:
+ end_pos = len(text)
+
+ yield match.start(), token_class, text[match.start() : end_pos + n_chars]
+ context.pos = end_pos + n_chars
+
+ return callback
+
+ def opening_brace_callback(lexer, match, context):
+ stack = context.stack
+
+ yield match.start(), Text, context.text[match.start() : match.end()]
+ context.pos = match.end()
+
+ # if we encounter an opening brace and we're one level
+ # below a token state, it means we need to increment
+ # the nesting level for braces so we know later when
+ # we should return to the token rules.
+ if len(stack) > 2 and stack[-2] == 'token':
+ context.perl6_token_nesting_level += 1
+
+ def closing_brace_callback(lexer, match, context):
+ stack = context.stack
+
+ yield match.start(), Text, context.text[match.start() : match.end()]
+ context.pos = match.end()
+
+ # if we encounter a free closing brace and we're one level
+ # below a token state, it means we need to check the nesting
+ # level to see if we need to return to the token state.
+ if len(stack) > 2 and stack[-2] == 'token':
+ context.perl6_token_nesting_level -= 1
+ if context.perl6_token_nesting_level == 0:
+ stack.pop()
+
+ def embedded_perl6_callback(lexer, match, context):
+ context.perl6_token_nesting_level = 1
+ yield match.start(), Text, context.text[match.start() : match.end()]
+ context.pos = match.end()
+ context.stack.append('root')
+
+ # If you're modifying these rules, be careful if you need to process '{' or '}' characters.
+ # We have special logic for processing these characters (due to the fact that you can nest
+ # Perl 6 code in regex blocks), so if you need to process one of them, make sure you also
+ # process the corresponding one!
+ tokens = {
+ 'common' : [
+ (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', brackets_callback(Comment.Multiline)),
+ (r'#[^\n]*$', Comment.Singleline),
+ (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
+ (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
+ (r'^=.*?\n\s*?\n', Comment.Multiline),
+ (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', bygroups(Keyword, Name), 'token-sym-brackets'),
+ (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', bygroups(Keyword, Name), 'pre-token'),
+ # deal with a special case in the Perl 6 grammar (role q { ... })
+ (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)),
+ (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword),
+ (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix = '(?::[UD])?'), Name.Builtin),
+ (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin),
+ # copied from PerlLexer
+ (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable),
+ (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
+ (r'::\?\w+', Name.Variable.Global),
+ (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
+ (r'\$(?:<.*?>)+', Name.Variable),
+ (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])(?P=first_char)*)', brackets_callback(String)),
+ # copied from PerlLexer
+ (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
+ (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
+ (r'0b[01]+(_[01]+)*', Number.Bin),
+ (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', Number.Float),
+ (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
+ (r'\d+(_\d+)*', Number.Integer),
+ (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
+ (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
+ (r'm\w+(?=\()', Name),
+ (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z_:\s])(?P=first_char)*)', brackets_callback(String.Regex)),
+ (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', String.Regex),
+ (r'<[^\s=].*?\S>', String),
+ (_build_word_match(PERL6_OPERATORS), Operator),
+ (r'[0-9a-zA-Z_]' + PERL6_IDENTIFIER_RANGE + '*', Name),
+ (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
+ (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+ ],
+ 'root' : [
+ include('common'),
+ (r'\{', opening_brace_callback),
+ (r'\}', closing_brace_callback),
+ (r'.+?', Text),
+ ],
+ 'pre-token' : [
+ include('common'),
+ (r'\{', Text, ('#pop', 'token')),
+ (r'.+?', Text),
+ ],
+ 'token-sym-brackets' : [
+ (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', brackets_callback(Name), ('#pop', 'pre-token')),
+ (r'', Name, ('#pop', 'pre-token')),
+ ],
+ 'token': [
+ (r'}', Text, '#pop'),
+ (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)),
+ # make sure that quotes in character classes aren't treated as strings
+ (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex),
+ # make sure that '#' characters in quotes aren't treated as comments
+ (r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex),
+ (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex),
+ (r'#.*?$', Comment.Singleline),
+ (r'\{', embedded_perl6_callback),
+ ('.+?', String.Regex),
+ ],
+ }
+
+ def analyse_text(text):
+ def strip_pod(lines):
+ in_pod = False
+ stripped_lines = []
+
+ for line in lines:
+ if re.match(r'^=(?:end|cut)', line):
+ in_pod = False
+ elif re.match(r'^=\w+', line):
+ in_pod = True
+ elif not in_pod:
+ stripped_lines.append(line)
+
+ return stripped_lines
+
+ # XXX handle block comments
+ lines = text.splitlines()
+ lines = strip_pod(lines)
+ text = '\n'.join(lines)
+
+ if shebang_matches(text, r'perl6|rakudo|niecza|pugs'):
+ return True
+
+ saw_perl_decl = False
+ rating = False
+
+ # check for my/our/has declarations
+ # copied PERL6_IDENTIFIER_RANGE from above; not happy about that
+ if re.search("(?:my|our|has)\s+(?:['a-zA-Z0-9_:-]+\s+)?[$@%&(]", text):
+ rating = 0.8
+ saw_perl_decl = True
+
+ for line in lines:
+ line = re.sub('#.*', '', line)
+ if re.match('^\s*$', line):
+ continue
+
+ # match v6; use v6; use v6.0; use v6.0.0;
+ if re.match('^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line):
+ return True
+ # match class, module, role, enum, grammar declarations
+ class_decl = re.match('^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line)
+ if class_decl:
+ if saw_perl_decl or class_decl.group('scope') is not None:
+ return True
+ rating = 0.05
+ continue
+ break
+
+ return rating
+
+ def __init__(self, **options):
+ super(Perl6Lexer, self).__init__(**options)
+ self.encoding = options.get('encoding', 'utf-8')
+
+
+class HyLexer(RegexLexer):
+ """
+ Lexer for `Hy <http://hylang.org/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Hy'
+ aliases = ['hylang']
+ filenames = ['*.hy']
+ mimetypes = ['text/x-hy', 'application/x-hy']
+
+ special_forms = [
+ 'cond', 'for', '->', '->>', 'car',
+ 'cdr', 'first', 'rest', 'let', 'when', 'unless',
+ 'import', 'do', 'progn', 'get', 'slice', 'assoc', 'with-decorator',
+ ',', 'list_comp', 'kwapply', '~', 'is', 'in', 'is-not', 'not-in',
+ 'quasiquote', 'unquote', 'unquote-splice', 'quote', '|', '<<=', '>>=',
+ 'foreach', 'while',
+ 'eval-and-compile', 'eval-when-compile'
+ ]
+
+ declarations = [
+ 'def', 'defn', 'defun', 'defmacro', 'defclass', 'lambda', 'fn', 'setv'
+ ]
+
+ hy_builtins = []
+
+ hy_core = [
+ 'cycle', 'dec', 'distinct', 'drop', 'even?', 'filter', 'inc',
+ 'instance?', 'iterable?', 'iterate', 'iterator?', 'neg?',
+ 'none?', 'nth', 'numeric?', 'odd?', 'pos?', 'remove', 'repeat',
+ 'repeatedly', 'take', 'take_nth', 'take_while', 'zero?'
+ ]
+
+ builtins = hy_builtins + hy_core
+
+ # valid names for identifiers
+ # well, names can only not consist fully of numbers
+ # but this should be good enough for now
+ valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
+
+ def _multi_escape(entries):
+ return '(%s)' % ('|'.join(re.escape(entry) + ' ' for entry in entries))
+
+ tokens = {
+ 'root': [
+ # the comments - always starting with semicolon
+ # and going to the end of the line
+ (r';.*$', Comment.Single),
+
+ # whitespaces - usually not relevant
+ (r'[,\s]+', Text),
+
+ # numbers
+ (r'-?\d+\.\d+', Number.Float),
+ (r'-?\d+', Number.Integer),
+ (r'0[0-7]+j?', Number.Oct),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+
+ # strings, symbols and characters
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r"'" + valid_name, String.Symbol),
+ (r"\\(.|[a-z]+)", String.Char),
+ (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
+ (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
+
+ # keywords
+ (r'::?' + valid_name, String.Symbol),
+
+ # special operators
+ (r'~@|[`\'#^~&@]', Operator),
+
+ include('py-keywords'),
+ include('py-builtins'),
+
+ # highlight the special forms
+ (_multi_escape(special_forms), Keyword),
+
+ # Technically, only the special forms are 'keywords'. The problem
+ # is that only treating them as keywords means that things like
+ # 'defn' and 'ns' need to be highlighted as builtins. This is ugly
+ # and weird for most styles. So, as a compromise we're going to
+ # highlight them as Keyword.Declarations.
+ (_multi_escape(declarations), Keyword.Declaration),
+
+ # highlight the builtins
+ (_multi_escape(builtins), Name.Builtin),
+
+ # the remaining functions
+ (r'(?<=\()' + valid_name, Name.Function),
+
+ # find the remaining variables
+ (valid_name, Name.Variable),
+
+ # Hy accepts vector notation
+ (r'(\[|\])', Punctuation),
+
+ # Hy accepts map notation
+ (r'(\{|\})', Punctuation),
+
+ # the famous parentheses!
+ (r'(\(|\))', Punctuation),
+
+ ],
+ 'py-keywords': PythonLexer.tokens['keywords'],
+ 'py-builtins': PythonLexer.tokens['builtins'],
+ }
+
+ def analyse_text(text):
+ if '(import ' in text or '(defn ' in text:
+ return 0.9
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index f080327b..2727a55d 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -5,7 +5,7 @@
Lexers for assembly languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -17,7 +17,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator
__all__ = ['GasLexer', 'ObjdumpLexer','DObjdumpLexer', 'CppObjdumpLexer',
- 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'Ca65Lexer']
+ 'CObjdumpLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer', 'Ca65Lexer']
class GasLexer(RegexLexer):
@@ -25,7 +25,7 @@ class GasLexer(RegexLexer):
For Gas (AT&T) assembly code.
"""
name = 'GAS'
- aliases = ['gas']
+ aliases = ['gas', 'asm']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
@@ -96,6 +96,55 @@ class GasLexer(RegexLexer):
return 0.1
+def _objdump_lexer_tokens(asm_lexer):
+ """
+ Common objdump lexer tokens to wrap an ASM lexer.
+ """
+ hex_re = r'[0-9A-Za-z]'
+ return {
+ 'root': [
+ # File name & format:
+ ('(.*?)(:)( +file format )(.*?)$',
+ bygroups(Name.Label, Punctuation, Text, String)),
+ # Section header
+ ('(Disassembly of section )(.*?)(:)$',
+ bygroups(Text, Name.Label, Punctuation)),
+ # Function labels
+ # (With offset)
+ ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
+ bygroups(Number.Hex, Text, Punctuation, Name.Function,
+ Punctuation, Number.Hex, Punctuation)),
+ # (Without offset)
+ ('('+hex_re+'+)( )(<)(.*?)(>:)$',
+ bygroups(Number.Hex, Text, Punctuation, Name.Function,
+ Punctuation)),
+ # Code line with disassembled instructions
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
+ bygroups(Text, Name.Label, Text, Number.Hex, Text,
+ using(asm_lexer))),
+ # Code line with ascii
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
+ bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
+ # Continued code line, only raw opcodes without disassembled
+ # instruction
+ ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
+ bygroups(Text, Name.Label, Text, Number.Hex)),
+ # Skipped a few bytes
+ (r'\t\.\.\.$', Text),
+ # Relocation line
+ # (With offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
+ bygroups(Text, Name.Label, Text, Name.Property, Text,
+ Name.Constant, Punctuation, Number.Hex)),
+ # (Without offset)
+ (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
+ bygroups(Text, Name.Label, Text, Name.Property, Text,
+ Name.Constant)),
+ (r'[^\n]+\n', Other)
+ ]
+ }
+
+
class ObjdumpLexer(RegexLexer):
"""
For the output of 'objdump -dr'
@@ -105,50 +154,9 @@ class ObjdumpLexer(RegexLexer):
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
- hex = r'[0-9A-Za-z]'
- tokens = {
- 'root': [
- # File name & format:
- ('(.*?)(:)( +file format )(.*?)$',
- bygroups(Name.Label, Punctuation, Text, String)),
- # Section header
- ('(Disassembly of section )(.*?)(:)$',
- bygroups(Text, Name.Label, Punctuation)),
- # Function labels
- # (With offset)
- ('('+hex+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
- bygroups(Number.Hex, Text, Punctuation, Name.Function,
- Punctuation, Number.Hex, Punctuation)),
- # (Without offset)
- ('('+hex+'+)( )(<)(.*?)(>:)$',
- bygroups(Number.Hex, Text, Punctuation, Name.Function,
- Punctuation)),
- # Code line with disassembled instructions
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *\t)([a-zA-Z].*?)$',
- bygroups(Text, Name.Label, Text, Number.Hex, Text,
- using(GasLexer))),
- # Code line with ascii
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)( *)(.*?)$',
- bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
- # Continued code line, only raw opcodes without disassembled
- # instruction
- ('( *)('+hex+r'+:)(\t)((?:'+hex+hex+' )+)$',
- bygroups(Text, Name.Label, Text, Number.Hex)),
- # Skipped a few bytes
- (r'\t\.\.\.$', Text),
- # Relocation line
- # (With offset)
- (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x' + hex + '+)$',
- bygroups(Text, Name.Label, Text, Name.Property, Text,
- Name.Constant, Punctuation, Number.Hex)),
- # (Without offset)
- (r'(\t\t\t)('+hex+r'+:)( )([^\t]+)(\t)(.*?)$',
- bygroups(Text, Name.Label, Text, Name.Property, Text,
- Name.Constant)),
- (r'[^\n]+\n', Other)
- ]
- }
+ tokens = _objdump_lexer_tokens(GasLexer)
+
class DObjdumpLexer(DelegatingLexer):
@@ -216,6 +224,7 @@ class LlvmLexer(RegexLexer):
(r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global),
(r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous),
(r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous),
+ (r'#\d+', Name.Variable.Global),#Name.Identifier.Global),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
@@ -242,17 +251,24 @@ class LlvmLexer(RegexLexer):
r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds'
r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg'
+ r'|linker_private_weak'
+ r'|attributes|blockaddress|initialexec|localdynamic|localexec'
+ r'|prefix|unnamed_addr'
r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc'
r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel'
+ r'|intel_ocl_bicc|msp430_intrcc|spir_func|spir_kernel'
+ r'|x86_64_sysvcc|x86_64_win64cc|x86_thiscallcc'
r'|cc|c'
r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture'
r'|byval|nest|readnone|readonly'
-
r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone'
r'|noimplicitfloat|naked'
+ r'|builtin|cold|nobuiltin|noduplicate|nonlazybind|optnone'
+ r'|returns_twice|sanitize_address|sanitize_memory|sanitize_thread'
+ r'|sspstrong|uwtable|returned'
r'|type|opaque'
@@ -261,24 +277,30 @@ class LlvmLexer(RegexLexer):
r'|oeq|one|olt|ogt|ole'
r'|oge|ord|uno|ueq|une'
r'|x'
+ r'|acq_rel|acquire|alignstack|atomic|catch|cleanup|filter'
+ r'|inteldialect|max|min|monotonic|nand|personality|release'
+ r'|seq_cst|singlethread|umax|umin|unordered|xchg'
# instructions
r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl'
r'|lshr|ashr|and|or|xor|icmp|fcmp'
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
- r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
+ r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
r'|invoke|unwind|unreachable'
+ r'|indirectbr|landingpad|resume'
r'|malloc|alloca|free|load|store|getelementptr'
r'|extractelement|insertelement|shufflevector|getresult'
r'|extractvalue|insertvalue'
+ r'|atomicrmw|cmpxchg|fence'
+
r')\b', Keyword),
# Types
- (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
+ (r'void|half|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
Keyword.Type),
# Integer types
@@ -360,11 +382,25 @@ class NasmLexer(RegexLexer):
}
+class NasmObjdumpLexer(ObjdumpLexer):
+ """
+ For the output of 'objdump -d -M intel'.
+
+ .. versionadded:: 2.0
+ """
+ name = 'objdump-nasm'
+ aliases = ['objdump-nasm']
+ filenames = ['*.objdump-intel']
+ mimetypes = ['text/x-nasm-objdump']
+
+ tokens = _objdump_lexer_tokens(NasmLexer)
+
+
class Ca65Lexer(RegexLexer):
"""
For ca65 assembler sources.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'ca65'
aliases = ['ca65']
diff --git a/pygments/lexers/compiled.py b/pygments/lexers/compiled.py
index d44ab6f6..2737859b 100644
--- a/pygments/lexers/compiled.py
+++ b/pygments/lexers/compiled.py
@@ -5,7 +5,7 @@
Lexers for compiled languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -23,13 +23,16 @@ from pygments.scanner import Scanner
from pygments.lexers.functional import OcamlLexer
from pygments.lexers.jvm import JavaLexer, ScalaLexer
-__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer', 'DylanLexer',
- 'ObjectiveCLexer', 'ObjectiveCppLexer', 'FortranLexer', 'GLShaderLexer',
- 'PrologLexer', 'CythonLexer', 'ValaLexer', 'OocLexer', 'GoLexer',
- 'FelixLexer', 'AdaLexer', 'Modula2Lexer', 'BlitzMaxLexer',
- 'NimrodLexer', 'FantomLexer', 'RustLexer', 'CudaLexer', 'MonkeyLexer',
+__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
+ 'NesCLexer', 'DylanLexer', 'ObjectiveCLexer', 'ObjectiveCppLexer',
+ 'FortranLexer', 'GLShaderLexer', 'PrologLexer', 'CythonLexer',
+ 'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer', 'AdaLexer',
+ 'Modula2Lexer', 'BlitzMaxLexer', 'BlitzBasicLexer', 'NimrodLexer',
+ 'FantomLexer', 'RustLexer', 'CudaLexer', 'MonkeyLexer', 'SwigLexer',
'DylanLidLexer', 'DylanConsoleLexer', 'CobolLexer',
- 'CobolFreeformatLexer', 'LogosLexer', 'ClayLexer']
+ 'CobolFreeformatLexer', 'LogosLexer', 'ClayLexer', 'PikeLexer',
+ 'ChapelLexer', 'EiffelLexer', 'Inform6Lexer', 'Inform7Lexer',
+ 'Inform6TemplateLexer', 'MqlLexer']
class CFamilyLexer(RegexLexer):
@@ -41,7 +44,7 @@ class CFamilyLexer(RegexLexer):
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
#: only one /* */ style comment
- _ws1 = r':\s*/[*].*?[*]/\s*'
+ _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'whitespace': [
@@ -72,7 +75,6 @@ class CFamilyLexer(RegexLexer):
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
- (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(auto|break|case|const|continue|default|do|else|enum|extern|'
r'for|goto|if|register|restricted|return|sizeof|static|struct|'
r'switch|typedef|union|volatile|while)\b', Keyword),
@@ -187,7 +189,8 @@ class CLexer(CFamilyLexer):
priority = 0.1
def analyse_text(text):
- return 0.1
+ if re.search('#include [<"]', text):
+ return 0.1
class CppLexer(CFamilyLexer):
@@ -228,14 +231,113 @@ class CppLexer(CFamilyLexer):
}
def analyse_text(text):
- return 0.1
+ if re.search('#include <[a-z]+>', text):
+ return 0.2
+ if re.search('using namespace ', text):
+ return 0.4
+
+
+class PikeLexer(CppLexer):
+ """
+ For `Pike <http://pike.lysator.liu.se/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Pike'
+ aliases = ['pike']
+ filenames = ['*.pike', '*.pmod']
+ mimetypes = ['text/x-pike']
+
+ tokens = {
+ 'statements': [
+ (r'(catch|new|private|protected|public|gauge|'
+ r'throw|throws|class|interface|implement|abstract|extends|from|'
+ r'this|super|new|constant|final|static|import|use|extern|'
+ r'inline|proto|break|continue|if|else|for|'
+ r'while|do|switch|case|as|in|version|return|true|false|null|'
+ r'__VERSION__|__MAJOR__|__MINOR__|__BUILD__|__REAL_VERSION__|'
+ r'__REAL_MAJOR__|__REAL_MINOR__|__REAL_BUILD__|__DATE__|__TIME__|'
+ r'__FILE__|__DIR__|__LINE__|__AUTO_BIGNUM__|__NT__|__PIKE__|'
+ r'__amigaos__|_Pragma|static_assert|defined|sscanf)\b',
+ Keyword),
+ (r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
+ r'array|multiset|program|function|lambda|mixed|'
+ r'[a-z_][a-z0-9_]*_t)\b',
+ Keyword.Type),
+ (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
+ (r'[~!%^&*+=|?:<>/-@]', Operator),
+ inherit,
+ ],
+ 'classname': [
+ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
+ # template specification
+ (r'\s*(?=>)', Text, '#pop'),
+ ],
+ }
+
+
+class SwigLexer(CppLexer):
+ """
+ For `SWIG <http://www.swig.org/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'SWIG'
+ aliases = ['swig']
+ filenames = ['*.swg', '*.i']
+ mimetypes = ['text/swig']
+ priority = 0.04 # Lower than C/C++ and Objective C/C++
+
+ tokens = {
+ 'statements': [
+ (r'(%[a-z_][a-z0-9_]*)', Name.Function), # SWIG directives
+ ('\$\**\&?[a-zA-Z0-9_]+', Name), # Special variables
+ (r'##*[a-zA-Z_][a-zA-Z0-9_]*', Comment.Preproc), # Stringification / additional preprocessor directives
+ inherit,
+ ],
+ }
+
+ # This is a far from complete set of SWIG directives
+ swig_directives = (
+ # Most common directives
+ '%apply', '%define', '%director', '%enddef', '%exception', '%extend',
+ '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
+ '%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
+ '%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
+ # Less common directives
+ '%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
+ '%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
+ '%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
+ '%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
+ '%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
+ '%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
+ '%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
+ '%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
+ '%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
+ '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
+ '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
+ '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', '%warnfilter')
+
+ def analyse_text(text):
+ rv = 0
+ # Search for SWIG directives, which are conventionally at the beginning of
+ # a line. The probability of them being within a line is low, so let another
+ # lexer win in this case.
+ matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
+ for m in matches:
+ if m in SwigLexer.swig_directives:
+ rv = 0.98
+ break
+ else:
+ rv = 0.91 # Fraction higher than MatlabLexer
+ return rv
class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
@@ -266,11 +368,37 @@ class ECLexer(CLexer):
}
+class NesCLexer(CLexer):
+ """
+ For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
+ directives.
+
+ .. versionadded:: 2.0
+ """
+ name = 'nesC'
+ aliases = ['nesc']
+ filenames = ['*.nc']
+ mimetypes = ['text/x-nescsrc']
+
+ tokens = {
+ 'statements': [
+ (r'(abstract|as|async|atomic|call|command|component|components|'
+ r'configuration|event|extends|generic|implementation|includes|'
+ r'interface|module|new|norace|post|provides|signal|task|uses)\b',
+ Keyword),
+ (r'(nx_struct|nx_union|nx_int8_t|nx_int16_t|nx_int32_t|nx_int64_t|'
+ r'nx_uint8_t|nx_uint16_t|nx_uint32_t|nx_uint64_t)\b',
+ Keyword.Type),
+ inherit,
+ ],
+ }
+
+
class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
- *New in Pygments 1.7.*
+ .. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
@@ -321,7 +449,7 @@ class DLexer(RegexLexer):
"""
For D source.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'D'
filenames = ['*.d', '*.di']
@@ -751,7 +879,7 @@ class DelphiLexer(Lexer):
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
- for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()):
+ for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
@@ -955,7 +1083,7 @@ class DylanLexer(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Dylan'
@@ -1147,7 +1275,7 @@ class DylanLidLexer(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'DylanLID'
@@ -1185,7 +1313,7 @@ class DylanConsoleLexer(Lexer):
This is based on a copy of the RubyConsoleLexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Dylan session'
aliases = ['dylan-console', 'dylan-repl']
@@ -1249,24 +1377,29 @@ def objective(baselexer):
tokens = {
'statements': [
(r'@"', String, 'string'),
- (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
- String.Char),
+ (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'@0[0-7]+[Ll]?', Number.Oct),
(r'@\d+[Ll]?', Number.Integer),
- (r'(in|@selector|@private|@protected|@public|@encode|'
+ (r'@\([^()]+\)', Number),
+ (r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
- r'@synthesize|@dynamic|@optional)\b', Keyword),
- (r'(id|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
+ r'__bridge|__bridge_transfer|__autoreleasing|__block|__weak|__strong|'
+ r'weak|strong|retain|assign|unsafe_unretained|nonatomic|'
+ r'readonly|readwrite|setter|getter|typeof|in|out|inout|'
+ r'@synthesize|@dynamic|@optional|@required|@autoreleasepool)\b', Keyword),
+ (r'(id|instancetype|Class|IMP|SEL|BOOL|IBOutlet|IBAction|unichar)\b',
Keyword.Type),
(r'@(true|false|YES|NO)\n', Name.Builtin),
- (r'(YES|NO|nil)\b', Name.Builtin),
+ (r'(YES|NO|nil|self|super)\b', Name.Builtin),
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_classname')),
(r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
('#pop', 'oc_forward_classname')),
+ # @ can also prefix other expressions like @{...} or @(...)
+ (r'@', Punctuation),
inherit,
],
'oc_classname' : [
@@ -1290,7 +1423,7 @@ def objective(baselexer):
(r'^([-+])(\s*)' # method marker
r'(\(.*?\))?(\s*)' # return type
r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)', # begin of method name
- bygroups(Keyword, Text, using(this),
+ bygroups(Punctuation, Text, using(this),
Text, Name.Function),
'method'),
inherit,
@@ -1301,8 +1434,8 @@ def objective(baselexer):
# discussion in Issue 789
(r',', Punctuation),
(r'\.\.\.', Punctuation),
- (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
- Name.Variable)),
+ (r'(\(.*?\))(\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)',
+ bygroups(using(this), Text, Name.Variable)),
(r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
(';', Punctuation, '#pop'),
('{', Punctuation, 'function'),
@@ -1315,10 +1448,25 @@ def objective(baselexer):
return 1.0
elif '@"' in text: # strings
return 0.8
+ elif re.search('@[0-9]+', text):
+ return 0.7
elif _oc_message.search(text):
return 0.8
return 0
+ def get_tokens_unprocessed(self, text):
+ from pygments.lexers._cocoabuiltins import COCOA_INTERFACES, \
+ COCOA_PROTOCOLS, COCOA_PRIMITIVES
+
+ for index, token, value in \
+ baselexer.get_tokens_unprocessed(self, text):
+ if token is Name:
+ if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
+ or value in COCOA_PRIMITIVES:
+ token = Name.Builtin.Pseudo
+
+ yield index, token, value
+
return GeneratedObjectiveCVariant
@@ -1350,7 +1498,7 @@ class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Fortran'
aliases = ['fortran']
@@ -1405,7 +1553,7 @@ class FortranLexer(RegexLexer):
(r'(::)', Keyword.Declaration),
- (r'[(),:&%;]', Punctuation),
+ (r'[()\[\],:&%;]', Punctuation),
# Intrinsics
(r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|'
@@ -1465,7 +1613,7 @@ class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
@@ -1522,11 +1670,19 @@ class PrologLexer(RegexLexer):
(r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
- (r'[0-9]+', Number),
+ # character literal
+ (r'0\'.', String.Char),
+ (r'0b[01]+', Number.Bin),
+ (r'0o[0-7]+', Number.Oct),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ # literal with prepended base
+ (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+ (r'\d+', Number.Integer),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
- r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double),
+ r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
#(r'=(?=\s|[a-zA-Z\[])', Operator),
@@ -1569,11 +1725,11 @@ class CythonLexer(RegexLexer):
"""
For Pyrex and `Cython <http://cython.org>`_ source code.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Cython'
- aliases = ['cython', 'pyx']
+ aliases = ['cython', 'pyx', 'pyrex']
filenames = ['*.pyx', '*.pxd', '*.pxi']
mimetypes = ['text/x-cython', 'application/x-cython']
@@ -1735,7 +1891,7 @@ class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
@@ -1824,7 +1980,7 @@ class OocLexer(RegexLexer):
"""
For `Ooc <http://ooc-lang.org/>`_ source code
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Ooc'
aliases = ['ooc']
@@ -1963,7 +2119,7 @@ class FelixLexer(RegexLexer):
"""
For `Felix <http://www.felix-lang.org>`_ source code.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Felix'
@@ -2217,7 +2373,7 @@ class AdaLexer(RegexLexer):
"""
For Ada source code.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Ada'
@@ -2360,7 +2516,7 @@ class Modula2Lexer(RegexLexer):
`gm2ext`
Also highlight GNU extensions (default: False).
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
@@ -2542,7 +2698,7 @@ class BlitzMaxLexer(RegexLexer):
"""
For `BlitzMax <http://blitzbasic.com>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'BlitzMax'
@@ -2632,11 +2788,93 @@ class BlitzMaxLexer(RegexLexer):
}
+class BlitzBasicLexer(RegexLexer):
+ """
+ For `BlitzBasic <http://blitzbasic.com>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'BlitzBasic'
+ aliases = ['blitzbasic', 'b3d', 'bplus']
+ filenames = ['*.bb', '*.decls']
+ mimetypes = ['text/x-bb']
+
+ bb_vopwords = (r'\b(Shl|Shr|Sar|Mod|Or|And|Not|'
+ r'Abs|Sgn|Handle|Int|Float|Str|'
+ r'First|Last|Before|After)\b')
+ bb_sktypes = r'@{1,2}|[#$%]'
+ bb_name = r'[a-z][a-z0-9_]*'
+ bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
+ (bb_name, bb_sktypes, bb_name)
+
+ flags = re.MULTILINE | re.IGNORECASE
+ tokens = {
+ 'root': [
+ # Text
+ (r'[ \t]+', Text),
+ # Comments
+ (r";.*?\n", Comment.Single),
+ # Data types
+ ('"', String.Double, 'string'),
+ # Numbers
+ (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
+ (r'\.[0-9]+(?!\.)', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r'\$[0-9a-f]+', Number.Hex),
+ (r'\%[10]+', Number), # Binary
+ # Other
+ (r'(?:%s|([+\-*/~=<>^]))' % (bb_vopwords), Operator),
+ (r'[(),:\[\]\\]', Punctuation),
+ (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
+ # Identifiers
+ (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
+ bygroups(Keyword.Reserved, Text, Name.Class)),
+ (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
+ bygroups(Keyword.Reserved, Text, Name.Label)),
+ (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
+ bygroups(Operator, Text, Punctuation, Text, Name.Class)),
+ (r'\b%s\b([ \t]*)(\()' % bb_var,
+ bygroups(Name.Function, Text, Keyword.Type,Text, Punctuation,
+ Text, Name.Class, Text, Punctuation)),
+ (r'\b(Function)\b([ \t]+)%s' % bb_var,
+ bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
+ Text, Punctuation, Text, Name.Class)),
+ (r'\b(Type)([ \t]+)(%s)' % (bb_name),
+ bygroups(Keyword.Reserved, Text, Name.Class)),
+ # Keywords
+ (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
+ (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
+ (r'\b(End|Return|Exit|'
+ r'Chr|Len|Asc|'
+ r'New|Delete|Insert|'
+ r'Include|'
+ r'Function|'
+ r'Type|'
+ r'If|Then|Else|ElseIf|EndIf|'
+ r'For|To|Next|Step|Each|'
+ r'While|Wend|'
+ r'Repeat|Until|Forever|'
+ r'Select|Case|Default|'
+ r'Goto|Gosub|Data|Read|Restore)\b', Keyword.Reserved),
+ # Final resolve (for variable names and such)
+# (r'(%s)' % (bb_name), Name.Variable),
+ (bb_var, bygroups(Name.Variable, Text, Keyword.Type,
+ Text, Punctuation, Text, Name.Class)),
+ ],
+ 'string': [
+ (r'""', String.Double),
+ (r'"C?', String.Double, '#pop'),
+ (r'[^"]+', String.Double),
+ ],
+ }
+
+
class NimrodLexer(RegexLexer):
"""
For `Nimrod <http://nimrod-code.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Nimrod'
@@ -2777,7 +3015,7 @@ class FantomLexer(RegexLexer):
"""
For Fantom source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Fantom'
aliases = ['fan']
@@ -3007,12 +3245,12 @@ class FantomLexer(RegexLexer):
class RustLexer(RegexLexer):
"""
- Lexer for Mozilla's Rust programming language.
+ Lexer for the Rust programming language (version 0.9).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Rust'
- filenames = ['*.rs', '*.rc']
+ filenames = ['*.rs']
aliases = ['rust']
mimetypes = ['text/x-rustsrc']
@@ -3021,26 +3259,65 @@ class RustLexer(RegexLexer):
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
+ (r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
# Keywords
- (r'(as|assert|break|const'
- r'|copy|do|else|enum|extern|fail'
- r'|false|fn|for|if|impl|let|log'
- r'|loop|match|mod|move|mut|once|priv|pub|pure'
- r'|ref|return|static|struct|trait|true|type|unsafe|use|while'
- r'|u8|u16|u32|u64|i8|i16|i32|i64|uint'
- r'|int|float|f32|f64|str)\b', Keyword),
-
+ (r'(as|box|break|continue'
+ r'|do|else|enum|extern'
+ r'|fn|for|if|impl|in'
+ r'|loop|match|mut|priv|proc|pub'
+ r'|ref|return|static|\'static|struct|trait|true|type'
+ r'|unsafe|while)\b',
+ Keyword),
+ (r'(alignof|be|const|offsetof|pure|sizeof|typeof|once|unsized'
+ r'|yield)\b', Keyword.Reserved),
+ (r'(mod|use)\b', Keyword.Namespace),
+ (r'(true|false)\b', Keyword.Constant),
+ (r'let\b', Keyword.Declaration),
+ (r'(u8|u16|u32|u64|i8|i16|i32|i64|uint|int|f32|f64'
+ r'|str|bool)\b', Keyword.Type),
+ (r'self\b', Name.Builtin.Pseudo),
+ # Prelude
+ (r'(Freeze|Pod|Send|Sized|Add|Sub|Mul|Div|Rem|Neg|Not|BitAnd'
+ r'|BitOr|BitXor|Drop|Shl|Shr|Index|Option|Some|None|Result'
+ r'|Ok|Err|from_str|range|print|println|Any|AnyOwnExt|AnyRefExt'
+ r'|AnyMutRefExt|Ascii|AsciiCast|OnwedAsciiCast|AsciiStr'
+ r'|IntoBytes|Bool|ToCStr|Char|Clone|DeepClone|Eq|ApproxEq'
+ r'|Ord|TotalEq|Ordering|Less|Equal|Greater|Equiv|Container'
+ r'|Mutable|Map|MutableMap|Set|MutableSet|Default|FromStr'
+ r'|Hash|FromIterator|Extendable|Iterator|DoubleEndedIterator'
+ r'|RandomAccessIterator|CloneableIterator|OrdIterator'
+ r'|MutableDoubleEndedIterator|ExactSize|Times|Algebraic'
+ r'|Trigonometric|Exponential|Hyperbolic|Bitwise|BitCount'
+ r'|Bounded|Integer|Fractional|Real|RealExt|Num|NumCast'
+ r'|CheckedAdd|CheckedSub|CheckedMul|Orderable|Signed'
+ r'|Unsigned|Round|Primitive|Int|Float|ToStrRadix'
+ r'|ToPrimitive|FromPrimitive|GenericPath|Path|PosixPath'
+ r'|WindowsPath|RawPtr|Buffer|Writer|Reader|Seek'
+ r'|SendStr|SendStrOwned|SendStrStatic|IntoSendStr|Str'
+ r'|StrVector|StrSlice|OwnedStr|IterBytes|ToStr|IntoStr'
+ r'|CopyableTuple|ImmutableTuple|ImmutableTuple\d+'
+ r'|Tuple\d+|ImmutableEqVector|ImmutableTotalOrdVector'
+ r'|ImmutableCopyableVector|OwnedVector|OwnedCopyableVector'
+ r'|OwnedEqVector|MutableVector|MutableTotalOrdVector'
+ r'|Vector|VectorVector|CopyableVector|ImmutableVector'
+ r'|Port|Chan|SharedChan|spawn|drop)\b', Name.Builtin),
+ # Borrowed pointer
+ (r'(&)(\'[A-Za-z_]\w*)?', bygroups(Operator, Name)),
+ # Labels
+ (r'\'[A-Za-z_]\w*:', Name.Label),
# Character Literal
(r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char),
+ # Lifetime
+ (r"""'[a-zA-Z_][a-zA-Z0-9_]*""", Name.Label),
# Binary Literal
- (r'0[Bb][01_]+', Number, 'number_lit'),
+ (r'0b[01_]+', Number, 'number_lit'),
# Octal Literal
- (r'0[0-7_]+', Number.Oct, 'number_lit'),
+ (r'0o[0-7_]+', Number.Oct, 'number_lit'),
# Hexadecimal Literal
(r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
# Decimal Literal
@@ -3048,20 +3325,22 @@ class RustLexer(RegexLexer):
r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'),
# String Literal
(r'"', String, 'string'),
+ (r'r(#*)".*?"\1', String.Raw),
# Operators and Punctuation
(r'[{}()\[\],.;]', Punctuation),
(r'[+\-*/%&|<>^!~@=:?]', Operator),
# Identifier
- (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
+ (r'[a-zA-Z_]\w*', Name),
# Attributes
(r'#\[', Comment.Preproc, 'attribute['),
- (r'#\(', Comment.Preproc, 'attribute('),
# Macros
- (r'[A-Za-z_][A-Za-z0-9_]*!\[', Comment.Preproc, 'attribute['),
- (r'[A-Za-z_][A-Za-z0-9_]*!\(', Comment.Preproc, 'attribute('),
+ (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\s*\{',
+ bygroups(Comment.Preproc, Name), 'macro{'),
+ (r'([A-Za-z_]\w*)!\s*([A-Za-z_]\w*)?\(',
+ bygroups(Comment.Preproc, Name), 'macro('),
],
'number_lit': [
(r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'),
@@ -3073,6 +3352,14 @@ class RustLexer(RegexLexer):
(r'[^\\"]+', String),
(r'\\', String),
],
+ 'macro{': [
+ (r'\{', Operator, '#push'),
+ (r'\}', Operator, '#pop'),
+ ],
+ 'macro(': [
+ (r'\(', Operator, '#push'),
+ (r'\)', Operator, '#pop'),
+ ],
'attribute_common': [
(r'"', String, 'string'),
(r'\[', Comment.Preproc, 'attribute['),
@@ -3096,7 +3383,7 @@ class CudaLexer(CLexer):
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
@@ -3146,7 +3433,7 @@ class MonkeyLexer(RegexLexer):
`Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Monkey'
@@ -3274,7 +3561,7 @@ class CobolLexer(RegexLexer):
"""
Lexer for OpenCOBOL code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'COBOL'
aliases = ['cobol']
@@ -3468,7 +3755,7 @@ class CobolFreeformatLexer(CobolLexer):
"""
Lexer for Free format OpenCOBOL code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'COBOLFree'
aliases = ['cobolfree']
@@ -3487,7 +3774,7 @@ class LogosLexer(ObjectiveCppLexer):
"""
For Logos + Objective-C source code with preprocessor directives.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Logos'
@@ -3545,3 +3832,1260 @@ class LogosLexer(ObjectiveCppLexer):
if LogosLexer._logos_keywords.search(text):
return 1.0
return 0
+
+
+class ChapelLexer(RegexLexer):
+ """
+ For `Chapel <http://chapel.cray.com/>`_ source.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Chapel'
+ filenames = ['*.chpl']
+ aliases = ['chapel', 'chpl']
+ # mimetypes = ['text/x-chapel']
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text),
+
+ (r'//(.*?)\n', Comment.Single),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+
+ (r'(config|const|in|inout|out|param|ref|type|var)\b',
+ Keyword.Declaration),
+ (r'(false|nil|true)\b', Keyword.Constant),
+ (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
+ Keyword.Type),
+ (r'(atomic|begin|break|by|cobegin|coforall|continue|iter|'
+ r'delete|dmapped|do|domain|else|enum|export|extern|for|forall|'
+ r'if|index|inline|label|lambda|let|local|new|on|otherwise|'
+ r'reduce|return|scan|select|serial|single|sparse|'
+ r'subdomain|sync|then|use|when|where|while|yield|zip)\b',
+ Keyword),
+ (r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
+ (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
+ 'classname'),
+
+ # imaginary integers
+ (r'\d+i', Number),
+ (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
+ (r'\.\d+([Ee][-+]\d+)?i', Number),
+ (r'\d+[Ee][-+]\d+i', Number),
+
+ # reals cannot end with a period due to lexical ambiguity with
+ # .. operator. See reference for rationale.
+ (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
+ (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
+
+ # integer literals
+ # -- binary
+ (r'0[bB][0-1]+', Number),
+ # -- hex
+ (r'0[xX][0-9a-fA-F]+', Number.Hex),
+ # -- decimal
+ (r'(0|[1-9][0-9]*)', Number.Integer),
+
+ # strings
+ (r'["\'](\\\\|\\"|[^"\'])*["\']', String),
+
+ # tokens
+ (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
+ r'<=>|\.\.|by|#|\.\.\.|'
+ r'&&|\|\||!|&|\||\^|~|<<|>>|'
+ r'==|!=|<=|>=|<|>|'
+ r'[+\-*/%]|\*\*)', Operator),
+ (r'[:;,.?()\[\]{}]', Punctuation),
+
+ # identifiers
+ (r'[a-zA-Z_][a-zA-Z0-9_$]*', Name.Other),
+ ],
+ 'classname': [
+ (r'[a-zA-Z_][a-zA-Z0-9_$]*', Name.Class, '#pop'),
+ ],
+ 'procname': [
+ (r'[a-zA-Z_][a-zA-Z0-9_$]*', Name.Function, '#pop'),
+ ],
+ }
+
+
+class EiffelLexer(RegexLexer):
+ """
+ For `Eiffel <http://www.eiffel.com>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Eiffel'
+ aliases = ['eiffel']
+ filenames = ['*.e']
+ mimetypes = ['text/x-eiffel']
+
+ tokens = {
+ 'root': [
+ (r'[^\S\n]+', Text),
+ (r'--.*?\n', Comment.Single),
+ (r'[^\S\n]+', Text),
+ # Please note that keyword and operator are case insensitive.
+ (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
+ (r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
+ (r'(?i)\b(across|agent|alias|all|as|assign|attached|attribute|check|'
+ r'class|convert|create|debug|deferred|detachable|do|else|elseif|'
+ r'end|ensure|expanded|export|external|feature|from|frozen|if|'
+ r'inherit|inspect|invariant|like|local|loop|none|note|obsolete|'
+ r'old|once|only|redefine|rename|require|rescue|retry|select|'
+ r'separate|then|undefine|until|variant|when)\b',Keyword.Reserved),
+ (r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
+ (r'"([^"%\n]|%.)*?"', String),
+ include('numbers'),
+ (r"'([^'%]|%'|%%)'", String.Char),
+ (r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\\?!#%&@|+/\-=\>\*$<|^\[\]])", Operator),
+ (r"([{}():;,.])", Punctuation),
+ (r'([a-z][a-zA-Z0-9_]*)|([A-Z][A-Z0-9_]*[a-z][a-zA-Z0-9_]*)', Name),
+ (r'([A-Z][A-Z0-9_]*)', Name.Class),
+ (r'\n+', Text),
+ ],
+ 'numbers': [
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (r'0[bB][0-1]+', Number.Bin),
+ (r'0[cC][0-7]+', Number.Oct),
+ (r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ ],
+ }
+
+
+class Inform6Lexer(RegexLexer):
+ """
+ For `Inform 6 <http://inform-fiction.org/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 6'
+ aliases = ['inform6', 'i6']
+ filenames = ['*.inf']
+
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ _name = r'[a-zA-Z_][a-zA-Z_0-9]*'
+
+ # Inform 7 maps these four character classes to their ASCII
+ # equivalents. To support Inform 6 inclusions within Inform 7,
+ # Inform6Lexer maps them too.
+ _dash = u'\\-\u2010-\u2014'
+ _dquote = u'"\u201c\u201d'
+ _squote = u"'\u2018\u2019"
+ _newline = u'\\n\u0085\u2028\u2029'
+
+ tokens = {
+ 'root': [
+ (r'(\A(!%%[^%s]*[%s])+)?' % (_newline, _newline), Comment.Preproc,
+ 'directive')
+ ],
+ '_whitespace': [
+ (r'\s+', Text),
+ (r'![^%s]*' % _newline, Comment.Single)
+ ],
+ 'default': [
+ include('_whitespace'),
+ (r'\[', Punctuation, 'many-values'), # Array initialization
+ (r':|(?=;)', Punctuation, '#pop'),
+ (r'<', Punctuation), # Second angle bracket in an action statement
+ (r'', Text, ('expression', '_expression'))
+ ],
+
+ # Expressions
+ '_expression': [
+ include('_whitespace'),
+ (r'(?=sp\b)', Text, '#pop'),
+ (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
+ ('#pop', 'value')),
+ (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
+ (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
+ ],
+ 'expression': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('expression', '_expression')),
+ (r'\)', Punctuation, '#pop'),
+ (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
+ (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
+ (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
+ (r',', Punctuation, '_expression'),
+ (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
+ Operator, '_expression'),
+ (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
+ '_expression'),
+ (r'sp\b', Name),
+ (r'\?~?', Name.Label, 'label?'),
+ (r'[@{]', Error),
+ (r'', Text, '#pop')
+ ],
+ '_assembly-expression': [
+ (r'\(', Punctuation, ('#push', '_expression')),
+ (r'[\[\]]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, '_expression'),
+ (r'sp\b', Keyword.Pseudo),
+ (r';', Punctuation, '#pop:3'),
+ include('expression')
+ ],
+ '_for-expression': [
+ (r'\)', Punctuation, '#pop:2'),
+ (r':', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_keyword-expression': [
+ (r'(from|near|to)\b', Keyword, '_expression'),
+ include('expression')
+ ],
+ '_list-expression': [
+ (r',', Punctuation, '#pop'),
+ include('expression')
+ ],
+ '_object-expression': [
+ (r'has\b', Keyword.Declaration, '#pop'),
+ include('_list-expression')
+ ],
+
+ # Values
+ 'value': [
+ include('_whitespace'),
+ # Strings
+ (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
+ (r'([%s])(@{[0-9a-fA-F]{1,4}})([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'([%s])(@..)([%s])' % (_squote, _squote),
+ bygroups(String.Char, String.Escape, String.Char), '#pop'),
+ (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
+ # Numbers
+ (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
+ Number.Float, '#pop'),
+ (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
+ (r'\$\$[01]+', Number, '#pop'), # Binary
+ (r'[0-9]+', Number.Integer, '#pop'),
+ # Values prefixed by hashes
+ (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
+ (r'(#g\$)(%s)' % _name,
+ bygroups(Operator, Name.Variable.Global), '#pop'),
+ (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
+ (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
+ (r'#', Name.Builtin, ('#pop', 'system-constant')),
+ # System functions
+ (r'(child|children|elder|eldest|glk|indirect|metaclass|parent|'
+ r'random|sibling|younger|youngest)\b', Name.Builtin, '#pop'),
+ # Metaclasses
+ (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
+ # Veneer routines
+ (r'(?i)(Box__Routine|CA__Pr|CDefArt|CInDefArt|Cl__Ms|'
+ r'Copy__Primitive|CP__Tab|DA__Pr|DB__Pr|DefArt|Dynam__String|'
+ r'EnglishNumber|Glk__Wrap|IA__Pr|IB__Pr|InDefArt|Main__|'
+ r'Meta__class|OB__Move|OB__Remove|OC__Cl|OP__Pr|Print__Addr|'
+ r'Print__PName|PrintShortName|RA__Pr|RA__Sc|RL__Pr|R_Process|'
+ r'RT__ChG|RT__ChGt|RT__ChLDB|RT__ChLDW|RT__ChPR|RT__ChPrintA|'
+ r'RT__ChPrintC|RT__ChPrintO|RT__ChPrintS|RT__ChPS|RT__ChR|'
+ r'RT__ChSTB|RT__ChSTW|RT__ChT|RT__Err|RT__TrPS|RV__Pr|'
+ r'Symb__Tab|Unsigned__Compare|WV__Pr|Z__Region)\b', Name.Builtin,
+ '#pop'),
+ # Other built-in symbols
+ (r'(?i)(call|copy|create|DEBUG|destroy|DICT_CHAR_SIZE|'
+ r'DICT_ENTRY_BYTES|DICT_IS_UNICODE|DICT_WORD_SIZE|false|'
+ r'FLOAT_INFINITY|FLOAT_NAN|FLOAT_NINFINITY|GOBJFIELD_CHAIN|'
+ r'GOBJFIELD_CHILD|GOBJFIELD_NAME|GOBJFIELD_PARENT|'
+ r'GOBJFIELD_PROPTAB|GOBJFIELD_SIBLING|GOBJ_EXT_START|'
+ r'GOBJ_TOTAL_LENGTH|Grammar__Version|INDIV_PROP_START|INFIX|'
+ r'infix__watching|MODULE_MODE|name|nothing|NUM_ATTR_BYTES|print|'
+ r'print_to_array|recreate|remaining|self|sender|STRICT_MODE|'
+ r'sw__var|sys__glob0|sys__glob1|sys__glob2|sys_statusline_flag|'
+ r'TARGET_GLULX|TARGET_ZCODE|temp__global2|temp__global3|'
+ r'temp__global4|temp_global|true|USE_MODULES|WORDSIZE)\b',
+ Name.Builtin, '#pop'),
+ # Other values
+ (_name, Name, '#pop')
+ ],
+ # Strings
+ 'dictionary-word': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _squote, String.Single),
+ (r'[({]', String.Single),
+ (r'@{[0-9a-fA-F]{,4}}', String.Escape),
+ (r'@..', String.Escape),
+ (r'[%s]' % _squote, String.Single, '#pop')
+ ],
+ 'string': [
+ (r'[~^]+', String.Escape),
+ (r'[^~^\\@({%s]+' % _dquote, String.Double),
+ (r'[({]', String.Double),
+ (r'\\', String.Escape),
+ (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
+ (_newline, _newline), String.Escape),
+ (r'@(\\\s*[%s]\s*)*{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
+ r'(\\\s*[%s]\s*)*}' % (_newline, _newline, _newline),
+ String.Escape),
+ (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
+ String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ 'plain-string': [
+ (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
+ (r'[~^({\[\]]', String.Double),
+ (r'\\', String.Escape),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ # Names
+ '_constant': [
+ include('_whitespace'),
+ (_name, Name.Constant, '#pop'),
+ include('value')
+ ],
+ '_global': [
+ include('_whitespace'),
+ (_name, Name.Variable.Global, '#pop'),
+ include('value')
+ ],
+ 'label?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Label, '#pop')
+ ],
+ 'variable?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Variable, '#pop')
+ ],
+ # Values after hashes
+ 'obsolete-dictionary-word': [
+ (r'\S[a-zA-Z_0-9]*', String.Other, '#pop')
+ ],
+ 'system-constant': [
+ include('_whitespace'),
+ (_name, Name.Builtin, '#pop')
+ ],
+
+ # Directives
+ 'directive': [
+ include('_whitespace'),
+ (r'#', Punctuation),
+ (r';', Punctuation, '#pop'),
+ (r'\[', Punctuation,
+ ('default', 'statements', 'locals', 'routine-name?')),
+ (r'(?i)(abbreviate|endif|dictionary|ifdef|iffalse|ifndef|ifnot|'
+ r'iftrue|ifv3|ifv5|release|serial|switches|system_file|version)'
+ r'\b', Keyword, 'default'),
+ (r'(?i)(array|global)\b', Keyword,
+ ('default', 'directive-keyword?', '_global')),
+ (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
+ (r'(?i)class\b', Keyword,
+ ('object-body', 'duplicates', 'class-name')),
+ (r'(?i)(constant|default)\b', Keyword,
+ ('default', 'expression', '_constant')),
+ (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
+ (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
+ (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
+ (r'(?i)import\b', Keyword, 'manifest'),
+ (r'(?i)(include|link)\b', Keyword,
+ ('default', 'before-plain-string')),
+ (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
+ (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
+ (r'(?i)(nearby|object)\b', Keyword,
+ ('object-body', '_object-head')),
+ (r'(?i)property\b', Keyword,
+ ('default', 'alias?', '_constant', 'property-keyword*')),
+ (r'(?i)replace\b', Keyword,
+ ('default', 'routine-name?', 'routine-name?')),
+ (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
+ (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
+ (r'(?i)trace\b', Keyword,
+ ('default', 'trace-keyword?', 'trace-keyword?')),
+ (r'(?i)zcharacter\b', Keyword,
+ ('default', 'directive-keyword?', 'directive-keyword?')),
+ (_name, Name.Class, ('object-body', '_object-head'))
+ ],
+ # [, Replace, Stub
+ 'routine-name?': [
+ include('_whitespace'),
+ (r'(%s)?' % _name, Name.Function, '#pop')
+ ],
+ 'locals': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'\*', Punctuation),
+ (_name, Name.Variable)
+ ],
+ # Array
+ 'many-values': [
+ include('_whitespace'),
+ (r';', Punctuation),
+ (r'\]', Punctuation, '#pop'),
+ (r':', Error),
+ (r'', Text, ('expression', '_expression'))
+ ],
+ # Attribute, Property
+ 'alias?': [
+ include('_whitespace'),
+ (r'alias\b', Keyword, ('#pop', '_constant')),
+ (r'', Text, '#pop')
+ ],
+ # Class, Object, Nearby
+ 'class-name': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class, '#pop')
+ ],
+ 'duplicates': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
+ (r'', Text, '#pop')
+ ],
+ '_object-head': [
+ (r'[%s]>' % _dash, Punctuation),
+ (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
+ include('_global')
+ ],
+ 'object-body': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop:2'),
+ (r',', Punctuation),
+ (r'class\b', Keyword.Declaration, 'class-segment'),
+ (r'(has|private|with)\b', Keyword.Declaration),
+ (r':', Error),
+ (r'', Text, ('_object-expression', '_expression'))
+ ],
+ 'class-segment': [
+ include('_whitespace'),
+ (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
+ (_name, Name.Class),
+ (r'', Text, 'value')
+ ],
+ # Extend, Verb
+ 'grammar': [
+ include('_whitespace'),
+ (r'=', Punctuation, ('#pop', 'default')),
+ (r'\*', Punctuation, ('#pop', 'grammar-line')),
+ (r'', Text, '_directive-keyword')
+ ],
+ 'grammar-line': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r'[/*]', Punctuation),
+ (r'[%s]>' % _dash, Punctuation, 'value'),
+ (r'(noun|scope)\b', Keyword, '=routine'),
+ (r'', Text, '_directive-keyword')
+ ],
+ '=routine': [
+ include('_whitespace'),
+ (r'=', Punctuation, 'routine-name?'),
+ (r'', Text, '#pop')
+ ],
+ # Import
+ 'manifest': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'(?i)(global\b)?', Keyword, '_global')
+ ],
+ # Include, Link, Message
+ 'diagnostic': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
+ (r'', Text, ('#pop', 'before-plain-string', 'directive-keyword?'))
+ ],
+ 'before-plain-string': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
+ ],
+ 'message-string': [
+ (r'[~^]+', String.Escape),
+ include('plain-string')
+ ],
+
+ # Keywords used in directives
+ '_directive-keyword!': [
+ include('_whitespace'),
+ (r'(additive|alias|buffer|class|creature|data|error|fatalerror|'
+ r'first|has|held|initial|initstr|last|long|meta|multi|'
+ r'multiexcept|multiheld|multiinside|noun|number|only|private|'
+ r'replace|reverse|scope|score|special|string|table|terminating|'
+ r'time|topic|warning|with)\b', Keyword, '#pop'),
+ (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
+ ],
+ '_directive-keyword': [
+ include('_directive-keyword!'),
+ include('value')
+ ],
+ 'directive-keyword?': [
+ include('_directive-keyword!'),
+ (r'', Text, '#pop')
+ ],
+ 'property-keyword*': [
+ include('_whitespace'),
+ (r'(additive|long)\b', Keyword),
+ (r'', Text, '#pop')
+ ],
+ 'trace-keyword?': [
+ include('_whitespace'),
+ (r'(assembly|dictionary|expressions|lines|linker|objects|off|on|'
+ r'symbols|tokens|verbs)\b', Keyword, '#pop'),
+ (r'', Text, '#pop')
+ ],
+
+ # Statements
+ 'statements': [
+ include('_whitespace'),
+ (r'\]', Punctuation, '#pop'),
+ (r'[;{}]', Punctuation),
+ (r'(box|break|continue|default|give|inversion|new_line|quit|read|'
+ r'remove|return|rfalse|rtrue|spaces|string|until)\b', Keyword,
+ 'default'),
+ (r'(do|else)\b', Keyword),
+ (r'(font|style)\b', Keyword,
+ ('default', 'miscellaneous-keyword?')),
+ (r'for\b', Keyword, ('for', '(?')),
+ (r'(if|switch|while)', Keyword,
+ ('expression', '_expression', '(?')),
+ (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
+ (r'objectloop\b', Keyword,
+ ('_keyword-expression', 'variable?', '(?')),
+ (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
+ (r'\.', Name.Label, 'label?'),
+ (r'@', Keyword, 'opcode'),
+ (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
+ (r'<', Punctuation, 'default'),
+ (r'(move\b)?', Keyword,
+ ('default', '_keyword-expression', '_expression'))
+ ],
+ 'miscellaneous-keyword?': [
+ include('_whitespace'),
+ (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
+ Keyword, '#pop'),
+ (r'(a|A|an|address|char|name|number|object|property|string|the|'
+ r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
+ '#pop'),
+ (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
+ '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '(?': [
+ include('_whitespace'),
+ (r'\(?', Punctuation, '#pop')
+ ],
+ 'for': [
+ include('_whitespace'),
+ (r';?', Punctuation, ('_for-expression', '_expression'))
+ ],
+ 'print-list': [
+ include('_whitespace'),
+ (r';', Punctuation, '#pop'),
+ (r':', Error),
+ (r'', Text,
+ ('_list-expression', '_expression', '_list-expression', 'form'))
+ ],
+ 'form': [
+ include('_whitespace'),
+ (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
+ (r'', Text, '#pop')
+ ],
+
+ # Assembly
+ 'opcode': [
+ include('_whitespace'),
+ (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
+ (_name, Keyword, 'operands')
+ ],
+ 'operands': [
+ (r':', Error),
+ (r'', Text, ('_assembly-expression', '_expression'))
+ ]
+ }
+
+ def get_tokens_unprocessed(self, text):
+ # Post-processing pass over RegexLexer's output that retags 'in'.
+ # 'in' is either a keyword or an operator.
+ # If the token two tokens after 'in' is ')', 'in' is a keyword:
+ # objectloop(a in b)
+ # Otherwise, it is an operator:
+ # objectloop(a in b && true)
+ # Buffer for tokens seen after a candidate 'in'; entry 0 is a
+ # list (not a tuple) so its token type can be rewritten below.
+ objectloop_queue = []
+ # Significant tokens still needed to decide; -1 = nothing pending.
+ objectloop_token_count = -1
+ previous_token = None
+ for index, token, value in RegexLexer.get_tokens_unprocessed(self,
+ text):
+ if previous_token is Name.Variable and value == 'in':
+ # Start the two-token lookahead after 'variable in'.
+ objectloop_queue = [[index, token, value]]
+ objectloop_token_count = 2
+ elif objectloop_token_count > 0:
+ # Comments and whitespace do not count toward the lookahead.
+ if token not in Comment and token not in Text:
+ objectloop_token_count -= 1
+ objectloop_queue.append((index, token, value))
+ else:
+ if objectloop_token_count == 0:
+ # Lookahead complete: a ')' right after the operand
+ # means 'in' was the objectloop keyword, not an operator.
+ if objectloop_queue[-1][2] == ')':
+ objectloop_queue[0][1] = Keyword
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
+ objectloop_token_count = -1
+ yield index, token, value
+ if token not in Comment and token not in Text:
+ previous_token = token
+ # Flush anything still buffered when input ends mid-lookahead.
+ while objectloop_queue:
+ yield objectloop_queue.pop(0)
+
+
+class Inform7Lexer(RegexLexer):
+ """
+ For `Inform 7 <http://inform7.com/>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 7'
+ aliases = ['inform7', 'i7']
+ filenames = ['*.ni', '*.i7x']
+
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ # Character classes shared with the Inform 6 lexer.
+ _dash = Inform6Lexer._dash
+ _dquote = Inform6Lexer._dquote
+ _newline = Inform6Lexer._newline
+ _start = r'\A|(?<=[%s])' % _newline
+
+ # There are three variants of Inform 7, differing in how to
+ # interpret at signs and braces in I6T. In top-level inclusions, at
+ # signs in the first column are inweb syntax. In phrase definitions
+ # and use options, tokens in braces are treated as I7. Use options
+ # also interpret "{N}".
+ tokens = {}
+ token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
+
+ # Build one complete token table per I6T variant.
+ for level in token_variants:
+ tokens[level] = {
+ '+i6-root': list(Inform6Lexer.tokens['root']),
+ '+i6t-root': [ # For Inform6TemplateLexer
+ (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
+ ('directive', '+p'))
+ ],
+ 'root': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]' % _dquote, Generic.Heading,
+ ('+main', '+titling', '+titling-string')),
+ (r'', Text, ('+main', '+heading?'))
+ ],
+ '+titling-string': [
+ (r'[^%s]+' % _dquote, Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '#pop')
+ ],
+ '+titling': [
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
+ (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
+ (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
+ Text, ('#pop', '+heading?')),
+ (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
+ (r'[|%s]' % _newline, Generic.Heading)
+ ],
+ '+main': [
+ (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
+ (r'[%s]' % _dquote, String.Double, '+text'),
+ (r':', Text, '+phrase-definition'),
+ (r'(?i)\bas\b', Text, '+use-option'),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-not-inline'), Punctuation)),
+ (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
+ (_start, _dquote, _newline), Text, '+heading?'),
+ (r'(?i)[a(|%s]' % _newline, Text)
+ ],
+ '+phrase-definition': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive',
+ 'default', 'statements'),
+ i6t='+i6t-inline'), Punctuation), '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '+use-option': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
+ bygroups(Punctuation,
+ using(this, state=('+i6-root', 'directive'),
+ i6t='+i6t-use-option'), Punctuation), '#pop'),
+ (r'', Text, '#pop')
+ ],
+ # Square-bracket comments nest, hence the '#push'.
+ '+comment': [
+ (r'[^\[\]]+', Comment.Multiline),
+ (r'\[', Comment.Multiline, '#push'),
+ (r'\]', Comment.Multiline, '#pop')
+ ],
+ '+text': [
+ (r'[^\[%s]+' % _dquote, String.Double),
+ (r'\[.*?\]', String.Interpol),
+ (r'[%s]' % _dquote, String.Double, '#pop')
+ ],
+ '+heading?': [
+ (r'(\|?\s)+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
+ (r'[%s]{1,3}' % _dash, Text),
+ (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
+ Generic.Heading, '#pop'),
+ (r'', Text, '#pop')
+ ],
+ '+documentation-heading': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'(?i)documentation\s+', Text, '+documentation-heading2'),
+ (r'', Text, '#pop')
+ ],
+ '+documentation-heading2': [
+ (r'\s+', Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ (r'[%s]{4}\s' % _dash, Text, '+documentation'),
+ (r'', Text, '#pop:2')
+ ],
+ '+documentation': [
+ (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
+ (_start, _newline), Generic.Heading),
+ (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
+ Generic.Subheading),
+ (r'((%s)\t.*?[%s])+' % (_start, _newline),
+ using(this, state='+main')),
+ (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
+ (r'\[', Comment.Multiline, '+comment'),
+ ],
+ # I6T (Inform 6 template) syntax states; one per variant above.
+ '+i6t-not-inline': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p')
+ ],
+ '+i6t-use-option': [
+ include('+i6t-not-inline'),
+ (r'({)(N)(})', bygroups(Punctuation, Text, Punctuation))
+ ],
+ '+i6t-inline': [
+ (r'({)(\S[^}]*)?(})',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+i6t': [
+ (r'({[%s])(![^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Comment.Single, Punctuation)),
+ (r'({[%s])(lines)(:)([^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation), '+lines'),
+ (r'({[%s])([^:}]*)(:?)([^}]*)(}?)' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation, Text,
+ Punctuation)),
+ (r'(\(\+)(.*?)(\+\)|\Z)',
+ bygroups(Punctuation, using(this, state='+main'),
+ Punctuation))
+ ],
+ '+p': [
+ (r'[^@]+', Comment.Preproc),
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc, '#pop'),
+ (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading),
+ (r'@', Comment.Preproc)
+ ],
+ '+lines': [
+ (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
+ Comment.Preproc),
+ (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
+ Comment.Preproc),
+ (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
+ Generic.Heading, '+p'),
+ (r'(%s)@[a-zA-Z_0-9]*[ %s]' % (_start, _newline), Keyword),
+ (r'![^%s]*' % _newline, Comment.Single),
+ (r'({)([%s]endlines)(})' % _dash,
+ bygroups(Punctuation, Keyword, Punctuation), '#pop'),
+ (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
+ ]
+ }
+ # Inform 7 can include snippets of Inform 6 template language,
+ # so all of Inform6Lexer's states are copied here, with
+ # modifications to account for template syntax. Inform7Lexer's
+ # own states begin with '+' to avoid name conflicts. Some of
+ # Inform6Lexer's states begin with '_': these are not modified.
+ # They deal with template syntax either by including modified
+ # states, or by matching r'' then pushing to modified states.
+ for token in Inform6Lexer.tokens:
+ if token == 'root':
+ continue
+ tokens[level][token] = list(Inform6Lexer.tokens[token])
+ if not token.startswith('_'):
+ tokens[level][token][:0] = [include('+i6t'), include(level)]
+
+ def __init__(self, **options):
+ # Token tables are processed lazily, once per I6T variant
+ # (selected by the 'i6t' option), and cached in _all_tokens.
+ level = options.get('i6t', '+i6t-not-inline')
+ if level not in self._all_tokens:
+ self._tokens = self.__class__.process_tokendef(level)
+ else:
+ self._tokens = self._all_tokens[level]
+ RegexLexer.__init__(self, **options)
+
+
+class Inform6TemplateLexer(Inform7Lexer):
+ """
+ For `Inform 6 template
+ <http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Inform 6 template'
+ aliases = ['i6t']
+ filenames = ['*.i6t']
+
+ def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
+ # Same lexing as Inform 7, but starting in the I6T root state.
+ return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
+
+
+class MqlLexer(CppLexer):
+ """
+ For `MQL4 <http://docs.mql4.com/>`_ and
+ `MQL5 <http://www.mql5.com/en/docs>`_ source code.
+
+ .. versionadded:: 2.0
+ """
+ name = 'MQL'
+ aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
+ filenames = ['*.mq4', '*.mq5', '*.mqh']
+ mimetypes = ['text/x-mql']
+
+ tokens = {
+ 'statements': [
+ (r'(input|_Digits|_Point|_LastError|_Period|_RandomSeed|'
+ r'_StopFlag|_Symbol|_UninitReason|'
+ r'Ask|Bars|Bid|Close|Digits|High|Low|Open|Point|Time|Volume)\b',
+ Keyword),
+ (r'(void|char|uchar|bool|short|ushort|int|uint|color|long|ulong|datetime|'
+ r'float|double|string)\b',
+ Keyword.Type),
+ (r'(Alert|CheckPointer|Comment|DebugBreak|ExpertRemove|'
+ r'GetPointer|GetTickCount|MessageBox|PeriodSeconds|PlaySound|'
+ r'Print|PrintFormat|ResetLastError|ResourceCreate|ResourceFree|'
+ r'ResourceReadImage|ResourceSave|SendFTP|SendMail|SendNotification|'
+ r'Sleep|TerminalClose|TesterStatistics|ZeroMemory|'
+ r'ArrayBsearch|ArrayCopy|ArrayCompare|ArrayFree|ArrayGetAsSeries|'
+ r'ArrayInitialize|ArrayFill|ArrayIsSeries|ArrayIsDynamic|'
+ r'ArrayMaximum|ArrayMinimum|ArrayRange|ArrayResize|'
+ r'ArraySetAsSeries|ArraySize|ArraySort|ArrayCopyRates|'
+ r'ArrayCopySeries|ArrayDimension|'
+ r'CharToString|DoubleToString|EnumToString|NormalizeDouble|'
+ r'StringToDouble|StringToInteger|StringToTime|TimeToString|'
+ r'IntegerToString|ShortToString|ShortArrayToString|'
+ r'StringToShortArray|CharArrayToString|StringToCharArray|'
+ r'ColorToARGB|ColorToString|StringToColor|StringFormat|'
+ r'CharToStr|DoubleToStr|StrToDouble|StrToInteger|StrToTime|TimeToStr|'
+ r'MathAbs|MathArccos|MathArcsin|MathArctan|MathCeil|MathCos|MathExp|'
+ r'MathFloor|MathLog|MathMax|MathMin|MathMod|MathPow|MathRand|'
+ r'MathRound|MathSin|MathSqrt|MathSrand|MathTan|MathIsValidNumber|'
+ r'StringAdd|StringBufferLen|StringCompare|StringConcatenate|StringFill|'
+ r'StringFind|StringGetCharacter|StringInit|StringLen|StringReplace|'
+ r'StringSetCharacter|StringSplit|StringSubstr|StringToLower|StringToUpper|'
+ r'StringTrimLeft|StringTrimRight|StringGetChar|StringSetChar|'
+ r'TimeCurrent|TimeTradeServer|TimeLocal|TimeGMT|TimeDaylightSavings|'
+ r'TimeGMTOffset|TimeToStruct|StructToTime|Day|DayOfWeek|DayOfYear|'
+ r'Hour|Minute|Month|Seconds|TimeDay|TimeDayOfWeek|TimeDayOfYear|TimeHour|'
+ r'TimeMinute|TimeMonth|TimeSeconds|TimeYear|Year|'
+ r'AccountInfoDouble|AccountInfoInteger|AccountInfoString|AccountBalance|'
+ r'AccountCredit|AccountCompany|AccountCurrency|AccountEquity|'
+ r'AccountFreeMargin|AccountFreeMarginCheck|AccountFreeMarginMode|'
+ r'AccountLeverage|AccountMargin|AccountName|AccountNumber|AccountProfit|'
+ r'AccountServer|AccountStopoutLevel|AccountStopoutMode|'
+ r'GetLastError|IsStopped|UninitializeReason|MQLInfoInteger|MQLInfoString|'
+ r'Symbol|Period|Digits|Point|IsConnected|IsDemo|IsDllsAllowed|'
+ r'IsExpertEnabled|IsLibrariesAllowed|IsOptimization|IsTesting|'
+ r'IsTradeAllowed|'
+ r'IsTradeContextBusy|IsVisualMode|TerminalCompany|TerminalName|'
+ r'TerminalPath|'
+ r'SymbolsTotal|SymbolName|SymbolSelect|SymbolIsSynchronized|'
+ r'SymbolInfoDouble|'
+ r'SymbolInfoInteger|SymbolInfoString|SymbolInfoTick|'
+ r'SymbolInfoSessionQuote|'
+ r'SymbolInfoSessionTrade|MarketInfo|'
+ r'SeriesInfoInteger|CopyRates|CopyTime|CopyOpen|'
+ r'CopyHigh|CopyLow|CopyClose|'
+ r'CopyTickVolume|CopyRealVolume|CopySpread|iBars|iBarShift|iClose|'
+ r'iHigh|iHighest|iLow|iLowest|iOpen|iTime|iVolume|'
+ r'HideTestIndicators|Period|RefreshRates|Symbol|WindowBarsPerChart|'
+ r'WindowExpertName|WindowFind|WindowFirstVisibleBar|WindowHandle|'
+ r'WindowIsVisible|WindowOnDropped|WindowPriceMax|WindowPriceMin|'
+ r'WindowPriceOnDropped|WindowRedraw|WindowScreenShot|'
+ r'WindowTimeOnDropped|WindowsTotal|WindowXOnDropped|WindowYOnDropped|'
+ r'OrderClose|OrderCloseBy|OrderClosePrice|OrderCloseTime|OrderComment|'
+ r'OrderCommission|OrderDelete|OrderExpiration|OrderLots|OrderMagicNumber|'
+ r'OrderModify|OrderOpenPrice|OrderOpenTime|OrderPrint|OrderProfit|'
+ r'OrderSelect|OrderSend|OrdersHistoryTotal|OrderStopLoss|OrdersTotal|'
+ r'OrderSwap|OrderSymbol|OrderTakeProfit|OrderTicket|OrderType|'
+ r'GlobalVariableCheck|GlobalVariableTime|'
+ r'GlobalVariableDel|GlobalVariableGet|GlobalVariableName|'
+ r'GlobalVariableSet|GlobalVariablesFlush|GlobalVariableTemp|'
+ r'GlobalVariableSetOnCondition|GlobalVariablesDeleteAll|'
+ r'GlobalVariablesTotal|GlobalVariableCheck|GlobalVariableTime|'
+ r'GlobalVariableDel|GlobalVariableGet|'
+ r'GlobalVariableName|GlobalVariableSet|GlobalVariablesFlush|'
+ r'GlobalVariableTemp|GlobalVariableSetOnCondition|'
+ r'GlobalVariablesDeleteAll|GlobalVariablesTotal|'
+ r'GlobalVariableCheck|GlobalVariableTime|GlobalVariableDel|'
+ r'GlobalVariableGet|GlobalVariableName|GlobalVariableSet|'
+ r'GlobalVariablesFlush|GlobalVariableTemp|'
+ r'GlobalVariableSetOnCondition|GlobalVariablesDeleteAll|'
+ r'GlobalVariablesTotal|'
+ r'FileFindFirst|FileFindNext|FileFindClose|FileOpen|FileDelete|'
+ r'FileFlush|FileGetInteger|FileIsEnding|FileIsLineEnding|'
+ r'FileClose|FileIsExist|FileCopy|FileMove|FileReadArray|'
+ r'FileReadBool|FileReadDatetime|FileReadDouble|FileReadFloat|'
+ r'FileReadInteger|FileReadLong|FileReadNumber|FileReadString|'
+ r'FileReadStruct|FileSeek|FileSize|FileTell|FileWrite|'
+ r'FileWriteArray|FileWriteDouble|FileWriteFloat|FileWriteInteger|'
+ r'FileWriteLong|FileWriteString|FileWriteStruct|FolderCreate|'
+ r'FolderDelete|FolderClean|FileOpenHistory|'
+ r'IndicatorSetDouble|IndicatorSetInteger|IndicatorSetString|'
+ r'SetIndexBuffer|IndicatorBuffers|IndicatorCounted|IndicatorDigits|'
+ r'IndicatorShortName|SetIndexArrow|SetIndexDrawBegin|'
+ r'SetIndexEmptyValue|SetIndexLabel|SetIndexShift|'
+ r'SetIndexStyle|SetLevelStyle|SetLevelValue|'
+ r'ObjectCreate|ObjectName|ObjectDelete|ObjectsDeleteAll|'
+ r'ObjectFind|ObjectGetTimeByValue|ObjectGetValueByTime|'
+ r'ObjectMove|ObjectsTotal|ObjectGetDouble|ObjectGetInteger|'
+ r'ObjectGetString|ObjectSetDouble|ObjectSetInteger|'
+ r'ObjectSetString|TextSetFont|TextOut|TextGetSize|'
+ r'ObjectDescription|ObjectGet|ObjectGetFiboDescription|'
+ r'ObjectGetShiftByValue|ObjectGetValueByShift|ObjectSet|'
+ r'ObjectSetFiboDescription|ObjectSetText|ObjectType|'
+ r'iAC|iAD|iADX|iAlligator|iAO|iATR|iBearsPower|'
+ r'iBands|iBandsOnArray|iBullsPower|iCCI|iCCIOnArray|'
+ r'iCustom|iDeMarker|iEnvelopes|iEnvelopesOnArray|'
+ r'iForce|iFractals|iGator|iIchimoku|iBWMFI|iMomentum|'
+ r'iMomentumOnArray|iMFI|iMA|iMAOnArray|iOsMA|iMACD|'
+ r'iOBV|iSAR|iRSI|iRSIOnArray|iRVI|iStdDev|iStdDevOnArray|'
+ r'iStochastic|iWPR|'
+ r'EventSetMillisecondTimer|EventSetTimer|'
+ r'EventKillTimer|EventChartCustom)\b', Name.Function),
+ (r'(CHARTEVENT_KEYDOWN|CHARTEVENT_MOUSE_MOVE|'
+ r'CHARTEVENT_OBJECT_CREATE|'
+ r'CHARTEVENT_OBJECT_CHANGE|CHARTEVENT_OBJECT_DELETE|'
+ r'CHARTEVENT_CLICK|'
+ r'CHARTEVENT_OBJECT_CLICK|CHARTEVENT_OBJECT_DRAG|'
+ r'CHARTEVENT_OBJECT_ENDEDIT|'
+ r'CHARTEVENT_CHART_CHANGE|CHARTEVENT_CUSTOM|'
+ r'CHARTEVENT_CUSTOM_LAST|'
+ r'PERIOD_CURRENT|PERIOD_M1|PERIOD_M2|PERIOD_M3|'
+ r'PERIOD_M4|PERIOD_M5|'
+ r'PERIOD_M6|PERIOD_M10|PERIOD_M12|PERIOD_M15|'
+ r'PERIOD_M20|PERIOD_M30|'
+ r'PERIOD_H1|PERIOD_H2|PERIOD_H3|PERIOD_H4|'
+ r'PERIOD_H6|PERIOD_H8|'
+ r'PERIOD_H12|PERIOD_D1|PERIOD_W1|PERIOD_MN1|'
+ r'CHART_IS_OBJECT|CHART_BRING_TO_TOP|'
+ r'CHART_MOUSE_SCROLL|CHART_EVENT_MOUSE_MOVE|'
+ r'CHART_EVENT_OBJECT_CREATE|'
+ r'CHART_EVENT_OBJECT_DELETE|CHART_MODE|CHART_FOREGROUND|'
+ r'CHART_SHIFT|'
+ r'CHART_AUTOSCROLL|CHART_SCALE|CHART_SCALEFIX|'
+ r'CHART_SCALEFIX_11|'
+ r'CHART_SCALE_PT_PER_BAR|CHART_SHOW_OHLC|'
+ r'CHART_SHOW_BID_LINE|'
+ r'CHART_SHOW_ASK_LINE|CHART_SHOW_LAST_LINE|'
+ r'CHART_SHOW_PERIOD_SEP|'
+ r'CHART_SHOW_GRID|CHART_SHOW_VOLUMES|'
+ r'CHART_SHOW_OBJECT_DESCR|'
+ r'CHART_VISIBLE_BARS|CHART_WINDOWS_TOTAL|'
+ r'CHART_WINDOW_IS_VISIBLE|'
+ r'CHART_WINDOW_HANDLE|CHART_WINDOW_YDISTANCE|'
+ r'CHART_FIRST_VISIBLE_BAR|'
+ r'CHART_WIDTH_IN_BARS|CHART_WIDTH_IN_PIXELS|'
+ r'CHART_HEIGHT_IN_PIXELS|'
+ r'CHART_COLOR_BACKGROUND|CHART_COLOR_FOREGROUND|'
+ r'CHART_COLOR_GRID|'
+ r'CHART_COLOR_VOLUME|CHART_COLOR_CHART_UP|'
+ r'CHART_COLOR_CHART_DOWN|'
+ r'CHART_COLOR_CHART_LINE|CHART_COLOR_CANDLE_BULL|'
+ r'CHART_COLOR_CANDLE_BEAR|'
+ r'CHART_COLOR_BID|CHART_COLOR_ASK|CHART_COLOR_LAST|'
+ r'CHART_COLOR_STOP_LEVEL|'
+ r'CHART_SHOW_TRADE_LEVELS|CHART_DRAG_TRADE_LEVELS|'
+ r'CHART_SHOW_DATE_SCALE|'
+ r'CHART_SHOW_PRICE_SCALE|CHART_SHIFT_SIZE|'
+ r'CHART_FIXED_POSITION|'
+ r'CHART_FIXED_MAX|CHART_FIXED_MIN|CHART_POINTS_PER_BAR|'
+ r'CHART_PRICE_MIN|'
+ r'CHART_PRICE_MAX|CHART_COMMENT|CHART_BEGIN|'
+ r'CHART_CURRENT_POS|CHART_END|'
+ r'CHART_BARS|CHART_CANDLES|CHART_LINE|CHART_VOLUME_HIDE|'
+ r'CHART_VOLUME_TICK|CHART_VOLUME_REAL|'
+ r'OBJ_VLINE|OBJ_HLINE|OBJ_TREND|OBJ_TRENDBYANGLE|OBJ_CYCLES|'
+ r'OBJ_CHANNEL|OBJ_STDDEVCHANNEL|OBJ_REGRESSION|OBJ_PITCHFORK|'
+ r'OBJ_GANNLINE|OBJ_GANNFAN|OBJ_GANNGRID|OBJ_FIBO|'
+ r'OBJ_FIBOTIMES|OBJ_FIBOFAN|OBJ_FIBOARC|OBJ_FIBOCHANNEL|'
+ r'OBJ_EXPANSION|OBJ_RECTANGLE|OBJ_TRIANGLE|OBJ_ELLIPSE|'
+ r'OBJ_ARROW_THUMB_UP|OBJ_ARROW_THUMB_DOWN|'
+ r'OBJ_ARROW_UP|OBJ_ARROW_DOWN|'
+ r'OBJ_ARROW_STOP|OBJ_ARROW_CHECK|OBJ_ARROW_LEFT_PRICE|'
+ r'OBJ_ARROW_RIGHT_PRICE|OBJ_ARROW_BUY|OBJ_ARROW_SELL|'
+ r'OBJ_ARROW|'
+ r'OBJ_TEXT|OBJ_LABEL|OBJ_BUTTON|OBJ_BITMAP|'
+ r'OBJ_BITMAP_LABEL|'
+ r'OBJ_EDIT|OBJ_EVENT|OBJ_RECTANGLE_LABEL|'
+ r'OBJPROP_TIME1|OBJPROP_PRICE1|OBJPROP_TIME2|'
+ r'OBJPROP_PRICE2|OBJPROP_TIME3|'
+ r'OBJPROP_PRICE3|OBJPROP_COLOR|OBJPROP_STYLE|'
+ r'OBJPROP_WIDTH|'
+ r'OBJPROP_BACK|OBJPROP_RAY|OBJPROP_ELLIPSE|'
+ r'OBJPROP_SCALE|'
+ r'OBJPROP_ANGLE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|'
+ r'OBJPROP_DEVIATION|OBJPROP_FONTSIZE|OBJPROP_CORNER|'
+ r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_FIBOLEVELS|'
+ r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
+ r'OBJPROP_FIRSTLEVEL|OBJPROP_COLOR|OBJPROP_STYLE|OBJPROP_WIDTH|'
+ r'OBJPROP_BACK|OBJPROP_ZORDER|OBJPROP_FILL|OBJPROP_HIDDEN|'
+ r'OBJPROP_SELECTED|OBJPROP_READONLY|OBJPROP_TYPE|OBJPROP_TIME|'
+ r'OBJPROP_SELECTABLE|OBJPROP_CREATETIME|OBJPROP_LEVELS|'
+ r'OBJPROP_LEVELCOLOR|OBJPROP_LEVELSTYLE|OBJPROP_LEVELWIDTH|'
+ r'OBJPROP_ALIGN|OBJPROP_FONTSIZE|OBJPROP_RAY_RIGHT|OBJPROP_RAY|'
+ r'OBJPROP_ELLIPSE|OBJPROP_ARROWCODE|OBJPROP_TIMEFRAMES|OBJPROP_ANCHOR|'
+ r'OBJPROP_XDISTANCE|OBJPROP_YDISTANCE|OBJPROP_DRAWLINES|OBJPROP_STATE|'
+ r'OBJPROP_CHART_ID|OBJPROP_XSIZE|OBJPROP_YSIZE|OBJPROP_XOFFSET|'
+ r'OBJPROP_YOFFSET|OBJPROP_PERIOD|OBJPROP_DATE_SCALE|OBJPROP_PRICE_SCALE|'
+ r'OBJPROP_CHART_SCALE|OBJPROP_BGCOLOR|OBJPROP_CORNER|OBJPROP_BORDER_TYPE|'
+ r'OBJPROP_BORDER_COLOR|OBJPROP_PRICE|OBJPROP_LEVELVALUE|OBJPROP_SCALE|'
+ r'OBJPROP_ANGLE|OBJPROP_DEVIATION|'
+ r'OBJPROP_NAME|OBJPROP_TEXT|OBJPROP_TOOLTIP|OBJPROP_LEVELTEXT|'
+ r'OBJPROP_FONT|OBJPROP_BMPFILE|OBJPROP_SYMBOL|'
+ r'BORDER_FLAT|BORDER_RAISED|BORDER_SUNKEN|ALIGN_LEFT|ALIGN_CENTER|'
+ r'ALIGN_RIGHT|ANCHOR_LEFT_UPPER|ANCHOR_LEFT|ANCHOR_LEFT_LOWER|'
+ r'ANCHOR_LOWER|ANCHOR_RIGHT_LOWER|ANCHOR_RIGHT|ANCHOR_RIGHT_UPPER|'
+ r'ANCHOR_UPPER|ANCHOR_CENTER|ANCHOR_TOP|ANCHOR_BOTTOM|'
+ r'CORNER_LEFT_UPPER|CORNER_LEFT_LOWER|CORNER_RIGHT_LOWER|'
+ r'CORNER_RIGHT_UPPER|'
+ r'OBJ_NO_PERIODS|EMPTY|OBJ_PERIOD_M1|OBJ_PERIOD_M5|OBJ_PERIOD_M15|'
+ r'OBJ_PERIOD_M30|OBJ_PERIOD_H1|OBJ_PERIOD_H4|OBJ_PERIOD_D1|'
+ r'OBJ_PERIOD_W1|OBJ_PERIOD_MN1|OBJ_ALL_PERIODS|'
+ r'GANN_UP_TREND|GANN_DOWN_TREND|'
+ r'((clr)?(Black|DarkGreen|DarkSlateGray|Olive|'
+ r'Green|Teal|Navy|Purple|'
+ r'Maroon|Indigo|MidnightBlue|DarkBlue|'
+ r'DarkOliveGreen|SaddleBrown|'
+ r'ForestGreen|OliveDrab|SeaGreen|'
+ r'DarkGoldenrod|DarkSlateBlue|'
+ r'Sienna|MediumBlue|Brown|DarkTurquoise|'
+ r'DimGray|LightSeaGreen|'
+ r'DarkViolet|FireBrick|MediumVioletRed|'
+ r'MediumSeaGreen|Chocolate|'
+ r'Crimson|SteelBlue|Goldenrod|MediumSpringGreen|'
+ r'LawnGreen|CadetBlue|'
+ r'DarkOrchid|YellowGreen|LimeGreen|OrangeRed|'
+ r'DarkOrange|Orange|'
+ r'Gold|Yellow|Chartreuse|Lime|SpringGreen|'
+ r'Aqua|DeepSkyBlue|Blue|'
+ r'Magenta|Red|Gray|SlateGray|Peru|BlueViolet|'
+ r'LightSlateGray|DeepPink|'
+ r'MediumTurquoise|DodgerBlue|Turquoise|RoyalBlue|'
+ r'SlateBlue|DarkKhaki|'
+ r'IndianRed|MediumOrchid|GreenYellow|'
+ r'MediumAquamarine|DarkSeaGreen|'
+ r'Tomato|RosyBrown|Orchid|MediumPurple|'
+ r'PaleVioletRed|Coral|CornflowerBlue|'
+ r'DarkGray|SandyBrown|MediumSlateBlue|'
+ r'Tan|DarkSalmon|BurlyWood|'
+ r'HotPink|Salmon|Violet|LightCoral|SkyBlue|'
+ r'LightSalmon|Plum|'
+ r'Khaki|LightGreen|Aquamarine|Silver|'
+ r'LightSkyBlue|LightSteelBlue|'
+ r'LightBlue|PaleGreen|Thistle|PowderBlue|'
+ r'PaleGoldenrod|PaleTurquoise|'
+ r'LightGray|Wheat|NavajoWhite|Moccasin|'
+ r'LightPink|Gainsboro|PeachPuff|'
+ r'Pink|Bisque|LightGoldenrod|BlanchedAlmond|'
+ r'LemonChiffon|Beige|'
+ r'AntiqueWhite|PapayaWhip|Cornsilk|'
+ r'LightYellow|LightCyan|Linen|'
+ r'Lavender|MistyRose|OldLace|WhiteSmoke|'
+ r'Seashell|Ivory|Honeydew|'
+ r'AliceBlue|LavenderBlush|MintCream|Snow|White))|'
+ r'SYMBOL_THUMBSUP|SYMBOL_THUMBSDOWN|'
+ r'SYMBOL_ARROWUP|SYMBOL_ARROWDOWN|'
+ r'SYMBOL_STOPSIGN|SYMBOL_CHECKSIGN|'
+ r'SYMBOL_LEFTPRICE|SYMBOL_RIGHTPRICE|'
+ r'PRICE_CLOSE|PRICE_OPEN|PRICE_HIGH|PRICE_LOW|'
+ r'PRICE_MEDIAN|PRICE_TYPICAL|PRICE_WEIGHTED|'
+ r'VOLUME_TICK|VOLUME_REAL|'
+ r'STO_LOWHIGH|STO_CLOSECLOSE|'
+ r'MODE_OPEN|MODE_LOW|MODE_HIGH|MODE_CLOSE|MODE_VOLUME|MODE_TIME|'
+ r'MODE_SMA|MODE_EMA|MODE_SMMA|MODE_LWMA|'
+ r'MODE_MAIN|MODE_SIGNAL|MODE_MAIN|'
+ r'MODE_PLUSDI|MODE_MINUSDI|MODE_UPPER|'
+ r'MODE_LOWER|MODE_GATORJAW|MODE_GATORTEETH|'
+ r'MODE_GATORLIPS|MODE_TENKANSEN|'
+ r'MODE_KIJUNSEN|MODE_SENKOUSPANA|'
+ r'MODE_SENKOUSPANB|MODE_CHINKOUSPAN|'
+ r'DRAW_LINE|DRAW_SECTION|DRAW_HISTOGRAM|'
+ r'DRAW_ARROW|DRAW_ZIGZAG|DRAW_NONE|'
+ r'STYLE_SOLID|STYLE_DASH|STYLE_DOT|'
+ r'STYLE_DASHDOT|STYLE_DASHDOTDOT|'
+ r'DRAW_NONE|DRAW_LINE|DRAW_SECTION|DRAW_HISTOGRAM|'
+ r'DRAW_ARROW|DRAW_ZIGZAG|DRAW_FILLING|'
+ r'INDICATOR_DATA|INDICATOR_COLOR_INDEX|'
+ r'INDICATOR_CALCULATIONS|INDICATOR_DIGITS|'
+ r'INDICATOR_HEIGHT|INDICATOR_LEVELS|'
+ r'INDICATOR_LEVELCOLOR|INDICATOR_LEVELSTYLE|'
+ r'INDICATOR_LEVELWIDTH|INDICATOR_MINIMUM|'
+ r'INDICATOR_MAXIMUM|INDICATOR_LEVELVALUE|'
+ r'INDICATOR_SHORTNAME|INDICATOR_LEVELTEXT|'
+ r'TERMINAL_BUILD|TERMINAL_CONNECTED|'
+ r'TERMINAL_DLLS_ALLOWED|TERMINAL_TRADE_ALLOWED|'
+ r'TERMINAL_EMAIL_ENABLED|'
+ r'TERMINAL_FTP_ENABLED|TERMINAL_MAXBARS|'
+ r'TERMINAL_CODEPAGE|TERMINAL_CPU_CORES|'
+ r'TERMINAL_DISK_SPACE|TERMINAL_MEMORY_PHYSICAL|'
+ r'TERMINAL_MEMORY_TOTAL|'
+ r'TERMINAL_MEMORY_AVAILABLE|TERMINAL_MEMORY_USED|'
+ r'TERMINAL_X64|'
+ r'TERMINAL_OPENCL_SUPPORT|TERMINAL_LANGUAGE|'
+ r'TERMINAL_COMPANY|TERMINAL_NAME|'
+ r'TERMINAL_PATH|TERMINAL_DATA_PATH|'
+ r'TERMINAL_COMMONDATA_PATH|'
+ r'MQL_PROGRAM_TYPE|MQL_DLLS_ALLOWED|'
+ r'MQL_TRADE_ALLOWED|MQL_DEBUG|'
+ r'MQL_PROFILER|MQL_TESTER|MQL_OPTIMIZATION|'
+ r'MQL_VISUAL_MODE|'
+ r'MQL_FRAME_MODE|MQL_LICENSE_TYPE|MQL_PROGRAM_NAME|'
+ r'MQL_PROGRAM_PATH|'
+ r'PROGRAM_SCRIPT|PROGRAM_EXPERT|'
+ r'PROGRAM_INDICATOR|LICENSE_FREE|'
+ r'LICENSE_DEMO|LICENSE_FULL|LICENSE_TIME|'
+ r'MODE_LOW|MODE_HIGH|MODE_TIME|MODE_BID|'
+ r'MODE_ASK|MODE_POINT|'
+ r'MODE_DIGITS|MODE_SPREAD|MODE_STOPLEVEL|'
+ r'MODE_LOTSIZE|MODE_TICKVALUE|'
+ r'MODE_TICKSIZE|MODE_SWAPLONG|'
+ r'MODE_SWAPSHORT|MODE_STARTING|'
+ r'MODE_EXPIRATION|MODE_TRADEALLOWED|'
+ r'MODE_MINLOT|MODE_LOTSTEP|MODE_MAXLOT|'
+ r'MODE_SWAPTYPE|MODE_PROFITCALCMODE|'
+ r'MODE_MARGINCALCMODE|MODE_MARGININIT|'
+ r'MODE_MARGINMAINTENANCE|MODE_MARGINHEDGED|'
+ r'MODE_MARGINREQUIRED|MODE_FREEZELEVEL|'
+ r'SUNDAY|MONDAY|TUESDAY|WEDNESDAY|THURSDAY|'
+ r'FRIDAY|SATURDAY|'
+ r'ACCOUNT_LOGIN|ACCOUNT_TRADE_MODE|'
+ r'ACCOUNT_LEVERAGE|'
+ r'ACCOUNT_LIMIT_ORDERS|ACCOUNT_MARGIN_SO_MODE|'
+ r'ACCOUNT_TRADE_ALLOWED|ACCOUNT_TRADE_EXPERT|'
+ r'ACCOUNT_BALANCE|'
+ r'ACCOUNT_CREDIT|ACCOUNT_PROFIT|ACCOUNT_EQUITY|'
+ r'ACCOUNT_MARGIN|'
+ r'ACCOUNT_FREEMARGIN|ACCOUNT_MARGIN_LEVEL|'
+ r'ACCOUNT_MARGIN_SO_CALL|'
+ r'ACCOUNT_MARGIN_SO_SO|ACCOUNT_NAME|'
+ r'ACCOUNT_SERVER|ACCOUNT_CURRENCY|'
+ r'ACCOUNT_COMPANY|ACCOUNT_TRADE_MODE_DEMO|'
+ r'ACCOUNT_TRADE_MODE_CONTEST|'
+ r'ACCOUNT_TRADE_MODE_REAL|ACCOUNT_STOPOUT_MODE_PERCENT|'
+ r'ACCOUNT_STOPOUT_MODE_MONEY|'
+ r'STAT_INITIAL_DEPOSIT|STAT_WITHDRAWAL|STAT_PROFIT|'
+ r'STAT_GROSS_PROFIT|'
+ r'STAT_GROSS_LOSS|STAT_MAX_PROFITTRADE|'
+ r'STAT_MAX_LOSSTRADE|STAT_CONPROFITMAX|'
+ r'STAT_CONPROFITMAX_TRADES|STAT_MAX_CONWINS|'
+ r'STAT_MAX_CONPROFIT_TRADES|'
+ r'STAT_CONLOSSMAX|STAT_CONLOSSMAX_TRADES|'
+ r'STAT_MAX_CONLOSSES|'
+ r'STAT_MAX_CONLOSS_TRADES|STAT_BALANCEMIN|'
+ r'STAT_BALANCE_DD|'
+ r'STAT_BALANCEDD_PERCENT|STAT_BALANCE_DDREL_PERCENT|'
+ r'STAT_BALANCE_DD_RELATIVE|STAT_EQUITYMIN|'
+ r'STAT_EQUITY_DD|'
+ r'STAT_EQUITYDD_PERCENT|STAT_EQUITY_DDREL_PERCENT|'
+ r'STAT_EQUITY_DD_RELATIVE|STAT_EXPECTED_PAYOFF|'
+ r'STAT_PROFIT_FACTOR|'
+ r'STAT_RECOVERY_FACTOR|STAT_SHARPE_RATIO|'
+ r'STAT_MIN_MARGINLEVEL|'
+ r'STAT_CUSTOM_ONTESTER|STAT_DEALS|STAT_TRADES|'
+ r'STAT_PROFIT_TRADES|'
+ r'STAT_LOSS_TRADES|STAT_SHORT_TRADES|STAT_LONG_TRADES|'
+ r'STAT_PROFIT_SHORTTRADES|STAT_PROFIT_LONGTRADES|'
+ r'STAT_PROFITTRADES_AVGCON|STAT_LOSSTRADES_AVGCON|'
+ r'SERIES_BARS_COUNT|SERIES_FIRSTDATE|SERIES_LASTBAR_DATE|'
+ r'SERIES_SERVER_FIRSTDATE|SERIES_TERMINAL_FIRSTDATE|'
+ r'SERIES_SYNCHRONIZED|'
+ r'OP_BUY|OP_SELL|OP_BUYLIMIT|OP_SELLLIMIT|'
+ r'OP_BUYSTOP|OP_SELLSTOP|'
+ r'TRADE_ACTION_DEAL|TRADE_ACTION_PENDING|'
+ r'TRADE_ACTION_SLTP|'
+ r'TRADE_ACTION_MODIFY|TRADE_ACTION_REMOVE|'
+ r'__DATE__|__DATETIME__|__LINE__|__FILE__|'
+ r'__PATH__|__FUNCTION__|'
+ r'__FUNCSIG__|__MQLBUILD__|__MQL4BUILD__|'
+ r'M_E|M_LOG2E|M_LOG10E|M_LN2|M_LN10|'
+ r'M_PI|M_PI_2|M_PI_4|M_1_PI|'
+ r'M_2_PI|M_2_SQRTPI|M_SQRT2|M_SQRT1_2|'
+ r'CHAR_MIN|CHAR_MAX|UCHAR_MAX|'
+ r'SHORT_MIN|SHORT_MAX|USHORT_MAX|'
+ r'INT_MIN|INT_MAX|UINT_MAX|'
+ r'LONG_MIN|LONG_MAX|ULONG_MAX|'
+ r'DBL_MIN|DBL_MAX|DBL_EPSILON|DBL_DIG|DBL_MANT_DIG|'
+ r'DBL_MAX_10_EXP|DBL_MAX_EXP|DBL_MIN_10_EXP|DBL_MIN_EXP|'
+ r'FLT_MIN|FLT_MAX|FLT_EPSILON|'
+ r'FLT_DIG|FLT_MANT_DIG|FLT_MAX_10_EXP|'
+ r'FLT_MAX_EXP|FLT_MIN_10_EXP|FLT_MIN_EXP|REASON_PROGRAM|'
+ r'REASON_REMOVE|REASON_RECOMPILE|'
+ r'REASON_CHARTCHANGE|REASON_CHARTCLOSE|'
+ r'REASON_PARAMETERS|REASON_ACCOUNT|'
+ r'REASON_TEMPLATE|REASON_INITFAILED|'
+ r'REASON_CLOSE|POINTER_INVALID|'
+ r'POINTER_DYNAMIC|POINTER_AUTOMATIC|'
+ r'NULL|EMPTY|EMPTY_VALUE|CLR_NONE|WHOLE_ARRAY|'
+ r'CHARTS_MAX|clrNONE|EMPTY_VALUE|INVALID_HANDLE|'
+ r'IS_DEBUG_MODE|IS_PROFILE_MODE|NULL|WHOLE_ARRAY|WRONG_VALUE|'
+ r'ERR_NO_ERROR|ERR_NO_RESULT|ERR_COMMON_ERROR|'
+ r'ERR_INVALID_TRADE_PARAMETERS|'
+ r'ERR_SERVER_BUSY|ERR_OLD_VERSION|ERR_NO_CONNECTION|'
+ r'ERR_NOT_ENOUGH_RIGHTS|'
+ r'ERR_TOO_FREQUENT_REQUESTS|ERR_MALFUNCTIONAL_TRADE|'
+ r'ERR_ACCOUNT_DISABLED|'
+ r'ERR_INVALID_ACCOUNT|ERR_TRADE_TIMEOUT|'
+ r'ERR_INVALID_PRICE|ERR_INVALID_STOPS|'
+ r'ERR_INVALID_TRADE_VOLUME|ERR_MARKET_CLOSED|'
+ r'ERR_TRADE_DISABLED|'
+ r'ERR_NOT_ENOUGH_MONEY|ERR_PRICE_CHANGED|'
+ r'ERR_OFF_QUOTES|ERR_BROKER_BUSY|'
+ r'ERR_REQUOTE|ERR_ORDER_LOCKED|'
+ r'ERR_LONG_POSITIONS_ONLY_ALLOWED|ERR_TOO_MANY_REQUESTS|'
+ r'ERR_TRADE_MODIFY_DENIED|ERR_TRADE_CONTEXT_BUSY|'
+ r'ERR_TRADE_EXPIRATION_DENIED|'
+ r'ERR_TRADE_TOO_MANY_ORDERS|ERR_TRADE_HEDGE_PROHIBITED|'
+ r'ERR_TRADE_PROHIBITED_BY_FIFO|'
+ r'FILE_READ|FILE_WRITE|FILE_BIN|FILE_CSV|FILE_TXT|'
+ r'FILE_ANSI|FILE_UNICODE|'
+ r'FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_REWRITE|'
+ r'FILE_COMMON|FILE_EXISTS|'
+ r'FILE_CREATE_DATE|FILE_MODIFY_DATE|'
+ r'FILE_ACCESS_DATE|FILE_SIZE|FILE_POSITION|'
+ r'FILE_END|FILE_LINE_END|FILE_IS_COMMON|'
+ r'FILE_IS_TEXT|FILE_IS_BINARY|'
+ r'FILE_IS_CSV|FILE_IS_ANSI|FILE_IS_READABLE|FILE_IS_WRITABLE|'
+ r'SEEK_SET|SEEK_CUR|SEEK_END|CP_ACP|'
+ r'CP_OEMCP|CP_MACCP|CP_THREAD_ACP|'
+ r'CP_SYMBOL|CP_UTF7|CP_UTF8|IDOK|IDCANCEL|IDABORT|'
+ r'IDRETRY|IDIGNORE|IDYES|IDNO|IDTRYAGAIN|IDCONTINUE|'
+ r'MB_OK|MB_OKCANCEL|MB_ABORTRETRYIGNORE|MB_YESNOCANCEL|'
+ r'MB_YESNO|MB_RETRYCANCEL|'
+ r'MB_CANCELTRYCONTINUE|MB_ICONSTOP|MB_ICONERROR|'
+ r'MB_ICONHAND|MB_ICONQUESTION|'
+ r'MB_ICONEXCLAMATION|MB_ICONWARNING|'
+ r'MB_ICONINFORMATION|MB_ICONASTERISK|'
+ r'MB_DEFBUTTON1|MB_DEFBUTTON2|MB_DEFBUTTON3|MB_DEFBUTTON4)\b',
+ Name.Constant),
+ inherit,
+ ],
+ }
diff --git a/pygments/lexers/dalvik.py b/pygments/lexers/dalvik.py
index de9b11fa..901b7c5a 100644
--- a/pygments/lexers/dalvik.py
+++ b/pygments/lexers/dalvik.py
@@ -5,7 +5,7 @@
Pygments lexers for Dalvik VM-related languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -21,7 +21,7 @@ class SmaliLexer(RegexLexer):
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Smali'
aliases = ['smali']
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index bdd9edc1..0754ba02 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -5,7 +5,7 @@
Lexers for .net languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
@@ -14,7 +14,7 @@ from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
-from pygments.util import get_choice_opt
+from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
@@ -44,7 +44,7 @@ class CSharpLexer(RegexLexer):
The default value is ``basic``.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'C#'
@@ -71,7 +71,7 @@ class CSharpLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
@@ -126,7 +126,7 @@ class CSharpLexer(RegexLexer):
}
def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
+ level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
@@ -156,7 +156,7 @@ class NemerleLexer(RegexLexer):
The default value is ``basic``.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Nemerle'
@@ -183,7 +183,7 @@ class NemerleLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in levels.items():
+ for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
@@ -284,7 +284,7 @@ class NemerleLexer(RegexLexer):
}
def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
+ level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
@@ -529,9 +529,9 @@ class VbNetAspxLexer(DelegatingLexer):
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
- For the F# language.
+ For the F# language (version 3.0).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'FSharp'
@@ -540,91 +540,134 @@ class FSharpLexer(RegexLexer):
mimetypes = ['text/x-fsharp']
keywords = [
- 'abstract', 'and', 'as', 'assert', 'base', 'begin', 'class',
- 'default', 'delegate', 'do', 'do!', 'done', 'downcast',
- 'downto', 'elif', 'else', 'end', 'exception', 'extern',
- 'false', 'finally', 'for', 'fun', 'function', 'global', 'if',
- 'in', 'inherit', 'inline', 'interface', 'internal', 'lazy',
- 'let', 'let!', 'match', 'member', 'module', 'mutable',
- 'namespace', 'new', 'null', 'of', 'open', 'or', 'override',
- 'private', 'public', 'rec', 'return', 'return!', 'sig',
- 'static', 'struct', 'then', 'to', 'true', 'try', 'type',
- 'upcast', 'use', 'use!', 'val', 'void', 'when', 'while',
- 'with', 'yield', 'yield!'
+ 'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
+ 'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
+ 'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
+ 'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
+ 'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
+ 'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
+ 'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
+ 'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
+ 'while', 'with', 'yield!', 'yield',
+ ]
+ # Reserved words; cannot hurt to color them as keywords too.
+ keywords += [
+ 'atomic', 'break', 'checked', 'component', 'const', 'constraint',
+ 'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
+ 'functor', 'include', 'method', 'mixin', 'object', 'parallel',
+ 'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
+ 'virtual', 'volatile',
]
keyopts = [
- '!=','#','&&','&','\(','\)','\*','\+',',','-\.',
- '->','-','\.\.','\.','::',':=',':>',':',';;',';','<-',
- '<','>]','>','\?\?','\?','\[<','\[>','\[\|','\[',
- ']','_','`','{','\|\]','\|','}','~','<@','=','@>'
+ '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
+ '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
+ '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
+ '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
- word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'not', 'or']
+ word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
- primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array',
- 'byte', 'sbyte', 'int16', 'uint16', 'uint32', 'int64', 'uint64'
- 'nativeint', 'unativeint', 'decimal', 'void', 'float32', 'single',
- 'double']
+ primitives = [
+ 'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
+ 'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
+ 'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
+ 'list', 'exn', 'obj', 'enum',
+ ]
+
+ # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
+ # http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
- (r'\\[\\\"\'ntbr]', String.Escape),
+ (r'\\[\\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
- (r'\\x[0-9a-fA-F]{2}', String.Escape),
+ (r'\\u[0-9a-fA-F]{4}', String.Escape),
+ (r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
- (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
- (r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
+ (r'\(\)|\[\]', Name.Builtin.Pseudo),
+ (r'\b(?<!\.)([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
- (r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
+ (r'\b([A-Z][A-Za-z0-9_\']*)', Name),
+ (r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
+
+ (r'@"', String, 'lstring'),
+ (r'"""', String, 'tqs'),
+ (r'"', String, 'string'),
+
+ (r'\b(open|module)(\s+)([a-zA-Z0-9_.]+)',
+ bygroups(Keyword, Text, Name.Namespace)),
+ (r'\b(let!?)(\s+)([a-zA-Z0-9_]+)',
+ bygroups(Keyword, Text, Name.Variable)),
+ (r'\b(type)(\s+)([a-zA-Z0-9_]+)',
+ bygroups(Keyword, Text, Name.Class)),
+ (r'\b(member|override)(\s+)([a-zA-Z0-9_]+)(\.)([a-zA-Z0-9_]+)',
+ bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
-
- (r'#[ \t]*(if|endif|else|line|nowarn|light)\b.*?\n',
+ (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
- (r'\d[\d_]*', Number.Integer),
- (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
- (r'0[oO][0-7][0-7_]*', Number.Oct),
- (r'0[bB][01][01_]*', Number.Binary),
- (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+ (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
+ (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
+ (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
+ (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Binary),
+ (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
+ Number.Float),
- (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+ (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
- (r'"', String.Double, 'string'),
-
(r'[~?][a-z][\w\']*:', Name.Variable),
],
+ 'dotted': [
+ (r'\s+', Text),
+ (r'\.', Punctuation),
+ (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
+ (r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
+ (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
+ # e.g. dictionary index access
+ (r'', Text, '#pop'),
+ ],
'comment': [
- (r'[^(*)]+', Comment),
+ (r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
- (r'[(*)]', Comment),
+ # comments cannot be closed within strings in comments
+ (r'@"', String, 'lstring'),
+ (r'"""', String, 'tqs'),
+ (r'"', String, 'string'),
+ (r'[(*)@]', Comment),
],
'string': [
- (r'[^\\"]+', String.Double),
+ (r'[^\\"]+', String),
include('escape-sequence'),
- (r'\\\n', String.Double),
- (r'"', String.Double, '#pop'),
+ (r'\\\n', String),
+ (r'\n', String), # newlines are allowed in any string
+ (r'"B?', String, '#pop'),
],
- 'dotted': [
- (r'\s+', Text),
- (r'\.', Punctuation),
- (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
- (r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
- (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
+ 'lstring': [
+ (r'[^"]+', String),
+ (r'\n', String),
+ (r'""', String),
+ (r'"B?', String, '#pop'),
+ ],
+ 'tqs': [
+ (r'[^"]+', String),
+ (r'\n', String),
+ (r'"""B?', String, '#pop'),
+ (r'"', String),
],
}
diff --git a/pygments/lexers/foxpro.py b/pygments/lexers/foxpro.py
index 51cd499b..99a65ce7 100644
--- a/pygments/lexers/foxpro.py
+++ b/pygments/lexers/foxpro.py
@@ -5,7 +5,7 @@
Simple lexer for Microsoft Visual FoxPro source code.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -24,11 +24,11 @@ class FoxProLexer(RegexLexer):
FoxPro syntax allows to shorten all keywords and function names
to 4 characters. Shortened forms are not recognized by this lexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'FoxPro'
- aliases = ['Clipper', 'XBase']
+ aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
mimetype = []
diff --git a/pygments/lexers/functional.py b/pygments/lexers/functional.py
index 889e7ec6..122114fa 100644
--- a/pygments/lexers/functional.py
+++ b/pygments/lexers/functional.py
@@ -5,7 +5,7 @@
Lexers for functional languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -16,9 +16,13 @@ from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Literal, Generic, Error
__all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer',
- 'LiterateHaskellLexer', 'SMLLexer', 'OcamlLexer', 'ErlangLexer',
- 'ErlangShellLexer', 'OpaLexer', 'CoqLexer', 'NewLispLexer',
- 'ElixirLexer', 'ElixirConsoleLexer', 'KokaLexer']
+ 'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
+ 'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer',
+ 'OpaLexer', 'CoqLexer', 'NewLispLexer', 'NixLexer', 'ElixirLexer',
+ 'ElixirConsoleLexer', 'KokaLexer', 'IdrisLexer', 'LiterateIdrisLexer']
+
+
+line_re = re.compile('.*?\n')
class RacketLexer(RegexLexer):
@@ -26,7 +30,7 @@ class RacketLexer(RegexLexer):
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly known as
PLT Scheme).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Racket'
@@ -67,7 +71,7 @@ class RacketLexer(RegexLexer):
'syntax/loc', 'time', 'transcript-off', 'transcript-on', 'unless',
'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing',
'when', 'with-continuation-mark', 'with-handlers',
- 'with-handlers*', 'with-syntax', 'λ'
+ 'with-handlers*', 'with-syntax', u'λ'
]
# From namespace-mapped-symbols
@@ -595,7 +599,7 @@ class SchemeLexer(RegexLexer):
It supports the full Scheme syntax as defined in R5RS.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
@@ -716,10 +720,10 @@ class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Common Lisp'
- aliases = ['common-lisp', 'cl']
+ aliases = ['common-lisp', 'cl', 'lisp', 'elisp', 'emacs']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
@@ -808,6 +812,8 @@ class CommonLispLexer(RegexLexer):
(r'"(\\.|\\\n|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
+ (r"::" + symbol, String.Symbol),
+ (r":#" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
@@ -892,7 +898,7 @@ class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
@@ -979,6 +985,8 @@ class HaskellLexer(RegexLexer):
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
+ # NOTE: the next four states are shared in the AgdaLexer; make sure
+ # any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
@@ -988,7 +996,7 @@ class HaskellLexer(RegexLexer):
],
'character': [
# Allows multi-chars, incorrectly.
- (r"[^\\']", String.Char),
+ (r"[^\\']'", String.Char, '#pop'),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
@@ -1009,12 +1017,187 @@ class HaskellLexer(RegexLexer):
}
-line_re = re.compile('.*?\n')
-bird_re = re.compile(r'(>[ \t]*)(.*\n)')
+class IdrisLexer(RegexLexer):
+ """
+ A lexer for the dependently typed programming language Idris.
+
+ Based on the Haskell and Agda Lexer.
-class LiterateHaskellLexer(Lexer):
+ .. versionadded:: 2.0
"""
- For Literate Haskell (Bird-style or LaTeX) source.
+ name = 'Idris'
+ aliases = ['idris', 'idr']
+ filenames = ['*.idr']
+ mimetypes = ['text/x-idris']
+
+ reserved = ['case','class','data','default','using','do','else',
+ 'if','in','infix[lr]?','instance','rewrite','auto',
+ 'namespace','codata','mutual','private','public','abstract',
+ 'total','partial',
+ 'let','proof','of','then','static','where','_','with',
+ 'pattern', 'term', 'syntax','prefix',
+ 'postulate','parameters','record','dsl','impossible','implicit',
+ 'tactics','intros','intro','compute','refine','exact','trivial']
+
+ ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
+ 'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
+ 'DC[1-4]','NAK','SYN','ETB','CAN',
+ 'EM','SUB','ESC','[FGRU]S','SP','DEL']
+
+ annotations = ['assert_total','lib','link','include','provide','access',
+ 'default']
+
+ tokens = {
+ 'root': [
+ # Declaration
+ (r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)',
+ bygroups(Text, Name.Function, Text, Operator.Word, Text)),
+ # Comments
+ (r'^(\s*)(%%%s)' % '|'.join(annotations),
+ bygroups(Text, Keyword.Reserved)),
+ (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
+ (r'{-', Comment.Multiline, 'comment'),
+ # Identifiers
+ (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
+ (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
+ (r"('')?[A-Z][\w\']*", Keyword.Type),
+ (r'[a-z][A-Za-z0-9_\']*', Text),
+ # Special Symbols
+ (r'(<-|::|->|=>|=)', Operator.Word), # specials
+ (r'([\(\)\{\}\[\]:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
+ # Numbers
+ (r'\d+[eE][+-]?\d+', Number.Float),
+ (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
+ (r'0[xX][\da-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ # Strings
+ (r"'", String.Char, 'character'),
+ (r'"', String, 'string'),
+ (r'[^\s\(\)\{\}]+', Text),
+ (r'\s+?', Text), # Whitespace
+ ],
+ 'module': [
+ (r'\s+', Text),
+ (r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
+ bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
+ (r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
+ ],
+ 'funclist': [
+ (r'\s+', Text),
+ (r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
+ (r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
+ (r'--.*$', Comment.Single),
+ (r'{-', Comment.Multiline, 'comment'),
+ (r',', Punctuation),
+ (r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
+ # (HACK, but it makes sense to push two instances, believe me)
+ (r'\(', Punctuation, ('funclist', 'funclist')),
+ (r'\)', Punctuation, '#pop:2'),
+ ],
+ # NOTE: the next four states are shared in the AgdaLexer; make sure
+ # any change is compatible with Agda as well or copy over and change
+ 'comment': [
+ # Multiline Comments
+ (r'[^-{}]+', Comment.Multiline),
+ (r'{-', Comment.Multiline, '#push'),
+ (r'-}', Comment.Multiline, '#pop'),
+ (r'[-{}]', Comment.Multiline),
+ ],
+ 'character': [
+ # Allows multi-chars, incorrectly.
+ (r"[^\\']", String.Char),
+ (r"\\", String.Escape, 'escape'),
+ ("'", String.Char, '#pop'),
+ ],
+ 'string': [
+ (r'[^\\"]+', String),
+ (r"\\", String.Escape, 'escape'),
+ ('"', String, '#pop'),
+ ],
+ 'escape': [
+ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
+ (r'\^[][A-Z@\^_]', String.Escape, '#pop'),
+ ('|'.join(ascii), String.Escape, '#pop'),
+ (r'o[0-7]+', String.Escape, '#pop'),
+ (r'x[\da-fA-F]+', String.Escape, '#pop'),
+ (r'\d+', String.Escape, '#pop'),
+ (r'\s+\\', String.Escape, '#pop')
+ ],
+ }
+
+
+class AgdaLexer(RegexLexer):
+ """
+ For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
+ dependently typed functional programming language and proof assistant.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Agda'
+ aliases = ['agda']
+ filenames = ['*.agda']
+ mimetypes = ['text/x-agda']
+
+ reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',
+ 'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
+ 'infixl', 'infixr', 'let', 'open', 'pattern', 'primitive',
+ 'private', 'mutual', 'quote', 'quoteGoal', 'quoteTerm',
+ 'record', 'syntax', 'rewrite', 'unquote', 'using', 'where',
+ 'with']
+
+ tokens = {
+ 'root': [
+ # Declaration
+ (r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)',
+ bygroups(Text, Name.Function, Text, Operator.Word, Text)),
+ # Comments
+ (r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
+ (r'{-', Comment.Multiline, 'comment'),
+ # Holes
+ (r'{!', Comment.Directive, 'hole'),
+ # Lexemes:
+ # Identifiers
+ (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
+ (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
+ (r'\b(Set|Prop)\b', Keyword.Type),
+ # Special Symbols
+ (r'(\(|\)|\{|\})', Operator),
+ (u'(\\.{1,3}|\\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word),
+ # Numbers
+ (r'\d+[eE][+-]?\d+', Number.Float),
+ (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
+ (r'0[xX][\da-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ # Strings
+ (r"'", String.Char, 'character'),
+ (r'"', String, 'string'),
+ (r'[^\s\(\)\{\}]+', Text),
+ (r'\s+?', Text), # Whitespace
+ ],
+ 'hole': [
+ # Holes
+ (r'[^!{}]+', Comment.Directive),
+ (r'{!', Comment.Directive, '#push'),
+ (r'!}', Comment.Directive, '#pop'),
+ (r'[!{}]', Comment.Directive),
+ ],
+ 'module': [
+ (r'{-', Comment.Multiline, 'comment'),
+ (r'[a-zA-Z][a-zA-Z0-9_.]*', Name, '#pop'),
+ (r'[^a-zA-Z]*', Text)
+ ],
+ 'comment': HaskellLexer.tokens['comment'],
+ 'character': HaskellLexer.tokens['character'],
+ 'string': HaskellLexer.tokens['string'],
+ 'escape': HaskellLexer.tokens['escape']
+ }
+
+
+class LiterateLexer(Lexer):
+ """
+ Base class for lexers of literate file formats based on LaTeX or Bird-style
+ (prefixing each code line with ">").
Additional options accepted:
@@ -1022,17 +1205,15 @@ class LiterateHaskellLexer(Lexer):
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
-
- *New in Pygments 0.9.*
"""
- name = 'Literate Haskell'
- aliases = ['lhs', 'literate-haskell']
- filenames = ['*.lhs']
- mimetypes = ['text/x-literate-haskell']
- def get_tokens_unprocessed(self, text):
- hslexer = HaskellLexer(**self.options)
+ bird_re = re.compile(r'(>[ \t]*)(.*\n)')
+
+ def __init__(self, baselexer, **options):
+ self.baselexer = baselexer
+ Lexer.__init__(self, **options)
+ def get_tokens_unprocessed(self, text):
style = self.options.get('litstyle')
if style is None:
style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
@@ -1043,7 +1224,7 @@ class LiterateHaskellLexer(Lexer):
# bird-style
for match in line_re.finditer(text):
line = match.group()
- m = bird_re.match(line)
+ m = self.bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
@@ -1054,7 +1235,6 @@ class LiterateHaskellLexer(Lexer):
# latex-style
from pygments.lexers.text import TexLexer
lxlexer = TexLexer(**self.options)
-
codelines = 0
latex = ''
for match in line_re.finditer(text):
@@ -1075,15 +1255,84 @@ class LiterateHaskellLexer(Lexer):
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
- for item in do_insertions(insertions, hslexer.get_tokens_unprocessed(code)):
+ for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)):
yield item
+class LiterateHaskellLexer(LiterateLexer):
+ """
+ For Literate Haskell (Bird-style or LaTeX) source.
+
+ Additional options accepted:
+
+ `litstyle`
+ If given, must be ``"bird"`` or ``"latex"``. If not given, the style
+ is autodetected: if the first non-whitespace character in the source
+ is a backslash or percent character, LaTeX is assumed, else Bird.
+
+ .. versionadded:: 0.9
+ """
+ name = 'Literate Haskell'
+ aliases = ['lhs', 'literate-haskell', 'lhaskell']
+ filenames = ['*.lhs']
+ mimetypes = ['text/x-literate-haskell']
+
+ def __init__(self, **options):
+ hslexer = HaskellLexer(**options)
+ LiterateLexer.__init__(self, hslexer, **options)
+
+
+class LiterateIdrisLexer(LiterateLexer):
+ """
+ For Literate Idris (Bird-style or LaTeX) source.
+
+ Additional options accepted:
+
+ `litstyle`
+ If given, must be ``"bird"`` or ``"latex"``. If not given, the style
+ is autodetected: if the first non-whitespace character in the source
+ is a backslash or percent character, LaTeX is assumed, else Bird.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Literate Idris'
+ aliases = ['lidr', 'literate-idris', 'lidris']
+ filenames = ['*.lidr']
+ mimetypes = ['text/x-literate-idris']
+
+ def __init__(self, **options):
+ hslexer = IdrisLexer(**options)
+ LiterateLexer.__init__(self, hslexer, **options)
+
+
+class LiterateAgdaLexer(LiterateLexer):
+ """
+ For Literate Agda source.
+
+ Additional options accepted:
+
+ `litstyle`
+ If given, must be ``"bird"`` or ``"latex"``. If not given, the style
+ is autodetected: if the first non-whitespace character in the source
+ is a backslash or percent character, LaTeX is assumed, else Bird.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Literate Agda'
+ aliases = ['lagda', 'literate-agda']
+ filenames = ['*.lagda']
+ mimetypes = ['text/x-literate-agda']
+
+ def __init__(self, **options):
+ agdalexer = AgdaLexer(**options)
+ LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
+
+
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Standard ML'
@@ -1409,7 +1658,7 @@ class OcamlLexer(RegexLexer):
"""
For the OCaml language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'OCaml'
@@ -1503,7 +1752,7 @@ class ErlangLexer(RegexLexer):
Blame Jeremy Thurgood (http://jerith.za.net/).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Erlang'
@@ -1608,7 +1857,7 @@ class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Erlang erl session'
aliases = ['erl']
@@ -1651,7 +1900,7 @@ class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Opa'
@@ -1974,7 +2223,7 @@ class CoqLexer(RegexLexer):
"""
For the `Coq <http://coq.inria.fr/>`_ theorem prover.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Coq'
@@ -2115,7 +2364,7 @@ class NewLispLexer(RegexLexer):
"""
For `newLISP. <www.newlisp.org>`_ source code (version 10.3.0).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'NewLisp'
@@ -2242,11 +2491,133 @@ class NewLispLexer(RegexLexer):
}
class NixLexer(RegexLexer):
    """
    For the `Nix language <http://nixos.org/nix/>`_.

    .. versionadded:: 2.0
    """

    name = 'Nix'
    aliases = ['nixos', 'nix']
    filenames = ['*.nix']
    mimetypes = ['text/x-nix']

    flags = re.MULTILINE | re.UNICODE

    keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
                'else', 'then', '...']
    builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
                'map', 'removeAttrs', 'throw', 'toString', 'derivation']
    operators = ['++', '+', '?', '.', '!', '//', '==',
                 '!=', '&&', '||', '->', '=']

    punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]

    tokens = {
        'root': [
            # comments starting with #
            (r'#.*$', Comment.Single),

            # multiline comments
            (r'/\*', Comment.Multiline, 'comment'),

            # whitespace
            (r'\s+', Text),

            # keywords; only append \b to keywords ending in a word
            # character -- '\b' after the '...' ellipsis can never match,
            # since a word boundary needs a word character on one side
            ('(%s)' % '|'.join(re.escape(entry) +
                               ('\\b' if entry[-1].isalnum() else '')
                               for entry in keywords), Keyword),

            # highlight the builtins
            ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
             Name.Builtin),

            (r'\b(true|false|null)\b', Name.Constant),

            # operators
            ('(%s)' % '|'.join(re.escape(entry) for entry in operators),
             Operator),

            # word operators
            (r'\b(or|and)\b', Operator.Word),

            # punctuations
            ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation),

            # integers
            (r'[0-9]+', Number.Integer),

            # strings
            (r'"', String.Double, 'doublequote'),
            (r"''", String.Single, 'singlequote'),

            # paths
            (r'[a-zA-Z0-9._+-]*(\/[a-zA-Z0-9._+-]+)+', Literal),
            (r'\<[a-zA-Z0-9._+-]+(\/[a-zA-Z0-9._+-]+)*\>', Literal),

            # urls
            (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9%/?:@&=+$,\\_.!~*\'-]+', Literal),

            # names of variables
            (r'[a-zA-Z0-9-_]+\s*=', String.Symbol),
            (r'[a-zA-Z_][a-zA-Z0-9_\'-]*', Text),

        ],
        'comment': [
            (r'[^/\*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[\*/]', Comment.Multiline),
        ],
        'singlequote': [
            # longest escapes first, then the '' terminator
            (r"'''", String.Escape),
            (r"''\$\{", String.Escape),
            (r"''\n", String.Escape),
            (r"''\r", String.Escape),
            (r"''\t", String.Escape),
            (r"''", String.Single, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r"[^']", String.Single),
        ],
        'doublequote': [
            # two-character escapes must precede the lone-backslash rule,
            # otherwise r'\\' consumes the backslash alone and the following
            # '"' pops the string prematurely; the original r'\\${' also used
            # an unescaped '$' (end-of-line anchor) and could never match
            (r'\\"', String.Escape),
            (r'\\\$\{', String.Escape),
            (r'\\', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r'[^"]', String.Double),
        ],
        'antiquote': [
            (r"}", String.Interpol, '#pop'),
            # TODO: we should probably escape also here ''${ \${
            (r"\$\{", String.Interpol, '#push'),
            include('root'),
        ],
    }

    def analyse_text(text):
        # Heuristic score built from common Nix constructs; each hit adds a
        # fixed weight, capped implicitly at the sum of all weights.
        rv = 0.0
        # TODO: let/in
        if re.search(r'import.+?<[^>]+>', text):
            rv += 0.4
        if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
            rv += 0.4
        if re.search(r'with\s+[a-zA-Z\.]+;', text):
            rv += 0.2
        if re.search(r'inherit\s+[a-zA-Z()\.];', text):
            rv += 0.2
        if re.search(r'=\s+mkIf\s+', text):
            rv += 0.4
        if re.search(r'\{[a-zA-Z,\s]+\}:', text):
            rv += 0.1
        return rv
+
+
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Elixir'
@@ -2261,7 +2632,7 @@ class ElixirLexer(RegexLexer):
(r'(%[A-Ba-z])?"""(?:.|\n)*?"""', String.Doc),
(r"'''(?:.|\n)*?'''", String.Doc),
(r'"', String.Double, 'dqs'),
- (r"'.*'", String.Single),
+ (r"'.*?'", String.Single),
(r'(?<!\w)\?(\\(x\d{1,2}|\h{1,2}(?!\h)\b|0[0-7]{0,2}(?![0-7])\b|'
r'[^x0MC])|(\\[MC]-)+\w|[^\s\\])', String.Other)
]
@@ -2357,7 +2728,7 @@ class ElixirConsoleLexer(Lexer):
iex> length [head | tail]
3
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Elixir iex session'
@@ -2400,10 +2771,10 @@ class ElixirConsoleLexer(Lexer):
class KokaLexer(RegexLexer):
"""
- Lexer for the `Koka <http://research.microsoft.com/en-us/projects/koka/>`_
+ Lexer for the `Koka <http://koka.codeplex.com>`_
language.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Koka'
@@ -2412,7 +2783,7 @@ class KokaLexer(RegexLexer):
mimetypes = ['text/x-koka']
keywords = [
- 'infix', 'infixr', 'infixl', 'prefix', 'postfix',
+ 'infix', 'infixr', 'infixl',
'type', 'cotype', 'rectype', 'alias',
'struct', 'con',
'fun', 'function', 'val', 'var',
@@ -2451,7 +2822,12 @@ class KokaLexer(RegexLexer):
sboundary = '(?!'+symbols+')'
# name boundary: a keyword should not be followed by any of these
- boundary = '(?![a-zA-Z0-9_\\-])'
+ boundary = '(?![\w/])'
+
+ # koka token abstractions
+ tokenType = Name.Attribute
+ tokenTypeDef = Name.Class
+ tokenConstructor = Generic.Emph
# main lexer
tokens = {
@@ -2459,41 +2835,51 @@ class KokaLexer(RegexLexer):
include('whitespace'),
# go into type mode
- (r'::?' + sboundary, Keyword.Type, 'type'),
- (r'alias' + boundary, Keyword, 'alias-type'),
- (r'struct' + boundary, Keyword, 'struct-type'),
- (r'(%s)' % '|'.join(typeStartKeywords) + boundary, Keyword, 'type'),
+ (r'::?' + sboundary, tokenType, 'type'),
+ (r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
+ 'alias-type'),
+ (r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
+ 'struct-type'),
+ ((r'(%s)' % '|'.join(typeStartKeywords)) +
+ r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
+ 'type'),
# special sequences of tokens (we use ?: for non-capturing group as
# required by 'bygroups')
- (r'(module)(\s*)((?:interface)?)(\s*)'
- r'((?:[a-z](?:[a-zA-Z0-9_]|\-[a-zA-Z])*\.)*'
- r'[a-z](?:[a-zA-Z0-9_]|\-[a-zA-Z])*)',
- bygroups(Keyword, Text, Keyword, Text, Name.Namespace)),
- (r'(import)(\s+)((?:[a-z](?:[a-zA-Z0-9_]|\-[a-zA-Z])*\.)*[a-z]'
- r'(?:[a-zA-Z0-9_]|\-[a-zA-Z])*)(\s*)((?:as)?)'
- r'((?:[A-Z](?:[a-zA-Z0-9_]|\-[a-zA-Z])*)?)',
- bygroups(Keyword, Text, Name.Namespace, Text, Keyword,
- Name.Namespace)),
+ (r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
+ bygroups(Keyword, Text, Keyword, Name.Namespace)),
+ (r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
+ r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)'
+ r'((?:[a-z]\w*/)*[a-z]\w*))?',
+ bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,
+ Keyword, Name.Namespace)),
+
+ (r'(^(?:(?:public|private)\s*)?(?:function|fun|val))'
+ r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
+ bygroups(Keyword, Text, Name.Function)),
+ (r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?'
+ r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
+ bygroups(Keyword, Text, Keyword, Name.Function)),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
(r'(%s)' % '|'.join(keywords) + boundary, Keyword),
(r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
- (r'::|:=|\->|[=\.:]' + sboundary, Keyword),
- (r'\-' + sboundary, Generic.Strong),
+ (r'::?|:=|\->|[=\.]' + sboundary, Keyword),
# names
- (r'[A-Z]([a-zA-Z0-9_]|\-[a-zA-Z])*(?=\.)', Name.Namespace),
- (r'[A-Z]([a-zA-Z0-9_]|\-[a-zA-Z])*(?!\.)', Name.Class),
- (r'[a-z]([a-zA-Z0-9_]|\-[a-zA-Z])*', Name),
- (r'_([a-zA-Z0-9_]|\-[a-zA-Z])*', Name.Variable),
+ (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
+ bygroups(Name.Namespace, tokenConstructor)),
+ (r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
+ (r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
+ bygroups(Name.Namespace, Name)),
+ (r'_\w*', Name.Variable),
# literal string
(r'@"', String.Double, 'litstring'),
# operators
- (symbols, Operator),
+ (symbols + "|/(?![\*/])", Operator),
(r'`', Operator),
(r'[\{\}\(\)\[\];,]', Punctuation),
@@ -2520,17 +2906,17 @@ class KokaLexer(RegexLexer):
# type started by colon
'type': [
- (r'[\(\[<]', Keyword.Type, 'type-nested'),
+ (r'[\(\[<]', tokenType, 'type-nested'),
include('type-content')
],
# type nested in brackets: can contain parameters, comma etc.
'type-nested': [
- (r'[\)\]>]', Keyword.Type, '#pop'),
- (r'[\(\[<]', Keyword.Type, 'type-nested'),
- (r',', Keyword.Type),
- (r'([a-z](?:[a-zA-Z0-9_]|\-[a-zA-Z])*)(\s*)(:)(?!:)',
- bygroups(Name.Variable,Text,Keyword.Type)), # parameter name
+ (r'[\)\]>]', tokenType, '#pop'),
+ (r'[\(\[<]', tokenType, 'type-nested'),
+ (r',', tokenType),
+ (r'([a-z]\w*)(\s*)(:)(?!:)',
+ bygroups(Name, Text, tokenType)), # parameter name
include('type-content')
],
@@ -2539,23 +2925,23 @@ class KokaLexer(RegexLexer):
include('whitespace'),
# keywords
- (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
+ (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
(r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
Keyword, '#pop'), # need to match because names overlap...
# kinds
- (r'[EPH]' + boundary, Keyword.Type),
- (r'[*!]', Keyword.Type),
+ (r'[EPHVX]' + boundary, tokenType),
# type names
- (r'[A-Z]([a-zA-Z0-9_]|\-[a-zA-Z])*(?=\.)', Name.Namespace),
- (r'[A-Z]([a-zA-Z0-9_]|\-[a-zA-Z])*(?!\.)', Name.Class),
- (r'[a-z][0-9]*(?![a-zA-Z_\-])', Keyword.Type), # Generic.Emph
- (r'_([a-zA-Z0-9_]|\-[a-zA-Z])*', Keyword.Type), # Generic.Emph
- (r'[a-z]([a-zA-Z0-9_]|\-[a-zA-Z])*', Keyword.Type),
+ (r'[a-z][0-9]*(?![\w/])', tokenType ),
+ (r'_\w*', tokenType.Variable), # Generic.Emph
+ (r'((?:[a-z]\w*/)*)([A-Z]\w*)',
+ bygroups(Name.Namespace, tokenType)),
+ (r'((?:[a-z]\w*/)*)([a-z]\w+)',
+ bygroups(Name.Namespace, tokenType)),
# type keyword operators
- (r'::|\->|[\.:|]', Keyword.Type),
+ (r'::|\->|[\.:|]', tokenType),
#catchall
(r'', Text, '#pop')
@@ -2563,6 +2949,7 @@ class KokaLexer(RegexLexer):
# comments and literals
'whitespace': [
+ (r'\n\s*#.*$', Comment.Preproc),
(r'\s+', Text),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment.Single)
@@ -2589,11 +2976,10 @@ class KokaLexer(RegexLexer):
(r'[\'\n]', String.Char, '#pop'),
],
'escape-sequence': [
- (r'\\[abfnrtv0\\\"\'\?]', String.Escape),
+ (r'\\[nrt\\\"\']', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
# Yes, \U literals are 6 hex digits.
(r'\\U[0-9a-fA-F]{6}', String.Escape)
]
}
-
diff --git a/pygments/lexers/hdl.py b/pygments/lexers/hdl.py
index 57ffc349..1ebe4e5c 100644
--- a/pygments/lexers/hdl.py
+++ b/pygments/lexers/hdl.py
@@ -5,7 +5,7 @@
Lexers for hardware descriptor languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -22,7 +22,7 @@ class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'verilog'
aliases = ['verilog', 'v']
@@ -134,7 +134,7 @@ class SystemVerilogLexer(RegexLexer):
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
@@ -215,12 +215,12 @@ class SystemVerilogLexer(RegexLexer):
r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
- r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
+ r'\$fgets|\$finish|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
- r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
- r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
+ r'\$monitoron|\$plusargs|\$random|\$readmemb|\$readmemh|\$rewind|'
+ r'\$sformat|\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
r'\$writememh|\$writeo)\b' , Name.Builtin ),
@@ -274,7 +274,7 @@ class VhdlLexer(RegexLexer):
"""
For VHDL source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'vhdl'
aliases = ['vhdl']
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 59f26811..2697053e 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -5,7 +5,7 @@
Pygments lexers for JVM languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -15,13 +15,13 @@ from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
-from pygments.util import get_choice_opt
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'ClojureScriptLexer',
- 'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer']
+ 'KotlinLexer', 'XtendLexer', 'AspectJLexer', 'CeylonLexer',
+ 'PigLexer']
class JavaLexer(RegexLexer):
@@ -38,11 +38,6 @@ class JavaLexer(RegexLexer):
tokens = {
'root': [
- # method names
- (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]<>]*\s+)+?)' # return arguments
- r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
- r'(\s*)(\()', # signature start
- bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
@@ -55,6 +50,11 @@ class JavaLexer(RegexLexer):
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
+ # method names
+ (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]<>]*\s+)+?)' # return arguments
+ r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Text, Operator)),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'),
@@ -67,7 +67,7 @@ class JavaLexer(RegexLexer):
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
- (r'[0-9]+L?', Number.Integer),
+ (r'[0-9]+(_+[0-9]+)*L?', Number.Integer),
(r'\n', Text)
],
'class': [
@@ -83,7 +83,7 @@ class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'AspectJ'
@@ -243,25 +243,25 @@ class ScalaLexer(RegexLexer):
u'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
u'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
- idrest = ur'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
+ idrest = u'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
- (ur"'%s" % idrest, Text.Symbol),
+ (u"'%s" % idrest, Text.Symbol),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
- (ur'@%s' % idrest, Name.Decorator),
- (ur'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
- ur'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
- ur'lazy|match|new|override|pr(?:ivate|otected)'
- ur'|re(?:quires|turn)|s(?:ealed|uper)|'
- ur't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|'
+ (u'@%s' % idrest, Name.Decorator),
+ (u'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
+ u'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
+ u'lazy|match|new|override|pr(?:ivate|otected)'
+ u'|re(?:quires|turn)|s(?:ealed|uper)|'
+ u't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\\b|'
u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
- (ur':(?!%s)' % op, Keyword, 'type'),
- (ur'%s%s\b' % (upper, idrest), Name.Class),
+ (u':(?!%s)' % op, Keyword, 'type'),
+ (u'%s%s\\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
@@ -282,34 +282,34 @@ class ScalaLexer(RegexLexer):
(r'\n', Text)
],
'class': [
- (ur'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op),
+ (u'(%s|%s|`[^`]+`)(\\s*)(\\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
- (ur'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
+ (u'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
(u'<[%:]|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'),
(r'[\(\{]', Operator, '#push'),
- (ur'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' %
+ (u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)(\\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
- (ur'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)$' %
+ (u'((?:%s|%s|`[^`]+`)(?:\\.(?:%s|%s|`[^`]+`))*)(\\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
- (ur'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
+ (u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
(u'<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\]\)\}])', Operator, '#pop'),
(r'[\(\[\{]', Operator, '#push'),
- (ur'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
+ (u'\\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
@@ -318,7 +318,7 @@ class ScalaLexer(RegexLexer):
(r'[*/]', Comment.Multiline)
],
'import': [
- (ur'(%s|\.)+' % idrest, Name.Namespace, '#pop')
+ (u'(%s|\\.)+' % idrest, Name.Namespace, '#pop')
],
}
@@ -327,7 +327,7 @@ class GosuLexer(RegexLexer):
"""
For Gosu source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Gosu'
@@ -406,7 +406,7 @@ class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Gosu Template'
@@ -425,7 +425,7 @@ class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Groovy'
@@ -487,7 +487,7 @@ class IokeLexer(RegexLexer):
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Ioke'
filenames = ['*.ik']
@@ -639,9 +639,9 @@ class IokeLexer(RegexLexer):
r'System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
- (ur'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
- ur'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
- ur'(?![a-zA-Z0-9!:_?])', Name.Function),
+ (u'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
+ u'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
+ u'(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
@@ -651,13 +651,13 @@ class IokeLexer(RegexLexer):
(r'#\(', Punctuation),
# Operators
- (ur'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
- ur'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
- ur'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
- ur'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
- ur'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
- ur'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
- ur'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
+ (r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
+ r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
+ r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
+ r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
+ r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
+ r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
+ u'\\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])',
Operator),
@@ -677,7 +677,7 @@ class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
@@ -832,7 +832,7 @@ class TeaLangLexer(RegexLexer):
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
@@ -878,7 +878,7 @@ class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Ceylon'
@@ -900,7 +900,7 @@ class CeylonLexer(RegexLexer):
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
- (r'/\*.*?\*/', Comment.Multiline),
+ (r'/\*', Comment.Multiline, 'comment'),
(r'(variable|shared|abstract|doc|by|formal|actual|late|native)',
Name.Decorator),
(r'(break|case|catch|continue|default|else|finally|for|in|'
@@ -945,126 +945,83 @@ class CeylonLexer(RegexLexer):
(r'[a-z][a-zA-Z0-9_.]*',
Name.Namespace, '#pop')
],
+ 'comment': [
+ (r'[^*/]', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
}
class KotlinLexer(RegexLexer):
"""
- For `Kotlin <http://confluence.jetbrains.net/display/Kotlin/>`_
+ For `Kotlin <http://kotlin.jetbrains.org/>`_
source code.
- Additional options accepted:
-
- `unicodelevel`
- Determines which Unicode characters this lexer allows for identifiers.
- The possible values are:
-
- * ``none`` -- only the ASCII letters and numbers are allowed. This
- is the fastest selection.
- * ``basic`` -- all Unicode characters from the specification except
- category ``Lo`` are allowed.
- * ``full`` -- all Unicode characters as specified in the C# specs
- are allowed. Note that this means a considerable slowdown since the
- ``Lo`` category has more than 40,000 characters in it!
-
- The default value is ``basic``.
-
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
- mimetypes = ['text/x-kotlin'] # inferred
+ mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
- # for the range of allowed unicode characters in identifiers,
- # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
-
- levels = {
- 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
- 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
- '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
- uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
- 'full': ('@?(?:_|[^' +
- uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
- + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
- 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
- }
+ kt_name = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
+ '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + uni.Nd +
+ uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*')
+ kt_id = '(' + kt_name + '|`' + kt_name + '`)'
- tokens = {}
- token_variants = True
-
- for levelname, cs_ident in levels.items():
- tokens[levelname] = {
- 'root': [
- # method names
- (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
- r'(' + cs_ident + ')' # method name
- r'(\s*)(\()', # signature start
- bygroups(using(this), Name.Function, Text, Punctuation)),
- (r'^\s*\[.*?\]', Name.Attribute),
- (r'[^\S\n]+', Text),
- (r'\\\n', Text), # line continuation
- (r'//.*?\n', Comment.Single),
- (r'/[*](.|\n)*?[*]/', Comment.Multiline),
- (r'\n', Text),
- (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
- (r'[{}]', Punctuation),
- (r'@"(""|[^"])*"', String),
- (r'"(\\\\|\\"|[^"\n])*["\n]', String),
- (r"'\\.'|'[^\\]'", String.Char),
- (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
- r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
- (r'#[ \t]*(if|endif|else|elif|define|undef|'
- r'line|error|warning|region|endregion|pragma)\b.*?\n',
- Comment.Preproc),
- (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
- Keyword)),
- (r'(abstract|as|break|catch|'
- r'fun|continue|default|delegate|'
- r'do|else|enum|extern|false|finally|'
- r'fixed|for|goto|if|implicit|in|interface|'
- r'internal|is|lock|null|'
- r'out|override|private|protected|public|readonly|'
- r'ref|return|sealed|sizeof|'
- r'when|this|throw|true|try|typeof|'
- r'unchecked|unsafe|virtual|void|while|'
- r'get|set|new|partial|yield|val|var)\b', Keyword),
- (r'(global)(::)', bygroups(Keyword, Punctuation)),
- (r'(bool|byte|char|decimal|double|dynamic|float|int|long|'
- r'short)\b\??', Keyword.Type),
- (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
- (r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'),
- (cs_ident, Name),
- ],
- 'class': [
- (cs_ident, Name.Class, '#pop')
- ],
- 'package': [
- (r'(?=\()', Text, '#pop'), # using (resource)
- ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
- ]
- }
-
- def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
- 'basic')
- if level not in self._all_tokens:
- # compile the regexes now
- self._tokens = self.__class__.process_tokendef(level)
- else:
- self._tokens = self._all_tokens[level]
-
- RegexLexer.__init__(self, **options)
+ tokens = {
+ 'root': [
+ (r'^\s*\[.*?\]', Name.Attribute),
+ (r'[^\S\n]+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'//.*?\n', Comment.Single),
+ (r'/[*].*?[*]/', Comment.Multiline),
+ (r'\n', Text),
+ (r'::|!!|\?[:.]', Operator),
+ (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
+ (r'[{}]', Punctuation),
+ (r'@"(""|[^"])*"', String),
+ (r'"(\\\\|\\"|[^"\n])*["\n]', String),
+ (r"'\\.'|'[^\\]'", String.Char),
+ (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
+ r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+ (r'(class)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
+ (r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
+ (r'(package|import)(\s+)', bygroups(Keyword, Text), 'package'),
+ (r'(val|var)(\s+)', bygroups(Keyword, Text), 'property'),
+ (r'(fun)(\s+)', bygroups(Keyword, Text), 'function'),
+ (r'(abstract|annotation|as|break|by|catch|class|continue|do|else|'
+ r'enum|false|final|finally|for|fun|get|if|import|in|inner|'
+ r'internal|is|null|object|open|out|override|package|private|'
+ r'protected|public|reified|return|set|super|this|throw|trait|'
+ r'true|try|type|val|var|vararg|when|where|while|This)\b', Keyword),
+ (kt_id, Name),
+ ],
+ 'package': [
+ (r'\S+', Name.Namespace, '#pop')
+ ],
+ 'class': [
+ (kt_id, Name.Class, '#pop')
+ ],
+ 'property': [
+ (kt_id, Name.Property, '#pop')
+ ],
+ 'function': [
+ (kt_id, Name.Function, '#pop')
+ ],
+ }
class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Xtend'
@@ -1100,7 +1057,7 @@ class XtendLexer(RegexLexer):
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
- (ur"(\u00BB)", String, 'template'),
+ (u'(\u00BB)', String, 'template'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
@@ -1119,7 +1076,73 @@ class XtendLexer(RegexLexer):
],
'template': [
(r"'''", String, '#pop'),
- (ur"\u00AB", String, '#pop'),
+ (u'\u00AB', String, '#pop'),
(r'.', String)
],
}
+
class PigLexer(RegexLexer):
    """
    For `Pig Latin <https://pig.apache.org/>`_ source code.

    .. versionadded:: 2.0
    """

    name = 'Pig'
    aliases = ['pig']
    filenames = ['*.pig']
    mimetypes = ['text/x-pig']

    # Pig keywords and identifiers are case-insensitive
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Text),
            # '--' line comments and '/* */' block comments
            (r'--.*', Comment),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'\\\n', Text),  # line continuation
            (r'\\', Text),
            # single-quoted string: simple escapes or \uXXXX only
            (r'\'(?:\\[ntbrf\\\']|\\u[0-9a-f]{4}|[^\'\\\n\r])*\'', String),
            include('keywords'),
            include('types'),
            include('builtins'),
            include('punct'),
            include('operators'),
            (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Text),
            # bare identifier directly followed by '(' -> function call
            (r'([a-z_][a-z0-9_]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[()#:]', Text),
            # catch-all for anything not handled above
            (r'[^(:#\'\")\s]+', Text),
            (r'\S+\s+', Text) # TODO: make tests pass without \s+
        ],
        'keywords': [
            # NOTE(review): 'exex' looks like a typo for Pig's 'exec'
            # shell command -- confirm against the Pig Latin reference
            (r'(assert|and|any|all|arrange|as|asc|bag|by|cache|CASE|cat|cd|cp|'
             r'%declare|%default|define|dense|desc|describe|distinct|du|dump|'
             r'eval|exex|explain|filter|flatten|foreach|full|generate|group|'
             r'help|if|illustrate|import|inner|input|into|is|join|kill|left|'
             r'limit|load|ls|map|matches|mkdir|mv|not|null|onschema|or|order|'
             r'outer|output|parallel|pig|pwd|quit|register|returns|right|rm|'
             r'rmf|rollup|run|sample|set|ship|split|stderr|stdin|stdout|store|'
             r'stream|through|union|using|void)\b', Keyword)
        ],
        'builtins': [
            (r'(AVG|BinStorage|cogroup|CONCAT|copyFromLocal|copyToLocal|COUNT|'
             r'cross|DIFF|MAX|MIN|PigDump|PigStorage|SIZE|SUM|TextLoader|'
             r'TOKENIZE)\b', Name.Builtin)
        ],
        'types': [
            (r'(bytearray|BIGINTEGER|BIGDECIMAL|chararray|datetime|double|float|'
             r'int|long|tuple)\b', Keyword.Type)
        ],
        'punct': [
            (r'[;(){}\[\]]', Punctuation),
        ],
        'operators': [
            # word-form comparison operators, then symbolic forms
            (r'[#=,./%+\-?]', Operator),
            (r'(eq|gt|lt|gte|lte|neq|matches)\b', Operator),
            (r'(==|<=|<|>=|>|!=)', Operator),
        ],
    }
diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py
index 0b757e44..e7a8948b 100644
--- a/pygments/lexers/math.py
+++ b/pygments/lexers/math.py
@@ -5,10 +5,12 @@
Lexers for math languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import re
from pygments.util import shebang_matches
@@ -24,14 +26,14 @@ from pygments.lexers import _stan_builtins
__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer',
'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer',
'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer',
- 'IDLLexer', 'RdLexer']
+ 'IDLLexer', 'RdLexer', 'IgorLexer', 'MathematicaLexer', 'GAPLexer']
class JuliaLexer(RegexLexer):
"""
For `Julia <http://julialang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Julia'
aliases = ['julia','jl']
@@ -151,7 +153,7 @@ class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Julia console'
aliases = ['jlcon']
@@ -167,8 +169,8 @@ class JuliaConsoleLexer(Lexer):
if line.startswith('julia>'):
insertions.append((len(curcode),
- [(0, Generic.Prompt, line[:3])]))
- curcode += line[3:]
+ [(0, Generic.Prompt, line[:6])]))
+ curcode += line[6:]
elif line.startswith(' '):
@@ -200,7 +202,7 @@ class MuPADLexer(RegexLexer):
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'MuPAD'
aliases = ['mupad']
@@ -270,7 +272,7 @@ class MatlabLexer(RegexLexer):
"""
For Matlab source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Matlab'
aliases = ['matlab']
@@ -348,13 +350,13 @@ class MatlabLexer(RegexLexer):
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
- (r'(?<=[\w\)\]])\'', Operator),
+ (r'(?<=[\w\)\].])\'+', Operator),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
- (r'(?<![\w\)\]])\'', String, 'string'),
+ (r'(?<![\w\)\].])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
@@ -376,10 +378,9 @@ class MatlabLexer(RegexLexer):
def analyse_text(text):
if re.match('^\s*%', text, re.M): # comment
- return 0.9
+ return 0.2
elif re.match('^!\w+', text, re.M): # system cmd
- return 0.9
- return 0.1
+ return 0.2
line_re = re.compile('.*?\n')
@@ -389,7 +390,7 @@ class MatlabSessionLexer(Lexer):
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Matlab session'
aliases = ['matlabsession']
@@ -403,17 +404,22 @@ class MatlabSessionLexer(Lexer):
for match in line_re.finditer(text):
line = match.group()
- if line.startswith('>>'):
+ if line.startswith('>> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
+ elif line.startswith('>>'):
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, line[:2])]))
+ curcode += line[2:]
+
elif line.startswith('???'):
idx = len(curcode)
# without is showing error on same line as before...?
- line = "\n" + line
+ #line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
@@ -427,6 +433,7 @@ class MatlabSessionLexer(Lexer):
yield match.start(), Generic.Output, line
+ print(insertions)
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
@@ -437,7 +444,7 @@ class OctaveLexer(RegexLexer):
"""
For GNU Octave source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Octave'
aliases = ['octave']
@@ -806,8 +813,8 @@ class OctaveLexer(RegexLexer):
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
- (r'(?<=[\w\)\]])\'', Operator),
- (r'(?<![\w\)\]])\'', String, 'string'),
+ (r'(?<=[\w\)\].])\'+', Operator),
+ (r'(?<![\w\)\].])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
@@ -823,16 +830,12 @@ class OctaveLexer(RegexLexer):
],
}
- def analyse_text(text):
- if re.match('^\s*[%#]', text, re.M): #Comment
- return 0.1
-
class ScilabLexer(RegexLexer):
"""
For Scilab source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Scilab'
aliases = ['scilab']
@@ -871,8 +874,8 @@ class ScilabLexer(RegexLexer):
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
- (r'(?<=[\w\)\]])\'', Operator),
- (r'(?<![\w\)\]])\'', String, 'string'),
+ (r'(?<=[\w\)\].])\'+', Operator),
+ (r'(?<![\w\)\].])\'', String, 'string'),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
@@ -898,7 +901,7 @@ class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'NumPy'
@@ -983,6 +986,11 @@ class NumPyLexer(PythonLexer):
else:
yield index, token, value
+ def analyse_text(text):
+ return (shebang_matches(text, r'pythonw?(2(\.\d)?)?') or
+ 'import ' in text[:1000]) \
+ and ('import numpy' in text or 'from numpy import' in text)
+
class RConsoleLexer(Lexer):
"""
@@ -1034,7 +1042,7 @@ class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'S'
@@ -1113,7 +1121,8 @@ class SLexer(RegexLexer):
}
def analyse_text(text):
- return '<-' in text
+ if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
+ return 0.11
class BugsLexer(RegexLexer):
@@ -1121,7 +1130,7 @@ class BugsLexer(RegexLexer):
Pygments Lexer for `OpenBugs <http://www.openbugs.info/w/>`_ and WinBugs
models.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'BUGS'
@@ -1216,7 +1225,7 @@ class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'JAGS'
@@ -1300,13 +1309,13 @@ class JagsLexer(RegexLexer):
return 0
class StanLexer(RegexLexer):
- """Pygments Lexer for Stan models.
+ """Pygments Lexer for Stan models.
- The Stan modeling language is specified in the *Stan 1.3.0
+ The Stan modeling language is specified in the *Stan 2.0.1
Modeling Language Manual* `pdf
- <http://code.google.com/p/stan/downloads/detail?name=stan-reference-1.3.0.pdf>`_.
+ <https://github.com/stan-dev/stan/releases/download/v2.0.1/stan-reference-2.0.1.pdf>`__
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Stan'
@@ -1379,7 +1388,7 @@ class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'IDL'
aliases = ['idl']
@@ -1625,7 +1634,7 @@ class RdLexer(RegexLexer):
Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Rd'
aliases = ['rd']
@@ -1653,3 +1662,363 @@ class RdLexer(RegexLexer):
(r'.', Text),
]
}
+
+
class IgorLexer(RegexLexer):
    """
    Pygments Lexer for Igor Pro procedure files (.ipf).
    See http://www.wavemetrics.com/ and http://www.igorexchange.com/.

    .. versionadded:: 2.0
    """

    name = 'Igor'
    aliases = ['igor', 'igorpro']
    filenames = ['*.ipf']
    mimetypes = ['text/ipf']

    # Igor is case-insensitive; the mixed-case spellings below are the
    # canonical ones but match in any case thanks to re.IGNORECASE.
    flags = re.IGNORECASE | re.MULTILINE

    # Flow-control keywords.
    flowControl = [
        'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
        'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
        'break', 'continue', 'return',
    ]
    # Declaration/type keywords.
    types = [
        'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
        'STRUCT', 'dfref'
    ]
    # Structural keywords.  ('Prompt' and 'DoPrompt' were previously listed
    # twice; the duplicates produced redundant regex alternatives.)
    keywords = [
        'override', 'ThreadSafe', 'static', 'FuncFit', 'Proc', 'Picture',
        'Prompt', 'DoPrompt', 'macro', 'window', 'graph', 'function', 'end',
        'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu',
    ]
    # Built-in operations (command-style built-ins).
    operations = [
        'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio',
        'AddMovieFrame', 'APMath', 'Append', 'AppendImage',
        'AppendLayoutObject', 'AppendMatrixContour', 'AppendText',
        'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour',
        'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall',
        'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox',
        'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc',
        'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar',
        'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile',
        'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross',
        'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
        'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions',
        'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide',
        'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
        'Differentiate', 'dir', 'Display', 'DisplayHelpTopic',
        'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow',
        'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine',
        'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText',
        'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder',
        'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText',
        'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp',
        'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR',
        'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly',
        'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf',
        'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload',
        'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo',
        'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow',
        'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox',
        'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
        'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles',
        'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection',
        'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask',
        'ImageHistModification', 'ImageHistogram', 'ImageInterpolate',
        'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration',
        'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave',
        'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold',
        'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort',
        'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath',
        'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder',
        'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings',
        'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout',
        'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData',
        'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess',
        'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime',
        'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter',
        'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve',
        'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD',
        'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve',
        'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText',
        'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList',
        'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout',
        'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder',
        'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
        'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain',
        'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage',
        'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath',
        'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open',
        'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo',
        'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction',
        'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
        'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs',
        'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable',
        'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
        'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour',
        'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage',
        'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder',
        'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages',
        'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample',
        'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
        'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
        'SavePackagePreferences', 'SavePICT', 'SaveTableCopy',
        'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern',
        'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer',
        'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode',
        'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed',
        'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus',
        'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth',
        'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet',
        'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
        'SphericalInterpolate', 'SphericalTriangulate', 'SplitString',
        'sprintf', 'sscanf', 'Stack', 'StackWindows',
        'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest',
        'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
        'StatsCircularCorrelationTest', 'StatsCircularMeans',
        'StatsCircularMoments', 'StatsCircularTwoSampleTest',
        'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest',
        'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
        'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest',
        'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
        'StatsLinearRegression', 'StatsMultiCorrelationTest',
        'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles',
        'StatsRankCorrelationTest', 'StatsResample', 'StatsSample',
        'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest',
        'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest',
        'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest',
        'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String',
        'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile',
        'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid',
        'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv',
        'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform',
        'WindowFunction',
    ]
    # Built-in functions (numeric).
    functions = [
        'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog',
        'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
        'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi',
        'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch',
        'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs',
        'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev',
        'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos',
        'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi',
        'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual',
        'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian',
        'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave',
        'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma',
        'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf',
        'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata',
        'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor',
        'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
        'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
        'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize',
        'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise',
        'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1',
        'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion',
        'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D',
        'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre',
        'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln',
        'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint',
        'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max',
        'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey',
        'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect',
        'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x',
        'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar',
        'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec',
        'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ',
        'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD',
        'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
        'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
        'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF',
        'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF',
        'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF',
        'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF',
        'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF',
        'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF',
        'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF',
        'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF',
        'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
        'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF',
        'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF',
        'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF',
        'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF',
        'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF',
        'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
        'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
        'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
        'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
        'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF',
        'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF',
        'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF',
        'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF',
        'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF',
        'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute',
        'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
        'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF',
        'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF',
        'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF',
        'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
        'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
        'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise',
        'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF',
        'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch',
        'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists',
        'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease',
        'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks',
        'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists',
        'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem',
        'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR',
    ]
    # Built-in functions (string/info/reference) — same token type, kept as a
    # separate extension of the list for readability.
    functions += [
        'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo',
        'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName',
        'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo',
        'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date',
        'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo',
        'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont',
        'GetDimLabel', 'GetErrMessage', 'GetFormula',
        'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
        'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
        'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash',
        'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile',
        'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList',
        'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str',
        'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo',
        'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey',
        'RemoveEnding', 'RemoveFromList', 'RemoveListItem',
        'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey',
        'Secs2Date', 'Secs2Time', 'SelectString', 'SortList',
        'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath',
        'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault',
        'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel',
        'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr',
        'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits',
        'WinList', 'WinName', 'WinRecreation', 'XWaveName',
        'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef',
        'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef',
        'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR',
        'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR',
    ]

    # Rule order matters: keywords are tried before operations, so a word in
    # both lists (e.g. 'FuncFit') tokenizes as Keyword.Reserved.
    tokens = {
        'root': [
            (r'//.*$', Comment.Single),
            (r'"([^"\\]|\\.)*"', String),
            # Flow Control.
            (r'\b(%s)\b' % '|'.join(flowControl), Keyword),
            # Types.
            (r'\b(%s)\b' % '|'.join(types), Keyword.Type),
            # Keywords.
            (r'\b(%s)\b' % '|'.join(keywords), Keyword.Reserved),
            # Built-in operations.
            (r'\b(%s)\b' % '|'.join(operations), Name.Class),
            # Built-in functions.
            (r'\b(%s)\b' % '|'.join(functions), Name.Function),
            # Compiler directives.
            (r'^#(include|pragma|define|ifdef|ifndef|endif)',
             Name.Decorator),
            # Fast path: consume a run of uninteresting characters to EOL.
            (r'[^a-zA-Z"/]+$', Text),
            (r'.', Text),
        ],
    }
+
+
class MathematicaLexer(RegexLexer):
    """
    Lexer for `Mathematica <http://www.wolfram.com/mathematica/>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'Mathematica'
    aliases = ['mathematica', 'mma', 'nb']
    filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
    mimetypes = ['application/mathematica',
                 'application/vnd.wolfram.mathematica',
                 'application/vnd.wolfram.mathematica.package',
                 'application/vnd.wolfram.cdf']

    # http://reference.wolfram.com/mathematica/guide/Syntax.html
    # NOTE: a missing comma between "!=" and "==" used to concatenate the two
    # literals into the bogus operator "!===", so neither was matched alone.
    operators = [
        ";;", "=", "=.", "!=", "==", ":=", "->", ":>", "/.", "+", "-", "*",
        "/", "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@",
        "@@", "@@@", "~~", "===", "&"]
    # Reverse lexicographic order puts longer operators before their prefixes
    # (e.g. "===" before "==" before "=") in the alternation below.
    operators.sort(reverse=True)

    punctuation = [",", ";", "(", ")", "[", "]", "{", "}"]

    def _multi_escape(entries):
        # Build an alternation group of the literal (regex-escaped) entries.
        return '(%s)' % ('|'.join(re.escape(entry) for entry in entries))

    tokens = {
        'root': [
            # (* ... *) comments, possibly spanning lines.
            (r'(?s)\(\*.*?\*\)', Comment),

            # Context (namespace) references like Global`.
            (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
            # Pattern names containing underscores (x_, _Integer, ...).
            (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
            # Slot arguments (#, #1, ...).
            (r'#\d*', Name.Variable),
            (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),

            (r'-?[0-9]+\.[0-9]*', Number.Float),
            (r'-?[0-9]*\.[0-9]+', Number.Float),
            (r'-?[0-9]+', Number.Integer),

            (_multi_escape(operators), Operator),
            (_multi_escape(punctuation), Punctuation),
            (r'".*?"', String),
            (r'\s+', Text.Whitespace),
        ],
    }
+
class GAPLexer(RegexLexer):
    """
    For `GAP <http://www.gap-system.org>`_ source code.

    .. versionadded:: 2.0
    """
    name = 'GAP'
    aliases = ['gap']
    filenames = ['*.g', '*.gd', '*.gi', '*.gap']

    tokens = {
        'root' : [
            # Comments run from '#' to end of line.
            (r'#.*$', Comment.Single),
            # Double-quoted strings with backslash escapes.
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            # Control flow and declaration keywords.
            (r'''(?x)\b(?:
                if|then|elif|else|fi|
                for|while|do|od|
                repeat|until|
                break|continue|
                function|local|return|end|
                rec|
                quit|QUIT|
                IsBound|Unbind|
                TryNextMethod|
                Info|Assert
            )\b''', Keyword),
            # Built-in constants.
            (r'''(?x)\b(?:
                true|false|fail|infinity
            )\b''',
            Name.Constant),
            # Declaration/installation helpers, e.g. DeclareOperation,
            # InstallMethod, BindGlobal.
            (r'''(?x)\b(?:
                (Declare|Install)([A-Z][A-Za-z]+)|
                BindGlobal|BIND_GLOBAL
            )\b''',
            Name.Builtin),
            (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
            # Word-form operators.
            (r'''(?x)\b(?:
                and|or|not|mod|in
            )\b''',
            Operator.Word),
            # Identifiers, including backquoted forms.
            # NOTE(review): the '::'-qualified alternative looks borrowed from
            # another lexer — GAP identifiers normally have no '::'; confirm.
            (r'''(?x)
                (?:[a-zA-Z_0-9]+|`[^`]*`)
                (?:::[a-zA-Z_0-9]+|`[^`]*`)*''', Name.Variable),
            # Numbers, with optional fraction and exponent.
            (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
            (r'\.[0-9]+(?:e[0-9]+)?', Number),
            # Fallback: any other single character.
            (r'.', Text)
        ]
    }
diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py
index 8491d19d..80b06b28 100644
--- a/pygments/lexers/other.py
+++ b/pygments/lexers/other.py
@@ -5,7 +5,7 @@
Lexers for other languages.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -14,7 +14,8 @@ import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, combined, ExtendedRegexLexer
from pygments.token import Error, Punctuation, Literal, Token, \
- Text, Comment, Operator, Keyword, Name, String, Number, Generic
+ Text, Comment, Operator, Keyword, Name, String, Number, Generic, \
+ Whitespace
from pygments.util import get_bool_opt
from pygments.lexers.web import HtmlLexer
@@ -35,7 +36,7 @@ __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer',
'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer',
'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer',
'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer',
- 'CbmBasicV2Lexer', 'AutoItLexer']
+ 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer', 'APLLexer']
class ECLLexer(RegexLexer):
@@ -44,7 +45,7 @@ class ECLLexer(RegexLexer):
<http://hpccsystems.com/community/docs/ecl-language-reference/html>`_
language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'ECL'
@@ -175,7 +176,7 @@ class BefungeLexer(RegexLexer):
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Befunge'
aliases = ['befunge']
@@ -205,7 +206,7 @@ class RedcodeLexer(RegexLexer):
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'Redcode'
aliases = ['redcode']
@@ -241,11 +242,11 @@ class MOOCodeLexer(RegexLexer):
For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
language).
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'MOOCode'
filenames = ['*.moo']
- aliases = ['moocode']
+ aliases = ['moocode', 'moo']
mimetypes = ['text/x-moocode']
tokens = {
@@ -285,11 +286,11 @@ class SmalltalkLexer(RegexLexer):
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
- aliases = ['smalltalk', 'squeak']
+ aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
@@ -363,7 +364,7 @@ class SmalltalkLexer(RegexLexer):
include('literals'),
],
'afterobject' : [
- (r'! !$', Keyword , '#pop'), # squeak chunk delimeter
+ (r'! !$', Keyword , '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
@@ -404,7 +405,7 @@ class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Logtalk'
@@ -632,7 +633,7 @@ class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Gnuplot'
@@ -791,7 +792,7 @@ class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
@@ -1149,18 +1150,18 @@ class AppleScriptLexer(RegexLexer):
tokens = {
'root': [
(r'\s+', Text),
- (ur'¬\n', String.Escape),
+ (u'¬\\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[\(\){}!,.:]', Punctuation),
- (ur'(«)([^»]+)(»)',
+ (u'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
- (ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
+ (u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents[::-1]),
@@ -1197,7 +1198,7 @@ class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Modelica'
aliases = ['modelica']
@@ -1220,11 +1221,16 @@ class ModelicaLexer(RegexLexer):
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
- (r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
- (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
- r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
- (r"('[\w\+\-\*\/\^]+'|\w+)", Name),
+ (r'([a-zA-Z_][\w]*|[\'][^\']+[\'])'
+ r'([\[\d,:\]]*)'
+ r'(\.([a-zA-Z_][\w]*|[\'][^\']+[\']))+'
+ r'([\[\d,:\]]*)', Name.Class),
+ (r'([a-zA-Z_][\w]*|[\'][^\']+[\'])'
+ r'([\[\d,:\]]+)', Name.Class),
+ (r'(\'[\w\+\-\*\/\^]+\'|\w+)', Name),
+ (r'[()\[\]{},.;]', Punctuation),
+ (r'\'', Name, 'quoted_ident'),
],
'root': [
include('whitespace'),
@@ -1238,7 +1244,7 @@ class ModelicaLexer(RegexLexer):
'keywords': [
(r'(algorithm|annotation|break|connect|constant|constrainedby|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
- r'end|equation|exit|expandable|extends|'
+ r'equation|exit|expandable|extends|'
r'external|false|final|flow|for|if|import|impure|in|initial\sequation|'
r'inner|input|loop|nondiscrete|outer|output|parameter|partial|'
r'protected|public|pure|redeclare|replaceable|stream|time|then|true|'
@@ -1251,15 +1257,20 @@ class ModelicaLexer(RegexLexer):
r'tanh|zeros)\b', Name.Function),
],
'operators': [
- (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|'
- r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|'
- r'return|sample|smooth|spatialDistribution|subSample|terminal|'
+ (r'(actualStream|and|assert|backSample|cardinality|change|Clock|'
+ r'delay|der|edge|hold|homotopy|initial|inStream|noClock|noEvent|'
+ r'not|or|pre|previous|reinit|return|sample|smooth|'
+ r'spatialDistribution|shiftSample|subSample|superSample|terminal|'
r'terminate)\b', Name.Builtin),
],
'classes': [
- (r'(block|class|connector|function|model|package|'
- r'record|type)(\s+)([A-Za-z_]+)',
- bygroups(Keyword, Text, Name.Class))
+ (r'(block|class|connector|end|function|model|package|'
+ r'record|type)(\s+)((?!if|when|while)[A-Za-z_]\w*|[\'][^\']+[\'])([;]?)',
+ bygroups(Keyword, Text, Name.Class, Text))
+ ],
+ 'quoted_ident': [
+ (r'\'', Name, '#pop'),
+ (r'[^\']+', Name), # all other characters
],
'string': [
(r'"', String, '#pop'),
@@ -1270,7 +1281,7 @@ class ModelicaLexer(RegexLexer):
(r'\\', String), # stray backslash
],
'html-content': [
- (r'<\s*/\s*html\s*>', Name.Tag, '#pop'),
+ (r'<\s*/\s*html\s*>"', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)),
]
}
@@ -1280,7 +1291,7 @@ class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'REBOL'
aliases = ['rebol']
@@ -1380,9 +1391,9 @@ class RebolLexer(RegexLexer):
tokens = {
'root': [
- (r'REBOL', Generic.Strong, 'script'),
- (r'R', Comment),
(r'[^R]+', Comment),
+ (r'REBOL\s+\[', Generic.Strong, 'script'),
+ (r'R', Comment)
],
'script': [
(r'\s+', Text),
@@ -1399,8 +1410,8 @@ class RebolLexer(RegexLexer):
(r'%[^(\^{^")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
- (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?'
- r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date
+ (r'\d+[\-\/][0-9a-zA-Z]+[\-\/]\d+(\/\d+\:\d+((\:\d+)?'
+ r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+[xX]\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float),
@@ -1492,13 +1503,23 @@ class RebolLexer(RegexLexer):
(r'[^(\[\])]+', Comment),
],
}
+ def analyse_text(text):
+ """
+ Check if code contains REBOL header and so it probably not R code
+ """
+ if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
+ # The code starts with REBOL header
+ return 1.0
+ elif re.search(r'\s*REBOL\s*[', text, re.IGNORECASE):
+ # The code contains REBOL header but also some text before it
+ return 0.5
class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ABAP'
aliases = ['abap']
@@ -1679,6 +1700,7 @@ class ABAPLexer(RegexLexer):
# because < and > are part of field symbols.
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
+ (r"`([^`])*`", String.Single),
(r'[/;:()\[\],\.]', Punctuation)
],
}
@@ -1745,17 +1767,17 @@ class GherkinLexer(RegexLexer):
"""
For `Gherkin <http://github.com/aslakhellesoy/gherkin/>` syntax.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Gherkin'
- aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin']
+ aliases = ['cucumber', 'gherkin']
filenames = ['*.feature']
mimetypes = ['text/x-gherkin']
- feature_keywords = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
- feature_element_keywords = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
- examples_keywords = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
- step_keywords = ur'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
+ feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
+ feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
+ examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
+ step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
tokens = {
'comments': [
@@ -1779,6 +1801,7 @@ class GherkinLexer(RegexLexer):
'examples_table_header': [
(r"\s+\|\s*$", Keyword, "#pop:2"),
include('comments'),
+ (r"\\\|", Name.Variable),
(r"\s*\|", Keyword),
(r"[^\|]", Name.Variable),
],
@@ -1821,6 +1844,7 @@ class GherkinLexer(RegexLexer):
'table_content': [
(r"\s+\|\s*$", Keyword, "#pop"),
include('comments'),
+ (r"\\\|", String),
(r"\s*\|", Keyword),
include('string'),
],
@@ -1855,7 +1879,7 @@ class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
@@ -1961,11 +1985,11 @@ class AsymptoteLexer(RegexLexer):
from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
- if token is Name and value in ASYFUNCNAME:
- token = Name.Function
- elif token is Name and value in ASYVARNAME:
- token = Name.Variable
- yield index, token, value
+ if token is Name and value in ASYFUNCNAME:
+ token = Name.Function
+ elif token is Name and value in ASYVARNAME:
+ token = Name.Variable
+ yield index, token, value
class PostScriptLexer(RegexLexer):
@@ -1976,10 +2000,10 @@ class PostScriptLexer(RegexLexer):
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'PostScript'
- aliases = ['postscript']
+ aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
@@ -2064,10 +2088,10 @@ class AutohotkeyLexer(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'autohotkey'
- aliases = ['ahk']
+ aliases = ['ahk', 'autohotkey']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
@@ -2244,7 +2268,7 @@ class MaqlLexer(RegexLexer):
<https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_
scripts.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'MAQL'
@@ -2303,7 +2327,7 @@ class GoodDataCLLexer(RegexLexer):
Lexer for `GoodData-CL <http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/com/gooddata/processor/COMMANDS.txt>`_
script files.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'GoodData-CL'
@@ -2348,11 +2372,11 @@ class ProtoBufLexer(RegexLexer):
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Protocol Buffer'
- aliases = ['protobuf']
+ aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
@@ -2400,7 +2424,7 @@ class HybrisLexer(RegexLexer):
"""
For `Hybris <http://www.hybris-lang.org>`_ source code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Hybris'
@@ -2478,7 +2502,7 @@ class AwkLexer(RegexLexer):
"""
For Awk scripts.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Awk'
@@ -2504,11 +2528,11 @@ class AwkLexer(RegexLexer):
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
- (r'\+\+|--|\|\||&&|in|\$|!?~|'
+ (r'\+\+|--|\|\||&&|in\b|\$|!?~|'
r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
- (r'(break|continue|do|while|exit|for|if|'
+ (r'(break|continue|do|while|exit|for|if|else|'
r'return)\b', Keyword, 'slashstartsregex'),
(r'function\b', Keyword.Declaration, 'slashstartsregex'),
(r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
@@ -2532,7 +2556,7 @@ class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'CFEngine3'
@@ -2596,7 +2620,7 @@ class SnobolLexer(RegexLexer):
Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
Does not require spaces around binary operators.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "Snobol"
@@ -2660,7 +2684,7 @@ class UrbiscriptLexer(ExtendedRegexLexer):
"""
For UrbiScript source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'UrbiScript'
@@ -2767,7 +2791,7 @@ class OpenEdgeLexer(RegexLexer):
Lexer for `OpenEdge ABL (formerly Progress)
<http://web.progress.com/en/openedge/abl.html>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'OpenEdge ABL'
aliases = ['openedge', 'abl', 'progress']
@@ -2819,7 +2843,7 @@ class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
@@ -2839,8 +2863,8 @@ class BroLexer(RegexLexer):
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
- r'|export|for|function|if|global|local|module|next'
- r'|of|print|redef|return|schedule|type|when|while)\b', Keyword),
+ r'|export|for|function|if|global|hook|local|module|next'
+ r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
@@ -2897,7 +2921,7 @@ class CbmBasicV2Lexer(RegexLexer):
"""
For CBM BASIC V2 sources.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
@@ -2935,7 +2959,7 @@ class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
@@ -2996,7 +3020,7 @@ class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Kconfig'
@@ -3071,7 +3095,7 @@ class VGLLexer(RegexLexer):
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
@@ -3104,7 +3128,7 @@ class SourcePawnLexer(RegexLexer):
"""
For SourcePawn source code with preprocessor directives.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'SourcePawn'
aliases = ['sp']
@@ -3167,7 +3191,7 @@ class SourcePawnLexer(RegexLexer):
]
}
- SM_TYPES = ['Action', 'bool', 'Float', 'Plugin', 'String', 'any',
+ SM_TYPES = set(['Action', 'bool', 'Float', 'Plugin', 'String', 'any',
'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
@@ -3185,16 +3209,16 @@ class SourcePawnLexer(RegexLexer):
'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
- 'TopMenuPosition', 'TopMenuObject', 'UserMsg']
+ 'TopMenuPosition', 'TopMenuObject', 'UserMsg'])
def __init__(self, **options):
self.smhighlighting = get_bool_opt(options,
'sourcemod', True)
- self._functions = []
+ self._functions = set()
if self.smhighlighting:
from pygments.lexers._sourcemodbuiltins import FUNCTIONS
- self._functions.extend(FUNCTIONS)
+ self._functions.update(FUNCTIONS)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
@@ -3213,7 +3237,7 @@ class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
@@ -3294,7 +3318,7 @@ class NSISLexer(RegexLexer):
"""
For `NSIS <http://nsis.sourceforge.net/>`_ scripts.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'NSIS'
aliases = ['nsis', 'nsi', 'nsh']
@@ -3416,9 +3440,9 @@ class NSISLexer(RegexLexer):
class RPMSpecLexer(RegexLexer):
"""
- For RPM *.spec files
+ For RPM ``.spec`` files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'RPMSpec'
@@ -3494,10 +3518,10 @@ class AutoItLexer(RegexLexer):
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'AutoIt'
- aliases = ['autoit', 'Autoit']
+ aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
@@ -3624,7 +3648,7 @@ class AutoItLexer(RegexLexer):
(r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name),
(r'\\|\'', Text),
(r'\`([\,\%\`abfnrtv\-\+;])', String.Escape),
- (r'_\n', Text), # Line continuation
+ (r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
@@ -3665,3 +3689,198 @@ class AutoItLexer(RegexLexer):
(r'[^\S\n]', Text),
],
}
+
+
+class RexxLexer(RegexLexer):
+ """
+ `Rexx <http://www.rexxinfo.org/>`_ is a scripting language available for
+ a wide range of different platforms with its roots found on mainframe
+ systems. It is popular for I/O- and data based tasks and can act as glue
+ language to bind different applications together.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Rexx'
+ aliases = ['rexx', 'arexx']
+ filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
+ mimetypes = ['text/x-rexx']
+ flags = re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'\s', Whitespace),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'"', String, 'string_double'),
+ (r"'", String, 'string_single'),
+ (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
+ (r'([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b',
+ bygroups(Name.Function, Whitespace, Operator, Whitespace,
+ Keyword.Declaration)),
+ (r'([a-z_][a-z0-9_]*)(\s*)(:)',
+ bygroups(Name.Label, Whitespace, Operator)),
+ include('function'),
+ include('keyword'),
+ include('operator'),
+ (r'[a-z_][a-z0-9_]*', Text),
+ ],
+ 'function': [
+ (r'(abbrev|abs|address|arg|b2x|bitand|bitor|bitxor|c2d|c2x|'
+ r'center|charin|charout|chars|compare|condition|copies|d2c|'
+ r'd2x|datatype|date|delstr|delword|digits|errortext|form|'
+ r'format|fuzz|insert|lastpos|left|length|linein|lineout|lines|'
+ r'max|min|overlay|pos|queued|random|reverse|right|sign|'
+ r'sourceline|space|stream|strip|substr|subword|symbol|time|'
+ r'trace|translate|trunc|value|verify|word|wordindex|'
+ r'wordlength|wordpos|words|x2b|x2c|x2d|xrange)(\s*)(\()',
+ bygroups(Name.Builtin, Whitespace, Operator)),
+ ],
+ 'keyword': [
+ (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
+ r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
+ r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
+ r'while)\b', Keyword.Reserved),
+ ],
+ 'operator': [
+ (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
+ r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
+ r'¬>>|¬>|¬|\.|,)', Operator),
+ ],
+ 'string_double': [
+ (r'[^"\n]+', String),
+ (r'""', String),
+ (r'"', String, '#pop'),
+ (r'\n', Text, '#pop'), # Stray linefeed also terminates strings.
+ ],
+ 'string_single': [
+ (r'[^\'\n]', String),
+ (r'\'\'', String),
+ (r'\'', String, '#pop'),
+ (r'\n', Text, '#pop'), # Stray linefeed also terminates strings.
+ ],
+ 'comment': [
+ (r'[^*]+', Comment.Multiline),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'\*', Comment.Multiline),
+ ]
+ }
+
+ _c = lambda s: re.compile(s, re.MULTILINE)
+ _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
+ _ADDRESS_PATTERN = _c(r'^\s*address\s+')
+ _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
+ _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
+ _PROCEDURE_PATTERN = _c(r'^\s*([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b')
+ _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
+ _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
+ PATTERNS_AND_WEIGHTS = (
+ (_ADDRESS_COMMAND_PATTERN, 0.2),
+ (_ADDRESS_PATTERN, 0.05),
+ (_DO_WHILE_PATTERN, 0.1),
+ (_ELSE_DO_PATTERN, 0.1),
+ (_IF_THEN_DO_PATTERN, 0.1),
+ (_PROCEDURE_PATTERN, 0.5),
+ (_PARSE_ARG_PATTERN, 0.2),
+ )
+
+ def analyse_text(text):
+ """
+ Check for initial comment and patterns that distinguish Rexx from other
+ C-like languages.
+ """
+ if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
+ # Header matches MVS Rexx requirements, this is certainly a Rexx
+ # script.
+ return 1.0
+ elif text.startswith('/*'):
+ # Header matches general Rexx requirements; the source code might
+ # still be any language using C comments such as C++, C# or Java.
+ lowerText = text.lower()
+ result = sum(weight
+ for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
+ if pattern.search(lowerText)) + 0.01
+ return min(result, 1.0)
+
+
+class APLLexer(RegexLexer):
+ """
+ A simple APL lexer.
+
+ .. versionadded:: 2.0
+ """
+ name = 'APL'
+ aliases = ['apl']
+ filenames = ['*.apl']
+
+ tokens = {
+ 'root': [
+ # Whitespace
+ # ==========
+ (r'\s+', Text),
+ #
+ # Comment
+ # =======
+ # '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
+ (u'[⍝#].*$', Comment.Single),
+ #
+ # Strings
+ # =======
+ (r'\'((\'\')|[^\'])*\'', String.Single),
+ (r'"(("")|[^"])*"', String.Double), # supported by NGN APL
+ #
+ # Punctuation
+ # ===========
+ # This token type is used for diamond and parenthesis
+ # but not for bracket and ; (see below)
+ (u'[⋄◇()]', Punctuation),
+ #
+ # Array indexing
+ # ==============
+ # Since this token type is very important in APL, it is not included in
+ # the punctuation token type but rather in the following one
+ (r'[\[\];]', String.Regex),
+ #
+ # Distinguished names
+ # ===================
+ # following IBM APL2 standard
+ (u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
+ #
+ # Labels
+ # ======
+ # following IBM APL2 standard
+ # (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
+ #
+ # Variables
+ # =========
+ # following IBM APL2 standard
+ (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
+ #
+ # Numbers
+ # =======
+ (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
+ u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
+ Number),
+ #
+ # Operators
+ # ==========
+ (u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
+ (u'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
+ Operator),
+ #
+ # Constant
+ # ========
+ (u'⍬', Name.Constant),
+ #
+ # Quad symbol
+ # ===========
+ (u'[⎕⍞]', Name.Variable.Global),
+ #
+ # Arrows left/right
+ # =================
+ (u'[←→]', Keyword.Declaration),
+ #
+ # D-Fn
+ # ====
+ (u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
+ (r'[{}]', Keyword.Type),
+ ],
+ }
diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py
index c1ad710f..fc8cbb6f 100644
--- a/pygments/lexers/parsers.py
+++ b/pygments/lexers/parsers.py
@@ -5,7 +5,7 @@
Lexers for parser generators.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -38,7 +38,7 @@ class RagelLexer(RegexLexer):
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel'
@@ -132,7 +132,7 @@ class RagelEmbeddedLexer(RegexLexer):
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Embedded Ragel'
@@ -205,14 +205,14 @@ class RagelEmbeddedLexer(RegexLexer):
}
def analyse_text(text):
- return '@LANG: indep' in text or 0.1
+ return '@LANG: indep' in text
class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Ruby Host'
@@ -231,7 +231,7 @@ class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in C Host'
@@ -250,7 +250,7 @@ class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in D Host'
@@ -268,7 +268,7 @@ class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in CPP Host'
@@ -286,7 +286,7 @@ class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Objective C Host'
@@ -306,7 +306,7 @@ class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Ragel in Java Host'
@@ -327,7 +327,7 @@ class AntlrLexer(RegexLexer):
Should not be called directly, instead
use DelegatingLexer for your target language.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
.. _ANTLR: http://www.antlr.org/
"""
@@ -524,7 +524,7 @@ class AntlrLexer(RegexLexer):
# """
# ANTLR with C Target
#
-# *New in Pygments 1.1*
+# .. versionadded:: 1.1
# """
#
# name = 'ANTLR With C Target'
@@ -541,7 +541,7 @@ class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With CPP Target'
@@ -560,7 +560,7 @@ class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With ObjectiveC Target'
@@ -580,7 +580,7 @@ class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With C# Target'
@@ -600,7 +600,7 @@ class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Python Target'
@@ -620,7 +620,7 @@ class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
- *New in Pygments 1.1*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Java Target'
@@ -640,7 +640,7 @@ class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Ruby Target'
@@ -660,7 +660,7 @@ class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With Perl Target'
@@ -680,7 +680,7 @@ class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'ANTLR With ActionScript Target'
@@ -700,7 +700,7 @@ class TreetopBaseLexer(RegexLexer):
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
tokens = {
@@ -767,7 +767,7 @@ class TreetopLexer(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Treetop'
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index b95faf93..b069b375 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -5,7 +5,7 @@
Lexers for various shells.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -27,13 +27,13 @@ class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
- '.bashrc', 'bashrc', '.bash_*', 'bash_*']
+ '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
@@ -47,8 +47,8 @@ class BashLexer(RegexLexer):
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
- r'select|continue|until|esac|elif)\s*\b',
- Keyword),
+ r'select|continue|until|esac|elif)(\s*)\b',
+ bygroups(Keyword, Text)),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
@@ -67,9 +67,11 @@ class BashLexer(RegexLexer):
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
- (r';', Text),
+ (r';', Punctuation),
+ (r'&', Punctuation),
+ (r'\|', Punctuation),
(r'\s+', Text),
- (r'[^=\s\[\]{}()$"\'`\\<]+', Text),
+ (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
@@ -99,14 +101,17 @@ class BashLexer(RegexLexer):
}
def analyse_text(text):
- return shebang_matches(text, r'(ba|z|)sh')
+ if shebang_matches(text, r'(ba|z|)sh'):
+ return 1
+ if text.startswith('$ '):
+ return 0.2
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Bash Session'
@@ -157,7 +162,7 @@ class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Shell Session'
@@ -174,7 +179,7 @@ class ShellSessionLexer(Lexer):
for match in line_re.finditer(text):
line = match.group()
- m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line)
+ m = re.match(r'^((?:\[?\S+@[^$#%]+\]?\s*)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
@@ -203,10 +208,10 @@ class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Batchfile'
- aliases = ['bat']
+ aliases = ['bat', 'batch', 'dosbatch', 'winbatch']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
@@ -223,9 +228,9 @@ class BatchLexer(RegexLexer):
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
- (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
- (r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
- (r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
+ (r'\b(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
+ (r'\b(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
+ (r'\b(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
@@ -259,7 +264,7 @@ class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Tcsh'
@@ -326,11 +331,11 @@ class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PowerShell'
- aliases = ['powershell', 'posh', 'ps1']
- filenames = ['*.ps1']
+ aliases = ['powershell', 'posh', 'ps1', 'psm1']
+ filenames = ['*.ps1','*.psm1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
@@ -342,7 +347,7 @@ class PowerShellLexer(RegexLexer):
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
- 'valuefromremainingarguments helpmessage try catch').split()
+ 'valuefromremainingarguments helpmessage try catch throw').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
@@ -368,12 +373,15 @@ class PowerShellLexer(RegexLexer):
tokens = {
'root': [
+ # we need to count pairs of parentheses for correct highlight
+ # of '$(...)' blocks in strings
+ (r'\(', Punctuation, 'child'),
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
(r'(&lt;|<)#', Comment.Multiline, 'multline'),
- (r'@"\n.*?\n"@', String.Heredoc),
+ (r'@"\n', String.Heredoc, 'heredoc-double'),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
@@ -387,7 +395,11 @@ class PowerShellLexer(RegexLexer):
(r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_][a-z0-9_]*', Name),
(r'\w+', Name),
- (r'[.,{}\[\]$()=+*/\\&%!~?^`|<>-]', Punctuation),
+ (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
+ ],
+ 'child': [
+ (r'\)', Punctuation, '#pop'),
+ include('root'),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
@@ -396,15 +408,17 @@ class PowerShellLexer(RegexLexer):
(r'[#&.]', Comment.Multiline),
],
'string': [
+ (r"`[0abfnrtv'\"\$`]", String.Escape),
(r'[^$`"]+', String.Double),
- (r'\$\(', String.Interpol, 'interpol'),
- (r'`"|""', String.Double),
+ (r'\$\(', Punctuation, 'child'),
+ (r'""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
- 'interpol': [
- (r'[^$)]+', String.Interpol),
- (r'\$\(', String.Interpol, '#push'),
- (r'\)', String.Interpol, '#pop'),
+ 'heredoc-double': [
+ (r'\n"@', String.Heredoc, '#pop'),
+ (r'\$\(', Punctuation, 'child'),
+ (r'[^@\n]+"]', String.Heredoc),
+ (r".", String.Heredoc),
]
}
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py
index 9b3cd508..9ea2e22c 100644
--- a/pygments/lexers/special.py
+++ b/pygments/lexers/special.py
@@ -5,16 +5,15 @@
Special lexers.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
-import cStringIO
from pygments.lexer import Lexer
from pygments.token import Token, Error, Text
-from pygments.util import get_choice_opt, b
+from pygments.util import get_choice_opt, text_type, BytesIO
__all__ = ['TextLexer', 'RawTokenLexer']
@@ -35,7 +34,7 @@ class TextLexer(Lexer):
_ttype_cache = {}
-line_re = re.compile(b('.*?\n'))
+line_re = re.compile(b'.*?\n')
class RawTokenLexer(Lexer):
"""
@@ -60,12 +59,12 @@ class RawTokenLexer(Lexer):
Lexer.__init__(self, **options)
def get_tokens(self, text):
- if isinstance(text, unicode):
+ if isinstance(text, text_type):
# raw token stream never has any non-ASCII characters
text = text.encode('ascii')
if self.compress == 'gz':
import gzip
- gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text))
+ gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
text = gzipfile.read()
elif self.compress == 'bz2':
import bz2
@@ -73,7 +72,7 @@ class RawTokenLexer(Lexer):
# do not call Lexer.get_tokens() because we do not want Unicode
# decoding to occur, and stripping is not optional.
- text = text.strip(b('\n')) + b('\n')
+ text = text.strip(b'\n') + b'\n'
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
@@ -81,7 +80,7 @@ class RawTokenLexer(Lexer):
length = 0
for match in line_re.finditer(text):
try:
- ttypestr, val = match.group().split(b('\t'), 1)
+ ttypestr, val = match.group().split(b'\t', 1)
except ValueError:
val = match.group().decode(self.encoding)
ttype = Error
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
index dcfd8fa8..73180772 100644
--- a/pygments/lexers/sql.py
+++ b/pygments/lexers/sql.py
@@ -34,7 +34,7 @@
The ``tests/examplefiles`` contains a few test files with data to be
parsed by these lexers.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -42,8 +42,9 @@ import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
from pygments.token import Punctuation, \
- Text, Comment, Operator, Keyword, Name, String, Number, Generic
+ Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
+from pygments.util import iteritems
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
@@ -124,7 +125,7 @@ class PostgresLexer(PostgresBase, RegexLexer):
"""
Lexer for the PostgreSQL dialect of SQL.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PostgreSQL SQL dialect'
@@ -169,14 +170,14 @@ class PlPgsqlLexer(PostgresBase, RegexLexer):
"""
Handle the extra syntax in Pl/pgSQL language.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PL/pgSQL'
aliases = ['plpgsql']
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
- tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
+ tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
@@ -210,7 +211,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer):
aliases = [] # not public
flags = re.IGNORECASE
- tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())
+ tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
tokens['root'].append(
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
@@ -244,19 +245,20 @@ class lookahead(object):
def send(self, i):
self._nextitem = i
return i
- def next(self):
+ def __next__(self):
if self._nextitem is not None:
ni = self._nextitem
self._nextitem = None
return ni
- return self.iter.next()
+ return next(self.iter)
+ next = __next__
class PostgresConsoleLexer(Lexer):
"""
Lexer for psql sessions.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'PostgreSQL console (psql)'
@@ -277,7 +279,7 @@ class PostgresConsoleLexer(Lexer):
insertions = []
while 1:
try:
- line = lines.next()
+ line = next(lines)
except StopIteration:
# allow the emission of partially collected items
# the repl loop will be broken below
@@ -314,7 +316,7 @@ class PostgresConsoleLexer(Lexer):
# Emit the output lines
out_token = Generic.Output
while 1:
- line = lines.next()
+ line = next(lines)
mprompt = re_prompt.match(line)
if mprompt is not None:
# push the line back to have it processed by the prompt
@@ -375,7 +377,7 @@ class SqlLexer(RegexLexer):
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
- r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
+ r'EXCEPTION|EXCEPT|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
@@ -523,7 +525,7 @@ class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'sqlite3con'
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index ff4a0453..72f81d63 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -5,7 +5,7 @@
Lexers for various template engines' markup.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -36,9 +36,10 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
- 'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer',
- 'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer',
- 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer']
+ 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
+ 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
+ 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
+ 'LassoCssLexer', 'LassoJavascriptLexer']
class ErbLexer(Lexer):
@@ -251,7 +252,9 @@ class VelocityLexer(RegexLexer):
(r"\b[0-9]+\b", Number),
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
- (r'\)', Punctuation, '#pop')
+ (r'\)', Punctuation, '#pop'),
+ (r'\[', Punctuation, '#push'),
+ (r'\]', Punctuation, '#pop'),
]
}
@@ -397,7 +400,7 @@ class MyghtyLexer(RegexLexer):
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
@@ -445,7 +448,7 @@ class MyghtyHtmlLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'HTML+Myghty'
@@ -462,7 +465,7 @@ class MyghtyXmlLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'XML+Myghty'
@@ -479,7 +482,7 @@ class MyghtyJavascriptLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
@@ -498,7 +501,7 @@ class MyghtyCssLexer(DelegatingLexer):
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'CSS+Myghty'
@@ -517,7 +520,7 @@ class MasonLexer(RegexLexer):
.. _mason templates: http://www.masonhq.com/
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Mason'
aliases = ['mason']
@@ -570,7 +573,7 @@ class MakoLexer(RegexLexer):
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
.. _mako templates: http://www.makotemplates.org/
"""
@@ -638,7 +641,7 @@ class MakoHtmlLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'HTML+Mako'
@@ -654,7 +657,7 @@ class MakoXmlLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'XML+Mako'
@@ -670,7 +673,7 @@ class MakoJavascriptLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'JavaScript+Mako'
@@ -688,7 +691,7 @@ class MakoCssLexer(DelegatingLexer):
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'CSS+Mako'
@@ -766,7 +769,7 @@ class CheetahHtmlLexer(DelegatingLexer):
"""
name = 'HTML+Cheetah'
- aliases = ['html+cheetah', 'html+spitfire']
+ aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
def __init__(self, **options):
@@ -1258,7 +1261,7 @@ class HtmlDjangoLexer(DelegatingLexer):
"""
name = 'HTML+Django/Jinja'
- aliases = ['html+django', 'html+jinja']
+ aliases = ['html+django', 'html+jinja', 'htmldjango']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+django', 'text/html+jinja']
@@ -1341,7 +1344,7 @@ class JspRootLexer(RegexLexer):
Base for the `JspLexer`. Yields `Token.Other` for areas outside of
JSP tags.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
tokens = {
@@ -1365,7 +1368,7 @@ class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Java Server Page'
aliases = ['jsp']
@@ -1388,7 +1391,7 @@ class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'Evoque'
aliases = ['evoque']
@@ -1441,7 +1444,7 @@ class EvoqueHtmlLexer(DelegatingLexer):
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
@@ -1457,7 +1460,7 @@ class EvoqueXmlLexer(DelegatingLexer):
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
@@ -1476,23 +1479,28 @@ class ColdfusionLexer(RegexLexer):
aliases = ['cfs']
filenames = []
mimetypes = []
- flags = re.IGNORECASE | re.MULTILINE
+ flags = re.IGNORECASE
tokens = {
'root': [
- (r'//.*', Comment),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
- (r'<=|>=|<|>', Operator),
+ (r'<=|>=|<|>|==', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
+ (r'\?', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
- (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
+ (r'(if|else|len|var|case|default|break|switch|component|property|function|do|try|catch|in|continue|for|return|while)\b', Keyword),
+ (r'(required|any|array|binary|boolean|component|date|guid|numeric|query|string|struct|uuid|xml)\b', Keyword),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(application|session|client|cookie|super|this|variables|arguments)\b', Name.Constant),
(r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
@@ -1556,7 +1564,7 @@ class ColdfusionHtmlLexer(DelegatingLexer):
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
- filenames = ['*.cfm', '*.cfml', '*.cfc']
+ filenames = ['*.cfm', '*.cfml']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
@@ -1564,11 +1572,25 @@ class ColdfusionHtmlLexer(DelegatingLexer):
**options)
+class ColdfusionCFCLexer(DelegatingLexer):
+ """
+ Coldfusion markup/script components
+ """
+ name = 'Coldfusion CFC'
+ aliases = ['cfc']
+ filenames = ['*.cfc']
+ mimetypes = []
+
+ def __init__(self, **options):
+ super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
+ **options)
+
+
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Scalate Server Page'
aliases = ['ssp']
@@ -1594,7 +1616,7 @@ class TeaTemplateRootLexer(RegexLexer):
Base for the `TeaTemplateLexer`. Yields `Token.Other` for areas outside of
code blocks.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
tokens = {
@@ -1615,7 +1637,7 @@ class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Tea'
aliases = ['tea']
@@ -1642,7 +1664,7 @@ class LassoHtmlLexer(DelegatingLexer):
Nested JavaScript and CSS is also highlighted.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'HTML+Lasso'
@@ -1670,7 +1692,7 @@ class LassoXmlLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'XML+Lasso'
@@ -1694,7 +1716,7 @@ class LassoCssLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'CSS+Lasso'
@@ -1720,7 +1742,7 @@ class LassoJavascriptLexer(DelegatingLexer):
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
diff --git a/pygments/lexers/text.py b/pygments/lexers/text.py
index 5e340893..1bab62f3 100644
--- a/pygments/lexers/text.py
+++ b/pygments/lexers/text.py
@@ -5,7 +5,7 @@
Lexers for non-source code file types.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -25,7 +25,7 @@ __all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer',
- 'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer']
+ 'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer', 'EbnfLexer']
class IniLexer(RegexLexer):
@@ -34,7 +34,7 @@ class IniLexer(RegexLexer):
"""
name = 'INI'
- aliases = ['ini', 'cfg']
+ aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg']
mimetypes = ['text/x-ini']
@@ -61,7 +61,7 @@ class RegeditLexer(RegexLexer):
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'reg'
@@ -102,11 +102,11 @@ class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Properties'
- aliases = ['properties']
+ aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
@@ -124,11 +124,11 @@ class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'Debian Sourcelist'
- aliases = ['sourceslist', 'sources.list']
+ aliases = ['sourceslist', 'sources.list', 'debsources']
filenames = ['sources.list']
mimetype = ['application/x-debian-sourceslist']
@@ -180,7 +180,7 @@ class MakefileLexer(Lexer):
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
- filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
+ filenames = ['*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
@@ -207,12 +207,17 @@ class MakefileLexer(Lexer):
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
+ def analyse_text(text):
+ # Many makefiles have $(BIG_CAPS) style variables
+ if re.search(r'\$\([A-Z_]+\)', text):
+ return 0.1
+
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Base Makefile'
@@ -222,8 +227,10 @@ class BaseMakefileLexer(RegexLexer):
tokens = {
'root': [
+ # recipes (need to allow spaces because of expandtabs)
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
- (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
+ # special variables
+ (r'\$[<@$+%?|*]', Keyword),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
@@ -238,7 +245,15 @@ class BaseMakefileLexer(RegexLexer):
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
- # TODO: add paren handling (grr)
+ # expansions
+ (r'\$\(', Keyword, 'expansion'),
+ ],
+ 'expansion': [
+ (r'[^$a-zA-Z_)]+', Text),
+ (r'[a-zA-Z_]+', Name.Variable),
+ (r'\$', Keyword),
+ (r'\(', Keyword, '#push'),
+ (r'\)', Keyword, '#pop'),
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
@@ -246,12 +261,13 @@ class BaseMakefileLexer(RegexLexer):
(r'\s+', Text),
],
'block-header': [
- (r'[^,\\\n#]+', Number),
- (r',', Punctuation),
- (r'#.*?\n', Comment),
+ (r'[,|]', Punctuation),
+ (r'#.*?\n', Comment, '#pop'),
(r'\\\n', Text), # line continuation
- (r'\\.', Text),
- (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
+ (r'\$\(', Keyword, 'expansion'),
+ (r'[a-zA-Z_]+', Name),
+ (r'\n', Text, '#pop'),
+ (r'.', Text),
],
}
@@ -297,7 +313,7 @@ class DarcsPatchLexer(RegexLexer):
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
"""
name = 'Darcs Patch'
aliases = ['dpatch']
@@ -410,7 +426,7 @@ class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'BBCode'
@@ -501,7 +517,7 @@ class GroffLexer(RegexLexer):
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'Groff'
@@ -556,7 +572,7 @@ class ApacheConfLexer(RegexLexer):
Lexer for configuration files following the Apache config file
format.
- *New in Pygments 0.6.*
+ .. versionadded:: 0.6
"""
name = 'ApacheConf'
@@ -595,7 +611,7 @@ class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
"""
name = 'MoinMoin/Trac Wiki markup'
@@ -640,14 +656,17 @@ class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
- *New in Pygments 0.7.*
+ .. versionadded:: 0.7
Additional options accepted:
`handlecodeblocks`
- Highlight the contents of ``.. sourcecode:: langauge`` and
- ``.. code:: language`` directives with a lexer for the given
- language (default: ``True``). *New in Pygments 0.8.*
+ Highlight the contents of ``.. sourcecode:: language``,
+ ``.. code:: language`` and ``.. code-block:: language``
+ directives with a lexer for the given language (default:
+ ``True``).
+
+ .. versionadded:: 0.8
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
@@ -731,7 +750,7 @@ class RstLexer(RegexLexer):
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
- (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
+ (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
@@ -755,7 +774,7 @@ class RstLexer(RegexLexer):
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
- (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
+ (r'^([^\s].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
@@ -806,7 +825,7 @@ class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
- *New in Pygments 0.8.*
+ .. versionadded:: 0.8
"""
name = 'VimL'
aliases = ['vim']
@@ -823,7 +842,7 @@ class VimLexer(RegexLexer):
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
- (r"'(\\\\|\\'|[^\n'])*'", String.Single),
+ (r"'(''|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
@@ -890,7 +909,7 @@ class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
@@ -918,7 +937,7 @@ class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'SquidConf'
@@ -1050,10 +1069,10 @@ class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'Debian Control file'
- aliases = ['control']
+ aliases = ['control', 'debcontrol']
filenames = ['control']
tokens = {
@@ -1120,7 +1139,7 @@ class YamlLexer(ExtendedRegexLexer):
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'YAML'
@@ -1522,7 +1541,7 @@ class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
@@ -1550,7 +1569,7 @@ class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
@@ -1596,7 +1615,7 @@ class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
- *New in Pygments 1.2.*
+ .. versionadded:: 1.2
"""
name = 'CMake'
aliases = ['cmake']
@@ -1631,7 +1650,7 @@ class CMakeLexer(RegexLexer):
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
- (r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
+ (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
@@ -1640,6 +1659,7 @@ class CMakeLexer(RegexLexer):
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
+ (r'(\$<)(.+?)(>)', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
@@ -1656,7 +1676,7 @@ class CMakeLexer(RegexLexer):
],
'ws': [
(r'[ \t]+', Text),
- (r'#.+\n', Comment),
+ (r'#.*\n', Comment),
]
}
@@ -1665,7 +1685,7 @@ class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'HTTP'
@@ -1709,12 +1729,12 @@ class HttpLexer(RegexLexer):
tokens = {
'root': [
- (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE)( +)([^ ]+)( +)'
- r'(HTTPS?)(/)(1\.[01])(\r?\n|$)',
+ (r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
+ r'(HTTP)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
- (r'(HTTPS?)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
+ (r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
@@ -1734,7 +1754,7 @@ class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
@@ -1806,7 +1826,7 @@ class HxmlLexer(RegexLexer):
"""
Lexer for `haXe build <http://haxe.org/doc/compiler>`_ files.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Hxml'
aliases = ['haxeml', 'hxml']
@@ -1841,3 +1861,53 @@ class HxmlLexer(RegexLexer):
(r'#.*', Comment.Single)
]
}
+
+
+class EbnfLexer(RegexLexer):
+ """
+ Lexer for `ISO/IEC 14977 EBNF
+ <http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
+ grammars.
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'EBNF'
+ aliases = ['ebnf']
+ filenames = ['*.ebnf']
+ mimetypes = ['text/x-ebnf']
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ include('comment_start'),
+ include('identifier'),
+ (r'=', Operator, 'production'),
+ ],
+ 'production': [
+ include('whitespace'),
+ include('comment_start'),
+ include('identifier'),
+ (r'"[^"]*"', String.Double),
+ (r"'[^']*'", String.Single),
+ (r'(\?[^?]*\?)', Name.Entity),
+ (r'[\[\]{}(),|]', Punctuation),
+ (r'-', Operator),
+ (r';', Punctuation, '#pop'),
+ ],
+ 'whitespace': [
+ (r'\s+', Text),
+ ],
+ 'comment_start': [
+ (r'\(\*', Comment.Multiline, 'comment'),
+ ],
+ 'comment': [
+ (r'[^*)]', Comment.Multiline),
+ include('comment_start'),
+ (r'\*\)', Comment.Multiline, '#pop'),
+ (r'[*)]', Comment.Multiline),
+ ],
+ 'identifier': [
+ (r'([a-zA-Z][a-zA-Z0-9 \-]*)', Keyword),
+ ],
+ }
diff --git a/pygments/lexers/web.py b/pygments/lexers/web.py
index dc8c7c5f..c975ad80 100644
--- a/pygments/lexers/web.py
+++ b/pygments/lexers/web.py
@@ -5,7 +5,7 @@
Lexers for web-related languages and markup.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -13,11 +13,11 @@ import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
- include, this
+ include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Other, Punctuation, Literal
+ Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
- html_doctype_matches, unirange
+ html_doctype_matches, unirange, iteritems
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
@@ -27,7 +27,8 @@ __all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
- 'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer']
+ 'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer',
+ 'KalLexer', 'CirruLexer', 'MaskLexer']
class JavascriptLexer(RegexLexer):
@@ -67,7 +68,7 @@ class JavascriptLexer(RegexLexer):
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
- r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+ r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
@@ -94,7 +95,7 @@ class JsonLexer(RegexLexer):
"""
For JSON data structures.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'JSON'
@@ -177,7 +178,7 @@ class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
- *New in Pygments 0.9.*
+ .. versionadded:: 0.9
"""
name = 'ActionScript'
@@ -261,7 +262,7 @@ class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
- *New in Pygments 0.11.*
+ .. versionadded:: 0.11
"""
name = 'ActionScript 3'
@@ -363,7 +364,7 @@ class CssLexer(RegexLexer):
(r'\#[a-zA-Z0-9_-]+', Name.Function),
(r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
- (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
+ (r'[~\^\*!%&$\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
@@ -398,19 +399,19 @@ class CssLexer(RegexLexer):
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
- r'min-height|min-width|opacity|orphans|outline|outline-color|'
- r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
+ r'min-height|min-width|opacity|orphans|outline-color|'
+ r'outline-style|outline-width|outline|overflow(?:-x|-y)?|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
- r'pause-after|pause-before|pause|pitch|pitch-range|'
+ r'pause-after|pause-before|pause|pitch-range|pitch|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
- r'widows|width|word-spacing|z-index|bottom|left|'
+ r'widows|width|word-spacing|z-index|bottom|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
- r'behind|below|bidi-override|blink|block|bold|bolder|both|'
+ r'behind|below|bidi-override|blink|block|bolder|bold|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
@@ -420,7 +421,7 @@ class CssLexer(RegexLexer):
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
- r'left-side|leftwards|level|lighter|line-through|list-item|'
+ r'left-side|leftwards|left|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
@@ -429,11 +430,11 @@ class CssLexer(RegexLexer):
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
- r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
+ r'slower|slow|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
- r'table-row-group|text|text-bottom|text-top|thick|thin|'
+ r'table-row-group|text-bottom|text-top|text|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
@@ -466,7 +467,9 @@ class CssLexer(RegexLexer):
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
- (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
+ (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number),
+ # Separate regex for percentages, as can't do word boundaries with %
+ (r'[\.-]?[0-9]*[\.]?[0-9]+%', Number),
(r'-?[0-9]+', Number),
(r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
@@ -481,7 +484,7 @@ class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Objective-J'
@@ -722,8 +725,10 @@ class HtmlLexer(RegexLexer):
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
- (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
- (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+ # note: this allows tag names not used in HTML like <x:with-dash>,
+ # this is to support yet-unknown template engines and the like
+ (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
@@ -831,7 +836,8 @@ class PhpLexer(RegexLexer):
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
- r'catch|throw|this|use|namespace|trait)\b', Keyword),
+ r'catch|throw|this|use|namespace|trait|yield|'
+ r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
@@ -841,6 +847,7 @@ class PhpLexer(RegexLexer):
(r'0[0-7]+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
+ (r'0b[01]+', Number.Binary),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
@@ -855,7 +862,7 @@ class PhpLexer(RegexLexer):
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
- (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
+ (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+?\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
@@ -884,7 +891,7 @@ class PhpLexer(RegexLexer):
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._phpbuiltins import MODULES
- for key, value in MODULES.iteritems():
+ for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
@@ -914,7 +921,7 @@ class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
@@ -1006,7 +1013,8 @@ class XmlLexer(RegexLexer):
name = 'XML'
aliases = ['xml']
- filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
+ filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
+ '*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
@@ -1048,7 +1056,7 @@ class XsltLexer(XmlLexer):
'''
A lexer for XSLT.
- *New in Pygments 0.10.*
+ .. versionadded:: 0.10
'''
name = 'XSLT'
@@ -1086,7 +1094,7 @@ class MxmlLexer(RegexLexer):
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
- *New in Pygments 1.1.*
+ .. versionadded:: 1.1
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
@@ -1125,227 +1133,840 @@ class MxmlLexer(RegexLexer):
}
-class HaxeLexer(RegexLexer):
+class HaxeLexer(ExtendedRegexLexer):
"""
- For haXe source code (http://haxe.org/).
+ For Haxe source code (http://haxe.org/).
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
- name = 'haXe'
- aliases = ['hx', 'haXe']
- filenames = ['*.hx']
- mimetypes = ['text/haxe']
+ name = 'Haxe'
+ aliases = ['hx', 'haxe', 'hxsl']
+ filenames = ['*.hx', '*.hxsl']
+ mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx']
+
+ # keywords extracted from lexer.mll in the haxe compiler source
+ keyword = (r'(?:function|class|static|var|if|else|while|do|for|'
+ r'break|return|continue|extends|implements|import|'
+ r'switch|case|default|public|private|try|untyped|'
+ r'catch|new|this|throw|extern|enum|in|interface|'
+ r'cast|override|dynamic|typedef|package|'
+ r'inline|using|null|true|false|abstract)\b')
+
+ # idtype in lexer.mll
+ typeid = r'_*[A-Z][_a-zA-Z0-9]*'
+
+ # combined ident and dollar and idtype
+ ident = r'(?:_*[a-z][_a-zA-Z0-9]*|_+[0-9][_a-zA-Z0-9]*|' + typeid + \
+ '|_+|\$[_a-zA-Z0-9]+)'
+
+ binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|'
+ r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|'
+ r'/|\-|=>|=)')
- ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
- typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
- key_prop = r'(?:default|null|never)'
- key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'
+ # ident except keywords
+ ident_no_keyword = r'(?!' + keyword + ')' + ident
flags = re.DOTALL | re.MULTILINE
+ preproc_stack = []
+
+ def preproc_callback(self, match, ctx):
+ proc = match.group(2)
+
+ if proc == 'if':
+ # store the current stack
+ self.preproc_stack.append(ctx.stack[:])
+ elif proc in ['else', 'elseif']:
+ # restore the stack back to right before #if
+ if self.preproc_stack: ctx.stack = self.preproc_stack[-1][:]
+ elif proc == 'end':
+ # remove the saved stack of previous #if
+ if self.preproc_stack: self.preproc_stack.pop()
+
+        # #if and #elseif should be followed by an expr
+ if proc in ['if', 'elseif']:
+ ctx.stack.append('preproc-expr')
+
+        # #error can optionally be followed by the error msg
+ if proc in ['error']:
+ ctx.stack.append('preproc-error')
+
+ yield match.start(), Comment.Preproc, '#' + proc
+ ctx.pos = match.end()
+
+
tokens = {
'root': [
- include('whitespace'),
- include('comments'),
- (key_decl_mod, Keyword.Declaration),
- include('enumdef'),
- include('typedef'),
- include('classdef'),
- include('imports'),
+ include('spaces'),
+ include('meta'),
+ (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')),
+ (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')),
+ (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')),
+ (r'(?:extern|private)\b', Keyword.Declaration),
+ (r'(?:abstract)\b', Keyword.Declaration, 'abstract'),
+ (r'(?:class|interface)\b', Keyword.Declaration, 'class'),
+ (r'(?:enum)\b', Keyword.Declaration, 'enum'),
+ (r'(?:typedef)\b', Keyword.Declaration, 'typedef'),
+
+            # top-level expression
+            # although it is not supported in Haxe, it is common to write
+            # expressions in web pages; the positive lookahead here is to
+            # prevent an infinite loop at the EOF
+ (r'(?=.)', Text, 'expr-statement'),
+ ],
+
+ # space/tab/comment/preproc
+ 'spaces': [
+ (r'\s+', Text),
+ (r'//[^\n\r]*', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'(#)(if|elseif|else|end|error)\b', preproc_callback),
],
- # General constructs
- 'comments': [
- (r'//.*?\n', Comment.Single),
- (r'/\*.*?\*/', Comment.Multiline),
- (r'#[^\n]*', Comment.Preproc),
+ 'string-single-interpol': [
+ (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')),
+ (r'\$\$', String.Escape),
+ (r'\$(?=' + ident + ')', String.Interpol, 'ident'),
+ include('string-single'),
],
- 'whitespace': [
- include('comments'),
- (r'\s+', Text),
+
+ 'string-single': [
+ (r"'", String.Single, '#pop'),
+ (r'\\.', String.Escape),
+ (r'.', String.Single),
],
- 'codekeywords': [
- (r'\b(if|else|while|do|for|in|break|continue|'
- r'return|switch|case|try|catch|throw|null|trace|'
- r'new|this|super|untyped|cast|callback|here)\b',
- Keyword.Reserved),
+
+ 'string-double': [
+ (r'"', String.Double, '#pop'),
+ (r'\\.', String.Escape),
+ (r'.', String.Double),
],
- 'literals': [
- (r'0[xX][0-9a-fA-F]+', Number.Hex),
- (r'[0-9]+', Number.Integer),
- (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
- (r"'(\\\\|\\'|[^'])*'", String.Single),
- (r'"(\\\\|\\"|[^"])*"', String.Double),
- (r'~/([^\n])*?/[gisx]*', String.Regex),
- (r'\b(true|false|null)\b', Keyword.Constant),
- ],
- 'codeblock': [
- include('whitespace'),
- include('new'),
- include('case'),
- include('anonfundef'),
- include('literals'),
- include('vardef'),
- include('codekeywords'),
- (r'[();,\[\]]', Punctuation),
- (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
- r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>>|>>|\+|\-|\*|/|%|'
- r'!|\+\+|\-\-|~|\.|\?|\:)',
- Operator),
- (ident, Name),
-
- (r'}', Punctuation,'#pop'),
- (r'{', Punctuation,'#push'),
- ],
-
- # Instance/Block level constructs
- 'propertydef': [
- (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
- bygroups(Punctuation, Keyword.Reserved, Punctuation,
- Keyword.Reserved, Punctuation)),
+
+ 'string-interpol-close': [
+ (r'\$'+ident, String.Interpol),
+ (r'\}', String.Interpol, '#pop'),
],
- 'new': [
- (r'\bnew\b', Keyword, 'typedecl'),
+
+ 'package': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\.', Punctuation, 'import-ident'),
+ (r'', Text, '#pop'),
],
- 'case': [
- (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
- bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
- 'funargdecl'),
+
+ 'import': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\*', Keyword), # wildcard import
+ (r'\.', Punctuation, 'import-ident'),
+ (r'in', Keyword.Namespace, 'ident'),
+ (r'', Text, '#pop'),
],
- 'vardef': [
- (r'\b(var)(\s+)(' + ident + ')',
- bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
+
+ 'import-ident': [
+ include('spaces'),
+ (r'\*', Keyword, '#pop'), # wildcard import
+ (ident, Name.Namespace, '#pop'),
],
- 'vardecl': [
- include('whitespace'),
- include('typelabel'),
- (r'=', Operator,'#pop'),
- (r';', Punctuation,'#pop'),
+
+ 'using': [
+ include('spaces'),
+ (ident, Name.Namespace),
+ (r'\.', Punctuation, 'import-ident'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'preproc-error': [
+ (r'\s+', Comment.Preproc),
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'preproc-expr': [
+ (r'\s+', Comment.Preproc),
+ (r'\!', Comment.Preproc),
+ (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')),
+
+ (ident, Comment.Preproc, '#pop'),
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
],
- 'instancevardef': [
- (key_decl_mod,Keyword.Declaration),
- (r'\b(var)(\s+)(' + ident + ')',
- bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
- 'instancevardecl'),
+
+ 'preproc-parenthesis': [
+ (r'\s+', Comment.Preproc),
+ (r'\)', Comment.Preproc, '#pop'),
+ ('', Text, 'preproc-expr-in-parenthesis'),
],
- 'instancevardecl': [
- include('vardecl'),
- include('propertydef'),
+
+ 'preproc-expr-chain': [
+ (r'\s+', Comment.Preproc),
+ (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')),
+ (r'', Text, '#pop'),
],
- 'anonfundef': [
- (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
+ # same as 'preproc-expr' but able to chain 'preproc-expr-chain'
+ 'preproc-expr-in-parenthesis': [
+ (r'\s+', Comment.Preproc),
+ (r'\!', Comment.Preproc),
+ (r'\(', Comment.Preproc,
+ ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')),
+
+ (ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')),
+ (r"'", String.Single,
+ ('#pop', 'preproc-expr-chain', 'string-single')),
+ (r'"', String.Double,
+ ('#pop', 'preproc-expr-chain', 'string-double')),
],
- 'instancefundef': [
- (key_decl_mod, Keyword.Declaration),
- (r'\b(function)(\s+)(' + ident + ')',
- bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
+
+ 'abstract' : [
+ include('spaces'),
+ (r'', Text, ('#pop', 'abstract-body', 'abstract-relation',
+ 'abstract-opaque', 'type-param-constraint', 'type-name')),
],
- 'fundecl': [
- include('whitespace'),
- include('typelabel'),
- include('generictypedecl'),
- (r'\(',Punctuation,'funargdecl'),
- (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
- (r'{',Punctuation,('#pop','codeblock')),
- (r';',Punctuation,'#pop'),
- ],
- 'funargdecl': [
- include('whitespace'),
- (ident, Name.Variable),
- include('typelabel'),
- include('literals'),
- (r'=', Operator),
+
+ 'abstract-body' : [
+ include('spaces'),
+ (r'\{', Punctuation, ('#pop', 'class-body')),
+ ],
+
+ 'abstract-opaque' : [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'abstract-relation': [
+ include('spaces'),
+ (r'(?:to|from)', Keyword.Declaration, 'type'),
(r',', Punctuation),
+ (r'', Text, '#pop'),
+ ],
+
+ 'meta': [
+ include('spaces'),
+ (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')),
+ ],
+
+ # optional colon
+ 'meta-colon': [
+ include('spaces'),
+ (r':', Name.Decorator, '#pop'),
+ (r'', Text, '#pop'),
+ ],
+
+ # same as 'ident' but set token as Name.Decorator instead of Name
+ 'meta-ident': [
+ include('spaces'),
+ (ident, Name.Decorator, '#pop'),
+ ],
+
+ 'meta-body': [
+ include('spaces'),
+ (r'\(', Name.Decorator, ('#pop', 'meta-call')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'meta-call': [
+ include('spaces'),
+ (r'\)', Name.Decorator, '#pop'),
+ (r'', Text, ('#pop', 'meta-call-sep', 'expr')),
+ ],
+
+ 'meta-call-sep': [
+ include('spaces'),
+ (r'\)', Name.Decorator, '#pop'),
+ (r',', Punctuation, ('#pop', 'meta-call')),
+ ],
+
+ 'typedef': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'typedef-body', 'type-param-constraint',
+ 'type-name')),
+ ],
+
+ 'typedef-body': [
+ include('spaces'),
+ (r'=', Operator, ('#pop', 'optional-semicolon', 'type')),
+ ],
+
+ 'enum': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'enum-body', 'bracket-open',
+ 'type-param-constraint', 'type-name')),
+ ],
+
+ 'enum-body': [
+ include('spaces'),
+ include('meta'),
+ (r'\}', Punctuation, '#pop'),
+ (ident_no_keyword, Name, ('enum-member', 'type-param-constraint')),
+ ],
+
+ 'enum-member': [
+ include('spaces'),
+ (r'\(', Punctuation,
+ ('#pop', 'semicolon', 'flag', 'function-param')),
+ (r'', Punctuation, ('#pop', 'semicolon', 'flag')),
+ ],
+
+ 'class': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'class-body', 'bracket-open', 'extends',
+ 'type-param-constraint', 'type-name')),
+ ],
+
+ 'extends': [
+ include('spaces'),
+ (r'(?:extends|implements)\b', Keyword.Declaration, 'type'),
+ (r',', Punctuation), # the comma is made optional here, since haxe2
+ # requires the comma but haxe3 does not allow it
+ (r'', Text, '#pop'),
+ ],
+
+ 'bracket-open': [
+ include('spaces'),
+ (r'\{', Punctuation, '#pop'),
+ ],
+
+ 'bracket-close': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+
+ 'class-body': [
+ include('spaces'),
+ include('meta'),
+ (r'\}', Punctuation, '#pop'),
+ (r'(?:static|public|private|override|dynamic|inline|macro)\b',
+ Keyword.Declaration),
+ (r'', Text, 'class-member'),
+ ],
+
+ 'class-member': [
+ include('spaces'),
+ (r'(var)\b', Keyword.Declaration,
+ ('#pop', 'optional-semicolon', 'prop')),
+ (r'(function)\b', Keyword.Declaration,
+ ('#pop', 'optional-semicolon', 'class-method')),
+ ],
+
+ # local function, anonymous or not
+ 'function-local': [
+ include('spaces'),
+ (r'(' + ident_no_keyword + ')?', Name.Function,
+ ('#pop', 'expr', 'flag', 'function-param',
+ 'parenthesis-open', 'type-param-constraint')),
+ ],
+
+ 'optional-expr': [
+ include('spaces'),
+ include('expr'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'class-method': [
+ include('spaces'),
+ (ident, Name.Function, ('#pop', 'optional-expr', 'flag',
+ 'function-param', 'parenthesis-open',
+ 'type-param-constraint')),
+ ],
+
+ # function arguments
+ 'function-param': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
(r'\?', Punctuation),
+ (ident_no_keyword, Name,
+ ('#pop', 'function-param-sep', 'assign', 'flag')),
+ ],
+
+ 'function-param-sep': [
+ include('spaces'),
(r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'function-param')),
],
- 'typelabel': [
- (r':', Punctuation, 'type'),
+ # class property
+ # eg. var prop(default, null):String;
+ 'prop': [
+ include('spaces'),
+ (ident_no_keyword, Name, ('#pop', 'assign', 'flag', 'prop-get-set')),
],
- 'typedecl': [
- include('whitespace'),
- (typeid, Name.Class),
- (r'<', Punctuation, 'generictypedecl'),
- (r'(?=[{}()=,a-z])', Text,'#pop'),
+
+ 'prop-get-set': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close',
+ 'prop-get-set-opt', 'comma', 'prop-get-set-opt')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'prop-get-set-opt': [
+ include('spaces'),
+ (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'),
+ (ident_no_keyword, Text, '#pop'), #custom getter/setter
+ ],
+
+ 'expr-statement': [
+ include('spaces'),
+            # the semicolon is made optional here, just to avoid checking
+            # whether the last token is a bracket or not.
+ (r'', Text, ('#pop', 'optional-semicolon', 'expr')),
+ ],
+
+ 'expr': [
+ include('spaces'),
+ (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body',
+ 'meta-ident', 'meta-colon')),
+ (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator),
+ (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')),
+ (r'(?:inline)\b', Keyword.Declaration),
+ (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain',
+ 'function-local')),
+ (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')),
+ (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')),
+ (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')),
+ (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')),
+ (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')),
+ (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')),
+ (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')),
+ (r'(?:switch)\b', Keyword, ('#pop', 'switch')),
+ (r'(?:if)\b', Keyword, ('#pop', 'if')),
+ (r'(?:do)\b', Keyword, ('#pop', 'do')),
+ (r'(?:while)\b', Keyword, ('#pop', 'while')),
+ (r'(?:for)\b', Keyword, ('#pop', 'for')),
+ (r'(?:untyped|throw)\b', Keyword),
+ (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')),
+ (r'(?:macro)\b', Keyword, ('#pop', 'macro')),
+ (r'(?:continue|break)\b', Keyword, '#pop'),
+ (r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')),
+ (ident_no_keyword, Name, ('#pop', 'expr-chain')),
+
+ # Float
+ (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')),
+ (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')),
+
+ # String
+ (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')),
+ (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')),
+
+ # EReg
+ (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')),
+
+ # Array
+ (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')),
+ ],
+
+ 'expr-chain': [
+ include('spaces'),
+ (r'(?:\+\+|\-\-)', Operator),
+ (binop, Operator, ('#pop', 'expr')),
+ (r'(?:in)\b', Keyword, ('#pop', 'expr')),
+ (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')),
+ (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)),
+ (r'\[', Punctuation, 'array-access'),
+ (r'\(', Punctuation, 'call'),
+ (r'', Text, '#pop'),
],
+
+ # macro reification
+ 'macro': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'type')),
+ (r'', Text, ('#pop', 'expr')),
+ ],
+
+ # cast can be written as "cast expr" or "cast(expr, type)"
+ 'cast': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'parenthesis-close',
+ 'cast-type', 'expr')),
+ (r'', Text, ('#pop', 'expr')),
+ ],
+
+ # optionally give a type as the 2nd argument of cast()
+ 'cast-type': [
+ include('spaces'),
+ (r',', Punctuation, ('#pop', 'type')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'catch': [
+ include('spaces'),
+ (r'(?:catch)\b', Keyword, ('expr', 'function-param',
+ 'parenthesis-open')),
+ (r'', Text, '#pop'),
+ ],
+
+ # do-while loop
+ 'do': [
+ include('spaces'),
+ (r'', Punctuation, ('#pop', 'do-while', 'expr')),
+ ],
+
+ # the while after do
+ 'do-while': [
+ include('spaces'),
+ (r'(?:while)\b', Keyword, ('#pop', 'parenthesis',
+ 'parenthesis-open')),
+ ],
+
+ 'while': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
+ ],
+
+ 'for': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
+ ],
+
+ 'if': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr',
+ 'parenthesis')),
+ ],
+
+ 'else': [
+ include('spaces'),
+ (r'(?:else)\b', Keyword, ('#pop', 'expr')),
+ (r'', Text, '#pop'),
+ ],
+
+ 'switch': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'switch-body', 'bracket-open', 'expr')),
+ ],
+
+ 'switch-body': [
+ include('spaces'),
+ (r'(?:case|default)\b', Keyword, ('case-block', 'case')),
+ (r'\}', Punctuation, '#pop'),
+ ],
+
+ 'case': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ (r'', Text, ('#pop', 'case-sep', 'case-guard', 'expr')),
+ ],
+
+ 'case-sep': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'case')),
+ ],
+
+ 'case-guard': [
+ include('spaces'),
+ (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')),
+ (r'', Text, '#pop'),
+ ],
+
+ # optional multiple expr under a case
+ 'case-block': [
+ include('spaces'),
+ (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'new': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'call', 'parenthesis-open', 'type')),
+ ],
+
+ 'array-decl': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ (r'', Text, ('#pop', 'array-decl-sep', 'expr')),
+ ],
+
+ 'array-decl-sep': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'array-decl')),
+ ],
+
+ 'array-access': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'array-access-close', 'expr')),
+ ],
+
+ 'array-access-close': [
+ include('spaces'),
+ (r'\]', Punctuation, '#pop'),
+ ],
+
+ 'comma': [
+ include('spaces'),
+ (r',', Punctuation, '#pop'),
+ ],
+
+ 'colon': [
+ include('spaces'),
+ (r':', Punctuation, '#pop'),
+ ],
+
+ 'semicolon': [
+ include('spaces'),
+ (r';', Punctuation, '#pop'),
+ ],
+
+ 'optional-semicolon': [
+ include('spaces'),
+ (r';', Punctuation, '#pop'),
+ (r'', Text, '#pop'),
+ ],
+
+        # identifier that CAN be a Haxe keyword
+ 'ident': [
+ include('spaces'),
+ (ident, Name, '#pop'),
+ ],
+
+ 'dollar': [
+ include('spaces'),
+ (r'\{', Keyword, ('#pop', 'bracket-close', 'expr')),
+ (r'', Text, ('#pop', 'expr-chain')),
+ ],
+
+ 'type-name': [
+ include('spaces'),
+ (typeid, Name, '#pop'),
+ ],
+
+ 'type-full-name': [
+ include('spaces'),
+ (r'\.', Punctuation, 'ident'),
+ (r'', Text, '#pop'),
+ ],
+
'type': [
- include('whitespace'),
- (typeid, Name.Class),
- (r'<', Punctuation, 'generictypedecl'),
- (r'->', Keyword.Type),
- (r'(?=[{}(),;=])', Text, '#pop'),
+ include('spaces'),
+ (r'\?', Punctuation),
+ (ident, Name, ('#pop', 'type-check', 'type-full-name')),
+ (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')),
+ (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')),
],
- 'generictypedecl': [
- include('whitespace'),
- (typeid, Name.Class),
- (r'<', Punctuation, '#push'),
+
+ 'type-parenthesis': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'parenthesis-close', 'type')),
+ ],
+
+ 'type-check': [
+ include('spaces'),
+ (r'->', Punctuation, ('#pop', 'type')),
+ (r'<(?!=)', Punctuation, 'type-param'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'type-struct': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r'\?', Punctuation),
+ (r'>', Punctuation, ('comma', 'type')),
+ (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')),
+ include('class-body'),
+ ],
+
+ 'type-struct-sep': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'type-struct')),
+ ],
+
+ # type-param can be a normal type or a constant literal...
+ 'type-param-type': [
+ # Float
+ (r'\.[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.[0-9]+', Number.Float, '#pop'),
+ (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'),
+
+ # Int
+ (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'),
+ (r'[0-9]+', Number.Integer, '#pop'),
+
+ # String
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+
+ # EReg
+ (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'),
+
+ # Array
+ (r'\[', Operator, ('#pop', 'array-decl')),
+
+ include('type'),
+ ],
+
+ # type-param part of a type
+ # ie. the <A,B> path in Map<A,B>
+ 'type-param': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'type-param-sep', 'type-param-type')),
+ ],
+
+ 'type-param-sep': [
+ include('spaces'),
(r'>', Punctuation, '#pop'),
- (r',', Punctuation),
+ (r',', Punctuation, ('#pop', 'type-param')),
],
- # Top level constructs
- 'imports': [
- (r'(package|import|using)(\s+)([^;]+)(;)',
- bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
+ # optional type-param that may include constraint
+ # ie. <T:Constraint, T2:(ConstraintA,ConstraintB)>
+ 'type-param-constraint': [
+ include('spaces'),
+ (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep',
+ 'type-param-constraint-flag', 'type-name')),
+ (r'', Text, '#pop'),
],
- 'typedef': [
- (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
+
+ 'type-param-constraint-sep': [
+ include('spaces'),
+ (r'>', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'type-param-constraint-sep',
+ 'type-param-constraint-flag', 'type-name')),
],
- 'typedefprebody': [
- include('whitespace'),
- (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
- ('#pop', 'typedefbody')),
+
+ # the optional constraint inside type-param
+ 'type-param-constraint-flag': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')),
+ (r'', Text, '#pop'),
],
- 'enumdef': [
- (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
+
+ 'type-param-constraint-flag-type': [
+ include('spaces'),
+ (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep',
+ 'type')),
+ (r'', Text, ('#pop', 'type')),
],
- 'enumdefprebody': [
- include('whitespace'),
- (r'{', Punctuation, ('#pop','enumdefbody')),
+
+ 'type-param-constraint-flag-type-sep': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, 'type'),
],
- 'classdef': [
- (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
+
+ # a parenthesis expr that contain exactly one expr
+ 'parenthesis': [
+ include('spaces'),
+ (r'', Text, ('#pop', 'parenthesis-close', 'expr')),
],
- 'classdefprebody': [
- include('whitespace'),
- (r'(extends|implements)', Keyword.Declaration,'typedecl'),
- (r'{', Punctuation, ('#pop', 'classdefbody')),
+
+ 'parenthesis-open': [
+ include('spaces'),
+ (r'\(', Punctuation, '#pop'),
+ ],
+
+ 'parenthesis-close': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
],
- 'interfacedef': [
- (r'interface', Keyword.Declaration,
- ('interfacedefprebody', 'typedecl')),
+
+ 'var': [
+ include('spaces'),
+ (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag')),
],
- 'interfacedefprebody': [
- include('whitespace'),
- (r'(extends)', Keyword.Declaration, 'typedecl'),
- (r'{', Punctuation, ('#pop', 'classdefbody')),
- ],
-
- 'typedefbody': [
- include('whitespace'),
- include('instancevardef'),
- include('instancefundef'),
- (r'>', Punctuation, 'typedecl'),
- (r',', Punctuation),
- (r'}', Punctuation, '#pop'),
- ],
- 'enumdefbody': [
- include('whitespace'),
- (ident, Name.Variable.Instance),
- (r'\(', Punctuation, 'funargdecl'),
- (r';', Punctuation),
- (r'}', Punctuation, '#pop'),
- ],
- 'classdefbody': [
- include('whitespace'),
- include('instancevardef'),
- include('instancefundef'),
- (r'}', Punctuation, '#pop'),
- include('codeblock'),
+
+ # optional more var decl.
+ 'var-sep': [
+ include('spaces'),
+ (r',', Punctuation, ('#pop', 'var')),
+ (r'', Text, '#pop'),
+ ],
+
+ # optional assignment
+ 'assign': [
+ include('spaces'),
+ (r'=', Operator, ('#pop', 'expr')),
+ (r'', Text, '#pop'),
],
+
+ # optional type flag
+ 'flag': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'type')),
+ (r'', Text, '#pop'),
+ ],
+
+ # colon as part of a ternary operator (?:)
+ 'ternary': [
+ include('spaces'),
+ (r':', Operator, '#pop'),
+ ],
+
+ # function call
+ 'call': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r'', Text, ('#pop', 'call-sep', 'expr')),
+ ],
+
+ # after a call param
+ 'call-sep': [
+ include('spaces'),
+ (r'\)', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'call')),
+ ],
+
+ # bracket can be block or object
+ 'bracket': [
+ include('spaces'),
+ (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name,
+ ('#pop', 'bracket-check')),
+ (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')),
+ (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')),
+ (r'', Text, ('#pop', 'block')),
+ ],
+
+ 'bracket-check': [
+ include('spaces'),
+ (r':', Punctuation, ('#pop', 'object-sep', 'expr')), #is object
+ (r'', Text, ('#pop', 'block', 'optional-semicolon', 'expr-chain')), #is block
+ ],
+
+ # code block
+ 'block': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r'', Text, 'expr-statement'),
+ ],
+
+ # object in key-value pairs
+ 'object': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r'', Text, ('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string'))
+ ],
+
+ # a key of an object
+ 'ident-or-string': [
+ include('spaces'),
+ (ident_no_keyword, Name, '#pop'),
+ (r"'", String.Single, ('#pop', 'string-single')),
+ (r'"', String.Double, ('#pop', 'string-double')),
+ ],
+
+ # after a key-value pair in object
+ 'object-sep': [
+ include('spaces'),
+ (r'\}', Punctuation, '#pop'),
+ (r',', Punctuation, ('#pop', 'object')),
+ ],
+
+
+
}
def analyse_text(text):
@@ -1386,11 +2007,11 @@ class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Haml'
- aliases = ['haml', 'HAML']
+ aliases = ['haml']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
@@ -1663,11 +2284,11 @@ class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'Sass'
- aliases = ['sass', 'SASS']
+ aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
@@ -1734,7 +2355,7 @@ class SassLexer(ExtendedRegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in common_sass_tokens.iteritems():
+ for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
@@ -1781,7 +2402,7 @@ class ScssLexer(RegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in common_sass_tokens.iteritems():
+ for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
@@ -1793,11 +2414,11 @@ class CoffeeScriptLexer(RegexLexer):
.. _CoffeeScript: http://coffeescript.org
- *New in Pygments 1.3.*
+ .. versionadded:: 1.3
"""
name = 'CoffeeScript'
- aliases = ['coffee-script', 'coffeescript']
+ aliases = ['coffee-script', 'coffeescript', 'coffee']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
@@ -1826,10 +2447,10 @@ class CoffeeScriptLexer(RegexLexer):
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
- r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
- r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
+ r'\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
- (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
+ (r'(?:\([^()]*\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|'
@@ -1893,6 +2514,123 @@ class CoffeeScriptLexer(RegexLexer):
}
+class KalLexer(RegexLexer):
+ """
+ For `Kal`_ source code.
+
+ .. _Kal: http://rzimmerman.github.io/kal
+
+
+ .. versionadded:: 2.0
+ """
+
+ name = 'Kal'
+ aliases = ['kal']
+ filenames = ['*.kal']
+ mimetypes = ['text/kal', 'application/kal']
+
+ flags = re.DOTALL
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Text),
+ (r'###[^#].*?###', Comment.Multiline),
+ (r'#(?!##[^#]).*?\n', Comment.Single),
+ ],
+ 'functiondef': [
+ (r'[$a-zA-Z_][a-zA-Z0-9_\$]*\s*', Name.Function, '#pop'),
+ include('commentsandwhitespace'),
+ ],
+ 'classdef': [
+ (r'\binherits\s+from\b', Keyword),
+ (r'[$a-zA-Z_][a-zA-Z0-9_\$]*\s*\n', Name.Class, '#pop'),
+ (r'[$a-zA-Z_][a-zA-Z0-9_\$]*\s*', Name.Class),
+ include('commentsandwhitespace'),
+ ],
+ 'listcomprehension': [
+ (r'\]', Punctuation, '#pop'),
+ (r'\b(property|value)\b', Keyword),
+ include('root'),
+ ],
+ 'waitfor': [
+ (r'\n', Punctuation, '#pop'),
+ (r'\bfrom\b', Keyword),
+ include('root'),
+ ],
+ 'root': [
+ include('commentsandwhitespace'),
+ (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex),
+ (r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
+ Operator),
+ (r'\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|'
+ r'\bbut\b|\bbitwise\b|\bmod\b|\^|\bxor\b|\bexists\b|\bdoesnt\s+exist\b',
+ Operator.Word),
+ (r'(?:\([^()]+\))?\s*>', Name.Function),
+ (r'[{(]', Punctuation),
+ (r'\[', Punctuation, 'listcomprehension'),
+ (r'[})\]\.\,]', Punctuation),
+ (r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
+ (r'\bclass\b', Keyword.Declaration, 'classdef'),
+ (r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'),
+ (r'\b(me|this)(\.[$a-zA-Z_][a-zA-Z0-9_\.\$]*)?\b', Name.Variable.Instance),
+ (r'(?<![\.\$])(for(\s+(parallel|series))?|in|of|while|until|'
+ r'break|return|continue|'
+ r'when|if|unless|else|otherwise|except\s+when|'
+ r'throw|raise|fail\s+with|try|catch|finally|new|delete|'
+ r'typeof|instanceof|super|run\s+in\s+parallel|'
+ r'inherits\s+from)\b', Keyword),
+ (r'(?<![\.\$])(true|false|yes|no|on|off|null|nothing|none|'
+ r'NaN|Infinity|undefined)\b',
+ Keyword.Constant),
+ (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
+ r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
+ r'decodeURIComponent|encodeURI|encodeURIComponent|'
+ r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
+ r'print)\b',
+ Name.Builtin),
+ (r'[$a-zA-Z_][a-zA-Z0-9_\.\$]*\s*(:|[\+\-\*\/]?\=)?\b', Name.Variable),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ ('"""', String, 'tdqs'),
+ ("'''", String, 'tsqs'),
+ ('"', String, 'dqs'),
+ ("'", String, 'sqs'),
+ ],
+ 'strings': [
+ (r'[^#\\\'"]+', String),
+ # note that all kal strings are multi-line.
+ # hashmarks, quotes and backslashes must be parsed one at a time
+ ],
+ 'interpoling_string' : [
+ (r'}', String.Interpol, "#pop"),
+ include('root')
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
+ (r'#{', String.Interpol, "interpoling_string"),
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+            (r'#|\\.|"', String), # single-quoted strings don't need " escapes
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ (r'\\.|\'|"', String), # no need to escape quotes in triple-string
+ (r'#{', String.Interpol, "interpoling_string"),
+ include('strings'),
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
+ include('strings')
+ ],
+ }
+
+
class LiveScriptLexer(RegexLexer):
"""
For `LiveScript`_ source code.
@@ -1966,7 +2704,7 @@ class LiveScriptLexer(RegexLexer):
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\[\w$-]+', String),
- (r'<\[.*\]>', String),
+ (r'<\[.*?\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
@@ -2010,11 +2748,11 @@ class DuelLexer(RegexLexer):
See http://duelengine.org/.
See http://jsonml.org/jbst/.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Duel'
- aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
+ aliases = ['duel', 'jbst', 'jsonml+bst']
filenames = ['*.duel','*.jbst']
mimetypes = ['text/x-duel','text/x-jbst']
@@ -2041,11 +2779,11 @@ class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Scaml'
- aliases = ['scaml', 'SCAML']
+ aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
@@ -2155,11 +2893,11 @@ class JadeLexer(ExtendedRegexLexer):
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'Jade'
- aliases = ['jade', 'JADE']
+ aliases = ['jade']
filenames = ['*.jade']
mimetypes = ['text/x-jade']
@@ -2263,7 +3001,7 @@ class XQueryLexer(ExtendedRegexLexer):
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
- *New in Pygments 1.4.*
+ .. versionadded:: 1.4
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
@@ -2668,7 +3406,7 @@ class XQueryLexer(ExtendedRegexLexer):
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
@@ -2678,12 +3416,12 @@ class XQueryLexer(ExtendedRegexLexer):
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
@@ -2752,7 +3490,7 @@ class XQueryLexer(ExtendedRegexLexer):
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
- (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
+ (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
@@ -2925,7 +3663,7 @@ class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
- *New in Pygments 1.5.*
+ .. versionadded:: 1.5
"""
name = 'Dart'
@@ -3023,9 +3761,9 @@ class DartLexer(RegexLexer):
class TypeScriptLexer(RegexLexer):
"""
- For `TypeScript <http://www.python.org>`_ source code.
+ For `TypeScript <http://typescriptlang.org/>`_ source code.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'TypeScript'
@@ -3112,7 +3850,7 @@ class LassoLexer(RegexLexer):
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
name = 'Lasso'
@@ -3130,7 +3868,7 @@ class LassoLexer(RegexLexer):
(r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc,
('delimiters', 'anglebrackets')),
- (r'<', Other, 'delimiters'),
+ (r'<(!--.*?-->)?', Other, 'delimiters'),
(r'\s+', Other),
(r'', Other, ('delimiters', 'lassofile')),
],
@@ -3139,7 +3877,7 @@ class LassoLexer(RegexLexer):
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
- (r'<', Other),
+ (r'<(!--.*?-->)?', Other),
(r'[^[<]+', Other),
],
'nosquarebrackets': [
@@ -3161,8 +3899,7 @@ class LassoLexer(RegexLexer):
include('lasso'),
],
'lassofile': [
- (r'\]', Comment.Preproc, '#pop'),
- (r'\?>', Comment.Preproc, '#pop'),
+ (r'\]|\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'whitespacecomments': [
@@ -3191,13 +3928,13 @@ class LassoLexer(RegexLexer):
bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
(r"(self)(\s*->\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
- (r'(\.\.?)([a-z_][\w.]*)',
+ (r'(\.\.?)([a-z_][\w.]*(=(?!=))?)',
bygroups(Name.Builtin.Pseudo, Name.Other.Member)),
- (r'(->\\?\s*|&\s*)([a-z_][\w.]*)',
+ (r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)',
bygroups(Operator, Name.Other.Member)),
- (r'(self|inherited|global|void)\b', Name.Builtin.Pseudo),
+ (r'(self|inherited)\b', Name.Builtin.Pseudo),
(r'-[a-z_][\w.]*', Name.Attribute),
- (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
+ (r'::\s*[a-z_][\w.]*', Name.Label),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
@@ -3211,22 +3948,22 @@ class LassoLexer(RegexLexer):
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)),
- (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%<>]|==)',
+ (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])',
bygroups(Keyword.Declaration, Text, Name.Class, Operator,
Name.Function), 'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Text, Name.Function), 'signature'),
- (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|'
- r'[-+*/%<>]|==)(?=\s*\())', bygroups(Keyword, Text, Name.Function),
+ (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])'
+ r'(?=\s*\())', bygroups(Keyword, Text, Name.Function),
'signature'),
- (r'(public|protected|private)(\s+)([a-z_][\w.]*)',
+ (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Function)),
# keywords
- (r'(true|false|none|minimal|full|all)\b', Keyword.Constant),
- (r'(local|var|variable|data(?=\s))\b', Keyword.Declaration),
+ (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant),
+ (r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
- r'null|list|queue|set|stack|staticarray)\b', Keyword.Type),
+ r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
(r'require\b', Keyword, 'requiresection'),
@@ -3246,17 +3983,18 @@ class LassoLexer(RegexLexer):
r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|'
r'Tag_Name|ascending|average|by|define|descending|do|equals|'
r'frozen|group|handle_failure|import|in|into|join|let|match|max|'
- r'min|on|order|parent|protected|provide|public|require|skip|'
- r'split_thread|sum|take|thread|to|trait|type|where|with|yield)\b',
+ r'min|on|order|parent|protected|provide|public|require|returnhome|'
+ r'skip|split_thread|sum|take|thread|to|trait|type|where|with|'
+ r'yield|yieldhome)\b',
bygroups(Punctuation, Keyword)),
# other
(r',', Punctuation, 'commamember'),
(r'(and|or|not)\b', Operator.Word),
- (r'([a-z_][\w.]*)(\s*::\s*)?([a-z_][\w.]*)?(\s*=(?!=))',
- bygroups(Name, Punctuation, Name.Label, Operator)),
+ (r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))',
+ bygroups(Name, Name.Label, Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
- (r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
+ (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
@@ -3265,13 +4003,13 @@ class LassoLexer(RegexLexer):
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
- (r"\\+", String.Single),
+ (r"\\", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
- (r'\\+', String.Double),
+ (r'\\', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
@@ -3290,10 +4028,10 @@ class LassoLexer(RegexLexer):
include('lasso'),
],
'requiresection': [
- (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=\s*\())', Name, 'requiresignature'),
- (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=(\s*::\s*[\w.]+)?\s*,))', Name),
- (r'[a-z_][\w.]*=?|[-+*/%<>]|==', Name, '#pop'),
- (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
+ (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'),
+ (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name),
+ (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'),
+ (r'::\s*[a-z_][\w.]*', Name.Label),
(r',', Punctuation),
include('whitespacecomments'),
],
@@ -3301,13 +4039,13 @@ class LassoLexer(RegexLexer):
(r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
(r'-?[a-z_][\w.]*', Name.Attribute),
- (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
+ (r'::\s*[a-z_][\w.]*', Name.Label),
(r'\.\.\.', Name.Builtin.Pseudo),
(r'[(,]', Punctuation),
include('whitespacecomments'),
],
'commamember': [
- (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)'
+ (r'(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
Name.Function, 'signature'),
include('whitespacecomments'),
@@ -3325,9 +4063,9 @@ class LassoLexer(RegexLexer):
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lassobuiltins import BUILTINS, MEMBERS
- for key, value in BUILTINS.iteritems():
+ for key, value in iteritems(BUILTINS):
self._builtins.update(value)
- for key, value in MEMBERS.iteritems():
+ for key, value in iteritems(MEMBERS):
self._members.update(value)
RegexLexer.__init__(self, **options)
@@ -3338,7 +4076,8 @@ class LassoLexer(RegexLexer):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if (token is Name.Other and value.lower() in self._builtins or
- token is Name.Other.Member and value.lower() in self._members):
+ token is Name.Other.Member and
+ value.lower().rstrip('=') in self._members):
yield index, Name.Builtin, value
continue
yield index, token, value
@@ -3360,14 +4099,14 @@ class QmlLexer(RegexLexer):
"""
For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
- *New in Pygments 1.6.*
+ .. versionadded:: 1.6
"""
# QML is based on javascript, so much of this is taken from the
# JavascriptLexer above.
name = 'QML'
- aliases = ['qml', 'Qt Meta Language', 'Qt modeling Language']
+ aliases = ['qml']
filenames = ['*.qml',]
mimetypes = [ 'application/x-qml',]
@@ -3427,3 +4166,175 @@ class QmlLexer(RegexLexer):
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
+
+
+class CirruLexer(RegexLexer):
+    """
+    Syntax rules of Cirru can be found at:
+    http://grammar.cirru.org/
+
+    * using ``()`` to markup blocks, but limited in the same line
+    * using ``""`` to markup strings, allow ``\`` to escape
+    * using ``$`` as a shorthand for ``()`` till indentation end or ``)``
+    * using indentation to create nesting
+
+    .. versionadded:: 2.0
+    """
+
+    name = 'Cirru'
+    aliases = ['cirru']
+    filenames = ['*.cirru', '*.cr']
+    mimetypes = ['text/x-cirru']
+    flags = re.MULTILINE
+
+    tokens = {
+        # Inside a double-quoted string; entered from 'line' or 'function'.
+        'string': [
+            (r'[^"\\\n]', String),
+            (r'\\', String.Escape, 'escape'),  # backslash starts an escape
+            (r'"', String, '#pop'),            # closing quote ends the string
+        ],
+        # Exactly one character following a backslash.
+        'escape': [
+            (r'.', String.Escape, '#pop'),
+        ],
+        # First token of a block: highlighted as the function name, then pop.
+        'function': [
+            (r'[\w-][^\s\(\)\"]*', Name.Function, '#pop'),
+            (r'\)', Operator, '#pop'),
+            (r'(?=\n)', Text, '#pop'),         # bare lookahead: leave at EOL
+            (r'\(', Operator, '#push'),
+            (r'"', String, ('#pop', 'string')),
+            (r'\s+', Text.Whitespace),
+            (r'\,', Operator, '#pop'),
+        ],
+        # Rest of a physical line, after the leading function token.
+        'line': [
+            (r'^\B', Text.Whitespace, 'function'),
+            (r'\$', Operator, 'function'),     # '$' opens an implicit block
+            (r'\(', Operator, 'function'),
+            (r'\)', Operator),
+            (r'(?=\n)', Text, '#pop'),
+            (r'\n', Text, '#pop'),
+            (r'"', String, 'string'),
+            (r'\s+', Text.Whitespace),
+            (r'[\d\.]+', Number),
+            (r'[\w-][^\"\(\)\s]*', Name.Variable),
+            (r'--', Comment.Single)
+        ],
+        # Every source line: consume indentation, then lex as a new block.
+        'root': [
+            (r'^\s*', Text.Whitespace, ('line', 'function')),
+            (r'^\s+$', Text.Whitespace),
+        ]
+    }
+
+
+class MaskLexer(RegexLexer):
+    """
+    For `Mask <http://github.com/atmajs/MaskJS>`__ markup.
+
+    .. versionadded:: 2.0
+    """
+    name = 'Mask'
+    aliases = ['mask']
+    filenames = ['*.mask']
+    mimetypes = ['text/x-mask']
+
+    flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r'//.*?\n', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline),
+            (r'[\{\};>]', Punctuation),
+            (r"'''", String, 'string-trpl-single'),
+            (r'"""', String, 'string-trpl-double'),
+            (r"'", String, 'string-single'),
+            (r'"', String, 'string-double'),
+            (r'([\w-]+)', Name.Tag, 'node'),
+            (r'([^\.#;{>\s]+)', Name.Class, 'node'),
+            (r'(#[\w_-]+)', Name.Function, 'node'),
+            (r'(\.[\w_-]+)', Name.Variable.Class, 'node')
+        ],
+        # Rules shared by every string flavour: escapes and ~[...] interpolation.
+        'string-base': [
+            (r'\\.', String.Escape),
+            (r'~\[', String.Interpol, 'interpolation'),
+            (r'.', String.Single),
+        ],
+        'string-single':[
+            (r"'", String.Single, '#pop'),
+            include('string-base')
+        ],
+        'string-double':[
+            (r'"', String.Single, '#pop'),
+            include('string-base')
+        ],
+        'string-trpl-single':[
+            (r"'''", String.Single, '#pop'),
+            include('string-base')
+        ],
+        'string-trpl-double':[
+            (r'"""', String.Single, '#pop'),
+            include('string-base')
+        ],
+        # Inside ~[...]: a leading ':' switches to a JavaScript expression.
+        'interpolation': [
+            (r'\]', String.Interpol, '#pop'),
+            (r'\s*:', String.Interpol, 'expression'),
+            (r'\s*\w+:', Name.Other),
+            (r'[^\]]+', String.Interpol)
+        ],
+        # Everything up to the closing ']' is delegated to the JS lexer.
+        'expression': [
+            (r'[^\]]+', using(JavascriptLexer), '#pop')
+        ],
+        # Attributes, classes and ids following a tag name.
+        'node': [
+            (r'\s+', Text),
+            (r'\.', Name.Variable.Class, 'node-class'),
+            (r'\#', Name.Function, 'node-id'),
+            (r'style[ \t]*=', Name.Attribute, 'node-attr-style-value'),
+            (r'[\w_:-]+[ \t]*=', Name.Attribute, 'node-attr-value'),
+            (r'[\w_:-]+', Name.Attribute),
+            (r'[>{;]', Punctuation, '#pop')
+        ],
+        'node-class': [
+            (r'[\w-]+', Name.Variable.Class),
+            (r'~\[', String.Interpol, 'interpolation'),
+            (r'', Text, '#pop')                # empty match: fall back out
+        ],
+        'node-id': [
+            (r'[\w-]+', Name.Function),
+            (r'~\[', String.Interpol, 'interpolation'),
+            (r'', Text, '#pop')                # empty match: fall back out
+        ],
+        # Note: the string states pop twice, back past 'node-attr-value'.
+        'node-attr-value':[
+            (r'\s+', Text),
+            (r'[\w_]+', Name.Variable, '#pop'),
+            (r"'", String, 'string-single-pop2'),
+            (r'"', String, 'string-double-pop2'),
+            (r'', Text, '#pop')
+        ],
+        # style="..." values are lexed as CSS-like property lists.
+        'node-attr-style-value':[
+            (r'\s+', Text),
+            (r"'", String.Single, 'css-single-end'),
+            (r'"', String.Single, 'css-double-end'),
+            include('node-attr-value')
+        ],
+        'css-base': [
+            (r'\s+', Text),
+            (r"[;]", Punctuation),
+            (r"[\w\-_]+\s*:", Name.Builtin)
+        ],
+        'css-single-end': [
+            include('css-base'),
+            (r"'", String.Single, '#pop:2'),
+            (r"[^;']+", Name.Entity)
+        ],
+        'css-double-end': [
+            include('css-base'),
+            (r'"', String.Single, '#pop:2'),
+            (r"[^;\"]+", Name.Entity)
+        ],
+        'string-single-pop2':[
+            (r"'", String.Single, '#pop:2'),
+            include('string-base')
+        ],
+        'string-double-pop2':[
+            (r'"', String.Single, '#pop:2'),
+            include('string-base')
+        ]
+    }
diff --git a/pygments/modeline.py b/pygments/modeline.py
new file mode 100644
index 00000000..e81afec0
--- /dev/null
+++ b/pygments/modeline.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.modeline
+ ~~~~~~~~~~~~~~~~~
+
+ A simple modeline parser (based on pymodeline).
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+__all__ = ['get_filetype_from_buffer']
+
modeline_re = re.compile(r'''
    (?: vi | vim | ex ) (?: [<=>]? \d* )? :
    .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)


def get_filetype_from_line(l):
    """Return the filetype named in a vi/vim/ex modeline in *l*, or None."""
    m = modeline_re.search(l)
    if m:
        return m.group(1)


def get_filetype_from_buffer(buf, max_lines=5):
    """
    Scan the buffer for modelines and return filetype if one is found.

    Editors honor modelines within the first or last few lines of a file,
    so both ends of *buf* are searched (up to *max_lines* lines each).
    """
    lines = buf.splitlines()
    # Trailing modelines win: scan the last max_lines lines first.
    for l in lines[-1:-max_lines-1:-1]:
        ret = get_filetype_from_line(l)
        if ret:
            return ret
    # Then the leading lines.  The slice runs down to index 0 inclusive;
    # the previous ``lines[max_lines:0:-1]`` never examined the first
    # line, so a modeline on line 1 of the file was silently missed.
    for l in lines[max_lines::-1]:
        ret = get_filetype_from_line(l)
        if ret:
            return ret

    return None
diff --git a/pygments/plugin.py b/pygments/plugin.py
index 58662e96..103e7b71 100644
--- a/pygments/plugin.py
+++ b/pygments/plugin.py
@@ -32,7 +32,7 @@
yourfilter = yourfilter:YourFilter
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
try:
diff --git a/pygments/scanner.py b/pygments/scanner.py
index f469e694..269edadd 100644
--- a/pygments/scanner.py
+++ b/pygments/scanner.py
@@ -12,7 +12,7 @@
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
diff --git a/pygments/sphinxext.py b/pygments/sphinxext.py
new file mode 100644
index 00000000..5ab8f060
--- /dev/null
+++ b/pygments/sphinxext.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.sphinxext
+ ~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension to generate automatic documentation of lexers,
+ formatters and filters.
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from sphinx.util.compat import Directive
+from sphinx.util.nodes import nested_parse_with_titles
+
+
+# reST templates emitted by the PygmentsDoc directive below; each is
+# filled in with %-formatting (module name, headings, class metadata).
+MODULEDOC = '''
+.. module:: %s
+
+%s
+%s
+'''
+
+LEXERDOC = '''
+.. class:: %s
+
+    :Short names: %s
+    :Filenames: %s
+    :MIME types: %s
+
+    %s
+
+'''
+
+FMTERDOC = '''
+.. class:: %s
+
+    :Short names: %s
+    :Filenames: %s
+
+    %s
+
+'''
+
+FILTERDOC = '''
+.. class:: %s
+
+    :Name: %s
+
+    %s
+
+'''
+
+class PygmentsDoc(Directive):
+ """
+ A directive to collect all lexers/formatters/filters and generate
+ autoclass directives for them.
+ """
+ has_content = False
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {}
+
+ def run(self):
+ self.filenames = set()
+ if self.arguments[0] == 'lexers':
+ out = self.document_lexers()
+ elif self.arguments[0] == 'formatters':
+ out = self.document_formatters()
+ elif self.arguments[0] == 'filters':
+ out = self.document_filters()
+ else:
+ raise Exception('invalid argument for "pygmentsdoc" directive')
+ node = nodes.compound()
+ vl = ViewList(out.split('\n'), source='')
+ nested_parse_with_titles(self.state, vl, node)
+ for fn in self.filenames:
+ self.state.document.settings.record_dependencies.add(fn)
+ return node.children
+
+ def document_lexers(self):
+ from pygments.lexers._mapping import LEXERS
+ out = []
+ modules = {}
+ moduledocstrings = {}
+ for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
+ module = data[0]
+ mod = __import__(module, None, None, [classname])
+ self.filenames.add(mod.__file__)
+ cls = getattr(mod, classname)
+ if not cls.__doc__:
+ print("Warning: %s does not have a docstring." % classname)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ modules.setdefault(module, []).append((
+ classname,
+ ', '.join(data[2]) or 'None',
+ ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
+ ', '.join(data[4]) or 'None',
+ docstring))
+ if module not in moduledocstrings:
+ moddoc = mod.__doc__
+ if isinstance(moddoc, bytes):
+ moddoc = moddoc.decode('utf8')
+ moduledocstrings[module] = moddoc
+
+ for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
+ heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
+ out.append(MODULEDOC % (module, heading, '-'*len(heading)))
+ for data in lexers:
+ out.append(LEXERDOC % data)
+
+ return ''.join(out)
+
+ def document_formatters(self):
+ from pygments.formatters import FORMATTERS
+
+ out = []
+ for cls, data in sorted(FORMATTERS.items(),
+ key=lambda x: x[0].__name__):
+ self.filenames.add(sys.modules[cls.__module__].__file__)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ heading = cls.__name__
+ out.append(FMTERDOC % (heading, ', '.join(data[1]) or 'None',
+ ', '.join(data[2]).replace('*', '\\*') or 'None',
+ docstring))
+ return ''.join(out)
+
+ def document_filters(self):
+ from pygments.filters import FILTERS
+
+ out = []
+ for name, cls in FILTERS.items():
+ self.filenames.add(sys.modules[cls.__module__].__file__)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ out.append(FILTERDOC % (cls.__name__, name, docstring))
+ return ''.join(out)
+
+
+def setup(app):
+    # Sphinx extension entry point: register the ``pygmentsdoc`` directive.
+    app.add_directive('pygmentsdoc', PygmentsDoc)
diff --git a/pygments/style.py b/pygments/style.py
index 0fc01b40..bb54377c 100644
--- a/pygments/style.py
+++ b/pygments/style.py
@@ -5,11 +5,12 @@
Basic style object.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
+from pygments.util import add_metaclass
class StyleMeta(type):
@@ -104,8 +105,8 @@ class StyleMeta(type):
return len(cls._styles)
+@add_metaclass(StyleMeta)
class Style(object):
- __metaclass__ = StyleMeta
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py
index 3d6ef73c..04c2e70a 100644
--- a/pygments/styles/__init__.py
+++ b/pygments/styles/__init__.py
@@ -5,7 +5,7 @@
Contains built-in styles.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -34,6 +34,8 @@ STYLE_MAP = {
'vs': 'vs::VisualStudioStyle',
'tango': 'tango::TangoStyle',
'rrt': 'rrt::RrtStyle',
+ 'xcode': 'xcode::XcodeStyle',
+ 'igor': 'igor::IgorStyle',
}
diff --git a/pygments/styles/autumn.py b/pygments/styles/autumn.py
index 3960536b..0417a1f7 100644
--- a/pygments/styles/autumn.py
+++ b/pygments/styles/autumn.py
@@ -5,7 +5,7 @@
A colorful style, inspired by the terminal highlighting style.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/borland.py b/pygments/styles/borland.py
index 9858034e..c087ca77 100644
--- a/pygments/styles/borland.py
+++ b/pygments/styles/borland.py
@@ -5,7 +5,7 @@
Style similar to the style used in the Borland IDEs.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/bw.py b/pygments/styles/bw.py
index 170442ad..4efb1060 100644
--- a/pygments/styles/bw.py
+++ b/pygments/styles/bw.py
@@ -5,7 +5,7 @@
Simple black/white only style.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/colorful.py b/pygments/styles/colorful.py
index eb595467..9cd7f658 100644
--- a/pygments/styles/colorful.py
+++ b/pygments/styles/colorful.py
@@ -5,7 +5,7 @@
A colorful style, inspired by CodeRay.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/default.py b/pygments/styles/default.py
index 77bdac0d..c0998324 100644
--- a/pygments/styles/default.py
+++ b/pygments/styles/default.py
@@ -5,7 +5,7 @@
The default highlighting style.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/emacs.py b/pygments/styles/emacs.py
index 9f8b4074..5b716730 100644
--- a/pygments/styles/emacs.py
+++ b/pygments/styles/emacs.py
@@ -5,7 +5,7 @@
A highlighting style for Pygments, inspired by Emacs.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/friendly.py b/pygments/styles/friendly.py
index 732a1252..088e303d 100644
--- a/pygments/styles/friendly.py
+++ b/pygments/styles/friendly.py
@@ -5,7 +5,7 @@
A modern style based on the VIM pyte theme.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/fruity.py b/pygments/styles/fruity.py
index 45334159..3758a118 100644
--- a/pygments/styles/fruity.py
+++ b/pygments/styles/fruity.py
@@ -5,7 +5,7 @@
pygments version of my "fruity" vim theme.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/igor.py b/pygments/styles/igor.py
new file mode 100644
index 00000000..05dae1bc
--- /dev/null
+++ b/pygments/styles/igor.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.styles.igor
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Igor Pro default style.
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String
+
+
+class IgorStyle(Style):
+    """
+    Pygments version of the official colors for Igor Pro procedures.
+    """
+    default_style = ""
+
+    styles = {
+        Comment: 'italic #FF0000',     # red italics
+        Keyword: '#0000FF',            # blue
+        Name.Function: '#C34E00',      # orange
+        Name.Decorator: '#CC00A3',     # magenta
+        Name.Class: '#007575',         # teal
+        String: '#009C00'              # green
+    }
diff --git a/pygments/styles/manni.py b/pygments/styles/manni.py
index 036a2120..20fd544d 100644
--- a/pygments/styles/manni.py
+++ b/pygments/styles/manni.py
@@ -8,7 +8,7 @@
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/monokai.py b/pygments/styles/monokai.py
index 31dc83b2..f8940db4 100644
--- a/pygments/styles/monokai.py
+++ b/pygments/styles/monokai.py
@@ -7,7 +7,7 @@
http://www.monokai.nl/blog/2006/07/15/textmate-color-theme/
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -93,14 +93,14 @@ class MonokaiStyle(Style):
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
- Generic.Deleted: "", # class: 'gd',
+ Generic.Deleted: "#f92672", # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "", # class: 'gh'
- Generic.Inserted: "", # class: 'gi'
+ Generic.Inserted: "#a6e22e", # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "", # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
- Generic.Subheading: "", # class: 'gu'
+ Generic.Subheading: "#75715e", # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
diff --git a/pygments/styles/murphy.py b/pygments/styles/murphy.py
index dbf4eba9..7a4369e1 100644
--- a/pygments/styles/murphy.py
+++ b/pygments/styles/murphy.py
@@ -5,7 +5,7 @@
Murphy's style from CodeRay.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/native.py b/pygments/styles/native.py
index 0de84386..ccd1376a 100644
--- a/pygments/styles/native.py
+++ b/pygments/styles/native.py
@@ -5,7 +5,7 @@
pygments version of my "native" vim theme.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/pastie.py b/pygments/styles/pastie.py
index 2a2f386f..f790f54d 100644
--- a/pygments/styles/pastie.py
+++ b/pygments/styles/pastie.py
@@ -7,7 +7,7 @@
.. _pastie: http://pastie.caboo.se/
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/perldoc.py b/pygments/styles/perldoc.py
index b8b67b29..9103c402 100644
--- a/pygments/styles/perldoc.py
+++ b/pygments/styles/perldoc.py
@@ -7,7 +7,7 @@
.. _perldoc: http://perldoc.perl.org/
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/rrt.py b/pygments/styles/rrt.py
index 1a2fc6a4..ed056e0d 100644
--- a/pygments/styles/rrt.py
+++ b/pygments/styles/rrt.py
@@ -5,7 +5,7 @@
pygments "rrt" theme, based on Zap and Emacs defaults.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/tango.py b/pygments/styles/tango.py
index 7b1c4f3c..72b4cbdf 100644
--- a/pygments/styles/tango.py
+++ b/pygments/styles/tango.py
@@ -33,7 +33,7 @@
have been chosen to have the same style. Similarly, keywords (Keyword.*),
and Operator.Word (and, or, in) have been assigned the same style.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/trac.py b/pygments/styles/trac.py
index 714e36cc..50c63d41 100644
--- a/pygments/styles/trac.py
+++ b/pygments/styles/trac.py
@@ -5,7 +5,7 @@
Port of the default trac highlighter design.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/vim.py b/pygments/styles/vim.py
index a5462db3..7b6e0d83 100644
--- a/pygments/styles/vim.py
+++ b/pygments/styles/vim.py
@@ -5,7 +5,7 @@
A highlighting style for Pygments, inspired by vim.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/vs.py b/pygments/styles/vs.py
index 14a56faa..6aa59dbb 100644
--- a/pygments/styles/vs.py
+++ b/pygments/styles/vs.py
@@ -5,7 +5,7 @@
Simple style with MS Visual Studio colors.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/styles/xcode.py b/pygments/styles/xcode.py
new file mode 100644
index 00000000..e2ecf2aa
--- /dev/null
+++ b/pygments/styles/xcode.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.styles.xcode
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Style similar to the `Xcode` default theme.
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator
+
+
+class XcodeStyle(Style):
+ """
+ Style similar to the Xcode default colouring theme.
+ """
+
+ default_style = ''
+
+ styles = {
+ Comment: '#177500',
+ Comment.Preproc: '#633820',
+
+ String: '#C41A16',
+ String.Char: '#2300CE',
+
+ Operator: '#000000',
+
+ Keyword: '#AA0D92',
+
+ Name: '#000000',
+ Name.Attribute: '#836C28',
+ Name.Class: '#000000',
+ Name.Function: '#000000',
+ Name.Builtin: '#AA0D92',
+ # In Obj-C code this token is used to colour Cocoa types
+ Name.Builtin.Pseudo: '#5B269A',
+ Name.Variable: '#000000',
+ Name.Tag: '#000000',
+ Name.Decorator: '#000000',
+ # Workaround for a BUG here: lexer treats multiline method signatres as labels
+ Name.Label: '#000000',
+
+ Number: '#2300CE',
+ Error: '#000000',
+ }
diff --git a/pygments/token.py b/pygments/token.py
index 19a83f2e..c40ffd33 100644
--- a/pygments/token.py
+++ b/pygments/token.py
@@ -5,7 +5,7 @@
Basic token types and the standard tokens.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -49,6 +49,7 @@ Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
+Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
@@ -116,6 +117,7 @@ STANDARD_TYPES = {
Text: '',
Whitespace: 'w',
+ Escape: 'esc',
Error: 'err',
Other: 'x',
diff --git a/pygments/unistring.py b/pygments/unistring.py
index 4b07028e..2752037f 100644
--- a/pygments/unistring.py
+++ b/pygments/unistring.py
@@ -8,10 +8,11 @@
Inspired by chartypes_create.py from the MoinMoin project.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from pygments.util import u_prefix
+
+from pygments.util import u_prefix, unichr
Cc = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f'
@@ -133,7 +134,7 @@ except UnicodeDecodeError:
Cs = '' # Jython can't handle isolated surrogates\n\n""" % repr(val).lstrip('u'))
else:
f.write('%s = %r\n\n' % (cat, val))
- f.write('cats = %r\n\n' % sorted(categories.keys()))
+ f.write('cats = %r\n\n' % sorted(categories))
f.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
f.write(footer)
diff --git a/pygments/util.py b/pygments/util.py
index caac1144..c302900f 100644
--- a/pygments/util.py
+++ b/pygments/util.py
@@ -5,13 +5,12 @@
Utility functions.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
-import codecs
split_path_re = re.compile(r'[/\\ ]')
@@ -52,7 +51,7 @@ def get_bool_opt(options, optname, default=None):
return string
elif isinstance(string, int):
return bool(string)
- elif not isinstance(string, basestring):
+ elif not isinstance(string, string_types):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
@@ -82,7 +81,7 @@ def get_int_opt(options, optname, default=None):
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
@@ -253,25 +252,35 @@ def unirange(a, b):
# Python 2/3 compatibility
-if sys.version_info < (3,0):
- b = bytes = str
+if sys.version_info < (3, 0):
+ unichr = unichr
+ xrange = xrange
+ string_types = (str, unicode)
+ text_type = unicode
u_prefix = 'u'
+ iteritems = dict.iteritems
+ itervalues = dict.itervalues
import StringIO, cStringIO
- BytesIO = cStringIO.StringIO
+ # unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
- uni_open = codecs.open
+ BytesIO = cStringIO.StringIO
else:
- import builtins
- bytes = builtins.bytes
+ unichr = chr
+ xrange = range
+ string_types = (str,)
+ text_type = str
u_prefix = ''
- def b(s):
- if isinstance(s, str):
- return bytes(map(ord, s))
- elif isinstance(s, bytes):
- return s
- else:
- raise TypeError("Invalid argument %r for b()" % (s,))
- import io
- BytesIO = io.BytesIO
- StringIO = io.StringIO
- uni_open = builtins.open
+ iteritems = dict.items
+ itervalues = dict.values
+ from io import StringIO, BytesIO
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ for slots_var in orig_vars.get('__slots__', ()):
+ orig_vars.pop(slots_var)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
diff --git a/scripts/check_sources.py b/scripts/check_sources.py
index d9e5c2ae..71aff299 100755
--- a/scripts/check_sources.py
+++ b/scripts/check_sources.py
@@ -7,13 +7,17 @@
Make sure each Python file has a correct file header
including copyright and license information.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import sys, os, re
+from __future__ import print_function
+
+import io
+import os
+import re
+import sys
import getopt
-import cStringIO
from os.path import join, splitext, abspath
@@ -30,7 +34,7 @@ def checker(*suffixes, **kwds):
name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(r'^ :copyright: Copyright 2006-2013 by '
+copyright_re = re.compile(r'^ :copyright: Copyright 2006-2014 by '
r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
@@ -46,7 +50,7 @@ misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
- except SyntaxError, err:
+ except SyntaxError as err:
yield 0, "not compilable: %s" % err
@@ -67,9 +71,12 @@ def check_style_and_encoding(fn, lines):
encoding = co.group(1)
try:
line.decode(encoding)
- except UnicodeDecodeError, err:
+ except AttributeError:
+ # Python 3 - encoding was already checked
+ pass
+ except UnicodeDecodeError as err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
- except LookupError, err:
+ except LookupError as err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@@ -130,7 +137,7 @@ def check_fileheader(fn, lines):
yield 0, "no correct license info"
ci = -3
- copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
+ copyright = llist[ci:ci+1]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
@@ -165,7 +172,7 @@ def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+ print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
opts = {}
for opt, val in gopts:
@@ -178,20 +185,20 @@ def main(argv):
elif len(args) == 1:
path = args[0]
else:
- print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+ print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
verbose = '-v' in opts
num = 0
- out = cStringIO.StringIO()
+ out = io.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
- if '.svn' in dirs:
- dirs.remove('.svn')
+ if '.hg' in dirs:
+ dirs.remove('.hg')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
@@ -212,13 +219,13 @@ def main(argv):
continue
if verbose:
- print "Checking %s..." % fn
+ print("Checking %s..." % fn)
try:
f = open(fn, 'r')
lines = list(f)
- except (IOError, OSError), err:
- print "%s: cannot open: %s" % (fn, err)
+ except (IOError, OSError) as err:
+ print("%s: cannot open: %s" % (fn, err))
num += 1
continue
@@ -226,15 +233,15 @@ def main(argv):
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
- print >>out, "%s:%d: %s" % (fn, lno, msg)
+ print(u"%s:%d: %s" % (fn, lno, msg), file=out)
num += 1
if verbose:
- print
+ print()
if num == 0:
- print "No errors found."
+ print("No errors found.")
else:
- print out.getvalue().rstrip('\n')
- print "%d error%s found." % (num, num > 1 and "s" or "")
+ print(out.getvalue().rstrip('\n'))
+ print("%d error%s found." % (num, num > 1 and "s" or ""))
return int(num > 0)
diff --git a/scripts/detect_missing_analyse_text.py b/scripts/detect_missing_analyse_text.py
index 1312648f..ab58558e 100644
--- a/scripts/detect_missing_analyse_text.py
+++ b/scripts/detect_missing_analyse_text.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import sys
from pygments.lexers import get_all_lexers, find_lexer_class
@@ -9,22 +10,22 @@ def main():
for name, aliases, filenames, mimetypes in get_all_lexers():
cls = find_lexer_class(name)
if not cls.aliases:
- print cls, "has no aliases"
+ print(cls, "has no aliases")
for f in filenames:
if f not in uses:
uses[f] = []
uses[f].append(cls)
ret = 0
- for k, v in uses.iteritems():
+ for k, v in uses.items():
if len(v) > 1:
#print "Multiple for", k, v
for i in v:
if i.analyse_text is None:
- print i, "has a None analyse_text"
+ print(i, "has a None analyse_text")
ret |= 1
elif Lexer.analyse_text.__doc__ == i.analyse_text.__doc__:
- print i, "needs analyse_text, multiple lexers for", k
+ print(i, "needs analyse_text, multiple lexers for", k)
ret |= 2
return ret
diff --git a/scripts/find_codetags.py b/scripts/find_codetags.py
index 2fb18333..f8204e6e 100755
--- a/scripts/find_codetags.py
+++ b/scripts/find_codetags.py
@@ -7,11 +7,15 @@
Find code tags in specified files and/or directories
and create a report in HTML format.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import sys, os, re
+from __future__ import print_function
+
+import os
+import re
+import sys
import getopt
from os.path import join, abspath, isdir, isfile
@@ -73,8 +77,8 @@ def main():
try:
gopts, args = getopt.getopt(sys.argv[1:], "vo:i:")
except getopt.GetoptError:
- print ("Usage: %s [-v] [-i ignoredir]* [-o reportfile.html] "
- "path ..." % sys.argv[0])
+ print(("Usage: %s [-v] [-i ignoredir]* [-o reportfile.html] "
+ "path ..." % sys.argv[0]))
return 2
opts = {}
for opt, val in gopts:
@@ -97,18 +101,18 @@ def main():
num = 0
for path in args:
- print "Searching for code tags in %s, please wait." % path
+ print("Searching for code tags in %s, please wait." % path)
if isfile(path):
gnum += 1
if process_file(store, path):
if verbose:
- print path + ": found %d tags" % \
- (path in store and len(store[path]) or 0)
+ print(path + ": found %d tags" % \
+ (path in store and len(store[path]) or 0))
num += 1
else:
if verbose:
- print path + ": binary or not readable"
+ print(path + ": binary or not readable")
continue
elif not isdir(path):
continue
@@ -117,11 +121,15 @@ def main():
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
- if '.svn' in dirs:
- dirs.remove('.svn')
+ if '.hg' in dirs:
+ dirs.remove('.hg')
+ if 'examplefiles' in dirs:
+ dirs.remove('examplefiles')
+ if 'dist' in dirs:
+ dirs.remove('dist')
for fn in files:
gnum += 1
- if gnum % 50 == 0 and not verbose:
+ if gnum % 25 == 0 and not verbose:
sys.stdout.write('.')
sys.stdout.flush()
@@ -137,16 +145,16 @@ def main():
if fn[:2] == './': fn = fn[2:]
if process_file(store, fn):
if verbose:
- print fn + ": found %d tags" % \
- (fn in store and len(store[fn]) or 0)
+ print(fn + ": found %d tags" % \
+ (fn in store and len(store[fn]) or 0))
num += 1
else:
if verbose:
- print fn + ": binary or not readable"
- print
+ print(fn + ": binary or not readable")
+ print()
- print "Processed %d of %d files. Found %d tags in %d files." % (
- num, gnum, sum(len(fitem) for fitem in store.itervalues()), len(store))
+ print("Processed %d of %d files. Found %d tags in %d files." % (
+ num, gnum, sum(len(fitem) for fitem in store.values()), len(store)))
if not store:
return 0
@@ -190,7 +198,7 @@ td { padding: 2px 5px 2px 5px;
'<td class="tag %%(tag)s">%%(tag)s</td>'
'<td class="who">%%(who)s</td><td class="what">%%(what)s</td></tr>')
- f = file(output, 'w')
+ f = open(output, 'w')
table = '\n'.join(TABLE % fname +
'\n'.join(TR % (no % 2,) % entry
for no, entry in enumerate(store[fname]))
@@ -198,7 +206,7 @@ td { padding: 2px 5px 2px 5px;
f.write(HTML % (', '.join(map(abspath, args)), table))
f.close()
- print "Report written to %s." % output
+ print("Report written to %s." % output)
return 0
if __name__ == '__main__':
diff --git a/scripts/find_error.py b/scripts/find_error.py
index 00923569..7aaa9bee 100755
--- a/scripts/find_error.py
+++ b/scripts/find_error.py
@@ -8,11 +8,14 @@
the text where Error tokens are being generated, along
with some context.
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import sys, os
+from __future__ import print_function
+
+import os
+import sys
# always prefer Pygments from source if exists
srcpath = os.path.join(os.path.dirname(__file__), '..')
@@ -104,36 +107,36 @@ def main(fn, lexer=None, options={}):
# already debugged before
debug_lexer = True
lno = 1
- text = file(fn, 'U').read()
+ text = open(fn, 'U').read()
text = text.strip('\n') + '\n'
tokens = []
states = []
def show_token(tok, state):
reprs = map(repr, tok)
- print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
+ print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
if debug_lexer:
- print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
- print
+ print(' ' + ' ' * (29-len(reprs[0])) + repr(state), end=' ')
+ print()
for type, val in lx.get_tokens(text):
lno += val.count('\n')
if type == Error:
- print 'Error parsing', fn, 'on line', lno
- print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
+ print('Error parsing', fn, 'on line', lno)
+ print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
if showall:
for tok, state in map(None, tokens, states):
show_token(tok, state)
else:
for i in range(max(len(tokens) - num, 0), len(tokens)):
show_token(tokens[i], states[i])
- print 'Error token:'
+ print('Error token:')
l = len(repr(val))
- print ' ' + repr(val),
+ print(' ' + repr(val), end=' ')
if debug_lexer and hasattr(lx, 'statestack'):
- print ' ' * (60-l) + repr(lx.statestack),
- print
- print
+ print(' ' * (60-l) + repr(lx.statestack), end=' ')
+ print()
+ print()
return 1
tokens.append((type, val))
if debug_lexer:
diff --git a/scripts/get_vimkw.py b/scripts/get_vimkw.py
index 153c88c3..4ea302f4 100644
--- a/scripts/get_vimkw.py
+++ b/scripts/get_vimkw.py
@@ -1,5 +1,5 @@
+from __future__ import print_function
import re
-from pprint import pprint
r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
@@ -31,12 +31,12 @@ def getkw(input, output):
for a, b in output_info.items():
b.sort()
- print >>out, '%s=[%s]' % (a, ','.join(b))
+ print('%s=[%s]' % (a, ','.join(b)), file=out)
def is_keyword(w, keywords):
for i in range(len(w), 0, -1):
if w[:i] in keywords:
- return signals[w[:i]][:len(w)] == w
+ return keywords[w[:i]][:len(w)] == w
return False
if __name__ == "__main__":
diff --git a/scripts/reindent.py b/scripts/reindent.py
deleted file mode 100755
index e6ee8287..00000000
--- a/scripts/reindent.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#! /usr/bin/env python
-
-# Released to the public domain, by Tim Peters, 03 October 2000.
-# -B option added by Georg Brandl, 2006.
-
-"""reindent [-d][-r][-v] [ path ... ]
-
--d (--dryrun) Dry run. Analyze, but don't make any changes to files.
--r (--recurse) Recurse. Search for all .py files in subdirectories too.
--B (--no-backup) Don't write .bak backup files.
--v (--verbose) Verbose. Print informative msgs; else only names of changed files.
--h (--help) Help. Print this usage information and exit.
-
-Change Python (.py) files to use 4-space indents and no hard tab characters.
-Also trim excess spaces and tabs from ends of lines, and remove empty lines
-at the end of files. Also ensure the last line ends with a newline.
-
-If no paths are given on the command line, reindent operates as a filter,
-reading a single source file from standard input and writing the transformed
-source to standard output. In this case, the -d, -r and -v flags are
-ignored.
-
-You can pass one or more file and/or directory paths. When a directory
-path, all .py files within the directory will be examined, and, if the -r
-option is given, likewise recursively for subdirectories.
-
-If output is not to standard output, reindent overwrites files in place,
-renaming the originals with a .bak extension. If it finds nothing to
-change, the file is left alone. If reindent does change a file, the changed
-file is a fixed-point for future runs (i.e., running reindent on the
-resulting .py file won't change it again).
-
-The hard part of reindenting is figuring out what to do with comment
-lines. So long as the input files get a clean bill of health from
-tabnanny.py, reindent should do a good job.
-"""
-
-__version__ = "1"
-
-import tokenize
-import os
-import sys
-
-verbose = 0
-recurse = 0
-dryrun = 0
-no_backup = 0
-
-def usage(msg=None):
- if msg is not None:
- print >> sys.stderr, msg
- print >> sys.stderr, __doc__
-
-def errprint(*args):
- sep = ""
- for arg in args:
- sys.stderr.write(sep + str(arg))
- sep = " "
- sys.stderr.write("\n")
-
-def main():
- import getopt
- global verbose, recurse, dryrun, no_backup
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], "drvhB",
- ["dryrun", "recurse", "verbose", "help",
- "no-backup"])
- except getopt.error, msg:
- usage(msg)
- return
- for o, a in opts:
- if o in ('-d', '--dryrun'):
- dryrun += 1
- elif o in ('-r', '--recurse'):
- recurse += 1
- elif o in ('-v', '--verbose'):
- verbose += 1
- elif o in ('-B', '--no-backup'):
- no_backup += 1
- elif o in ('-h', '--help'):
- usage()
- return
- if not args:
- r = Reindenter(sys.stdin)
- r.run()
- r.write(sys.stdout)
- return
- for arg in args:
- check(arg)
-
-def check(file):
- if os.path.isdir(file) and not os.path.islink(file):
- if verbose:
- print "listing directory", file
- names = os.listdir(file)
- for name in names:
- fullname = os.path.join(file, name)
- if ((recurse and os.path.isdir(fullname) and
- not os.path.islink(fullname))
- or name.lower().endswith(".py")):
- check(fullname)
- return
-
- if verbose:
- print "checking", file, "...",
- try:
- f = open(file)
- except IOError, msg:
- errprint("%s: I/O Error: %s" % (file, str(msg)))
- return
-
- r = Reindenter(f)
- f.close()
- if r.run():
- if verbose:
- print "changed."
- if dryrun:
- print "But this is a dry run, so leaving it alone."
- else:
- print "reindented", file, (dryrun and "(dry run => not really)" or "")
- if not dryrun:
- if not no_backup:
- bak = file + ".bak"
- if os.path.exists(bak):
- os.remove(bak)
- os.rename(file, bak)
- if verbose:
- print "renamed", file, "to", bak
- f = open(file, "w")
- r.write(f)
- f.close()
- if verbose:
- print "wrote new", file
- else:
- if verbose:
- print "unchanged."
-
-
-class Reindenter:
-
- def __init__(self, f):
- self.find_stmt = 1 # next token begins a fresh stmt?
- self.level = 0 # current indent level
-
- # Raw file lines.
- self.raw = f.readlines()
-
- # File lines, rstripped & tab-expanded. Dummy at start is so
- # that we can use tokenize's 1-based line numbering easily.
- # Note that a line is all-blank iff it's "\n".
- self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
- for line in self.raw]
- self.lines.insert(0, None)
- self.index = 1 # index into self.lines of next line
-
- # List of (lineno, indentlevel) pairs, one for each stmt and
- # comment line. indentlevel is -1 for comment lines, as a
- # signal that tokenize doesn't know what to do about them;
- # indeed, they're our headache!
- self.stats = []
-
- def run(self):
- tokenize.tokenize(self.getline, self.tokeneater)
- # Remove trailing empty lines.
- lines = self.lines
- while lines and lines[-1] == "\n":
- lines.pop()
- # Sentinel.
- stats = self.stats
- stats.append((len(lines), 0))
- # Map count of leading spaces to # we want.
- have2want = {}
- # Program after transformation.
- after = self.after = []
- # Copy over initial empty lines -- there's nothing to do until
- # we see a line with *something* on it.
- i = stats[0][0]
- after.extend(lines[1:i])
- for i in range(len(stats)-1):
- thisstmt, thislevel = stats[i]
- nextstmt = stats[i+1][0]
- have = getlspace(lines[thisstmt])
- want = thislevel * 4
- if want < 0:
- # A comment line.
- if have:
- # An indented comment line. If we saw the same
- # indentation before, reuse what it most recently
- # mapped to.
- want = have2want.get(have, -1)
- if want < 0:
- # Then it probably belongs to the next real stmt.
- for j in xrange(i+1, len(stats)-1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- if have == getlspace(lines[jline]):
- want = jlevel * 4
- break
- if want < 0: # Maybe it's a hanging
- # comment like this one,
- # in which case we should shift it like its base
- # line got shifted.
- for j in xrange(i-1, -1, -1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- want = have + getlspace(after[jline-1]) - \
- getlspace(lines[jline])
- break
- if want < 0:
- # Still no luck -- leave it alone.
- want = have
- else:
- want = 0
- assert want >= 0
- have2want[have] = want
- diff = want - have
- if diff == 0 or have == 0:
- after.extend(lines[thisstmt:nextstmt])
- else:
- for line in lines[thisstmt:nextstmt]:
- if diff > 0:
- if line == "\n":
- after.append(line)
- else:
- after.append(" " * diff + line)
- else:
- remove = min(getlspace(line), -diff)
- after.append(line[remove:])
- return self.raw != self.after
-
- def write(self, f):
- f.writelines(self.after)
-
- # Line-getter for tokenize.
- def getline(self):
- if self.index >= len(self.lines):
- line = ""
- else:
- line = self.lines[self.index]
- self.index += 1
- return line
-
- # Line-eater for tokenize.
- def tokeneater(self, type, token, (sline, scol), end, line,
- INDENT=tokenize.INDENT,
- DEDENT=tokenize.DEDENT,
- NEWLINE=tokenize.NEWLINE,
- COMMENT=tokenize.COMMENT,
- NL=tokenize.NL):
-
- if type == NEWLINE:
- # A program statement, or ENDMARKER, will eventually follow,
- # after some (possibly empty) run of tokens of the form
- # (NL | COMMENT)* (INDENT | DEDENT+)?
- self.find_stmt = 1
-
- elif type == INDENT:
- self.find_stmt = 1
- self.level += 1
-
- elif type == DEDENT:
- self.find_stmt = 1
- self.level -= 1
-
- elif type == COMMENT:
- if self.find_stmt:
- self.stats.append((sline, -1))
- # but we're still looking for a new stmt, so leave
- # find_stmt alone
-
- elif type == NL:
- pass
-
- elif self.find_stmt:
- # This is the first "real token" following a NEWLINE, so it
- # must be the first token of the next program statement, or an
- # ENDMARKER.
- self.find_stmt = 0
- if line: # not endmarker
- self.stats.append((sline, self.level))
-
-# Count number of leading blanks.
-def getlspace(line):
- i, n = 0, len(line)
- while i < n and line[i] == " ":
- i += 1
- return i
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/vim2pygments.py b/scripts/vim2pygments.py
index 80f0ada2..42af0bbe 100644..100755
--- a/scripts/vim2pygments.py
+++ b/scripts/vim2pygments.py
@@ -11,10 +11,12 @@
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import sys
import re
from os import path
-from cStringIO import StringIO
+from io import StringIO
split_re = re.compile(r'(?<!\\)\s+')
@@ -765,7 +767,7 @@ TOKENS = {
}
TOKEN_TYPES = set()
-for token in TOKENS.itervalues():
+for token in TOKENS.values():
if not isinstance(token, tuple):
token = (token,)
for token in token:
@@ -836,7 +838,7 @@ def find_colors(code):
colors['Normal']['bgcolor'] = bg_color
color_map = {}
- for token, styles in colors.iteritems():
+ for token, styles in colors.items():
if token in TOKENS:
tmp = []
if styles.get('noinherit'):
@@ -879,7 +881,7 @@ class StyleWriter(object):
def write(self, out):
self.write_header(out)
default_token, tokens = find_colors(self.code)
- tokens = tokens.items()
+ tokens = list(tokens.items())
tokens.sort(lambda a, b: cmp(len(a[0]), len(a[1])))
bg_color = [x[3:] for x in default_token.split() if x.startswith('bg:')]
if bg_color:
@@ -916,14 +918,14 @@ def convert(filename, stream=None):
def main():
if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
- print 'Usage: %s <filename.vim>' % sys.argv[0]
+ print('Usage: %s <filename.vim>' % sys.argv[0])
return 2
if sys.argv[1] in ('-v', '--version'):
- print '%s %s' % (SCRIPT_NAME, SCRIPT_VERSION)
+ print('%s %s' % (SCRIPT_NAME, SCRIPT_VERSION))
return
filename = sys.argv[1]
if not (path.exists(filename) and path.isfile(filename)):
- print 'Error: %s not found' % filename
+ print('Error: %s not found' % filename)
return 1
convert(filename, sys.stdout)
sys.stdout.write('\n')
diff --git a/setup.py b/setup.py
index 17bbf814..a0b2e90b 100755
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/default.zip#egg=Pygments-dev
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -31,21 +31,22 @@ try:
from setuptools import setup, find_packages
have_setuptools = True
except ImportError:
- from distutils.core import setup
- def find_packages():
- return [
- 'pygments',
- 'pygments.lexers',
- 'pygments.formatters',
- 'pygments.styles',
- 'pygments.filters',
- ]
- have_setuptools = False
-
-try:
- from distutils.command.build_py import build_py_2to3 as build_py
-except ImportError:
- from distutils.command.build_py import build_py
+ try:
+ import ez_setup
+ ez_setup.use_setuptools()
+ from setuptools import setup, find_packages
+ have_setuptools = True
+ except ImportError:
+ from distutils.core import setup
+ def find_packages(*args, **kwargs):
+ return [
+ 'pygments',
+ 'pygments.lexers',
+ 'pygments.formatters',
+ 'pygments.styles',
+ 'pygments.filters',
+ ]
+ have_setuptools = False
if have_setuptools:
add_keywords = dict(
@@ -60,7 +61,7 @@ else:
setup(
name = 'Pygments',
- version = '1.6',
+ version = '2.0pre',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
@@ -68,7 +69,7 @@ setup(
description = 'Pygments is a syntax highlighting package written in Python.',
long_description = __doc__,
keywords = 'syntax highlighting',
- packages = find_packages(),
+ packages = find_packages(exclude=['ez_setup']),
platforms = 'any',
zip_safe = False,
include_package_data = True,
@@ -85,6 +86,5 @@ setup(
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
- cmdclass = {'build_py': build_py},
**add_keywords
)
diff --git a/tests/examplefiles/99_bottles_of_beer.chpl b/tests/examplefiles/99_bottles_of_beer.chpl
new file mode 100644
index 00000000..f73be7b1
--- /dev/null
+++ b/tests/examplefiles/99_bottles_of_beer.chpl
@@ -0,0 +1,118 @@
+/***********************************************************************
+ * Chapel implementation of "99 bottles of beer"
+ *
+ * by Brad Chamberlain and Steve Deitz
+ * 07/13/2006 in Knoxville airport while waiting for flight home from
+ * HPLS workshop
+ * compiles and runs with chpl compiler version 1.7.0
+ * for more information, contact: chapel_info@cray.com
+ *
+ *
+ * Notes:
+ * o as in all good parallel computations, boundary conditions
+ * constitute the vast bulk of complexity in this code (invite Brad to
+ * tell you about his zany boundary condition simplification scheme)
+ * o uses type inference for variables, arguments
+ * o relies on integer->string coercions
+ * o uses named argument passing (for documentation purposes only)
+ ***********************************************************************/
+
+// allow executable command-line specification of number of bottles
+// (e.g., ./a.out -snumBottles=999999)
+config const numBottles = 99;
+const numVerses = numBottles+1;
+
+// a domain to describe the space of lyrics
+var LyricsSpace: domain(1) = {1..numVerses};
+
+// array of lyrics
+var Lyrics: [LyricsSpace] string;
+
+// parallel computation of lyrics array
+[verse in LyricsSpace] Lyrics(verse) = computeLyric(verse);
+
+// as in any good parallel language, I/O to stdout is serialized.
+// (Note that I/O to a file could be parallelized using a parallel
+// prefix computation on the verse strings' lengths with file seeking)
+writeln(Lyrics);
+
+
+// HELPER FUNCTIONS:
+
+proc computeLyric(verseNum) {
+ var bottleNum = numBottles - (verseNum - 1);
+ var nextBottle = (bottleNum + numVerses - 1)%numVerses;
+ return "\n" // disguise space used to separate elements in array I/O
+ + describeBottles(bottleNum, startOfVerse=true) + " on the wall, "
+ + describeBottles(bottleNum) + ".\n"
+ + computeAction(bottleNum)
+ + describeBottles(nextBottle) + " on the wall.\n";
+}
+
+
+proc describeBottles(bottleNum, startOfVerse:bool = false) {
+ // NOTE: bool should not be necessary here (^^^^); working around bug
+ var bottleDescription = if (bottleNum) then bottleNum:string
+ else (if startOfVerse then "N"
+ else "n")
+ + "o more";
+ return bottleDescription
+ + " bottle" + (if (bottleNum == 1) then "" else "s")
+ + " of beer";
+}
+
+
+proc computeAction(bottleNum) {
+ return if (bottleNum == 0) then "Go to the store and buy some more, "
+ else "Take one down and pass it around, ";
+}
+
+
+// Modules...
+module M1 {
+ var x = 10;
+}
+
+module M2 {
+ use M1;
+ proc main() {
+ writeln("M2 -> M1 -> x " + x);
+ }
+}
+
+
+// Classes, records, unions...
+const PI: real = 3.14159;
+
+record Point {
+ var x, y: real;
+}
+var p: Point;
+writeln("Distance from origin: " + sqrt(p.x ** 2 + p.y ** 2));
+p = new Point(1.0, 2.0);
+writeln("Distance from origin: " + sqrt(p.x ** 2 + p.y ** 2));
+
+class Circle {
+ var p: Point;
+ var r: real;
+}
+var c = new Circle(r=2.0);
+proc Circle.area()
+ return PI * r ** 2;
+writeln("Area of circle: " + c.area());
+
+class Oval: Circle {
+ var r2: real;
+}
+proc Oval.area()
+ return PI * r * r2;
+
+delete c;
+c = nil;
+c = new Oval(r=1.0, r2=2.0);
+writeln("Area of oval: " + c.area());
+
+union U {
+ var i: int;
+ var r: real;
+}
diff --git a/tests/examplefiles/Deflate.fs b/tests/examplefiles/Deflate.fs
new file mode 100755
index 00000000..7d3680ec
--- /dev/null
+++ b/tests/examplefiles/Deflate.fs
@@ -0,0 +1,578 @@
+// public domain
+
+module Deflate
+
+open System
+open System.Collections.Generic
+open System.IO
+open System.Linq
+open Crc
+
+let maxbuf = 32768
+let maxlen = 258
+
+let getBit (b:byte) (bit:int) =
+ if b &&& (1uy <<< bit) = 0uy then 0 else 1
+
+type BitReader(sin:Stream) =
+ let mutable bit = 8
+ let mutable cur = 0uy
+
+ member x.Skip() =
+ bit <- 8
+
+ member x.ReadBit() =
+ if bit = 8 then
+ bit <- 0
+ let b = sin.ReadByte()
+ if b = -1 then
+ failwith "バッファを超過しました"
+ cur <- byte b
+ let ret = if cur &&& (1uy <<< bit) = 0uy then 0 else 1
+ bit <- bit + 1
+ ret
+
+ member x.ReadLE n =
+ let mutable ret = 0
+ for i = 0 to n - 1 do
+ if x.ReadBit() = 1 then ret <- ret ||| (1 <<< i)
+ ret
+
+ member x.ReadBE n =
+ let mutable ret = 0
+ for i = 0 to n - 1 do
+ ret <- (ret <<< 1) ||| x.ReadBit()
+ ret
+
+ member x.ReadBytes len =
+ if bit <> 8 then bit <- 8
+ let buf = Array.zeroCreate<byte> len
+ ignore <| sin.Read(buf, 0, len)
+ buf
+
+type WriteBuffer(sout:Stream) =
+ let mutable prev:byte[] = null
+ let mutable buf = Array.zeroCreate<byte> maxbuf
+ let mutable p = 0
+
+ let next newbuf =
+ prev <- buf
+ buf <- if newbuf then Array.zeroCreate<byte> maxbuf else null
+ p <- 0
+
+ member x.Close() =
+ next false
+ next false
+
+ interface IDisposable with
+ member x.Dispose() = x.Close()
+
+ member x.WriteByte (b:byte) =
+ buf.[p] <- b
+ sout.WriteByte b
+ p <- p + 1
+ if p = maxbuf then next true
+
+ member x.Write (src:byte[]) start len =
+ let maxlen = maxbuf - p
+ if len <= maxlen then
+ Array.Copy(src, start, buf, p, len)
+ sout.Write(src, start, len)
+ p <- p + len
+ if p = maxbuf then next true
+ else
+ x.Write src start maxlen
+ x.Write src (start + maxlen) (len - maxlen)
+
+ member x.Copy len dist =
+ if dist < 1 then
+ failwith <| sprintf "dist too small: %d < 1" dist
+ elif dist > maxbuf then
+ failwith <| sprintf "dist too big: %d > %d" dist maxbuf
+ let pp = p - dist
+ if pp < 0 then
+ if prev = null then
+ failwith <| sprintf "dist too big: %d > %d" dist p
+ let pp = pp + maxbuf
+ let maxlen = maxbuf - pp
+ if len <= maxlen then
+ x.Write prev pp len
+ else
+ x.Write prev pp maxlen
+ x.Copy (len - maxlen) dist
+ else
+ let maxlen = p - pp
+ if len <= maxlen then
+ x.Write buf pp len
+ else
+ if dist = 1 then
+ let b = buf.[pp]
+ for i = 1 to len do
+ x.WriteByte b
+ else
+ let buf' = buf
+ let mutable len' = len
+ while len' > 0 do
+ let len'' = Math.Min(len', maxlen)
+ x.Write buf' pp len''
+ len' <- len' - len''
+
+type Huffman(lens:int[]) =
+ let vals = Array.zeroCreate<int> lens.Length
+ let min = lens.Where(fun x -> x > 0).Min()
+ let max = lens.Max()
+ let counts = Array.zeroCreate<int> (max + 1)
+ let firsts = Array.zeroCreate<int> (max + 1)
+ let nexts = Array.zeroCreate<int> (max + 1)
+ let tables = Array.zeroCreate<int[]>(max + 1)
+
+ do
+ for len in lens do
+ if len > 0 then counts.[len] <- counts.[len] + 1
+ for i = 1 to max do
+ firsts.[i] <- (firsts.[i - 1] + counts.[i - 1]) <<< 1
+ Array.Copy(firsts, 0, nexts, 0, max + 1)
+ for i = 0 to vals.Length - 1 do
+ let len = lens.[i]
+ if len > 0 then
+ vals.[i] <- nexts.[len]
+ nexts.[len] <- nexts.[len] + 1
+
+ for i = 0 to vals.Length - 1 do
+ let len = lens.[i]
+ if len > 0 then
+ let start = firsts.[len]
+ if tables.[len] = null then
+ let count = nexts.[len] - start
+ tables.[len] <- Array.zeroCreate<int> count
+ tables.[len].[vals.[i] - start] <- i
+
+ member x.GetValue h =
+ let rec getv i =
+ if i > max then -1 else
+ if h < nexts.[i] then
+ tables.[i].[h - firsts.[i]]
+ else
+ getv (i + 1)
+ getv min
+
+ member x.Read(br:BitReader) =
+ let rec read h i =
+ if h < nexts.[i] then
+ tables.[i].[h - firsts.[i]]
+ else
+ read ((h <<< 1) ||| br.ReadBit()) (i + 1)
+ read (br.ReadBE min) min
+
+type [<AbstractClass>] HuffmanDecoder() =
+ abstract GetValue: unit->int
+ abstract GetDistance: unit->int
+
+type FixedHuffman(br:BitReader) =
+ inherit HuffmanDecoder()
+
+ override x.GetValue() =
+ let v = br.ReadBE 7
+ if v < 24 then v + 256 else
+ let v = (v <<< 1) ||| br.ReadBit()
+ if v < 192 then v - 48
+ elif v < 200 then v + 88
+ else ((v <<< 1) ||| br.ReadBit()) - 256
+
+ override x.GetDistance() = br.ReadBE 5
+
+type DynamicHuffman(br:BitReader) =
+ inherit HuffmanDecoder()
+
+ let lit, dist =
+ let hlit =
+ let hlit = (br.ReadLE 5) + 257
+ if hlit > 286 then failwith <| sprintf "hlit: %d > 286" hlit
+ hlit
+
+ let hdist =
+ let hdist = (br.ReadLE 5) + 1
+ if hdist > 32 then failwith <| sprintf "hdist: %d > 32" hdist
+ hdist
+
+ let hclen =
+ let hclen = (br.ReadLE 4) + 4
+ if hclen > 19 then failwith <| sprintf "hclen: %d > 19" hclen
+ hclen
+
+ let clen =
+ let hclens = Array.zeroCreate<int> 19
+ let order = [| 16; 17; 18; 0; 8; 7; 9; 6; 10; 5;
+ 11; 4; 12; 3; 13; 2; 14; 1; 15 |]
+ for i = 0 to hclen - 1 do
+ hclens.[order.[i]] <- br.ReadLE 3
+ new Huffman(hclens)
+
+ let ld = Array.zeroCreate<int>(hlit + hdist)
+ let mutable i = 0
+ while i < ld.Length do
+ let v = clen.Read(br)
+ if v < 16 then
+ ld.[i] <- v
+ i <- i + 1
+ else
+ let r, v =
+ match v with
+ | 16 -> (br.ReadLE 2) + 3, ld.[i - 1]
+ | 17 -> (br.ReadLE 3) + 3, 0
+ | 18 -> (br.ReadLE 7) + 11, 0
+ | _ -> failwith "不正な値です。"
+ for j = 0 to r - 1 do
+ ld.[i + j] <- v
+ i <- i + r
+
+ new Huffman(ld.[0 .. hlit - 1]),
+ new Huffman(ld.[hlit .. hlit + hdist - 1])
+
+ override x.GetValue() = lit.Read br
+ override x.GetDistance() = dist.Read br
+
+let getLitExLen v = if v < 265 || v = 285 then 0 else (v - 261) >>> 2
+let getDistExLen d = if d < 4 then 0 else (d - 2) >>> 1
+
+let litlens =
+ let litlens = Array.zeroCreate<int> 286
+ let mutable v = 3
+ for i = 257 to 284 do
+ litlens.[i] <- v
+ v <- v + (1 <<< (getLitExLen i))
+ litlens.[285] <- maxlen
+ litlens.[257..285]
+
+let distlens =
+ let distlens = Array.zeroCreate<int> 30
+ let mutable v = 1
+ for i = 0 to 29 do
+ distlens.[i] <- v
+ v <- v + (1 <<< (getDistExLen i))
+ distlens
+
+type Reader(sin:Stream) =
+ inherit Stream()
+
+ let br = new BitReader(sin)
+ let fh = new FixedHuffman(br)
+
+ let sout = new MemoryStream()
+ let dbuf = new WriteBuffer(sout)
+
+ let mutable cache:byte[] = null
+ let mutable canRead = true
+
+ let rec read (h:HuffmanDecoder) =
+ let v = h.GetValue()
+ if v > 285 then failwith <| sprintf "不正な値: %d" v
+ if v < 256 then
+ dbuf.WriteByte(byte v)
+ elif v > 256 then
+ let len =
+ if v < 265 then v - 254 else
+ litlens.[v - 257] + (br.ReadLE (getLitExLen v))
+ let dist =
+ let d = h.GetDistance()
+ if d > 29 then failwith <| sprintf "不正な距離: %d" d
+ if d < 4 then d + 1 else
+ distlens.[d] + (br.ReadLE (getDistExLen d))
+ dbuf.Copy len dist
+ if v <> 256 then read h
+
+ override x.CanRead = canRead
+ override x.CanWrite = false
+ override x.CanSeek = false
+ override x.Flush() = ()
+
+ override x.Close() =
+ dbuf.Close()
+ canRead <- false
+
+ override x.Read(buffer, offset, count) =
+ let offset =
+ if cache = null then 0 else
+ let clen = cache.Length
+ let len = Math.Min(clen, count)
+ Array.Copy(cache, 0, buffer, offset, len)
+ cache <- if len = clen then null
+ else cache.[len .. clen - 1]
+ len
+ let req = int64 <| count - offset
+ while canRead && sout.Length < req do
+ x.readBlock()
+ let len =
+ if sout.Length = 0L then 0 else
+ let data = sout.ToArray()
+ sout.SetLength(0L)
+ let dlen = data.Length
+ let len = Math.Min(int req, dlen)
+ Array.Copy(data, 0, buffer, offset, len)
+ if dlen > len then
+ cache <- data.[len..]
+ len
+ offset + len
+
+ override x.Position
+ with get() = raise <| new NotImplementedException()
+ and set(v) = raise <| new NotImplementedException()
+
+ override x.Length = raise <| new NotImplementedException()
+ override x.Seek(_, _) = raise <| new NotImplementedException()
+ override x.Write(_, _, _) = raise <| new NotImplementedException()
+ override x.SetLength(_) = raise <| new NotImplementedException()
+
+ member private x.readBlock() =
+ let bfinal = br.ReadBit()
+ match br.ReadLE 2 with
+ | 0 -> br.Skip()
+ let len = br.ReadLE 16
+ let nlen = br.ReadLE 16
+ if len + nlen <> 0x10000 then
+ failwith "不正な非圧縮長"
+ dbuf.Write (br.ReadBytes len) 0 len
+ | 1 -> read fh
+ | 2 -> read (new DynamicHuffman(br))
+ | _ -> failwith "不正なブロックタイプ"
+ if bfinal = 1 then
+ canRead <- false
+ x.Close()
+
+type BitWriter(sout:Stream) =
+ let mutable bit = 0
+ let mutable cur = 0uy
+
+ member x.Skip() =
+ if bit > 0 then
+ sout.WriteByte(cur)
+ bit <- 0
+ cur <- 0uy
+
+ interface IDisposable with
+ member x.Dispose() =
+ x.Skip()
+ sout.Flush()
+
+ member x.WriteBit(b:int) =
+ cur <- cur ||| ((byte b) <<< bit)
+ bit <- bit + 1
+ if bit = 8 then
+ sout.WriteByte(cur)
+ bit <- 0
+ cur <- 0uy
+
+ member x.WriteLE (len:int) (b:int) =
+ for i = 0 to len - 1 do
+ x.WriteBit <| if (b &&& (1 <<< i)) = 0 then 0 else 1
+
+ member x.WriteBE (len:int) (b:int) =
+ for i = len - 1 downto 0 do
+ x.WriteBit <| if (b &&& (1 <<< i)) = 0 then 0 else 1
+
+ member x.WriteBytes(data:byte[]) =
+ x.Skip()
+ sout.Write(data, 0, data.Length)
+
+type FixedHuffmanWriter(bw:BitWriter) =
+ member x.Write (b:int) =
+ if b < 144 then
+ bw.WriteBE 8 (b + 0b110000)
+ elif b < 256 then
+ bw.WriteBE 9 (b - 144 + 0b110010000)
+ elif b < 280 then
+ bw.WriteBE 7 (b - 256)
+ elif b < 288 then
+ bw.WriteBE 8 (b - 280 + 0b11000000)
+
+ member x.WriteLen (len:int) =
+ if len < 3 || len > maxlen then
+ failwith <| sprintf "不正な長さ: %d" len
+ let mutable ll = 285
+ while len < litlens.[ll - 257] do
+ ll <- ll - 1
+ x.Write ll
+ bw.WriteLE (getLitExLen ll) (len - litlens.[ll - 257])
+
+ member x.WriteDist (d:int) =
+ if d < 1 || d > maxbuf then
+ failwith <| sprintf "不正な距離: %d" d
+ let mutable dl = 29
+ while d < distlens.[dl] do
+ dl <- dl - 1
+ bw.WriteBE 5 dl
+ bw.WriteLE (getDistExLen dl) (d - distlens.[dl])
+
+let maxbuf2 = maxbuf * 2
+let buflen = maxbuf2 + maxlen
+
+let inline getHash (buf:byte[]) pos =
+ ((int buf.[pos]) <<< 4) ^^^ ((int buf.[pos + 1]) <<< 2) ^^^ (int buf.[pos + 2])
+
+let inline addHash (hash:List<int>[]) (buf:byte[]) pos =
+ if buf.[pos] <> buf.[pos + 1] then
+ hash.[getHash buf pos].Add pos
+
+let inline addHash2 (tables:int[,]) (counts:int[]) (buf:byte[]) pos =
+ if buf.[pos] <> buf.[pos + 1] then
+ let h = getHash buf pos
+ let c = counts.[h]
+ tables.[h, c &&& 15] <- pos
+ counts.[h] <- c + 1
+
+type Writer(t:int, sin:Stream) =
+ let mutable length = buflen
+ let buf = Array.zeroCreate<byte> buflen
+ let tables, counts =
+ if t = 2 then Array2D.zeroCreate<int> 4096 16, Array.create 4096 0 else null, null
+ let hash = if tables = null then [| for _ in 0..4095 -> new List<int>() |] else null
+ let mutable crc = ~~~0u
+
+ let read pos len =
+ let rlen = sin.Read(buf, pos, len)
+ if rlen < len then length <- pos + rlen
+ for i = pos to pos + rlen - 1 do
+ let b = int(crc ^^^ (uint32 buf.[i])) &&& 0xff
+ crc <- (crc >>> 8) ^^^ crc32_table.[b]
+ if hash <> null then
+ for list in hash do list.Clear()
+ else
+ Array.fill counts 0 counts.Length 0
+
+ do
+ read 0 buflen
+
+ let search (pos:int) =
+ let mutable maxp = -1
+ let mutable maxl = 2
+ let mlen = Math.Min(maxlen, length - pos)
+ let last = Math.Max(0, pos - maxbuf)
+ let h = getHash buf pos
+ if hash <> null then
+ let list = hash.[h]
+ let mutable i = list.Count - 1
+ while i >= 0 do
+ let p = list.[i]
+ if p < last then i <- 0 else
+ let mutable len = 0
+ while len < mlen && buf.[p + len] = buf.[pos + len] do
+ len <- len + 1
+ if len > maxl then
+ maxp <- p
+ maxl <- len
+ i <- i - 1
+ else
+ let c = counts.[h]
+ let p1, p2 = if c < 16 then 0, c - 1 else c + 1, c + 16
+ let mutable i = p2
+ while i >= p1 do
+ let p = tables.[h, i &&& 15]
+ if p < last then i <- 0 else
+ let mutable len = 0
+ while len < mlen && buf.[p + len] = buf.[pos + len] do
+ len <- len + 1
+ if len > maxl then
+ maxp <- p
+ maxl <- len
+ i <- i - 1
+ maxp, maxl
+
+ member x.Crc = ~~~crc
+
+ member x.Compress (sout:Stream) =
+ use bw = new BitWriter(sout)
+ bw.WriteBit 1
+ bw.WriteLE 2 1
+ let hw = new FixedHuffmanWriter(bw)
+ let mutable p = 0
+ match t with
+ | 2 ->
+ while p < length do
+ let b = buf.[p]
+ if p < length - 4 && b = buf.[p + 1] && b = buf.[p + 2] && b = buf.[p + 3] then
+ let mutable len = 4
+ let mlen = Math.Min(maxlen + 1, length - p)
+ while len < mlen && b = buf.[p + len] do
+ len <- len + 1
+ hw.Write(int b)
+ hw.WriteLen(len - 1)
+ hw.WriteDist 1
+ p <- p + len
+ else
+ let maxp, maxl = search p
+ if maxp < 0 then
+ hw.Write(int b)
+ addHash2 tables counts buf p
+ p <- p + 1
+ else
+ hw.WriteLen maxl
+ hw.WriteDist (p - maxp)
+ for i = p to p + maxl - 1 do
+ addHash2 tables counts buf i
+ p <- p + maxl
+ if p > maxbuf2 then
+ Array.Copy(buf, maxbuf, buf, 0, maxbuf + maxlen)
+ if length < buflen then length <- length - maxbuf else
+ read (maxbuf + maxlen) maxbuf
+ p <- p - maxbuf
+ for i = 0 to p - 1 do
+ addHash2 tables counts buf i
+ | 1 ->
+ while p < length do
+ let b = buf.[p]
+ if p < length - 4 && b = buf.[p + 1] && b = buf.[p + 2] && b = buf.[p + 3] then
+ let mutable len = 4
+ let mlen = Math.Min(maxlen + 1, length - p)
+ while len < mlen && b = buf.[p + len] do
+ len <- len + 1
+ hw.Write(int b)
+ hw.WriteLen(len - 1)
+ hw.WriteDist 1
+ p <- p + len
+ else
+ let maxp, maxl = search p
+ if maxp < 0 then
+ hw.Write(int b)
+ addHash hash buf p
+ p <- p + 1
+ else
+ hw.WriteLen maxl
+ hw.WriteDist (p - maxp)
+ for i = p to p + maxl - 1 do
+ addHash hash buf i
+ p <- p + maxl
+ if p > maxbuf2 then
+ Array.Copy(buf, maxbuf, buf, 0, maxbuf + maxlen)
+ if length < buflen then length <- length - maxbuf else
+ read (maxbuf + maxlen) maxbuf
+ p <- p - maxbuf
+ for i = 0 to p - 1 do
+ addHash hash buf i
+ | _ ->
+ while p < length do
+ let maxp, maxl = search p
+ if maxp < 0 then
+ hw.Write(int buf.[p])
+ hash.[getHash buf p].Add p
+ p <- p + 1
+ else
+ hw.WriteLen maxl
+ hw.WriteDist (p - maxp)
+ for i = p to p + maxl - 1 do
+ hash.[getHash buf i].Add i
+ p <- p + maxl
+ if p > maxbuf2 then
+ Array.Copy(buf, maxbuf, buf, 0, maxbuf + maxlen)
+ if length < buflen then length <- length - maxbuf else
+ read (maxbuf + maxlen) maxbuf
+ p <- p - maxbuf
+ for i = 0 to p - 1 do
+ hash.[getHash buf i].Add i
+ hw.Write 256
+
+let GetCompressBytes (sin:Stream) =
+ let now = DateTime.Now
+ let ms = new MemoryStream()
+ let w = new Writer(1, sin)
+ w.Compress ms
+ ms.ToArray(), w.Crc
diff --git a/tests/examplefiles/Error.pmod b/tests/examplefiles/Error.pmod
new file mode 100644
index 00000000..808ecb0e
--- /dev/null
+++ b/tests/examplefiles/Error.pmod
@@ -0,0 +1,38 @@
+#pike __REAL_VERSION__
+
+constant Generic = __builtin.GenericError;
+
+constant Index = __builtin.IndexError;
+
+constant BadArgument = __builtin.BadArgumentError;
+
+constant Math = __builtin.MathError;
+
+constant Resource = __builtin.ResourceError;
+
+constant Permission = __builtin.PermissionError;
+
+constant Decode = __builtin.DecodeError;
+
+constant Cpp = __builtin.CppError;
+
+constant Compilation = __builtin.CompilationError;
+
+constant MasterLoad = __builtin.MasterLoadError;
+
+constant ModuleLoad = __builtin.ModuleLoadError;
+
+//! Returns an Error object for any argument it receives. If the
+//! argument already is an Error object or is empty, it does nothing.
+object mkerror(mixed error)
+{
+ if (error == UNDEFINED)
+ return error;
+ if (objectp(error) && error->is_generic_error)
+ return error;
+ if (arrayp(error))
+ return Error.Generic(@error);
+ if (stringp(error))
+ return Error.Generic(error);
+ return Error.Generic(sprintf("%O", error));
+} \ No newline at end of file
diff --git a/tests/examplefiles/FakeFile.pike b/tests/examplefiles/FakeFile.pike
new file mode 100644
index 00000000..48f3ea64
--- /dev/null
+++ b/tests/examplefiles/FakeFile.pike
@@ -0,0 +1,360 @@
+#pike __REAL_VERSION__
+
+//! A string wrapper that pretends to be a @[Stdio.File] object
+//! in addition to some features of a @[Stdio.FILE] object.
+
+
+//! This constant can be used to distinguish a FakeFile object
+//! from a real @[Stdio.File] object.
+constant is_fake_file = 1;
+
+protected string data;
+protected int ptr;
+protected int(0..1) r;
+protected int(0..1) w;
+protected int mtime;
+
+protected function read_cb;
+protected function read_oob_cb;
+protected function write_cb;
+protected function write_oob_cb;
+protected function close_cb;
+
+//! @seealso
+//! @[Stdio.File()->close()]
+int close(void|string direction) {
+ direction = lower_case(direction||"rw");
+ int cr = has_value(direction, "r");
+ int cw = has_value(direction, "w");
+
+ if(cr) {
+ r = 0;
+ }
+
+ if(cw) {
+ w = 0;
+ }
+
+ // FIXME: Close callback
+ return 1;
+}
+
+//! @decl void create(string data, void|string type, void|int pointer)
+//! @seealso
+//! @[Stdio.File()->create()]
+void create(string _data, void|string type, int|void _ptr) {
+ if(!_data) error("No data string given to FakeFile.\n");
+ data = _data;
+ ptr = _ptr;
+ mtime = time();
+ if(type) {
+ type = lower_case(type);
+ if(has_value(type, "r"))
+ r = 1;
+ if(has_value(type, "w"))
+ w = 1;
+ }
+ else
+ r = w = 1;
+}
+
+protected string make_type_str() {
+ string type = "";
+ if(r) type += "r";
+ if(w) type += "w";
+ return type;
+}
+
+//! @seealso
+//! @[Stdio.File()->dup()]
+this_program dup() {
+ return this_program(data, make_type_str(), ptr);
+}
+
+//! Always returns 0.
+//! @seealso
+//! @[Stdio.File()->errno()]
+int errno() { return 0; }
+
+//! Returns size and the creation time of the string.
+Stdio.Stat stat() {
+ Stdio.Stat st = Stdio.Stat();
+ st->size = sizeof(data);
+ st->mtime=st->ctime=mtime;
+ st->atime=time();
+ return st;
+}
+
+//! @seealso
+//! @[Stdio.File()->line_iterator()]
+String.SplitIterator line_iterator(int|void trim) {
+ if(trim)
+ return String.SplitIterator( data-"\r", '\n' );
+ return String.SplitIterator( data, '\n' );
+}
+
+protected mixed id;
+
+//! @seealso
+//! @[Stdio.File()->query_id()]
+mixed query_id() { return id; }
+
+//! @seealso
+//! @[Stdio.File()->set_id()]
+void set_id(mixed _id) { id = _id; }
+
+//! @seealso
+//! @[Stdio.File()->read_function()]
+function(:string) read_function(int nbytes) {
+ return lambda() { return read(nbytes); };
+}
+
+//! @seealso
+//! @[Stdio.File()->peek()]
+int(-1..1) peek(int|float|void timeout) {
+ if(!r) return -1;
+ if(ptr >= sizeof(data)) return 0;
+ return 1;
+}
+
+//! Always returns 0.
+//! @seealso
+//! @[Stdio.File()->query_address()]
+string query_address(void|int(0..1) is_local) { return 0; }
+
+//! @seealso
+//! @[Stdio.File()->read()]
+string read(void|int(0..) len, void|int(0..1) not_all) {
+ if(!r) return 0;
+ if (len < 0) error("Cannot read negative number of characters.\n");
+ int start=ptr;
+ ptr += len;
+ if(zero_type(len) || ptr>sizeof(data))
+ ptr = sizeof(data);
+
+ // FIXME: read callback
+ return data[start..ptr-1];
+}
+
+//! @seealso
+//! @[Stdio.FILE()->gets()]
+string gets() {
+ if(!r) return 0;
+ string ret;
+ sscanf(data,"%*"+(string)ptr+"s%[^\n]",ret);
+ if(ret)
+ {
+ ptr+=sizeof(ret)+1;
+ if(ptr>sizeof(data))
+ {
+ ptr=sizeof(data);
+ if(!sizeof(ret))
+ ret = 0;
+ }
+ }
+
+ // FIXME: read callback
+ return ret;
+}
+
+//! @seealso
+//! @[Stdio.FILE()->getchar()]
+int getchar() {
+ if(!r) return 0;
+ int c;
+ if(catch(c=data[ptr]))
+ c=-1;
+ else
+ ptr++;
+
+ // FIXME: read callback
+ return c;
+}
+
+//! @seealso
+//! @[Stdio.FILE()->unread()]
+void unread(string s) {
+ if(!r) return;
+ if(data[ptr-sizeof(s)..ptr-1]==s)
+ ptr-=sizeof(s);
+ else
+ {
+ data=s+data[ptr..];
+ ptr=0;
+ }
+}
+
+//! @seealso
+//! @[Stdio.File()->seek()]
+int seek(int pos, void|int mult, void|int add) {
+ if(mult)
+ pos = pos*mult+add;
+ if(pos<0)
+ {
+ pos = sizeof(data)+pos;
+ if( pos < 0 )
+ pos = 0;
+ }
+ ptr = pos;
+ if( ptr > strlen( data ) )
+ ptr = strlen(data);
+ return ptr;
+}
+
+//! Always returns 1.
+//! @seealso
+//! @[Stdio.File()->sync()]
+int(1..1) sync() { return 1; }
+
+//! @seealso
+//! @[Stdio.File()->tell()]
+int tell() { return ptr; }
+
+//! @seealso
+//! @[Stdio.File()->truncate()]
+int(0..1) truncate(int length) {
+ data = data[..length-1];
+ return sizeof(data)==length;
+}
+
+//! @seealso
+//! @[Stdio.File()->write()]
+int(-1..) write(string|array(string) str, mixed ... extra) {
+ if(!w) return -1;
+ if(arrayp(str)) str=str*"";
+ if(sizeof(extra)) str=sprintf(str, @extra);
+
+ if(ptr==sizeof(data)) {
+ data += str;
+ ptr = sizeof(data);
+ }
+ else if(sizeof(str)==1)
+ data[ptr++] = str[0];
+ else {
+ data = data[..ptr-1] + str + data[ptr+sizeof(str)..];
+ ptr += sizeof(str);
+ }
+
+ // FIXME: write callback
+ return sizeof(str);
+}
+
+//! @seealso
+//! @[Stdio.File()->set_blocking]
+void set_blocking() {
+ close_cb = 0;
+ read_cb = 0;
+ read_oob_cb = 0;
+ write_cb = 0;
+ write_oob_cb = 0;
+}
+
+//! @seealso
+//! @[Stdio.File()->set_blocking_keep_callbacks]
+void set_blocking_keep_callbacks() { }
+
+//! @seealso
+//! @[Stdio.File()->set_blocking]
+void set_nonblocking(function rcb, function wcb, function ccb,
+ function rocb, function wocb) {
+ read_cb = rcb;
+ write_cb = wcb;
+ close_cb = ccb;
+ read_oob_cb = rocb;
+ write_oob_cb = wocb;
+}
+
+//! @seealso
+//! @[Stdio.File()->set_blocking_keep_callbacks]
+void set_nonblocking_keep_callbacks() { }
+
+
+//! @seealso
+//! @[Stdio.File()->set_close_callback]
+void set_close_callback(function cb) { close_cb = cb; }
+
+//! @seealso
+//! @[Stdio.File()->set_read_callback]
+void set_read_callback(function cb) { read_cb = cb; }
+
+//! @seealso
+//! @[Stdio.File()->set_read_oob_callback]
+void set_read_oob_callback(function cb) { read_oob_cb = cb; }
+
+//! @seealso
+//! @[Stdio.File()->set_write_callback]
+void set_write_callback(function cb) { write_cb = cb; }
+
+//! @seealso
+//! @[Stdio.File()->set_write_oob_callback]
+void set_write_oob_callback(function cb) { write_oob_cb = cb; }
+
+
+//! @seealso
+//! @[Stdio.File()->query_close_callback]
+function query_close_callback() { return close_cb; }
+
+//! @seealso
+//! @[Stdio.File()->query_read_callback]
+function query_read_callback() { return read_cb; }
+
+//! @seealso
+//! @[Stdio.File()->query_read_oob_callback]
+function query_read_oob_callback() { return read_oob_cb; }
+
+//! @seealso
+//! @[Stdio.File()->query_write_callback]
+function query_write_callback() { return write_cb; }
+
+//! @seealso
+//! @[Stdio.File()->query_write_oob_callback]
+function query_write_oob_callback() { return write_oob_cb; }
+
+string _sprintf(int t) {
+ return t=='O' && sprintf("%O(%d,%O)", this_program, sizeof(data),
+ make_type_str());
+}
+
+
+// FakeFile specials.
+
+//! A FakeFile can be casted to a string.
+mixed cast(string to) {
+ switch(to) {
+ case "string": return data;
+ case "object": return this;
+ }
+ error("Can not cast object to %O.\n", to);
+}
+
+//! Sizeof on a FakeFile returns the size of its contents.
+int(0..) _sizeof() {
+ return sizeof(data);
+}
+
+//! @ignore
+
+#define NOPE(X) mixed X (mixed ... args) { error("This is a FakeFile. %s is not available.\n", #X); }
+NOPE(assign);
+NOPE(async_connect);
+NOPE(connect);
+NOPE(connect_unix);
+NOPE(open);
+NOPE(open_socket);
+NOPE(pipe);
+NOPE(tcgetattr);
+NOPE(tcsetattr);
+
+// Stdio.Fd
+NOPE(dup2);
+NOPE(lock); // We could implement this
+NOPE(mode); // We could implement this
+NOPE(proxy); // We could implement this
+NOPE(query_fd);
+NOPE(read_oob);
+NOPE(set_close_on_exec);
+NOPE(set_keepalive);
+NOPE(trylock); // We could implement this
+NOPE(write_oob);
+
+//! @endignore \ No newline at end of file
diff --git a/tests/examplefiles/Get-CommandDefinitionHtml.ps1 b/tests/examplefiles/Get-CommandDefinitionHtml.ps1
new file mode 100644
index 00000000..b181955f
--- /dev/null
+++ b/tests/examplefiles/Get-CommandDefinitionHtml.ps1
@@ -0,0 +1,66 @@
+
+function Get-CommandDefinitionHtml {
+
+ # this tells powershell to allow advanced features,
+ # like the [validatenotnullorempty()] attribute below.
+ [CmdletBinding()]
+ param(
+ [ValidateNotNullOrEmpty()]
+ [string]$name
+ )
+
+ $command = get-command $name
+
+ # Look mom! I'm a cmdlet!
+ $PSCmdlet.WriteVerbose("Dumping HTML for " + $command)
+
+@"
+ <html>
+ <head>
+ <title>$($command.name)</title>
+ </head>
+ <body>
+ <table border="1">
+$(
+ $command.parametersets | % {
+@"
+
+ <tr>
+ <td>$($_.name)</td>
+ <td>
+ <table border="1">
+ <tr>
+ <th colspan="8">Parameters</th>
+
+$(
+ $count = 0
+ $_.parameters | % {
+ if (0 -eq ($count % 8)) {
+@'
+ </tr>
+ <tr>
+'@
+ }
+@"
+ <td>$($_.name)</td>
+"@
+ $count++
+ }
+)
+ </tr>
+ </table>
+ </td>
+ </tr>
+"@
+ }
+)
+ </table>
+ </body>
+ </html>
+"@
+}
+
+Get-CommandDefinitionHtml get-item > out.html
+
+# show in browser
+invoke-item out.html
diff --git a/tests/examplefiles/IPDispatchC.nc b/tests/examplefiles/IPDispatchC.nc
new file mode 100644
index 00000000..95a61a2c
--- /dev/null
+++ b/tests/examplefiles/IPDispatchC.nc
@@ -0,0 +1,104 @@
+/*
+ * "Copyright (c) 2008-2011 The Regents of the University of California.
+ * All rights reserved."
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without written agreement is
+ * hereby granted, provided that the above copyright notice, the following
+ * two paragraphs and the author appear in all copies of this software.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
+ * CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
+ *
+ */
+
+/**
+ *
+ *
+ */
+#include "IPDispatch.h"
+#include "BlipStatistics.h"
+
+configuration IPDispatchC {
+ provides {
+ interface SplitControl;
+ interface IPLower;
+ interface BlipStatistics<ip_statistics_t>;
+ }
+} implementation {
+
+ components MainC;
+ components NoLedsC as LedsC;
+
+  /* IPDispatchP wiring -- fragment reassembly and lib6lowpan bindings */
+ components IPDispatchP;
+ components CC2420RadioC as MessageC;
+ components ReadLqiC;
+ components new TimerMilliC();
+
+ SplitControl = IPDispatchP.SplitControl;
+ IPLower = IPDispatchP;
+ BlipStatistics = IPDispatchP;
+
+ IPDispatchP.Boot -> MainC;
+/* #else */
+/* components ResourceSendP; */
+/* ResourceSendP.SubSend -> MessageC; */
+/* ResourceSendP.Resource -> MessageC.SendResource[unique("RADIO_SEND_RESOURCE")]; */
+/* IPDispatchP.Ieee154Send -> ResourceSendP.Ieee154Send; */
+/* #endif */
+ IPDispatchP.RadioControl -> MessageC;
+
+ IPDispatchP.BarePacket -> MessageC.BarePacket;
+ IPDispatchP.Ieee154Send -> MessageC.BareSend;
+ IPDispatchP.Ieee154Receive -> MessageC.BareReceive;
+
+#ifdef LOW_POWER_LISTENING
+ IPDispatchP.LowPowerListening -> MessageC;
+#endif
+ MainC.SoftwareInit -> IPDispatchP.Init;
+
+ IPDispatchP.PacketLink -> MessageC;
+ IPDispatchP.ReadLqi -> ReadLqiC;
+ IPDispatchP.Leds -> LedsC;
+ IPDispatchP.ExpireTimer -> TimerMilliC;
+
+ components new PoolC(message_t, N_FRAGMENTS) as FragPool;
+ components new PoolC(struct send_entry, N_FRAGMENTS) as SendEntryPool;
+ components new QueueC(struct send_entry *, N_FRAGMENTS);
+ components new PoolC(struct send_info, N_CONCURRENT_SENDS) as SendInfoPool;
+
+ IPDispatchP.FragPool -> FragPool;
+ IPDispatchP.SendEntryPool -> SendEntryPool;
+ IPDispatchP.SendInfoPool -> SendInfoPool;
+ IPDispatchP.SendQueue -> QueueC;
+
+ components IPNeighborDiscoveryP;
+ IPDispatchP.NeighborDiscovery -> IPNeighborDiscoveryP;
+
+/* components ICMPResponderC; */
+/* #ifdef BLIP_MULTICAST */
+/* components MulticastP; */
+/* components new TrickleTimerMilliC(2, 30, 2, 1); */
+/* IP = MulticastP.IP; */
+
+/* MainC.SoftwareInit -> MulticastP.Init; */
+/* MulticastP.MulticastRx -> IPDispatchP.Multicast; */
+/* MulticastP.HopHeader -> IPExtensionP.HopByHopExt[0]; */
+/* MulticastP.TrickleTimer -> TrickleTimerMilliC.TrickleTimer[0]; */
+/* MulticastP.IPExtensions -> IPDispatchP; */
+/* #endif */
+
+#ifdef DELUGE
+ components NWProgC;
+#endif
+
+}
diff --git a/tests/examplefiles/IPDispatchP.nc b/tests/examplefiles/IPDispatchP.nc
new file mode 100644
index 00000000..628f39a0
--- /dev/null
+++ b/tests/examplefiles/IPDispatchP.nc
@@ -0,0 +1,671 @@
+/*
+ * "Copyright (c) 2008 The Regents of the University of California.
+ * All rights reserved."
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without written agreement is
+ * hereby granted, provided that the above copyright notice, the following
+ * two paragraphs and the author appear in all copies of this software.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+ * OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
+ * CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
+ *
+ */
+
+#include <lib6lowpan/blip-tinyos-includes.h>
+#include <lib6lowpan/6lowpan.h>
+#include <lib6lowpan/lib6lowpan.h>
+#include <lib6lowpan/ip.h>
+#include <lib6lowpan/in_cksum.h>
+#include <lib6lowpan/ip_malloc.h>
+
+#include "blip_printf.h"
+#include "IPDispatch.h"
+#include "BlipStatistics.h"
+#include "table.h"
+
+/*
+ * Provides IP layer reception to applications on motes.
+ *
+ * @author Stephen Dawson-Haggerty <stevedh@cs.berkeley.edu>
+ */
+
+module IPDispatchP {
+ provides {
+ interface SplitControl;
+ // interface for protocols not requiring special hand-holding
+ interface IPLower;
+
+ interface BlipStatistics<ip_statistics_t>;
+
+ }
+ uses {
+ interface Boot;
+
+
+ /* link-layer wiring */
+ interface SplitControl as RadioControl;
+
+ interface Packet as BarePacket;
+ interface Send as Ieee154Send;
+ interface Receive as Ieee154Receive;
+
+ /* context lookup */
+ interface NeighborDiscovery;
+
+ interface ReadLqi;
+ interface PacketLink;
+ interface LowPowerListening;
+
+ /* buffers for outgoing fragments */
+ interface Pool<message_t> as FragPool;
+ interface Pool<struct send_info> as SendInfoPool;
+ interface Pool<struct send_entry> as SendEntryPool;
+ interface Queue<struct send_entry *> as SendQueue;
+
+ /* expire reconstruction */
+ interface Timer<TMilli> as ExpireTimer;
+
+ interface Leds;
+
+ }
+ provides interface Init;
+} implementation {
+
+#define HAVE_LOWPAN_EXTERN_MATCH_CONTEXT
+int lowpan_extern_read_context(struct in6_addr *addr, int context) {
+ return call NeighborDiscovery.getContext(context, addr);
+}
+
+int lowpan_extern_match_context(struct in6_addr *addr, uint8_t *ctx_id) {
+ return call NeighborDiscovery.matchContext(addr, ctx_id);
+}
+
+ // generally including source files like this is a no-no. I'm doing
+ // this in the hope that the optimizer will do a better job when
+ // they're part of a component.
+#include <lib6lowpan/ieee154_header.c>
+#include <lib6lowpan/lib6lowpan.c>
+#include <lib6lowpan/lib6lowpan_4944.c>
+#include <lib6lowpan/lib6lowpan_frag.c>
+
+ enum {
+ S_RUNNING,
+ S_STOPPED,
+ S_STOPPING,
+ };
+ uint8_t state = S_STOPPED;
+ bool radioBusy;
+ uint8_t current_local_label = 0;
+ ip_statistics_t stats;
+
+ // this in theory could be arbitrarily large; however, it needs to
+ // be large enough to hold all active reconstructions, and any tags
+ // which we are dropping. It's important to keep dropped tags
+ // around for a while, or else there are pathological situations
+ // where you continually allocate buffers for packets which will
+ // never complete.
+
+ ////////////////////////////////////////
+ //
+ //
+
+ table_t recon_cache;
+
+ // table of packets we are currently receiving fragments from, that
+ // are destined to us
+ struct lowpan_reconstruct recon_data[N_RECONSTRUCTIONS];
+
+ //
+ //
+ ////////////////////////////////////////
+
+ // task void sendTask();
+
+ void reconstruct_clear(void *ent) {
+ struct lowpan_reconstruct *recon = (struct lowpan_reconstruct *)ent;
+ memclr((uint8_t *)&recon->r_meta, sizeof(struct ip6_metadata));
+ recon->r_timeout = T_UNUSED;
+ recon->r_buf = NULL;
+ }
+
+ struct send_info *getSendInfo() {
+ struct send_info *ret = call SendInfoPool.get();
+ if (ret == NULL) return ret;
+ ret->_refcount = 1;
+ ret->upper_data = NULL;
+ ret->failed = FALSE;
+ ret->link_transmissions = 0;
+ ret->link_fragments = 0;
+ ret->link_fragment_attempts = 0;
+ return ret;
+ }
+#define SENDINFO_INCR(X) ((X)->_refcount)++
+void SENDINFO_DECR(struct send_info *si) {
+ if (--(si->_refcount) == 0) {
+ call SendInfoPool.put(si);
+ }
+}
+
+ command error_t SplitControl.start() {
+ return call RadioControl.start();
+ }
+
+ command error_t SplitControl.stop() {
+ if (!radioBusy) {
+ state = S_STOPPED;
+ return call RadioControl.stop();
+ } else {
+ // if there's a packet in the radio, wait for it to exit before
+ // stopping
+ state = S_STOPPING;
+ return SUCCESS;
+ }
+ }
+
+ event void RadioControl.startDone(error_t error) {
+#ifdef LPL_SLEEP_INTERVAL
+ call LowPowerListening.setLocalWakeupInterval(LPL_SLEEP_INTERVAL);
+#endif
+
+ if (error == SUCCESS) {
+ call Leds.led2Toggle();
+ call ExpireTimer.startPeriodic(FRAG_EXPIRE_TIME);
+ state = S_RUNNING;
+ radioBusy = FALSE;
+ }
+
+ signal SplitControl.startDone(error);
+ }
+
+ event void RadioControl.stopDone(error_t error) {
+ signal SplitControl.stopDone(error);
+ }
+
+ command error_t Init.init() {
+ // ip_malloc_init needs to be in init, not booted, because
+ // context for coap is initialised in init
+ ip_malloc_init();
+ return SUCCESS;
+ }
+
+ event void Boot.booted() {
+ call BlipStatistics.clear();
+
+ /* set up our reconstruction cache */
+ table_init(&recon_cache, recon_data, sizeof(struct lowpan_reconstruct), N_RECONSTRUCTIONS);
+ table_map(&recon_cache, reconstruct_clear);
+
+ call SplitControl.start();
+ }
+
+ /*
+ * Receive-side code.
+ */
+ void deliver(struct lowpan_reconstruct *recon) {
+ struct ip6_hdr *iph = (struct ip6_hdr *)recon->r_buf;
+
+ // printf("deliver [%i]: ", recon->r_bytes_rcvd);
+ // printf_buf(recon->r_buf, recon->r_bytes_rcvd);
+
+ /* the payload length field is always compressed, have to put it back here */
+ iph->ip6_plen = htons(recon->r_bytes_rcvd - sizeof(struct ip6_hdr));
+ signal IPLower.recv(iph, (void *)(iph + 1), &recon->r_meta);
+
+ // printf("ip_free(%p)\n", recon->r_buf);
+ ip_free(recon->r_buf);
+ recon->r_timeout = T_UNUSED;
+ recon->r_buf = NULL;
+ }
+
+ /*
+ * Bulletproof recovery logic is very important to make sure we
+ * don't get wedged with no free buffers.
+ *
+ * The table is managed as follows:
+ * - unused entries are marked T_UNUSED
+ * - entries which
+ * o have a buffer allocated
+ * o have had a fragment reception before we fired
+ * are marked T_ACTIVE
+ * - entries which have not had a fragment reception during the last timer period
+ * and were active are marked T_ZOMBIE
+ * - zombie receptions are deleted: their buffer is freed and table entry marked unused.
+ * - when a fragment is dropped, it is entered into the table as T_FAILED1.
+ * no buffer is allocated
+ * - when the timer fires, T_FAILED1 entries are aged to T_FAILED2.
+ * - T_FAILED2 entries are deleted. Incoming fragments with tags
+ * that are marked either FAILED1 or FAILED2 are dropped; this
+ * prevents us from allocating a buffer for a packet which we
+ * have already dropped fragments from.
+ *
+ */
+ void reconstruct_age(void *elt) {
+ struct lowpan_reconstruct *recon = (struct lowpan_reconstruct *)elt;
+ if (recon->r_timeout != T_UNUSED)
+ printf("recon src: 0x%x tag: 0x%x buf: %p recvd: %i/%i\n",
+ recon->r_source_key, recon->r_tag, recon->r_buf,
+ recon->r_bytes_rcvd, recon->r_size);
+ switch (recon->r_timeout) {
+ case T_ACTIVE:
+ recon->r_timeout = T_ZOMBIE; break; // age existing receptions
+ case T_FAILED1:
+ recon->r_timeout = T_FAILED2; break; // age existing receptions
+ case T_ZOMBIE:
+ case T_FAILED2:
+ // deallocate the space for reconstruction
+ printf("timing out buffer: src: %i tag: %i\n", recon->r_source_key, recon->r_tag);
+ if (recon->r_buf != NULL) {
+ printf("ip_free(%p)\n", recon->r_buf);
+ ip_free(recon->r_buf);
+ }
+ recon->r_timeout = T_UNUSED;
+ recon->r_buf = NULL;
+ break;
+ }
+ }
+
+ void ip_print_heap() {
+#ifdef PRINTFUART_ENABLED
+ bndrt_t *cur = (bndrt_t *)heap;
+ while (((uint8_t *)cur) - heap < IP_MALLOC_HEAP_SIZE) {
+ printf ("heap region start: %p length: %u used: %u\n",
+ cur, (*cur & IP_MALLOC_LEN), (*cur & IP_MALLOC_INUSE) >> 15);
+ cur = (bndrt_t *)(((uint8_t *)cur) + ((*cur) & IP_MALLOC_LEN));
+ }
+#endif
+ }
+
+ event void ExpireTimer.fired() {
+ table_map(&recon_cache, reconstruct_age);
+
+
+ printf("Frag pool size: %i\n", call FragPool.size());
+ printf("SendInfo pool size: %i\n", call SendInfoPool.size());
+ printf("SendEntry pool size: %i\n", call SendEntryPool.size());
+ printf("Forward queue length: %i\n", call SendQueue.size());
+ ip_print_heap();
+ printfflush();
+ }
+
+ /*
+ * allocate a structure for recording information about incoming fragments.
+ */
+
+ struct lowpan_reconstruct *get_reconstruct(uint16_t key, uint16_t tag) {
+ struct lowpan_reconstruct *ret = NULL;
+ int i;
+
+ // printf("get_reconstruct: %x %i\n", key, tag);
+
+ for (i = 0; i < N_RECONSTRUCTIONS; i++) {
+ struct lowpan_reconstruct *recon = (struct lowpan_reconstruct *)&recon_data[i];
+
+ if (recon->r_tag == tag &&
+ recon->r_source_key == key) {
+
+ if (recon->r_timeout > T_UNUSED) {
+ recon->r_timeout = T_ACTIVE;
+ ret = recon;
+ goto done;
+
+ } else if (recon->r_timeout < T_UNUSED) {
+ // if we have already tried and failed to get a buffer, we
+ // need to drop remaining fragments.
+ ret = NULL;
+ goto done;
+ }
+ }
+ if (recon->r_timeout == T_UNUSED)
+ ret = recon;
+ }
+ done:
+ // printf("got%p\n", ret);
+ return ret;
+ }
+
+ event message_t *Ieee154Receive.receive(message_t *msg, void *msg_payload, uint8_t len) {
+ struct packed_lowmsg lowmsg;
+ struct ieee154_frame_addr frame_address;
+ uint8_t *buf = msg_payload;
+
+ // printf(" -- RECEIVE -- len : %i\n", len);
+
+ BLIP_STATS_INCR(stats.rx_total);
+
+ /* unpack the 802.15.4 address fields */
+ buf = unpack_ieee154_hdr(msg_payload, &frame_address);
+ len -= buf - (uint8_t *)msg_payload;
+
+ /* unpack and 6lowpan headers */
+ lowmsg.data = buf;
+ lowmsg.len = len;
+ lowmsg.headers = getHeaderBitmap(&lowmsg);
+ if (lowmsg.headers == LOWMSG_NALP) {
+ goto fail;
+ }
+
+ if (hasFrag1Header(&lowmsg) || hasFragNHeader(&lowmsg)) {
+ // start reassembly
+ int rv;
+ struct lowpan_reconstruct *recon;
+ uint16_t tag, source_key;
+
+ source_key = ieee154_hashaddr(&frame_address.ieee_src);
+ getFragDgramTag(&lowmsg, &tag);
+ recon = get_reconstruct(source_key, tag);
+ if (!recon) {
+ goto fail;
+ }
+
+ /* fill in metadata: on fragmented packets, it applies to the
+ first fragment only */
+ memcpy(&recon->r_meta.sender, &frame_address.ieee_src,
+ sizeof(ieee154_addr_t));
+ recon->r_meta.lqi = call ReadLqi.readLqi(msg);
+ recon->r_meta.rssi = call ReadLqi.readRssi(msg);
+
+ if (hasFrag1Header(&lowmsg)) {
+ if (recon->r_buf != NULL) goto fail;
+ rv = lowpan_recon_start(&frame_address, recon, buf, len);
+ } else {
+ rv = lowpan_recon_add(recon, buf, len);
+ }
+
+ if (rv < 0) {
+ recon->r_timeout = T_FAILED1;
+ goto fail;
+ } else {
+ // printf("start recon buf: %p\n", recon->r_buf);
+ recon->r_timeout = T_ACTIVE;
+ recon->r_source_key = source_key;
+ recon->r_tag = tag;
+ }
+
+ if (recon->r_size == recon->r_bytes_rcvd) {
+ deliver(recon);
+ }
+
+ } else {
+ /* no fragmentation, just deliver it */
+ int rv;
+ struct lowpan_reconstruct recon;
+
+ /* fill in metadata */
+ memcpy(&recon.r_meta.sender, &frame_address.ieee_src,
+ sizeof(ieee154_addr_t));
+ recon.r_meta.lqi = call ReadLqi.readLqi(msg);
+ recon.r_meta.rssi = call ReadLqi.readRssi(msg);
+
+ buf = getLowpanPayload(&lowmsg);
+ if ((rv = lowpan_recon_start(&frame_address, &recon, buf, len)) < 0) {
+ goto fail;
+ }
+
+ if (recon.r_size == recon.r_bytes_rcvd) {
+ deliver(&recon);
+ } else {
+ // printf("ip_free(%p)\n", recon.r_buf);
+ ip_free(recon.r_buf);
+ }
+ }
+ goto done;
+ fail:
+ BLIP_STATS_INCR(stats.rx_drop);
+ done:
+ return msg;
+ }
+
+
+ /*
+ * Send-side functionality
+ */
+ task void sendTask() {
+ struct send_entry *s_entry;
+
+ // printf("sendTask() - sending\n");
+
+ if (radioBusy || state != S_RUNNING) return;
+ if (call SendQueue.empty()) return;
+ // this does not dequeue
+ s_entry = call SendQueue.head();
+
+#ifdef LPL_SLEEP_INTERVAL
+ call LowPowerListening.setRemoteWakeupInterval(s_entry->msg,
+ call LowPowerListening.getLocalWakeupInterval());
+#endif
+
+ if (s_entry->info->failed) {
+ dbg("Drops", "drops: sendTask: dropping failed fragment\n");
+ goto fail;
+ }
+
+ if ((call Ieee154Send.send(s_entry->msg,
+ call BarePacket.payloadLength(s_entry->msg))) != SUCCESS) {
+ dbg("Drops", "drops: sendTask: send failed\n");
+ goto fail;
+ } else {
+ radioBusy = TRUE;
+ }
+
+ return;
+ fail:
+ printf("SEND FAIL\n");
+ post sendTask();
+ BLIP_STATS_INCR(stats.tx_drop);
+
+ // deallocate the memory associated with this request.
+ // other fragments associated with this packet will get dropped.
+ s_entry->info->failed = TRUE;
+ SENDINFO_DECR(s_entry->info);
+ call FragPool.put(s_entry->msg);
+ call SendEntryPool.put(s_entry);
+ call SendQueue.dequeue();
+ }
+
+
+ /*
+ * it will pack the message into the fragment pool and enqueue
+ * those fragments for sending
+ *
+ * it will set
+ * - payload length
+ * - version, traffic class and flow label
+ *
+ * the source and destination IP addresses must be set by higher
+ * layers.
+ */
+ command error_t IPLower.send(struct ieee154_frame_addr *frame_addr,
+ struct ip6_packet *msg,
+ void *data) {
+ struct lowpan_ctx ctx;
+ struct send_info *s_info;
+ struct send_entry *s_entry;
+ message_t *outgoing;
+
+ int frag_len = 1;
+ error_t rc = SUCCESS;
+
+ if (state != S_RUNNING) {
+ return EOFF;
+ }
+
+ /* set version to 6 in case upper layers forgot */
+ msg->ip6_hdr.ip6_vfc &= ~IPV6_VERSION_MASK;
+ msg->ip6_hdr.ip6_vfc |= IPV6_VERSION;
+
+ ctx.tag = current_local_label++;
+ ctx.offset = 0;
+
+ s_info = getSendInfo();
+ if (s_info == NULL) {
+ rc = ERETRY;
+ goto cleanup_outer;
+ }
+ s_info->upper_data = data;
+
+ while (frag_len > 0) {
+ s_entry = call SendEntryPool.get();
+ outgoing = call FragPool.get();
+
+ if (s_entry == NULL || outgoing == NULL) {
+ if (s_entry != NULL)
+ call SendEntryPool.put(s_entry);
+ if (outgoing != NULL)
+ call FragPool.put(outgoing);
+ // this will cause any fragments we have already enqueued to
+ // be dropped by the send task.
+ s_info->failed = TRUE;
+ printf("drops: IP send: no fragments\n");
+ rc = ERETRY;
+ goto done;
+ }
+
+ call BarePacket.clear(outgoing);
+ frag_len = lowpan_frag_get(call Ieee154Send.getPayload(outgoing, 0),
+ call BarePacket.maxPayloadLength(),
+ msg,
+ frame_addr,
+ &ctx);
+ if (frag_len < 0) {
+ printf(" get frag error: %i\n", frag_len);
+ }
+
+ printf("fragment length: %i offset: %i\n", frag_len, ctx.offset);
+ call BarePacket.setPayloadLength(outgoing, frag_len);
+
+ if (frag_len <= 0) {
+ call FragPool.put(outgoing);
+ call SendEntryPool.put(s_entry);
+ goto done;
+ }
+
+ if (call SendQueue.enqueue(s_entry) != SUCCESS) {
+ BLIP_STATS_INCR(stats.encfail);
+ s_info->failed = TRUE;
+ printf("drops: IP send: enqueue failed\n");
+ goto done;
+ }
+
+ s_info->link_fragments++;
+ s_entry->msg = outgoing;
+ s_entry->info = s_info;
+
+ /* configure the L2 */
+ if (frame_addr->ieee_dst.ieee_mode == IEEE154_ADDR_SHORT &&
+ frame_addr->ieee_dst.i_saddr == IEEE154_BROADCAST_ADDR) {
+ call PacketLink.setRetries(s_entry->msg, 0);
+ } else {
+ call PacketLink.setRetries(s_entry->msg, BLIP_L2_RETRIES);
+ }
+ call PacketLink.setRetryDelay(s_entry->msg, BLIP_L2_DELAY);
+
+ SENDINFO_INCR(s_info);}
+
+ // printf("got %i frags\n", s_info->link_fragments);
+ done:
+ BLIP_STATS_INCR(stats.sent);
+ SENDINFO_DECR(s_info);
+ post sendTask();
+ cleanup_outer:
+ return rc;
+ }
+
+ event void Ieee154Send.sendDone(message_t *msg, error_t error) {
+ struct send_entry *s_entry = call SendQueue.head();
+
+ radioBusy = FALSE;
+
+ // printf("sendDone: %p %i\n", msg, error);
+
+ if (state == S_STOPPING) {
+ call RadioControl.stop();
+ state = S_STOPPED;
+ goto done;
+ }
+
+ s_entry->info->link_transmissions += (call PacketLink.getRetries(msg));
+ s_entry->info->link_fragment_attempts++;
+
+ if (!call PacketLink.wasDelivered(msg)) {
+ printf("sendDone: was not delivered! (%i tries)\n",
+ call PacketLink.getRetries(msg));
+ s_entry->info->failed = TRUE;
+ signal IPLower.sendDone(s_entry->info);
+/* if (s_entry->info->policy.dest[0] != 0xffff) */
+/* dbg("Drops", "drops: sendDone: frag was not delivered\n"); */
+ // need to check for broadcast frames
+ // BLIP_STATS_INCR(stats.tx_drop);
+ } else if (s_entry->info->link_fragment_attempts ==
+ s_entry->info->link_fragments) {
+ signal IPLower.sendDone(s_entry->info);
+ }
+
+ done:
+ // kill off any pending fragments
+ SENDINFO_DECR(s_entry->info);
+ call FragPool.put(s_entry->msg);
+ call SendEntryPool.put(s_entry);
+ call SendQueue.dequeue();
+
+ post sendTask();
+ }
+
+#if 0
+ command struct tlv_hdr *IPExtensions.findTlv(struct ip6_ext *ext, uint8_t tlv_val) {
+ int len = ext->len - sizeof(struct ip6_ext);
+ struct tlv_hdr *tlv = (struct tlv_hdr *)(ext + 1);
+ while (len > 0) {
+ if (tlv->type == tlv_val) return tlv;
+ if (tlv->len == 0) return NULL;
+ tlv = (struct tlv_hdr *)(((uint8_t *)tlv) + tlv->len);
+ len -= tlv->len;
+ }
+ return NULL;
+ }
+#endif
+
+
+ /*
+ * BlipStatistics interface
+ */
+ command void BlipStatistics.get(ip_statistics_t *statistics) {
+#ifdef BLIP_STATS_IP_MEM
+ stats.fragpool = call FragPool.size();
+ stats.sendinfo = call SendInfoPool.size();
+ stats.sendentry= call SendEntryPool.size();
+ stats.sndqueue = call SendQueue.size();
+ stats.heapfree = ip_malloc_freespace();
+ printf("frag: %i sendinfo: %i sendentry: %i sendqueue: %i heap: %i\n",
+ stats.fragpool,
+ stats.sendinfo,
+ stats.sendentry,
+ stats.sndqueue,
+ stats.heapfree);
+#endif
+ memcpy(statistics, &stats, sizeof(ip_statistics_t));
+
+ }
+
+ command void BlipStatistics.clear() {
+ memclr((uint8_t *)&stats, sizeof(ip_statistics_t));
+ }
+
+/* default event void IP.recv[uint8_t nxt_hdr](struct ip6_hdr *iph, */
+/* void *payload, */
+/* struct ip_metadata *meta) { */
+/* } */
+
+/* default event void Multicast.recv[uint8_t scope](struct ip6_hdr *iph, */
+/* void *payload, */
+/* struct ip_metadata *meta) { */
+/* } */
+}
diff --git a/tests/examplefiles/RoleQ.pm6 b/tests/examplefiles/RoleQ.pm6
new file mode 100644
index 00000000..9b66bde4
--- /dev/null
+++ b/tests/examplefiles/RoleQ.pm6
@@ -0,0 +1,23 @@
+role q {
+ token stopper { \' }
+
+ token escape:sym<\\> { <sym> <item=.backslash> }
+
+ token backslash:sym<qq> { <?before 'q'> <quote=.LANG('MAIN','quote')> }
+ token backslash:sym<\\> { <text=.sym> }
+ token backslash:sym<stopper> { <text=.stopper> }
+
+ token backslash:sym<miscq> { {} . }
+
+ method tweak_q($v) { self.panic("Too late for :q") }
+ method tweak_qq($v) { self.panic("Too late for :qq") }
+}
+
+role qq does b1 does c1 does s1 does a1 does h1 does f1 {
+ token stopper { \" }
+ token backslash:sym<unrec> { {} (\w) { self.throw_unrecog_backslash_seq: $/[0].Str } }
+ token backslash:sym<misc> { \W }
+
+ method tweak_q($v) { self.panic("Too late for :q") }
+ method tweak_qq($v) { self.panic("Too late for :qq") }
+}
diff --git a/tests/examplefiles/ANTLRv3.g b/tests/examplefiles/antlr_ANTLRv3.g
index fbe6d654..fbe6d654 100644
--- a/tests/examplefiles/ANTLRv3.g
+++ b/tests/examplefiles/antlr_ANTLRv3.g
diff --git a/tests/examplefiles/example.e b/tests/examplefiles/example.e
new file mode 100644
index 00000000..2e43954b
--- /dev/null
+++ b/tests/examplefiles/example.e
@@ -0,0 +1,124 @@
+note
+ description : "[
+ This is use to have almost every language element."
+
+ That way, I can correctly test the lexer. %]"
+
+ Don't try to understand what it does. It's not even compilling.
+ ]"
+ date : "August 6, 2013"
+ revision : "0.1"
+
+class
+ SAMPLE
+
+inherit
+ ARGUMENTS
+ rename
+ Command_line as Caller_command,
+ command_name as Application_name
+ undefine
+ out
+ end
+ ANY
+ export
+ {ANY} out
+ redefine
+ out
+ end
+
+
+
+create
+ make
+
+convert
+ as_boolean: {BOOLEAN}
+
+feature {NONE} -- Initialization
+
+ make
+ -- Run application.
+ local
+ i1_:expanded INTEGER
+ f_1:REAL_64
+ l_char:CHARACTER_8
+ do
+ l_char:='!'
+ l_char:='%''
+ l_char:='%%'
+ i1_:=80 - 0x2F0C // 0C70 \\ 0b10110 * 1;
+ f_1:=0.1 / .567
+ f_1:=34.
+ f_1:=12345.67890
+ inspect i1_
+ when 1 then
+ io.output.put_integer (i1_) -- Comment
+ else
+ io.output.put_real (f_1.truncated_to_real)
+ end
+ io.output.put_string (CuRrEnt.out) -- Comment
+ (agent funct_1).call([1,2,"Coucou"])
+ end
+
+feature -- Access
+
+ funct_1(x,y:separate INTEGER;a_text:READABLE_STRING_GENERAL):detachable BOOLEAN
+ obsolete "This function is obsolete"
+ require
+ Is_Attached: AttAched a_text
+ local
+ l_list:LIST[like x]
+ do
+ if (NOT a_text.is_empty=TrUe or elSe ((x<0 aNd x>10) oR (y>0 and then y<10))) xor True thEn
+ ResuLT := FalSe
+ elseif (acROss l_list as la_list SoMe la_list.item<0 end) implies a_text.is_boolean then
+ ResuLT := FalSe
+ else
+ Result := TruE
+ eND
+ from
+ l_list.start
+ until
+ l_list.exhausted
+ loop
+ l_list.forth
+ variant
+ l_list.count - l_list.index
+ end
+ check Current /= Void end
+ debug print("%"Here%"%N") end
+ ensure
+ Is_Cool_Not_Change: is_cool = old is_cool
+ end
+
+ is_cool:BOOLEAN
+ attribute
+ Result:=False
+ end
+
+ froZen c_malloc: POINTER is
+ exTErnal
+ "C inline use <stdlib.h>"
+ alIAs
+ "malloc (1)"
+ end
+
+ as_boolean:BOOLEAN
+ do
+ Result:=True
+ rescue
+ retry
+ end
+
+feature {ANY} -- The redefine feature
+
+ out:STRING_8
+ once
+ reSUlt:=PrecursOr {ANY}
+ Result := "Hello Worl"+('d').out
+ end
+
+invariant
+ Always_Cool: is_cool
+end
diff --git a/tests/examplefiles/example.gd b/tests/examplefiles/example.gd
new file mode 100644
index 00000000..c285ea32
--- /dev/null
+++ b/tests/examplefiles/example.gd
@@ -0,0 +1,23 @@
+#############################################################################
+##
+#W example.gd
+##
+## This file contains a sample of a GAP declaration file.
+##
+DeclareProperty( "SomeProperty", IsLeftModule );
+DeclareGlobalFunction( "SomeGlobalFunction" );
+
+
+#############################################################################
+##
+#C IsQuuxFrobnicator(<R>)
+##
+## <ManSection>
+## <Filt Name="IsQuuxFrobnicator" Arg='R' Type='Category'/>
+##
+## <Description>
+## Tests whether R is a quux frobnicator.
+## </Description>
+## </ManSection>
+##
+DeclareSynonym( "IsQuuxFrobnicator", IsField and IsGroup );
diff --git a/tests/examplefiles/example.gi b/tests/examplefiles/example.gi
new file mode 100644
index 00000000..c9c5e55d
--- /dev/null
+++ b/tests/examplefiles/example.gi
@@ -0,0 +1,64 @@
+#############################################################################
+##
+#W example.gd
+##
+## This file contains a sample of a GAP implementation file.
+##
+
+
+#############################################################################
+##
+#M SomeOperation( <val> )
+##
+## performs some operation on <val>
+##
+InstallMethod( SomeProperty,
+ "for left modules",
+ [ IsLeftModule ], 0,
+ function( M )
+ if IsFreeLeftModule( M ) and not IsTrivial( M ) then
+ return true;
+ fi;
+ TryNextMethod();
+ end );
+
+
+
+#############################################################################
+##
+#F SomeGlobalFunction( )
+##
+## A global variadic function.
+##
+InstallGlobalFunction( SomeGlobalFunction, function( arg )
+ if Length( arg ) = 3 then
+ return arg[1] + arg[2] * arg[3];
+ elif Length( arg ) = 2 then
+ return arg[1] - arg[2]
+ else
+ Error( "usage: SomeGlobalFunction( <x>, <y>[, <z>] )" );
+ fi;
+ end );
+
+
+#
+# A plain function.
+#
+SomeFunc := function(x, y)
+ local z, func, tmp, j;
+ z := x * 1.0;
+ y := 17^17 - y;
+ func := a -> a mod 5;
+ tmp := List( [1..50], func );
+ while y > 0 do
+ for j in tmp do
+ Print(j, "\n");
+ od;
+ repeat
+ y := y - 1;
+ until 0 < 1;
+ y := y -1;
+ od;
+ return z;
+end;
+ \ No newline at end of file
diff --git a/tests/examplefiles/example.hx b/tests/examplefiles/example.hx
new file mode 100644
index 00000000..fd93bb49
--- /dev/null
+++ b/tests/examplefiles/example.hx
@@ -0,0 +1,142 @@
+/**
+ * This is not really a valid Haxe file, but just a demo...
+ */
+
+package;
+package net.onthewings;
+
+import net.onthewings.Test;
+import net.onthewings.*;
+
+using Lambda;
+using net.onthewings.Test;
+
+#if flash8
+// Haxe code specific for flash player 8
+#elseif flash
+// Haxe code specific for flash platform (any version)
+#elseif js
+// Haxe code specific for javascript platform
+#elseif neko
+// Haxe code specific for neko platform
+#else
+// do something else
+ #error // will display an error "Not implemented on this platform"
+ #error "Custom error message" // will display an error "Custom error message"
+#end
+
+0; // Int
+-134; // Int
+0xFF00; // Int
+
+123.0; // Float
+.14179; // Float
+13e50; // Float
+-1e-99; // Float
+
+"hello"; // String
+"hello \"world\" !"; // String
+'hello "world" !'; // String
+
+true; // Bool
+false; // Bool
+
+null; // Unknown<0>
+
+~/[a-z]+/i; // EReg : regular expression
+
+var point = { "x" : 1, "y" : -5 };
+
+{
+ var x;
+ var y = 3;
+ var z : String;
+ var w : String = "";
+ var a, b : Bool, c : Int = 0;
+}
+
+//haxe3 pattern matching
+switch(e.expr) {
+ case EConst(CString(s)) if (StringTools.startsWith(s, "foo")):
+ "1";
+ case EConst(CString(s)) if (StringTools.startsWith(s, "bar")):
+ "2";
+ case EConst(CInt(i)) if (switch(Std.parseInt(i) * 2) { case 4: true; case _: false; }):
+ "3";
+ case EConst(_):
+ "4";
+ case _:
+ "5";
+}
+
+switch [true, 1, "foo"] {
+ case [true, 1, "foo"]: "0";
+ case [true, 1, _]: "1";
+ case _: "_";
+}
+
+
+class Test <T:Void->Void> {
+ private function new():Void {
+ inline function innerFun(a:Int, b:Int):Int {
+ return readOnlyField = a + b;
+ }
+
+ _innerFun(1, 2.3);
+ }
+
+ static public var instance(get,null):Test;
+ static function get_instance():Test {
+ return instance != null ? instance : instance = new Test();
+ }
+}
+
+@:native("Test") private class Test2 {}
+
+extern class Ext {}
+
+@:macro class M {
+ @:macro static function test(e:Array<Expr>):ExprOf<String> {
+ return macro "ok";
+ }
+}
+
+enum Color {
+ Red;
+ Green;
+ Blue;
+ Grey( v : Int );
+ Rgb( r : Int, g : Int, b : Int );
+ Alpha( a : Int, col : Color );
+}
+
+class Colors {
+ static function toInt( c : Color ) : Int {
+ return switch( c ) {
+ case Red: 0xFF0000;
+ case Green: 0x00FF00;
+ case Blue: 0x0000FF;
+ case Grey(v): (v << 16) | (v << 8) | v;
+ case Rgb(r,g,b): (r << 16) | (g << 8) | b;
+ case Alpha(a,c): (a << 24) | (toInt(c) & 0xFFFFFF);
+ }
+ }
+}
+
+class EvtQueue<T : (Event, EventDispatcher)> {
+ var evt : T;
+}
+
+typedef DS = Dynamic<String>;
+typedef Pt = {
+ var x:Float;
+ var y:Float;
+ @:optional var z:Float; /* optional z */
+ function add(pt:Pt):Void;
+}
+typedef Pt2 = {
+ x:Float,
+ y:Float,
+ ?z:Float, //optional z
+ add : Point -> Void,
+} \ No newline at end of file
diff --git a/tests/examplefiles/example.i6t b/tests/examplefiles/example.i6t
new file mode 100644
index 00000000..0f41b425
--- /dev/null
+++ b/tests/examplefiles/example.i6t
@@ -0,0 +1,32 @@
+B/examt: Example Template.
+
+@Purpose: To show the syntax of I6T, specifically the parts relating to the
+inclusion of I7 and at signs in the first column.
+
+@-------------------------------------------------------------------------------
+
+@p Lines.
+
+@c
+{-lines:type}
+! This is a comment.
+{-endlines}
+
+@-This line begins with @-, so it is ignored.
+
+@p Paragraph.
+This is a paragraph.
+@p Another paragraph.
+So
+
+is
+
+this.
+
+@Purpose: This purpose line is ignored.
+
+@c At signs and (+ +).
+[ Foo i;
+print (+score [an I7 value]+), "^";
+@add sp 1 -> i; ! Assembly works even in the first column.
+];
diff --git a/tests/examplefiles/example.i7x b/tests/examplefiles/example.i7x
new file mode 100644
index 00000000..ab94ac69
--- /dev/null
+++ b/tests/examplefiles/example.i7x
@@ -0,0 +1,45 @@
+example by David Corbett begins here.
+
+"Implements testable examples."
+
+An example is a kind of thing. An example can be tested. An example is seldom tested.
+
+example ends here.
+
+----
+[The] documentation [starts here.]
+----
+
+This extension adds examples, which may be tested.
+
+Chapter: Usage
+
+To add an example to the story, we write:
+
+ The foobar is an example.
+
+To interact with it in Inform 6, we write something like:
+
+ To say (E - example): (-
+ print (object) {E};
+ -).
+ [The IDE's documentation viewer does not display the closing -). I don't know how to fix that.]
+
+Section: Testing
+
+We can make an example be tested using:
+
+ now the foobar is tested;
+
+Example: * Exempli Gratia - A simple example.
+
+ *: "Exempli Gratia"
+
+ Include example by David Corbett.
+
+ The Kitchen is a room. The egg is an example, here.
+
+ Before dropping the egg:
+ now the egg is tested.
+
+ Test me with "get egg / drop egg".
diff --git a/tests/examplefiles/example.inf b/tests/examplefiles/example.inf
new file mode 100644
index 00000000..73cdd087
--- /dev/null
+++ b/tests/examplefiles/example.inf
@@ -0,0 +1,374 @@
+!% $SMALL ! This is ICL, not a comment.
+!% -w
+
+!% A comprehensive test of Inform6Lexer.
+
+Switches d2SDq;
+
+Constant Story "Informal Testing";
+Constant Headline "^Not a game.^";!% This is a comment, not ICL.
+
+Release 2;
+Serial "140308";
+Version 5;
+
+Ifndef TARGET_ZCODE;
+Ifndef TARGET_GLULX;
+Ifndef WORDSIZE;
+Default WORDSIZE 2;
+Constant TARGET_ZCODE;
+Endif;
+Endif;
+Endif;
+
+Ifv3; Message "Compiling to version 3"; Endif;
+Ifv5; Message "Not compiling to version 3"; endif;
+ifdef TARGET_ZCODE;
+#IFTRUE (#version_number == 5);
+Message "Compiling to version 5";
+#ENDIF;
+endif ;
+
+Replace CreatureTest;
+
+Include "Parser";
+Include "VerbLib";
+
+# ! A hash is optional at the top level.
+Object kitchen "Kitchen"
+ with description "You are in a kitchen.",
+ arr 1 2 3 4,
+ has light;
+
+#[ Initialise;
+ location = kitchen;
+ print "v"; inversion; "^";
+];
+
+Ifdef VN_1633;
+Replace IsSeeThrough IsSeeThroughOrig;
+[ IsSeeThrough * o;
+ return o hasnt opaque || IsSeeThroughOrig(o);
+];
+Endif;
+
+Abbreviate "test";
+
+Array table buffer 260;
+
+Attribute reversed;
+Attribute opaque alias locked;
+Constant to reversed;
+
+Property long additive additive long alias;
+Property long long long wingspan alias alias;
+
+Class Flier with wingspan 5;
+Class Bird(10) has animate class Flier with wingspan 2;
+
+Constant Constant1;
+Constant Constant2 Constant1;
+Constant Constant3 = Constant2;
+Ifdef VN_1633; Undef Constant; Endif;
+
+Ifdef VN_1633;
+Dictionary 'word' 1 2;
+Ifnot;
+Dictionary dict_word "word";
+Endif;
+
+Fake_action NotReal;
+
+Global global1;
+Global global2 = 69105;
+
+Lowstring low_string "low string";
+
+Iftrue false;
+Message error "Uh-oh!^~false~ shouldn't be ~true~.";
+Endif;
+Iffalse true;
+Message fatalerror "Uh-oh!^~true~ shouldn't be ~false~.";
+Endif;
+
+Nearby person "person"
+ with name 'person',
+ description "This person is barely implemented.",
+ life [ * x y z;
+ Ask: print_ret (The) self, " says nothing.";
+ Answer: print (The) self, " didn't say anything.^"; rfalse;
+ ]
+ has has animate transparent;
+
+Object -> -> test_tube "test tube"
+ with name 'test' "tube" 'testtube',
+ has ~openable ~opaque container;
+
+Bird -> pigeon
+ with name 'pigeon',
+ description [;
+ "The pigeon has a wingspan of ", self.&wingspan-->0, " wing units.";
+ ];
+
+Object -> "thimble" with name 'thimble';
+
+Object -> pebble "pebble" with name 'pebble';
+
+Ifdef TARGET_ZCODE; Trace objects; Endif;
+
+Statusline score;
+
+Stub StubR 3;
+
+Ifdef TARGET_ZCODE;
+Zcharacter "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "123456789.,!?_#'0/@{005C}-:()";
+Zcharacter table '@!!' '@<<' '@'A';
+Zcharacter table + '@AE' '@{dc}' '@et' '@:y';
+Ifnot;
+Ifdef TARGET_GLULX;
+Message "Glulx doesn't use ~Zcharacter~.^Oh well."; ! '~' and '^' work here.
+Ifnot;
+Message warning "Uh-oh! ^~^"; ! They don't work in other Messages.
+Endif;
+Endif;
+
+Include "Grammar";
+
+Verb"acquire"'collect'='take';
+
+[ NounFilter; return noun ofclass Bird; ];
+
+[ ScopeFilter obj;
+ switch (scope_stage) {
+ 1: rtrue;
+ 2: objectloop (obj in compass) PlaceInScope(obj);
+ 3: "Nothing is in scope.";
+ }
+];
+
+Verb meta "t" 'test'
+ * 'held' held -> TestHeld
+ * number -> TestNumber
+ * reversed -> TestAttribute
+ * 'creature' creature -> TestCreature
+ * 'multiheld' multiheld -> TestMultiheld
+ * 'm' multiexcept 'into'/"in" noun -> TestMultiexcept
+ * 'm' multiinside 'from' noun -> TestMultiinside
+ * multi -> TestMulti
+ * 'filter'/'f' noun=NounFilter -> TestNounFilter
+ * 'filter'/'f' scope=ScopeFilter -> TestScopeFilter
+ * 'special' special -> TestSpecial
+ * topic -> TestTopic;
+
+Verb 'reverse' 'swap' 'exchange'
+ * held 'for' noun -> reverse
+ * noun 'with' noun -> reverse reverse;
+
+Extend "t" last * noun -> TestNoun;
+
+Extend 't' first * -> Test;
+
+Extend 'wave' replace * -> NewWave;
+
+Extend only 'feel' 'touch' replace * noun -> Feel;
+
+[ TestSub a b o;
+ string 25 low_string;
+ print "Test what?> ";
+ table->0 = 260;
+ parse->0 = 61;
+ #Ifdef TARGET_ZCODE;
+ read buffer parse;
+ #Ifnot; ! TARGET_GLULX
+ KeyboardPrimitive(buffer, parse);
+ #Endif; ! TARGET_
+ switch (parse-->1) {
+ 'save':
+ #Ifdef TARGET_ZCODE;
+ #Ifv3;
+ @save ?saved;
+ #Ifnot;
+ save saved;
+ #Endif;
+ #Endif;
+ print "Saving failed.^";
+ 'restore':
+ #Ifdef TARGET_ZCODE;
+ restore saved;
+ #Endif;
+ print "Restoring failed.^";
+ 'restart':
+ @restart;
+ 'quit', 'q//':
+ quit;
+ return 2; rtrue; rfalse; return;
+ 'print', 'p//':
+ print "Print:^",
+ " (string): ", (string) "xyzzy^",
+ " (number): ", (number) 123, "^",
+ " (char): ", (char) 'x', "^",
+ " (address): ", (address) 'plugh//p', "^",
+ " (The): ", (The) person, "^",
+ " (the): ", (the) person, "^",
+ " (A): ", (A) person, "^",
+ " (a): ", (a) person, "^",
+ " (an): ", (an) person, "^",
+ " (name): ", (name) person, "^",
+ " (object): ", (object) person, "^",
+ " (property): ", (property) alias, "^",
+ " (<routine>): ", (LanguageNumber) 123, "^",
+ " <expression>: ", a * 2 - 1, "^",
+ " (<expression>): ", (a + person), "^";
+ print "Escapes:^",
+ " by mnemonic: @!! @<< @'A @AE @et @:y^",
+ " by decimal value: @@64 @@126^",
+ " by Unicode value: @{DC}@{002b}^",
+ " by string variable: @25^";
+ 'font', 'style':
+ font off; print "font off^";
+ font on; print "font on^";
+ style reverse; print "style reverse^"; style roman;
+ style bold; print "style bold^";
+ style underline; print "style underline^";
+ style fixed; print "style fixed^";
+ style roman; print "style roman^";
+ 'statements':
+ spaces 8;
+ objectloop (o) {
+ print "objectloop (o): ", (the) o, "^";
+ }
+ objectloop (o in compass) { ! 'in' is a keyword
+ print "objectloop (o in compass): ", (the) o, "^";
+ }
+ objectloop (o in compass && true) { ! 'in' is an operator
+ print "objectloop (o in compass && true): ", (the) o, "^";
+ }
+ objectloop (o from se_obj) {
+ print "objectloop (o from se_obj): ", (the) o, "^";
+ }
+ objectloop (o near person) {
+ print "objectloop (o near person): ", (the) o, "^";
+ }
+ #Ifdef TARGET_ZCODE;
+ #Trace assembly on;
+@ ! This is assembly.
+ add -4 ($$1+$3)*2 -> b;
+ @get_sibling test_tube -> b ?saved;
+ @inc [b];
+ @je sp (1+3*0) ? equal;
+ @je 1 ((sp)) ?~ different;
+ .! This is a label:
+ equal;
+ print "sp == 1^";
+ jump label;
+ .different;
+ print "sp @@126= 1^";
+ .label;
+ #Trace off; #Endif; ! TARGET_ZCODE
+ a = random(10);
+ switch (a) {
+ 1, 9:
+ box "Testing oneself is best when done alone."
+ " -- Jimmy Carter";
+ 2, 6, to, 3 to 5, to to to:
+ <Take pigeon>;
+ #Ifdef VN_1633;
+ <Jump, person>;
+ #Endif;
+ a = ##Drop;
+ < ! The angle brackets may be separated by whitespace.
+ < (a) pigeon > >;
+ default:
+ do {
+ give person general ~general;
+ } until (person provides life && ~~false);
+ if (a == 7) a = 4;
+ else a = 5;
+ }
+ 'expressions':
+ a = 1+1-1*1/1%1&1|1&&1||1==(1~=(1>(1<(1>=(1<=1)))));
+ a++; ++a; a--; --a;
+ a = person.life;
+ a = kitchen.&arr;
+ a = kitchen.#arr;
+ a = Bird::wingspan;
+ a = kitchen has general;
+ a = kitchen hasnt general;
+ a = kitchen provides arr;
+ a = person in kitchen;
+ a = person notin kitchen;
+ a = person ofclass Bird;
+ a = a == 0 or 1;
+ a = StubR();
+ a = StubR(a);
+ a = StubR(, a);
+ a = "string";
+ a = 'word';
+ a = '''; ! character
+ a = $09afAF;
+ a = $$01;
+ a = ##Eat; a = #a$Eat;
+ a = #g$self;
+ a = #n$!word;
+ a = #r$StubR;
+ a = #dict_par1;
+ default:
+ for (a = 2, b = a; (a < buffer->1 + 2) && (Bird::wingspan): ++a, b--) {
+ print (char) buffer->a;
+ }
+ new_line;
+ for (::) break;
+ }
+ .saved;;
+];
+
+[ TestNumberSub;
+ print_ret parsed_number, " is ", (number) parsed_number, ".";
+];
+
+[ TestAttributeSub; print_ret (The) noun, " has been reversed."; ];
+
+[ CreatureTest obj; return obj has animate; ];
+
+[ TestCreatureSub; print_ret (The) noun, " is a creature."; ];
+
+[ TestMultiheldSub; print_ret "You are holding ", (the) noun, "."; ];
+
+[ TestMultiexceptSub; "You test ", (the) noun, " with ", (the) second, "."; ];
+
+[ TestMultiinsideSub; "You test ", (the) noun, " from ", (the) second, "."; ];
+
+[ TestMultiSub; print_ret (The) noun, " is a thing."; ];
+
+[ TestNounFilterSub; print_ret (The) noun, " is a bird."; ];
+
+[ TestScopeFilterSub; print_ret (The) noun, " is a direction."; ];
+
+[ TestSpecialSub; "Your lucky number is ", parsed_number, "."; ];
+
+[ TestTopicSub; "You discuss a topic."; ];
+
+[ TestNounSub; "That is ", (a) noun, "."; ];
+
+[ TestHeldSub; "You are holding ", (a) noun, "."; ];
+
+[ NewWaveSub; "That would be foolish."; ];
+
+[ FeelSub; print_ret (The) noun, " feels normal."; ];
+
+[ ReverseSub from;
+ from = parent(noun);
+ move noun to parent(second);
+ if (from == to)
+ move second to to;
+ else
+ move second to from;
+ give noun to;
+ from = to;
+ give second from;
+ "You swap ", (the) noun, " and ", (the) second, ".";
+];
+
+End: The End directive ends the source code.
diff --git a/tests/examplefiles/example.kal b/tests/examplefiles/example.kal
new file mode 100644
index 00000000..c05c14ca
--- /dev/null
+++ b/tests/examplefiles/example.kal
@@ -0,0 +1,75 @@
+#!/usr/bin/env kal
+
+# This demo executes GET requests in parallel and in series
+# using `for` loops and `wait for` statements.
+
+# Notice how the serial GET requests always return in order
+# and take longer in total. Parallel requests come back in
+# order of receipt.
+
+http = require 'http'
+
+urls = ['http://www.google.com'
+ 'http://www.apple.com'
+ 'http://www.microsoft.com'
+ 'http://www.nodejs.org'
+ 'http://www.yahoo.com']
+
+# This function does a GET request for each URL in series
+# It will wait for a response from each request before moving on
+# to the next request. Notice the output will be in the same order as the
+# urls variable every time regardless of response time.
+# It is a task rather than a function because it is called asynchronously
+# This allows us to use `return` to implicitly call back
+task series_demo()
+ # The `series` keyword is optional here (for loops are serial by default)
+ total_time = 0
+
+ for series url in urls
+ timer = new Date
+
+ # we use the `safe` keyword because get is a "nonstandard" task
+ # that does not call back with an error argument
+ safe wait for response from http.get url
+
+ delay = new Date() - timer
+ total_time += delay
+
+ print "GET #{url} - #{response.statusCode} - #{response.connection.bytesRead} bytes - #{delay} ms"
+
+    # because we are in a task rather than a function, this actually executes a callback
+ return total_time
+
+# This function does a GET request for each URL in parallel
+# It will NOT wait for a response from each request before moving on
+# to the next request. Notice the output will be determined by the order in which
+# the requests complete!
+task parallel_demo()
+ total_time = 0
+
+ # The `parallel` keyword is only meaningful here because the loop contains
+ # a `wait for` statement (meaning callbacks are used)
+ for parallel url in urls
+ timer = new Date
+
+ # we use the `safe` keyword because get is a "nonstandard" task
+ # that does not call back with an error argument
+ safe wait for response from http.get url
+
+ delay = new Date() - timer
+ total_time += delay
+
+ print "GET #{url} - #{response.statusCode} - #{response.connection.bytesRead} bytes - #{delay}ms"
+
+    # because we are in a task rather than a function, this actually executes a callback
+ return total_time
+
+print 'Series Requests...'
+wait for time1 from series_demo()
+print "Total duration #{time1}ms"
+
+print ''
+
+print 'Parallel Requests...'
+wait for time2 from parallel_demo()
+print "Total duration #{time2}ms"
diff --git a/tests/examplefiles/example.lagda b/tests/examplefiles/example.lagda
new file mode 100644
index 00000000..b5476fa0
--- /dev/null
+++ b/tests/examplefiles/example.lagda
@@ -0,0 +1,19 @@
+\documentclass{article}
+% this is a LaTeX comment
+\usepackage{agda}
+
+\begin{document}
+
+Here's how you can define \emph{RGB} colors in Agda:
+
+\begin{code}
+module example where
+
+open import Data.Fin
+open import Data.Nat
+
+data Color : Set where
+ RGB : Fin 256 → Fin 256 → Fin 256 → Color
+\end{code}
+
+\end{document} \ No newline at end of file
diff --git a/tests/examplefiles/example.ma b/tests/examplefiles/example.ma
new file mode 100644
index 00000000..a8119ea5
--- /dev/null
+++ b/tests/examplefiles/example.ma
@@ -0,0 +1,8 @@
+1 + 1 (* This is a comment *)
+Global`
+SomeNamespace`Foo
+f[x_, y__, 3, z___] := tsneirsnteintie "fosrt" neisnrteiasrn
+E + 3
+Plus[1,Times[2,3]]
+Map[#1 + #2&, SomePairList]
+Plus[1.,-1,-1.,-1.0,] \ No newline at end of file
diff --git a/tests/examplefiles/example.mq4 b/tests/examplefiles/example.mq4
new file mode 100644
index 00000000..54a5fa60
--- /dev/null
+++ b/tests/examplefiles/example.mq4
@@ -0,0 +1,187 @@
+//+------------------------------------------------------------------+
+//| PeriodConverter.mq4 |
+//| Copyright 2006-2014, MetaQuotes Software Corp. |
+//| http://www.metaquotes.net |
+//+------------------------------------------------------------------+
+#property copyright "2006-2014, MetaQuotes Software Corp."
+#property link "http://www.mql4.com"
+#property description "Period Converter to updated format of history base"
+#property strict
+#property show_inputs
+#include <WinUser32.mqh>
+
+input int InpPeriodMultiplier=3; // Period multiplier factor
+int ExtHandle=-1;
+//+------------------------------------------------------------------+
+//| script program start function |
+//+------------------------------------------------------------------+
+void OnStart()
+ {
+ datetime time0;
+ ulong last_fpos=0;
+ long last_volume=0;
+ int i,start_pos,periodseconds;
+ int hwnd=0,cnt=0;
+//---- History header
+ int file_version=401;
+ string c_copyright;
+ string c_symbol=Symbol();
+ int i_period=Period()*InpPeriodMultiplier;
+ int i_digits=Digits;
+ int i_unused[13];
+ MqlRates rate;
+//---
+ ExtHandle=FileOpenHistory(c_symbol+(string)i_period+".hst",FILE_BIN|FILE_WRITE|FILE_SHARE_WRITE|FILE_SHARE_READ|FILE_ANSI);
+ if(ExtHandle<0)
+ return;
+ c_copyright="(C)opyright 2003, MetaQuotes Software Corp.";
+ ArrayInitialize(i_unused,0);
+//--- write history file header
+ FileWriteInteger(ExtHandle,file_version,LONG_VALUE);
+ FileWriteString(ExtHandle,c_copyright,64);
+ FileWriteString(ExtHandle,c_symbol,12);
+ FileWriteInteger(ExtHandle,i_period,LONG_VALUE);
+ FileWriteInteger(ExtHandle,i_digits,LONG_VALUE);
+ FileWriteInteger(ExtHandle,0,LONG_VALUE);
+ FileWriteInteger(ExtHandle,0,LONG_VALUE);
+ FileWriteArray(ExtHandle,i_unused,0,13);
+//--- write history file
+ periodseconds=i_period*60;
+ start_pos=Bars-1;
+ rate.open=Open[start_pos];
+ rate.low=Low[start_pos];
+ rate.high=High[start_pos];
+ rate.tick_volume=(long)Volume[start_pos];
+ rate.spread=0;
+ rate.real_volume=0;
+ //--- normalize open time
+ rate.time=Time[start_pos]/periodseconds;
+ rate.time*=periodseconds;
+ for(i=start_pos-1; i>=0; i--)
+ {
+ if(IsStopped())
+ break;
+ time0=Time[i];
+ //--- history may be updated
+ if(i==0)
+ {
+ //--- modify index if history was updated
+ if(RefreshRates())
+ i=iBarShift(NULL,0,time0);
+ }
+ //---
+ if(time0>=rate.time+periodseconds || i==0)
+ {
+ if(i==0 && time0<rate.time+periodseconds)
+ {
+ rate.tick_volume+=(long)Volume[0];
+ if(rate.low>Low[0])
+ rate.low=Low[0];
+ if(rate.high<High[0])
+ rate.high=High[0];
+ rate.close=Close[0];
+ }
+ last_fpos=FileTell(ExtHandle);
+ last_volume=(long)Volume[i];
+ FileWriteStruct(ExtHandle,rate);
+ cnt++;
+ if(time0>=rate.time+periodseconds)
+ {
+ rate.time=time0/periodseconds;
+ rate.time*=periodseconds;
+ rate.open=Open[i];
+ rate.low=Low[i];
+ rate.high=High[i];
+ rate.close=Close[i];
+ rate.tick_volume=last_volume;
+ }
+ }
+ else
+ {
+ rate.tick_volume+=(long)Volume[i];
+ if(rate.low>Low[i])
+ rate.low=Low[i];
+ if(rate.high<High[i])
+ rate.high=High[i];
+ rate.close=Close[i];
+ }
+ }
+ FileFlush(ExtHandle);
+ Print(cnt," record(s) written");
+//--- collect incoming ticks
+ datetime last_time=LocalTime()-5;
+ while(!IsStopped())
+ {
+ datetime cur_time=LocalTime();
+ //--- check for new rates
+ if(RefreshRates())
+ {
+ time0=Time[0];
+ FileSeek(ExtHandle,last_fpos,SEEK_SET);
+ //--- is there current bar?
+ if(time0<rate.time+periodseconds)
+ {
+ rate.tick_volume+=(long)Volume[0]-last_volume;
+ last_volume=(long)Volume[0];
+ if(rate.low>Low[0])
+ rate.low=Low[0];
+ if(rate.high<High[0])
+ rate.high=High[0];
+ rate.close=Close[0];
+ }
+ else
+ {
+ //--- no, there is new bar
+ rate.tick_volume+=(long)Volume[1]-last_volume;
+ if(rate.low>Low[1])
+ rate.low=Low[1];
+ if(rate.high<High[1])
+ rate.high=High[1];
+ //--- write previous bar remains
+ FileWriteStruct(ExtHandle,rate);
+ last_fpos=FileTell(ExtHandle);
+ //----
+ rate.time=time0/periodseconds;
+ rate.time*=periodseconds;
+ rate.open=Open[0];
+ rate.low=Low[0];
+ rate.high=High[0];
+ rate.close=Close[0];
+ rate.tick_volume=(long)Volume[0];
+ last_volume=rate.tick_volume;
+ }
+ //----
+ FileWriteStruct(ExtHandle,rate);
+ FileFlush(ExtHandle);
+ //---
+ if(hwnd==0)
+ {
+ hwnd=WindowHandle(Symbol(),i_period);
+ if(hwnd!=0)
+ Print("Chart window detected");
+ }
+ //--- refresh window not frequently than 1 time in 2 seconds
+ if(hwnd!=0 && cur_time-last_time>=2)
+ {
+ PostMessageA(hwnd,WM_COMMAND,33324,0);
+ last_time=cur_time;
+ }
+ }
+ Sleep(50);
+ }
+//---
+ }
+//+------------------------------------------------------------------+
+//| |
+//+------------------------------------------------------------------+
+void OnDeinit(const int reason)
+ {
+//---
+ if(ExtHandle>=0)
+ {
+ FileClose(ExtHandle);
+ ExtHandle=-1;
+ }
+//---
+ }
+//+------------------------------------------------------------------+ \ No newline at end of file
diff --git a/tests/examplefiles/example.mqh b/tests/examplefiles/example.mqh
new file mode 100644
index 00000000..ee80ed52
--- /dev/null
+++ b/tests/examplefiles/example.mqh
@@ -0,0 +1,123 @@
+//+------------------------------------------------------------------+
+//| Array.mqh |
+//| Copyright 2009-2013, MetaQuotes Software Corp. |
+//| http://www.mql4.com |
+//+------------------------------------------------------------------+
+#include <Object.mqh>
+//+------------------------------------------------------------------+
+//| Class CArray |
+//| Purpose: Base class of dynamic arrays. |
+//| Derives from class CObject. |
+//+------------------------------------------------------------------+
+class CArray : public CObject
+ {
+protected:
+ int m_step_resize; // increment size of the array
+ int m_data_total; // number of elements
+   int               m_data_max;        // maximum size of the array without memory reallocation
+ int m_sort_mode; // mode of array sorting
+
+public:
+ CArray(void);
+ ~CArray(void);
+ //--- methods of access to protected data
+ int Step(void) const { return(m_step_resize); }
+ bool Step(const int step);
+ int Total(void) const { return(m_data_total); }
+ int Available(void) const { return(m_data_max-m_data_total); }
+ int Max(void) const { return(m_data_max); }
+ bool IsSorted(const int mode=0) const { return(m_sort_mode==mode); }
+ int SortMode(void) const { return(m_sort_mode); }
+ //--- cleaning method
+ void Clear(void) { m_data_total=0; }
+ //--- methods for working with files
+ virtual bool Save(const int file_handle);
+ virtual bool Load(const int file_handle);
+ //--- sorting method
+ void Sort(const int mode=0);
+
+protected:
+ virtual void QuickSort(int beg,int end,const int mode=0) { }
+ };
+//+------------------------------------------------------------------+
+//| Constructor |
+//+------------------------------------------------------------------+
+CArray::CArray(void) : m_step_resize(16),
+ m_data_total(0),
+ m_data_max(0),
+ m_sort_mode(-1)
+ {
+ }
+//+------------------------------------------------------------------+
+//| Destructor |
+//+------------------------------------------------------------------+
+CArray::~CArray(void)
+ {
+ }
+//+------------------------------------------------------------------+
+//| Method Set for variable m_step_resize |
+//+------------------------------------------------------------------+
+bool CArray::Step(const int step)
+ {
+//--- check
+ if(step>0)
+ {
+ m_step_resize=step;
+ return(true);
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
+//| Sorting an array in ascending order |
+//+------------------------------------------------------------------+
+void CArray::Sort(const int mode)
+ {
+//--- check
+ if(IsSorted(mode))
+ return;
+ m_sort_mode=mode;
+ if(m_data_total<=1)
+ return;
+//--- sort
+ QuickSort(0,m_data_total-1,mode);
+ }
+//+------------------------------------------------------------------+
+//| Writing header of array to file |
+//+------------------------------------------------------------------+
+bool CArray::Save(const int file_handle)
+ {
+//--- check handle
+ if(file_handle!=INVALID_HANDLE)
+ {
+ //--- write start marker - 0xFFFFFFFFFFFFFFFF
+ if(FileWriteLong(file_handle,-1)==sizeof(long))
+ {
+ //--- write array type
+ if(FileWriteInteger(file_handle,Type(),INT_VALUE)==INT_VALUE)
+ return(true);
+ }
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
+//| Reading header of array from file |
+//+------------------------------------------------------------------+
+bool CArray::Load(const int file_handle)
+ {
+//--- check handle
+ if(file_handle!=INVALID_HANDLE)
+ {
+ //--- read and check start marker - 0xFFFFFFFFFFFFFFFF
+ if(FileReadLong(file_handle)==-1)
+ {
+ //--- read and check array type
+ if(FileReadInteger(file_handle,INT_VALUE)==Type())
+ return(true);
+ }
+ }
+//--- failure
+ return(false);
+ }
+//+------------------------------------------------------------------+
diff --git a/tests/examplefiles/example.ni b/tests/examplefiles/example.ni
new file mode 100644
index 00000000..32279e80
--- /dev/null
+++ b/tests/examplefiles/example.ni
@@ -0,0 +1,57 @@
+ | | |
+"Informal by Nature"
+[ * * * ]
+by
+[ * * * ]
+David Corbett
+
+[This is a [nested] comment.]
+
+Section 1 - Use option translation
+
+Use maximum tests of at least 100 translates as (-
+@c
+Constant MAX_TESTS = {N}; —). | Section 2
+
+A room has a number called size.
+
+The Kitchen is a room. "A nondescript kitchen.“ The Kitchen has size 2.
+
+When play begins:
+ say "Testing:[line break]";
+ test 0.
+
+To test (N — number): (—
+ if (Test({N}) == (+size of the Kitchen [this should succeed]+)) {-open—brace}
+ print ”Success.^”;
+ {-close-brace} else {
+ print “Failure.^";
+ }
+]; ! You shouldn't end a routine within a phrase definition, but it works.
+[ Unused;
+ #Include "\
+@p \
+"; ! At signs hold no power here.
+! Of course, the file "@p .h" must exist.
+-).
+
+Include (-!% This is not ICL.
+
+[ Test x;
+ if (x) {x++;}
+ {–! Single line comment.}
+@inc x;
+@p At signs.
+...
+@Purpose: ...
+...
+@-...
+@c ...
+@inc x;
+@c
+@c
+ return x;
+];
+@Purpose: ...
+@-------------------------------------------------------------------------------
+-).
diff --git a/tests/examplefiles/example.nix b/tests/examplefiles/example.nix
new file mode 100644
index 00000000..515b686f
--- /dev/null
+++ b/tests/examplefiles/example.nix
@@ -0,0 +1,80 @@
+{ stdenv, fetchurl, fetchgit, openssl, zlib, pcre, libxml2, libxslt, expat
+, rtmp ? false
+, fullWebDAV ? false
+, syslog ? false
+, moreheaders ? false, ...}:
+
+let
+ version = "1.4.4";
+ mainSrc = fetchurl {
+ url = "http://nginx.org/download/nginx-${version}.tar.gz";
+ sha256 = "1f82845mpgmhvm151fhn2cnqjggw9w7cvsqbva9rb320wmc9m63w";
+ };
+
+ rtmp-ext = fetchgit {
+ url = git://github.com/arut/nginx-rtmp-module.git;
+ rev = "1cfb7aeb582789f3b15a03da5b662d1811e2a3f1";
+ sha256 = "03ikfd2l8mzsjwx896l07rdrw5jn7jjfdiyl572yb9jfrnk48fwi";
+ };
+
+ dav-ext = fetchgit {
+ url = git://github.com/arut/nginx-dav-ext-module.git;
+ rev = "54cebc1f21fc13391aae692c6cce672fa7986f9d";
+ sha256 = "1dvpq1fg5rslnl05z8jc39sgnvh3akam9qxfl033akpczq1bh8nq";
+ };
+
+ syslog-ext = fetchgit {
+ url = https://github.com/yaoweibin/nginx_syslog_patch.git;
+ rev = "165affd9741f0e30c4c8225da5e487d33832aca3";
+ sha256 = "14dkkafjnbapp6jnvrjg9ip46j00cr8pqc2g7374z9aj7hrvdvhs";
+ };
+
+ moreheaders-ext = fetchgit {
+ url = https://github.com/agentzh/headers-more-nginx-module.git;
+ rev = "refs/tags/v0.23";
+ sha256 = "12pbjgsxnvcf2ff2i2qdn39q4cm5czlgrng96j8ml4cgxvnbdh39";
+ };
+in
+
+stdenv.mkDerivation rec {
+ name = "nginx-${version}";
+ src = mainSrc;
+
+ buildInputs = [ openssl zlib pcre libxml2 libxslt
+ ] ++ stdenv.lib.optional fullWebDAV expat;
+
+ patches = if syslog then [ "${syslog-ext}/syslog_1.4.0.patch" ] else [];
+
+ configureFlags = [
+ "--with-http_ssl_module"
+ "--with-http_spdy_module"
+ "--with-http_xslt_module"
+ "--with-http_sub_module"
+ "--with-http_dav_module"
+ "--with-http_gzip_static_module"
+ "--with-http_secure_link_module"
+ "--with-ipv6"
+ # Install destination problems
+ # "--with-http_perl_module"
+ ] ++ stdenv.lib.optional rtmp "--add-module=${rtmp-ext}"
+ ++ stdenv.lib.optional fullWebDAV "--add-module=${dav-ext}"
+ ++ stdenv.lib.optional syslog "--add-module=${syslog-ext}"
+ ++ stdenv.lib.optional moreheaders "--add-module=${moreheaders-ext}";
+
+ preConfigure = ''
+ export NIX_CFLAGS_COMPILE="$NIX_CFLAGS_COMPILE -I${libxml2 }/include/libxml2"
+ '';
+
+ # escape example
+ postInstall = ''
+ mv $out/sbin $out/bin ''' ''${
+ ${ if true then ${ "" } else false }
+ '';
+
+ meta = {
+ description = "A reverse proxy and lightweight webserver";
+ maintainers = [ stdenv.lib.maintainers.raskin];
+ platforms = stdenv.lib.platforms.all;
+ inherit version;
+ };
+}
diff --git a/tests/examplefiles/example.rexx b/tests/examplefiles/example.rexx
new file mode 100644
index 00000000..ec4da5ad
--- /dev/null
+++ b/tests/examplefiles/example.rexx
@@ -0,0 +1,50 @@
+/* REXX example. */
+
+/* Some basic constructs. */
+almost_pi = 0.1415 + 3
+if almost_pi < 3 then
+ say 'huh?'
+else do
+ say 'almost_pi=' almost_pi || " - ok"
+end
+x = '"' || "'" || '''' || """" /* quotes */
+
+/* A comment
+ * spawning multiple
+ lines. /* / */
+
+/* Built-in functions. */
+line = 'line containing some short text'
+say WordPos(line, 'some')
+say Word(line, 4)
+
+/* Labels and procedures. */
+some_label :
+
+divide: procedure
+ parse arg some other
+ return some / other
+
+call divide(5, 2)
+
+/* Loops */
+do i = 1 to 5
+ do j = -3 to -9 by -3
+ say i '+' j '=' i + j
+ end j
+end i
+
+do forever
+ leave
+end
+
+/* Print a text file on MVS. */
+ADDRESS TSO
+"ALLOC F(TEXTFILE) DSN('some.text.dsn') SHR REU"
+"EXECIO * DISKR TEXTFILE ( FINIS STEM LINES."
+"FREE F(TEXTFILE)"
+I = 1
+DO WHILE I <= LINES.0
+ SAY ' LINE ' I ' : ' LINES.I
+ I = I + 1
+END
diff --git a/tests/examplefiles/example.stan b/tests/examplefiles/example.stan
index e936f54a..7eb6fdfc 100644
--- a/tests/examplefiles/example.stan
+++ b/tests/examplefiles/example.stan
@@ -19,6 +19,7 @@ data {
positive_ordered[3] wibble;
corr_matrix[3] grault;
cov_matrix[3] garply;
+ cholesky_factor_cov[3] waldo;
real<lower=-1,upper=1> foo1;
real<lower=0> foo2;
@@ -94,6 +95,7 @@ model {
// lp__ should be highlighted
// normal_log as a function
lp__ <- lp__ + normal_log(plugh, 0, 1);
+ increment_log_prob(normal_log(plugh, 0, 1));
// print statement and string literal
print("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_~@#$%^&*`'-+={}[].,;: ");
diff --git a/tests/examplefiles/exampleScript.cfc b/tests/examplefiles/exampleScript.cfc
new file mode 100644
index 00000000..002acbcd
--- /dev/null
+++ b/tests/examplefiles/exampleScript.cfc
@@ -0,0 +1,241 @@
+<cfscript>
+/**
+********************************************************************************
+ContentBox - A Modular Content Platform
+Copyright 2012 by Luis Majano and Ortus Solutions, Corp
+www.gocontentbox.org | www.luismajano.com | www.ortussolutions.com
+********************************************************************************
+Apache License, Version 2.0
+
+Copyright Since [2012] [Luis Majano and Ortus Solutions,Corp]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+********************************************************************************
+* A generic content service for content objects
+*/
+component extends="coldbox.system.orm.hibernate.VirtualEntityService" singleton{
+
+ // DI
+ property name="settingService" inject="id:settingService@cb";
+ property name="cacheBox" inject="cachebox";
+ property name="log" inject="logbox:logger:{this}";
+ property name="customFieldService" inject="customFieldService@cb";
+ property name="categoryService" inject="categoryService@cb";
+ property name="commentService" inject="commentService@cb";
+ property name="contentVersionService" inject="contentVersionService@cb";
+ property name="authorService" inject="authorService@cb";
+ property name="populator" inject="wirebox:populator";
+ property name="systemUtil" inject="SystemUtil@cb";
+
+ /*
+ * Constructor
+ * @entityName.hint The content entity name to bind this service to.
+ */
+ ContentService function init(entityName="cbContent"){
+ // init it
+ super.init(entityName=arguments.entityName, useQueryCaching=true);
+
+ // Test scope coloring in pygments
+ this.colorTestVar = "Just for testing pygments!";
+ cookie.colorTestVar = "";
+ client.colorTestVar = ""
+ session.colorTestVar = "";
+ application.colorTestVar = "";
+
+ return this;
+ }
+
+ /**
+ * Clear all content caches
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearAllCaches(boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content",async=arguments.async);
+ return this;
+ }
+
+ /**
+ * Clear all page wrapper caches
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearAllPageWrapperCaches(boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content-pagewrapper",async=arguments.async);
+ return this;
+ }
+
+ /**
+ * Clear all page wrapper caches
+ * @slug.hint The slug partial to clean on
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearPageWrapperCaches(required any slug, boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clearByKeySnippet(keySnippet="cb-content-pagewrapper-#arguments.slug#",async=arguments.async);
+ return this;
+ }
+
+ /**
+ * Clear a page wrapper cache
+ * @slug.hint The slug to clean
+ * @async.hint Run it asynchronously or not, defaults to false
+ */
+ function clearPageWrapper(required any slug, boolean async=false){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ }
+
+ /**
+	* Searches published content with cool parameters, remember published content only
+ * @searchTerm.hint The search term to search
+ * @max.hint The maximum number of records to paginate
+ * @offset.hint The offset in the pagination
+ * @asQuery.hint Return as query or array of objects, defaults to array of objects
+ * @sortOrder.hint The sorting of the search results, defaults to publishedDate DESC
+ * @isPublished.hint Search for published, non-published or both content objects [true, false, 'all']
+ * @searchActiveContent.hint Search only content titles or both title and active content. Defaults to both.
+ */
+ function searchContent(
+ any searchTerm="",
+ numeric max=0,
+ numeric offset=0,
+ boolean asQuery=false,
+ any sortOrder="publishedDate DESC",
+ any isPublished=true,
+ boolean searchActiveContent=true){
+
+ var results = {};
+ var c = newCriteria();
+
+ // only published content
+ if( isBoolean( arguments.isPublished ) ){
+ // Published bit
+ c.isEq( "isPublished", javaCast( "Boolean", arguments.isPublished ) );
+ // Published eq true evaluate other params
+ if( arguments.isPublished ){
+ c.isLt("publishedDate", now() )
+ .$or( c.restrictions.isNull("expireDate"), c.restrictions.isGT("expireDate", now() ) )
+ .isEq("passwordProtection","");
+ }
+ }
+
+ // Search Criteria
+ if( len( arguments.searchTerm ) ){
+ // like disjunctions
+ c.createAlias("activeContent","ac");
+ // Do we search title and active content or just title?
+ if( arguments.searchActiveContent ){
+ c.$or( c.restrictions.like("title","%#arguments.searchTerm#%"),
+ c.restrictions.like("ac.content", "%#arguments.searchTerm#%") );
+ }
+ else{
+ c.like( "title", "%#arguments.searchTerm#%" );
+ }
+ }
+
+ // run criteria query and projections count
+ results.count = c.count( "contentID" );
+ results.content = c.resultTransformer( c.DISTINCT_ROOT_ENTITY )
+ .list(offset=arguments.offset, max=arguments.max, sortOrder=arguments.sortOrder, asQuery=arguments.asQuery);
+
+ return results;
+ }
+
+/********************************************* PRIVATE *********************************************/
+
+
+ /**
+ * Update the content hits
+ * @contentID.hint The content id to update
+ */
+ private function syncUpdateHits(required contentID){
+ var q = new Query(sql="UPDATE cb_content SET hits = hits + 1 WHERE contentID = #arguments.contentID#").execute();
+ return this;
+ }
+
+
+ private function closureTest(){
+ methodCall(
+ param1,
+ function( arg1, required arg2 ){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ },
+ param1
+ );
+ }
+
+ private function StructliteralTest(){
+ return {
+ foo = bar,
+ brad = 'Wood',
+ func = function( arg1, required arg2 ){
+ var settings = settingService.getAllSettings(asStruct=true);
+ // Get appropriate cache provider
+ var cache = cacheBox.getCache( settings.cb_content_cacheName );
+ cache.clear("cb-content-pagewrapper-#arguments.slug#/");
+ return this;
+ },
+ array = [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 'test',
+ 'testing',
+ 'testerton',
+ {
+ foo = true,
+ brad = false,
+ wood = null
+ }
+ ],
+ last = "final"
+ };
+ }
+
+ private function arrayliteralTest(){
+ return [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 'test',
+ 'testing',
+ 'testerton',
+ {
+ foo = true,
+ brad = false,
+ wood = null
+ },
+ 'testy-von-testavich'
+ ];
+ }
+
+}
+</cfscript> \ No newline at end of file
diff --git a/tests/examplefiles/exampleTag.cfc b/tests/examplefiles/exampleTag.cfc
new file mode 100644
index 00000000..753bb826
--- /dev/null
+++ b/tests/examplefiles/exampleTag.cfc
@@ -0,0 +1,18 @@
+<cfcomponent>
+
+ <cffunction name="init" access="public" returntype="any">
+ <cfargument name="arg1" type="any" required="true">
+ <cfset this.myVariable = arguments.arg1>
+
+ <cfreturn this>
+ </cffunction>
+
+ <cffunction name="testFunc" access="private" returntype="void">
+ <cfargument name="arg1" type="any" required="false">
+
+ <cfif structKeyExists(arguments, "arg1")>
+ <cfset writeoutput("Argument exists")>
+ </cfif>
+ </cffunction>
+
+</cfcomponent> \ No newline at end of file
diff --git a/tests/examplefiles/example_elixir.ex b/tests/examplefiles/example_elixir.ex
index 2e92163d..e3ce7816 100644
--- a/tests/examplefiles/example_elixir.ex
+++ b/tests/examplefiles/example_elixir.ex
@@ -360,4 +360,6 @@ defmodule Module do
raise ArgumentError, message:
"could not call #{fun} on module #{module} because it was already compiled"
end
-end \ No newline at end of file
+end
+
+HashDict.new [{'A', 0}, {'T', 0}, {'C', 0}, {'G', 0}]
diff --git a/tests/examplefiles/function_arrows.coffee b/tests/examplefiles/function_arrows.coffee
new file mode 100644
index 00000000..cd1ef1e8
--- /dev/null
+++ b/tests/examplefiles/function_arrows.coffee
@@ -0,0 +1,11 @@
+methodA:-> 'A'
+methodB:=> 'B'
+methodC:()=> 'C'
+methodD:()-> 'D'
+methodE:(a,b)-> 'E'
+methodF:(c,d)-> 'F'
+-> 'G'
+=> 'H'
+
+(-> 'I')
+(=> 'J')
diff --git a/tests/examplefiles/garcia-wachs.kk b/tests/examplefiles/garcia-wachs.kk
index f766e051..91a01fbe 100644
--- a/tests/examplefiles/garcia-wachs.kk
+++ b/tests/examplefiles/garcia-wachs.kk
@@ -1,9 +1,25 @@
-/* This is an example in the Koka Language of the Garcia-Wachs algorithm */
-module garcia-wachs
+// Koka language test module
-public fun main()
-{
- test().print
+// This module implements the GarsiaWachs algorithm.
+// It is an adaptation of the algorithm in ML as described by Jean-Christophe Filliâtre:
+// in ''A functional implementation of the GarsiaWachs algorithm. (functional pearl). ML workshop 2008, pages 91--96''.
+// See: http://www.lri.fr/~filliatr/publis/gwWml08.pdf
+//
+// The algorithm is interesting since it uses mutable references shared between a list and tree but the
+// side effects are not observable from outside. Koka automatically infers that the final algorithm is pure.
+// Note: due to a current limitation in the divergence analysis, koka cannot yet infer that mutually recursive
+// definitions in "insert" and "extract" are terminating and the final algorithm still has a divergence effect.
+// However, koka does infer that no other effect (i.e. an exception due to a partial match) can occur.
+module garcsiaWachs
+
+import test = qualified std/flags
+
+# pre processor test
+
+public function main() {
+ wlist = Cons1(('a',3), [('b',2),('c',1),('d',4),('e',5)])
+ tree = wlist.garsiaWachs()
+ tree.show.println()
}
//----------------------------------------------------
@@ -14,10 +30,9 @@ public type tree<a> {
con Node(left :tree<a>, right :tree<a>)
}
-fun show( t : tree<char> ) : string
-{
+function show( t : tree<char> ) : string {
match(t) {
- Leaf(c) -> Core.show(c)
+ Leaf(c) -> core/show(c)
Node(l,r) -> "Node(" + show(l) + "," + show(r) + ")"
}
}
@@ -30,23 +45,21 @@ public type list1<a> {
Cons1( head : a, tail : list<a> )
}
-
-fun map( xs, f ) {
+function map( xs, f ) {
val Cons1(y,ys) = xs
- return Cons1(f(y), Core.map(ys,f))
+ return Cons1(f(y), core/map(ys,f))
}
-fun zip( xs :list1<a>, ys :list1<b> ) : list1<(a,b)> {
+function zip( xs :list1<a>, ys :list1<b> ) : list1<(a,b)> {
Cons1( (xs.head, ys.head), zip(xs.tail, ys.tail))
}
-
//----------------------------------------------------
// Phase 1
//----------------------------------------------------
-fun insert( after : list<(tree<a>,int)>, t : (tree<a>,int), before : list<(tree<a>,int)> ) : div tree<a>
+function insert( after : list<(tree<a>,int)>, t : (tree<a>,int), before : list<(tree<a>,int)> ) : div tree<a>
{
match(before) {
Nil -> extract( [], Cons1(t,after) )
@@ -60,7 +73,7 @@ fun insert( after : list<(tree<a>,int)>, t : (tree<a>,int), before : list<(tree<
}
}
-fun extract( before : list<(tree<a>,int)>, after : list1<(tree<a>,int)> ) : div tree<a>
+function extract( before : list<(tree<a>,int)>, after : list1<(tree<a>,int)> ) : div tree<a>
{
val Cons1((t1,w1) as x, xs ) = after
match(xs) {
@@ -75,25 +88,24 @@ fun extract( before : list<(tree<a>,int)>, after : list1<(tree<a>,int)> ) : div
}
}
-
-
-fun balance( xs : list1<(tree<a>,int)> ) : div tree<a>
-{
+function balance( xs : list1<(tree<a>,int)> ) : div tree<a> {
extract( [], xs )
}
-fun mark( depth :int, t :tree<(a,ref<h,int>)> ) : <write<h>> ()
-{
+//----------------------------------------------------
+// Phase 2
+//----------------------------------------------------
+
+function mark( depth :int, t :tree<(a,ref<h,int>)> ) : <write<h>> () {
match(t) {
Leaf((_,d)) -> d := depth
Node(l,r) -> { mark(depth+1,l); mark(depth+1,r) }
}
}
-
-fun build( depth :int, xs :list1<(a,ref<h,int>)> ) : <read<h>,div> (tree<a>,list<(a,ref<h,int>)>)
+function build( depth :int, xs :list1<(a,ref<h,int>)> ) : <read<h>,div> (tree<a>,list<(a,ref<h,int>)>)
{
- if (!xs.head.snd == depth) return (Leaf(xs.head.fst), xs.tail)
+ if (!(xs.head.snd) == depth) return (Leaf(xs.head.fst), xs.tail)
l = build(depth+1, xs)
match(l.snd) {
@@ -105,13 +117,11 @@ fun build( depth :int, xs :list1<(a,ref<h,int>)> ) : <read<h>,div> (tree<a>,list
}
}
-public fun test() {
- wlist = Cons1(('a',3), [('b',2),('c',1),('d',4),('e',5)])
- tree = wlist.garciawachs()
- tree.show()
-}
+//----------------------------------------------------
+// Main
+//----------------------------------------------------
-public fun garciawachs( xs : list1<(a,int)> ) : div tree<a>
+public function garsiaWachs( xs : list1<(a,int)> ) : div tree<a>
{
refs = xs.map(fst).map( fun(x) { (x, ref(0)) } )
wleafs = zip( refs.map(Leaf), xs.map(snd) )
diff --git a/tests/examplefiles/grammar-test.p6 b/tests/examplefiles/grammar-test.p6
new file mode 100644
index 00000000..28107f3e
--- /dev/null
+++ b/tests/examplefiles/grammar-test.p6
@@ -0,0 +1,22 @@
+token pod_formatting_code {
+ $<code>=<[A..Z]>
+ '<' { $*POD_IN_FORMATTINGCODE := 1 }
+ $<content>=[ <!before '>'> <pod_string_character> ]+
+ '>' { $*POD_IN_FORMATTINGCODE := 0 }
+}
+
+token pod_string {
+ <pod_string_character>+
+}
+
+token something:sym«<» {
+ <!>
+}
+
+token name {
+ <!>
+}
+
+token comment:sym<#> {
+ '#' {} \N*
+}
diff --git a/tests/examplefiles/hash_syntax.rb b/tests/examplefiles/hash_syntax.rb
new file mode 100644
index 00000000..35b27723
--- /dev/null
+++ b/tests/examplefiles/hash_syntax.rb
@@ -0,0 +1,5 @@
+{ :old_syntax => 'ok' }
+{ 'stings as key' => 'should be ok' }
+{ new_syntax: 'broken until now' }
+{ withoutunderscore: 'should be ok' }
+{ _underscoreinfront: 'might be ok, if I understand the pygments code correct' }
diff --git a/tests/examplefiles/File.hy b/tests/examplefiles/hybris_File.hy
index 9c86c641..9c86c641 100644
--- a/tests/examplefiles/File.hy
+++ b/tests/examplefiles/hybris_File.hy
diff --git a/tests/examplefiles/mg_sample.pro b/tests/examplefiles/idl_sample.pro
index 814d510d..814d510d 100644
--- a/tests/examplefiles/mg_sample.pro
+++ b/tests/examplefiles/idl_sample.pro
diff --git a/tests/examplefiles/inet_pton6.dg b/tests/examplefiles/inet_pton6.dg
index 4104b3e7..3813d5b8 100644
--- a/tests/examplefiles/inet_pton6.dg
+++ b/tests/examplefiles/inet_pton6.dg
@@ -1,5 +1,5 @@
-re = import!
-sys = import!
+import '/re'
+import '/sys'
# IPv6address = hexpart [ ":" IPv4address ]
@@ -20,7 +20,7 @@ addrv6 = re.compile $ r'(?i)(?:{})(?::{})?$'.format hexpart addrv4
#
# :return: a decimal integer
#
-base_n = (q digits) -> foldl (x y) -> (x * q + y) 0 digits
+base_n = q digits -> foldl (x y -> x * q + y) 0 digits
# Parse a sequence of hexadecimal numbers
@@ -29,7 +29,7 @@ base_n = (q digits) -> foldl (x y) -> (x * q + y) 0 digits
#
# :return: an iterable of Python ints
#
-unhex = q -> q and map p -> (int p 16) (q.split ':')
+unhex = q -> q and map (p -> int p 16) (q.split ':')
# Parse an IPv6 address as specified in RFC 4291.
@@ -39,33 +39,33 @@ unhex = q -> q and map p -> (int p 16) (q.split ':')
# :return: an integer which, written in binary form, points to the same node.
#
inet_pton6 = address ->
- raise $ ValueError 'not a valid IPv6 address' if not (match = addrv6.match address)
+ not (match = addrv6.match address) => raise $ ValueError 'not a valid IPv6 address'
start, end, *ipv4 = match.groups!
is_ipv4 = not $ None in ipv4
shift = (7 - start.count ':' - 2 * is_ipv4) * 16
- raise $ ValueError 'not a valid IPv6 address' if (end is None and shift) or shift < 0
+ (end is None and shift) or shift < 0 => raise $ ValueError 'not a valid IPv6 address'
hexaddr = (base_n 0x10000 (unhex start) << shift) + base_n 0x10000 (unhex $ end or '')
- (hexaddr << 32) + base_n 0x100 (map int ipv4) if is_ipv4 else hexaddr
+ if (is_ipv4 => (hexaddr << 32) + base_n 0x100 (map int ipv4)) (otherwise => hexaddr)
-inet6_type = q -> switch
- not q = 'unspecified'
- q == 1 = 'loopback'
- (q >> 32) == 0x000000000000ffff = 'IPv4-mapped'
- (q >> 64) == 0xfe80000000000000 = 'link-local'
- (q >> 120) != 0x00000000000000ff = 'general unicast'
- (q >> 112) % (1 << 4) == 0x0000000000000000 = 'multicast w/ reserved scope value'
- (q >> 112) % (1 << 4) == 0x000000000000000f = 'multicast w/ reserved scope value'
- (q >> 112) % (1 << 4) == 0x0000000000000001 = 'interface-local multicast'
- (q >> 112) % (1 << 4) == 0x0000000000000004 = 'admin-local multicast'
- (q >> 112) % (1 << 4) == 0x0000000000000005 = 'site-local multicast'
- (q >> 112) % (1 << 4) == 0x0000000000000008 = 'organization-local multicast'
- (q >> 112) % (1 << 4) == 0x000000000000000e = 'global multicast'
- (q >> 112) % (1 << 4) != 0x0000000000000002 = 'multicast w/ unknown scope value'
- (q >> 24) % (1 << 112) == 0x00000000000001ff = 'solicited-node multicast'
- True = 'link-local multicast'
+inet6_type = q -> if
+ q == 0 => 'unspecified'
+ q == 1 => 'loopback'
+ (q >> 32) == 0x000000000000ffff => 'IPv4-mapped'
+ (q >> 64) == 0xfe80000000000000 => 'link-local'
+ (q >> 120) != 0x00000000000000ff => 'general unicast'
+ (q >> 112) % (1 << 4) == 0x0000000000000000 => 'multicast w/ reserved scope value'
+ (q >> 112) % (1 << 4) == 0x000000000000000f => 'multicast w/ reserved scope value'
+ (q >> 112) % (1 << 4) == 0x0000000000000001 => 'interface-local multicast'
+ (q >> 112) % (1 << 4) == 0x0000000000000004 => 'admin-local multicast'
+ (q >> 112) % (1 << 4) == 0x0000000000000005 => 'site-local multicast'
+ (q >> 112) % (1 << 4) == 0x0000000000000008 => 'organization-local multicast'
+ (q >> 112) % (1 << 4) == 0x000000000000000e => 'global multicast'
+ (q >> 112) % (1 << 4) != 0x0000000000000002 => 'multicast w/ unknown scope value'
+ (q >> 24) % (1 << 112) == 0x00000000000001ff => 'solicited-node multicast'
+ otherwise => 'link-local multicast'
-print $ (x -> (inet6_type x, hex x)) $ inet_pton6 $ sys.stdin.read!.strip!
+print $ (x -> inet6_type x, hex x) $ inet_pton6 $ sys.stdin.read!.strip!
diff --git a/tests/examplefiles/language.hy b/tests/examplefiles/language.hy
new file mode 100644
index 00000000..9768c39c
--- /dev/null
+++ b/tests/examplefiles/language.hy
@@ -0,0 +1,165 @@
+;;;; This contains some of the core Hy functions used
+;;;; to make functional programming slightly easier.
+;;;;
+
+
+(defn _numeric-check [x]
+ (if (not (numeric? x))
+ (raise (TypeError (.format "{0!r} is not a number" x)))))
+
+(defn cycle [coll]
+ "Yield an infinite repetition of the items in coll"
+ (setv seen [])
+ (for [x coll]
+ (yield x)
+ (.append seen x))
+ (while seen
+ (for [x seen]
+ (yield x))))
+
+(defn dec [n]
+ "Decrement n by 1"
+ (_numeric-check n)
+ (- n 1))
+
+(defn distinct [coll]
+ "Return a generator from the original collection with duplicates
+ removed"
+ (let [[seen []] [citer (iter coll)]]
+ (for [val citer]
+ (if (not_in val seen)
+ (do
+ (yield val)
+ (.append seen val))))))
+
+(defn drop [count coll]
+ "Drop `count` elements from `coll` and yield back the rest"
+ (let [[citer (iter coll)]]
+ (try (for [i (range count)]
+ (next citer))
+ (catch [StopIteration]))
+ citer))
+
+(defn even? [n]
+ "Return true if n is an even number"
+ (_numeric-check n)
+ (= (% n 2) 0))
+
+(defn filter [pred coll]
+ "Return all elements from `coll` that pass `pred`"
+ (let [[citer (iter coll)]]
+ (for [val citer]
+ (if (pred val)
+ (yield val)))))
+
+(defn inc [n]
+ "Increment n by 1"
+ (_numeric-check n)
+ (+ n 1))
+
+(defn instance? [klass x]
+ (isinstance x klass))
+
+(defn iterable? [x]
+ "Return true if x is iterable"
+ (try (do (iter x) true)
+ (catch [Exception] false)))
+
+(defn iterate [f x]
+ (setv val x)
+ (while true
+ (yield val)
+ (setv val (f val))))
+
+(defn iterator? [x]
+ "Return true if x is an iterator"
+ (try (= x (iter x))
+ (catch [TypeError] false)))
+
+(defn neg? [n]
+ "Return true if n is < 0"
+ (_numeric-check n)
+ (< n 0))
+
+(defn none? [x]
+ "Return true if x is None"
+ (is x None))
+
+(defn numeric? [x]
+ (import numbers)
+ (instance? numbers.Number x))
+
+(defn nth [coll index]
+ "Return nth item in collection or sequence, counting from 0"
+ (if (not (neg? index))
+ (if (iterable? coll)
+ (try (first (list (take 1 (drop index coll))))
+ (catch [IndexError] None))
+ (try (get coll index)
+ (catch [IndexError] None)))
+ None))
+
+(defn odd? [n]
+ "Return true if n is an odd number"
+ (_numeric-check n)
+ (= (% n 2) 1))
+
+(defn pos? [n]
+ "Return true if n is > 0"
+ (_numeric_check n)
+ (> n 0))
+
+(defn remove [pred coll]
+ "Return coll with elements removed that pass `pred`"
+ (let [[citer (iter coll)]]
+ (for [val citer]
+ (if (not (pred val))
+ (yield val)))))
+
+(defn repeat [x &optional n]
+ "Yield x forever or optionally n times"
+ (if (none? n)
+ (setv dispatch (fn [] (while true (yield x))))
+ (setv dispatch (fn [] (for [_ (range n)] (yield x)))))
+ (dispatch))
+
+(defn repeatedly [func]
+ "Yield result of running func repeatedly"
+ (while true
+ (yield (func))))
+
+(defn take [count coll]
+ "Take `count` elements from `coll`, or the whole set if the total
+ number of entries in `coll` is less than `count`."
+ (let [[citer (iter coll)]]
+ (for [_ (range count)]
+ (yield (next citer)))))
+
+(defn take-nth [n coll]
+ "Return every nth member of coll
+ raises ValueError for (not (pos? n))"
+ (if (pos? n)
+ (let [[citer (iter coll)] [skip (dec n)]]
+ (for [val citer]
+ (yield val)
+ (for [_ (range skip)]
+ (next citer))))
+ (raise (ValueError "n must be positive"))))
+
+(defn take-while [pred coll]
+ "Take all elements while `pred` is true"
+ (let [[citer (iter coll)]]
+ (for [val citer]
+ (if (pred val)
+ (yield val)
+ (break)))))
+
+(defn zero? [n]
+ "Return true if n is 0"
+ (_numeric_check n)
+ (= n 0))
+
+(def *exports* ["cycle" "dec" "distinct" "drop" "even?" "filter" "inc"
+ "instance?" "iterable?" "iterate" "iterator?" "neg?"
+ "none?" "nth" "numeric?" "odd?" "pos?" "remove" "repeat"
+ "repeatedly" "take" "take_nth" "take_while" "zero?"])
diff --git a/tests/examplefiles/livescript-demo.ls b/tests/examplefiles/livescript-demo.ls
index 2ff68c63..16d1894a 100644
--- a/tests/examplefiles/livescript-demo.ls
+++ b/tests/examplefiles/livescript-demo.ls
@@ -9,6 +9,8 @@ underscores_i$d = ->
//regexp2//g
'strings' and "strings" and \strings
+another-word-list = <[ more words ]>
+
[2 til 10]
|> map (* 2)
|> filter (> 5)
diff --git a/tests/examplefiles/objc_example.m b/tests/examplefiles/objc_example.m
index cb5c0975..f4f27170 100644
--- a/tests/examplefiles/objc_example.m
+++ b/tests/examplefiles/objc_example.m
@@ -23,3 +23,13 @@ for (key in dictionary) {
NSLog(@"English: %@, Latin: %@", key, [dictionary valueForKey:key]);
}
+// Literals
+NSArray *a = @[ @"1", @"2" ];
+
+NSDictionary *d = @{ @"key": @"value" };
+
+NSNumber *n1 = @( 1 );
+NSNumber *n2 = @( [a length] );
+
++ (void)f1:(NSString *)s1;
++ (void)f2:(NSString *) s2;
diff --git a/tests/examplefiles/py3tb_test.py3tb b/tests/examplefiles/py3tb_test.py3tb
new file mode 100644
index 00000000..706a540f
--- /dev/null
+++ b/tests/examplefiles/py3tb_test.py3tb
@@ -0,0 +1,4 @@
+ File "<stdin>", line 1
+ 1+
+ ^
+SyntaxError: invalid syntax
diff --git a/tests/examplefiles/robotframework.txt b/tests/examplefiles/robotframework_test.txt
index 63ba63e6..63ba63e6 100644
--- a/tests/examplefiles/robotframework.txt
+++ b/tests/examplefiles/robotframework_test.txt
diff --git a/tests/examplefiles/scope.cirru b/tests/examplefiles/scope.cirru
new file mode 100644
index 00000000..728bcabf
--- /dev/null
+++ b/tests/examplefiles/scope.cirru
@@ -0,0 +1,43 @@
+
+-- https://github.com/Cirru/cirru-gopher/blob/master/code/scope.cr
+
+set a (int 2)
+
+print (self)
+
+set c (child)
+
+under c
+ under parent
+ print a
+
+print $ get c a
+
+set c x (int 3)
+print $ get c x
+
+set just-print $ code
+ print a
+
+print just-print
+
+eval (self) just-print
+eval just-print
+
+print (string "string with space")
+print (string "escapes \n \"\\")
+
+brackets ((((()))))
+
+"eval" $ string "eval"
+
+print (add $ (int 1) (int 2))
+
+print $ unwrap $
+ map (a $ int 1) (b $ int 2)
+
+print a
+ int 1
+ , b c
+ int 2
+ , d \ No newline at end of file
diff --git a/tests/examplefiles/swig_java.swg b/tests/examplefiles/swig_java.swg
new file mode 100644
index 00000000..6126a55e
--- /dev/null
+++ b/tests/examplefiles/swig_java.swg
@@ -0,0 +1,1329 @@
+/* -----------------------------------------------------------------------------
+ * java.swg
+ *
+ * Java typemaps
+ * ----------------------------------------------------------------------------- */
+
+%include <javahead.swg>
+
+/* The jni, jtype and jstype typemaps work together and so there should be one of each.
+ * The jni typemap contains the JNI type used in the JNI (C/C++) code.
+ * The jtype typemap contains the Java type used in the JNI intermediary class.
+ * The jstype typemap contains the Java type used in the Java proxy classes, type wrapper classes and module class. */
+
+/* Fragments */
+%fragment("SWIG_PackData", "header") {
+/* Pack binary data into a string */
+SWIGINTERN char * SWIG_PackData(char *c, void *ptr, size_t sz) {
+ static const char hex[17] = "0123456789abcdef";
+ register const unsigned char *u = (unsigned char *) ptr;
+ register const unsigned char *eu = u + sz;
+ for (; u != eu; ++u) {
+ register unsigned char uu = *u;
+ *(c++) = hex[(uu & 0xf0) >> 4];
+ *(c++) = hex[uu & 0xf];
+ }
+ return c;
+}
+}
+
+%fragment("SWIG_UnPackData", "header") {
+/* Unpack binary data from a string */
+SWIGINTERN const char * SWIG_UnpackData(const char *c, void *ptr, size_t sz) {
+ register unsigned char *u = (unsigned char *) ptr;
+ register const unsigned char *eu = u + sz;
+ for (; u != eu; ++u) {
+ register char d = *(c++);
+ register unsigned char uu;
+ if ((d >= '0') && (d <= '9'))
+ uu = ((d - '0') << 4);
+ else if ((d >= 'a') && (d <= 'f'))
+ uu = ((d - ('a'-10)) << 4);
+ else
+ return (char *) 0;
+ d = *(c++);
+ if ((d >= '0') && (d <= '9'))
+ uu |= (d - '0');
+ else if ((d >= 'a') && (d <= 'f'))
+ uu |= (d - ('a'-10));
+ else
+ return (char *) 0;
+ *u = uu;
+ }
+ return c;
+}
+}
+
+/* Primitive types */
+%typemap(jni) bool, const bool & "jboolean"
+%typemap(jni) char, const char & "jchar"
+%typemap(jni) signed char, const signed char & "jbyte"
+%typemap(jni) unsigned char, const unsigned char & "jshort"
+%typemap(jni) short, const short & "jshort"
+%typemap(jni) unsigned short, const unsigned short & "jint"
+%typemap(jni) int, const int & "jint"
+%typemap(jni) unsigned int, const unsigned int & "jlong"
+%typemap(jni) long, const long & "jint"
+%typemap(jni) unsigned long, const unsigned long & "jlong"
+%typemap(jni) long long, const long long & "jlong"
+%typemap(jni) unsigned long long, const unsigned long long & "jobject"
+%typemap(jni) float, const float & "jfloat"
+%typemap(jni) double, const double & "jdouble"
+%typemap(jni) void "void"
+
+%typemap(jtype) bool, const bool & "boolean"
+%typemap(jtype) char, const char & "char"
+%typemap(jtype) signed char, const signed char & "byte"
+%typemap(jtype) unsigned char, const unsigned char & "short"
+%typemap(jtype) short, const short & "short"
+%typemap(jtype) unsigned short, const unsigned short & "int"
+%typemap(jtype) int, const int & "int"
+%typemap(jtype) unsigned int, const unsigned int & "long"
+%typemap(jtype) long, const long & "int"
+%typemap(jtype) unsigned long, const unsigned long & "long"
+%typemap(jtype) long long, const long long & "long"
+%typemap(jtype) unsigned long long, const unsigned long long & "java.math.BigInteger"
+%typemap(jtype) float, const float & "float"
+%typemap(jtype) double, const double & "double"
+%typemap(jtype) void "void"
+
+%typemap(jstype) bool, const bool & "boolean"
+%typemap(jstype) char, const char & "char"
+%typemap(jstype) signed char, const signed char & "byte"
+%typemap(jstype) unsigned char, const unsigned char & "short"
+%typemap(jstype) short, const short & "short"
+%typemap(jstype) unsigned short, const unsigned short & "int"
+%typemap(jstype) int, const int & "int"
+%typemap(jstype) unsigned int, const unsigned int & "long"
+%typemap(jstype) long, const long & "int"
+%typemap(jstype) unsigned long, const unsigned long & "long"
+%typemap(jstype) long long, const long long & "long"
+%typemap(jstype) unsigned long long, const unsigned long long & "java.math.BigInteger"
+%typemap(jstype) float, const float & "float"
+%typemap(jstype) double, const double & "double"
+%typemap(jstype) void "void"
+
+%typemap(jni) char *, char *&, char[ANY], char[] "jstring"
+%typemap(jtype) char *, char *&, char[ANY], char[] "String"
+%typemap(jstype) char *, char *&, char[ANY], char[] "String"
+
+/* JNI types */
+%typemap(jni) jboolean "jboolean"
+%typemap(jni) jchar "jchar"
+%typemap(jni) jbyte "jbyte"
+%typemap(jni) jshort "jshort"
+%typemap(jni) jint "jint"
+%typemap(jni) jlong "jlong"
+%typemap(jni) jfloat "jfloat"
+%typemap(jni) jdouble "jdouble"
+%typemap(jni) jstring "jstring"
+%typemap(jni) jobject "jobject"
+%typemap(jni) jbooleanArray "jbooleanArray"
+%typemap(jni) jcharArray "jcharArray"
+%typemap(jni) jbyteArray "jbyteArray"
+%typemap(jni) jshortArray "jshortArray"
+%typemap(jni) jintArray "jintArray"
+%typemap(jni) jlongArray "jlongArray"
+%typemap(jni) jfloatArray "jfloatArray"
+%typemap(jni) jdoubleArray "jdoubleArray"
+%typemap(jni) jobjectArray "jobjectArray"
+
+%typemap(jtype) jboolean "boolean"
+%typemap(jtype) jchar "char"
+%typemap(jtype) jbyte "byte"
+%typemap(jtype) jshort "short"
+%typemap(jtype) jint "int"
+%typemap(jtype) jlong "long"
+%typemap(jtype) jfloat "float"
+%typemap(jtype) jdouble "double"
+%typemap(jtype) jstring "String"
+%typemap(jtype) jobject "Object"
+%typemap(jtype) jbooleanArray "boolean[]"
+%typemap(jtype) jcharArray "char[]"
+%typemap(jtype) jbyteArray "byte[]"
+%typemap(jtype) jshortArray "short[]"
+%typemap(jtype) jintArray "int[]"
+%typemap(jtype) jlongArray "long[]"
+%typemap(jtype) jfloatArray "float[]"
+%typemap(jtype) jdoubleArray "double[]"
+%typemap(jtype) jobjectArray "Object[]"
+
+%typemap(jstype) jboolean "boolean"
+%typemap(jstype) jchar "char"
+%typemap(jstype) jbyte "byte"
+%typemap(jstype) jshort "short"
+%typemap(jstype) jint "int"
+%typemap(jstype) jlong "long"
+%typemap(jstype) jfloat "float"
+%typemap(jstype) jdouble "double"
+%typemap(jstype) jstring "String"
+%typemap(jstype) jobject "Object"
+%typemap(jstype) jbooleanArray "boolean[]"
+%typemap(jstype) jcharArray "char[]"
+%typemap(jstype) jbyteArray "byte[]"
+%typemap(jstype) jshortArray "short[]"
+%typemap(jstype) jintArray "int[]"
+%typemap(jstype) jlongArray "long[]"
+%typemap(jstype) jfloatArray "float[]"
+%typemap(jstype) jdoubleArray "double[]"
+%typemap(jstype) jobjectArray "Object[]"
+
+/* Non primitive types */
+%typemap(jni) SWIGTYPE "jlong"
+%typemap(jtype) SWIGTYPE "long"
+%typemap(jstype) SWIGTYPE "$&javaclassname"
+
+%typemap(jni) SWIGTYPE [] "jlong"
+%typemap(jtype) SWIGTYPE [] "long"
+%typemap(jstype) SWIGTYPE [] "$javaclassname"
+
+%typemap(jni) SWIGTYPE * "jlong"
+%typemap(jtype) SWIGTYPE * "long"
+%typemap(jstype) SWIGTYPE * "$javaclassname"
+
+%typemap(jni) SWIGTYPE & "jlong"
+%typemap(jtype) SWIGTYPE & "long"
+%typemap(jstype) SWIGTYPE & "$javaclassname"
+
+/* pointer to a class member */
+%typemap(jni) SWIGTYPE (CLASS::*) "jstring"
+%typemap(jtype) SWIGTYPE (CLASS::*) "String"
+%typemap(jstype) SWIGTYPE (CLASS::*) "$javaclassname"
+
+/* The following are the in, out, freearg, argout typemaps. These are the JNI code generating typemaps for converting from Java to C and vice versa. */
+
+/* primitive types */
+%typemap(in) bool
+%{ $1 = $input ? true : false; %}
+
+%typemap(directorout) bool
+%{ $result = $input ? true : false; %}
+
+%typemap(javadirectorin) bool "$jniinput"
+%typemap(javadirectorout) bool "$javacall"
+
+%typemap(in) char,
+ signed char,
+ unsigned char,
+ short,
+ unsigned short,
+ int,
+ unsigned int,
+ long,
+ unsigned long,
+ long long,
+ float,
+ double
+%{ $1 = ($1_ltype)$input; %}
+
+%typemap(directorout) char,
+ signed char,
+ unsigned char,
+ short,
+ unsigned short,
+ int,
+ unsigned int,
+ long,
+ unsigned long,
+ long long,
+ float,
+ double
+%{ $result = ($1_ltype)$input; %}
+
+%typemap(directorin, descriptor="Z") bool "$input = (jboolean) $1;"
+%typemap(directorin, descriptor="C") char "$input = (jint) $1;"
+%typemap(directorin, descriptor="B") signed char "$input = (jbyte) $1;"
+%typemap(directorin, descriptor="S") unsigned char "$input = (jshort) $1;"
+%typemap(directorin, descriptor="S") short "$input = (jshort) $1;"
+%typemap(directorin, descriptor="I") unsigned short "$input = (jint) $1;"
+%typemap(directorin, descriptor="I") int "$input = (jint) $1;"
+%typemap(directorin, descriptor="J") unsigned int "$input = (jlong) $1;"
+%typemap(directorin, descriptor="I") long "$input = (jint) $1;"
+%typemap(directorin, descriptor="J") unsigned long "$input = (jlong) $1;"
+%typemap(directorin, descriptor="J") long long "$input = (jlong) $1;"
+%typemap(directorin, descriptor="F") float "$input = (jfloat) $1;"
+%typemap(directorin, descriptor="D") double "$input = (jdouble) $1;"
+
+%typemap(javadirectorin) char,
+ signed char,
+ unsigned char,
+ short,
+ unsigned short,
+ int,
+ unsigned int,
+ long,
+ unsigned long,
+ long long,
+ float,
+ double
+ "$jniinput"
+
+%typemap(javadirectorout) char,
+ signed char,
+ unsigned char,
+ short,
+ unsigned short,
+ int,
+ unsigned int,
+ long,
+ unsigned long,
+ long long,
+ float,
+ double
+ "$javacall"
+
+%typemap(out) bool %{ $result = (jboolean)$1; %}
+%typemap(out) char %{ $result = (jchar)$1; %}
+%typemap(out) signed char %{ $result = (jbyte)$1; %}
+%typemap(out) unsigned char %{ $result = (jshort)$1; %}
+%typemap(out) short %{ $result = (jshort)$1; %}
+%typemap(out) unsigned short %{ $result = (jint)$1; %}
+%typemap(out) int %{ $result = (jint)$1; %}
+%typemap(out) unsigned int %{ $result = (jlong)$1; %}
+%typemap(out) long %{ $result = (jint)$1; %}
+%typemap(out) unsigned long %{ $result = (jlong)$1; %}
+%typemap(out) long long %{ $result = (jlong)$1; %}
+%typemap(out) float %{ $result = (jfloat)$1; %}
+%typemap(out) double %{ $result = (jdouble)$1; %}
+
+/* unsigned long long */
+/* Convert from BigInteger using the toByteArray member function */
+%typemap(in) unsigned long long {
+ jclass clazz;
+ jmethodID mid;
+ jbyteArray ba;
+ jbyte* bae;
+ jsize sz;
+ int i;
+
+ if (!$input) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "BigInteger null");
+ return $null;
+ }
+ clazz = JCALL1(GetObjectClass, jenv, $input);
+ mid = JCALL3(GetMethodID, jenv, clazz, "toByteArray", "()[B");
+ ba = (jbyteArray)JCALL2(CallObjectMethod, jenv, $input, mid);
+ bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ sz = JCALL1(GetArrayLength, jenv, ba);
+ $1 = 0;
+ for(i=0; i<sz; i++) {
+ $1 = ($1 << 8) | ($1_type)(unsigned char)bae[i];
+ }
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+}
+
+%typemap(directorout) unsigned long long {
+ jclass clazz;
+ jmethodID mid;
+ jbyteArray ba;
+ jbyte* bae;
+ jsize sz;
+ int i;
+
+ if (!$input) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "BigInteger null");
+ return $null;
+ }
+ clazz = JCALL1(GetObjectClass, jenv, $input);
+ mid = JCALL3(GetMethodID, jenv, clazz, "toByteArray", "()[B");
+ ba = (jbyteArray)JCALL2(CallObjectMethod, jenv, $input, mid);
+ bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ sz = JCALL1(GetArrayLength, jenv, ba);
+ $result = 0;
+ for(i=0; i<sz; i++) {
+ $result = ($result << 8) | ($1_type)(unsigned char)bae[i];
+ }
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+}
+
+
+/* Convert to BigInteger - byte array holds number in 2's complement big endian format */
+%typemap(out) unsigned long long {
+ jbyteArray ba = JCALL1(NewByteArray, jenv, 9);
+ jbyte* bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ jclass clazz = JCALL1(FindClass, jenv, "java/math/BigInteger");
+ jmethodID mid = JCALL3(GetMethodID, jenv, clazz, "<init>", "([B)V");
+ jobject bigint;
+ int i;
+
+ bae[0] = 0;
+ for(i=1; i<9; i++ ) {
+ bae[i] = (jbyte)($1>>8*(8-i));
+ }
+
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+ bigint = JCALL3(NewObject, jenv, clazz, mid, ba);
+ $result = bigint;
+}
+
+/* Convert to BigInteger (see out typemap) */
+%typemap(directorin, descriptor="Ljava/math/BigInteger;") unsigned long long, const unsigned long long & {
+ jbyteArray ba = JCALL1(NewByteArray, jenv, 9);
+ jbyte* bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ jclass clazz = JCALL1(FindClass, jenv, "java/math/BigInteger");
+ jmethodID mid = JCALL3(GetMethodID, jenv, clazz, "<init>", "([B)V");
+ jobject bigint;
+ int swig_i;
+
+ bae[0] = 0;
+ for(swig_i=1; swig_i<9; swig_i++ ) {
+ bae[swig_i] = (jbyte)($1>>8*(8-swig_i));
+ }
+
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+ bigint = JCALL3(NewObject, jenv, clazz, mid, ba);
+ $input = bigint;
+}
+
+%typemap(javadirectorin) unsigned long long "$jniinput"
+%typemap(javadirectorout) unsigned long long "$javacall"
+
+/* char * - treat as String */
+%typemap(in, noblock=1) char * {
+ $1 = 0;
+ if ($input) {
+ $1 = ($1_ltype)JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!$1) return $null;
+ }
+}
+
+%typemap(directorout, noblock=1, warning=SWIGWARN_TYPEMAP_DIRECTOROUT_PTR_MSG) char * {
+ $1 = 0;
+ if ($input) {
+ $result = ($1_ltype)JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!$result) return $null;
+ }
+}
+
+%typemap(directorin, descriptor="Ljava/lang/String;", noblock=1) char * {
+ $input = 0;
+ if ($1) {
+ $input = JCALL1(NewStringUTF, jenv, (const char *)$1);
+ if (!$input) return $null;
+ }
+}
+
+%typemap(freearg, noblock=1) char * { if ($1) JCALL2(ReleaseStringUTFChars, jenv, $input, (const char *)$1); }
+%typemap(out, noblock=1) char * { if ($1) $result = JCALL1(NewStringUTF, jenv, (const char *)$1); }
+%typemap(javadirectorin) char * "$jniinput"
+%typemap(javadirectorout) char * "$javacall"
+
+/* char *& - treat as String */
+%typemap(in, noblock=1) char *& ($*1_ltype temp = 0) {
+ $1 = 0;
+ if ($input) {
+ temp = ($*1_ltype)JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!temp) return $null;
+ }
+ $1 = &temp;
+}
+%typemap(freearg, noblock=1) char *& { if ($1 && *$1) JCALL2(ReleaseStringUTFChars, jenv, $input, (const char *)*$1); }
+%typemap(out, noblock=1) char *& { if (*$1) $result = JCALL1(NewStringUTF, jenv, (const char *)*$1); }
+
+%typemap(out) void ""
+%typemap(javadirectorin) void "$jniinput"
+%typemap(javadirectorout) void "$javacall"
+%typemap(directorin, descriptor="V") void ""
+
+/* primitive types by reference */
+%typemap(in) const bool & ($*1_ltype temp)
+%{ temp = $input ? true : false;
+ $1 = &temp; %}
+
+%typemap(directorout,warning=SWIGWARN_TYPEMAP_THREAD_UNSAFE_MSG) const bool &
+%{ static $*1_ltype temp;
+ temp = $input ? true : false;
+ $result = &temp; %}
+
+%typemap(javadirectorin) const bool & "$jniinput"
+%typemap(javadirectorout) const bool & "$javacall"
+
+%typemap(in) const char & ($*1_ltype temp),
+ const signed char & ($*1_ltype temp),
+ const unsigned char & ($*1_ltype temp),
+ const short & ($*1_ltype temp),
+ const unsigned short & ($*1_ltype temp),
+ const int & ($*1_ltype temp),
+ const unsigned int & ($*1_ltype temp),
+ const long & ($*1_ltype temp),
+ const unsigned long & ($*1_ltype temp),
+ const long long & ($*1_ltype temp),
+ const float & ($*1_ltype temp),
+ const double & ($*1_ltype temp)
+%{ temp = ($*1_ltype)$input;
+ $1 = &temp; %}
+
+%typemap(directorout,warning=SWIGWARN_TYPEMAP_THREAD_UNSAFE_MSG) const char &,
+ const signed char &,
+ const unsigned char &,
+ const short &,
+ const unsigned short &,
+ const int &,
+ const unsigned int &,
+ const long &,
+ const unsigned long &,
+ const long long &,
+ const float &,
+ const double &
+%{ static $*1_ltype temp;
+ temp = ($*1_ltype)$input;
+ $result = &temp; %}
+
+%typemap(directorin, descriptor="Z") const bool & "$input = (jboolean)$1;"
+%typemap(directorin, descriptor="C") const char & "$input = (jchar)$1;"
+%typemap(directorin, descriptor="B") const signed char & "$input = (jbyte)$1;"
+%typemap(directorin, descriptor="S") const unsigned char & "$input = (jshort)$1;"
+%typemap(directorin, descriptor="S") const short & "$input = (jshort)$1;"
+%typemap(directorin, descriptor="I") const unsigned short & "$input = (jint)$1;"
+%typemap(directorin, descriptor="I") const int & "$input = (jint)$1;"
+%typemap(directorin, descriptor="J") const unsigned int & "$input = (jlong)$1;"
+%typemap(directorin, descriptor="I") const long & "$input = (jint)$1;"
+%typemap(directorin, descriptor="J") const unsigned long & "$input = (jlong)$1;"
+%typemap(directorin, descriptor="J") const long long & "$input = (jlong)$1;"
+%typemap(directorin, descriptor="F") const float & "$input = (jfloat)$1;"
+%typemap(directorin, descriptor="D") const double & "$input = (jdouble)$1;"
+
+%typemap(javadirectorin) const char & ($*1_ltype temp),
+ const signed char & ($*1_ltype temp),
+ const unsigned char & ($*1_ltype temp),
+ const short & ($*1_ltype temp),
+ const unsigned short & ($*1_ltype temp),
+ const int & ($*1_ltype temp),
+ const unsigned int & ($*1_ltype temp),
+ const long & ($*1_ltype temp),
+ const unsigned long & ($*1_ltype temp),
+ const long long & ($*1_ltype temp),
+ const float & ($*1_ltype temp),
+ const double & ($*1_ltype temp)
+ "$jniinput"
+
+%typemap(javadirectorout) const char & ($*1_ltype temp),
+ const signed char & ($*1_ltype temp),
+ const unsigned char & ($*1_ltype temp),
+ const short & ($*1_ltype temp),
+ const unsigned short & ($*1_ltype temp),
+ const int & ($*1_ltype temp),
+ const unsigned int & ($*1_ltype temp),
+ const long & ($*1_ltype temp),
+ const unsigned long & ($*1_ltype temp),
+ const long long & ($*1_ltype temp),
+ const float & ($*1_ltype temp),
+ const double & ($*1_ltype temp)
+ "$javacall"
+
+
+%typemap(out) const bool & %{ $result = (jboolean)*$1; %}
+%typemap(out) const char & %{ $result = (jchar)*$1; %}
+%typemap(out) const signed char & %{ $result = (jbyte)*$1; %}
+%typemap(out) const unsigned char & %{ $result = (jshort)*$1; %}
+%typemap(out) const short & %{ $result = (jshort)*$1; %}
+%typemap(out) const unsigned short & %{ $result = (jint)*$1; %}
+%typemap(out) const int & %{ $result = (jint)*$1; %}
+%typemap(out) const unsigned int & %{ $result = (jlong)*$1; %}
+%typemap(out) const long & %{ $result = (jint)*$1; %}
+%typemap(out) const unsigned long & %{ $result = (jlong)*$1; %}
+%typemap(out) const long long & %{ $result = (jlong)*$1; %}
+%typemap(out) const float & %{ $result = (jfloat)*$1; %}
+%typemap(out) const double & %{ $result = (jdouble)*$1; %}
+
+/* const unsigned long long & */
+/* Similar to unsigned long long */
+%typemap(in) const unsigned long long & ($*1_ltype temp) {
+ jclass clazz;
+ jmethodID mid;
+ jbyteArray ba;
+ jbyte* bae;
+ jsize sz;
+ int i;
+
+ if (!$input) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "BigInteger null");
+ return $null;
+ }
+ clazz = JCALL1(GetObjectClass, jenv, $input);
+ mid = JCALL3(GetMethodID, jenv, clazz, "toByteArray", "()[B");
+ ba = (jbyteArray)JCALL2(CallObjectMethod, jenv, $input, mid);
+ bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ sz = JCALL1(GetArrayLength, jenv, ba);
+ $1 = &temp;
+ temp = 0;
+ for(i=0; i<sz; i++) {
+ temp = (temp << 8) | ($*1_ltype)(unsigned char)bae[i];
+ }
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+}
+
+%typemap(directorout,warning=SWIGWARN_TYPEMAP_THREAD_UNSAFE_MSG) const unsigned long long & {
+ static $*1_ltype temp;
+ jclass clazz;
+ jmethodID mid;
+ jbyteArray ba;
+ jbyte* bae;
+ jsize sz;
+ int i;
+
+ if (!$input) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "BigInteger null");
+ return $null;
+ }
+ clazz = JCALL1(GetObjectClass, jenv, $input);
+ mid = JCALL3(GetMethodID, jenv, clazz, "toByteArray", "()[B");
+ ba = (jbyteArray)JCALL2(CallObjectMethod, jenv, $input, mid);
+ bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ sz = JCALL1(GetArrayLength, jenv, ba);
+ $result = &temp;
+ temp = 0;
+ for(i=0; i<sz; i++) {
+ temp = (temp << 8) | ($*1_ltype)(unsigned char)bae[i];
+ }
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+}
+
+%typemap(out) const unsigned long long & {
+ jbyteArray ba = JCALL1(NewByteArray, jenv, 9);
+ jbyte* bae = JCALL2(GetByteArrayElements, jenv, ba, 0);
+ jclass clazz = JCALL1(FindClass, jenv, "java/math/BigInteger");
+ jmethodID mid = JCALL3(GetMethodID, jenv, clazz, "<init>", "([B)V");
+ jobject bigint;
+ int i;
+
+ bae[0] = 0;
+ for(i=1; i<9; i++ ) {
+ bae[i] = (jbyte)(*$1>>8*(8-i));
+ }
+
+ JCALL3(ReleaseByteArrayElements, jenv, ba, bae, 0);
+ bigint = JCALL3(NewObject, jenv, clazz, mid, ba);
+ $result = bigint;
+}
+
+%typemap(javadirectorin) const unsigned long long & "$jniinput"
+%typemap(javadirectorout) const unsigned long long & "$javacall"
+
+/* Default handling. Object passed by value. Convert to a pointer */
+%typemap(in) SWIGTYPE ($&1_type argp)
+%{ argp = *($&1_ltype*)&$input;
+ if (!argp) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Attempt to dereference null $1_type");
+ return $null;
+ }
+ $1 = *argp; %}
+
+%typemap(directorout) SWIGTYPE ($&1_type argp)
+%{ argp = *($&1_ltype*)&$input;
+ if (!argp) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Unexpected null return for type $1_type");
+ return $null;
+ }
+ $result = *argp; %}
+
+%typemap(out) SWIGTYPE
+#ifdef __cplusplus
+%{ *($&1_ltype*)&$result = new $1_ltype((const $1_ltype &)$1); %}
+#else
+{
+ $&1_ltype $1ptr = ($&1_ltype) malloc(sizeof($1_ltype));
+ memmove($1ptr, &$1, sizeof($1_type));
+ *($&1_ltype*)&$result = $1ptr;
+}
+#endif
+
+%typemap(directorin,descriptor="L$packagepath/$&javaclassname;") SWIGTYPE
+%{ $input = 0;
+ *(($&1_ltype*)&$input) = &$1; %}
+%typemap(javadirectorin) SWIGTYPE "new $&javaclassname($jniinput, false)"
+%typemap(javadirectorout) SWIGTYPE "$&javaclassname.getCPtr($javacall)"
+
+/* Generic pointers and references */
+%typemap(in) SWIGTYPE * %{ $1 = *($&1_ltype)&$input; %}
+%typemap(in, fragment="SWIG_UnPackData") SWIGTYPE (CLASS::*) {
+ const char *temp = 0;
+ if ($input) {
+ temp = JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!temp) return $null;
+ }
+ SWIG_UnpackData(temp, (void *)&$1, sizeof($1));
+}
+%typemap(in) SWIGTYPE & %{ $1 = *($&1_ltype)&$input;
+ if (!$1) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "$1_type reference is null");
+ return $null;
+ } %}
+%typemap(out) SWIGTYPE *
+%{ *($&1_ltype)&$result = $1; %}
+%typemap(out, fragment="SWIG_PackData", noblock=1) SWIGTYPE (CLASS::*) {
+ char buf[128];
+ char *data = SWIG_PackData(buf, (void *)&$1, sizeof($1));
+ *data = '\0';
+ $result = JCALL1(NewStringUTF, jenv, buf);
+}
+%typemap(out) SWIGTYPE &
+%{ *($&1_ltype)&$result = $1; %}
+
+%typemap(directorout, warning=SWIGWARN_TYPEMAP_DIRECTOROUT_PTR_MSG) SWIGTYPE *
+%{ $result = *($&1_ltype)&$input; %}
+%typemap(directorout, warning=SWIGWARN_TYPEMAP_DIRECTOROUT_PTR_MSG) SWIGTYPE (CLASS::*)
+%{ $result = *($&1_ltype)&$input; %}
+
+%typemap(directorin,descriptor="L$packagepath/$javaclassname;") SWIGTYPE *
+%{ *(($&1_ltype)&$input) = ($1_ltype) $1; %}
+%typemap(directorin,descriptor="L$packagepath/$javaclassname;") SWIGTYPE (CLASS::*)
+%{ *(($&1_ltype)&$input) = ($1_ltype) $1; %}
+
+%typemap(directorout, warning=SWIGWARN_TYPEMAP_DIRECTOROUT_PTR_MSG) SWIGTYPE &
+%{ if (!$input) {
+ SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "Unexpected null return for type $1_type");
+ return $null;
+ }
+ $result = *($&1_ltype)&$input; %}
+%typemap(directorin,descriptor="L$packagepath/$javaclassname;") SWIGTYPE &
+%{ *($&1_ltype)&$input = ($1_ltype) &$1; %}
+
+%typemap(javadirectorin) SWIGTYPE *, SWIGTYPE (CLASS::*) "($jniinput == 0) ? null : new $javaclassname($jniinput, false)"
+%typemap(javadirectorin) SWIGTYPE & "new $javaclassname($jniinput, false)"
+%typemap(javadirectorout) SWIGTYPE *, SWIGTYPE (CLASS::*), SWIGTYPE & "$javaclassname.getCPtr($javacall)"
+
+/* Default array handling */
+%typemap(in) SWIGTYPE [] %{ $1 = *($&1_ltype)&$input; %}
+%typemap(out) SWIGTYPE [] %{ *($&1_ltype)&$result = $1; %}
+%typemap(freearg) SWIGTYPE [ANY], SWIGTYPE [] ""
+
+/* char arrays - treat as String */
+%typemap(in, noblock=1) char[ANY], char[] {
+ $1 = 0;
+ if ($input) {
+ $1 = ($1_ltype)JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!$1) return $null;
+ }
+}
+
+%typemap(directorout, noblock=1) char[ANY], char[] {
+ $1 = 0;
+ if ($input) {
+ $result = ($1_ltype)JCALL2(GetStringUTFChars, jenv, $input, 0);
+ if (!$result) return $null;
+ }
+}
+
+%typemap(directorin, descriptor="Ljava/lang/String;", noblock=1) char[ANY], char[] {
+ $input = 0;
+ if ($1) {
+ $input = JCALL1(NewStringUTF, jenv, (const char *)$1);
+ if (!$input) return $null;
+ }
+}
+
+%typemap(argout) char[ANY], char[] ""
+%typemap(freearg, noblock=1) char[ANY], char[] { if ($1) JCALL2(ReleaseStringUTFChars, jenv, $input, (const char *)$1); }
+%typemap(out, noblock=1) char[ANY], char[] { if ($1) $result = JCALL1(NewStringUTF, jenv, (const char *)$1); }
+%typemap(javadirectorin) char[ANY], char[] "$jniinput"
+%typemap(javadirectorout) char[ANY], char[] "$javacall"
+
+/* JNI types */
+%typemap(in) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+%{ $1 = $input; %}
+
+%typemap(directorout) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+%{ $result = $input; %}
+
+%typemap(out) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+%{ $result = $1; %}
+
+%typemap(directorin,descriptor="Z") jboolean "$input = $1;"
+%typemap(directorin,descriptor="C") jchar "$input = $1;"
+%typemap(directorin,descriptor="B") jbyte "$input = $1;"
+%typemap(directorin,descriptor="S") jshort "$input = $1;"
+%typemap(directorin,descriptor="I") jint "$input = $1;"
+%typemap(directorin,descriptor="J") jlong "$input = $1;"
+%typemap(directorin,descriptor="F") jfloat "$input = $1;"
+%typemap(directorin,descriptor="D") jdouble "$input = $1;"
+%typemap(directorin,descriptor="Ljava/lang/String;") jstring "$input = $1;"
+%typemap(directorin,descriptor="Ljava/lang/Object;",nouse="1") jobject "$input = $1;"
+%typemap(directorin,descriptor="[Z") jbooleanArray "$input = $1;"
+%typemap(directorin,descriptor="[C") jcharArray "$input = $1;"
+%typemap(directorin,descriptor="[B") jbyteArray "$input = $1;"
+%typemap(directorin,descriptor="[S") jshortArray "$input = $1;"
+%typemap(directorin,descriptor="[I") jintArray "$input = $1;"
+%typemap(directorin,descriptor="[J") jlongArray "$input = $1;"
+%typemap(directorin,descriptor="[F") jfloatArray "$input = $1;"
+%typemap(directorin,descriptor="[D") jdoubleArray "$input = $1;"
+%typemap(directorin,descriptor="[Ljava/lang/Object;",nouse="1") jobjectArray "$input = $1;"
+
+%typemap(javadirectorin) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+ "$jniinput"
+
+%typemap(javadirectorout) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+ "$javacall"
+
+/* Typecheck typemaps - The purpose of these is merely to issue a warning for overloaded C++ functions
+ * that cannot be overloaded in Java as more than one C++ type maps to a single Java type */
+
+%typecheck(SWIG_TYPECHECK_BOOL) /* Java boolean */
+ jboolean,
+ bool,
+ const bool &
+ ""
+
+%typecheck(SWIG_TYPECHECK_CHAR) /* Java char */
+ jchar,
+ char,
+ const char &
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT8) /* Java byte */
+ jbyte,
+ signed char,
+ const signed char &
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT16) /* Java short */
+ jshort,
+ unsigned char,
+ short,
+ const unsigned char &,
+ const short &
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT32) /* Java int */
+ jint,
+ unsigned short,
+ int,
+ long,
+ const unsigned short &,
+ const int &,
+ const long &
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT64) /* Java long */
+ jlong,
+ unsigned int,
+ unsigned long,
+ long long,
+ const unsigned int &,
+ const unsigned long &,
+ const long long &
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT128) /* Java BigInteger */
+ unsigned long long,
+ const unsigned long long &
+ ""
+
+%typecheck(SWIG_TYPECHECK_FLOAT) /* Java float */
+ jfloat,
+ float,
+ const float &
+ ""
+
+%typecheck(SWIG_TYPECHECK_DOUBLE) /* Java double */
+ jdouble,
+ double,
+ const double &
+ ""
+
+%typecheck(SWIG_TYPECHECK_STRING) /* Java String */
+ jstring,
+ char *,
+ char *&,
+ char[ANY],
+ char []
+ ""
+
+%typecheck(SWIG_TYPECHECK_BOOL_ARRAY) /* Java boolean[] */
+ jbooleanArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_CHAR_ARRAY) /* Java char[] */
+ jcharArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT8_ARRAY) /* Java byte[] */
+ jbyteArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT16_ARRAY) /* Java short[] */
+ jshortArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT32_ARRAY) /* Java int[] */
+ jintArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_INT64_ARRAY) /* Java long[] */
+ jlongArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_FLOAT_ARRAY) /* Java float[] */
+ jfloatArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY) /* Java double[] */
+ jdoubleArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_OBJECT_ARRAY) /* Java jobject[] */
+ jobjectArray
+ ""
+
+%typecheck(SWIG_TYPECHECK_POINTER) /* Default */
+ SWIGTYPE,
+ SWIGTYPE *,
+ SWIGTYPE &,
+ SWIGTYPE *const&,
+ SWIGTYPE [],
+ SWIGTYPE (CLASS::*)
+ ""
+
+
+/* Exception handling */
+
+%typemap(throws) int,
+ long,
+ short,
+ unsigned int,
+ unsigned long,
+ unsigned short
+%{ char error_msg[256];
+ sprintf(error_msg, "C++ $1_type exception thrown, value: %d", $1);
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, error_msg);
+ return $null; %}
+
+%typemap(throws) SWIGTYPE, SWIGTYPE &, SWIGTYPE *, SWIGTYPE [], SWIGTYPE [ANY]
+%{ (void)$1;
+ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, "C++ $1_type exception thrown");
+ return $null; %}
+
+%typemap(throws) char *
+%{ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1);
+ return $null; %}
+
+
+/* Typemaps for code generation in proxy classes and Java type wrapper classes */
+
+/* The javain typemap is used for converting function parameter types from the type
+ * used in the proxy, module or type wrapper class to the type used in the JNI class. */
+%typemap(javain) bool, const bool &,
+ char, const char &,
+ signed char, const signed char &,
+ unsigned char, const unsigned char &,
+ short, const short &,
+ unsigned short, const unsigned short &,
+ int, const int &,
+ unsigned int, const unsigned int &,
+ long, const long &,
+ unsigned long, const unsigned long &,
+ long long, const long long &,
+ unsigned long long, const unsigned long long &,
+ float, const float &,
+ double, const double &
+ "$javainput"
+%typemap(javain) char *, char *&, char[ANY], char[] "$javainput"
+%typemap(javain) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray
+ "$javainput"
+%typemap(javain) SWIGTYPE "$&javaclassname.getCPtr($javainput)"
+%typemap(javain) SWIGTYPE *, SWIGTYPE &, SWIGTYPE [] "$javaclassname.getCPtr($javainput)"
+%typemap(javain) SWIGTYPE (CLASS::*) "$javaclassname.getCMemberPtr($javainput)"
+
+/* The javaout typemap is used for converting function return types from the return type
+ * used in the JNI class to the type returned by the proxy, module or type wrapper class. */
+%typemap(javaout) bool, const bool &,
+ char, const char &,
+ signed char, const signed char &,
+ unsigned char, const unsigned char &,
+ short, const short &,
+ unsigned short, const unsigned short &,
+ int, const int &,
+ unsigned int, const unsigned int &,
+ long, const long &,
+ unsigned long, const unsigned long &,
+ long long, const long long &,
+ unsigned long long, const unsigned long long &,
+ float, const float &,
+ double, const double & {
+ return $jnicall;
+ }
+%typemap(javaout) char *, char *&, char[ANY], char[] {
+ return $jnicall;
+ }
+%typemap(javaout) jboolean,
+ jchar,
+ jbyte,
+ jshort,
+ jint,
+ jlong,
+ jfloat,
+ jdouble,
+ jstring,
+ jobject,
+ jbooleanArray,
+ jcharArray,
+ jbyteArray,
+ jshortArray,
+ jintArray,
+ jlongArray,
+ jfloatArray,
+ jdoubleArray,
+ jobjectArray {
+ return $jnicall;
+ }
+%typemap(javaout) void {
+ $jnicall;
+ }
+%typemap(javaout) SWIGTYPE {
+ return new $&javaclassname($jnicall, true);
+ }
+%typemap(javaout) SWIGTYPE & {
+ return new $javaclassname($jnicall, $owner);
+ }
+%typemap(javaout) SWIGTYPE *, SWIGTYPE [] {
+ long cPtr = $jnicall;
+ return (cPtr == 0) ? null : new $javaclassname(cPtr, $owner);
+ }
+%typemap(javaout) SWIGTYPE (CLASS::*) {
+ String cMemberPtr = $jnicall;
+ return (cMemberPtr == null) ? null : new $javaclassname(cMemberPtr, $owner);
+ }
+
+/* Pointer reference typemaps */
+%typemap(jni) SWIGTYPE *const& "jlong"
+%typemap(jtype) SWIGTYPE *const& "long"
+%typemap(jstype) SWIGTYPE *const& "$*javaclassname"
+%typemap(javain) SWIGTYPE *const& "$*javaclassname.getCPtr($javainput)"
+%typemap(javaout) SWIGTYPE *const& {
+ long cPtr = $jnicall;
+ return (cPtr == 0) ? null : new $*javaclassname(cPtr, $owner);
+ }
+%typemap(in) SWIGTYPE *const& ($*1_ltype temp = 0)
+%{ temp = *($1_ltype)&$input;
+ $1 = ($1_ltype)&temp; %}
+%typemap(out) SWIGTYPE *const&
+%{ *($1_ltype)&$result = *$1; %}
+
+/* Typemaps used for the generation of proxy and type wrapper class code */
+%typemap(javabase) SWIGTYPE, SWIGTYPE *, SWIGTYPE &, SWIGTYPE [], SWIGTYPE (CLASS::*) ""
+%typemap(javaclassmodifiers) SWIGTYPE, SWIGTYPE *, SWIGTYPE &, SWIGTYPE [], SWIGTYPE (CLASS::*) "public class"
+%typemap(javacode) SWIGTYPE, SWIGTYPE *, SWIGTYPE &, SWIGTYPE [], SWIGTYPE (CLASS::*) ""
+%typemap(javaimports) SWIGTYPE, SWIGTYPE *, SWIGTYPE &, SWIGTYPE [], SWIGTYPE (CLASS::*) ""
+%typemap(javainterfaces) SWIGTYPE, SWIGTYPE *, SWIGTYPE &, SWIGTYPE [], SWIGTYPE (CLASS::*) ""
+
+/* javabody typemaps */
+
+%define SWIG_JAVABODY_METHODS(PTRCTOR_VISIBILITY, CPTR_VISIBILITY, TYPE...) SWIG_JAVABODY_PROXY(PTRCTOR_VISIBILITY, CPTR_VISIBILITY, TYPE) %enddef // legacy name
+
+%define SWIG_JAVABODY_PROXY(PTRCTOR_VISIBILITY, CPTR_VISIBILITY, TYPE...)
+// Base proxy classes
+%typemap(javabody) TYPE %{
+ private long swigCPtr;
+ protected boolean swigCMemOwn;
+
+ PTRCTOR_VISIBILITY $javaclassname(long cPtr, boolean cMemoryOwn) {
+ swigCMemOwn = cMemoryOwn;
+ swigCPtr = cPtr;
+ }
+
+ CPTR_VISIBILITY static long getCPtr($javaclassname obj) {
+ return (obj == null) ? 0 : obj.swigCPtr;
+ }
+%}
+
+// Derived proxy classes
+%typemap(javabody_derived) TYPE %{
+ private long swigCPtr;
+
+ PTRCTOR_VISIBILITY $javaclassname(long cPtr, boolean cMemoryOwn) {
+ super($imclassname.$javaclazznameSWIGUpcast(cPtr), cMemoryOwn);
+ swigCPtr = cPtr;
+ }
+
+ CPTR_VISIBILITY static long getCPtr($javaclassname obj) {
+ return (obj == null) ? 0 : obj.swigCPtr;
+ }
+%}
+%enddef
+
+%define SWIG_JAVABODY_TYPEWRAPPER(PTRCTOR_VISIBILITY, DEFAULTCTOR_VISIBILITY, CPTR_VISIBILITY, TYPE...)
+// Typewrapper classes
+%typemap(javabody) TYPE *, TYPE &, TYPE [] %{
+ private long swigCPtr;
+
+ PTRCTOR_VISIBILITY $javaclassname(long cPtr, boolean futureUse) {
+ swigCPtr = cPtr;
+ }
+
+ DEFAULTCTOR_VISIBILITY $javaclassname() {
+ swigCPtr = 0;
+ }
+
+ CPTR_VISIBILITY static long getCPtr($javaclassname obj) {
+ return (obj == null) ? 0 : obj.swigCPtr;
+ }
+%}
+
+%typemap(javabody) TYPE (CLASS::*) %{
+ private String swigCMemberPtr;
+
+ PTRCTOR_VISIBILITY $javaclassname(String cMemberPtr, boolean futureUse) {
+ swigCMemberPtr = cMemberPtr;
+ }
+
+ DEFAULTCTOR_VISIBILITY $javaclassname() {
+ swigCMemberPtr = null;
+ }
+
+ CPTR_VISIBILITY static String getCMemberPtr($javaclassname obj) {
+ return obj.swigCMemberPtr;
+ }
+%}
+%enddef
+
+/* Set the default javabody typemaps to use protected visibility.
+ Use the macros to change to public if using multiple modules. */
+SWIG_JAVABODY_PROXY(protected, protected, SWIGTYPE)
+SWIG_JAVABODY_TYPEWRAPPER(protected, protected, protected, SWIGTYPE)
+
+%typemap(javafinalize) SWIGTYPE %{
+ protected void finalize() {
+ delete();
+ }
+%}
+
+/*
+ * Java constructor typemaps:
+ *
+ * The javaconstruct typemap is inserted when a proxy class's constructor is generated.
+ * This typemap allows control over what code is executed in the constructor as
+ * well as specifying who owns the underlying C/C++ object. Normally, Java has
+ * ownership and the underlying C/C++ object is deallocated when the Java object
+ * is finalized (swigCMemOwn is true.) If swigCMemOwn is false, C/C++ is
+ * ultimately responsible for deallocating the underlying object's memory.
+ *
+ * The SWIG_PROXY_CONSTRUCTOR macro defines the javaconstruct typemap for a proxy
+ * class for a particular TYPENAME. OWNERSHIP is passed as the value of
+ * swigCMemOwn to the pointer constructor method. WEAKREF determines which kind
+ * of Java object reference will be used by the C++ director class (WeakGlobalRef
+ * vs. GlobalRef.)
+ *
+ * The SWIG_DIRECTOR_OWNED macro sets the ownership of director-based proxy
+ * classes and the weak reference flag to false, meaning that the underlying C++
+ * object will be reclaimed by C++.
+ */
+
+%define SWIG_PROXY_CONSTRUCTOR(OWNERSHIP, WEAKREF, TYPENAME...)
+%typemap(javaconstruct,directorconnect="\n $imclassname.$javaclazznamedirector_connect(this, swigCPtr, swigCMemOwn, WEAKREF);") TYPENAME {
+ this($imcall, OWNERSHIP);$directorconnect
+ }
+%enddef
+
+%define SWIG_DIRECTOR_OWNED(TYPENAME...)
+SWIG_PROXY_CONSTRUCTOR(true, false, TYPENAME)
+%enddef
+
+// Set the default for SWIGTYPE: Java owns the C/C++ object.
+SWIG_PROXY_CONSTRUCTOR(true, true, SWIGTYPE)
+
+%typemap(javadestruct, methodname="delete", methodmodifiers="public synchronized") SWIGTYPE {
+ if (swigCPtr != 0) {
+ if (swigCMemOwn) {
+ swigCMemOwn = false;
+ $jnicall;
+ }
+ swigCPtr = 0;
+ }
+ }
+
+%typemap(javadestruct_derived, methodname="delete", methodmodifiers="public synchronized") SWIGTYPE {
+ if (swigCPtr != 0) {
+ if (swigCMemOwn) {
+ swigCMemOwn = false;
+ $jnicall;
+ }
+ swigCPtr = 0;
+ }
+ super.delete();
+ }
+
+%typemap(directordisconnect, methodname="swigDirectorDisconnect") SWIGTYPE %{
+ protected void $methodname() {
+ swigCMemOwn = false;
+ $jnicall;
+ }
+%}
+
+%typemap(directorowner_release, methodname="swigReleaseOwnership") SWIGTYPE %{
+ public void $methodname() {
+ swigCMemOwn = false;
+ $jnicall;
+ }
+%}
+
+%typemap(directorowner_take, methodname="swigTakeOwnership") SWIGTYPE %{
+ public void $methodname() {
+ swigCMemOwn = true;
+ $jnicall;
+ }
+%}
+
+/* Java specific directives */
+#define %javaconst(flag) %feature("java:const","flag")
+#define %javaconstvalue(value) %feature("java:constvalue",value)
+#define %javaenum(wrapapproach) %feature("java:enum","wrapapproach")
+#define %javamethodmodifiers %feature("java:methodmodifiers")
+#define %javaexception(exceptionclasses) %feature("except",throws=exceptionclasses)
+#define %nojavaexception %feature("except","0",throws="")
+#define %clearjavaexception %feature("except","",throws="")
+
+%pragma(java) jniclassclassmodifiers="public class"
+%pragma(java) moduleclassmodifiers="public class"
+
+/* Some ANSI C typemaps */
+
+%apply unsigned long { size_t };
+%apply const unsigned long & { const size_t & };
+
+/* Array reference typemaps */
+%apply SWIGTYPE & { SWIGTYPE ((&)[ANY]) }
+
+/* const pointers */
+%apply SWIGTYPE * { SWIGTYPE *const }
+
+/* String & length */
+%typemap(jni) (char *STRING, size_t LENGTH) "jbyteArray"
+%typemap(jtype) (char *STRING, size_t LENGTH) "byte[]"
+%typemap(jstype) (char *STRING, size_t LENGTH) "byte[]"
+%typemap(javain) (char *STRING, size_t LENGTH) "$javainput"
+%typemap(freearg) (char *STRING, size_t LENGTH) ""
+%typemap(in) (char *STRING, size_t LENGTH) {
+ if ($input) {
+ $1 = (char *) JCALL2(GetByteArrayElements, jenv, $input, 0);
+ $2 = (size_t) JCALL1(GetArrayLength, jenv, $input);
+ } else {
+ $1 = 0;
+ $2 = 0;
+ }
+}
+%typemap(argout) (char *STRING, size_t LENGTH) {
+ if ($input) JCALL3(ReleaseByteArrayElements, jenv, $input, (jbyte *)$1, 0);
+}
+%typemap(directorin, descriptor="[B") (char *STRING, size_t LENGTH) {
+ jbyteArray jb = (jenv)->NewByteArray($2);
+ (jenv)->SetByteArrayRegion(jb, 0, $2, (jbyte *)$1);
+ $input = jb;
+}
+%typemap(directorargout) (char *STRING, size_t LENGTH)
+%{(jenv)->GetByteArrayRegion($input, 0, $2, (jbyte *)$1); %}
+%apply (char *STRING, size_t LENGTH) { (char *STRING, int LENGTH) }
+
+/* java keywords */
+%include <javakw.swg>
+
+// Default enum handling
+%include <enumtypesafe.swg>
+
diff --git a/tests/examplefiles/swig_std_vector.i b/tests/examplefiles/swig_std_vector.i
new file mode 100644
index 00000000..baecf850
--- /dev/null
+++ b/tests/examplefiles/swig_std_vector.i
@@ -0,0 +1,225 @@
+//
+// std::vector
+//
+
+%include <std_container.i>
+
+// Vector
+
+%define %std_vector_methods(vector...)
+ %std_sequence_methods(vector)
+
+ void reserve(size_type n);
+ size_type capacity() const;
+%enddef
+
+
+%define %std_vector_methods_val(vector...)
+ %std_sequence_methods_val(vector)
+
+ void reserve(size_type n);
+ size_type capacity() const;
+%enddef
+
+
+// ------------------------------------------------------------------------
+// std::vector
+//
+// The aim of all that follows would be to integrate std::vector with
+// as much as possible, namely, to allow the user to pass and
+// be returned tuples or lists.
+// const declarations are used to guess the intent of the function being
+// exported; therefore, the following rationale is applied:
+//
+// -- f(std::vector<T>), f(const std::vector<T>&):
+// the parameter being read-only, either a sequence or a
+// previously wrapped std::vector<T> can be passed.
+// -- f(std::vector<T>&), f(std::vector<T>*):
+// the parameter may be modified; therefore, only a wrapped std::vector
+// can be passed.
+// -- std::vector<T> f(), const std::vector<T>& f():
+// the vector is returned by copy; therefore, a sequence of T:s
+// is returned which is most easily used in other functions
+// -- std::vector<T>& f(), std::vector<T>* f():
+// the vector is returned by reference; therefore, a wrapped std::vector
+// is returned
+// -- const std::vector<T>* f(), f(const std::vector<T>*):
+// for consistency, they expect and return a plain vector pointer.
+// ------------------------------------------------------------------------
+
+%{
+#include <vector>
+%}
+
+// exported classes
+
+
+namespace std {
+
+ template<class _Tp, class _Alloc = allocator< _Tp > >
+ class vector {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Alloc allocator_type;
+
+ %traits_swigtype(_Tp);
+ %traits_enum(_Tp);
+
+ %fragment(SWIG_Traits_frag(std::vector<_Tp, _Alloc >), "header",
+ fragment=SWIG_Traits_frag(_Tp),
+ fragment="StdVectorTraits") {
+ namespace swig {
+ template <> struct traits<std::vector<_Tp, _Alloc > > {
+ typedef pointer_category category;
+ static const char* type_name() {
+ return "std::vector<" #_Tp "," #_Alloc " >";
+ }
+ };
+ }
+ }
+
+ %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector<_Tp, _Alloc >);
+
+#ifdef %swig_vector_methods
+ // Add swig/language extra methods
+ %swig_vector_methods(std::vector<_Tp, _Alloc >);
+#endif
+
+ %std_vector_methods(vector);
+ };
+
+ // ***
+ // This specialization should disappear or get simplified when
+ // a 'const SWIGTYPE*&' can be defined
+ // ***
+ template<class _Tp, class _Alloc >
+ class vector<_Tp*, _Alloc > {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type reference;
+ typedef value_type const_reference;
+ typedef _Alloc allocator_type;
+
+ %traits_swigtype(_Tp);
+
+ %fragment(SWIG_Traits_frag(std::vector<_Tp*, _Alloc >), "header",
+ fragment=SWIG_Traits_frag(_Tp),
+ fragment="StdVectorTraits") {
+ namespace swig {
+ template <> struct traits<std::vector<_Tp*, _Alloc > > {
+ typedef value_category category;
+ static const char* type_name() {
+ return "std::vector<" #_Tp " *," #_Alloc " >";
+ }
+ };
+ }
+ }
+
+ %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector<_Tp*, _Alloc >);
+
+#ifdef %swig_vector_methods_val
+ // Add swig/language extra methods
+ %swig_vector_methods_val(std::vector<_Tp*, _Alloc >);
+#endif
+
+ %std_vector_methods_val(vector);
+ };
+
+ // ***
+ // const pointer specialization
+ // ***
+ template<class _Tp, class _Alloc >
+ class vector<_Tp const *, _Alloc > {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp const * value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type reference;
+ typedef value_type const_reference;
+ typedef _Alloc allocator_type;
+
+ %traits_swigtype(_Tp);
+
+ %fragment(SWIG_Traits_frag(std::vector<_Tp const*, _Alloc >), "header",
+ fragment=SWIG_Traits_frag(_Tp),
+ fragment="StdVectorTraits") {
+ namespace swig {
+ template <> struct traits<std::vector<_Tp const*, _Alloc > > {
+ typedef value_category category;
+ static const char* type_name() {
+ return "std::vector<" #_Tp " const*," #_Alloc " >";
+ }
+ };
+ }
+ }
+
+ %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector<_Tp const*, _Alloc >);
+
+#ifdef %swig_vector_methods_val
+ // Add swig/language extra methods
+ %swig_vector_methods_val(std::vector<_Tp const*, _Alloc >);
+#endif
+
+ %std_vector_methods_val(vector);
+ };
+
+ // ***
+ // bool specialization
+ // ***
+
+ template<class _Alloc >
+ class vector<bool,_Alloc > {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef bool value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type reference;
+ typedef value_type const_reference;
+ typedef _Alloc allocator_type;
+
+ %traits_swigtype(bool);
+
+ %fragment(SWIG_Traits_frag(std::vector<bool, _Alloc >), "header",
+ fragment=SWIG_Traits_frag(bool),
+ fragment="StdVectorTraits") {
+ namespace swig {
+ template <> struct traits<std::vector<bool, _Alloc > > {
+ typedef value_category category;
+ static const char* type_name() {
+ return "std::vector<bool, _Alloc >";
+ }
+ };
+ }
+ }
+
+ %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector<bool, _Alloc >);
+
+
+#ifdef %swig_vector_methods_val
+ // Add swig/language extra methods
+ %swig_vector_methods_val(std::vector<bool, _Alloc >);
+#endif
+
+ %std_vector_methods_val(vector);
+
+#if defined(SWIG_STD_MODERN_STL) && !defined(SWIG_STD_NOMODERN_STL)
+ void flip();
+#endif
+
+ };
+
+}
diff --git a/tests/examplefiles/test.agda b/tests/examplefiles/test.agda
new file mode 100644
index 00000000..d930a77b
--- /dev/null
+++ b/tests/examplefiles/test.agda
@@ -0,0 +1,102 @@
+-- An Agda example file
+
+module test where
+
+open import Coinduction
+open import Data.Bool
+open import {- pointless comment between import and module name -} Data.Char
+open import Data.Nat
+open import Data.Nat.Properties
+open import Data.String
+open import Data.List hiding ([_])
+open import Data.Vec hiding ([_])
+open import Relation.Nullary.Core
+open import Relation.Binary.PropositionalEquality using (_≡_; refl; cong; trans; inspect; [_])
+
+open SemiringSolver
+
+{- this is a {- nested -} comment -}
+
+-- Factorial
+_! : ℕ → ℕ
+0 ! = 1
+(suc n) ! = (suc n) * n !
+
+-- The binomial coefficient
+_choose_ : ℕ → ℕ → ℕ
+_ choose 0 = 1
+0 choose _ = 0
+(suc n) choose (suc m) = (n choose m) + (n choose (suc m)) -- Pascal's rule
+
+choose-too-many : ∀ n m → n ≤ m → n choose (suc m) ≡ 0
+choose-too-many .0 m z≤n = refl
+choose-too-many (suc n) (suc m) (s≤s le) with n choose (suc m) | choose-too-many n m le | n choose (suc (suc m)) | choose-too-many n (suc m) (≤-step le)
+... | .0 | refl | .0 | refl = refl
+
+_++'_ : ∀ {a n m} {A : Set a} → Vec A n → Vec A m → Vec A (m + n)
+_++'_ {_} {n} {m} v₁ v₂ rewrite solve 2 (λ a b → b :+ a := a :+ b) refl n m = v₁ Data.Vec.++ v₂
+
+++'-test : (1 ∷ 2 ∷ 3 ∷ []) ++' (4 ∷ 5 ∷ []) ≡ (1 ∷ 2 ∷ 3 ∷ 4 ∷ 5 ∷ [])
+++'-test = refl
+
+data Coℕ : Set where
+ co0 : Coℕ
+ cosuc : ∞ Coℕ → Coℕ
+
+nanana : Coℕ
+nanana = let two = ♯ cosuc (♯ (cosuc (♯ co0))) in cosuc two
+
+abstract
+ data VacuumCleaner : Set where
+ Roomba : VacuumCleaner
+
+pointlessLemmaAboutBoolFunctions : (f : Bool → Bool) → f (f (f true)) ≡ f true
+pointlessLemmaAboutBoolFunctions f with f true | inspect f true
+... | true | [ eq₁ ] = trans (cong f eq₁) eq₁
+... | false | [ eq₁ ] with f false | inspect f false
+... | true | _ = eq₁
+... | false | [ eq₂ ] = eq₂
+
+mutual
+ isEven : ℕ → Bool
+ isEven 0 = true
+ isEven (suc n) = not (isOdd n)
+
+ isOdd : ℕ → Bool
+ isOdd 0 = false
+ isOdd (suc n) = not (isEven n)
+
+foo : String
+foo = "Hello World!"
+
+nl : Char
+nl = '\n'
+
+private
+ intersperseString : Char → List String → String
+ intersperseString c [] = ""
+ intersperseString c (x ∷ xs) = Data.List.foldl (λ a b → a Data.String.++ Data.String.fromList (c ∷ []) Data.String.++ b) x xs
+
+baz : String
+baz = intersperseString nl (Data.List.replicate 5 foo)
+
+postulate
+ Float : Set
+
+{-# BUILTIN FLOAT Float #-}
+
+pi : Float
+pi = 3.141593
+
+-- Astronomical unit
+au : Float
+au = 1.496e11 -- m
+
+plusFloat : Float → Float → Float
+plusFloat a b = {! !}
+
+record Subset (A : Set) (P : A → Set) : Set where
+ constructor _#_
+ field
+ elem : A
+ .proof : P elem
diff --git a/tests/examplefiles/test.apl b/tests/examplefiles/test.apl
new file mode 100644
index 00000000..26ecf971
--- /dev/null
+++ b/tests/examplefiles/test.apl
@@ -0,0 +1,26 @@
+∇ R←M COMBIN N;D;E;F;G;P
+ ⍝ Returns a matrix of every possible
+ ⍝ combination of M elements from the
+ ⍝ vector ⍳N. That is, returns a
+ ⍝ matrix with M!N rows and N columns.
+ ⍝
+ E←(⍳P←N-R←M-1)-⎕IO
+ D←R+⍳P
+ R←(P,1)⍴D
+ P←P⍴1
+ L1:→(⎕IO>1↑D←D-1)⍴0
+ P←+\P
+ G←+\¯1↓0,F←⌽P
+ E←F/E-G
+ R←(F/D),R[E+⍳⍴E;]
+ E←G
+ →L1
+∇
+
+∇ R←M QUICKEXP N
+ ⍝ Matrix exponentiation
+ B ← ⌊ 1 + 2 ⍟ N
+ V ← (B ⍴ 2) ⊤ N
+ L ← ⊂ M
+ R ← ⊃ +.× / V / L ⊣ { L ← (⊂ A +.× A ← ↑L) , L }¨ ⍳ B-1
+∇
diff --git a/tests/examplefiles/test.bb b/tests/examplefiles/test.bb
new file mode 100644
index 00000000..026ef22a
--- /dev/null
+++ b/tests/examplefiles/test.bb
@@ -0,0 +1,95 @@
+
+;foobar!
+
+;Include "blurg/blurg.bb"
+
+Const ca = $10000000 ; Hex
+Const cb = %10101010 ; Binary
+Global ga$ = "blargh"
+Local a = 124, b$ = "abcdef"
+
+Function name_123#(zorp$, ll = False, blah#, waffles% = 100)
+ Return 235.7804 ; comment
+End Function
+Function TestString$()
+End Function
+
+Function hub(blah$, abc = Pi)
+End Function
+Function Blar%()
+ Local aa %, ab # ,ac #, ad# ,ae$,af% ; Intentional mangling
+ Local ba#, bb.TBlarf , bc%,bd#,be. TFooBar,ff = True
+End Function
+
+abc()
+
+Function abc()
+ Print "abc" ; I cannot find a way to parse these as function calls without messing something up
+ Print ; Anyhow, they're generally not used in this way
+ Goto Eww_Goto
+ .Eww_Goto
+End Function
+
+Type TBlarf
+End Type
+
+Type TFooBar
+End Type
+
+Local myinst.MyClass = New MyClass
+TestMethod(myinst)
+
+Type MyClass
+
+ Field m_foo.MyClass
+ Field m_bar.MyClass
+
+; abc
+; def
+End Type
+
+Function TestMethod(self.MyClass) ; foobar
+ self\m_foo = self
+ self\m_bar = Object.MyClass(Handle self\m_foo)
+ Yell self\m_foo\m_bar\m_foo\m_bar
+End Function
+
+Function Yell(self.MyClass)
+ Print("huzzah!")
+End Function
+
+Function Wakka$(foo$)
+ Return foo + "bar"
+End Function
+
+
+Print("blah " + "blah " + "blah.")
+
+Local i : For i = 0 To 10 Step 1
+ Print("Index: " + i)
+Next
+Local array$[5]
+array[0] = "foo": array[1] = "bar":array[2] = "11":array[3] = "22":array[4] = "33"
+For i = 0 To 4
+ Local value$ = array[i]
+ Print("Value: " + value)
+Next
+
+Local foobar = Not (1 Or (2 And (4 Shl 5 Shr 6)) Sar 7) Mod (8+2)
+Local az = 1234567890
+az = az + 1
+az = az - 2
+az = az* 3
+az = az/ 4
+az = az And 5
+az = az Or 6
+az= ~ 7
+az = az Shl 8
+az= az Shr 9
+az = az Sar 10
+az = az Mod 11
+az = ((10-5+2/4*2)>(((8^2)) < 2)) And 12 Or 2
+
+
+;~IDEal Editor Parameters:
+;~C#Blitz3D \ No newline at end of file
diff --git a/tests/examplefiles/test.ebnf b/tests/examplefiles/test.ebnf
new file mode 100644
index 00000000..a96171b0
--- /dev/null
+++ b/tests/examplefiles/test.ebnf
@@ -0,0 +1,31 @@
+letter = "A" | "B" | "C" | "D" | "E" | "F" | "G"
+ | "H" | "I" | "J" | "K" | "L" | "M" | "N"
+ | "O" | "P" | "Q" | "R" | "S" | "T" | "U"
+ | "V" | "W" | "X" | "Y" | "Z" ;
+digit = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" ;
+symbol = "[" | "]" | "{" | "}" | "(" | ")" | "<" | ">"
+ | "'" | '"' | "=" | "|" | "." | "," | ";" ;
+character = letter | digit | symbol | " " ;
+
+identifier = letter , { letter | digit | " " } ;
+terminal = "'" , character , { character } , "'"
+ | '"' , character , { character } , '"' ;
+
+special = "?" , any , "?" ;
+
+comment = (* this is a comment "" *) "(*" , any-symbol , "*)" ;
+any-symbol = ? any visible character ? ; (* ? ... ? *)
+
+lhs = identifier ;
+rhs = identifier
+ | terminal
+ | comment , rhs
+ | rhs , comment
+ | "[" , rhs , "]"
+ | "{" , rhs , "}"
+ | "(" , rhs , ")"
+ | rhs , "|" , rhs
+ | rhs , "," , rhs ;
+
+rule = lhs , "=" , rhs , ";" | comment ;
+grammar = { rule } ;
diff --git a/tests/examplefiles/test.idr b/tests/examplefiles/test.idr
new file mode 100644
index 00000000..f0e96d88
--- /dev/null
+++ b/tests/examplefiles/test.idr
@@ -0,0 +1,93 @@
+module Main
+
+data Ty = TyInt | TyBool | TyFun Ty Ty
+
+interpTy : Ty -> Type
+interpTy TyInt = Int
+interpTy TyBool = Bool
+interpTy (TyFun s t) = interpTy s -> interpTy t
+
+using (G : Vect n Ty)
+
+ data Env : Vect n Ty -> Type where
+ Nil : Env Nil
+ (::) : interpTy a -> Env G -> Env (a :: G)
+
+ data HasType : (i : Fin n) -> Vect n Ty -> Ty -> Type where
+ stop : HasType fZ (t :: G) t
+ pop : HasType k G t -> HasType (fS k) (u :: G) t
+
+ lookup : HasType i G t -> Env G -> interpTy t
+ lookup stop (x :: xs) = x
+ lookup (pop k) (x :: xs) = lookup k xs
+
+ data Expr : Vect n Ty -> Ty -> Type where
+ Var : HasType i G t -> Expr G t
+ Val : (x : Int) -> Expr G TyInt
+ Lam : Expr (a :: G) t -> Expr G (TyFun a t)
+ App : Expr G (TyFun a t) -> Expr G a -> Expr G t
+ Op : (interpTy a -> interpTy b -> interpTy c) -> Expr G a -> Expr G b ->
+ Expr G c
+ If : Expr G TyBool -> Expr G a -> Expr G a -> Expr G a
+ Bind : Expr G a -> (interpTy a -> Expr G b) -> Expr G b
+
+ dsl expr
+ lambda = Lam
+ variable = Var
+ index_first = stop
+ index_next = pop
+
+ (<$>) : |(f : Expr G (TyFun a t)) -> Expr G a -> Expr G t
+ (<$>) = \f, a => App f a
+
+ pure : Expr G a -> Expr G a
+ pure = id
+
+ syntax IF [x] THEN [t] ELSE [e] = If x t e
+
+ (==) : Expr G TyInt -> Expr G TyInt -> Expr G TyBool
+ (==) = Op (==)
+
+ (<) : Expr G TyInt -> Expr G TyInt -> Expr G TyBool
+ (<) = Op (<)
+
+ instance Num (Expr G TyInt) where
+ (+) x y = Op (+) x y
+ (-) x y = Op (-) x y
+ (*) x y = Op (*) x y
+
+ abs x = IF (x < 0) THEN (-x) ELSE x
+
+ fromInteger = Val . fromInteger
+
+ interp : Env G -> {static} Expr G t -> interpTy t
+ interp env (Var i) = lookup i env
+ interp env (Val x) = x
+ interp env (Lam sc) = \x => interp (x :: env) sc
+ interp env (App f s) = (interp env f) (interp env s)
+ interp env (Op op x y) = op (interp env x) (interp env y)
+ interp env (If x t e) = if (interp env x) then (interp env t) else (interp env e)
+ interp env (Bind v f) = interp env (f (interp env v))
+
+ eId : Expr G (TyFun TyInt TyInt)
+ eId = expr (\x => x)
+
+ eTEST : Expr G (TyFun TyInt (TyFun TyInt TyInt))
+ eTEST = expr (\x, y => y)
+
+ eAdd : Expr G (TyFun TyInt (TyFun TyInt TyInt))
+ eAdd = expr (\x, y => Op (+) x y)
+
+ eDouble : Expr G (TyFun TyInt TyInt)
+ eDouble = expr (\x => App (App eAdd x) (Var stop))
+
+ eFac : Expr G (TyFun TyInt TyInt)
+ eFac = expr (\x => IF x == 0 THEN 1 ELSE [| eFac (x - 1) |] * x)
+
+testFac : Int
+testFac = interp [] eFac 4
+
+main : IO ()
+main = print testFac
+
+
diff --git a/tests/examplefiles/test.mask b/tests/examplefiles/test.mask
new file mode 100644
index 00000000..39134d74
--- /dev/null
+++ b/tests/examplefiles/test.mask
@@ -0,0 +1,41 @@
+
+// comment
+h4.class-1#id.class-2.other checked='true' disabled name = x param > 'Enter ..'
+input placeholder=Password type=password >
+ :dualbind x-signal='dom:create' value=user.passord;
+% each='flowers' >
+ div style='
+ position: absolute;
+ display: inline-block;
+ background: url("image.png") center center no-repeat;
+ ';
+#skippedDiv.other {
+ img src='~[url]';
+ div style="text-align:center;" {
+ '~[: $obj.foo("username", name) + 2]'
+ "~[Localize: stringId]"
+ }
+
+ p > """
+
+ Hello "world"
+ """
+
+ p > '
+ Hello "world"
+ '
+
+ p > "Hello 'world'"
+
+ :customComponent x-value='tt';
+ /* footer > '(c) 2014' */
+}
+
+.skippedDiv >
+ span >
+ #skipped >
+ table >
+ td >
+ tr > ';)'
+
+br; \ No newline at end of file
diff --git a/tests/examplefiles/test.p6 b/tests/examplefiles/test.p6
new file mode 100644
index 00000000..3d12b56c
--- /dev/null
+++ b/tests/examplefiles/test.p6
@@ -0,0 +1,252 @@
+#!/usr/bin/env perl6
+
+use v6;
+
+my $string = 'I look like a # comment!';
+
+if $string eq 'foo' {
+ say 'hello';
+}
+
+regex http-verb {
+ 'GET'
+ | 'POST'
+ | 'PUT'
+ | 'DELETE'
+ | 'TRACE'
+ | 'OPTIONS'
+ | 'HEAD'
+}
+
+# a sample comment
+
+say 'Hello from Perl 6!'
+
+
+#`{
+multi-line comment!
+}
+
+say 'here';
+
+#`(
+multi-line comment!
+)
+
+say 'here';
+
+#`{{{
+I'm a special comment!
+}}}
+
+say 'there';
+
+#`{{
+I'm { even } specialer!
+}}
+
+say 'there';
+
+#`{{
+does {{nesting}} work?
+}}
+
+#`«<
+trying mixed delimiters
+
+my $string = qq<Hooray, arbitrary delimiter!>;
+my $string = qq«Hooray, arbitrary delimiter!»;
+my $string = q <now with whitespace!>;
+my $string = qq<<more strings>>;
+
+my %hash := Hash.new;
+
+=begin pod
+
+Here's some POD! Wooo
+
+=end pod
+
+=for Testing
+ This is POD (see? role isn't highlighted)
+
+say('this is not!');
+
+=table
+ Of role things
+
+say('not in your table');
+#= A single line declarator "block" (with a keyword like role)
+#| Another single line declarator "block" (with a keyword like role)
+#={
+ A declarator block (with a keyword like role)
+ }
+#|{
+ Another declarator block (with a keyword like role)
+ }
+#= { A single line declarator "block" with a brace (with a keyword like role)
+#=«
+ More declarator blocks! (with a keyword like role)
+ »
+#|«
+ More declarator blocks! (with a keyword like role)
+ »
+
+say 'Moar code!';
+
+my $don't = 16;
+
+sub don't($x) {
+ !$x
+}
+
+say don't 'foo';
+
+my %hash = (
+ :foo(1),
+);
+
+say %hash<foo>;
+say %hash<<foo>>;
+say %hash«foo»;
+
+say %*hash<foo>;
+say %*hash<<foo>>;
+say %*hash«foo»;
+
+say $<todo>;
+say $<todo>;
+
+for (@A Z @B) -> $a, $b {
+ say $a + $b;
+}
+
+Q:PIR {
+ .loadlib "somelib"
+}
+
+my $longstring = q/
+ lots
+ of
+ text
+/;
+
+my $heredoc = q:to/END_SQL/;
+SELECT * FROM Users
+WHERE first_name = 'Rob'
+END_SQL
+my $hello;
+
+# Fun with regexen
+
+if 'food' ~~ /foo/ {
+ say 'match!'
+}
+
+my $re = /foo/;
+my $re2 = m/ foo /;
+my $re3 = m:i/ FOO /;
+
+call-a-sub(/ foo /);
+call-a-sub(/ foo \/ bar /);
+
+my $re4 = rx/something | something-else/;
+my $result = ms/regexy stuff/;
+my $sub0 = s/regexy stuff/more stuff/;
+my $sub = ss/regexy stuff/more stuff/;
+my $trans = tr/regexy stuff/more stuff/;
+
+my @values = <a b c d>;
+call-sub(<a b c d>);
+call-sub <a b c d>;
+
+my $result = $a < $b;
+
+for <a b c d> -> $letter {
+ say $letter;
+}
+
+sub test-sub {
+ say @_;
+ say $!;
+ say $/;
+ say $0;
+ say $1;
+ say @*ARGS;
+ say $*ARGFILES;
+ say &?BLOCK;
+ say ::?CLASS;
+ say $?CLASS;
+ say @=COMMENT;
+ say %?CONFIG;
+ say $*CWD;
+ say $=data;
+ say %?DEEPMAGIC;
+ say $?DISTRO;
+ say $*DISTRO;
+ say $*EGID;
+ say %*ENV;
+ say $*ERR;
+ say $*EUID;
+ say $*EXECUTABLE_NAME;
+ say $?FILE;
+ say $?GRAMMAR;
+ say $*GID;
+ say $*IN;
+ say @*INC;
+ say %?LANG;
+ say $*LANG;
+ say $?LINE;
+ say %*META-ARGS;
+ say $?MODULE;
+ say %*OPTS;
+ say %*OPT;
+ say $?KERNEL;
+ say $*KERNEL;
+ say $*OUT;
+ say $?PACKAGE;
+ say $?PERL;
+ say $*PERL;
+ say $*PID;
+ say %=pod;
+ say $*PROGRAM_NAME;
+ say %*PROTOCOLS;
+ say ::?ROLE;
+ say $?ROLE;
+ say &?ROUTINE;
+ say $?SCOPE;
+ say $*TZ;
+ say $*UID;
+ say $?USAGE;
+ say $?VM;
+ say $?XVM;
+}
+
+say <a b c>;
+
+my $perl5_re = m:P5/ fo{2} /;
+my $re5 = rx«something | something-else»;
+
+my $M := %*COMPILING<%?OPTIONS><M>;
+
+say $M;
+
+sub regex-name { ... }
+my $pair = role-name => 'foo';
+$pair = rolesque => 'foo';
+
+my sub something(Str:D $value) { ... }
+
+my $s = q«<
+some
+string
+stuff
+»;
+
+my $regex = m«< some chars »;
+# after
+
+say $/<foo><bar>;
+
+roleq;
diff --git a/tests/examplefiles/test.pig b/tests/examplefiles/test.pig
new file mode 100644
index 00000000..f67b0268
--- /dev/null
+++ b/tests/examplefiles/test.pig
@@ -0,0 +1,148 @@
+/**
+ * This script is an example recommender (using made up data) showing how you might modify item-item links
+ * by defining similar relations between items in a dataset and customizing the change in weighting.
+ * This example creates metadata by using the genre field as the metadata_field. The items with
+ * the same genre have it's weight cut in half in order to boost the signals of movies that do not have the same genre.
+ * This technique requires a customization of the standard GetItemItemRecommendations macro
+ */
+import 'recommenders.pig';
+
+
+
+%default INPUT_PATH_PURCHASES '../data/retail/purchases.json'
+%default INPUT_PATH_WISHLIST '../data/retail/wishlists.json'
+%default INPUT_PATH_INVENTORY '../data/retail/inventory.json'
+%default OUTPUT_PATH '../data/retail/out/modify_item_item'
+
+
+/******** Custom GetItemItemRecommnedations *********/
+define recsys__GetItemItemRecommendations_ModifyCustom(user_item_signals, metadata) returns item_item_recs {
+
+ -- Convert user_item_signals to an item_item_graph
+ ii_links_raw, item_weights = recsys__BuildItemItemGraph(
+ $user_item_signals,
+ $LOGISTIC_PARAM,
+ $MIN_LINK_WEIGHT,
+ $MAX_LINKS_PER_USER
+ );
+ -- NOTE this function is added in order to combine metadata with item-item links
+ -- See macro for more detailed explination
+ ii_links_metadata = recsys__AddMetadataToItemItemLinks(
+ ii_links_raw,
+ $metadata
+ );
+
+ /********* Custom Code starts here ********/
+
+ --The code here should adjust the weights based on an item-item link and the equality of metadata.
+ -- In this case, if the metadata is the same, the weight is reduced. Otherwise the weight is left alone.
+ ii_links_adjusted = foreach ii_links_metadata generate item_A, item_B,
+ -- the amount of weight adjusted is dependant on the domain of data and what is expected
+ -- It is always best to adjust the weight by multiplying it by a factor rather than addition with a constant
+ (metadata_B == metadata_A ? (weight * 0.5): weight) as weight;
+
+
+ /******** Custom Code stops here *********/
+
+ -- remove negative numbers just incase
+ ii_links_adjusted_filt = foreach ii_links_adjusted generate item_A, item_B,
+ (weight <= 0 ? 0: weight) as weight;
+ -- Adjust the weights of the graph to improve recommendations.
+ ii_links = recsys__AdjustItemItemGraphWeight(
+ ii_links_adjusted_filt,
+ item_weights,
+ $BAYESIAN_PRIOR
+ );
+
+ -- Use the item-item graph to create item-item recommendations.
+ $item_item_recs = recsys__BuildItemItemRecommendationsFromGraph(
+ ii_links,
+ $NUM_RECS_PER_ITEM,
+ $NUM_RECS_PER_ITEM
+ );
+};
+
+
+/******* Load Data **********/
+
+--Get purchase signals
+purchase_input = load '$INPUT_PATH_PURCHASES' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'row_id: int,
+ movie_id: chararray,
+ movie_name: chararray,
+ user_id: chararray,
+ purchase_price: int');
+
+--Get wishlist signals
+wishlist_input = load '$INPUT_PATH_WISHLIST' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'row_id: int,
+ movie_id: chararray,
+ movie_name: chararray,
+ user_id: chararray');
+
+
+/******* Convert Data to Signals **********/
+
+-- Start with choosing 1 as max weight for a signal.
+purchase_signals = foreach purchase_input generate
+ user_id as user,
+ movie_name as item,
+ 1.0 as weight;
+
+
+-- Start with choosing 0.5 as weight for wishlist items because that is a weaker signal than
+-- purchasing an item.
+wishlist_signals = foreach wishlist_input generate
+ user_id as user,
+ movie_name as item,
+ 0.5 as weight;
+
+user_signals = union purchase_signals, wishlist_signals;
+
+
+/******** Changes for Modifying item-item links ******/
+inventory_input = load '$INPUT_PATH_INVENTORY' using org.apache.pig.piggybank.storage.JsonLoader(
+ 'movie_title: chararray,
+ genres: bag{tuple(content:chararray)}');
+
+
+metadata = foreach inventory_input generate
+ FLATTEN(genres) as metadata_field,
+ movie_title as item;
+-- requires the macro to be written seperately
+ --NOTE this macro is defined within this file for clarity
+item_item_recs = recsys__GetItemItemRecommendations_ModifyCustom(user_signals, metadata);
+/******* No more changes ********/
+
+
+user_item_recs = recsys__GetUserItemRecommendations(user_signals, item_item_recs);
+
+--Completely unrelated code stuck in the middle
+data = LOAD 's3n://my-s3-bucket/path/to/responses'
+ USING org.apache.pig.piggybank.storage.JsonLoader();
+responses = FOREACH data GENERATE object#'response' AS response: map[];
+out = FOREACH responses
+ GENERATE response#'id' AS id: int, response#'thread' AS thread: chararray,
+ response#'comments' AS comments: {t: (comment: chararray)};
+STORE out INTO 's3n://path/to/output' USING PigStorage('|');
+
+
+/******* Store recommendations **********/
+
+-- If your output folder exists already, hadoop will refuse to write data to it.
+
+rmf $OUTPUT_PATH/item_item_recs;
+rmf $OUTPUT_PATH/user_item_recs;
+
+store item_item_recs into '$OUTPUT_PATH/item_item_recs' using PigStorage();
+store user_item_recs into '$OUTPUT_PATH/user_item_recs' using PigStorage();
+
+-- STORE the item_item_recs into dynamo
+STORE item_item_recs
+ INTO '$OUTPUT_PATH/unused-ii-table-data'
+USING com.mortardata.pig.storage.DynamoDBStorage('$II_TABLE', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');
+
+-- STORE the user_item_recs into dynamo
+STORE user_item_recs
+ INTO '$OUTPUT_PATH/unused-ui-table-data'
+USING com.mortardata.pig.storage.DynamoDBStorage('$UI_TABLE', '$AWS_ACCESS_KEY_ID', '$AWS_SECRET_ACCESS_KEY');
diff --git a/tests/examplefiles/type.lisp b/tests/examplefiles/type.lisp
index 9c769379..c02c29df 100644
--- a/tests/examplefiles/type.lisp
+++ b/tests/examplefiles/type.lisp
@@ -1200,3 +1200,19 @@ Henry Baker:
(unless (clos::funcallable-instance-p #'clos::class-name)
(fmakunbound 'clos::class-name))
+
+
+(keywordp :junk)
+ T
+
+(keywordp ::junk)
+ T
+
+(symbol-name ::junk)
+ "JUNK"
+
+(symbol-name :#junk)
+ "#JUNK"
+
+(symbol-name :#.junk)
+ "#.JUNK"
diff --git a/tests/examplefiles/test.bas b/tests/examplefiles/vbnet_test.bas
index af5f2574..af5f2574 100644
--- a/tests/examplefiles/test.bas
+++ b/tests/examplefiles/vbnet_test.bas
diff --git a/tests/old_run.py b/tests/old_run.py
deleted file mode 100644
index 4f7cef16..00000000
--- a/tests/old_run.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- Pygments unit tests
- ~~~~~~~~~~~~~~~~~~
-
- Usage::
-
- python run.py [testfile ...]
-
-
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys, os
-import unittest
-
-from os.path import dirname, basename, join, abspath
-
-import pygments
-
-try:
- import coverage
-except ImportError:
- coverage = None
-
-testdir = abspath(dirname(__file__))
-
-failed = []
-total_test_count = 0
-error_test_count = 0
-
-
-def err(file, what, exc):
- print >>sys.stderr, file, 'failed %s:' % what,
- print >>sys.stderr, exc
- failed.append(file[:-3])
-
-
-class QuietTestRunner(object):
- """Customized test runner for relatively quiet output"""
-
- def __init__(self, testname, stream=sys.stderr):
- self.testname = testname
- self.stream = unittest._WritelnDecorator(stream)
-
- def run(self, test):
- global total_test_count
- global error_test_count
- result = unittest._TextTestResult(self.stream, True, 1)
- test(result)
- if not result.wasSuccessful():
- self.stream.write(' FAIL:')
- result.printErrors()
- failed.append(self.testname)
- else:
- self.stream.write(' ok\n')
- total_test_count += result.testsRun
- error_test_count += len(result.errors) + len(result.failures)
- return result
-
-
-def run_tests(with_coverage=False):
- # needed to avoid confusion involving atexit handlers
- import logging
-
- if sys.argv[1:]:
- # test only files given on cmdline
- files = [entry + '.py' for entry in sys.argv[1:] if entry.startswith('test_')]
- else:
- files = [entry for entry in os.listdir(testdir)
- if (entry.startswith('test_') and entry.endswith('.py'))]
- files.sort()
-
- WIDTH = 85
-
- print >>sys.stderr, \
- ('Pygments %s Test Suite running%s, stand by...' %
- (pygments.__version__,
- with_coverage and " with coverage analysis" or "")).center(WIDTH)
- print >>sys.stderr, ('(using Python %s)' % sys.version.split()[0]).center(WIDTH)
- print >>sys.stderr, '='*WIDTH
-
- if with_coverage:
- coverage.erase()
- coverage.start()
-
- for testfile in files:
- globs = {'__file__': join(testdir, testfile)}
- try:
- execfile(join(testdir, testfile), globs)
- except Exception, exc:
- raise
- err(testfile, 'execfile', exc)
- continue
- sys.stderr.write(testfile[:-3] + ': ')
- try:
- runner = QuietTestRunner(testfile[:-3])
- # make a test suite of all TestCases in the file
- tests = []
- for name, thing in globs.iteritems():
- if name.endswith('Test'):
- tests.append((name, unittest.makeSuite(thing)))
- tests.sort()
- suite = unittest.TestSuite()
- suite.addTests([x[1] for x in tests])
- runner.run(suite)
- except Exception, exc:
- err(testfile, 'running test', exc)
-
- print >>sys.stderr, '='*WIDTH
- if failed:
- print >>sys.stderr, '%d of %d tests failed.' % \
- (error_test_count, total_test_count)
- print >>sys.stderr, 'Tests failed in:', ', '.join(failed)
- ret = 1
- else:
- if total_test_count == 1:
- print >>sys.stderr, '1 test happy.'
- else:
- print >>sys.stderr, 'All %d tests happy.' % total_test_count
- ret = 0
-
- if with_coverage:
- coverage.stop()
- modules = [mod for name, mod in sys.modules.iteritems()
- if name.startswith('pygments.') and mod]
- coverage.report(modules)
-
- return ret
-
-
-if __name__ == '__main__':
- with_coverage = False
- if sys.argv[1:2] == ['-C']:
- with_coverage = bool(coverage)
- del sys.argv[1]
- sys.exit(run_tests(with_coverage))
diff --git a/tests/run.py b/tests/run.py
index 18a1d824..e87837e5 100644
--- a/tests/run.py
+++ b/tests/run.py
@@ -8,42 +8,37 @@
python run.py [testfile ...]
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import sys, os
-if sys.version_info >= (3,):
- # copy test suite over to "build/lib" and convert it
- print ('Copying and converting sources to build/lib/test...')
- from distutils.util import copydir_run_2to3
- testroot = os.path.dirname(__file__)
- newroot = os.path.join(testroot, '..', 'build/lib/test')
- copydir_run_2to3(testroot, newroot)
- # make nose believe that we run from the converted dir
- os.chdir(newroot)
-else:
- # only find tests in this directory
- if os.path.dirname(__file__):
- os.chdir(os.path.dirname(__file__))
+# only find tests in this directory
+if os.path.dirname(__file__):
+ os.chdir(os.path.dirname(__file__))
try:
import nose
except ImportError:
- print ('nose is required to run the Pygments test suite')
+ print('nose is required to run the Pygments test suite')
sys.exit(1)
try:
# make sure the current source is first on sys.path
sys.path.insert(0, '..')
import pygments
-except ImportError:
- print ('Cannot find Pygments to test: %s' % sys.exc_info()[1])
+except SyntaxError as err:
+ print('Syntax error: %s' % err)
+ sys.exit(1)
+except ImportError as err:
+ print('Cannot find Pygments to test: %s' % err)
sys.exit(1)
else:
- print ('Pygments %s test suite running (Python %s)...' %
- (pygments.__version__, sys.version.split()[0]))
+ print('Pygments %s test suite running (Python %s)...' %
+ (pygments.__version__, sys.version.split()[0]))
nose.main()
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 00dc26f0..be7a4747 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -3,11 +3,12 @@
Pygments basic API tests
~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import os
+from __future__ import print_function
+
import random
import unittest
@@ -15,7 +16,7 @@ from pygments import lexers, formatters, filters, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
-from pygments.util import BytesIO, StringIO, bytes, b
+from pygments.util import text_type, StringIO, xrange, ClassNotFound
import support
@@ -28,7 +29,7 @@ test_content = ''.join(test_content) + '\n'
def test_lexer_import_all():
# instantiate every lexer, to see if the token type defs are correct
- for x in lexers.LEXERS.keys():
+ for x in lexers.LEXERS:
c = getattr(lexers, x)()
@@ -45,6 +46,8 @@ def test_lexer_classes():
result = cls.analyse_text(".abc")
assert isinstance(result, float) and 0.0 <= result <= 1.0
+ assert all(al.lower() == al for al in cls.aliases)
+
inst = cls(opt1="val1", opt2="val2")
if issubclass(cls, RegexLexer):
if not hasattr(cls, '_tokens'):
@@ -60,14 +63,17 @@ def test_lexer_classes():
if cls.name in ['XQuery', 'Opa']: # XXX temporary
return
- tokens = list(inst.get_tokens(test_content))
+ try:
+ tokens = list(inst.get_tokens(test_content))
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt('interrupted %s.get_tokens(): test_content=%r' % (cls.__name__, test_content))
txt = ""
for token in tokens:
assert isinstance(token, tuple)
assert isinstance(token[0], _TokenType)
if isinstance(token[1], str):
- print repr(token[1])
- assert isinstance(token[1], unicode)
+ print(repr(token[1]))
+ assert isinstance(token[1], text_type)
txt += token[1]
assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
(cls.name, test_content, txt)
@@ -92,9 +98,10 @@ def test_lexer_options():
if cls.__name__ not in (
'PythonConsoleLexer', 'RConsoleLexer', 'RubyConsoleLexer',
'SqliteConsoleLexer', 'MatlabSessionLexer', 'ErlangShellLexer',
- 'BashSessionLexer', 'LiterateHaskellLexer', 'PostgresConsoleLexer',
- 'ElixirConsoleLexer', 'JuliaConsoleLexer', 'RobotFrameworkLexer',
- 'DylanConsoleLexer', 'ShellSessionLexer'):
+ 'BashSessionLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
+ 'PostgresConsoleLexer', 'ElixirConsoleLexer', 'JuliaConsoleLexer',
+ 'RobotFrameworkLexer', 'DylanConsoleLexer', 'ShellSessionLexer',
+ 'LiterateIdrisLexer'):
inst = cls(ensurenl=False)
ensure(inst.get_tokens('a\nb'), 'a\nb')
inst = cls(ensurenl=False, stripall=True)
@@ -122,7 +129,7 @@ def test_get_lexers():
]:
yield verify, func, args
- for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.iteritems():
+ for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items():
assert cls == lexers.find_lexer_class(lname).__name__
for alias in aliases:
@@ -131,6 +138,13 @@ def test_get_lexers():
for mimetype in mimetypes:
assert cls == lexers.get_lexer_for_mimetype(mimetype).__class__.__name__
+ try:
+ lexers.get_lexer_by_name(None)
+ except ClassNotFound:
+ pass
+ else:
+ raise Exception
+
def test_formatter_public_api():
ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
@@ -157,7 +171,7 @@ def test_formatter_public_api():
pass
inst.format(ts, out)
- for formatter, info in formatters.FORMATTERS.iteritems():
+ for formatter, info in formatters.FORMATTERS.items():
yield verify, formatter, info
def test_formatter_encodings():
@@ -167,7 +181,7 @@ def test_formatter_encodings():
fmt = HtmlFormatter()
tokens = [(Text, u"ä")]
out = format(tokens, fmt)
- assert type(out) is unicode
+ assert type(out) is text_type
assert u"ä" in out
# encoding option
@@ -196,7 +210,7 @@ def test_formatter_unicode_handling():
if formatter.name != 'Raw tokens':
out = format(tokens, inst)
if formatter.unicodeoutput:
- assert type(out) is unicode
+ assert type(out) is text_type
inst = formatter(encoding='utf-8')
out = format(tokens, inst)
@@ -208,7 +222,7 @@ def test_formatter_unicode_handling():
out = format(tokens, inst)
assert type(out) is bytes, '%s: %r' % (formatter, out)
- for formatter, info in formatters.FORMATTERS.iteritems():
+ for formatter, info in formatters.FORMATTERS.items():
yield verify, formatter
@@ -236,7 +250,7 @@ class FiltersTest(unittest.TestCase):
'whitespace': {'spaces': True, 'tabs': True, 'newlines': True},
'highlight': {'names': ['isinstance', 'lexers', 'x']},
}
- for x in filters.FILTERS.keys():
+ for x in filters.FILTERS:
lx = lexers.PythonLexer()
lx.add_filter(x, **filter_args.get(x, {}))
fp = open(TESTFILE, 'rb')
diff --git a/tests/test_clexer.py b/tests/test_clexer.py
index 8b37bf57..c995bb2b 100644
--- a/tests/test_clexer.py
+++ b/tests/test_clexer.py
@@ -3,7 +3,7 @@
Basic CLexer Test
~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 5ad815c0..ef14661c 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -3,17 +3,18 @@
Command line test
~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Test the command line interface
-import sys, os
+import io
+import sys
import unittest
-import StringIO
from pygments import highlight
+from pygments.util import StringIO
from pygments.cmdline import main as cmdline_main
import support
@@ -24,8 +25,8 @@ TESTFILE, TESTDIR = support.location(__file__)
def run_cmdline(*args):
saved_stdout = sys.stdout
saved_stderr = sys.stderr
- new_stdout = sys.stdout = StringIO.StringIO()
- new_stderr = sys.stderr = StringIO.StringIO()
+ new_stdout = sys.stdout = StringIO()
+ new_stderr = sys.stderr = StringIO()
try:
ret = cmdline_main(["pygmentize"] + list(args))
finally:
diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py
index d785cf3b..0547ffd3 100644
--- a/tests/test_examplefiles.py
+++ b/tests/test_examplefiles.py
@@ -3,18 +3,20 @@
Pygments tests with example files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import pprint
import difflib
-import cPickle as pickle
+import pickle
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error
-from pygments.util import ClassNotFound, b
+from pygments.util import ClassNotFound
STORE_OUTPUT = False
@@ -31,21 +33,30 @@ def test_example_files():
absfn = os.path.join(testdir, 'examplefiles', fn)
if not os.path.isfile(absfn):
continue
- outfn = os.path.join(outdir, fn)
+ print(absfn)
+ code = open(absfn, 'rb').read()
try:
- lx = get_lexer_for_filename(absfn)
- except ClassNotFound:
- if "_" not in fn:
+ code = code.decode('utf-8')
+ except UnicodeError:
+ code = code.decode('latin1')
+
+ outfn = os.path.join(outdir, fn)
+
+ lx = None
+ if '_' in fn:
+ try:
+ lx = get_lexer_by_name(fn.split('_')[0])
+ except ClassNotFound:
+ pass
+ if lx is None:
+ try:
+ lx = get_lexer_for_filename(absfn, code=code)
+ except ClassNotFound:
raise AssertionError('file %r has no registered extension, '
'nor is of the form <lexer>_filename '
'for overriding, thus no lexer found.'
- % fn)
- try:
- name, rest = fn.split("_", 1)
- lx = get_lexer_by_name(name)
- except ClassNotFound:
- raise AssertionError('no lexer found for file %r' % fn)
+ % fn)
yield check_lexer, lx, absfn, outfn
def check_lexer(lx, absfn, outfn):
@@ -54,8 +65,8 @@ def check_lexer(lx, absfn, outfn):
text = fp.read()
finally:
fp.close()
- text = text.replace(b('\r\n'), b('\n'))
- text = text.strip(b('\n')) + b('\n')
+ text = text.replace(b'\r\n', b'\n')
+ text = text.strip(b'\n') + b'\n'
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
@@ -71,8 +82,8 @@ def check_lexer(lx, absfn, outfn):
(lx, absfn, val, len(u''.join(ntext)))
tokens.append((type, val))
if u''.join(ntext) != text:
- print '\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
- text.splitlines()))
+ print('\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
+ text.splitlines())))
raise AssertionError('round trip failed for ' + absfn)
# check output against previous run if enabled
@@ -94,6 +105,6 @@ def check_lexer(lx, absfn, outfn):
if stored_tokens != tokens:
f1 = pprint.pformat(stored_tokens)
f2 = pprint.pformat(tokens)
- print '\n'.join(difflib.unified_diff(f1.splitlines(),
- f2.splitlines()))
+ print('\n'.join(difflib.unified_diff(f1.splitlines(),
+ f2.splitlines())))
assert False, absfn
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index f7e7a542..91225cd3 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -3,27 +3,29 @@
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
+import io
import os
import re
import unittest
-import StringIO
import tempfile
from os.path import join, dirname, isfile
+from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
-from pygments.util import uni_open
import support
TESTFILE, TESTDIR = support.location(__file__)
-fp = uni_open(TESTFILE, encoding='utf-8')
+fp = io.open(TESTFILE, encoding='utf-8')
try:
tokensource = list(PythonLexer().get_tokens(fp.read()))
finally:
@@ -33,11 +35,11 @@ finally:
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
- houtfile = StringIO.StringIO()
+ houtfile = StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
- noutfile = StringIO.StringIO()
+ noutfile = StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
@@ -74,13 +76,13 @@ class HtmlFormatterTest(unittest.TestCase):
dict(linenos=True, full=True),
dict(linenos=True, full=True, noclasses=True)]:
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
def test_linenos(self):
optdict = dict(linenos=True)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -88,7 +90,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_linenos_with_startnum(self):
optdict = dict(linenos=True, linenostart=5)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -96,7 +98,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_lineanchors(self):
optdict = dict(lineanchors="foo")
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -104,7 +106,7 @@ class HtmlFormatterTest(unittest.TestCase):
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
@@ -132,7 +134,7 @@ class HtmlFormatterTest(unittest.TestCase):
pass
else:
if ret:
- print output
+ print(output)
self.assertFalse(ret, 'nsgmls run reported errors')
os.unlink(pathname)
@@ -172,7 +174,7 @@ class HtmlFormatterTest(unittest.TestCase):
# anymore in the actual source
fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
tagurlformat='%(fname)s%(fext)s')
- outfile = StringIO.StringIO()
+ outfile = StringIO()
fmt.format(tokensource, outfile)
self.assertTrue('<a href="test_html_formatter.py#L-165">test_ctags</a>'
in outfile.getvalue())
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
index 06a74c3d..13ae87cd 100644
--- a/tests/test_latex_formatter.py
+++ b/tests/test_latex_formatter.py
@@ -3,10 +3,12 @@
Pygments LaTeX formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
+
import os
import unittest
import tempfile
@@ -48,7 +50,7 @@ class LatexFormatterTest(unittest.TestCase):
pass
else:
if ret:
- print output
+ print(output)
self.assertFalse(ret, 'latex run reported errors')
os.unlink(pathname)
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
new file mode 100644
index 00000000..1e420c77
--- /dev/null
+++ b/tests/test_lexers_other.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+ Tests for other lexers
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import glob
+import os
+import unittest
+
+from pygments.lexers import guess_lexer
+from pygments.lexers.other import RexxLexer
+
+
+def _exampleFilePath(filename):
+ return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
+
+
+class AnalyseTextTest(unittest.TestCase):
+ def _testCanRecognizeAndGuessExampleFiles(self, lexer):
+ assert lexer is not None
+
+ for pattern in lexer.filenames:
+ exampleFilesPattern = _exampleFilePath(pattern)
+ for exampleFilePath in glob.glob(exampleFilesPattern):
+ exampleFile = open(exampleFilePath, 'rb')
+ try:
+ text = exampleFile.read().decode('utf-8')
+ probability = lexer.analyse_text(text)
+ self.assertTrue(probability > 0,
+ '%s must recognize %r' % (
+ lexer.name, exampleFilePath))
+ guessedLexer = guess_lexer(text)
+ self.assertEqual(guessedLexer.name, lexer.name)
+ finally:
+ exampleFile.close()
+
+ def testCanRecognizeAndGuessExampleFiles(self):
+ self._testCanRecognizeAndGuessExampleFiles(RexxLexer)
+
+
+class RexxLexerTest(unittest.TestCase):
+ def testCanGuessFromText(self):
+ self.assertAlmostEqual(0.01,
+ RexxLexer.analyse_text('/* */'))
+ self.assertAlmostEqual(1.0,
+ RexxLexer.analyse_text('''/* Rexx */
+ say "hello world"'''))
+ self.assertLess(0.5,
+ RexxLexer.analyse_text('/* */\n'
+ 'hello:pRoceduRe\n'
+ ' say "hello world"'))
+ self.assertLess(0.2,
+ RexxLexer.analyse_text('''/* */
+ if 1 > 0 then do
+ say "ok"
+ end
+ else do
+ say "huh?"
+ end'''))
+ self.assertLess(0.2,
+ RexxLexer.analyse_text('''/* */
+ greeting = "hello world!"
+ parse value greeting "hello" name "!"
+ say name'''))
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
index 315b20e3..bfa3aeb8 100644
--- a/tests/test_perllexer.py
+++ b/tests/test_perllexer.py
@@ -3,7 +3,7 @@
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index 28d9689b..b12dce0a 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -3,7 +3,7 @@
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/tests/test_token.py b/tests/test_token.py
index 6a5b00b7..c5cc4990 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -3,7 +3,7 @@
Test suite for the token module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -36,11 +36,11 @@ class TokenTest(unittest.TestCase):
stp = token.STANDARD_TYPES.copy()
stp[token.Token] = '---' # Token and Text do conflict, that is okay
t = {}
- for k, v in stp.iteritems():
+ for k, v in stp.items():
t.setdefault(v, []).append(k)
if len(t) == len(stp):
return # Okay
- for k, v in t.iteritems():
+ for k, v in t.items():
if len(v) > 1:
self.fail("%r has more than one key: %r" % (k, v))
diff --git a/tests/test_using_api.py b/tests/test_using_api.py
index bb89d1e2..9e53c206 100644
--- a/tests/test_using_api.py
+++ b/tests/test_using_api.py
@@ -3,7 +3,7 @@
Pygments tests for using()
~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/tests/test_util.py b/tests/test_util.py
index dbbc66ce..59ecf14f 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -3,7 +3,7 @@
Test suite for the util module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""