summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorg Brandl <georg@python.org>2016-02-17 10:26:29 +0100
committerGeorg Brandl <georg@python.org>2016-02-17 10:26:29 +0100
commit26cfb3066db7d76a5a6907f9627c6d693ab771a9 (patch)
treeb7966cf12ad81aa278d8a031210b143f41c1a9e4
parent224816e359f4da936c2f3069e07b72777dde5535 (diff)
parentae1da17e6788e34a8f35c064f6f4c4352093ecb6 (diff)
downloadpygments-26cfb3066db7d76a5a6907f9627c6d693ab771a9.tar.gz
Merge in HSAIL lexer (PR#518).
-rw-r--r--AUTHORS9
-rw-r--r--CHANGES67
-rw-r--r--doc/docs/api.rst15
-rw-r--r--doc/docs/integrate.rst11
-rw-r--r--doc/docs/lexerdevelopment.rst18
-rw-r--r--doc/docs/styles.rst56
-rw-r--r--doc/docs/tokens.rst16
-rw-r--r--doc/faq.rst32
-rw-r--r--doc/languages.rst1
-rw-r--r--pygments/__init__.py22
-rw-r--r--pygments/console.py24
-rw-r--r--pygments/filter.py8
-rw-r--r--pygments/formatter.py2
-rwxr-xr-xpygments/formatters/_mapping.py7
-rw-r--r--pygments/formatters/html.py58
-rw-r--r--pygments/formatters/img.py10
-rw-r--r--pygments/formatters/irc.py2
-rw-r--r--pygments/formatters/latex.py9
-rw-r--r--pygments/formatters/terminal256.py36
-rw-r--r--pygments/lexer.py4
-rw-r--r--pygments/lexers/__init__.py22
-rw-r--r--pygments/lexers/_csound_builtins.py7
-rw-r--r--pygments/lexers/_mapping.py928
-rw-r--r--pygments/lexers/algebra.py46
-rw-r--r--pygments/lexers/ampl.py87
-rw-r--r--pygments/lexers/asm.py162
-rw-r--r--pygments/lexers/business.py108
-rw-r--r--pygments/lexers/c_cpp.py16
-rw-r--r--pygments/lexers/c_like.py204
-rw-r--r--pygments/lexers/chapel.py16
-rw-r--r--pygments/lexers/clean.py275
-rw-r--r--pygments/lexers/configs.py218
-rw-r--r--pygments/lexers/csound.py112
-rw-r--r--pygments/lexers/css.py10
-rw-r--r--pygments/lexers/diff.py61
-rw-r--r--pygments/lexers/dotnet.py51
-rw-r--r--pygments/lexers/dsls.py85
-rw-r--r--pygments/lexers/elm.py16
-rw-r--r--pygments/lexers/erlang.py32
-rw-r--r--pygments/lexers/esoteric.py72
-rw-r--r--pygments/lexers/ezhil.py2
-rw-r--r--pygments/lexers/felix.py2
-rw-r--r--pygments/lexers/fortran.py9
-rw-r--r--pygments/lexers/grammar_notation.py131
-rw-r--r--pygments/lexers/haskell.py2
-rw-r--r--pygments/lexers/hdl.py25
-rw-r--r--pygments/lexers/idl.py13
-rw-r--r--pygments/lexers/igor.py402
-rw-r--r--pygments/lexers/int_fiction.py1
-rw-r--r--pygments/lexers/j.py4
-rw-r--r--pygments/lexers/javascript.py82
-rw-r--r--pygments/lexers/julia.py71
-rw-r--r--pygments/lexers/jvm.py19
-rw-r--r--pygments/lexers/lisp.py85
-rw-r--r--pygments/lexers/modula2.py2
-rw-r--r--pygments/lexers/oberon.py6
-rw-r--r--pygments/lexers/parasail.py2
-rw-r--r--pygments/lexers/perl.py3
-rw-r--r--pygments/lexers/php.py34
-rw-r--r--pygments/lexers/praat.py34
-rw-r--r--pygments/lexers/python.py166
-rw-r--r--pygments/lexers/qvt.py84
-rw-r--r--pygments/lexers/rdf.py75
-rw-r--r--pygments/lexers/ruby.py8
-rw-r--r--pygments/lexers/rust.py2
-rw-r--r--pygments/lexers/scripting.py21
-rw-r--r--pygments/lexers/shell.py1
-rw-r--r--pygments/lexers/sql.py51
-rw-r--r--pygments/lexers/supercollider.py8
-rw-r--r--pygments/lexers/templates.py8
-rw-r--r--pygments/lexers/testing.py2
-rw-r--r--pygments/lexers/textfmts.py5
-rw-r--r--pygments/lexers/theorem.py53
-rw-r--r--pygments/lexers/trafficscript.py3
-rw-r--r--pygments/lexers/typoscript.py225
-rw-r--r--pygments/lexers/varnish.py190
-rw-r--r--pygments/lexers/verification.py110
-rw-r--r--pygments/lexers/webmisc.py66
-rw-r--r--pygments/scanner.py3
-rw-r--r--pygments/sphinxext.py1
-rw-r--r--pygments/style.py41
-rw-r--r--pygments/styles/__init__.py1
-rw-r--r--pygments/styles/arduino.py10
-rw-r--r--pygments/styles/lovelace.py7
-rw-r--r--pygments/styles/perldoc.py2
-rw-r--r--pygments/token.py43
-rwxr-xr-xscripts/debug_lexer.py2
-rw-r--r--setup.cfg3
-rwxr-xr-xsetup.py2
-rw-r--r--tests/examplefiles/99_bottles_of_beer.chpl17
-rw-r--r--tests/examplefiles/StdGeneric.icl92
-rw-r--r--tests/examplefiles/abnf_example1.abnf22
-rw-r--r--tests/examplefiles/abnf_example2.abnf9
-rw-r--r--tests/examplefiles/bnf_example1.bnf15
-rw-r--r--tests/examplefiles/example.bc53
-rw-r--r--tests/examplefiles/example2.cpp20
-rw-r--r--tests/examplefiles/flatline_example186
-rw-r--r--tests/examplefiles/inform6_example7
-rw-r--r--tests/examplefiles/pacman.conf49
-rw-r--r--tests/examplefiles/pkgconfig_example.pc18
-rw-r--r--tests/examplefiles/postgresql_test.txt34
-rw-r--r--tests/examplefiles/scope.cirru26
-rw-r--r--tests/examplefiles/sparql.rq4
-rw-r--r--tests/examplefiles/termcap1340
-rw-r--r--tests/examplefiles/terminfo1445
-rw-r--r--tests/examplefiles/test.erl12
-rw-r--r--tests/examplefiles/test.escript4
-rw-r--r--tests/examplefiles/test.hsail2
-rw-r--r--tests/examplefiles/test.php29
-rw-r--r--tests/examplefiles/test.sco10
-rw-r--r--tests/examplefiles/test.sil206
-rw-r--r--tests/examplefiles/typescript_example (renamed from tests/examplefiles/example.ts)0
-rw-r--r--tests/examplefiles/typoscript_example1930
-rw-r--r--tests/examplefiles/varnish.vcl187
-rw-r--r--tests/examplefiles/wdiff_example1.wdiff731
-rw-r--r--tests/examplefiles/wdiff_example3.wdiff10
-rw-r--r--tests/test_html_formatter.py12
-rw-r--r--tests/test_java.py38
-rw-r--r--tests/test_lexers_other.py26
-rw-r--r--tests/test_terminal_formatter.py53
-rw-r--r--tests/test_token.py8
121 files changed, 10078 insertions, 1574 deletions
diff --git a/AUTHORS b/AUTHORS
index 381a7286..180f41bb 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -33,6 +33,7 @@ Other contributors, listed alphabetically, are:
* Adam Blinkinsop -- Haskell, Redcode lexers
* Frits van Bommel -- assembler lexers
* Pierre Bourdon -- bugfixes
+* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter
* chebee7i -- Python traceback lexer improvements
* Hiram Chirino -- Scaml and Jade lexers
* Ian Cooper -- VGL lexer
@@ -88,7 +89,8 @@ Other contributors, listed alphabetically, are:
* Tim Howard -- BlitzMax lexer
* Dustin Howett -- Logos lexer
* Ivan Inozemtsev -- Fantom lexer
-* Hiroaki Itoh -- Shell console rewrite
+* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session,
+ MSDOS session, BC, WDiff
* Brian R. Jackson -- Tea lexer
* Christian Jann -- ShellSession lexer
* Dennis Kaarsemaker -- sources.list lexer
@@ -110,7 +112,9 @@ Other contributors, listed alphabetically, are:
* Jon Larimer, Google Inc. -- Smali lexer
* Olov Lassus -- Dart lexer
* Matt Layman -- TAP lexer
+* Kristian Lyngstøl -- Varnish lexers
* Sylvestre Ledru -- Scilab lexer
+* Chee Sing Lee -- Flatline lexer
* Mark Lee -- Vala lexer
* Valentin Lorentz -- C++ lexer improvements
* Ben Mabey -- Gherkin lexer
@@ -157,6 +161,7 @@ Other contributors, listed alphabetically, are:
* Lubomir Rintel -- GoodData MAQL and CL lexers
* Andre Roberge -- Tango style
* Georg Rollinger -- HSAIL lexer
+* Michiel Roos -- TypoScript lexer
* Konrad Rudolph -- LaTeX formatter enhancements
* Mario Ruggier -- Evoque lexers
* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements
@@ -172,6 +177,7 @@ Other contributors, listed alphabetically, are:
* Alexander Smishlajev -- Visual FoxPro lexer
* Steve Spigarelli -- XQuery lexer
* Jerome St-Louis -- eC lexer
+* Camil Staps -- Clean lexer
* James Strachan -- Kotlin lexer
* Tom Stuart -- Treetop lexer
* Colin Sullivan -- SuperCollider lexer
@@ -187,6 +193,7 @@ Other contributors, listed alphabetically, are:
* Abe Voelker -- OpenEdge ABL lexer
* Pepijn de Vos -- HTML formatter CTags support
* Matthias Vallentin -- Bro lexer
+* Benoît Vinot -- AMPL lexer
* Linh Vu Hong -- RSL lexer
* Nathan Weizenbaum -- Haml and Sass lexers
* Nathan Whetsell -- Csound lexers
diff --git a/CHANGES b/CHANGES
index 7d648bce..a21a16da 100644
--- a/CHANGES
+++ b/CHANGES
@@ -7,9 +7,53 @@ pull request numbers to the requests at
<https://bitbucket.org/birkenfeld/pygments-main/pull-requests/merged>.
+Version 2.2
+-----------
+(in development)
+
+- Added lexers:
+
+ * AMPL
+ * TypoScript (#1173)
+ * Varnish config (PR#554)
+ * Clean (PR#503)
+ * WDiff (PR#513)
+ * Flatline (PR#551)
+ * Silver (PR#537)
+ * HSAIL (PR#518)
+
+- Added `lexers.find_lexer_class_by_name()`. (#1203)
+
+- Added new token types and lexing for magic methods and variables in Python
+ and PHP.
+
+- Added a new token type for string affixes and lexing for them in Python, C++
+ and Postgresql lexers.
+
+- Added a new token type for heredoc (and similar) string delimiters and
+ lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers.
+
+- Styles can now define colors with ANSI colors for use in the 256-color
+ terminal formatter. (PR#531)
+
+
+Version 2.1.1
+-------------
+(relased Feb 14, 2016)
+
+- Fixed Jython compatibility (#1205)
+- Fixed HTML formatter output with leading empty lines (#1111)
+- Added a mapping table for LaTeX encodings and added utf8 (#1152)
+- Fixed image formatter font searching on Macs (#1188)
+- Fixed deepcopy-ing of Token instances (#1168)
+- Fixed Julia string interpolation (#1170)
+- Fixed statefulness of HttpLexer between get_tokens calls
+- Many smaller fixes to various lexers
+
+
Version 2.1
-----------
-(not released yet)
+(released Jan 17, 2016)
- Added lexers:
@@ -51,20 +95,22 @@ Version 2.1
* True color (24-bit) terminal ANSI sequences (#1142)
(formatter alias: "16m")
+- New "filename" option for HTML formatter (PR#527).
+
- Improved performance of the HTML formatter for long lines (PR#504).
-- Updated autopygmentize script (PR#445)
+- Updated autopygmentize script (PR#445).
- Fixed style inheritance for non-standard token types in HTML output.
- Added support for async/await to Python 3 lexer.
- Rewrote linenos option for TerminalFormatter (it's better, but slightly
- different output than before). (#1147)
+ different output than before) (#1147).
-- Javascript lexer now supports most of ES6. (#1100)
+- Javascript lexer now supports most of ES6 (#1100).
-- Cocoa builtins updated for iOS 8.1 (PR#433)
+- Cocoa builtins updated for iOS 8.1 (PR#433).
- Combined BashSessionLexer and ShellSessionLexer, new version should support
the prompt styles of either.
@@ -72,16 +118,11 @@ Version 2.1
- Added option to pygmentize to show a full traceback on exceptions.
- Fixed incomplete output on Windows and Python 3 (e.g. when using iPython
- Notebook). (#1153)
-
-- Allowed more traceback styles in Python console lexer. (PR#253)
+ Notebook) (#1153).
-- Added decorators to TypeScript. (PR#509)
+- Allowed more traceback styles in Python console lexer (PR#253).
-
-Version 2.0.3
--------------
-(not released yet)
+- Added decorators to TypeScript (PR#509).
- Fix highlighting of certain IRC logs formats (#1076).
diff --git a/doc/docs/api.rst b/doc/docs/api.rst
index 123a4643..dd831bd1 100644
--- a/doc/docs/api.rst
+++ b/doc/docs/api.rst
@@ -89,6 +89,21 @@ Functions from :mod:`pygments.lexers`:
.. versionadded:: 0.6
+.. function:: find_lexer_class_by_name(alias)
+
+ Return the `Lexer` subclass that has `alias` in its aliases list, without
+ instantiating it.
+
+ Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+ found.
+
+ .. versionadded:: 2.2
+
+.. function:: find_lexer_class(name)
+
+ Return the `Lexer` subclass that with the *name* attribute as given by
+ the *name* argument.
+
.. module:: pygments.formatters
diff --git a/doc/docs/integrate.rst b/doc/docs/integrate.rst
index 73e02acd..77daaa43 100644
--- a/doc/docs/integrate.rst
+++ b/doc/docs/integrate.rst
@@ -27,3 +27,14 @@ Bash completion
The source distribution contains a file ``external/pygments.bashcomp`` that
sets up completion for the ``pygmentize`` command in bash.
+
+Wrappers for other languages
+----------------------------
+
+These libraries provide Pygments highlighting for users of other languages
+than Python:
+
+* `pygments.rb <https://github.com/tmm1/pygments.rb>`_, a pygments wrapper for Ruby
+* `Clygments <https://github.com/bfontaine/clygments>`_, a pygments wrapper for
+ Clojure
+* `PHPygments <https://github.com/capynet/PHPygments>`_, a pygments wrapper for PHP
diff --git a/doc/docs/lexerdevelopment.rst b/doc/docs/lexerdevelopment.rst
index 2c868440..fd6e76b9 100644
--- a/doc/docs/lexerdevelopment.rst
+++ b/doc/docs/lexerdevelopment.rst
@@ -88,10 +88,16 @@ one.
Adding and testing a new lexer
==============================
-To make Pygments aware of your new lexer, you have to perform the following
-steps:
+Using a lexer that is not part of Pygments can be done via the Python API. You
+can import and instantiate the lexer, and pass it to :func:`pygments.highlight`.
-First, change to the current directory containing the Pygments source code:
+To prepare your new lexer for inclusion in the Pygments distribution, so that it
+will be found when passing filenames or lexer aliases from the command line, you
+have to perform the following steps.
+
+First, change to the current directory containing the Pygments source code. You
+will need to have either an unpacked source tarball, or (preferably) a copy
+cloned from BitBucket.
.. code-block:: console
@@ -101,11 +107,13 @@ Select a matching module under ``pygments/lexers``, or create a new module for
your lexer class.
Next, make sure the lexer is known from outside of the module. All modules in
-the ``pygments.lexers`` specify ``__all__``. For example, ``esoteric.py`` sets::
+the ``pygments.lexers`` package specify ``__all__``. For example,
+``esoteric.py`` sets::
__all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
-Simply add the name of your lexer class to this list.
+Add the name of your lexer class to this list (or create the list if your lexer
+is the only class in the module).
Finally the lexer can be made publicly known by rebuilding the lexer mapping:
diff --git a/doc/docs/styles.rst b/doc/docs/styles.rst
index d56db0db..1094a270 100644
--- a/doc/docs/styles.rst
+++ b/doc/docs/styles.rst
@@ -143,3 +143,59 @@ a way to iterate over all styles:
>>> from pygments.styles import get_all_styles
>>> styles = list(get_all_styles())
+
+
+.. _AnsiTerminalStyle:
+
+Terminal Styles
+===============
+
+.. versionadded:: 2.2
+
+Custom styles used with the 256-color terminal formatter can also map colors to
+use the 8 default ANSI colors. To do so, use ``#ansigreen``, ``#ansired`` or
+any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI
+colors will be mapped to the corresponding `escape codes 30 to 37
+<https://en.wikipedia.org/wiki/ANSI_escape_code#Colors>`_ thus respecting any
+custom color mapping and themes provided by many terminal emulators. Light
+variants are treated as foreground color with and an added bold flag.
+``bg:#ansi<color>`` will also be respected, except the light variant will be the
+same shade as their dark variant.
+
+See the following example where the color of the string ``"hello world"`` is
+governed by the escape sequence ``\x1b[34;01m`` (Ansi Blue, Bold, 41 being red
+background) instead of an extended foreground & background color.
+
+.. sourcecode:: pycon
+
+ >>> from pygments import highlight
+ >>> from pygments.style import Style
+ >>> from pygments.token import Token
+ >>> from pygments.lexers import Python3Lexer
+ >>> from pygments.formatters import Terminal256Formatter
+
+ >>> class MyStyle(Style):
+ styles = {
+ Token.String: '#ansiblue bg:#ansired',
+ }
+
+ >>> code = 'print("Hello World")'
+ >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle))
+ >>> print(result.encode())
+ b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m'
+
+Colors specified using ``#ansi*`` are converted to a default set of RGB colors
+when used with formatters other than the terminal-256 formatter.
+
+By definition of ANSI, the following colors are considered "light" colors, and
+will be rendered by most terminals as bold:
+
+- "darkgray", "red", "green", "yellow", "blue", "fuchsia", "turquoise", "white"
+
+The following are considered "dark" colors and will be rendered as non-bold:
+
+- "black", "darkred", "darkgreen", "brown", "darkblue", "purple", "teal",
+ "lightgray"
+
+Exact behavior might depends on the terminal emulator you are using, and its
+settings.
diff --git a/doc/docs/tokens.rst b/doc/docs/tokens.rst
index 6455a501..801fc638 100644
--- a/doc/docs/tokens.rst
+++ b/doc/docs/tokens.rst
@@ -174,6 +174,10 @@ Name Tokens
`Name.Function`
Token type for function names.
+`Name.Function.Magic`
+ same as `Name.Function` but for special function names that have an implicit use
+ in a language (e.g. ``__init__`` method in Python).
+
`Name.Label`
Token type for label names (e.g. in languages that support ``goto``).
@@ -201,6 +205,10 @@ Name Tokens
`Name.Variable.Instance`
same as `Name.Variable` but for instance variables.
+`Name.Variable.Magic`
+ same as `Name.Variable` but for special variable names that have an implicit use
+ in a language (e.g. ``__doc__`` in Python).
+
Literals
========
@@ -215,12 +223,20 @@ Literals
`String`
For any string literal.
+`String.Affix`
+ Token type for affixes that further specify the type of the string they're
+ attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``).
+
`String.Backtick`
Token type for strings enclosed in backticks.
`String.Char`
Token type for single characters (e.g. Java, C).
+`String.Delimiter`
+ Token type for delimiting identifiers in "heredoc", raw and other similar
+ strings (e.g. the word ``END`` in Perl code ``print <<'END';``).
+
`String.Doc`
Token type for documentation strings (for example Python).
diff --git a/doc/faq.rst b/doc/faq.rst
index 5458e655..f375828b 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -89,28 +89,24 @@ Who uses Pygments?
This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
-* `Pygments API <http://pygments.appspot.com/>`_, a HTTP POST interface to Pygments
+* `Wikipedia <http://en.wikipedia.org>`_
+* `BitBucket <http://bitbucket.org/>`_, a Mercurial and Git hosting site
* `The Sphinx documentation builder <http://sphinx.pocoo.org/>`_, for embedded source examples
* `rst2pdf <http://code.google.com/p/rst2pdf/>`_, a reStructuredText to PDF converter
-* `Zine <http://zine.pocoo.org/>`_, a Python blogging system
+* `Codecov <http://codecov.io/>`_, a code coverage CI service
* `Trac <http://trac.edgewall.org/>`_, the universal project management tool
-* `Bruce <http://r1chardj0n3s.googlepages.com/bruce>`_, a reStructuredText presentation tool
* `AsciiDoc <http://www.methods.co.nz/asciidoc/>`_, a text-based documentation generator
* `ActiveState Code <http://code.activestate.com/>`_, the Python Cookbook successor
* `ViewVC <http://viewvc.org/>`_, a web-based version control repository browser
* `BzrFruit <http://repo.or.cz/w/bzrfruit.git>`_, a Bazaar branch viewer
* `QBzr <http://bazaar-vcs.org/QBzr>`_, a cross-platform Qt-based GUI front end for Bazaar
-* `BitBucket <http://bitbucket.org/>`_, a Mercurial and Git hosting site
* `Review Board <http://www.review-board.org/>`_, a collaborative code reviewing tool
-* `skeletonz <http://orangoo.com/skeletonz/>`_, a Python powered content management system
* `Diamanda <http://code.google.com/p/diamanda/>`_, a Django powered wiki system with support for Pygments
* `Progopedia <http://progopedia.ru/>`_ (`English <http://progopedia.com/>`_),
an encyclopedia of programming languages
-* `Postmarkup <http://code.google.com/p/postmarkup/>`_, a BBCode to XHTML generator
-* `Language Comparison <http://michaelsilver.us/lc>`_, a site that compares different programming languages
-* `BPython <http://www.noiseforfree.com/bpython/>`_, a curses-based intelligent Python shell
-* `Challenge-You! <http://challenge-you.appspot.com/>`_, a site offering programming challenges
+* `Bruce <http://r1chardj0n3s.googlepages.com/bruce>`_, a reStructuredText presentation tool
* `PIDA <http://pida.co.uk/>`_, a universal IDE written in Python
+* `BPython <http://www.noiseforfree.com/bpython/>`_, a curses-based intelligent Python shell
* `PuDB <http://pypi.python.org/pypi/pudb>`_, a console Python debugger
* `XWiki <http://www.xwiki.org/>`_, a wiki-based development framework in Java, using Jython
* `roux <http://ananelson.com/software/roux/>`_, a script for running R scripts
@@ -118,23 +114,25 @@ This is an (incomplete) list of projects and sites known to use the Pygments hig
* `hurl <http://hurl.it/>`_, a web service for making HTTP requests
* `wxHTMLPygmentizer <http://colinbarnette.net/projects/wxHTMLPygmentizer>`_ is
a GUI utility, used to make code-colorization easier
-* `WpPygments <http://blog.mirotin.net/?page_id=49>`_, a highlighter plugin for WordPress
-* `LodgeIt <http://paste.pocoo.org/>`_, a pastebin with XMLRPC support and diffs
-* `SpammCan <http://chrisarndt.de/projects/spammcan/>`_, a pastebin (demo see
- `here <http://paste.chrisarndt.de/>`_)
-* `WowAce.com pastes <http://www.wowace.com/paste/>`_, a pastebin
+* `Postmarkup <http://code.google.com/p/postmarkup/>`_, a BBCode to XHTML generator
+* `WpPygments <http://blog.mirotin.net/?page_id=49>`_, and `WPygments
+ <https://github.com/capynet/WPygments>`_, highlighter plugins for WordPress
* `Siafoo <http://siafoo.net>`_, a tool for sharing and storing useful code and programming experience
* `D source <http://www.dsource.org/>`_, a community for the D programming language
-* `dumpz.org <http://dumpz.org/>`_, a pastebin
* `dpaste.com <http://dpaste.com/>`_, another Django pastebin
-* `PylonsHQ Pasties <http://pylonshq.com/pasties/new>`_, a pastebin
* `Django snippets <http://www.djangosnippets.org/>`_, a pastebin for Django code
* `Fayaa <http://www.fayaa.com/code/>`_, a Chinese pastebin
* `Incollo.com <http://incollo.com>`_, a free collaborative debugging tool
* `PasteBox <http://p.boxnet.eu/>`_, a pastebin focused on privacy
-* `xinotes.org <http://www.xinotes.org/>`_, a site to share notes, code snippets etc.
* `hilite.me <http://www.hilite.me/>`_, a site to highlight code snippets
* `patx.me <http://patx.me/paste>`_, a pastebin
+* `Fluidic <https://github.com/richsmith/fluidic>`_, an experiment in
+ integrating shells with a GUI
+* `pygments.rb <https://github.com/tmm1/pygments.rb>`_, a pygments wrapper for Ruby
+* `Clygments <https://github.com/bfontaine/clygments>`_, a pygments wrapper for
+ Clojure
+* `PHPygments <https://github.com/capynet/PHPygments>`_, a pygments wrapper for PHP
+
If you have a project or web site using Pygments, drop me a line, and I'll add a
link here.
diff --git a/doc/languages.rst b/doc/languages.rst
index a495d15c..ffe1bdb6 100644
--- a/doc/languages.rst
+++ b/doc/languages.rst
@@ -31,6 +31,7 @@ Programming languages
* Dart
* Delphi
* Dylan
+* `Elm <http://elm-lang.org/>`_
* Erlang
* `Ezhil <http://ezhillang.org>`_ Ezhil - A Tamil programming language
* Factor
diff --git a/pygments/__init__.py b/pygments/__init__.py
index b37bdccb..ffac59ef 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -25,18 +25,16 @@
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import sys
+
+from pygments.util import StringIO, BytesIO
-__version__ = '2.1a0'
+__version__ = '2.2a0'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
-import sys
-
-from pygments.util import StringIO, BytesIO
-
-
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
@@ -44,9 +42,9 @@ def lex(code, lexer):
try:
return lexer.get_tokens(code)
except TypeError as err:
- if isinstance(err.args[0], str) and \
- ('unbound method get_tokens' in err.args[0] or
- 'missing 1 required positional argument' in err.args[0]):
+ if (isinstance(err.args[0], str) and
+ ('unbound method get_tokens' in err.args[0] or
+ 'missing 1 required positional argument' in err.args[0])):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
@@ -68,9 +66,9 @@ def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builti
else:
formatter.format(tokens, outfile)
except TypeError as err:
- if isinstance(err.args[0], str) and \
- ('unbound method format' in err.args[0] or
- 'missing 1 required positional argument' in err.args[0]):
+ if (isinstance(err.args[0], str) and
+ ('unbound method format' in err.args[0] or
+ 'missing 1 required positional argument' in err.args[0])):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
diff --git a/pygments/console.py b/pygments/console.py
index 4a2c9acb..4aaf5fcb 100644
--- a/pygments/console.py
+++ b/pygments/console.py
@@ -12,18 +12,18 @@
esc = "\x1b["
codes = {}
-codes[""] = ""
-codes["reset"] = esc + "39;49;00m"
+codes[""] = ""
+codes["reset"] = esc + "39;49;00m"
-codes["bold"] = esc + "01m"
-codes["faint"] = esc + "02m"
-codes["standout"] = esc + "03m"
+codes["bold"] = esc + "01m"
+codes["faint"] = esc + "02m"
+codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
-codes["blink"] = esc + "05m"
-codes["overline"] = esc + "06m"
+codes["blink"] = esc + "05m"
+codes["overline"] = esc + "06m"
-dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
- "purple", "teal", "lightgray"]
+dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
+ "purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
@@ -35,10 +35,10 @@ for d, l in zip(dark_colors, light_colors):
del d, l, x
-codes["darkteal"] = codes["turquoise"]
+codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
-codes["fuscia"] = codes["fuchsia"]
-codes["white"] = codes["bold"]
+codes["fuscia"] = codes["fuchsia"]
+codes["white"] = codes["bold"]
def reset_color():
diff --git a/pygments/filter.py b/pygments/filter.py
index c8176ed9..f3082037 100644
--- a/pygments/filter.py
+++ b/pygments/filter.py
@@ -34,10 +34,10 @@ def simplefilter(f):
yield ttype, value.lower()
"""
return type(f.__name__, (FunctionFilter,), {
- 'function': f,
- '__module__': getattr(f, '__module__'),
- '__doc__': f.__doc__
- })
+ '__module__': getattr(f, '__module__'),
+ '__doc__': f.__doc__,
+ 'function': f,
+ })
class Filter(object):
diff --git a/pygments/formatter.py b/pygments/formatter.py
index addd07d7..9f22b3bc 100644
--- a/pygments/formatter.py
+++ b/pygments/formatter.py
@@ -65,7 +65,7 @@ class Formatter(object):
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
- self.full = get_bool_opt(options, 'full', False)
+ self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding in ('guess', 'chardet'):
diff --git a/pygments/formatters/_mapping.py b/pygments/formatters/_mapping.py
index a2e612ad..01d053dd 100755
--- a/pygments/formatters/_mapping.py
+++ b/pygments/formatters/_mapping.py
@@ -66,6 +66,13 @@ if __name__ == '__main__': # pragma: no cover
# extract useful sourcecode from this file
with open(__file__) as fp:
content = fp.read()
+ # replace crnl to nl for Windows.
+ #
+ # Note that, originally, contributers should keep nl of master
+ # repository, for example by using some kind of automatic
+ # management EOL, like `EolExtension
+ # <https://www.mercurial-scm.org/wiki/EolExtension>`.
+ content = content.replace("\r\n", "\n")
header = content[:content.find('FORMATTERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index b03a4bd5..2c6bb19e 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -321,6 +321,12 @@ class HtmlFormatter(Formatter):
.. versionadded:: 1.6
+ `filename`
+ A string used to generate a filename when rendering <pre> blocks,
+ for example if displaying source code.
+
+ .. versionadded:: 2.1
+
**Subclassing the HTML formatter**
@@ -388,6 +394,7 @@ class HtmlFormatter(Formatter):
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
+ self.filename = self._decodeifneeded(options.get('filename', ''))
if self.tagsfile:
if not ctags:
@@ -521,7 +528,7 @@ class HtmlFormatter(Formatter):
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
- print('Note: Cannot determine output file name, ' \
+ print('Note: Cannot determine output file name, '
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
@@ -530,21 +537,21 @@ class HtmlFormatter(Formatter):
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
- {'styledefs': self.get_style_defs('body')})
+ {'styledefs': self.get_style_defs('body')})
cf.close()
except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
- dict(title = self.title,
- cssfile = self.cssfile,
- encoding = self.encoding))
+ dict(title=self.title,
+ cssfile=self.cssfile,
+ encoding=self.encoding))
else:
yield 0, (DOC_HEADER %
- dict(title = self.title,
- styledefs = self.get_style_defs('body'),
- encoding = self.encoding))
+ dict(title=self.title,
+ styledefs=self.get_style_defs('body'),
+ encoding=self.encoding))
for t, line in inner:
yield t, line
@@ -623,35 +630,35 @@ class HtmlFormatter(Formatter):
if self.noclasses:
if sp:
for t, line in lines:
- if num%sp == 0:
+ if num % sp == 0:
style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
else:
style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
yield 1, '<span style="%s">%*s </span>' % (
- style, mw, (num%st and ' ' or num)) + line
+ style, mw, (num % st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, ('<span style="background-color: #f0f0f0; '
'padding: 0 5px 0 5px">%*s </span>' % (
- mw, (num%st and ' ' or num)) + line)
+ mw, (num % st and ' ' or num)) + line)
num += 1
elif sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s </span>' % (
- num%sp == 0 and ' special' or '', mw,
- (num%st and ' ' or num)) + line
+ num % sp == 0 and ' special' or '', mw,
+ (num % st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s </span>' % (
- mw, (num%st and ' ' or num)) + line
+ mw, (num % st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
- i = self.linenostart - 1 # subtract 1 since we have to increment i
- # *before* yielding
+ # subtract 1 since we have to increment i *before* yielding
+ i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
@@ -672,14 +679,14 @@ class HtmlFormatter(Formatter):
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
- self.style.background_color is not None):
+ self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
- yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
- + (style and (' style="%s"' % style)) + '>')
+ yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
@@ -692,7 +699,12 @@ class HtmlFormatter(Formatter):
style.append('line-height: 125%')
style = '; '.join(style)
- yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
+ if self.filename:
+ yield 0, ('<span class="filename">' + self.filename + '</span>')
+
+ # the empty span here is to keep leading empty lines from being
+ # ignored by HTML parsers
+ yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
for tup in inner:
yield tup
yield 0, '</pre>'
@@ -743,8 +755,8 @@ class HtmlFormatter(Formatter):
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
- (cspan and '</span>'), lsep))
- else: # both are the same
+ (cspan and '</span>'), lsep))
+ else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
@@ -785,7 +797,7 @@ class HtmlFormatter(Formatter):
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
- if i + 1 in hls: # i + 1 because Python indexes start at 0
+ if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
index 667a8697..c7b8e819 100644
--- a/pygments/formatters/img.py
+++ b/pygments/formatters/img.py
@@ -82,9 +82,13 @@ class FontManager(object):
stdout, _ = proc.communicate()
if proc.returncode == 0:
lines = stdout.splitlines()
- if lines:
- path = lines[0].strip().strip(':')
- return path
+ for line in lines:
+ if line.startswith('Fontconfig warning:'):
+ continue
+ path = line.decode().strip().strip(':')
+ if path:
+ return path
+ return None
def _create_nix(self):
for name in STYLES['NORMAL']:
diff --git a/pygments/formatters/irc.py b/pygments/formatters/irc.py
index 44fe6c4a..d1eed0ac 100644
--- a/pygments/formatters/irc.py
+++ b/pygments/formatters/irc.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
pygments.formatters.irc
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ~~~~~~~~~~~~~~~~~~~~~~~
Formatter for IRC output
diff --git a/pygments/formatters/latex.py b/pygments/formatters/latex.py
index 15e68e37..66d521f5 100644
--- a/pygments/formatters/latex.py
+++ b/pygments/formatters/latex.py
@@ -413,11 +413,18 @@ class LatexFormatter(Formatter):
outfile.write(u'\\end{' + self.envname + u'}\n')
if self.full:
+ encoding = self.encoding or 'utf8'
+ # map known existings encodings from LaTeX distribution
+ encoding = {
+ 'utf_8': 'utf8',
+ 'latin_1': 'latin1',
+ 'iso_8859_1': 'latin1',
+ }.get(encoding.replace('-', '_'), encoding)
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
- encoding = self.encoding or 'utf8',
+ encoding = encoding,
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
index af311955..5110bc9e 100644
--- a/pygments/formatters/terminal256.py
+++ b/pygments/formatters/terminal256.py
@@ -27,6 +27,8 @@
import sys
from pygments.formatter import Formatter
+from pygments.console import codes
+from pygments.style import ansicolors
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
@@ -47,9 +49,21 @@ class EscapeSequence:
def color_string(self):
attrs = []
if self.fg is not None:
- attrs.extend(("38", "5", "%i" % self.fg))
+ if self.fg in ansicolors:
+ esc = codes[self.fg[5:]]
+ if ';01m' in esc:
+ self.bold = True
+ # extract fg color code.
+ attrs.append(esc[2:4])
+ else:
+ attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
- attrs.extend(("48", "5", "%i" % self.bg))
+ if self.bg in ansicolors:
+ esc = codes[self.bg[5:]]
+ # extract fg color code, add 10 for bg.
+ attrs.append(str(int(esc[2:4])+10))
+ else:
+ attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
@@ -91,6 +105,11 @@ class Terminal256Formatter(Formatter):
.. versionadded:: 0.9
+ .. versionchanged:: 2.2
+ If the used style defines foreground colors in the form ``#ansi*``, then
+ `Terminal256Formatter` will map these to non extended foreground color.
+ See :ref:`AnsiTerminalStyle` for more information.
+
Options accepted:
`style`
@@ -169,6 +188,10 @@ class Terminal256Formatter(Formatter):
def _color_index(self, color):
index = self.best_match.get(color, None)
+ if color in ansicolors:
+ # strip the `#ansi` part and look up code
+ index = color
+ self.best_match[color] = index
if index is None:
try:
rgb = int(str(color), 16)
@@ -185,9 +208,14 @@ class Terminal256Formatter(Formatter):
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
- if ndef['color']:
+ # get foreground from ansicolor if set
+ if ndef['ansicolor']:
+ escape.fg = self._color_index(ndef['ansicolor'])
+ elif ndef['color']:
escape.fg = self._color_index(ndef['color'])
- if ndef['bgcolor']:
+ if ndef['bgansicolor']:
+ escape.bg = self._color_index(ndef['bgansicolor'])
+ elif ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
diff --git a/pygments/lexer.py b/pygments/lexer.py
index dd6c01e4..f16d8106 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -319,8 +319,8 @@ def bygroups(*args):
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
- for item in action(
- lexer, _PseudoMatch(match.start(i + 1), data), ctx):
+ for item in action(lexer,
+ _PseudoMatch(match.start(i + 1), data), ctx):
if item:
yield item
if ctx:
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index 7d0b89d4..d64f163f 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -72,6 +72,28 @@ def find_lexer_class(name):
return cls
+def find_lexer_class_by_name(alias):
+ """Lookup a lexer class by alias.
+
+ Like `get_lexer_by_name`, but does not instantiate the class.
+
+ .. versionadded:: 2.2
+ """
+ if not _alias:
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+ # lookup builtin lexers
+ for module_name, name, aliases, _, _ in itervalues(LEXERS):
+ if _alias.lower() in aliases:
+ if name not in _lexer_cache:
+ _load_lexers(module_name)
+ return _lexer_cache[name]
+ # continue with lexers from setuptools entrypoints
+ for cls in find_plugin_lexers():
+ if _alias.lower() in cls.aliases:
+ return cls
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
diff --git a/pygments/lexers/_csound_builtins.py b/pygments/lexers/_csound_builtins.py
index 5f7a798a..a88e0a83 100644
--- a/pygments/lexers/_csound_builtins.py
+++ b/pygments/lexers/_csound_builtins.py
@@ -1,4 +1,11 @@
# -*- coding: utf-8 -*-
+"""
+ pygments.lexers._csound_builtins
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
# Opcodes in Csound 6.05 from
# csound --list-opcodes
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 272d7acf..cc6764a7 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -1,452 +1,476 @@
-# -*- coding: utf-8 -*-
-"""
- pygments.lexers._mapping
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- Lexer mapping definitions. This file is generated by itself. Everytime
- you change something on a builtin lexer definition, run this script from
- the lexers folder to update it.
-
- Do not alter the LEXERS dictionary by hand.
-
- :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from __future__ import print_function
-
-LEXERS = {
- 'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
- 'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
- 'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
- 'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
- 'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
- 'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
- 'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
- 'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
- 'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
- 'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
- 'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
- 'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
- 'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
- 'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
- 'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
- 'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
- 'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
- 'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
- 'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
- 'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
- 'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
- 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
- 'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
- 'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
- 'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
- 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
- 'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
- 'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
- 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
- 'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
- 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
- 'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
- 'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
- 'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
- 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
- 'BoogieLexer': ('pygments.lexers.esoteric', 'Boogie', ('boogie',), ('*.bpl',), ()),
- 'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
- 'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
- 'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
- 'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
- 'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
- 'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
- 'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
- 'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
- 'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
- 'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
- 'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
- 'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
- 'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
- 'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
- 'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
- 'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
- 'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
- 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
- 'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
- 'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
- 'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
- 'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
- 'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
- 'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
- 'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
- 'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
- 'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
- 'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
- 'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
- 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
- 'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
- 'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
- 'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
- 'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
- 'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
- 'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
- 'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
- 'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
- 'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
- 'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', (), ('*.csd',), ()),
- 'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', (), ('*.orc',), ()),
- 'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', (), ('*.sco',), ()),
- 'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
- 'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
- 'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
- 'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
- 'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
- 'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
- 'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
- 'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
- 'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
- 'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
- 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
- 'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
- 'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
- 'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
- 'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
- 'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
- 'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
- 'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
- 'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
- 'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
- 'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
- 'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
- 'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
- 'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
- 'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
- 'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
- 'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
- 'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
- 'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
- 'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
- 'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
- 'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
- 'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
- 'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
- 'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
- 'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
- 'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
- 'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
- 'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
- 'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
- 'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
- 'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
- 'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
- 'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
- 'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
- 'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
- 'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
- 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
- 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
- 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
- 'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
- 'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
- 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
- 'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
- 'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
- 'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
- 'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
- 'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
- 'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
- 'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
- 'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
- 'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
- 'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
- 'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
- 'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
- 'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
- 'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
- 'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
- 'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
- 'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
- 'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
- 'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
- 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
- 'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
- 'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
- 'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
- 'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
- 'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
- 'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
- 'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
- 'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
- 'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
- 'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
- 'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
- 'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
- 'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
- 'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
- 'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
- 'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
- 'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
- 'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
- 'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
- 'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
- 'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
- 'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
- 'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
- 'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
- 'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
- 'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
- 'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
- 'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
- 'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
- 'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
- 'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
- 'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
- 'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
- 'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
- 'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
- 'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
- 'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
- 'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
- 'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
- 'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
- 'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
- 'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
- 'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
- 'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
- 'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
- 'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
- 'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
- 'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
- 'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
- 'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
- 'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
- 'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
- 'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
- 'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
- 'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
- 'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
- 'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
- 'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
- 'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
- 'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
- 'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
- 'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
- 'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
- 'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
- 'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
- 'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
- 'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
- 'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
- 'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
- 'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
- 'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
- 'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
- 'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
- 'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
- 'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
- 'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
- 'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
- 'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
- 'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
- 'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
- 'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
- 'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
- 'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
- 'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
- 'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
- 'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
- 'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
- 'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
- 'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
- 'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
- 'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
- 'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
- 'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
- 'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
- 'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
- 'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
- 'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
- 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
- 'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
- 'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
- 'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
- 'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
- 'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
- 'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
- 'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
- 'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
- 'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
- 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
- 'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
- 'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
- 'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
- 'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
- 'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
- 'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
- 'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
- 'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
- 'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
- 'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
- 'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
- 'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
- 'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
- 'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
- 'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
- 'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
- 'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
- 'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
- 'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
- 'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
- 'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
- 'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
- 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
- 'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
- 'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
- 'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
- 'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
- 'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
- 'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
- 'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
- 'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
- 'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
- 'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
- 'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
- 'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
- 'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
- 'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
- 'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
- 'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
- 'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
- 'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
- 'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
- 'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
- 'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
- 'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
- 'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
- 'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
- 'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
- 'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
- 'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
- 'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
- 'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
- 'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
- 'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
- 'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
- 'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
- 'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
- 'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
- 'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
- 'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
- 'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
- 'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
- 'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
- 'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
- 'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
- 'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
- 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
- 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs',), ('text/rust',)),
- 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
- 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
- 'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
- 'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
- 'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
- 'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
- 'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
- 'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
- 'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
- 'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
- 'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
- 'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
- 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
- 'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
- 'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
- 'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
- 'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
- 'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
- 'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
- 'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
- 'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
- 'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
- 'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
- 'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
- 'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
- 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
- 'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
- 'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
- 'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
- 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
- 'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
- 'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
- 'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
- 'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
- 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
- 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
- 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
- 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
- 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
- 'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
- 'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
- 'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
- 'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
- 'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
- 'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
- 'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
- 'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
- 'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
- 'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
- 'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
- 'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
- 'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
- 'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
- 'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
- 'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
- 'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
- 'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
- 'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
- 'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
- 'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
- 'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
- 'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
- 'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
- 'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
- 'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
- 'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
-}
-
-if __name__ == '__main__': # pragma: no cover
- import sys
- import os
-
- # lookup lexers
- found_lexers = []
- sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
- for root, dirs, files in os.walk('.'):
- for filename in files:
- if filename.endswith('.py') and not filename.startswith('_'):
- module_name = 'pygments.lexers%s.%s' % (
- root[1:].replace('/', '.'), filename[:-3])
- print(module_name)
- module = __import__(module_name, None, None, [''])
- for lexer_name in module.__all__:
- lexer = getattr(module, lexer_name)
- found_lexers.append(
- '%r: %r' % (lexer_name,
- (module_name,
- lexer.name,
- tuple(lexer.aliases),
- tuple(lexer.filenames),
- tuple(lexer.mimetypes))))
- # sort them to make the diff minimal
- found_lexers.sort()
-
- # extract useful sourcecode from this file
- with open(__file__) as fp:
- content = fp.read()
- header = content[:content.find('LEXERS = {')]
- footer = content[content.find("if __name__ == '__main__':"):]
-
- # write new file
- with open(__file__, 'w') as fp:
- fp.write(header)
- fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
- fp.write(footer)
-
- print ('=== %d lexers processed.' % len(found_lexers))
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers._mapping
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer mapping definitions. This file is generated by itself. Every time
+    you change something on a builtin lexer definition, run this script from
+    the lexers folder to update it.
+
+ Do not alter the LEXERS dictionary by hand.
+
+ :copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+LEXERS = {
+ 'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
+ 'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
+ 'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
+ 'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
+ 'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
+ 'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+ 'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
+ 'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
+ 'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
+ 'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
+ 'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
+ 'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
+ 'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
+ 'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
+ 'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
+ 'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
+ 'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
+ 'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
+ 'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
+ 'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
+ 'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
+ 'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
+ 'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
+ 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
+ 'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
+ 'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
+ 'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
+ 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
+ 'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
+ 'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
+ 'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
+ 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')),
+ 'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
+ 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
+ 'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
+ 'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
+ 'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
+ 'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
+ 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
+ 'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
+ 'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
+ 'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
+ 'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
+ 'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
+ 'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
+ 'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
+ 'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
+ 'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
+ 'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
+ 'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
+ 'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
+ 'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
+ 'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
+ 'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
+ 'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
+ 'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
+ 'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
+ 'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
+ 'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
+ 'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
+ 'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
+ 'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
+ 'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
+ 'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
+ 'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
+ 'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
+ 'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
+ 'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
+ 'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
+ 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
+ 'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
+ 'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
+ 'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
+ 'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
+ 'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
+ 'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
+ 'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
+ 'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
+ 'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
+ 'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
+ 'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc',), ()),
+ 'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
+ 'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
+ 'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
+ 'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
+ 'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
+ 'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
+ 'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
+ 'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
+ 'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
+ 'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
+ 'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
+ 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
+ 'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
+ 'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
+ 'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
+ 'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
+ 'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
+ 'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
+ 'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
+ 'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
+ 'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
+ 'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
+ 'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
+ 'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
+ 'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
+ 'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
+ 'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
+ 'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
+ 'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
+ 'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
+ 'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
+ 'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
+ 'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
+ 'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
+ 'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
+ 'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
+ 'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
+ 'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
+ 'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
+ 'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
+ 'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
+ 'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
+ 'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+ 'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
+ 'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
+ 'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
+ 'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
+ 'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
+ 'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
+ 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
+ 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
+ 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
+ 'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
+ 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
+ 'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
+ 'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
+ 'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
+ 'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
+ 'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
+ 'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
+ 'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
+ 'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
+ 'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
+ 'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
+ 'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
+ 'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
+ 'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
+ 'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
+ 'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
+ 'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
+ 'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
+ 'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
+ 'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
+ 'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
+ 'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
+ 'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
+ 'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
+ 'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
+ 'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
+ 'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
+ 'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
+ 'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
+ 'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
+ 'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
+ 'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
+ 'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
+ 'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
+ 'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
+ 'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
+ 'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
+ 'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
+ 'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
+ 'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
+ 'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
+ 'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)),
+ 'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
+ 'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
+ 'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
+ 'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
+ 'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
+ 'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
+ 'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
+ 'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
+ 'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
+ 'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
+ 'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
+ 'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
+ 'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
+ 'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
+ 'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
+ 'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
+ 'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
+ 'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
+ 'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
+ 'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
+ 'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
+ 'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
+ 'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
+ 'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
+ 'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
+ 'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
+ 'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
+ 'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
+ 'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
+ 'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
+ 'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
+ 'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
+ 'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
+ 'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
+ 'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
+ 'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
+ 'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
+ 'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
+ 'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
+ 'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
+ 'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
+ 'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
+ 'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
+ 'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
+ 'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
+ 'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
+ 'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
+ 'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
+ 'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
+ 'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
+ 'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
+ 'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
+ 'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
+ 'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
+ 'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
+ 'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
+ 'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
+ 'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
+ 'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
+ 'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
+ 'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
+ 'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
+ 'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
+ 'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
+ 'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
+ 'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
+ 'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
+ 'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
+ 'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
+ 'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
+ 'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
+ 'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
+ 'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
+ 'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
+ 'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+ 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
+ 'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
+ 'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
+ 'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
+ 'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
+ 'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
+ 'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
+ 'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
+ 'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
+ 'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
+ 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
+ 'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
+ 'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
+ 'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
+ 'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
+ 'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
+ 'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
+ 'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
+ 'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
+ 'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
+ 'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
+ 'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
+ 'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
+ 'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
+ 'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
+ 'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
+ 'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
+ 'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
+ 'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
+ 'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
+ 'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
+ 'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
+ 'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
+ 'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
+ 'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
+ 'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
+ 'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
+ 'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
+ 'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
+ 'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
+ 'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
+ 'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
+ 'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
+ 'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
+ 'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
+ 'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
+ 'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
+ 'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
+ 'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
+ 'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
+ 'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
+ 'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
+ 'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
+ 'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
+ 'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
+ 'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
+ 'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
+ 'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
+ 'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
+ 'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
+ 'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
+ 'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
+ 'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
+ 'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
+ 'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
+ 'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
+ 'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
+ 'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
+ 'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()),
+ 'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
+ 'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
+ 'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
+ 'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
+ 'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)),
+ 'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
+ 'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
+ 'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
+ 'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
+ 'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
+ 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
+ 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs', '*.rs.in'), ('text/rust',)),
+ 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
+ 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
+ 'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
+ 'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
+ 'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
+ 'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
+ 'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
+ 'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
+ 'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
+ 'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil',), ()),
+ 'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
+ 'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
+ 'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
+ 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
+ 'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
+ 'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
+ 'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
+ 'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
+ 'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
+ 'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
+ 'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
+ 'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
+ 'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
+ 'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
+ 'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
+ 'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
+ 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
+ 'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
+ 'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
+ 'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
+ 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
+ 'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
+ 'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
+ 'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
+ 'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
+ 'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
+ 'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
+ 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
+ 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
+ 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
+ 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
+ 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
+ 'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
+ 'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
+ 'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts',), ('text/x-typescript',)),
+ 'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
+ 'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
+ 'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.ts', '*.txt'), ('text/x-typoscript',)),
+ 'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
+ 'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
+ 'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
+ 'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
+ 'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
+ 'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
+ 'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
+ 'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
+ 'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
+ 'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
+ 'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
+ 'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
+ 'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
+ 'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
+ 'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
+ 'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
+ 'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
+ 'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
+ 'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
+ 'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
+ 'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
+ 'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
+ 'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
+ 'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
+ 'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
+ 'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
+}
+
+if __name__ == '__main__': # pragma: no cover
+ import sys
+ import os
+
+ # lookup lexers
+ found_lexers = []
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
+ for root, dirs, files in os.walk('.'):
+ for filename in files:
+ if filename.endswith('.py') and not filename.startswith('_'):
+ module_name = 'pygments.lexers%s.%s' % (
+ root[1:].replace('/', '.'), filename[:-3])
+ print(module_name)
+ module = __import__(module_name, None, None, [''])
+ for lexer_name in module.__all__:
+ lexer = getattr(module, lexer_name)
+ found_lexers.append(
+ '%r: %r' % (lexer_name,
+ (module_name,
+ lexer.name,
+ tuple(lexer.aliases),
+ tuple(lexer.filenames),
+ tuple(lexer.mimetypes))))
+ # sort them to make the diff minimal
+ found_lexers.sort()
+
+ # extract useful sourcecode from this file
+ with open(__file__) as fp:
+ content = fp.read()
+ # Replace CRLF line endings with LF (for checkouts made on Windows).
+ #
+ # Note that, originally, contributors should keep the line endings
+ # of the master repository, for example by using some kind of
+ # automatic EOL management, like the `EolExtension
+ # <https://www.mercurial-scm.org/wiki/EolExtension>`.
+ content = content.replace("\r\n", "\n")
+ header = content[:content.find('LEXERS = {')]
+ footer = content[content.find("if __name__ == '__main__':"):]
+
+ # write new file
+ with open(__file__, 'w') as fp:
+ fp.write(header)
+ fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
+ fp.write(footer)
+
+ print ('=== %d lexers processed.' % len(found_lexers))
diff --git a/pygments/lexers/algebra.py b/pygments/lexers/algebra.py
index 873b1bf2..79460ad4 100644
--- a/pygments/lexers/algebra.py
+++ b/pygments/lexers/algebra.py
@@ -15,7 +15,7 @@ from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
-__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer']
+__all__ = ['GAPLexer', 'MathematicaLexer', 'MuPADLexer', 'BCLexer']
class GAPLexer(RegexLexer):
@@ -65,7 +65,7 @@ class GAPLexer(RegexLexer):
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
- ]
+ ],
}
@@ -104,9 +104,9 @@ class MathematicaLexer(RegexLexer):
(r'#\d*', Name.Variable),
(r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
- (r'-?[0-9]+\.[0-9]*', Number.Float),
- (r'-?[0-9]*\.[0-9]+', Number.Float),
- (r'-?[0-9]+', Number.Integer),
+ (r'-?\d+\.\d*', Number.Float),
+ (r'-?\d*\.\d+', Number.Float),
+ (r'-?\d+', Number.Integer),
(words(operators), Operator),
(words(punctuation), Punctuation),
@@ -183,5 +183,39 @@ class MuPADLexer(RegexLexer):
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
- ]
+ ],
+ }
+
+
+class BCLexer(RegexLexer):
+ """
+ A `BC <https://www.gnu.org/software/bc/>`_ lexer.
+
+ .. versionadded:: 2.1
+ """
+ name = 'BC'
+ aliases = ['bc']
+ filenames = ['*.bc']
+
+ tokens = {
+ 'root': [
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'"(?:[^"\\]|\\.)*"', String),
+ (r'[{}();,]', Punctuation),
+ (words(('if', 'else', 'while', 'for', 'break', 'continue',
+ 'halt', 'return', 'define', 'auto', 'print', 'read',
+ 'length', 'scale', 'sqrt', 'limits', 'quit',
+ 'warranty'), suffix=r'\b'), Keyword),
+ (r'\+\+|--|\|\||&&|'
+ r'([-<>+*%\^/!=])=?', Operator),
+ # bc doesn't support exponential notation
+ (r'[0-9]+(\.[0-9]*)?', Number),
+ (r'\.[0-9]+', Number),
+ (r'.', Text)
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
}
diff --git a/pygments/lexers/ampl.py b/pygments/lexers/ampl.py
new file mode 100644
index 00000000..c3ca80d4
--- /dev/null
+++ b/pygments/lexers/ampl.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.ampl
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the AMPL language. <http://ampl.com/>
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, using, this, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['AmplLexer']
+
+
+class AmplLexer(RegexLexer):
+ """
+ For AMPL source code.
+
+ .. versionadded:: 2.2
+ """
+ name = 'Ampl'
+ aliases = ['ampl']
+ filenames = ['*.run']
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'\s+', Text.Whitespace),
+ (r'#.*?\n', Comment.Single),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (words((
+ 'call', 'cd', 'close', 'commands', 'data', 'delete', 'display',
+ 'drop', 'end', 'environ', 'exit', 'expand', 'include', 'load',
+ 'model', 'objective', 'option', 'problem', 'purge', 'quit',
+ 'redeclare', 'reload', 'remove', 'reset', 'restore', 'shell',
+ 'show', 'solexpand', 'solution', 'solve', 'update', 'unload',
+ 'xref', 'coeff', 'coef', 'cover', 'obj', 'interval', 'default',
+ 'from', 'to', 'to_come', 'net_in', 'net_out', 'dimen',
+ 'dimension', 'check', 'complements', 'write', 'function',
+ 'pipe', 'format', 'if', 'then', 'else', 'in', 'while', 'repeat',
+ 'for'), suffix=r'\b'), Keyword.Reserved),
+ (r'(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)',
+ Keyword.Type),
+ (r'\".*?\"', String.Double),
+ (r'\'.*?\'', String.Single),
+ (r'[()\[\]{},;:]+', Punctuation),
+ (r'\b(\w+)(\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|'
+ r'lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|'
+ r'ub|urc|uslack|val)',
+ bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
+ (r'(set|param|var|arc|minimize|maximize|subject to|s\.t\.|subj to|'
+ r'node|table|suffix|read table|write table)(\s+)(\w+)',
+ bygroups(Keyword.Declaration, Text, Name.Variable)),
+ (r'(param)(\s*)(:)(\s*)(\w+)(\s*)(:)(\s*)((\w|\s)+)',
+ bygroups(Keyword.Declaration, Text, Punctuation, Text,
+ Name.Variable, Text, Punctuation, Text, Name.Variable)),
+ (r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)',
+ bygroups(Keyword.Declaration, Text, using(this), Text, Name.Variable)),
+ (words((
+ 'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2',
+ 'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10',
+ 'max', 'min', 'precision', 'round', 'sin', 'sinh', 'sqrt', 'tan',
+ 'tanh', 'time', 'trunc', 'Beta', 'Cauchy', 'Exponential', 'Gamma',
+ 'Irand224', 'Normal', 'Normal01', 'Poisson', 'Uniform', 'Uniform01',
+ 'num', 'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
+ 'match', 'sub', 'gsub', 'print', 'printf', 'next', 'nextw', 'prev',
+ 'prevw', 'first', 'last', 'ord', 'ord0', 'card', 'arity',
+ 'indexarity'), prefix=r'\b', suffix=r'\b'), Name.Builtin),
+ (r'(\+|\-|\*|/|\*\*|=|<=|>=|==|\||\^|<|>|\!|\.\.|:=|\&|\!=|<<|>>)',
+ Operator),
+ (words((
+ 'or', 'exists', 'forall', 'and', 'in', 'not', 'within', 'union',
+ 'diff', 'difference', 'symdiff', 'inter', 'intersect',
+ 'intersection', 'cross', 'setof', 'by', 'less', 'sum', 'prod',
+ 'product', 'div', 'mod'), suffix=r'\b'),
+ Keyword.Reserved), # Operator.Name would fit, but is not emphasized enough by most styles
+ (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
+ (r'\d+([eE][+-]?\d+)?', Number.Integer),
+ (r'[+-]?Infinity', Number.Integer),
+ (r'(\w+|(\.(?!\.)))', Text)
+ ]
+
+ }
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 2ddc2a6a..760dd720 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -11,15 +11,16 @@
import re
-from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
+from pygments.lexer import RegexLexer, include, bygroups, using, words, \
+ DelegatingLexer
from pygments.lexers.c_cpp import CppLexer, CLexer
from pygments.lexers.d import DLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator
__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
- 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'NasmLexer', 'NasmObjdumpLexer',
- 'Ca65Lexer']
+ 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'NasmLexer',
+ 'NasmObjdumpLexer', 'Ca65Lexer']
class GasLexer(RegexLexer):
@@ -87,7 +88,7 @@ class GasLexer(RegexLexer):
(r'#.*?\n', Comment)
],
'punctuation': [
- (r'[-*,.():]+', Punctuation)
+ (r'[-*,.()\[\]!:]+', Punctuation)
]
}
@@ -201,6 +202,8 @@ class CObjdumpLexer(DelegatingLexer):
class HsailLexer(RegexLexer):
"""
For HSAIL assembly code.
+
+ .. versionadded:: 2.2
"""
name = 'HSAIL'
aliases = ['hsail', 'hsa']
@@ -218,43 +221,39 @@ class HsailLexer(RegexLexer):
allocQual = r'(alloc\(agent\))'
# Instruction Modifiers
roundingMod = (r'((_ftz)?(_up|_down|_zero|_near))')
- datatypeMod = (r'_('
- # baseTypes
- r'u8|s8|u16|s16|u32|s32|u64|s64'
- r'|b1|b8|b16|b32|b64|b128'
- r'|f16|f32|f64'
- # packedTypes
- r'|u8x4| s8x4| u16x2| s16x2| u8x8| s8x8| u16x4| s16x4| u32x2| s32x2| u8x16| s8x16| u16x8| s16x8| u32x4| s32x4| u64x2| s64x2'
- r'|f16x2| f16x4| f16x8'
- r'|f32x2| f32x4'
- r'|f64x2'
- # opaqueType
- r'|roimg| woimg| rwimg'
- r'|samp'
- r'|sig32| sig64'
- r')')
-
+ datatypeMod = (r'_('
+ # packedTypes
+ r'u8x4|s8x4|u16x2|s16x2|u8x8|s8x8|u16x4|s16x4|u32x2|s32x2|'
+ r'u8x16|s8x16|u16x8|s16x8|u32x4|s32x4|u64x2|s64x2|'
+ r'f16x2|f16x4|f16x8|f32x2|f32x4|f64x2|'
+ # baseTypes
+ r'u8|s8|u16|s16|u32|s32|u64|s64|'
+ r'b128|b8|b16|b32|b64|b1|'
+ r'f16|f32|f64|'
+ # opaqueType
+ r'roimg|woimg|rwimg|samp|sig32|sig64)')
+
# Numeric Constant
float = r'((\d+\.)|(\d*\.\d+))[eE][+-]?\d+'
hexfloat = r'0[xX](([0-9a-fA-F]+\.[0-9a-fA-F]*)|([0-9a-fA-F]*\.[0-9a-fA-F]+))[pP][+-]?\d+'
- ieeefloat= r'0((h|H)[0-9a-fA-F]{4}|(f|F)[0-9a-fA-F]{8}|(d|D)[0-9a-fA-F]{16})'
-
+ ieeefloat = r'0((h|H)[0-9a-fA-F]{4}|(f|F)[0-9a-fA-F]{8}|(d|D)[0-9a-fA-F]{16})'
+
tokens = {
'root': [
include('whitespace'),
include('comments'),
-
+
(string, String),
(r'@' + identifier + ':', Name.Label),
(register, Name.Variable.Anonymous),
-
+
include('keyword'),
-
+
(r'&' + identifier, Name.Variable.Global),
(r'%' + identifier, Name.Variable),
-
+
(hexfloat, Number.Hex),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(ieeefloat, Number.Float),
@@ -272,65 +271,70 @@ class HsailLexer(RegexLexer):
],
'keyword': [
# Types
- (r'kernarg'+datatypeMod,
- Keyword.Type),
-
+ (r'kernarg' + datatypeMod, Keyword.Type),
+
# Regular keywords
- (r'(\$full|\$base'
- r'|\$small|\$large'
- r'|\$default|\$zero|\$near)', Keyword),
- (r'(module|extension'
- r'|pragma'
- r'|prog|indirect|signature|decl'
- r'|kernel|function'
- r'|enablebreakexceptions|enabledetectexceptions|maxdynamicgroupsize|maxflatgridsize|maxflatworkgroupsize|requireddim|requiredgridsize|requiredworkgroupsize|requirenopartialworkgroups'
- r')\b', Keyword),
+ (r'\$(full|base|small|large|default|zero|near)', Keyword),
+ (words((
+ 'module', 'extension', 'pragma', 'prog', 'indirect', 'signature',
+ 'decl', 'kernel', 'function', 'enablebreakexceptions',
+ 'enabledetectexceptions', 'maxdynamicgroupsize', 'maxflatgridsize',
+ 'maxflatworkgroupsize', 'requireddim', 'requiredgridsize',
+ 'requiredworkgroupsize', 'requirenopartialworkgroups'),
+ suffix=r'\b'), Keyword),
+
+ # instructions
+ (roundingMod, Keyword),
+ (datatypeMod, Keyword),
+ (r'_(' + alignQual + '|' + widthQual + ')', Keyword),
+ (r'_kernarg', Keyword),
+ (r'(nop|imagefence)\b', Keyword),
+ (words((
+ 'cleardetectexcept', 'clock', 'cuid', 'debugtrap', 'dim',
+ 'getdetectexcept', 'groupbaseptr', 'kernargbaseptr', 'laneid',
+ 'maxcuid', 'maxwaveid', 'packetid', 'setdetectexcept', 'waveid',
+ 'workitemflatabsid', 'workitemflatid', 'nullptr', 'abs', 'bitrev',
+ 'currentworkgroupsize', 'currentworkitemflatid', 'fract', 'ncos',
+ 'neg', 'nexp2', 'nlog2', 'nrcp', 'nrsqrt', 'nsin', 'nsqrt',
+ 'gridgroups', 'gridsize', 'not', 'sqrt', 'workgroupid',
+ 'workgroupsize', 'workitemabsid', 'workitemid', 'ceil', 'floor',
+ 'rint', 'trunc', 'add', 'bitmask', 'borrow', 'carry', 'copysign',
+ 'div', 'rem', 'sub', 'shl', 'shr', 'and', 'or', 'xor', 'unpackhi',
+ 'unpacklo', 'max', 'min', 'fma', 'mad', 'bitextract', 'bitselect',
+ 'shuffle', 'cmov', 'bitalign', 'bytealign', 'lerp', 'nfma', 'mul',
+ 'mulhi', 'mul24hi', 'mul24', 'mad24', 'mad24hi', 'bitinsert',
+ 'combine', 'expand', 'lda', 'mov', 'pack', 'unpack', 'packcvt',
+ 'unpackcvt', 'sad', 'sementp', 'ftos', 'stof', 'cmp', 'ld', 'st',
+ '_eq', '_ne', '_lt', '_le', '_gt', '_ge', '_equ', '_neu', '_ltu',
+ '_leu', '_gtu', '_geu', '_num', '_nan', '_seq', '_sne', '_slt',
+ '_sle', '_sgt', '_sge', '_snum', '_snan', '_sequ', '_sneu', '_sltu',
+ '_sleu', '_sgtu', '_sgeu', 'atomic', '_ld', '_st', '_cas', '_add',
+ '_and', '_exch', '_max', '_min', '_or', '_sub', '_wrapdec',
+ '_wrapinc', '_xor', 'ret', 'cvt', '_readonly', '_kernarg', '_global',
+ 'br', 'cbr', 'sbr', '_scacq', '_screl', '_scar', '_rlx', '_wave',
+ '_wg', '_agent', '_system', 'ldimage', 'stimage', '_v2', '_v3', '_v4',
+ '_1d', '_2d', '_3d', '_1da', '_2da', '_1db', '_2ddepth', '_2dadepth',
+ '_width', '_height', '_depth', '_array', '_channelorder',
+ '_channeltype', 'querysampler', '_coord', '_filter', '_addressing',
+ 'barrier', 'wavebarrier', 'initfbar', 'joinfbar', 'waitfbar',
+ 'arrivefbar', 'leavefbar', 'releasefbar', 'ldf', 'activelaneid',
+ 'activelanecount', 'activelanemask', 'activelanepermute', 'call',
+ 'scall', 'icall', 'alloca', 'packetcompletionsig',
+ 'addqueuewriteindex', 'casqueuewriteindex', 'ldqueuereadindex',
+ 'stqueuereadindex', 'readonly', 'global', 'private', 'group',
+ 'spill', 'arg', '_upi', '_downi', '_zeroi', '_neari', '_upi_sat',
+ '_downi_sat', '_zeroi_sat', '_neari_sat', '_supi', '_sdowni',
+ '_szeroi', '_sneari', '_supi_sat', '_sdowni_sat', '_szeroi_sat',
+ '_sneari_sat', '_pp', '_ps', '_sp', '_ss', '_s', '_p', '_pp_sat',
+ '_ps_sat', '_sp_sat', '_ss_sat', '_s_sat', '_p_sat'),
+ suffix=r'\b'), Keyword),
- # instructions
- (roundingMod, Keyword),
- (datatypeMod, Keyword),
- (r'_(' + alignQual + '|' + widthQual + ')', Keyword),
- (r'_kernarg', Keyword),
- (r'(nop|imagefence)\b', Keyword),
- (r'(cleardetectexcept|clock|cuid|debugtrap|dim|getdetectexcept|groupbaseptr|kernargbaseptr|laneid|maxcuid|maxwaveid|packetid|setdetectexcept|waveid|workitemflatabsid|workitemflatid|nullptr'
- r'|abs|bitrev|currentworkgroupsize|currentworkitemflatid|fract|ncos|neg|nexp2|nlog2|nrcp|nrsqrt|nsin|nsqrt|gridgroups|gridsize|not|sqrt|workgroupid|workgroupsize|workitemabsid|workitemid'
- r'|ceil|floor|rint|trunc'
- r'|add|bitmask|borrow|carry|copysign|div|rem|sub|shl|shr|and|or|xor|unpackhi|unpacklo'
- r'|max|min'
- r'|fma|mad|bitextract|bitselect|shuffle|cmov|bitalign|bytealign|lerp|nfma'
- r'|mul|mulhi|mul24hi|mul24|mad24|mad24hi|bitinsert|combine|expand|lda|mov'
- r'|pack|unpack|packcvt|unpackcvt|sad|sementp|ftos|stof'
- r'|cmp|ld|st'
- r'|_eq|_ne|_lt|_le|_gt|_ge|_equ|_neu|_ltu|_leu|_gtu|_geu|_num|_nan|_seq|_sne|_slt|_sle|_sgt|_sge|_snum|_snan|_sequ|_sneu|_sltu|_sleu|_sgtu|_sgeu'
- r'|atomic|_ld|_st|_cas|_add|_and|_exch|_max|_min|_or|_sub|_wrapdec|_wrapinc|_xor'
- r'|ret|cvt'
- r'|_readonly|_kernarg|_global'
-
- r'|br|cbr|sbr'
- r'_scacq|_screl|_scar|_rlx'
- r'_wave|_wg|_agent|_system'
- r'|ldimage|stimage'
- r'|_v2|_v3|_v4'
- r'|_1d|_2d|_3d|_1da|_2da|_1db|_2ddepth|_2dadepth'
- r'|_width|_height|_depth|_array|_channelorder|_channeltype'
- r'|querysampler|_coord|_filter|_addressing'
- r'|barrier|wavebarrier'
- r'|initfbar|joinfbar|waitfbar|arrivefbar|leavefbar|releasefbar|ldf'
- r'|activelaneid|activelanecount|activelanemask|activelanepermute'
- r'|call|scall|icall'
- r'|alloca|packetcompletionsig'
- r'|addqueuewriteindex|casqueuewriteindex|ldqueuereadindex|stqueuereadindex'
- r'|readonly|global|private|group|spill|arg'
- r'|_upi|_downi|_zeroi|_neari|_upi_sat|_downi_sat|_zeroi_sat|_neari_sat|_supi|_sdowni|_szeroi|_sneari|_supi_sat|_sdowni_sat|_szeroi_sat|_sneari_sat'
- r'|_pp|_ps|_sp|_ss|_s|_p|_pp_sat|_ps_sat|_sp_sat|_ss_sat|_s_sat|_p_sat'
- r')', Keyword),
-
-
# Integer types
(r'i[1-9]\d*', Keyword)
]
}
-
+
+
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
diff --git a/pygments/lexers/business.py b/pygments/lexers/business.py
index c71d9c28..43978690 100644
--- a/pygments/lexers/business.py
+++ b/pygments/lexers/business.py
@@ -57,9 +57,9 @@ class CobolLexer(RegexLexer):
],
'core': [
# Figurative constants
- (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?'
+ (r'(^|(?<=[^\w\-]))(ALL\s+)?'
r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
- r'\s*($|(?=[^0-9a-z_\-]))',
+ r'\s*($|(?=[^\w\-]))',
Name.Constant),
# Reserved words STATEMENTS and other bolds
@@ -79,8 +79,8 @@ class CobolLexer(RegexLexer):
'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET',
'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS',
'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE',
- 'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^0-9a-z_\-]))',
- suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
+ 'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^\w\-]))',
+ suffix=r'\s*($|(?=[^\w\-]))'),
Keyword.Reserved),
# Reserved words
@@ -89,33 +89,33 @@ class CobolLexer(RegexLexer):
'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER',
'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE'
'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS',
- 'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC', 'AUTOTERMINATE',
- 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
+ 'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC',
+ 'AUTOTERMINATE', 'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING',
- 'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL', 'COLLATING',
- 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE', 'COMMIT', 'COMMON',
- 'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
+ 'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL',
+ 'COLLATING', 'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE',
+ 'COMMIT', 'COMMON', 'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT',
- 'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE', 'DEBUGGING',
- 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
+ 'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE',
+ 'DEBUGGING', 'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK',
'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC',
'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP',
'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION',
- 'EXCLUSIVE', 'EXTEND', 'EXTERNAL',
- 'FILE-ID', 'FILLER', 'FINAL', 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
- 'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL', 'FUNCTION',
- 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
+ 'EXCLUSIVE', 'EXTEND', 'EXTERNAL', 'FILE-ID', 'FILLER', 'FINAL',
+ 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
+ 'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL',
+ 'FUNCTION', 'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
'HEADING', 'HIGHLIGHT', 'I-O', 'ID',
'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE',
- 'INITIAL', 'INITIALIZED', 'INPUT',
- 'INTO', 'INTRINSIC', 'INVALID', 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
+ 'INITIAL', 'INITIALIZED', 'INPUT', 'INTO', 'INTRINSIC', 'INVALID',
+ 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE',
'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK',
- 'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE',
- 'MULTIPLE', 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE',
- 'NEGATIVE', 'NEXT', 'NO', 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC',
- 'NUMERIC-EDITED', 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
+ 'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE', 'MULTIPLE',
+ 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE', 'NEGATIVE', 'NEXT', 'NO',
+ 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC', 'NUMERIC-EDITED',
+ 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW',
'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH',
'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS',
@@ -137,40 +137,42 @@ class CobolLexer(RegexLexer):
'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP',
'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING',
'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'),
- prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
+ prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
Keyword.Pseudo),
# inactive reserved words
(words((
- 'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE', 'B-AND',
- 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER', 'CF', 'CH', 'CHAIN', 'CLASS-ID',
- 'CLASSIFICATION', 'COMMUNICATION', 'CONDITION', 'DATA-POINTER',
- 'DESTINATION', 'DISABLE', 'EC', 'EGI', 'EMI', 'ENABLE', 'END-RECEIVE',
- 'ENTRY-CONVENTION', 'EO', 'ESI', 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY',
- 'FLOAT-BINARY-16', 'FLOAT-BINARY-34', 'FLOAT-BINARY-7',
- 'FLOAT-DECIMAL-16', 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT',
- 'FUNCTION-POINTER', 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY',
- 'INHERITS', 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
+ 'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE',
+ 'B-AND', 'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER',
+ 'CF', 'CH', 'CHAIN', 'CLASS-ID', 'CLASSIFICATION', 'COMMUNICATION',
+ 'CONDITION', 'DATA-POINTER', 'DESTINATION', 'DISABLE', 'EC', 'EGI',
+ 'EMI', 'ENABLE', 'END-RECEIVE', 'ENTRY-CONVENTION', 'EO', 'ESI',
+ 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY', 'FLOAT-BINARY-16',
+ 'FLOAT-BINARY-34', 'FLOAT-BINARY-7', 'FLOAT-DECIMAL-16',
+ 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT', 'FUNCTION-POINTER',
+ 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY', 'INHERITS',
+ 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME',
- 'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE', 'NORMAL',
- 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE', 'PAGE-COUNTER', 'PF', 'PH',
- 'PROPERTY', 'PROTOTYPE', 'PURGE', 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE',
- 'RELATION', 'REPLACE', 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY',
- 'RF', 'RH', 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT', 'STEP',
- 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3', 'SUPER', 'SYMBOL',
- 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT', 'TYPEDEF', 'UCS-4', 'UNIVERSAL',
- 'USER-DEFAULT', 'UTF-16', 'UTF-8', 'VAL-STATUS', 'VALID', 'VALIDATE',
- 'VALIDATE-STATUS'),
- prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
+ 'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE',
+ 'NORMAL', 'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE',
+ 'PAGE-COUNTER', 'PF', 'PH', 'PROPERTY', 'PROTOTYPE', 'PURGE',
+ 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE', 'RELATION', 'REPLACE',
+ 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY', 'RF', 'RH',
+ 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT',
+ 'STEP', 'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3',
+ 'SUPER', 'SYMBOL', 'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT',
+ 'TYPEDEF', 'UCS-4', 'UNIVERSAL', 'USER-DEFAULT', 'UTF-16', 'UTF-8',
+ 'VAL-STATUS', 'VALID', 'VALIDATE', 'VALIDATE-STATUS'),
+ prefix=r'(^|(?<=[^\w\-]))', suffix=r'\s*($|(?=[^\w\-]))'),
Error),
# Data Types
- (r'(^|(?<=[^0-9a-z_\-]))'
+ (r'(^|(?<=[^\w\-]))'
r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
r'BINARY-C-LONG|'
r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
- r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type),
+ r'BINARY)\s*($|(?=[^\w\-]))', Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),
@@ -180,7 +182,7 @@ class CobolLexer(RegexLexer):
(r'([(),;:&%.])', Punctuation),
# Intrinsics
- (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
+ (r'(^|(?<=[^\w\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
@@ -192,13 +194,13 @@ class CobolLexer(RegexLexer):
r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
- r'($|(?=[^0-9a-z_\-]))', Name.Function),
+ r'($|(?=[^\w\-]))', Name.Function),
# Booleans
- (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin),
+ (r'(^|(?<=[^\w\-]))(true|false)\s*($|(?=[^\w\-]))', Name.Builtin),
# Comparing Operators
- (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|'
- r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word),
+ (r'(^|(?<=[^\w\-]))(equal|equals|ne|lt|le|gt|ge|'
+ r'greater|less|than|not|and|or)\s*($|(?=[^\w\-]))', Operator.Word),
],
# \"[^\"\n]*\"|\'[^\'\n]*\'
@@ -244,7 +246,7 @@ class ABAPLexer(RegexLexer):
"""
name = 'ABAP'
aliases = ['abap']
- filenames = ['*.abap']
+ filenames = ['*.abap', '*.ABAP']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
@@ -439,15 +441,15 @@ class OpenEdgeLexer(RegexLexer):
filenames = ['*.p', '*.cls']
mimetypes = ['text/x-openedge', 'application/x-openedge']
- types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
+ types = (r'(?i)(^|(?<=[^\w\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
- r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))')
+ r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^\w\-]))')
keywords = words(OPENEDGEKEYWORDS,
- prefix=r'(?i)(^|(?<=[^0-9a-z_\-]))',
- suffix=r'\s*($|(?=[^0-9a-z_\-]))')
+ prefix=r'(?i)(^|(?<=[^\w\-]))',
+ suffix=r'\s*($|(?=[^\w\-]))')
tokens = {
'root': [
diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py
index 5a7137ea..632871ba 100644
--- a/pygments/lexers/c_cpp.py
+++ b/pygments/lexers/c_cpp.py
@@ -50,8 +50,9 @@ class CFamilyLexer(RegexLexer):
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
- (r'L?"', String, 'string'),
- (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+ (r'(L?)(")', bygroups(String.Affix, String), 'string'),
+ (r"(L?)(')(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])(')",
+ bygroups(String.Affix, String.Char, String.Char, String.Char)),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
@@ -123,7 +124,8 @@ class CFamilyLexer(RegexLexer):
(r'\\', String), # stray backslash
],
'macro': [
- (r'(include)(' + _ws1 + ')([^\n]+)', bygroups(Comment.Preproc, Text, Comment.PreprocFile)),
+ (r'(include)(' + _ws1 + r')([^\n]+)',
+ bygroups(Comment.Preproc, Text, Comment.PreprocFile)),
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
@@ -216,6 +218,12 @@ class CppLexer(CFamilyLexer):
'final'), suffix=r'\b'), Keyword),
(r'char(16_t|32_t)\b', Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
+ # C++11 raw strings
+ (r'(R)(")([^\\()\s]{,16})(\()((?:.|\n)*?)(\)\3)(")',
+ bygroups(String.Affix, String, String.Delimiter, String.Delimiter,
+ String, String.Delimiter, String)),
+ # C++11 UTF-8/16/32 strings
+ (r'(u8|u|U)(")', bygroups(String.Affix, String), 'string'),
inherit,
],
'root': [
@@ -235,7 +243,7 @@ class CppLexer(CFamilyLexer):
}
def analyse_text(text):
- if re.search('#include <[a-z]+>', text):
+ if re.search('#include <[a-z_]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4
diff --git a/pygments/lexers/c_like.py b/pygments/lexers/c_like.py
index d894818d..f4a9c299 100644
--- a/pygments/lexers/c_like.py
+++ b/pygments/lexers/c_like.py
@@ -427,115 +427,115 @@ class ArduinoLexer(CppLexer):
filenames = ['*.ino']
mimetypes = ['text/x-arduino']
- # Language constants
- constants = set(('DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE',
- 'REPORT_DIGITAL', 'REPORT_ANALOG', 'INPUT_PULLUP',
- 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET', 'LED_BUILTIN',
- 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL',
- 'DEFAULT', 'OUTPUT', 'INPUT', 'HIGH', 'LOW'))
-
# Language sketch main structure functions
structure = set(('setup', 'loop'))
- # Language variable types
- storage = set(('boolean', 'const', 'byte', 'word', 'string', 'String', 'array'))
+ # Language operators
+ operators = set(('not', 'or', 'and', 'xor'))
+
+ # Language 'variables'
+ variables = set((
+ 'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
+ 'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
+ 'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
+ 'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
+ 'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
+ 'word', 'long', 'unsigned long', 'short', 'float', 'double', 'string', 'String',
+ 'array', 'static', 'volatile', 'const', 'boolean', 'byte', 'word', 'string',
+ 'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
+ 'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
+ 'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
+ 'struct', 'static_cast', 'union', 'unsigned', 'long', 'volatile', 'static',
+ 'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
+ 'class', 'short', 'reinterpret_cast', 'double', 'register', 'explicit',
+ 'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
+ 'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
+ 'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
+ 'atomic_llong', 'atomic_ullong', 'PROGMEM'))
# Language shipped functions and class ( )
- functions = set(('KeyboardController', 'MouseController', 'SoftwareSerial',
- 'EthernetServer', 'EthernetClient', 'LiquidCrystal',
- 'RobotControl', 'GSMVoiceCall', 'EthernetUDP', 'EsploraTFT',
- 'HttpClient', 'RobotMotor', 'WiFiClient', 'GSMScanner',
- 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
- 'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet',
- 'Console', 'GSMBand', 'Esplora', 'Stepper', 'Process',
- 'WiFiUDP', 'GSM_SMS', 'Mailbox', 'USBHost', 'Firmata', 'PImage',
- 'Client', 'Server', 'GSMPIN', 'FileIO', 'Bridge', 'Serial',
- 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File', 'Task',
- 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
- 'runShellCommandAsynchronously', 'analogWriteResolution',
- 'retrieveCallingNumber', 'printFirmwareVersion',
- 'analogReadResolution', 'sendDigitalPortPair',
- 'noListenOnLocalhost', 'readJoystickButton', 'setFirmwareVersion',
- 'readJoystickSwitch', 'scrollDisplayRight', 'getVoiceCallStatus',
- 'scrollDisplayLeft', 'writeMicroseconds', 'delayMicroseconds',
- 'beginTransmission', 'getSignalStrength', 'runAsynchronously',
- 'getAsynchronously', 'listenOnLocalhost', 'getCurrentCarrier',
- 'readAccelerometer', 'messageAvailable', 'sendDigitalPorts',
- 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
- 'readStringUntil', 'rewindDirectory', 'readTemperature',
- 'setClockDivider', 'readLightSensor', 'endTransmission',
- 'analogReference', 'detachInterrupt', 'countryNameRead',
- 'attachInterrupt', 'encryptionType', 'readBytesUntil',
- 'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
- 'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
- 'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite',
- 'beginSpeaker', 'mousePressed', 'isActionDone', 'mouseDragged',
- 'displayLogos', 'noAutoscroll', 'addParameter', 'remoteNumber',
- 'getModifiers', 'keyboardRead', 'userNameRead', 'waitContinue',
- 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
- 'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage',
- 'setDataMode', 'parsePacket', 'isListening', 'setBitOrder',
- 'beginPacket', 'isDirectory', 'motorsWrite', 'drawCompass',
- 'digitalRead', 'clearScreen', 'serialEvent', 'rightToLeft',
- 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
- 'compassRead', 'analogWrite', 'interrupts', 'WiFiServer',
- 'disconnect', 'playMelody', 'parseFloat', 'autoscroll',
- 'getPINUsed', 'setPINUsed', 'setTimeout', 'sendAnalog',
- 'readSlider', 'analogRead', 'beginWrite', 'createChar',
- 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
- 'subnetMask', 'debugPrint', 'macAddress', 'writeGreen',
- 'randomSeed', 'attachGPRS', 'readString', 'sendString',
- 'remotePort', 'releaseAll', 'mouseMoved', 'background',
- 'getXChange', 'getYChange', 'answerCall', 'getResult',
- 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
- 'getButton', 'available', 'connected', 'findUntil', 'readBytes',
- 'exitValue', 'readGreen', 'writeBlue', 'startLoop', 'IPAddress',
- 'isPressed', 'sendSysex', 'pauseMode', 'gatewayIP', 'setCursor',
- 'getOemKey', 'tuneWrite', 'noDisplay', 'loadImage', 'switchPIN',
- 'onRequest', 'onReceive', 'changePIN', 'playFile', 'noBuffer',
- 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
- 'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB',
- 'highByte', 'writeRed', 'setSpeed', 'readBlue', 'noStroke',
- 'remoteIP', 'transfer', 'shutdown', 'hangCall', 'beginSMS',
- 'endWrite', 'attached', 'maintain', 'noCursor', 'checkReg',
- 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
- 'connect', 'println', 'localIP', 'pinMode', 'getIMEI',
- 'display', 'noBlink', 'process', 'getBand', 'running', 'beginSD',
- 'drawBMP', 'lowByte', 'setBand', 'release', 'bitRead', 'prepare',
- 'pointTo', 'readRed', 'setMode', 'noFill', 'remove', 'listen',
- 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
- 'height', 'bitSet', 'circle', 'config', 'cursor', 'random',
- 'IRread', 'sizeof', 'setDNS', 'endSMS', 'getKey', 'micros',
- 'millis', 'begin', 'print', 'write', 'ready', 'flush', 'width',
- 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
- 'point', 'yield', 'image', 'float', 'BSSID', 'click', 'delay',
- 'read', 'text', 'move', 'peek', 'beep', 'rect', 'line', 'open',
- 'seek', 'fill', 'size', 'turn', 'stop', 'home', 'find', 'char',
- 'byte', 'step', 'word', 'long', 'tone', 'sqrt', 'RSSI', 'SSID',
- 'end', 'bit', 'tan', 'cos', 'sin', 'pow', 'map', 'abs', 'max',
- 'min', 'int', 'get', 'run', 'put'))
-
+ functions = set((
+ 'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
+ 'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
+ 'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
+ 'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
+ 'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
+ 'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
+ 'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
+ 'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
+ 'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
+ 'runShellCommandAsynchronously', 'analogWriteResolution',
+ 'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
+ 'sendDigitalPortPair', 'noListenOnLocalhost', 'readJoystickButton',
+ 'setFirmwareVersion', 'readJoystickSwitch', 'scrollDisplayRight',
+ 'getVoiceCallStatus', 'scrollDisplayLeft', 'writeMicroseconds',
+ 'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
+ 'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
+ 'getCurrentCarrier', 'readAccelerometer', 'messageAvailable',
+ 'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
+ 'readStringUntil', 'rewindDirectory', 'readTemperature', 'setClockDivider',
+ 'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
+ 'countryNameRead', 'attachInterrupt', 'encryptionType', 'readBytesUntil',
+ 'robotNameWrite', 'readMicrophone', 'robotNameRead', 'cityNameWrite',
+ 'userNameWrite', 'readJoystickY', 'readJoystickX', 'mouseReleased',
+ 'openNextFile', 'scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
+ 'mousePressed', 'isActionDone', 'mouseDragged', 'displayLogos', 'noAutoscroll',
+ 'addParameter', 'remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
+ 'waitContinue', 'processInput', 'parseCommand', 'printVersion', 'readNetworks',
+ 'writeMessage', 'blinkVersion', 'cityNameRead', 'readMessage', 'setDataMode',
+ 'parsePacket', 'isListening', 'setBitOrder', 'beginPacket', 'isDirectory',
+ 'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen', 'serialEvent',
+ 'rightToLeft', 'setTextSize', 'leftToRight', 'requestFrom', 'keyReleased',
+ 'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
+ 'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed', 'setPINUsed',
+ 'setTimeout', 'sendAnalog', 'readSlider', 'analogRead', 'beginWrite',
+ 'createChar', 'motorsStop', 'keyPressed', 'tempoWrite', 'readButton',
+ 'subnetMask', 'debugPrint', 'macAddress', 'writeGreen', 'randomSeed',
+ 'attachGPRS', 'readString', 'sendString', 'remotePort', 'releaseAll',
+ 'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
+ 'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
+ 'getButton', 'available', 'connected', 'findUntil', 'readBytes', 'exitValue',
+ 'readGreen', 'writeBlue', 'startLoop', 'IPAddress', 'isPressed', 'sendSysex',
+ 'pauseMode', 'gatewayIP', 'setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
+ 'loadImage', 'switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
+ 'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
+ 'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
+ 'writeRed', 'setSpeed', 'readBlue', 'noStroke', 'remoteIP', 'transfer',
+ 'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached', 'maintain',
+ 'noCursor', 'checkReg', 'checkPUK', 'shiftOut', 'isValid', 'shiftIn', 'pulseIn',
+ 'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
+ 'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte', 'setBand',
+ 'release', 'bitRead', 'prepare', 'pointTo', 'readRed', 'setMode', 'noFill',
+ 'remove', 'listen', 'stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
+ 'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread', 'setDNS',
+ 'endSMS', 'getKey', 'micros', 'millis', 'begin', 'print', 'write', 'ready',
+ 'flush', 'width', 'isPIN', 'blink', 'clear', 'press', 'mkdir', 'rmdir', 'close',
+ 'point', 'yield', 'image', 'BSSID', 'click', 'delay', 'read', 'text', 'move',
+ 'peek', 'beep', 'rect', 'line', 'open', 'seek', 'fill', 'size', 'turn', 'stop',
+ 'home', 'find', 'step', 'tone', 'sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
+ 'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put',
+ 'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
+ 'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
+ 'isHexadecimalDigit'))
+
+ # do not highlight
+ suppress_highlight = set((
+ 'namespace', 'template', 'mutable', 'using', 'asm', 'typeid',
+ 'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
+ 'static_assert', 'thread_local', 'restrict'))
+
def get_tokens_unprocessed(self, text):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
- if token is Name:
- if value in self.constants:
- yield index, Keyword.Constant, value
- elif value in self.functions:
- yield index, Name.Function, value
- elif value in self.storage:
- yield index, Keyword.Type, value
- else:
- yield index, token, value
- elif token is Name.Function:
- if value in self.structure:
- yield index, Name.Other, value
- else:
- yield index, token, value
- elif token is Keyword:
- if value in self.storage:
- yield index, Keyword.Type, value
- else:
- yield index, token, value
+ if value in self.structure:
+ yield index, Name.Builtin, value
+ elif value in self.operators:
+ yield index, Operator, value
+ elif value in self.variables:
+ yield index, Keyword.Reserved, value
+ elif value in self.suppress_highlight:
+ yield index, Name, value
+ elif value in self.functions:
+ yield index, Name.Function, value
else:
yield index, token, value
diff --git a/pygments/lexers/chapel.py b/pygments/lexers/chapel.py
index 5b7be4dd..9f9894cd 100644
--- a/pygments/lexers/chapel.py
+++ b/pygments/lexers/chapel.py
@@ -44,12 +44,13 @@ class ChapelLexer(RegexLexer):
(words((
'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall',
'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum',
- 'export', 'extern', 'for', 'forall', 'if', 'index', 'inline',
- 'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on',
- 'otherwise', 'pragma', 'private', 'public', 'reduce',
- 'require', 'return', 'scan', 'select', 'serial', 'single',
- 'sparse', 'subdomain', 'sync', 'then', 'use', 'when', 'where',
- 'while', 'with', 'yield', 'zip'), suffix=r'\b'),
+ 'except', 'export', 'extern', 'for', 'forall', 'if', 'index',
+ 'inline', 'iter', 'label', 'lambda', 'let', 'local', 'new',
+ 'noinit', 'on', 'only', 'otherwise', 'pragma', 'private',
+ 'public', 'reduce', 'require', 'return', 'scan', 'select',
+ 'serial', 'single', 'sparse', 'subdomain', 'sync', 'then',
+ 'use', 'when', 'where', 'while', 'with', 'yield', 'zip'),
+ suffix=r'\b'),
Keyword),
(r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
@@ -77,7 +78,8 @@ class ChapelLexer(RegexLexer):
(r'[0-9]+', Number.Integer),
# strings
- (r'["\'](\\\\|\\"|[^"\'])*["\']', String),
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r"'(\\\\|\\'|[^'])*'", String),
# tokens
(r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
diff --git a/pygments/lexers/clean.py b/pygments/lexers/clean.py
new file mode 100644
index 00000000..a3e81534
--- /dev/null
+++ b/pygments/lexers/clean.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.clean
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the Clean language.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import ExtendedRegexLexer, LexerContext, \
+ bygroups, words, include, default
+from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
+
+__all__ = ['CleanLexer']
+
+
+class CleanLexer(ExtendedRegexLexer):
+ """
+ Lexer for the general purpose, state-of-the-art, pure and lazy functional
+ programming language Clean (http://clean.cs.ru.nl/Clean).
+
+ .. versionadded: 2.2
+ """
+ name = 'Clean'
+ aliases = ['clean']
+ filenames = ['*.icl', '*.dcl']
+
+ def get_tokens_unprocessed(self, text=None, context=None):
+ ctx = LexerContext(text, 0)
+ ctx.indent = 0
+ return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context=ctx)
+
+ def check_class_not_import(lexer, match, ctx):
+ if match.group(0) == 'import':
+ yield match.start(), Keyword.Namespace, match.group(0)
+ ctx.stack = ctx.stack[:-1] + ['fromimportfunc']
+ else:
+ yield match.start(), Name.Class, match.group(0)
+ ctx.pos = match.end()
+
+ def check_instance_class(lexer, match, ctx):
+ if match.group(0) == 'instance' or match.group(0) == 'class':
+ yield match.start(), Keyword, match.group(0)
+ else:
+ yield match.start(), Name.Function, match.group(0)
+ ctx.stack = ctx.stack + ['fromimportfunctype']
+ ctx.pos = match.end()
+
+ @staticmethod
+ def indent_len(text):
+ # Tabs are four spaces:
+ # https://svn.cs.ru.nl/repos/clean-platform/trunk/doc/STANDARDS.txt
+ text = text.replace('\n', '')
+ return len(text.replace('\t', ' ')), len(text)
+
+ def store_indent(lexer, match, ctx):
+ ctx.indent, _ = CleanLexer.indent_len(match.group(0))
+ ctx.pos = match.end()
+ yield match.start(), Text, match.group(0)
+
+ def check_indent1(lexer, match, ctx):
+ indent, reallen = CleanLexer.indent_len(match.group(0))
+ if indent > ctx.indent:
+ yield match.start(), Whitespace, match.group(0)
+ ctx.pos = match.start() + reallen + 1
+ else:
+ ctx.indent = 0
+ ctx.pos = match.start()
+ ctx.stack = ctx.stack[:-1]
+ yield match.start(), Whitespace, match.group(0)[1:]
+
+ def check_indent2(lexer, match, ctx):
+ indent, reallen = CleanLexer.indent_len(match.group(0))
+ if indent > ctx.indent:
+ yield match.start(), Whitespace, match.group(0)
+ ctx.pos = match.start() + reallen + 1
+ else:
+ ctx.indent = 0
+ ctx.pos = match.start()
+ ctx.stack = ctx.stack[:-2]
+
+ def check_indent3(lexer, match, ctx):
+ indent, reallen = CleanLexer.indent_len(match.group(0))
+ if indent > ctx.indent:
+ yield match.start(), Whitespace, match.group(0)
+ ctx.pos = match.start() + reallen + 1
+ else:
+ ctx.indent = 0
+ ctx.pos = match.start()
+ ctx.stack = ctx.stack[:-3]
+ yield match.start(), Whitespace, match.group(0)[1:]
+ if match.group(0) == '\n\n':
+ ctx.pos = ctx.pos + 1
+
+ def skip(lexer, match, ctx):
+ ctx.stack = ctx.stack[:-1]
+ ctx.pos = match.end()
+ yield match.start(), Comment, match.group(0)
+
+ keywords = ('class', 'instance', 'where', 'with', 'let', 'let!', 'with',
+ 'in', 'case', 'of', 'infix', 'infixr', 'infixl', 'generic',
+ 'derive', 'otherwise', 'code', 'inline')
+
+ tokens = {
+ 'common': [
+ (r';', Punctuation, '#pop'),
+ (r'//', Comment, 'singlecomment'),
+ ],
+ 'root': [
+ # Comments
+ (r'//.*\n', Comment.Single),
+ (r'(?s)/\*\*.*?\*/', Comment.Special),
+ (r'(?s)/\*.*?\*/', Comment.Multi),
+
+ # Modules, imports, etc.
+ (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`]+)',
+ bygroups(Keyword.Namespace, Keyword.Namespace, Text, Name.Class)),
+ (r'(?<=\n)import(?=\s)', Keyword.Namespace, 'import'),
+ (r'(?<=\n)from(?=\s)', Keyword.Namespace, 'fromimport'),
+
+ # Keywords
+ # We cannot use (?s)^|(?<=\s) as prefix, so need to repeat this
+ (words(keywords, prefix=r'(?<=\s)', suffix=r'(?=\s)'), Keyword),
+ (words(keywords, prefix=r'^', suffix=r'(?=\s)'), Keyword),
+
+ # Function definitions
+ (r'(?=\{\|)', Whitespace, 'genericfunction'),
+ (r'(?<=\n)([ \t]*)([\w`$()=\-<>~*\^|+&%]+)((?:\s+[\w])*)(\s*)(::)',
+ bygroups(store_indent, Name.Function, Keyword.Type, Whitespace,
+ Punctuation),
+ 'functiondefargs'),
+
+ # Type definitions
+ (r'(?<=\n)([ \t]*)(::)', bygroups(store_indent, Punctuation), 'typedef'),
+ (r'^([ \t]*)(::)', bygroups(store_indent, Punctuation), 'typedef'),
+
+ # Literals
+ (r'\'\\?.(?<!\\)\'', String.Char),
+ (r'\'\\\d+\'', String.Char),
+ (r'\'\\\\\'', String.Char), # (special case for '\\')
+ (r'[+\-~]?\s*\d+\.\d+(E[+\-~]?\d+)?\b', Number.Float),
+ (r'[+\-~]?\s*0[0-7]\b', Number.Oct),
+ (r'[+\-~]?\s*0x[0-9a-fA-F]\b', Number.Hex),
+ (r'[+\-~]?\s*\d+\b', Number.Integer),
+ (r'"', String.Double, 'doubleqstring'),
+ (words(('True', 'False'), prefix=r'(?<=\s)', suffix=r'(?=\s)'),
+ Literal),
+
+ # Everything else is some name
+ (r'([\w`$%]+\.?)*[\w`$%]+', Name),
+
+ # Punctuation
+ (r'[{}()\[\],:;.#]', Punctuation),
+ (r'[+\-=!<>|&~*\^/]', Operator),
+ (r'\\\\', Operator),
+
+ # Lambda expressions
+ (r'\\.*?(->|\.|=)', Name.Function),
+
+ # Whitespace
+ (r'\s', Whitespace),
+
+ include('common'),
+ ],
+ 'fromimport': [
+ include('common'),
+ (r'([\w`]+)', check_class_not_import),
+ (r'\n', Whitespace, '#pop'),
+ (r'\s', Whitespace),
+ ],
+ 'fromimportfunc': [
+ include('common'),
+ (r'([\w`$()=\-<>~*\^|+&%]+)', check_instance_class),
+ (r',', Punctuation),
+ (r'\n', Whitespace, '#pop'),
+ (r'\s', Whitespace),
+ ],
+ 'fromimportfunctype': [
+ include('common'),
+ (r'[{(\[]', Punctuation, 'combtype'),
+ (r',', Punctuation, '#pop'),
+ (r'[:;.#]', Punctuation),
+ (r'\n', Whitespace, '#pop:2'),
+ (r'[^\S\n]+', Whitespace),
+ (r'\S+', Keyword.Type),
+ ],
+ 'combtype': [
+ include('common'),
+ (r'[})\]]', Punctuation, '#pop'),
+ (r'[{(\[]', Punctuation, '#pop'),
+ (r'[,:;.#]', Punctuation),
+ (r'\s+', Whitespace),
+ (r'\S+', Keyword.Type),
+ ],
+ 'import': [
+ include('common'),
+ (words(('from', 'import', 'as', 'qualified'),
+ prefix='(?<=\s)', suffix='(?=\s)'), Keyword.Namespace),
+ (r'[\w`]+', Name.Class),
+ (r'\n', Whitespace, '#pop'),
+ (r',', Punctuation),
+ (r'[^\S\n]+', Whitespace),
+ ],
+ 'singlecomment': [
+ (r'(.)(?=\n)', skip),
+ (r'.+(?!\n)', Comment),
+ ],
+ 'doubleqstring': [
+ (r'[^\\"]+', String.Double),
+ (r'"', String.Double, '#pop'),
+ (r'\\.', String.Double),
+ ],
+ 'typedef': [
+ include('common'),
+ (r'[\w`]+', Keyword.Type),
+ (r'[:=|(),\[\]{}!*]', Punctuation),
+ (r'->', Punctuation),
+ (r'\n(?=[^\s|])', Whitespace, '#pop'),
+ (r'\s', Whitespace),
+ (r'.', Keyword.Type),
+ ],
+ 'genericfunction': [
+ include('common'),
+ (r'\{\|', Punctuation),
+ (r'\|\}', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'->', Punctuation),
+ (r'(\s+of\s+)(\{)', bygroups(Keyword, Punctuation), 'genericftypes'),
+ (r'\s', Whitespace),
+ (r'[\w`]+', Keyword.Type),
+ (r'[*()]', Punctuation),
+ ],
+ 'genericftypes': [
+ include('common'),
+ (r'[\w`]+', Keyword.Type),
+ (r',', Punctuation),
+ (r'\s', Whitespace),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'functiondefargs': [
+ include('common'),
+ (r'\n(\s*)', check_indent1),
+ (r'[!{}()\[\],:;.#]', Punctuation),
+ (r'->', Punctuation, 'functiondefres'),
+ (r'^(?=\S)', Whitespace, '#pop'),
+ (r'\S', Keyword.Type),
+ (r'\s', Whitespace),
+ ],
+ 'functiondefres': [
+ include('common'),
+ (r'\n(\s*)', check_indent2),
+ (r'^(?=\S)', Whitespace, '#pop:2'),
+ (r'[!{}()\[\],:;.#]', Punctuation),
+ (r'\|', Punctuation, 'functiondefclasses'),
+ (r'\S', Keyword.Type),
+ (r'\s', Whitespace),
+ ],
+ 'functiondefclasses': [
+ include('common'),
+ (r'\n(\s*)', check_indent3),
+ (r'^(?=\S)', Whitespace, '#pop:3'),
+ (r'[,&]', Punctuation),
+ (r'[\w`$()=\-<>~*\^|+&%]', Name.Function, 'functionname'),
+ (r'\s', Whitespace),
+ ],
+ 'functionname': [
+ include('common'),
+ (r'[\w`$()=\-<>~*\^|+&%]+', Name.Function),
+ (r'(?=\{\|)', Punctuation, 'genericfunction'),
+ default('#pop'),
+ ]
+ }
diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py
index f5a67bc4..9cc291e5 100644
--- a/pygments/lexers/configs.py
+++ b/pygments/lexers/configs.py
@@ -13,13 +13,14 @@ import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Whitespace
+ Number, Punctuation, Whitespace, Literal
from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
- 'TerraformLexer']
+ 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
+ 'PkgConfigLexer', 'PacmanConfLexer']
class IniLexer(RegexLexer):
@@ -38,8 +39,10 @@ class IniLexer(RegexLexer):
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
- bygroups(Name.Attribute, Text, Operator, Text, String))
- ]
+ bygroups(Name.Attribute, Text, Operator, Text, String)),
+ # standalone option, supported by some INI parsers
+ (r'(.+?)$', Name.Attribute),
+ ],
}
def analyse_text(text):
@@ -597,7 +600,7 @@ class TerraformLexer(RegexLexer):
(r'(".*")', bygroups(String.Double)),
],
'punctuation': [
- (r'[\[\]\(\),.]', Punctuation),
+ (r'[\[\](),.]', Punctuation),
],
# Keep this seperate from punctuation - we sometimes want to use different
# Tokens for { }
@@ -617,3 +620,208 @@ class TerraformLexer(RegexLexer):
(r'\\\n', Text),
],
}
+
+
+class TermcapLexer(RegexLexer):
+ """
+ Lexer for termcap database source.
+
+ This is very simple and minimal.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Termcap'
+ aliases = ['termcap']
+ filenames = ['termcap', 'termcap.src']
+ mimetypes = []
+
+ # NOTE:
+ # * multiline with trailing backslash
+ # * separator is ':'
+ # * to embed colon as data, we must use \072
+ # * space after separator is not allowed (mayve)
+ tokens = {
+ 'root': [
+ (r'^#.*$', Comment),
+ (r'^[^\s#:|]+', Name.Tag, 'names'),
+ ],
+ 'names': [
+ (r'\n', Text, '#pop'),
+ (r':', Punctuation, 'defs'),
+ (r'\|', Punctuation),
+ (r'[^:|]+', Name.Attribute),
+ ],
+ 'defs': [
+ (r'\\\n[ \t]*', Text),
+ (r'\n[ \t]*', Text, '#pop:2'),
+ (r'(#)([0-9]+)', bygroups(Operator, Number)),
+ (r'=', Operator, 'data'),
+ (r':', Punctuation),
+ (r'[^\s:=#]+', Name.Class),
+ ],
+ 'data': [
+ (r'\\072', Literal),
+ (r':', Punctuation, '#pop'),
+ (r'[^:\\]+', Literal), # for performance
+ (r'.', Literal),
+ ],
+ }
+
+
+class TerminfoLexer(RegexLexer):
+ """
+ Lexer for terminfo database source.
+
+ This is very simple and minimal.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Terminfo'
+ aliases = ['terminfo']
+ filenames = ['terminfo', 'terminfo.src']
+ mimetypes = []
+
+ # NOTE:
+ # * multiline with leading whitespace
+ # * separator is ','
+ # * to embed comma as data, we can use \,
+ # * space after separator is allowed
+ tokens = {
+ 'root': [
+ (r'^#.*$', Comment),
+ (r'^[^\s#,|]+', Name.Tag, 'names'),
+ ],
+ 'names': [
+ (r'\n', Text, '#pop'),
+ (r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
+ (r'\|', Punctuation),
+ (r'[^,|]+', Name.Attribute),
+ ],
+ 'defs': [
+ (r'\n[ \t]+', Text),
+ (r'\n', Text, '#pop:2'),
+ (r'(#)([0-9]+)', bygroups(Operator, Number)),
+ (r'=', Operator, 'data'),
+ (r'(,)([ \t]*)', bygroups(Punctuation, Text)),
+ (r'[^\s,=#]+', Name.Class),
+ ],
+ 'data': [
+ (r'\\[,\\]', Literal),
+ (r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
+ (r'[^\\,]+', Literal), # for performance
+ (r'.', Literal),
+ ],
+ }
+
+
+class PkgConfigLexer(RegexLexer):
+ """
+ Lexer for `pkg-config
+ <http://www.freedesktop.org/wiki/Software/pkg-config/>`_
+ (see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'PkgConfig'
+ aliases = ['pkgconfig']
+ filenames = ['*.pc']
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ (r'#.*$', Comment.Single),
+
+ # variable definitions
+ (r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
+
+ # keyword lines
+ (r'^([\w.]+)(:)',
+ bygroups(Name.Tag, Punctuation), 'spvalue'),
+
+ # variable references
+ include('interp'),
+
+ # fallback
+ (r'[^${}#=:\n.]+', Text),
+ (r'.', Text),
+ ],
+ 'interp': [
+ # you can escape literal "$" as "$$"
+ (r'\$\$', Text),
+
+ # variable references
+ (r'\$\{', String.Interpol, 'curly'),
+ ],
+ 'curly': [
+ (r'\}', String.Interpol, '#pop'),
+ (r'\w+', Name.Attribute),
+ ],
+ 'spvalue': [
+ include('interp'),
+
+ (r'#.*$', Comment.Single, '#pop'),
+ (r'\n', Text, '#pop'),
+
+ # fallback
+ (r'[^${}#\n]+', Text),
+ (r'.', Text),
+ ],
+ }
+
+
+class PacmanConfLexer(RegexLexer):
+ """
+ Lexer for `pacman.conf
+ <https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
+
+ Actually, IniLexer works almost fine for this format,
+ but it yield error token. It is because pacman.conf has
+ a form without assignment like:
+
+ UseSyslog
+ Color
+ TotalDownload
+ CheckSpace
+ VerbosePkgLists
+
+ These are flags to switch on.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'PacmanConf'
+ aliases = ['pacmanconf']
+ filenames = ['pacman.conf']
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ # comment
+ (r'#.*$', Comment.Single),
+
+ # section header
+ (r'^\s*\[.*?\]\s*$', Keyword),
+
+ # variable definitions
+ # (Leading space is allowed...)
+ (r'(\w+)(\s*)(=)',
+ bygroups(Name.Attribute, Text, Operator)),
+
+ # flags to on
+ (r'^(\s*)(\w+)(\s*)$',
+ bygroups(Text, Name.Attribute, Text)),
+
+ # built-in special values
+ (words((
+ '$repo', # repository
+ '$arch', # architecture
+ '%o', # outfile
+ '%u', # url
+ ), suffix=r'\b'),
+ Name.Variable),
+
+ # fallback
+ (r'.', Text),
+ ],
+ }
diff --git a/pygments/lexers/csound.py b/pygments/lexers/csound.py
index b9613bdf..95ee73d8 100644
--- a/pygments/lexers/csound.py
+++ b/pygments/lexers/csound.py
@@ -9,19 +9,19 @@
:license: BSD, see LICENSE for details.
"""
-import copy, re
+import re
from pygments.lexer import RegexLexer, bygroups, default, include, using, words
from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, \
String, Text
from pygments.lexers._csound_builtins import OPCODES
-from pygments.lexers.html import HtmlLexer, XmlLexer
+from pygments.lexers.html import HtmlLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.scripting import LuaLexer
__all__ = ['CsoundScoreLexer', 'CsoundOrchestraLexer', 'CsoundDocumentLexer']
-newline = (r'((?:;|//).*)*(\n)', bygroups(Comment.Single, Text))
+newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text))
class CsoundLexer(RegexLexer):
@@ -98,6 +98,7 @@ class CsoundScoreLexer(CsoundLexer):
"""
name = 'Csound Score'
+ aliases = ['csound-score', 'csound-sco']
filenames = ['*.sco']
tokens = {
@@ -140,6 +141,7 @@ class CsoundOrchestraLexer(CsoundLexer):
"""
name = 'Csound Orchestra'
+ aliases = ['csound', 'csound-orc']
filenames = ['*.orc']
user_defined_opcodes = set()
@@ -175,7 +177,7 @@ class CsoundOrchestraLexer(CsoundLexer):
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'"', String, 'single-line string'),
- (r'{{', String, 'multi-line string'),
+ (r'\{\{', String, 'multi-line string'),
(r'[+\-*/%^!=&|<>#~¬]', Operator),
(r'[](),?:[]', Punctuation),
(words((
@@ -271,82 +273,94 @@ class CsoundOrchestraLexer(CsoundLexer):
(r'[\\"~$%\^\n]', String)
],
'multi-line string': [
- (r'}}', String, '#pop'),
- (r'[^\}]+|\}(?!\})', String)
+ (r'\}\}', String, '#pop'),
+ (r'[^}]+|\}(?!\})', String)
],
'scoreline opcode': [
include('whitespace or macro call'),
- (r'{{', String, 'scoreline'),
+ (r'\{\{', String, 'scoreline'),
default('#pop')
],
'scoreline': [
- (r'}}', String, '#pop'),
- (r'([^\}]+)|\}(?!\})', using(CsoundScoreLexer))
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(CsoundScoreLexer))
],
'python opcode': [
include('whitespace or macro call'),
- (r'{{', String, 'python'),
+ (r'\{\{', String, 'python'),
default('#pop')
],
'python': [
- (r'}}', String, '#pop'),
- (r'([^\}]+)|\}(?!\})', using(PythonLexer))
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(PythonLexer))
],
'lua opcode': [
include('whitespace or macro call'),
(r'"', String, 'single-line string'),
- (r'{{', String, 'lua'),
+ (r'\{\{', String, 'lua'),
(r',', Punctuation),
default('#pop')
],
'lua': [
- (r'}}', String, '#pop'),
- (r'([^\}]+)|\}(?!\})', using(LuaLexer))
+ (r'\}\}', String, '#pop'),
+ (r'([^}]+)|\}(?!\})', using(LuaLexer))
]
}
-class CsoundDocumentLexer(XmlLexer):
+class CsoundDocumentLexer(RegexLexer):
"""
For `Csound <http://csound.github.io>`_ documents.
-
+ .. versionadded:: 2.1
"""
name = 'Csound Document'
- aliases = []
+ aliases = ['csound-document', 'csound-csd']
filenames = ['*.csd']
- mimetypes = []
-
- tokens = copy.deepcopy(XmlLexer.tokens)
- for i, item in enumerate(tokens['root']):
- if len(item) > 2 and item[2] == 'tag':
- (tokens['root']).insert(i, (r'(<)(\s*)(CsInstruments)(\s*)',
- bygroups(Name.Tag, Text, Name.Tag, Text),
- ('orchestra content', 'tag')))
- (tokens['root']).insert(i, (r'(<)(\s*)(CsScore)(\s*)',
- bygroups(Name.Tag, Text, Name.Tag, Text),
- ('score content', 'tag')))
- (tokens['root']).insert(i, (r'(<)(\s*)(html)(\s*)',
- bygroups(Name.Tag, Text, Name.Tag, Text),
- ('HTML', 'tag')))
- break
-
- tokens['orchestra content'] = [
- (r'(<)(\s*)(/)(\s*)(CsInstruments)(\s*)(>)',
- bygroups(Name.Tag, Text, Name.Tag, Text, Name.Tag, Text, Name.Tag), '#pop'),
- (r'.+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
- ]
- tokens['score content'] = [
- (r'(<)(\s*)(/)(\s*)(CsScore)(\s*)(>)',
- bygroups(Name.Tag, Text, Name.Tag, Text, Name.Tag, Text, Name.Tag), '#pop'),
- (r'.+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
- ]
- tokens['HTML'] = [
- (r'(<)(\s*)(/)(\s*)(html)(\s*)(>)',
- bygroups(Name.Tag, Text, Name.Tag, Text, Name.Tag, Text, Name.Tag), '#pop'),
- (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer))
- ]
+
+ # These tokens are based on those in XmlLexer in pygments/lexers/html.py. Making
+ # CsoundDocumentLexer a subclass of XmlLexer rather than RegexLexer may seem like a
+ # better idea, since Csound Document files look like XML files. However, Csound
+ # Documents can contain Csound comments (preceded by //, for example) before and
+ # after the root element, unescaped bitwise AND & and less than < operators, etc. In
+ # other words, while Csound Document files look like XML files, they may not actually
+ # be XML files.
+ tokens = {
+ 'root': [
+ newline,
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'[^<&;/]+', Text),
+ (r'<\s*CsInstruments', Name.Tag, ('orchestra', 'tag')),
+ (r'<\s*CsScore', Name.Tag, ('score', 'tag')),
+ (r'<\s*[hH][tT][mM][lL]', Name.Tag, ('HTML', 'tag')),
+ (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag)
+ ],
+ 'orchestra': [
+ (r'<\s*/\s*CsInstruments\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*CsInstruments\s*>)', using(CsoundOrchestraLexer))
+ ],
+ 'score': [
+ (r'<\s*/\s*CsScore\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*CsScore\s*>)', using(CsoundScoreLexer))
+ ],
+ 'HTML': [
+ (r'<\s*/\s*[hH][tT][mM][lL]\s*>', Name.Tag, '#pop'),
+ (r'(.|\n)+?(?=<\s*/\s*[hH][tT][mM][lL]\s*>)', using(HtmlLexer))
+ ],
+ 'tag': [
+ (r'\s+', Text),
+ (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
+ (r'/?\s*>', Name.Tag, '#pop')
+ ],
+ 'attr': [
+ (r'\s+', Text),
+ (r'".*?"', String, '#pop'),
+ (r"'.*?'", String, '#pop'),
+ (r'[^\s>]+', String, '#pop')
+ ]
+ }
diff --git a/pygments/lexers/css.py b/pygments/lexers/css.py
index 6f7e5be8..6c585dfa 100644
--- a/pygments/lexers/css.py
+++ b/pygments/lexers/css.py
@@ -41,7 +41,7 @@ class CssLexer(RegexLexer):
(r'\{', Punctuation, 'content'),
(r'\:[\w-]+', Name.Decorator),
(r'\.[\w-]+', Name.Class),
- (r'\#[\w-]+', Name.Function),
+ (r'\#[\w-]+', Name.Namespace),
(r'@[\w-]+', Keyword, 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
@@ -120,7 +120,7 @@ class CssLexer(RegexLexer):
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
- Keyword),
+ Name.Builtin),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
@@ -476,8 +476,8 @@ class ScssLexer(RegexLexer):
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
# TODO: broken, and prone to infinite loops.
- #(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
- #(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
+ # (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
+ # (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
@@ -518,7 +518,7 @@ class LessCssLexer(CssLexer):
inherit,
],
'content': [
- (r'{', Punctuation, '#push'),
+ (r'\{', Punctuation, '#push'),
inherit,
],
}
diff --git a/pygments/lexers/diff.py b/pygments/lexers/diff.py
index d3b1589d..726b49ad 100644
--- a/pygments/lexers/diff.py
+++ b/pygments/lexers/diff.py
@@ -9,11 +9,13 @@
:license: BSD, see LICENSE for details.
"""
+import re
+
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal
-__all__ = ['DiffLexer', 'DarcsPatchLexer']
+__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
class DiffLexer(RegexLexer):
@@ -104,3 +106,60 @@ class DarcsPatchLexer(RegexLexer):
(r'[^\n\[]+', Generic.Deleted),
],
}
+
+
+class WDiffLexer(RegexLexer):
+ """
+ A `wdiff <https://www.gnu.org/software/wdiff/>`_ lexer.
+
+ Note that:
+
+ * it applies only to normal output (without options like -l).
+ * if the target files of wdiff contain "[-", "-]", "{+", "+}",
+ especially if they are unbalanced, this lexer will get confused.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'WDiff'
+ aliases = ['wdiff']
+ filenames = ['*.wdiff']
+ mimetypes = []
+
+ flags = re.MULTILINE | re.DOTALL
+
+ # We can only assume that a "[-" appearing after "[-" and before "-]" is `nested`,
+ # for instance in wdiff-of-wdiff outputs. We have no way to
+ # distinguish whether such markers come from wdiff output or the original text.
+
+ ins_op = r"\{\+"
+ ins_cl = r"\+\}"
+ del_op = r"\[\-"
+ del_cl = r"\-\]"
+ normal = r'[^{}[\]+-]+' # for performance
+ tokens = {
+ 'root': [
+ (ins_op, Generic.Inserted, 'inserted'),
+ (del_op, Generic.Deleted, 'deleted'),
+ (normal, Text),
+ (r'.', Text),
+ ],
+ 'inserted': [
+ (ins_op, Generic.Inserted, '#push'),
+ (del_op, Generic.Inserted, '#push'),
+ (del_cl, Generic.Inserted, '#pop'),
+
+ (ins_cl, Generic.Inserted, '#pop'),
+ (normal, Generic.Inserted),
+ (r'.', Generic.Inserted),
+ ],
+ 'deleted': [
+ (del_op, Generic.Deleted, '#push'),
+ (ins_op, Generic.Deleted, '#push'),
+ (ins_cl, Generic.Deleted, '#pop'),
+
+ (del_cl, Generic.Deleted, '#pop'),
+ (normal, Generic.Deleted),
+ (r'.', Generic.Deleted),
+ ],
+ }
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 21f8d1e4..11b4573e 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -11,7 +11,7 @@
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
- using, this, default
+ using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
@@ -97,17 +97,17 @@ class CSharpLexer(RegexLexer):
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
- (r'(abstract|as|async|await|base|break|case|catch|'
+ (r'(abstract|as|async|await|base|break|by|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
- r'internal|is|lock|new|null|operator|'
+ r'internal|is|let|lock|new|null|on|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
- r'descending|from|group|into|orderby|select|where|'
+ r'descending|from|group|into|orderby|select|thenby|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
@@ -375,8 +375,8 @@ class VbNetLexer(RegexLexer):
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
- uni_name = '[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' + \
- '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
+ uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
+ '[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
@@ -394,25 +394,26 @@ class VbNetLexer(RegexLexer):
(r'[(){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
- (r'(?<!\.)(AddHandler|Alias|'
- r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
- r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
- r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
- r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
- r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
- r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
- r'Implements|Inherits|Interface|'
- r'Let|Lib|Loop|Me|MustInherit|'
- r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
- r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
- r'Operator|Option|Optional|Overloads|Overridable|'
- r'Overrides|ParamArray|Partial|Private|Protected|'
- r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
- r'Return|Select|Set|Shadows|Shared|Single|'
- r'Static|Step|Stop|SyncLock|Then|'
- r'Throw|To|True|Try|TryCast|Wend|'
- r'Using|When|While|Widening|With|WithEvents|'
- r'WriteOnly)\b', Keyword),
+ (words((
+ 'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
+ 'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
+ 'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
+ 'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
+ 'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
+ 'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
+ 'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
+ 'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
+ 'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
+ 'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
+ 'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
+ 'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
+ 'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
+ 'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
+ 'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
+ 'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
+ 'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
+ 'Widening', 'With', 'WithEvents', 'WriteOnly'),
+ prefix='(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
diff --git a/pygments/lexers/dsls.py b/pygments/lexers/dsls.py
index 24fda2a2..6032017f 100644
--- a/pygments/lexers/dsls.py
+++ b/pygments/lexers/dsls.py
@@ -5,7 +5,7 @@
Lexers for various domain-specific languages.
- :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -18,7 +18,7 @@ from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
- 'CrmshLexer', 'ThriftLexer']
+ 'CrmshLexer', 'ThriftLexer', 'FlatlineLexer']
class ProtoBufLexer(RegexLexer):
@@ -111,8 +111,8 @@ class ThriftLexer(RegexLexer):
include('keywords'),
include('numbers'),
(r'[&=]', Operator),
- (r'[:;\,\{\}\(\)\<>\[\]]', Punctuation),
- (r'[a-zA-Z_](\.[a-zA-Z_0-9]|[a-zA-Z_0-9])*', Name),
+ (r'[:;,{}()<>\[\]]', Punctuation),
+ (r'[a-zA-Z_](\.\w|\w)*', Name),
],
'whitespace': [
(r'\n', Text.Whitespace),
@@ -135,7 +135,7 @@ class ThriftLexer(RegexLexer):
(r'[^\\\'\n]+', String.Single),
],
'namespace': [
- (r'[a-z\*](\.[a-zA-Z_0-9]|[a-zA-Z_0-9])*', Name.Namespace, '#pop'),
+ (r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
default('#pop'),
],
'class': [
@@ -692,3 +692,78 @@ class CrmshLexer(RegexLexer):
(r'\s+|\n', Whitespace),
],
}
+
+
+class FlatlineLexer(RegexLexer):
+ """
+ Lexer for `Flatline <https://github.com/bigmlcom/flatline>`_ expressions.
+
+ .. versionadded:: 2.2
+ """
+ name = 'Flatline'
+ aliases = ['flatline']
+ filenames = []
+ mimetypes = ['text/x-flatline']
+
+ special_forms = ('let',)
+
+ builtins = (
+ "!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
+ "all-but", "all-with-defaults", "all-with-numeric-default", "and",
+ "asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
+ "category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
+ "count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
+ "epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
+ "epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
+ "epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
+ "first", "floor", "head", "if", "in", "integer", "language", "length",
+ "levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
+ "matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
+ "minimum", "missing", "missing-count", "missing?", "missing_count",
+ "mod", "mode", "normalize", "not", "nth", "occurrences", "or",
+ "percentile", "percentile-label", "population", "population-fraction",
+ "pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
+ "random-value", "re-quote", "real", "replace", "replace-first", "rest",
+ "round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
+ "sqrt", "square", "standard-deviation", "standard_deviation", "str",
+ "subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
+ "summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
+ "to-radians", "variance", "vectorize", "weighted-random-value", "window",
+ "winnow", "within-percentiles?", "z-score",
+ )
+
+ valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
+
+ tokens = {
+ 'root': [
+ # whitespaces - usually not relevant
+ (r'[,\s]+', Text),
+
+ # numbers
+ (r'-?\d+\.\d+', Number.Float),
+ (r'-?\d+', Number.Integer),
+ (r'0x-?[a-f\d]+', Number.Hex),
+
+ # strings, symbols and characters
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r"\\(.|[a-z]+)", String.Char),
+
+ # expression template placeholder
+ (r'_', String.Symbol),
+
+ # highlight the special forms
+ (words(special_forms, suffix=' '), Keyword),
+
+ # highlight the builtins
+ (words(builtins, suffix=' '), Name.Builtin),
+
+ # the remaining functions
+ (r'(?<=\()' + valid_name, Name.Function),
+
+ # find the remaining variables
+ (valid_name, Name.Variable),
+
+ # parentheses
+ (r'(\(|\))', Punctuation),
+ ],
+ }
diff --git a/pygments/lexers/elm.py b/pygments/lexers/elm.py
index b8206c6d..cd1fb98e 100644
--- a/pygments/lexers/elm.py
+++ b/pygments/lexers/elm.py
@@ -5,6 +5,8 @@
Lexer for the Elm programming language.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include
@@ -44,7 +46,7 @@ class ElmLexer(RegexLexer):
'root': [
# Comments
- (r'{-', Comment.Multiline, 'comment'),
+ (r'\{-', Comment.Multiline, 'comment'),
(r'--.*', Comment.Single),
# Whitespace
@@ -84,20 +86,20 @@ class ElmLexer(RegexLexer):
(validName, Name.Variable),
# Parens
- (r'[,\(\)\[\]{}]', Punctuation),
+ (r'[,()\[\]{}]', Punctuation),
],
'comment': [
- (r'-(?!})', Comment.Multiline),
- (r'{-', Comment.Multiline, 'comment'),
+ (r'-(?!\})', Comment.Multiline),
+ (r'\{-', Comment.Multiline, 'comment'),
(r'[^-}]', Comment.Multiline),
- (r'-}', Comment.Multiline, '#pop'),
+ (r'-\}', Comment.Multiline, '#pop'),
],
'doublequote': [
- (r'\\u[0-9a-fA-F]\{4}', String.Escape),
- (r'\\[nrfvb\\\"]', String.Escape),
+ (r'\\u[0-9a-fA-F]{4}', String.Escape),
+ (r'\\[nrfvb\\"]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
diff --git a/pygments/lexers/erlang.py b/pygments/lexers/erlang.py
index c353a4dc..93ddd2c2 100644
--- a/pygments/lexers/erlang.py
+++ b/pygments/lexers/erlang.py
@@ -82,7 +82,11 @@ class ErlangLexer(RegexLexer):
variable_re = r'(?:[A-Z_]\w*)'
- escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
+ esc_char_re = r'[bdefnrstv\'"\\]'
+ esc_octal_re = r'[0-7][0-7]?[0-7]?'
+ esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
+ esc_ctrl_re = r'\^[a-zA-Z]'
+ escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
@@ -112,11 +116,18 @@ class ErlangLexer(RegexLexer):
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
+
+ # Erlang script shebang
+ (r'\A#!.+\n', Comment.Hashbang),
+
+ # EEP 43: Maps
+ # http://www.erlang.org/eeps/eep-0043.html
+ (r'#\{', Punctuation, 'map_key'),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
- (r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
+ (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
@@ -127,6 +138,17 @@ class ErlangLexer(RegexLexer):
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
+ 'map_key': [
+ include('root'),
+ (r'=>', Punctuation, 'map_val'),
+ (r':=', Punctuation, 'map_val'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'map_val': [
+ include('root'),
+ (r',', Punctuation, '#pop'),
+ (r'(?=\})', Punctuation, '#pop'),
+ ],
}
@@ -218,11 +240,11 @@ class ElixirLexer(RegexLexer):
KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
BUILTIN = (
'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
- 'quote', 'unquote', 'unquote_splicing', 'throw', 'super'
+ 'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
)
BUILTIN_DECLARATION = (
'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
- 'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback'
+ 'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
)
BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
@@ -241,7 +263,7 @@ class ElixirLexer(RegexLexer):
OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
PUNCTUATION = (
- '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']'
+ '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
)
def get_tokens_unprocessed(self, text):
diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py
index 73ea4a4a..c9db26b5 100644
--- a/pygments/lexers/esoteric.py
+++ b/pygments/lexers/esoteric.py
@@ -11,9 +11,9 @@
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Error, Whitespace
+ Number, Punctuation, Error
-__all__ = ['BrainfuckLexer', 'BefungeLexer', 'BoogieLexer', 'RedcodeLexer', 'CAmkESLexer']
+__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer']
class BrainfuckLexer(RegexLexer):
@@ -90,7 +90,7 @@ class CAmkESLexer(RegexLexer):
filenames = ['*.camkes', '*.idl4']
tokens = {
- 'root':[
+ 'root': [
# C pre-processor directive
(r'^\s*#.*\n', Comment.Preproc),
@@ -99,21 +99,25 @@ class CAmkESLexer(RegexLexer):
(r'/\*(.|\n)*?\*/', Comment),
(r'//.*\n', Comment),
- (r'[\[\(\){},\.;=\]]', Punctuation),
+ (r'[\[(){},.;\]]', Punctuation),
+ (r'[~!%^&*+=|?:<>/-]', Operator),
(words(('assembly', 'attribute', 'component', 'composition',
'configuration', 'connection', 'connector', 'consumes',
- 'control', 'dataport', 'Dataport', 'emits', 'event',
- 'Event', 'from', 'group', 'hardware', 'has', 'interface',
- 'Interface', 'maybe', 'procedure', 'Procedure', 'provides',
- 'template', 'to', 'uses'), suffix=r'\b'), Keyword),
+ 'control', 'dataport', 'Dataport', 'Dataports', 'emits',
+ 'event', 'Event', 'Events', 'export', 'from', 'group',
+ 'hardware', 'has', 'interface', 'Interface', 'maybe',
+ 'procedure', 'Procedure', 'Procedures', 'provides',
+ 'template', 'thread', 'threads', 'to', 'uses', 'with'),
+ suffix=r'\b'), Keyword),
(words(('bool', 'boolean', 'Buf', 'char', 'character', 'double',
'float', 'in', 'inout', 'int', 'int16_6', 'int32_t',
'int64_t', 'int8_t', 'integer', 'mutex', 'out', 'real',
- 'refin', 'semaphore', 'signed', 'string', 'uint16_t',
- 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t', 'unsigned',
- 'void'), suffix=r'\b'), Keyword.Type),
+ 'refin', 'semaphore', 'signed', 'string', 'struct',
+ 'uint16_t', 'uint32_t', 'uint64_t', 'uint8_t', 'uintptr_t',
+ 'unsigned', 'void'),
+ suffix=r'\b'), Keyword.Type),
# Recognised attributes
(r'[a-zA-Z_]\w*_(priority|domain|buffer)', Keyword.Reserved),
@@ -131,6 +135,7 @@ class CAmkESLexer(RegexLexer):
(r'-?[\d]+', Number),
(r'-?[\d]+\.[\d]+', Number.Float),
(r'"[^"]*"', String),
+ (r'[Tt]rue|[Ff]alse', Name.Builtin),
# Identifiers
(r'[a-zA-Z_]\w*', Name),
@@ -172,48 +177,3 @@ class RedcodeLexer(RegexLexer):
(r'[-+]?\d+', Number.Integer),
],
}
-
-
-class BoogieLexer(RegexLexer):
- """
- For `Boogie <https://boogie.codeplex.com/>`_ source code.
-
- .. versionadded:: 2.1
- """
- name = 'Boogie'
- aliases = ['boogie']
- filenames = ['*.bpl']
-
- tokens = {
- 'root': [
- # Whitespace and Comments
- (r'\n', Whitespace),
- (r'\s+', Whitespace),
- (r'//[/!](.*?)\n', Comment.Doc),
- (r'//(.*?)\n', Comment.Single),
- (r'/\*', Comment.Multiline, 'comment'),
-
- (words((
- 'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
- 'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
- 'then', 'var', 'while'),
- suffix=r'\b'), Keyword),
- (words(('const',), suffix=r'\b'), Keyword.Reserved),
-
- (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
- include('numbers'),
- (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
- (r"([{}():;,.])", Punctuation),
- # Identifier
- (r'[a-zA-Z_]\w*', Name),
- ],
- 'comment': [
- (r'[^*/]+', Comment.Multiline),
- (r'/\*', Comment.Multiline, '#push'),
- (r'\*/', Comment.Multiline, '#pop'),
- (r'[*/]', Comment.Multiline),
- ],
- 'numbers': [
- (r'[0-9]+', Number.Integer),
- ],
- }
diff --git a/pygments/lexers/ezhil.py b/pygments/lexers/ezhil.py
index 713541ee..a5468a0f 100644
--- a/pygments/lexers/ezhil.py
+++ b/pygments/lexers/ezhil.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.ezhil
- ~~~~~~~~~~~~~~~~~~~~~~
+ ~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Ezhil language.
diff --git a/pygments/lexers/felix.py b/pygments/lexers/felix.py
index b7659769..9631bcc1 100644
--- a/pygments/lexers/felix.py
+++ b/pygments/lexers/felix.py
@@ -237,7 +237,7 @@ class FelixLexer(RegexLexer):
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
diff --git a/pygments/lexers/fortran.py b/pygments/lexers/fortran.py
index 4c22139d..e2f95b11 100644
--- a/pygments/lexers/fortran.py
+++ b/pygments/lexers/fortran.py
@@ -11,7 +11,7 @@
import re
-from pygments.lexer import RegexLexer, bygroups, include, words, using
+from pygments.lexer import RegexLexer, bygroups, include, words, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
@@ -191,16 +191,15 @@ class FortranFixedLexer(RegexLexer):
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
-
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
- ('.', Generic.Strong, 'code')
+ ('.', Generic.Strong, 'code'),
],
-
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text), 'root'),
- (r'', Text, 'root')]
+ default('root'),
+ ]
}
diff --git a/pygments/lexers/grammar_notation.py b/pygments/lexers/grammar_notation.py
new file mode 100644
index 00000000..460914f4
--- /dev/null
+++ b/pygments/lexers/grammar_notation.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.grammar_notation
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for grammar notations like BNF.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Punctuation, Text, Comment, Operator, \
+ Keyword, Name, Literal
+
+__all__ = ['BnfLexer', 'AbnfLexer']
+
+
+class BnfLexer(RegexLexer):
+ """
+ This lexer is for grammar notations which are similar to
+ the original BNF.
+
+ In order to maximize the number of targets of this lexer,
+ we made the following design decisions:
+
+ * We don't distinguish `Terminal Symbol`.
+
+ * We do assume that a `NonTerminal Symbol` is always enclosed
+ in angle brackets.
+
+ * We do assume that a `NonTerminal Symbol` may include
+ any printable characters except angle brackets and ASCII 0x20.
+ This assumption is for `RBNF <http://www.rfc-base.org/txt/rfc-5511.txt>`_.
+
+ * We do assume that the target notation doesn't support comments.
+
+ * We don't distinguish any operators and punctuation except
+ `::=`.
+
+ Though these decisions may result in rather minimal highlighting
+ and you might be disappointed, they are reasonable for us.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'BNF'
+ aliases = ['bnf']
+ filenames = ['*.bnf']
+ mimetypes = ['text/x-bnf']
+
+ tokens = {
+ 'root': [
+ (r'(<)([ -;=?-~]+)(>)',
+ bygroups(Punctuation, Name.Class, Punctuation)),
+
+ # an only operator
+ (r'::=', Operator),
+
+ # fallback
+ (r'[^<>:]+', Text), # for performance
+ (r'.', Text),
+ ],
+ }
+
+
+class AbnfLexer(RegexLexer):
+ """
+ Lexer for `IETF 7405 ABNF
+ <http://www.ietf.org/rfc/rfc7405.txt>`_
+ (Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_)
+ grammars.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'ABNF'
+ aliases = ['abnf']
+ filenames = ['*.abnf']
+ mimetypes = ['text/x-abnf']
+
+ _core_rules = (
+ 'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
+ 'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
+ 'SP', 'VCHAR', 'WSP')
+
+ tokens = {
+ 'root': [
+ # comment
+ (r';.*$', Comment.Single),
+
+ # quoted
+ # a double quote itself cannot appear here; it is written as '%x22'.
+ (r'(%[si])?"[^"]*"', Literal),
+
+ # binary (but I have never seen it used...)
+ (r'%b[01]+\-[01]+\b', Literal), # range
+ (r'%b[01]+(\.[01]+)*\b', Literal), # concat
+
+ # decimal
+ (r'%d[0-9]+\-[0-9]+\b', Literal), # range
+ (r'%d[0-9]+(\.[0-9]+)*\b', Literal), # concat
+
+ # hexadecimal
+ (r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal), # range
+ (r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal), # concat
+
+ # repetition (<a>*<b>element) including nRule
+ (r'\b[0-9]+\*[0-9]+', Operator),
+ (r'\b[0-9]+\*', Operator),
+ (r'\b[0-9]+', Operator),
+ (r'\*', Operator),
+
+ # Strictly speaking, these are not keywords but
+ # are called `Core Rules'.
+ (words(_core_rules, suffix=r'\b'), Keyword),
+
+ # nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
+ (r'[a-zA-Z][a-zA-Z0-9-]+\b', Name.Class),
+
+ # operators
+ (r'(=/|=|/)', Operator),
+
+ # punctuation
+ (r'[\[\]()]', Punctuation),
+
+ # fallback
+ (r'\s+', Text),
+ (r'.', Text),
+ ],
+ }
diff --git a/pygments/lexers/haskell.py b/pygments/lexers/haskell.py
index 95e68a33..ffc3a3a2 100644
--- a/pygments/lexers/haskell.py
+++ b/pygments/lexers/haskell.py
@@ -321,7 +321,7 @@ class AgdaLexer(RegexLexer):
'module': [
(r'\{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][\w.]*', Name, '#pop'),
- (r'[^a-zA-Z]+', Text)
+ (r'[\W0-9_]+', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
diff --git a/pygments/lexers/hdl.py b/pygments/lexers/hdl.py
index fc5ff719..04cef14e 100644
--- a/pygments/lexers/hdl.py
+++ b/pygments/lexers/hdl.py
@@ -108,8 +108,8 @@ class VerilogLexer(RegexLexer):
'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo'
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
- ('[a-zA-Z_]\w*:(?!:)', Name.Label),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
+ (r'\$?[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
@@ -250,8 +250,8 @@ class SystemVerilogLexer(RegexLexer):
'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo'
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
- ('[a-zA-Z_]\w*:(?!:)', Name.Label),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
+ (r'\$?[a-zA-Z_]\w*', Name),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
@@ -308,20 +308,27 @@ class VhdlLexer(RegexLexer):
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-z_]\w*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
- (r'"[^\n\\]*"', String),
+ (r'"[^\n\\"]*"', String),
(r'(library)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
+ (r'(use)(\s+)([a-z_][\w.]*\.)(all)',
+ bygroups(Keyword, Text, Name.Namespace, Keyword)),
(r'(use)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Namespace)),
+ (r'(std|ieee)(\.[a-z_]\w*)',
+ bygroups(Name.Namespace, Name.Namespace)),
+ (words(('std', 'ieee', 'work'), suffix=r'\b'),
+ Name.Namespace),
(r'(entity|component)(\s+)([a-z_]\w*)',
bygroups(Keyword, Text, Name.Class)),
(r'(architecture|configuration)(\s+)([a-z_]\w*)(\s+)'
r'(of)(\s+)([a-z_]\w*)(\s+)(is)',
bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
Name.Class, Text, Keyword)),
-
+ (r'([a-z_]\w*)(:)(\s+)(process|for)',
+ bygroups(Name.Class, Operator, Text, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
include('types'),
@@ -341,7 +348,7 @@ class VhdlLexer(RegexLexer):
'boolean', 'bit', 'character', 'severity_level', 'integer', 'time',
'delay_length', 'natural', 'positive', 'string', 'bit_vector',
'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector',
- 'std_logic', 'std_logic_vector'), suffix=r'\b'),
+ 'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix=r'\b'),
Keyword.Type),
],
'keywords': [
@@ -357,8 +364,8 @@ class VhdlLexer(RegexLexer):
'next', 'nor', 'not', 'null', 'of', 'on',
'open', 'or', 'others', 'out', 'package', 'port',
'postponed', 'procedure', 'process', 'pure', 'range', 'record',
- 'register', 'reject', 'return', 'rol', 'ror', 'select',
- 'severity', 'signal', 'shared', 'sla', 'sli', 'sra',
+ 'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select',
+ 'severity', 'signal', 'shared', 'sla', 'sll', 'sra',
'srl', 'subtype', 'then', 'to', 'transport', 'type',
'units', 'until', 'use', 'variable', 'wait', 'when',
'while', 'with', 'xnor', 'xor'), suffix=r'\b'),
diff --git a/pygments/lexers/idl.py b/pygments/lexers/idl.py
index d745bcfd..a0b39492 100644
--- a/pygments/lexers/idl.py
+++ b/pygments/lexers/idl.py
@@ -258,12 +258,13 @@ class IDLLexer(RegexLexer):
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
(r'"[^\"]*"', String.Double),
(r"'[^\']*'", String.Single),
- (r'\b[\+\-]?([0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(D|E)?([\+\-]?[0-9]+)?\b', Number.Float),
- (r'\b\'[\+\-]?[0-9A-F]+\'X(U?(S?|L{1,2})|B)\b', Number.Hex),
- (r'\b\'[\+\-]?[0-7]+\'O(U?(S?|L{1,2})|B)\b', Number.Oct),
- (r'\b[\+\-]?[0-9]+U?L{1,2}\b', Number.Integer.Long),
- (r'\b[\+\-]?[0-9]+U?S?\b', Number.Integer),
- (r'\b[\+\-]?[0-9]+B\b', Number),
+ (r'\b[+\-]?([0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(D|E)?([+\-]?[0-9]+)?\b',
+ Number.Float),
+ (r'\b\'[+\-]?[0-9A-F]+\'X(U?(S?|L{1,2})|B)\b', Number.Hex),
+ (r'\b\'[+\-]?[0-7]+\'O(U?(S?|L{1,2})|B)\b', Number.Oct),
+ (r'\b[+\-]?[0-9]+U?L{1,2}\b', Number.Integer.Long),
+ (r'\b[+\-]?[0-9]+U?S?\b', Number.Integer),
+ (r'\b[+\-]?[0-9]+B\b', Number),
(r'.', Text),
]
}
diff --git a/pygments/lexers/igor.py b/pygments/lexers/igor.py
index b0eaf6aa..17fedf88 100644
--- a/pygments/lexers/igor.py
+++ b/pygments/lexers/igor.py
@@ -40,7 +40,7 @@ class IgorLexer(RegexLexer):
types = (
'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32',
- 'uint32', 'float', 'double'
+ 'uint32', 'int64', 'uint64', 'float', 'double'
)
keywords = (
'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc',
@@ -48,213 +48,221 @@ class IgorLexer(RegexLexer):
'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu'
)
operations = (
- 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio',
- 'AddMovieFrame', 'APMath', 'Append', 'AppendImage',
- 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText',
- 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour',
- 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall',
- 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox',
- 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc',
- 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar',
- 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile',
- 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross',
- 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
- 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions',
- 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide',
- 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
- 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic',
- 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow',
- 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine',
- 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText',
- 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder',
- 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText',
- 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp',
- 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR',
- 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly',
- 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf',
- 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload',
- 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo',
- 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow',
- 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox',
- 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
- 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles',
- 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection',
- 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask',
- 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate',
- 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration',
- 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave',
- 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold',
- 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort',
- 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath',
- 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder',
- 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings',
- 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout',
- 'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData',
- 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess',
- 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime',
- 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter',
- 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve',
- 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD',
- 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve',
- 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText',
- 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList',
- 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout',
- 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder',
- 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
- 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain',
- 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage',
- 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath',
- 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open',
- 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo',
- 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction',
- 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
- 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs',
- 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable',
- 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
- 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour',
+ 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame',
+ 'AdoptFiles', 'APMath', 'Append', 'AppendImage', 'AppendLayoutObject',
+ 'AppendMatrixContour', 'AppendText', 'AppendToGizmo', 'AppendToGraph',
+ 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', 'AutoPositionWindow',
+ 'BackgroundInfo', 'Beep', 'BoundingBall', 'BoxSmooth', 'BrowseURL', 'BuildMenu',
+ 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed', 'ChooseColor', 'Close',
+ 'CloseHelp', 'CloseMovie', 'CloseProc', 'ColorScale', 'ColorTab2Wave',
+ 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate',
+ 'ConvertGlobalStringTextEncoding', 'ConvexHull', 'Convolve', 'CopyFile',
+ 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'CreateBrowser',
+ 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
+ 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', 'DefaultFont',
+ 'DefaultGuiControls', 'DefaultGuiFont', 'DefaultTextEncoding', 'DefineGuide',
+ 'DelayUpdate', 'DeleteAnnotations', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
+ 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', 'DisplayProcedure',
+ 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', 'DoXOPIdle', 'DPSS',
+ 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', 'DrawOval', 'DrawPICT',
+ 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', 'DrawUserShape', 'DSPDetrend',
+ 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit',
+ 'ErrorBars', 'EstimatePeakSizes', 'Execute', 'ExecuteScriptText',
+ 'ExperimentModified', 'ExportGizmo', 'Extract', 'FastGaussTransform', 'FastOp',
+ 'FBinRead', 'FBinWrite', 'FFT', 'FIFOStatus', 'FIFO2Wave', 'FilterFIR',
+ 'FilterIIR', 'FindAPeak', 'FindContour', 'FindDuplicates', 'FindLevel',
+ 'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence',
+ 'FindValue', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos', 'FStatus',
+ 'FTPCreateDirectory', 'FTPDelete', 'FTPDownload', 'FTPUpload', 'FuncFit',
+ 'FuncFitMD', 'GBLoadWave', 'GetAxis', 'GetCamera', 'GetFileFolderInfo',
+ 'GetGizmo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetMouse', 'GetSelection',
+ 'GetWindow', 'GPIBReadBinaryWave2', 'GPIBReadBinary2', 'GPIBReadWave2',
+ 'GPIBRead2', 'GPIBWriteBinaryWave2', 'GPIBWriteBinary2', 'GPIBWriteWave2',
+ 'GPIBWrite2', 'GPIB2', 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep',
+ 'GroupBox', 'Hanning', 'HDF5CloseFile', 'HDF5CloseGroup', 'HDF5ConvertColors',
+ 'HDF5CreateFile', 'HDF5CreateGroup', 'HDF5CreateLink', 'HDF5Dump',
+ 'HDF5DumpErrors', 'HDF5DumpState', 'HDF5ListAttributes', 'HDF5ListGroup',
+ 'HDF5LoadData', 'HDF5LoadGroup', 'HDF5LoadImage', 'HDF5OpenFile', 'HDF5OpenGroup',
+ 'HDF5SaveData', 'HDF5SaveGroup', 'HDF5SaveImage', 'HDF5TestOperation',
+ 'HDF5UnlinkObject', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
+ 'HilbertTransform', 'Histogram', 'ICA', 'IFFT', 'ImageAnalyzeParticles',
+ 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', 'ImageFileInfo',
+ 'ImageFilter', 'ImageFocus', 'ImageFromXYZ', 'ImageGenerateROIMask', 'ImageGLCM',
+ 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile',
+ 'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground',
+ 'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSkeleton3d',
+ 'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform',
+ 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate',
+ 'IntegrateODE', 'Integrate2D', 'Interpolate2', 'Interpolate3D', 'Interp3DPath',
+ 'JCAMPLoadWave', 'JointHistogram', 'KillBackground', 'KillControl',
+ 'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs',
+ 'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label',
+ 'Layout', 'LayoutPageAction', 'LayoutSlideShow', 'Legend',
+ 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences',
+ 'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex',
+ 'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV',
+ 'MatrixFilter', 'MatrixGaussJ', 'MatrixGLM', 'MatrixInverse', 'MatrixLinearSolve',
+ 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixLUDTD',
+ 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub',
+ 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', 'MLLoadWave', 'Modify',
+ 'ModifyBrowser', 'ModifyCamera', 'ModifyContour', 'ModifyControl',
+ 'ModifyControlList', 'ModifyFreeAxis', 'ModifyGizmo', 'ModifyGraph',
+ 'ModifyImage', 'ModifyLayout', 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall',
+ 'MoveDataFolder', 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow',
+ 'MoveVariable', 'MoveWave', 'MoveWindow', 'MultiTaperPSD',
+ 'MultiThreadingControl', 'NeuralNetworkRun', 'NeuralNetworkTrain', 'NewCamera',
+ 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewGizmo', 'NewImage',
+ 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', 'NewWaterfall',
+ 'NI4882', 'Note', 'Notebook', 'NotebookAction', 'Open', 'OpenHelp',
+ 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', 'PauseForUser',
+ 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', 'PlaySound',
+ 'PopupContextualMenu', 'PopupMenu', 'Preferences', 'PrimeFactors', 'Print',
+ 'printf', 'PrintGraphs', 'PrintLayout', 'PrintNotebook', 'PrintSettings',
+ 'PrintTable', 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
+ 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', 'RemoveFromGizmo',
'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage',
- 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder',
- 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages',
- 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample',
- 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
- 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
- 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy',
- 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern',
- 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer',
- 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode',
- 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed',
- 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus',
- 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth',
- 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet',
- 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
- 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString',
- 'sprintf', 'sscanf', 'Stack', 'StackWindows',
+ 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath',
+ 'RenamePICT', 'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText',
+ 'ReplaceWave', 'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save',
+ 'SaveData', 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
+ 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow',
+ 'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel',
+ 'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook',
+ 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee', 'SetProcessSleep',
+ 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWaveTextEncoding',
+ 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools', 'Silent', 'Sleep',
+ 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SortColumns', 'SoundInRecord',
+ 'SoundInSet', 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
+ 'SoundLoadWave', 'SoundSaveWave', 'SphericalInterpolate', 'SphericalTriangulate',
+ 'SplitString', 'SplitWave', 'sprintf', 'sscanf', 'Stack', 'StackWindows',
'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest',
'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
- 'StatsCircularCorrelationTest', 'StatsCircularMeans',
- 'StatsCircularMoments', 'StatsCircularTwoSampleTest',
- 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest',
- 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
- 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest',
+ 'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments',
+ 'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable',
+ 'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
+ 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKDE', 'StatsKendallTauTest',
'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
- 'StatsLinearRegression', 'StatsMultiCorrelationTest',
- 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles',
- 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample',
- 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest',
- 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest',
- 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest',
- 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String',
- 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile',
- 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid',
- 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv',
- 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform',
- 'WindowFunction',
+ 'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest',
+ 'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest',
+ 'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsShapiroWilkTest',
+ 'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest',
+ 'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest',
+ 'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest',
+ 'String', 'StructGet', 'StructPut', 'SumDimension', 'SumSeries', 'TabControl',
+ 'Tag', 'TextBox', 'ThreadGroupPutDF', 'ThreadStart', 'Tile', 'TileWindows',
+ 'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'URLRequest',
+ 'ValDisplay', 'Variable', 'VDTClosePort2', 'VDTGetPortList2', 'VDTGetStatus2',
+ 'VDTOpenPort2', 'VDTOperationsPort2', 'VDTReadBinaryWave2', 'VDTReadBinary2',
+ 'VDTReadHexWave2', 'VDTReadHex2', 'VDTReadWave2', 'VDTRead2', 'VDTTerminalPort2',
+ 'VDTWriteBinaryWave2', 'VDTWriteBinary2', 'VDTWriteHexWave2', 'VDTWriteHex2',
+ 'VDTWriteWave2', 'VDTWrite2', 'VDT2', 'WaveMeanStdv', 'WaveStats',
+ 'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', 'XLLoadWave'
)
functions = (
- 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog',
- 'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
- 'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi',
- 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch',
+ 'abs', 'acos', 'acosh', 'AddListItem', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD',
+ 'alog', 'AnnotationInfo', 'AnnotationList', 'area', 'areaXY', 'asin', 'asinh',
+ 'atan', 'atanh', 'atan2', 'AxisInfo', 'AxisList', 'AxisValFromPixel', 'Besseli',
+ 'Besselj', 'Besselk', 'Bessely', 'beta', 'betai', 'BinarySearch',
'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs',
- 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev',
- 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos',
- 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi',
- 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual',
- 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian',
- 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave',
- 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'defined', 'deltax', 'digamma',
- 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf',
- 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata',
- 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor',
+ 'CaptureHistory', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num',
+ 'chebyshev', 'chebyshevU', 'CheckName', 'ChildWindowList', 'CleanupName', 'cmplx',
+ 'cmpstr', 'conj', 'ContourInfo', 'ContourNameList', 'ContourNameToWaveRef',
+ 'ContourZ', 'ControlNameList', 'ConvertTextEncoding', 'cos', 'cosh',
+ 'cosIntegral', 'cot', 'coth', 'CountObjects', 'CountObjectsDFR', 'cpowi',
+ 'CreationDate', 'csc', 'csch', 'CsrInfo', 'CsrWave', 'CsrWaveRef', 'CsrXWave',
+ 'CsrXWaveRef', 'CTabList', 'DataFolderDir', 'DataFolderExists',
+ 'DataFolderRefsEqual', 'DataFolderRefStatus', 'date', 'datetime', 'DateToJulian',
+ 'date2secs', 'Dawson', 'DDERequestString', 'defined', 'deltax', 'digamma',
+ 'dilogarithm', 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves',
+ 'erf', 'erfc', 'erfcw', 'exists', 'exp', 'ExpConvExp', 'ExpConvExpFit',
+ 'ExpConvExpFitBL', 'ExpConvExpFit1Shape', 'ExpConvExpFit1ShapeBL', 'ExpGauss',
+ 'ExpGaussFit', 'ExpGaussFitBL', 'ExpGaussFit1Shape', 'ExpGaussFit1ShapeBL',
+ 'expInt', 'expIntegralE1', 'expNoise', 'factorial', 'fakedata', 'faverage',
+ 'faverageXY', 'FetchURL', 'FindDimLabel', 'FindListItem', 'floor', 'FontList',
'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
- 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
- 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize',
- 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise',
- 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1',
- 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion',
- 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D',
- 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre',
- 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln',
- 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint',
- 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max',
- 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey',
- 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect',
- 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x',
- 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar',
- 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec',
- 'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ',
- 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD',
- 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
- 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
- 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF',
- 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF',
- 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF',
- 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF',
- 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF',
- 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF',
- 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF',
- 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF',
- 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
- 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF',
- 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF',
- 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF',
- 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF',
- 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF',
- 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
+ 'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'gamma',
+ 'gammaEuler', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
+ 'GaussFit', 'GaussFitBL', 'GaussFit1Width', 'GaussFit1WidthBL', 'Gauss1D',
+ 'Gauss2D', 'gcd', 'GetBrowserLine', 'GetBrowserSelection', 'GetDataFolder',
+ 'GetDataFolderDFR', 'GetDefaultFont', 'GetDefaultFontSize', 'GetDefaultFontStyle',
+ 'GetDimLabel', 'GetEnvironmentVariable', 'GetErrMessage', 'GetFormula',
+ 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
+ 'GetKeyState', 'GetRTErrMessage', 'GetRTError', 'GetRTLocation', 'GetRTLocInfo',
+ 'GetRTStackInfo', 'GetScrapText', 'GetUserData', 'GetWavesDataFolder',
+ 'GetWavesDataFolderDFR', 'GizmoInfo', 'GizmoScale', 'gnoise', 'GrepList',
+ 'GrepString', 'GuideInfo', 'GuideNameList', 'Hash', 'hcsr', 'HDF5AttributeInfo',
+ 'HDF5DatasetInfo', 'HDF5LibraryInfo', 'HDF5TypeInfo', 'hermite', 'hermiteGauss',
+ 'HyperGNoise', 'HyperGPFQ', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'IgorInfo',
+ 'IgorVersion', 'imag', 'ImageInfo', 'ImageNameList', 'ImageNameToWaveRef',
+ 'IndependentModuleList', 'IndexedDir', 'IndexedFile', 'Inf', 'Integrate1D',
+ 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC', 'ItemsInList',
+ 'JacobiCn', 'JacobiSn', 'JulianToDate', 'Laguerre', 'LaguerreA', 'LaguerreGauss',
+ 'LambertW', 'LayoutInfo', 'leftx', 'LegendreA', 'limit', 'ListMatch',
+ 'ListToTextWave', 'ListToWaveRefWave', 'ln', 'log', 'logNormalNoise',
+ 'LorentzianFit', 'LorentzianFitBL', 'LorentzianFit1Width',
+ 'LorentzianFit1WidthBL', 'lorentzianNoise', 'LowerStr', 'MacroList', 'magsqr',
+ 'MandelbrotPoint', 'MarcumQ', 'MatrixCondition', 'MatrixDet', 'MatrixDot',
+ 'MatrixRank', 'MatrixTrace', 'max', 'mean', 'median', 'min', 'mod', 'ModDate',
+ 'MPFXEMGPeak', 'MPFXExpConvExpPeak', 'MPFXGaussPeak', 'MPFXLorenzianPeak',
+ 'MPFXVoigtPeak', 'NameOfWave', 'NaN', 'NewFreeDataFolder', 'NewFreeWave', 'norm',
+ 'NormalizeUnicode', 'note', 'NumberByKey', 'numpnts', 'numtype',
+ 'NumVarOrDefault', 'num2char', 'num2istr', 'num2str', 'NVAR_Exists',
+ 'OperationList', 'PadString', 'PanelResolution', 'ParamIsDefault',
+ 'ParseFilePath', 'PathList', 'pcsr', 'Pi', 'PICTInfo', 'PICTList',
+ 'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'PolygonArea', 'poly2D',
+ 'PossiblyQuoteName', 'ProcedureText', 'p2rect', 'qcsr', 'real', 'RemoveByKey',
+ 'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey',
+ 'ReplaceString', 'ReplaceStringByKey', 'rightx', 'round', 'r2polar', 'sawtooth',
+ 'scaleToIndex', 'ScreenResolution', 'sec', 'sech', 'Secs2Date', 'Secs2Time',
+ 'SelectNumber', 'SelectString', 'SetEnvironmentVariable', 'sign', 'sin', 'sinc',
+ 'sinh', 'sinIntegral', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList',
+ 'SpecialDirPath', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY',
+ 'SphericalBessYD', 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
+ 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', 'StatsCauchyCDF',
+ 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF',
+ 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF',
+ 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF',
+ 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF',
+ 'StatsGammaCDF', 'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF',
+ 'StatsGEVCDF', 'StatsGEVPDF', 'StatsHyperGCDF', 'StatsHyperGPDF',
+ 'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF',
+ 'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
+ 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF',
+ 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF',
+ 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF',
+ 'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF',
+ 'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
- 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF',
- 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF',
- 'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF',
- 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF',
- 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF',
- 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute',
- 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
- 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF',
- 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF',
- 'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF',
- 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
+ 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF',
+ 'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian',
+ 'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF',
+ 'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF',
+ 'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF',
+ 'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
+ 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF',
+ 'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF',
+ 'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
- 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise',
- 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF',
- 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch',
- 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists',
- 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease',
- 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks',
- 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists',
- 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem',
- 'WinType', 'WNoise', 'x2pnt', 'xcsr', 'zcsr', 'ZernikeR',
- )
- functions += (
- 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo',
- 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName',
- 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo',
- 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date',
- 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo',
- 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont',
- 'GetDimLabel', 'GetErrMessage', 'GetFormula',
- 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
- 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
- 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash',
- 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile',
- 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList',
- 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str',
- 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo',
- 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey',
- 'RemoveEnding', 'RemoveFromList', 'RemoveListItem',
- 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey',
- 'Secs2Date', 'Secs2Time', 'SelectString', 'SortList',
- 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath',
- 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault',
- 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel',
- 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr',
- 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits',
- 'WinList', 'WinName', 'WinRecreation', 'XWaveName',
- 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef',
- 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef',
- 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR',
- 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR',
+ 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF',
+ 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF',
+ 'StopMSTimer', 'StringByKey', 'stringCRC', 'StringFromList', 'StringList',
+ 'stringmatch', 'strlen', 'strsearch', 'StrVarOrDefault', 'str2num', 'StudentA',
+ 'StudentT', 'sum', 'SVAR_Exists', 'TableInfo', 'TagVal', 'TagWaveRef', 'tan',
+ 'tanh', 'TextEncodingCode', 'TextEncodingName', 'TextFile', 'ThreadGroupCreate',
+ 'ThreadGroupGetDF', 'ThreadGroupGetDFR', 'ThreadGroupRelease', 'ThreadGroupWait',
+ 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'time', 'TraceFromPixel',
+ 'TraceInfo', 'TraceNameList', 'TraceNameToWaveRef', 'trunc', 'UniqueName',
+ 'UnPadString', 'UnsetEnvironmentVariable', 'UpperStr', 'URLDecode', 'URLEncode',
+ 'VariableList', 'Variance', 'vcsr', 'Voigt', 'VoigtFit', 'VoigtFitBL',
+ 'VoigtFit1Shape', 'VoigtFit1ShapeBL', 'VoigtFit1Shape1Width',
+ 'VoigtFit1Shape1WidthBL', 'VoigtFunc', 'WaveCRC', 'WaveDims', 'WaveExists',
+ 'WaveInfo', 'WaveList', 'WaveMax', 'WaveMin', 'WaveName', 'WaveRefIndexed',
+ 'WaveRefIndexedDFR', 'WaveRefsEqual', 'WaveRefWaveToList', 'WaveTextEncoding',
+ 'WaveType', 'WaveUnits', 'WhichListItem', 'WinList', 'WinName', 'WinRecreation',
+ 'WinType', 'WMFindWholeWord', 'WNoise', 'xcsr', 'XWaveName', 'XWaveRefFromTrace',
+ 'x2pnt', 'zcsr', 'ZernikeR', 'zeta'
)
tokens = {
@@ -272,7 +280,7 @@ class IgorLexer(RegexLexer):
# Built-in functions.
(words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
# Compiler directives.
- (r'^#(include|pragma|define|ifdef|ifndef|endif)',
+ (r'^#(include|pragma|define|undef|ifdef|ifndef|if|elif|else|endif)',
Name.Decorator),
(r'[^a-z"/]+$', Text),
(r'.', Text),
diff --git a/pygments/lexers/int_fiction.py b/pygments/lexers/int_fiction.py
index 25c472b1..724f9b27 100644
--- a/pygments/lexers/int_fiction.py
+++ b/pygments/lexers/int_fiction.py
@@ -285,6 +285,7 @@ class Inform6Lexer(RegexLexer):
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'\*', Punctuation),
+ (r'"', String.Double, 'plain-string'),
(_name, Name.Variable)
],
# Array
diff --git a/pygments/lexers/j.py b/pygments/lexers/j.py
index 20176d0d..f15595f8 100644
--- a/pygments/lexers/j.py
+++ b/pygments/lexers/j.py
@@ -5,6 +5,8 @@
Lexer for the J programming language.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include
@@ -46,7 +48,7 @@ class JLexer(RegexLexer):
# Definitions
(r'0\s+:\s*0|noun\s+define\s*$', Name.Entity, 'nounDefinition'),
- (r'\b(([1-4]|13)\s+:\s*0)|((adverb|conjunction|dyad|monad|verb)\s+define)\b',
+ (r'(([1-4]|13)\s+:\s*0|(adverb|conjunction|dyad|monad|verb)\s+define)\b',
Name.Function, 'explicitDefinition'),
# Flow Control
diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
index 8e2d9797..5dca6832 100644
--- a/pygments/lexers/javascript.py
+++ b/pygments/lexers/javascript.py
@@ -97,13 +97,13 @@ class JavascriptLexer(RegexLexer):
(r'`', String.Backtick, '#pop'),
(r'\\\\', String.Backtick),
(r'\\`', String.Backtick),
- (r'\${', String.Interpol, 'interp-inside'),
+ (r'\$\{', String.Interpol, 'interp-inside'),
(r'\$', String.Backtick),
(r'[^`\\$]+', String.Backtick),
],
'interp-inside': [
# TODO: should this include single-line comments and allow nesting strings?
- (r'}', String.Interpol, '#pop'),
+ (r'\}', String.Interpol, '#pop'),
include('root'),
],
# (\\\\|\\`|[^`])*`', String.Backtick),
@@ -446,7 +446,7 @@ class TypeScriptLexer(RegexLexer):
"""
name = 'TypeScript'
- aliases = ['ts']
+ aliases = ['ts', 'typescript']
filenames = ['*.ts']
mimetypes = ['text/x-typescript']
@@ -1245,32 +1245,32 @@ class EarlGreyLexer(RegexLexer):
include('control'),
(r'[^\S\n]+', Text),
(r';;.*\n', Comment),
- (r'[\[\]\{\}\:\(\)\,\;]', Punctuation),
+ (r'[\[\]{}:(),;]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
include('errors'),
(words((
'with', 'where', 'when', 'and', 'not', 'or', 'in',
'as', 'of', 'is'),
- prefix=r'(?<=\s|\[)', suffix=r'(?![\w\$\-])'),
+ prefix=r'(?<=\s|\[)', suffix=r'(?![\w$\-])'),
Operator.Word),
- (r'[\*@]?->', Name.Function),
+ (r'[*@]?->', Name.Function),
(r'[+\-*/~^<>%&|?!@#.]*=', Operator.Word),
(r'\.{2,3}', Operator.Word), # Range Operator
(r'([+*/~^<>&|?!]+)|([#\-](?=\s))|@@+(?=\s)|=+', Operator),
- (r'(?<![\w\$\-])(var|let)(?:[^\w\$])', Keyword.Declaration),
+ (r'(?<![\w$\-])(var|let)(?:[^\w$])', Keyword.Declaration),
include('keywords'),
include('builtins'),
include('assignment'),
(r'''(?x)
- (?:()([a-zA-Z$_](?:[a-zA-Z$0-9_\-]*[a-zA-Z$0-9_])?)|
- (?<=[\s\{\[\(])(\.)([a-zA-Z$_](?:[a-zA-Z$0-9_\-]*[a-zA-Z$0-9_])?))
+ (?:()([a-zA-Z$_](?:[\w$\-]*[\w$])?)|
+ (?<=[\s{\[(])(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?))
(?=.*%)''',
bygroups(Punctuation, Name.Tag, Punctuation, Name.Class.Start), 'dbs'),
(r'[rR]?`', String.Backtick, 'bt'),
(r'[rR]?```', String.Backtick, 'tbt'),
- (r'(?<=[\s\[\{\(,;])\.([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)'
- r'(?=[\s\]\}\),;])', String.Symbol),
+ (r'(?<=[\s\[{(,;])\.([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
+ r'(?=[\s\]}),;])', String.Symbol),
include('nested'),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?"', String, combined('stringescape', 'dqs')),
(r'(?:[rR]|[rR]\.[gmi]{1,3})?\'', String, combined('stringescape', 'sqs')),
@@ -1281,9 +1281,9 @@ class EarlGreyLexer(RegexLexer):
include('numbers'),
],
'dbs': [
- (r'(\.)([a-zA-Z$_](?:[a-zA-Z$0-9_\-]*[a-zA-Z$0-9_])?)(?=[\[\.\s])',
+ (r'(\.)([a-zA-Z$_](?:[\w$\-]*[\w$])?)(?=[.\[\s])',
bygroups(Punctuation, Name.Class.DBS)),
- (r'(\[)([\^#][a-zA-Z$_](?:[a-zA-Z$0-9_\-]*[a-zA-Z$0-9_])?)(\])',
+ (r'(\[)([\^#][a-zA-Z$_](?:[\w$\-]*[\w$])?)(\])',
bygroups(Punctuation, Name.Entity.DBS, Punctuation)),
(r'\s+', Text),
(r'%', Operator.DBS, '#pop'),
@@ -1293,29 +1293,29 @@ class EarlGreyLexer(RegexLexer):
bygroups(Text.Whitespace, Text)),
],
'assignment': [
- (r'(\.)?([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)'
+ (r'(\.)?([a-zA-Z$_](?:[\w$\-]*[\w$])?)'
r'(?=\s+[+\-*/~^<>%&|?!@#.]*\=\s)',
bygroups(Punctuation, Name.Variable))
],
'errors': [
(words(('Error', 'TypeError', 'ReferenceError'),
- prefix=r'(?<![\w\$\-\.])', suffix=r'(?![\w\$\-\.])'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Exception),
(r'''(?x)
- (?<![\w\$])
- E\.[\w\$](?:[\w\$\-]*[\w\$])?
- (?:\.[\w\$](?:[\w\$\-]*[\w\$])?)*
- (?=[\(\{\[\?\!\s])''',
+ (?<![\w$])
+ E\.[\w$](?:[\w$\-]*[\w$])?
+ (?:\.[\w$](?:[\w$\-]*[\w$])?)*
+ (?=[({\[?!\s])''',
Name.Exception),
],
'control': [
(r'''(?x)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?!\n)\s+
(?!and|as|each\*|each|in|is|mod|of|or|when|where|with)
- (?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)''',
+ (?=(?:[+\-*/~^<>%&|?!@#.])?[a-zA-Z$_](?:[\w$-]*[\w$])?)''',
Keyword.Control),
- (r'([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)(?!\n)\s+(?=[\'"\d\{\[\(])',
+ (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(?!\n)\s+(?=[\'"\d{\[(])',
Keyword.Control),
(r'''(?x)
(?:
@@ -1324,28 +1324,28 @@ class EarlGreyLexer(RegexLexer):
(?<=with|each|with)|
(?<=each\*|where)
)(\s+)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_\-]*[a-zA-Z$0-9_])?)(:)''',
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Text, Keyword.Control, Punctuation)),
(r'''(?x)
(?<![+\-*/~^<>%&|?!@#.])(\s+)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)(:)''',
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)(:)''',
bygroups(Text, Keyword.Control, Punctuation)),
],
'nested': [
(r'''(?x)
- (?<=[a-zA-Z$0-9_\]\}\)])(\.)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)
+ (?<=[\w$\]})])(\.)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
(?=\s+with(?:\s|\n))''',
bygroups(Punctuation, Name.Function)),
(r'''(?x)
(?<!\s)(\.)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)
- (?=[\}\]\)\.,;:\s])''',
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?=[}\]).,;:\s])''',
bygroups(Punctuation, Name.Field)),
(r'''(?x)
- (?<=[a-zA-Z$0-9_\]\}\)])(\.)
- ([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)
- (?=[\[\{\(:])''',
+ (?<=[\w$\]})])(\.)
+ ([a-zA-Z$_](?:[\w$-]*[\w$])?)
+ (?=[\[{(:])''',
bygroups(Punctuation, Name.Function)),
],
'keywords': [
@@ -1354,15 +1354,15 @@ class EarlGreyLexer(RegexLexer):
'continue', 'elif', 'expr-value', 'if', 'match',
'return', 'yield', 'pass', 'else', 'require', 'var',
'let', 'async', 'method', 'gen'),
- prefix=r'(?<![\w\$\-\.])', suffix=r'(?![\w\$\-\.])'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Keyword.Pseudo),
(words(('this', 'self', '@'),
- prefix=r'(?<![\w\$\-\.])', suffix=r'(?![\w\$\-])'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Constant),
(words((
'Function', 'Object', 'Array', 'String', 'Number',
'Boolean', 'ErrorFactory', 'ENode', 'Promise'),
- prefix=r'(?<![\w\$\-\.])', suffix=r'(?![\w\$\-])'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$])'),
Keyword.Type),
],
'builtins': [
@@ -1373,20 +1373,20 @@ class EarlGreyLexer(RegexLexer):
'getChecker', 'get-checker', 'getProperty', 'get-property',
'getProjector', 'get-projector', 'consume', 'take',
'promisify', 'spawn', 'constructor'),
- prefix=r'(?<![\w\-#\.])', suffix=r'(?![\w\-\.])'),
+ prefix=r'(?<![\w\-#.])', suffix=r'(?![\w\-.])'),
Name.Builtin),
(words((
'true', 'false', 'null', 'undefined'),
- prefix=r'(?<![\w\$\-\.])', suffix=r'(?![\w\$\-\.])'),
+ prefix=r'(?<![\w\-$.])', suffix=r'(?![\w\-$.])'),
Name.Constant),
],
'name': [
- (r'@([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)', Name.Variable.Instance),
- (r'([a-zA-Z$_](?:[a-zA-Z$0-9_-]*[a-zA-Z$0-9_])?)(\+\+|\-\-)?',
+ (r'@([a-zA-Z$_](?:[\w$-]*[\w$])?)', Name.Variable.Instance),
+ (r'([a-zA-Z$_](?:[\w$-]*[\w$])?)(\+\+|\-\-)?',
bygroups(Name.Symbol, Operator.Word))
],
'tuple': [
- (r'#[a-zA-Z_][a-zA-Z_\-0-9]*(?=[\s\{\(,;\n])', Name.Namespace)
+ (r'#[a-zA-Z_][\w\-]*(?=[\s{(,;])', Name.Namespace)
],
'interpoling_string': [
(r'\}', String.Interpol, '#pop'),
@@ -1426,7 +1426,7 @@ class EarlGreyLexer(RegexLexer):
(r'```', String.Backtick, '#pop'),
(r'\n', String.Backtick),
(r'\^=?', String.Escape),
- (r'[^\`]+', String.Backtick),
+ (r'[^`]+', String.Backtick),
],
'numbers': [
(r'\d+\.(?!\.)\d*([eE][+-]?[0-9]+)?', Number.Float),
@@ -1434,7 +1434,7 @@ class EarlGreyLexer(RegexLexer):
(r'8r[0-7]+', Number.Oct),
(r'2r[01]+', Number.Bin),
(r'16r[a-fA-F0-9]+', Number.Hex),
- (r'([3-79]|[1-2][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', Number.Radix),
+ (r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', Number.Radix),
(r'\d+', Number.Integer)
],
}
diff --git a/pygments/lexers/julia.py b/pygments/lexers/julia.py
index cf7c7d61..9f84b8d9 100644
--- a/pygments/lexers/julia.py
+++ b/pygments/lexers/julia.py
@@ -11,13 +11,16 @@
import re
-from pygments.lexer import Lexer, RegexLexer, bygroups, combined, do_insertions
+from pygments.lexer import Lexer, RegexLexer, bygroups, combined, \
+ do_insertions, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
from pygments.util import shebang_matches, unirange
__all__ = ['JuliaLexer', 'JuliaConsoleLexer']
+line_re = re.compile('.*?\n')
+
class JuliaLexer(RegexLexer):
"""
@@ -32,13 +35,26 @@ class JuliaLexer(RegexLexer):
flags = re.MULTILINE | re.UNICODE
- builtins = [
+ builtins = (
'exit', 'whos', 'edit', 'load', 'is', 'isa', 'isequal', 'typeof', 'tuple',
'ntuple', 'uid', 'hash', 'finalizer', 'convert', 'promote', 'subtype',
'typemin', 'typemax', 'realmin', 'realmax', 'sizeof', 'eps', 'promote_type',
'method_exists', 'applicable', 'invoke', 'dlopen', 'dlsym', 'system',
'error', 'throw', 'assert', 'new', 'Inf', 'Nan', 'pi', 'im',
- ]
+ )
+
+ keywords = (
+ 'begin', 'while', 'for', 'in', 'return', 'break', 'continue',
+ 'macro', 'quote', 'let', 'if', 'elseif', 'else', 'try', 'catch', 'end',
+ 'bitstype', 'ccall', 'do', 'using', 'module', 'import', 'export',
+ 'importall', 'baremodule', 'immutable',
+ )
+
+ types = (
+ 'Bool', 'Int', 'Int8', 'Int16', 'Int32', 'Int64', 'Uint', 'Uint8', 'Uint16',
+ 'Uint32', 'Uint64', 'Float32', 'Float64', 'Complex64', 'Complex128', 'Any',
+ 'Nothing', 'None',
+ )
tokens = {
'root': [
@@ -46,34 +62,29 @@ class JuliaLexer(RegexLexer):
(r'[^\S\n]+', Text),
(r'#=', Comment.Multiline, "blockcomment"),
(r'#.*$', Comment),
- (r'[]{}:(),;[@]', Punctuation),
+ (r'[\[\]{}:(),;@]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
# keywords
- (r'(begin|while|for|in|return|break|continue|'
- r'macro|quote|let|if|elseif|else|try|catch|end|'
- r'bitstype|ccall|do|using|module|import|export|'
- r'importall|baremodule|immutable)\b', Keyword),
(r'(local|global|const)\b', Keyword.Declaration),
- (r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64'
- r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b',
- Keyword.Type),
+ (words(keywords, suffix=r'\b'), Keyword),
+ (words(types, suffix=r'\b'), Keyword.Type),
# functions
(r'(function)((?:\s|\\\s)+)',
- bygroups(Keyword, Name.Function), 'funcname'),
+ bygroups(Keyword, Name.Function), 'funcname'),
# types
- (r'(type|typealias|abstract)((?:\s|\\\s)+)',
- bygroups(Keyword, Name.Class), 'typename'),
+ (r'(type|typealias|abstract|immutable)((?:\s|\\\s)+)',
+ bygroups(Keyword, Name.Class), 'typename'),
# operators
(r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator),
(r'\.\*|\.\^|\.\\|\.\/|\\', Operator),
# builtins
- ('(' + '|'.join(builtins) + r')\b', Name.Builtin),
+ (words(builtins, suffix=r'\b'), Name.Builtin),
# backticks
(r'`(?s).*?`', String.Backtick),
@@ -116,12 +127,12 @@ class JuliaLexer(RegexLexer):
],
'typename': [
- ('[a-zA-Z_]\w*', Name.Class, '#pop')
+ ('[a-zA-Z_]\w*', Name.Class, '#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
- r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape),
],
"blockcomment": [
(r'[^=#]', Comment.Multiline),
@@ -132,23 +143,29 @@ class JuliaLexer(RegexLexer):
'string': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
- (r'\$(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?',
- String.Interpol),
- (r'[^\\"$]+', String),
- # quotes, dollar signs, and backslashes must be parsed one at a time
- (r'["\\]', String),
- # unhandled string formatting sign
- (r'\$', String)
+ # Interpolation is defined as "$" followed by the shortest full
+ # expression, which is something we can't parse.
+ # Include the most common cases here: $word, and $(paren'd expr).
+ (r'\$[a-zA-Z_]+', String.Interpol),
+ (r'\$\(', String.Interpol, 'in-intp'),
+ # @printf and @sprintf formats
+ (r'%[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]',
+ String.Interpol),
+ (r'[^$%"\\]+', String),
+ # unhandled special signs
+ (r'[$%"\\]', String),
],
+ 'in-intp': [
+ (r'[^()]+', String.Interpol),
+ (r'\(', String.Interpol, '#push'),
+ (r'\)', String.Interpol, '#pop'),
+ ]
}
def analyse_text(text):
return shebang_matches(text, r'julia')
-line_re = re.compile('.*?\n')
-
-
class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 14647616..af7f8105 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -66,10 +66,19 @@ class JavaLexer(RegexLexer):
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
(r'^\s*([^\W\d]|\$)[\w$]*:', Name.Label),
(r'([^\W\d]|\$)[\w$]*', Name),
+ (r'([0-9](_*[0-9]+)*\.([0-9](_*[0-9]+)*)?|'
+ r'([0-9](_*[0-9]+)*)?\.[0-9](_*[0-9]+)*)'
+ r'([eE][+\-]?[0-9](_*[0-9]+)*)?[fFdD]?|'
+ r'[0-9][eE][+\-]?[0-9](_*[0-9]+)*[fFdD]?|'
+ r'[0-9]([eE][+\-]?[0-9](_*[0-9]+)*)?[fFdD]|'
+ r'0[xX]([0-9a-fA-F](_*[0-9a-fA-F]+)*\.?|'
+ r'([0-9a-fA-F](_*[0-9a-fA-F]+)*)?\.[0-9a-fA-F](_*[0-9a-fA-F]+)*)'
+ r'[pP][+\-]?[0-9](_*[0-9]+)*[fFdD]?', Number.Float),
+ (r'0[xX][0-9a-fA-F](_*[0-9a-fA-F]+)*[lL]?', Number.Hex),
+ (r'0[bB][01](_*[01]+)*[lL]?', Number.Bin),
+ (r'0(_*[0-7]+)+[lL]?', Number.Oct),
+ (r'0|[1-9](_*[0-9]+)*[lL]?', Number.Integer),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
- (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
- (r'0x[0-9a-fA-F]+', Number.Hex),
- (r'[0-9]+(_+[0-9]+)*L?', Number.Integer),
(r'\n', Text)
],
'class': [
@@ -555,14 +564,14 @@ class IokeLexer(RegexLexer):
],
'slashRegexp': [
- (r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
+ (r'(?<!\\)/[im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
- (r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
+ (r'(?<!\\)][im-psux]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py
index bd59d2b6..6d591e10 100644
--- a/pygments/lexers/lisp.py
+++ b/pygments/lexers/lisp.py
@@ -1488,7 +1488,7 @@ class EmacsLispLexer(RegexLexer):
.. versionadded:: 2.1
"""
name = 'EmacsLisp'
- aliases = ['emacs', 'elisp']
+ aliases = ['emacs', 'elisp', 'emacs-lisp']
filenames = ['*.el']
mimetypes = ['text/x-elisp', 'application/x-elisp']
@@ -2135,49 +2135,52 @@ class ShenLexer(RegexLexer):
filenames = ['*.shen']
mimetypes = ['text/x-shen', 'application/x-shen']
- DECLARATIONS = re.findall(r'\S+', """
- datatype define defmacro defprolog defcc synonyms declare package
- type function
- """)
-
- SPECIAL_FORMS = re.findall(r'\S+', """
- lambda get let if cases cond put time freeze value load $
- protect or and not do output prolog? trap-error error
- make-string /. set @p @s @v
- """)
-
- BUILTINS = re.findall(r'\S+', """
- == = * + - / < > >= <= <-address <-vector abort absvector
- absvector? address-> adjoin append arity assoc bind boolean?
- bound? call cd close cn compile concat cons cons? cut destroy
- difference element? empty? enable-type-theory error-to-string
- eval eval-kl exception explode external fail fail-if file
- findall fix fst fwhen gensym get-time hash hd hdstr hdv head
- identical implementation in include include-all-but inferences
- input input+ integer? intern intersection is kill language
- length limit lineread loaded macro macroexpand map mapcan
- maxinferences mode n->string nl nth null number? occurrences
- occurs-check open os out port porters pos pr preclude
- preclude-all-but print profile profile-results ps quit read
- read+ read-byte read-file read-file-as-bytelist
- read-file-as-string read-from-string release remove return
- reverse run save set simple-error snd specialise spy step
- stinput stoutput str string->n string->symbol string? subst
- symbol? systemf tail tc tc? thaw tl tlstr tlv track tuple?
- undefmacro unify unify! union unprofile unspecialise untrack
- variable? vector vector-> vector? verified version warn when
- write-byte write-to-file y-or-n?
- """)
-
- BUILTINS_ANYWHERE = re.findall(r'\S+', """
- where skip >> _ ! <e> <!>
- """)
+ DECLARATIONS = (
+ 'datatype', 'define', 'defmacro', 'defprolog', 'defcc',
+ 'synonyms', 'declare', 'package', 'type', 'function',
+ )
+
+ SPECIAL_FORMS = (
+ 'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze',
+ 'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output',
+ 'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p',
+ '@s', '@v',
+ )
+
+ BUILTINS = (
+ '==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address',
+ '<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin',
+ 'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd',
+ 'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy',
+ 'difference', 'element?', 'empty?', 'enable-type-theory',
+ 'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external',
+ 'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym',
+ 'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical',
+ 'implementation', 'in', 'include', 'include-all-but', 'inferences',
+ 'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill',
+ 'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand',
+ 'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null',
+ 'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port',
+ 'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile',
+ 'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file',
+ 'read-file-as-bytelist', 'read-file-as-string', 'read-from-string',
+ 'release', 'remove', 'return', 'reverse', 'run', 'save', 'set',
+ 'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput',
+ 'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?',
+ 'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track',
+ 'tuple?', 'undefmacro', 'unify', 'unify!', 'union', 'unprofile',
+ 'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?',
+ 'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file',
+ 'y-or-n?',
+ )
+
+ BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>')
MAPPINGS = dict((s, Keyword) for s in DECLARATIONS)
MAPPINGS.update((s, Name.Builtin) for s in BUILTINS)
MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS)
- valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:_-]'
+ valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]'
valid_name = '%s+' % valid_symbol_chars
symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars
variable = r'[A-Z]%s*' % valid_symbol_chars
@@ -2313,7 +2316,7 @@ class CPSALexer(SchemeLexer):
# valid names for identifiers
# well, names can only not consist fully of numbers
# but this should be good enough for now
- valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
+ valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
tokens = {
'root': [
@@ -2334,7 +2337,7 @@ class CPSALexer(SchemeLexer):
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
- (r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
+ (r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
diff --git a/pygments/lexers/modula2.py b/pygments/lexers/modula2.py
index a5fcbf78..01771f55 100644
--- a/pygments/lexers/modula2.py
+++ b/pygments/lexers/modula2.py
@@ -290,7 +290,7 @@ class Modula2Lexer(RegexLexer):
],
'unigraph_punctuation': [
# Common Punctuation
- (r'[\(\)\[\]{},.:;\|]', Punctuation),
+ (r'[()\[\]{},.:;|]', Punctuation),
# Case Label Separator Synonym
(r'!', Punctuation), # ISO
# Blueprint Punctuation
diff --git a/pygments/lexers/oberon.py b/pygments/lexers/oberon.py
index db18259d..51dfdab6 100644
--- a/pygments/lexers/oberon.py
+++ b/pygments/lexers/oberon.py
@@ -47,11 +47,11 @@ class ComponentPascalLexer(RegexLexer):
(r'\s+', Text), # whitespace
],
'comments': [
- (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
+ (r'\(\*([^$].*?)\*\)', Comment.Multiline),
# TODO: nested comments (* (* ... *) ... (* ... *) *) not supported!
],
'punctuation': [
- (r'[\(\)\[\]\{\},.:;\|]', Punctuation),
+ (r'[()\[\]{},.:;|]', Punctuation),
],
'numliterals': [
(r'[0-9A-F]+X\b', Number.Hex), # char code
@@ -83,7 +83,7 @@ class ComponentPascalLexer(RegexLexer):
(r'\$', Operator),
],
'identifiers': [
- (r'([a-zA-Z_\$][\w\$]*)', Name),
+ (r'([a-zA-Z_$][\w$]*)', Name),
],
'builtins': [
(words((
diff --git a/pygments/lexers/parasail.py b/pygments/lexers/parasail.py
index 878f7d26..812e2923 100644
--- a/pygments/lexers/parasail.py
+++ b/pygments/lexers/parasail.py
@@ -60,7 +60,7 @@ class ParaSailLexer(RegexLexer):
(r'[a-zA-Z]\w*', Name),
# Operators and Punctuation
(r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
- r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\||\|=|/=|\+|-|\*|/|'
+ r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|'
r'\.\.|<\.\.|\.\.<|<\.\.<)',
Operator),
(r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)',
diff --git a/pygments/lexers/perl.py b/pygments/lexers/perl.py
index b78963d0..8df3c810 100644
--- a/pygments/lexers/perl.py
+++ b/pygments/lexers/perl.py
@@ -109,7 +109,8 @@ class PerlLexer(RegexLexer):
'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'),
Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
- (r'<<([\'"]?)([a-zA-Z_]\w*)\1;?\n.*?\n\2\n', String),
+ (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)',
+ bygroups(String, String, String.Delimiter, String, String.Delimiter, Text)),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
diff --git a/pygments/lexers/php.py b/pygments/lexers/php.py
index 75b662cb..2421738f 100644
--- a/pygments/lexers/php.py
+++ b/pygments/lexers/php.py
@@ -11,7 +11,8 @@
import re
-from pygments.lexer import RegexLexer, include, bygroups, default, using, this
+from pygments.lexer import RegexLexer, include, bygroups, default, using, \
+ this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
@@ -137,7 +138,9 @@ class PhpLexer(RegexLexer):
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
- (r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\s*\2;?\n', String),
+ (r'(<<<)([\'"]?)(' + _ident_inner + r')(\2\n.*?\n\s*)(\3)(;?)(\n)',
+ bygroups(String, String, String.Delimiter, String, String.Delimiter,
+ Punctuation, Text)),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
@@ -162,13 +165,14 @@ class PhpLexer(RegexLexer):
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
- r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
- r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
- r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
+ r'virtual|endfor|include_once|while|endforeach|global|'
+ r'endif|list|endswitch|new|endwhile|not|'
+ r'array|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
+ include('magicconstants'),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
@@ -182,11 +186,29 @@ class PhpLexer(RegexLexer):
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
+ 'magicfuncs': [
+ # source: http://php.net/manual/en/language.oop5.magic.php
+ (words((
+ '__construct', '__destruct', '__call', '__callStatic', '__get', '__set',
+ '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke',
+ '__set_state', '__clone', '__debugInfo',), suffix=r'\b'),
+ Name.Function.Magic),
+ ],
+ 'magicconstants': [
+ # source: http://php.net/manual/en/language.constants.predefined.php
+ (words((
+ '__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__',
+ '__TRAIT__', '__METHOD__', '__NAMESPACE__',),
+ suffix=r'\b'),
+ Name.Constant),
+ ],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
- (_ident_inner, Name.Function, '#pop')
+ include('magicfuncs'),
+ (_ident_inner, Name.Function, '#pop'),
+ default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py
index 776c38b8..9255216d 100644
--- a/pygments/lexers/praat.py
+++ b/pygments/lexers/praat.py
@@ -27,21 +27,21 @@ class PraatLexer(RegexLexer):
aliases = ['praat']
filenames = ['*.praat', '*.proc', '*.psc']
- keywords = [
+ keywords = (
'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
'editor', 'endeditor', 'clearinfo',
- ]
+ )
- functions_string = [
+ functions_string = (
'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
- ]
+ )
- functions_numeric = [
+ functions_numeric = (
'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
@@ -67,13 +67,13 @@ class PraatLexer(RegexLexer):
'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
'studentQ', 'tan', 'tanh', 'variableExists', 'word', 'writeFile', 'writeFileLine',
'writeInfo', 'writeInfoLine',
- ]
+ )
- functions_array = [
+ functions_array = (
'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
- ]
+ )
- objects = [
+ objects = (
'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
@@ -100,17 +100,17 @@ class PraatLexer(RegexLexer):
'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
'Weight', 'WordList',
- ]
+ )
- variables_numeric = [
+ variables_numeric = (
'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
- ]
+ )
- variables_string = [
+ variables_string = (
'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
'preferencesDirectory', 'newline', 'temporaryDirectory',
'defaultDirectory',
- ]
+ )
tokens = {
'root': [
@@ -151,7 +151,7 @@ class PraatLexer(RegexLexer):
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r'\.{3}', Keyword, ('#pop', 'old_arguments')),
(r':', Keyword, ('#pop', 'comma_list')),
- (r'[\s\n]', Text, '#pop'),
+ (r'\s', Text, '#pop'),
],
'procedure_call': [
(r'\s+', Text),
@@ -230,7 +230,7 @@ class PraatLexer(RegexLexer):
bygroups(Name.Builtin, Name.Builtin, String.Interpol),
('object_attributes', 'string_interpolated')),
- (r'\.?_?[a-z][a-zA-Z0-9_.]*(\$|#)?', Text),
+ (r'\.?_?[a-z][\w.]*(\$|#)?', Text),
(r'[\[\]]', Punctuation, 'comma_list'),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
],
@@ -239,7 +239,7 @@ class PraatLexer(RegexLexer):
(r'\b(and|or|not|div|mod)\b', Operator.Word),
],
'string_interpolated': [
- (r'\.?[_a-z][a-zA-Z0-9_.]*[\$#]?(?:\[[a-zA-Z0-9,]+\])?(:[0-9]+)?',
+ (r'\.?[_a-z][\w.]*[$#]?(?:\[[a-zA-Z0-9,]+\])?(:[0-9]+)?',
String.Interpol),
(r"'", String.Interpol, '#pop'),
],
diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py
index dee8e6c7..7601afa8 100644
--- a/pygments/lexers/python.py
+++ b/pygments/lexers/python.py
@@ -39,7 +39,7 @@ class PythonLexer(RegexLexer):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
@@ -51,8 +51,10 @@ class PythonLexer(RegexLexer):
tokens = {
'root': [
(r'\n', Text),
- (r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
- (r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
+ (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
+ bygroups(Text, String.Affix, String.Doc)),
+ (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
+ bygroups(Text, String.Affix, String.Doc)),
(r'[^\S\n]+', Text),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
@@ -69,15 +71,25 @@ class PythonLexer(RegexLexer):
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
+ include('magicfuncs'),
+ include('magicvars'),
include('backtick'),
- ('(?:[rR]|[uU][rR]|[rR][uU])"""', String.Double, 'tdqs'),
- ("(?:[rR]|[uU][rR]|[rR][uU])'''", String.Single, 'tsqs'),
- ('(?:[rR]|[uU][rR]|[rR][uU])"', String.Double, 'dqs'),
- ("(?:[rR]|[uU][rR]|[rR][uU])'", String.Single, 'sqs'),
- ('[uU]?"""', String.Double, combined('stringescape', 'tdqs')),
- ("[uU]?'''", String.Single, combined('stringescape', 'tsqs')),
- ('[uU]?"', String.Double, combined('stringescape', 'dqs')),
- ("[uU]?'", String.Single, combined('stringescape', 'sqs')),
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
+ bygroups(String.Affix, String.Double), 'tdqs'),
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
+ bygroups(String.Affix, String.Single), 'tsqs'),
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
+ bygroups(String.Affix, String.Double), 'dqs'),
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
+ bygroups(String.Affix, String.Single), 'sqs'),
+ ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'tdqs')),
+ ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'tsqs')),
+ ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'dqs')),
+ ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
@@ -123,6 +135,37 @@ class PythonLexer(RegexLexer):
'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
+ 'magicfuncs': [
+ (words((
+ '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
+ '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
+ '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
+ '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
+ '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
+ '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
+ '__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
+ '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
+ '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
+ '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
+ '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
+ '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
+ '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
+ '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
+ '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
+ '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
+ '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
+ '__unicode__', '__xor__'), suffix=r'\b'),
+ Name.Function.Magic),
+ ],
+ 'magicvars': [
+ (words((
+ '__bases__', '__class__', '__closure__', '__code__', '__defaults__',
+ '__dict__', '__doc__', '__file__', '__func__', '__globals__',
+ '__metaclass__', '__module__', '__mro__', '__name__', '__self__',
+ '__slots__', '__weakref__'),
+ suffix=r'\b'),
+ Name.Variable.Magic),
+ ],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
@@ -140,7 +183,9 @@ class PythonLexer(RegexLexer):
('[a-zA-Z_]\w*', Name),
],
'funcname': [
- ('[a-zA-Z_]\w*', Name.Function, '#pop')
+ include('magicfuncs'),
+ ('[a-zA-Z_]\w*', Name.Function, '#pop'),
+ default('#pop'),
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
@@ -213,6 +258,26 @@ class Python3Lexer(RegexLexer):
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
+ def innerstring_rules(ttype):
+ return [
+ # the old style '%s' % (...) string formatting (still valid in Py3)
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ # the new style '{}'.format(...) string formatting
+ (r'\{'
+ '((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
+ '(\![sra])?' # conversion
+ '(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
+ '\}', String.Interpol),
+
+ # backslashes, quotes and formatting signs must be parsed one at a time
+ (r'[^\\\'"%{\n]+', ttype),
+ (r'[\'"\\]', ttype),
+ # unhandled string formatting sign
+ (r'%|(\{{1,2})', ttype)
+ # newlines are an error (use "nl" state)
+ ]
+
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(words((
@@ -263,6 +328,38 @@ class Python3Lexer(RegexLexer):
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
]
+ tokens['magicfuncs'] = [
+ (words((
+ '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', '__and__',
+ '__anext__', '__await__', '__bool__', '__bytes__', '__call__',
+ '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
+ '__delitem__', '__dir__', '__divmod__', '__enter__', '__eq__', '__exit__',
+ '__float__', '__floordiv__', '__format__', '__ge__', '__get__',
+ '__getattr__', '__getattribute__', '__getitem__', '__gt__', '__hash__',
+ '__iadd__', '__iand__', '__ifloordiv__', '__ilshift__', '__imatmul__',
+ '__imod__', '__import__', '__imul__', '__index__', '__init__',
+ '__instancecheck__', '__int__', '__invert__', '__ior__', '__ipow__',
+ '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__',
+ '__le__', '__len__', '__length_hint__', '__lshift__', '__lt__',
+ '__matmul__', '__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
+ '__new__', '__next__', '__or__', '__pos__', '__pow__', '__prepare__',
+ '__radd__', '__rand__', '__rdivmod__', '__repr__', '__reversed__',
+ '__rfloordiv__', '__rlshift__', '__rmatmul__', '__rmod__', '__rmul__',
+ '__ror__', '__round__', '__rpow__', '__rrshift__', '__rshift__',
+ '__rsub__', '__rtruediv__', '__rxor__', '__set__', '__setattr__',
+ '__setitem__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
+ '__xor__'), suffix=r'\b'),
+ Name.Function.Magic),
+ ]
+ tokens['magicvars'] = [
+ (words((
+ '__annotations__', '__bases__', '__class__', '__closure__', '__code__',
+ '__defaults__', '__dict__', '__doc__', '__file__', '__func__',
+ '__globals__', '__kwdefaults__', '__module__', '__mro__', '__name__',
+ '__objclass__', '__qualname__', '__self__', '__slots__', '__weakref__'),
+ suffix=r'\b'),
+ Name.Variable.Magic),
+ ]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
@@ -295,23 +392,8 @@ class Python3Lexer(RegexLexer):
(uni_name, Name.Namespace),
default('#pop'),
]
- tokens['strings'] = [
- # the old style '%s' % (...) string formatting (still valid in Py3)
- (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
- # the new style '{}'.format(...) string formatting
- (r'\{'
- '((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
- '(\![sra])?' # conversion
- '(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[bcdeEfFgGnosxX%]?)?'
- '\}', String.Interpol),
- # backslashes, quotes and formatting signs must be parsed one at a time
- (r'[^\\\'"%\{\n]+', String),
- (r'[\'"\\]', String),
- # unhandled string formatting sign
- (r'%|(\{{1,2})', String)
- # newlines are an error (use "nl" state)
- ]
+ tokens['strings-single'] = innerstring_rules(String.Single)
+ tokens['strings-double'] = innerstring_rules(String.Double)
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
@@ -515,6 +597,8 @@ class CythonLexer(RegexLexer):
include('keywords'),
(r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
+ # (should actually start a block with only cdefs)
+ (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
(r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
@@ -534,7 +618,7 @@ class CythonLexer(RegexLexer):
'keywords': [
(words((
'assert', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
- 'else', 'except', 'except?', 'exec', 'finally', 'for', 'gil',
+ 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
Keyword),
@@ -626,7 +710,7 @@ class CythonLexer(RegexLexer):
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
@@ -697,18 +781,20 @@ class DgLexer(RegexLexer):
(words((
'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
- 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', 'super',
- 'tuple', 'tuple\'', 'type'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
+ 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
+ 'super', 'tuple', 'tuple\'', 'type'),
+ prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(words((
'__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
- 'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst', 'getattr',
- 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init', 'input',
- 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len', 'locals',
- 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow', 'print', 'repr',
- 'reversed', 'round', 'setattr', 'scanl1?', 'snd', 'sorted', 'sum', 'tail',
- 'take', 'takewhile', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
+ 'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
+ 'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
+ 'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
+ 'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
+ 'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
+ 'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
+ prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
Name.Builtin),
(r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
Name.Builtin.Pseudo),
@@ -734,7 +820,7 @@ class DgLexer(RegexLexer):
],
'string': [
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
- '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
diff --git a/pygments/lexers/qvt.py b/pygments/lexers/qvt.py
index 5bc61310..f30e4887 100644
--- a/pygments/lexers/qvt.py
+++ b/pygments/lexers/qvt.py
@@ -9,7 +9,8 @@
:license: BSD, see LICENSE for details.
"""
-from pygments.lexer import RegexLexer, bygroups, include, combined
+from pygments.lexer import RegexLexer, bygroups, include, combined, default, \
+ words
from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \
Name, String, Number
@@ -50,23 +51,26 @@ class QVToLexer(RegexLexer):
bygroups(Comment, Comment, Comment.Preproc, Comment)),
# Uncomment the following if you want to distinguish between
# '/*' and '/**', à la javadoc
- #(r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline),
+ # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'\\\n', Text),
(r'(and|not|or|xor|##?)\b', Operator.Word),
- (r'([:]{1-2}=|[-+]=)\b', Operator.Word),
- (r'(@|<<|>>)\b', Keyword), # stereotypes
- (r'!=|<>|=|==|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator),
+ (r'(:{1,2}=|[-+]=)\b', Operator.Word),
+ (r'(@|<<|>>)\b', Keyword), # stereotypes
+ (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator),
(r'[]{}:(),;[]', Punctuation),
(r'(true|false|unlimited|null)\b', Keyword.Constant),
(r'(this|self|result)\b', Name.Builtin.Pseudo),
(r'(var)\b', Keyword.Declaration),
(r'(from|import)\b', Keyword.Namespace, 'fromimport'),
- (r'(metamodel|class|exception|primitive|enum|transformation|library)(\s+)([a-zA-Z0-9_]+)',
+ (r'(metamodel|class|exception|primitive|enum|transformation|'
+ r'library)(\s+)(\w+)',
bygroups(Keyword.Word, Text, Name.Class)),
- (r'(exception)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Word, Text, Name.Exception)),
+ (r'(exception)(\s+)(\w+)',
+ bygroups(Keyword.Word, Text, Name.Exception)),
(r'(main)\b', Name.Function),
- (r'(mapping|helper|query)(\s+)', bygroups(Keyword.Declaration, Text), 'operation'),
+ (r'(mapping|helper|query)(\s+)',
+ bygroups(Keyword.Declaration, Text), 'operation'),
(r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'),
(r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b',
Keyword.Type),
@@ -75,46 +79,45 @@ class QVToLexer(RegexLexer):
("'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
- # (r'([a-zA-Z_][a-zA-Z0-9_]*)(::)([a-zA-Z_][a-zA-Z0-9_]*)',
+ # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)',
# bygroups(Text, Text, Text)),
- ],
+ ],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
- (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
- (r'', Text, '#pop'),
- ],
+ (r'[a-zA-Z_][\w.]*', Name.Namespace),
+ default('#pop'),
+ ],
'operation': [
(r'::', Text),
- (r'(.*::)([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*(\()', bygroups(Text,Name.Function, Text), '#pop')
- ],
+ (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()',
+ bygroups(Text, Name.Function, Text, Punctuation), '#pop')
+ ],
'assert': [
(r'(warning|error|fatal)\b', Keyword, '#pop'),
- (r'', Text, '#pop') # all else: go back
- ],
+ default('#pop'), # all else: go back
+ ],
'keywords': [
- (r'(abstract|access|any|assert|'
- r'blackbox|break|case|collect|collectNested|'
- r'collectOne|collectselect|collectselectOne|composes|'
- r'compute|configuration|constructor|continue|datatype|'
- r'default|derived|disjuncts|do|elif|else|end|'
- r'endif|except|exists|extends|'
- r'forAll|forEach|forOne|from|if|'
- r'implies|in|inherits|init|inout|'
- r'intermediate|invresolve|invresolveIn|invresolveone|'
- r'invresolveoneIn|isUnique|iterate|late|let|'
- r'literal|log|map|merges|'
- r'modeltype|new|object|one|'
- r'ordered|out|package|population|'
- r'property|raise|readonly|references|refines|'
- r'reject|resolve|resolveIn|resolveone|resolveoneIn|'
- r'return|select|selectOne|sortedBy|static|switch|'
- r'tag|then|try|typedef|'
- r'unlimited|uses|when|where|while|with|'
- r'xcollect|xmap|xselect)\b', Keyword),
+ (words((
+ 'abstract', 'access', 'any', 'assert', 'blackbox', 'break',
+ 'case', 'collect', 'collectNested', 'collectOne', 'collectselect',
+ 'collectselectOne', 'composes', 'compute', 'configuration',
+ 'constructor', 'continue', 'datatype', 'default', 'derived',
+ 'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except',
+ 'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if',
+ 'implies', 'in', 'inherits', 'init', 'inout', 'intermediate',
+ 'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn',
+ 'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map',
+ 'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out',
+ 'package', 'population', 'property', 'raise', 'readonly',
+ 'references', 'refines', 'reject', 'resolve', 'resolveIn',
+ 'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne',
+ 'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef',
+ 'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect',
+ 'xmap', 'xselect'), suffix=r'\b'), Keyword),
],
# There is no need to distinguish between String.Single and
@@ -127,18 +130,18 @@ class QVToLexer(RegexLexer):
'stringescape': [
(r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape)
],
- 'dqs': [ # double-quoted string
+ 'dqs': [ # double-quoted string
(r'"', String, '#pop'),
(r'\\\\|\\"', String.Escape),
include('strings')
],
- 'sqs': [ # single-quoted string
+ 'sqs': [ # single-quoted string
(r"'", String, '#pop'),
(r"\\\\|\\'", String.Escape),
include('strings')
],
'name': [
- ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
+ ('[a-zA-Z_]\w*', Name),
],
# numbers: excerpt taken from the python lexer
'numbers': [
@@ -146,5 +149,4 @@ class QVToLexer(RegexLexer):
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer)
],
- }
-
+ }
diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py
index cb634ee0..6dd6e8b9 100644
--- a/pygments/lexers/rdf.py
+++ b/pygments/lexers/rdf.py
@@ -29,43 +29,55 @@ class SparqlLexer(RegexLexer):
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
+ # character group definitions ::
+
+ PN_CHARS_BASE_GRP = (u'a-zA-Z'
+ u'\u00c0-\u00d6'
+ u'\u00d8-\u00f6'
+ u'\u00f8-\u02ff'
+ u'\u0370-\u037d'
+ u'\u037f-\u1fff'
+ u'\u200c-\u200d'
+ u'\u2070-\u218f'
+ u'\u2c00-\u2fef'
+ u'\u3001-\ud7ff'
+ u'\uf900-\ufdcf'
+ u'\ufdf0-\ufffd')
+
+ PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
+
+ PN_CHARS_GRP = (PN_CHARS_U_GRP +
+ r'\-' +
+ r'0-9' +
+ u'\u00b7' +
+ u'\u0300-\u036f' +
+ u'\u203f-\u2040')
+
+ HEX_GRP = '0-9A-Fa-f'
+
+ PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
+
# terminal productions ::
- PN_CHARS_BASE = (u'(?:[a-zA-Z'
- u'\u00c0-\u00d6'
- u'\u00d8-\u00f6'
- u'\u00f8-\u02ff'
- u'\u0370-\u037d'
- u'\u037f-\u1fff'
- u'\u200c-\u200d'
- u'\u2070-\u218f'
- u'\u2c00-\u2fef'
- u'\u3001-\ud7ff'
- u'\uf900-\ufdcf'
- u'\ufdf0-\ufffd]|'
- u'[^\u0000-\uffff]|'
- u'[\ud800-\udbff][\udc00-\udfff])')
+ PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
- PN_CHARS_U = '(?:' + PN_CHARS_BASE + '|_)'
+ PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
- PN_CHARS = ('(?:' + PN_CHARS_U + r'|[\-0-9' +
- u'\u00b7' +
- u'\u0300-\u036f' +
- u'\u203f-\u2040])')
+ PN_CHARS = '[' + PN_CHARS_GRP + ']'
- HEX = '[0-9A-Fa-f]'
+ HEX = '[' + HEX_GRP + ']'
- PN_LOCAL_ESC_CHARS = r'[ _~.\-!$&""()*+,;=/?#@%]'
+ PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
- BLANK_NODE_LABEL = '_:(?:' + PN_CHARS_U + '|[0-9])(?:(?:' + PN_CHARS + '|\.)*' + \
- PN_CHARS + ')?'
+ BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
+ '.]*' + PN_CHARS + ')?'
- PN_PREFIX = PN_CHARS_BASE + '(?:(?:' + PN_CHARS + '|\.)*' + PN_CHARS + ')?'
+ PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
- VARNAME = '(?:' + PN_CHARS_U + '|[0-9])(?:' + PN_CHARS_U + \
- u'|[0-9\u00b7\u0300-\u036f\u203f-\u2040])*'
+ VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
+ u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
PERCENT = '%' + HEX + HEX
@@ -73,9 +85,9 @@ class SparqlLexer(RegexLexer):
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
- PN_LOCAL = ('(?:(?:' + PN_CHARS_U + '|[:0-9])|' + PLX + ')' +
- '(?:(?:(?:' + PN_CHARS + '|[.:])|' + PLX + ')*(?:(?:' +
- PN_CHARS + '|:)|' + PLX + '))?')
+ PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
+ '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
+ PN_CHARS_GRP + ':]|' + PLX + '))?')
EXPONENT = r'[eE][+-]?\d+'
@@ -178,7 +190,7 @@ class TurtleLexer(RegexLexer):
flags = re.IGNORECASE
patterns = {
- 'PNAME_NS': r'((?:[a-zA-Z][\w-]*)?\:)', # Simplified character range
+ 'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
@@ -245,8 +257,7 @@ class TurtleLexer(RegexLexer):
(r'.', String, '#pop'),
],
'end-of-string': [
-
- (r'(@)([a-zA-Z]+(:?-[a-zA-Z0-9]+)*)',
+ (r'(@)([a-z]+(:?-[a-z0-9]+)*)',
bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),
diff --git a/pygments/lexers/ruby.py b/pygments/lexers/ruby.py
index e81d6ecf..f16416d3 100644
--- a/pygments/lexers/ruby.py
+++ b/pygments/lexers/ruby.py
@@ -47,9 +47,9 @@ class RubyLexer(ExtendedRegexLexer):
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
- yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
- yield match.start(3), Name.Constant, match.group(3) # heredoc name
- yield match.start(4), String.Heredoc, match.group(4) # quote again
+ yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
+ yield match.start(3), String.Delimiter, match.group(3) # heredoc name
+ yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
@@ -74,7 +74,7 @@ class RubyLexer(ExtendedRegexLexer):
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
- yield match.start(), Name.Constant, match.group()
+ yield match.start(), String.Delimiter, match.group()
ctx.pos = match.end()
break
else:
diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py
index d8939678..5d1162b8 100644
--- a/pygments/lexers/rust.py
+++ b/pygments/lexers/rust.py
@@ -23,7 +23,7 @@ class RustLexer(RegexLexer):
.. versionadded:: 1.6
"""
name = 'Rust'
- filenames = ['*.rs']
+ filenames = ['*.rs', '*.rs.in']
aliases = ['rust']
mimetypes = ['text/rust']
diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py
index 4dd9594b..ac0f7533 100644
--- a/pygments/lexers/scripting.py
+++ b/pygments/lexers/scripting.py
@@ -1020,11 +1020,11 @@ class EasytrieveLexer(RegexLexer):
(r"'(''|[^'])*'", String),
(r'\s+', Whitespace),
# Everything else just belongs to a name
- (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name)
+ (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
],
'after_declaration': [
(_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
- ('', Whitespace, '#pop')
+ default('#pop'),
],
'after_macro_argument': [
(r'\*.*\n', Comment.Single, '#pop'),
@@ -1032,7 +1032,7 @@ class EasytrieveLexer(RegexLexer):
(_OPERATORS_PATTERN, Operator, '#pop'),
(r"'(''|[^'])*'", String, '#pop'),
# Everything else just belongs to a name
- (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name)
+ (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
],
}
_COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
@@ -1122,7 +1122,8 @@ class EasytrieveLexer(RegexLexer):
class JclLexer(RegexLexer):
"""
- `Job Control Language (JCL) <http://publibz.boulder.ibm.com/cgi-bin/bookmgr_OS390/BOOKS/IEA2B570/CCONTENTS>`_
+ `Job Control Language (JCL)
+ <http://publibz.boulder.ibm.com/cgi-bin/bookmgr_OS390/BOOKS/IEA2B570/CCONTENTS>`_
is a scripting language used on mainframe platforms to instruct the system
on how to run a batch job or start a subsystem. It is somewhat
comparable to MS DOS batch and Unix shell scripts.
@@ -1145,10 +1146,10 @@ class JclLexer(RegexLexer):
],
'statement': [
(r'\s*\n', Whitespace, '#pop'),
- (r'([a-z][a-z_0-9]*)(\s+)(exec|job)(\s*)',
+ (r'([a-z]\w*)(\s+)(exec|job)(\s*)',
bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace),
'option'),
- (r'[a-z][a-z_0-9]*', Name.Variable, 'statement_command'),
+ (r'[a-z]\w*', Name.Variable, 'statement_command'),
(r'\s+', Whitespace, 'statement_command'),
],
'statement_command': [
@@ -1167,10 +1168,10 @@ class JclLexer(RegexLexer):
(r'\*', Name.Builtin),
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=&%]', Operator),
- (r'[a-z_][a-z_0-9]*', Name),
- (r'[0-9]+\.[0-9]*', Number.Float),
- (r'\.[0-9]+', Number.Float),
- (r'[0-9]+', Number.Integer),
+ (r'[a-z_]\w*', Name),
+ (r'\d+\.\d*', Number.Float),
+ (r'\.\d+', Number.Float),
+ (r'\d+', Number.Integer),
(r"'", String, 'option_string'),
(r'[ \t]+', Whitespace, 'option_comment'),
(r'\.', Punctuation),
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index dc23d018..4145939e 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -35,6 +35,7 @@ class BashLexer(RegexLexer):
name = 'Bash'
aliases = ['bash', 'sh', 'ksh', 'shell']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
+ '*.exheres-0', '*.exlib',
'.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD']
mimetypes = ['application/x-sh', 'application/x-shellscript']
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
index 646a9f31..7c06226b 100644
--- a/pygments/lexers/sql.py
+++ b/pygments/lexers/sql.py
@@ -57,11 +57,14 @@ line_re = re.compile('.*?\n')
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
+do_re = re.compile(r'\bDO\b', re.IGNORECASE)
+
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
- The lexer is chosen looking for a nearby LANGUAGE.
+ The lexer is chosen looking for a nearby LANGUAGE or assumed as
+ plpgsql if inside a DO statement and no LANGUAGE has been found.
"""
l = None
m = language_re.match(lexer.text[match.end():match.end()+100])
@@ -72,15 +75,26 @@ def language_callback(lexer, match):
lexer.text[max(0, match.start()-100):match.start()]))
if m:
l = lexer._get_lexer(m[-1].group(1))
-
+ else:
+ m = list(do_re.finditer(
+ lexer.text[max(0, match.start()-25):match.start()]))
+ if m:
+ l = lexer._get_lexer('plpgsql')
+
+ # 1 = $, 2 = delimiter, 3 = $
+ yield (match.start(1), String, match.group(1))
+ yield (match.start(2), String.Delimiter, match.group(2))
+ yield (match.start(3), String, match.group(3))
+ # 4 = string contents
if l:
- yield (match.start(1), String, match.group(1))
- for x in l.get_tokens_unprocessed(match.group(2)):
+ for x in l.get_tokens_unprocessed(match.group(4)):
yield x
- yield (match.start(3), String, match.group(3))
-
else:
- yield (match.start(), String, match.group())
+ yield (match.start(4), String, match.group(4))
+ # 5 = $, 6 = delimiter, 7 = $
+ yield (match.start(5), String, match.group(5))
+ yield (match.start(6), String.Delimiter, match.group(6))
+ yield (match.start(7), String, match.group(7))
class PostgresBase(object):
@@ -148,9 +162,10 @@ class PostgresLexer(PostgresBase, RegexLexer):
(r'\$\d+', Name.Variable),
(r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+', Number.Integer),
- (r"(E|U&)?'(''|[^'])*'", String.Single),
- (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier
- (r'(?s)(\$[^$]*\$)(.*?)(\1)', language_callback),
+ (r"((?:E|U&)?)(')", bygroups(String.Affix, String.Single), 'string'),
+ # quoted identifier
+ (r'((?:U&)?)(")', bygroups(String.Affix, String.Name), 'quoted-ident'),
+ (r'(?s)(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', language_callback),
(r'[a-z_]\w*', Name),
# psql variable in SQL
@@ -164,6 +179,16 @@ class PostgresLexer(PostgresBase, RegexLexer):
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
],
+ 'string': [
+ (r"[^']+", String.Single),
+ (r"''", String.Single),
+ (r"'", String.Single, '#pop'),
+ ],
+ 'quoted-ident': [
+ (r'[^"]+', String.Name),
+ (r'""', String.Name),
+ (r'"', String.Name, '#pop'),
+ ],
}
@@ -380,13 +405,13 @@ class SqlLexer(RegexLexer):
'DEFINED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEREF', 'DESC',
'DESCRIBE', 'DESCRIPTOR', 'DESTROY', 'DESTRUCTOR', 'DETERMINISTIC',
'DIAGNOSTICS', 'DICTIONARY', 'DISCONNECT', 'DISPATCH', 'DISTINCT', 'DO',
- 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE',
- 'EACH', 'ELSE', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY',
+ 'DOMAIN', 'DROP', 'DYNAMIC', 'DYNAMIC_FUNCTION', 'DYNAMIC_FUNCTION_CODE', 'EACH',
+ 'ELSE', 'ELSIF', 'ENCODING', 'ENCRYPTED', 'END', 'END-EXEC', 'EQUALS', 'ESCAPE', 'EVERY',
'EXCEPTION', 'EXCEPT', 'EXCLUDING', 'EXCLUSIVE', 'EXEC', 'EXECUTE', 'EXISTING',
'EXISTS', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FOR',
'FORCE', 'FOREIGN', 'FORTRAN', 'FORWARD', 'FOUND', 'FREE', 'FREEZE', 'FROM', 'FULL',
'FUNCTION', 'G', 'GENERAL', 'GENERATED', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GRANTED',
- 'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY',
+ 'GROUP', 'GROUPING', 'HANDLER', 'HAVING', 'HIERARCHY', 'HOLD', 'HOST', 'IDENTITY', 'IF',
'IGNORE', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLEMENTATION', 'IMPLICIT', 'IN',
'INCLUDING', 'INCREMENT', 'INDEX', 'INDITCATOR', 'INFIX', 'INHERITS', 'INITIALIZE',
'INITIALLY', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTANTIABLE',
diff --git a/pygments/lexers/supercollider.py b/pygments/lexers/supercollider.py
index d3e4c460..cef147b8 100644
--- a/pygments/lexers/supercollider.py
+++ b/pygments/lexers/supercollider.py
@@ -11,7 +11,7 @@
import re
-from pygments.lexer import RegexLexer, include, words
+from pygments.lexer import RegexLexer, include, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
@@ -43,7 +43,7 @@ class SuperColliderLexer(RegexLexer):
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
- (r'', Text, '#pop')
+ default('#pop'),
],
'badregex': [
(r'\n', Text, '#pop')
@@ -79,8 +79,8 @@ class SuperColliderLexer(RegexLexer):
'thisFunctionDef', 'thisFunction', 'thisMethod', 'thisProcess',
'thisThread', 'this'), suffix=r'\b'),
Name.Builtin),
- (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
- (r'\\?[$a-zA-Z_][a-zA-Z0-9_]*', String.Symbol),
+ (r'[$a-zA-Z_]\w*', Name.Other),
+ (r'\\?[$a-zA-Z_]\w*', String.Symbol),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 71055a9f..3e55b6ad 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -251,7 +251,7 @@ class VelocityLexer(RegexLexer):
'funcparams': [
(r'\$\{?', Punctuation, 'variable'),
(r'\s+', Text),
- (r',', Punctuation),
+ (r'[,:]', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
@@ -259,6 +259,8 @@ class VelocityLexer(RegexLexer):
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
]
@@ -875,7 +877,7 @@ class GenshiMarkupLexer(RegexLexer):
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
- (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+ (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
include('variable'),
(r'[<$]', Other),
],
@@ -1780,8 +1782,6 @@ class LassoJavascriptLexer(DelegatingLexer):
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
- if 'function' in text:
- rv += 0.2
return rv
diff --git a/pygments/lexers/testing.py b/pygments/lexers/testing.py
index 0bdebe74..be8b6f71 100644
--- a/pygments/lexers/testing.py
+++ b/pygments/lexers/testing.py
@@ -147,7 +147,7 @@ class TAPLexer(RegexLexer):
(r'^TAP version \d+\n', Name.Namespace),
# Specify a plan with a plan line.
- (r'^1..\d+', Keyword.Declaration, 'plan'),
+ (r'^1\.\.\d+', Keyword.Declaration, 'plan'),
# A test failure
(r'^(not ok)([^\S\n]*)(\d*)',
diff --git a/pygments/lexers/textfmts.py b/pygments/lexers/textfmts.py
index 43b16f8c..cab9add5 100644
--- a/pygments/lexers/textfmts.py
+++ b/pygments/lexers/textfmts.py
@@ -122,6 +122,11 @@ class HttpLexer(RegexLexer):
flags = re.DOTALL
+ def get_tokens_unprocessed(self, text, stack=('root',)):
+ """Reset the content-type state."""
+ self.content_type = None
+ return RegexLexer.get_tokens_unprocessed(self, text, stack)
+
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py
index 47fdc8b6..f8c7d0a9 100644
--- a/pygments/lexers/theorem.py
+++ b/pygments/lexers/theorem.py
@@ -43,7 +43,8 @@ class CoqLexer(RegexLexer):
'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save',
'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search',
'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
- 'outside', 'Check',
+ 'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
+ 'Universe', 'Polymorphic', 'Monomorphic', 'Context'
)
keywords2 = (
# Gallina
@@ -64,12 +65,16 @@ class CoqLexer(RegexLexer):
'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
- 'split', 'left', 'right', 'autorewrite', 'tauto',
+ 'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
+ 'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
+ 'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
+ 'native_compute', 'subst',
)
keywords5 = (
# Terminators
'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega',
'assumption', 'solve', 'contradiction', 'discriminate',
+ 'congruence',
)
keywords6 = (
# Control
@@ -87,15 +92,13 @@ class CoqLexer(RegexLexer):
'->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
'<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
- r'/\\', r'\\/',
+ r'/\\', r'\\/', r'\{\|', r'\|\}',
u'Π', u'λ',
)
operators = r'[!$%&*+\./:<=>?@^|~-]'
- word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
- primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list',
- 'array')
+ primitives = ('unit', 'nat', 'bool', 'string', 'ascii', 'list')
tokens = {
'root': [
@@ -108,11 +111,10 @@ class CoqLexer(RegexLexer):
(words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
(words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
- (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
- (r'\b([A-Z][\w\']*)', Name.Class),
+ # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
+ (r'\b([A-Z][\w\']*)', Name),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
- (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
@@ -130,7 +132,7 @@ class CoqLexer(RegexLexer):
(r'"', String.Double, 'string'),
- (r'[~?][a-z][\w\']*:', Name.Variable),
+ (r'[~?][a-z][\w\']*:', Name),
],
'comment': [
(r'[^(*)]+', Comment),
@@ -388,20 +390,23 @@ class LeanLexer(RegexLexer):
flags = re.MULTILINE | re.UNICODE
- keywords1 = ('import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition', 'renaming',
- 'inline', 'hiding', 'exposing', 'parameter', 'parameters', 'conjecture',
- 'hypothesis', 'lemma', 'corollary', 'variable', 'variables', 'print', 'theorem',
- 'axiom', 'inductive', 'structure', 'universe', 'alias', 'help',
- 'options', 'precedence', 'postfix', 'prefix', 'calc_trans', 'calc_subst', 'calc_refl',
- 'infix', 'infixl', 'infixr', 'notation', 'eval', 'check', 'exit', 'coercion', 'end',
- 'private', 'using', 'namespace', 'including', 'instance', 'section', 'context',
- 'protected', 'expose', 'export', 'set_option', 'add_rewrite', 'extends',
- 'open', 'example', 'constant', 'constants', 'print', 'opaque', 'reducible', 'irreducible'
+ keywords1 = (
+ 'import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition',
+ 'renaming', 'inline', 'hiding', 'exposing', 'parameter', 'parameters',
+ 'conjecture', 'hypothesis', 'lemma', 'corollary', 'variable', 'variables',
+ 'print', 'theorem', 'axiom', 'inductive', 'structure', 'universe', 'alias',
+ 'help', 'options', 'precedence', 'postfix', 'prefix', 'calc_trans',
+ 'calc_subst', 'calc_refl', 'infix', 'infixl', 'infixr', 'notation', 'eval',
+ 'check', 'exit', 'coercion', 'end', 'private', 'using', 'namespace',
+ 'including', 'instance', 'section', 'context', 'protected', 'expose',
+ 'export', 'set_option', 'add_rewrite', 'extends', 'open', 'example',
+ 'constant', 'constants', 'print', 'opaque', 'reducible', 'irreducible',
)
keywords2 = (
- 'forall', 'fun', 'Pi', 'obtain', 'from', 'have', 'show', 'assume', 'take',
- 'let', 'if', 'else', 'then', 'by', 'in', 'with', 'begin', 'proof', 'qed', 'calc', 'match'
+ 'forall', 'fun', 'Pi', 'obtain', 'from', 'have', 'show', 'assume',
+ 'take', 'let', 'if', 'else', 'then', 'by', 'in', 'with', 'begin',
+ 'proof', 'qed', 'calc', 'match',
)
keywords3 = (
@@ -412,10 +417,10 @@ class LeanLexer(RegexLexer):
operators = (
'!=', '#', '&', '&&', '*', '+', '-', '/', '@', '!', '`',
'-.', '->', '.', '..', '...', '::', ':>', ';', ';;', '<',
- '<-', '=', '==', '>', '_', '`', '|', '||', '~', '=>', '<=', '>=',
+ '<-', '=', '==', '>', '_', '|', '||', '~', '=>', '<=', '>=',
'/\\', '\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥',
- u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈', u'×', u'⌞', u'⌟', u'≡',
- u'⟨', u'⟩'
+ u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈', u'×', u'⌞',
+ u'⌟', u'≡', u'⟨', u'⟩',
)
punctuation = ('(', ')', ':', '{', '}', '[', ']', u'⦃', u'⦄', ':=', ',')
diff --git a/pygments/lexers/trafficscript.py b/pygments/lexers/trafficscript.py
index 34ca7d5b..03ab6a06 100644
--- a/pygments/lexers/trafficscript.py
+++ b/pygments/lexers/trafficscript.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""
-
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,7 @@
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
import re
from pygments.lexer import RegexLexer
@@ -16,6 +16,7 @@ from pygments.token import String, Number, Name, Keyword, Operator, Text, Commen
__all__ = ['RtsLexer']
+
class RtsLexer(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
diff --git a/pygments/lexers/typoscript.py b/pygments/lexers/typoscript.py
new file mode 100644
index 00000000..407847ed
--- /dev/null
+++ b/pygments/lexers/typoscript.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.typoscript
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for TypoScript
+
+ `TypoScriptLexer`
+ A TypoScript lexer.
+
+ `TypoScriptCssDataLexer`
+ Lexer that highlights markers, constants and registers within css.
+
+ `TypoScriptHtmlDataLexer`
+ Lexer that highlights markers, constants and registers within html tags.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using
+from pygments.token import Keyword, Text, Comment, Name, String, Number, \
+ Operator, Punctuation
+from pygments.lexer import DelegatingLexer
+from pygments.lexers.web import HtmlLexer, CssLexer
+
+__all__ = ['TypoScriptLexer', 'TypoScriptCssDataLexer', 'TypoScriptHtmlDataLexer']
+
+
+class TypoScriptCssDataLexer(RegexLexer):
+ """
+ Lexer that highlights markers, constants and registers within css blocks.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScriptCssData'
+ aliases = ['typoscriptcssdata']
+
+ tokens = {
+ 'root': [
+ # marker: ###MARK###
+ (r'(.*)(###\w+###)(.*)', bygroups(String, Name.Constant, String)),
+ # constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # constant: {register:somevalue}
+ (r'(.*)(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})(.*)',
+ bygroups(String, String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol, String)), # constant
+ # whitespace
+ (r'\s+', Text),
+ # comments
+ (r'/\*(?:(?!\*/).)*\*/', Comment),
+ (r'(?<!(#|\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\n#]+|//[^\n]*)',
+ Comment),
+ # other
+ (r'[<>,:=.*%+|]', String),
+ (r'[\w"\-!/&;(){}]+', String),
+ ]
+ }
+
+
+class TypoScriptHtmlDataLexer(RegexLexer):
+ """
+ Lexer that highlights markers, constants and registers within html tags.
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScriptHtmlData'
+ aliases = ['typoscripthtmldata']
+
+ tokens = {
+ 'root': [
+ # INCLUDE_TYPOSCRIPT
+ (r'(INCLUDE_TYPOSCRIPT)', Name.Class),
+ # Language label or extension resource FILE:... or LLL:... or EXT:...
+ (r'(EXT|FILE|LLL):[^}\n"]*', String),
+ # marker: ###MARK###
+ (r'(.*)(###\w+###)(.*)', bygroups(String, Name.Constant, String)),
+ # constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # constant: {register:somevalue}
+ (r'(.*)(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})(.*)',
+ bygroups(String, String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol, String)), # constant
+ # whitespace
+ (r'\s+', Text),
+ # other
+ (r'[<>,:=.*%+|]', String),
+ (r'[\w"\-!/&;(){}#]+', String),
+ ]
+ }
+
+
+class TypoScriptLexer(RegexLexer):
+ """
+ Lexer for TypoScript code.
+
+ http://docs.typo3.org/typo3cms/TyposcriptReference/
+
+ .. versionadded:: 2.2
+ """
+
+ name = 'TypoScript'
+ aliases = ['typoscript']
+ filenames = ['*.ts', '*.txt']
+ mimetypes = ['text/x-typoscript']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ include('comment'),
+ include('constant'),
+ include('html'),
+ include('label'),
+ include('whitespace'),
+ include('keywords'),
+ include('punctuation'),
+ include('operator'),
+ include('structure'),
+ include('literal'),
+ include('other'),
+ ],
+ 'keywords': [
+ # Conditions
+ (r'(\[)(?i)(browser|compatVersion|dayofmonth|dayofweek|dayofyear|'
+ r'device|ELSE|END|GLOBAL|globalString|globalVar|hostname|hour|IP|'
+ r'language|loginUser|loginuser|minute|month|page|PIDinRootline|'
+ r'PIDupinRootline|system|treeLevel|useragent|userFunc|usergroup|'
+ r'version)([^\]]*)(\])',
+ bygroups(String.Symbol, Name.Constant, Text, String.Symbol)),
+ # Functions
+ (r'(?=[\w\-])(HTMLparser|HTMLparser_tags|addParams|cache|encapsLines|'
+ r'filelink|if|imageLinkWrap|imgResource|makelinks|numRows|numberFormat|'
+ r'parseFunc|replacement|round|select|split|stdWrap|strPad|tableStyle|'
+ r'tags|textStyle|typolink)(?![\w\-])', Name.Function),
+ # Toplevel objects and _*
+ (r'(?:(=?\s*<?\s+|^\s*))(cObj|field|config|content|constants|FEData|'
+ r'file|frameset|includeLibs|lib|page|plugin|register|resources|sitemap|'
+ r'sitetitle|styles|temp|tt_[^:.\s]*|types|xmlnews|INCLUDE_TYPOSCRIPT|'
+ r'_CSS_DEFAULT_STYLE|_DEFAULT_PI_VARS|_LOCAL_LANG)(?![\w\-])',
+ bygroups(Operator, Name.Builtin)),
+ # Content objects
+ (r'(?=[\w\-])(CASE|CLEARGIF|COA|COA_INT|COBJ_ARRAY|COLUMNS|CONTENT|'
+ r'CTABLE|EDITPANEL|FILE|FILES|FLUIDTEMPLATE|FORM|HMENU|HRULER|HTML|'
+ r'IMAGE|IMGTEXT|IMG_RESOURCE|LOAD_REGISTER|MEDIA|MULTIMEDIA|OTABLE|'
+ r'PAGE|QTOBJECT|RECORDS|RESTORE_REGISTER|SEARCHRESULT|SVG|SWFOBJECT|'
+ r'TEMPLATE|TEXT|USER|USER_INT)(?![\w\-])', Name.Class),
+ # Menu states
+ (r'(?=[\w\-])(ACTIFSUBRO|ACTIFSUB|ACTRO|ACT|CURIFSUBRO|CURIFSUB|CURRO|'
+ r'CUR|IFSUBRO|IFSUB|NO|SPC|USERDEF1RO|USERDEF1|USERDEF2RO|USERDEF2|'
+ r'USRRO|USR)', Name.Class),
+ # Menu objects
+ (r'(?=[\w\-])(GMENU_FOLDOUT|GMENU_LAYERS|GMENU|IMGMENUITEM|IMGMENU|'
+ r'JSMENUITEM|JSMENU|TMENUITEM|TMENU_LAYERS|TMENU)', Name.Class),
+ # PHP objects
+ (r'(?=[\w\-])(PHP_SCRIPT(_EXT|_INT)?)', Name.Class),
+ (r'(?=[\w\-])(userFunc)(?![\w\-])', Name.Function),
+ ],
+ 'whitespace': [
+ (r'\s+', Text),
+ ],
+        'html': [
+ (r'<\S[^\n>]*>', using(TypoScriptHtmlDataLexer)),
+ (r'&[^;\n]*;', String),
+            (r'(_CSS_DEFAULT_STYLE)(\s*)(\()(.*(?=\n\)))',
+ bygroups(Name.Class, Text, String.Symbol, using(TypoScriptCssDataLexer))),
+ ],
+ 'literal': [
+            (r'0x[0-9A-Fa-f]+t?', Number.Hex),
+ # (r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?\s*(?:[^=])', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r'(###\w+###)', Name.Constant),
+ ],
+ 'label': [
+ # Language label or extension resource FILE:... or LLL:... or EXT:...
+ (r'(EXT|FILE|LLL):[^}\n"]*', String),
+ # Path to a resource
+ (r'(?![^\w\-])([\w\-]+(?:/[\w\-]+)+/?)(\S*\n)',
+ bygroups(String, String)),
+ ],
+ 'punctuation': [
+ (r'[,.]', Punctuation),
+ ],
+ 'operator': [
+ (r'[<>,:=.*%+|]', Operator),
+ ],
+ 'structure': [
+ # Brackets and braces
+ (r'[{}()\[\]\\]', String.Symbol),
+ ],
+ 'constant': [
+ # Constant: {$some.constant}
+ (r'(\{)(\$)((?:[\w\-]+\.)*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Operator, Name.Constant,
+ Name.Constant, String.Symbol)), # constant
+ # Constant: {register:somevalue}
+ (r'(\{)([\w\-]+)(\s*:\s*)([\w\-]+)(\})',
+ bygroups(String.Symbol, Name.Constant, Operator,
+ Name.Constant, String.Symbol)), # constant
+ # Hex color: #ff0077
+ (r'(#[a-fA-F0-9]{6}\b|#[a-fA-F0-9]{3}\b)', String.Char)
+ ],
+ 'comment': [
+ (r'(?<!(#|\'|"))(?:#(?!(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3}))[^\n#]+|//[^\n]*)',
+ Comment),
+ (r'/\*(?:(?!\*/).)*\*/', Comment),
+ (r'(\s*#\s*\n)', Comment),
+ ],
+ 'other': [
+ (r'[\w"\-!/&;]+', Text),
+ ],
+ }
+
+ def analyse_text(text):
+ if '<INCLUDE_TYPOSCRIPT:' in text:
+ return 1.0
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
new file mode 100644
index 00000000..e64a601b
--- /dev/null
+++ b/pygments/lexers/varnish.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.varnish
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Varnish configuration
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, \
+ inherit, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal
+
+__all__ = ['VCLLexer', 'VCLSnippetLexer']
+
+
+class VCLLexer(RegexLexer):
+ """
+ For Varnish Configuration Language (VCL).
+
+ .. versionadded:: 2.2
+ """
+ name = 'VCL'
+ aliases = ['vcl']
+ filenames = ['*.vcl']
+ mimetypes = ['text/x-vclsrc']
+
+ def analyse_text(text):
+ # If the very first line is 'vcl 4.0;' it's pretty much guaranteed
+ # that this is VCL
+ if text.startswith('vcl 4.0;'):
+ return 1.0
+ # Skip over comments and blank lines
+ # This is accurate enough that returning 0.9 is reasonable.
+ # Almost no VCL files start without some comments.
+        elif '\nvcl 4.0;' in text[:1000]:
+ return 0.9
+
+ tokens = {
+ 'probe': [
+ include('whitespace'),
+ include('comments'),
+ (r'(\.\w+)(\s*=\s*)([^;]*)(;)',
+ bygroups(Name.Attribute, Operator, using(this), Punctuation)),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'acl': [
+ include('whitespace'),
+ include('comments'),
+ (r'[!/]+', Operator),
+ (r';', Punctuation),
+ (r'\d+', Number),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'backend': [
+ include('whitespace'),
+ (r'(\.probe)(\s*=\s*)(\w+)(;)',
+ bygroups(Name.Attribute, Operator, Name.Variable.Global, Punctuation)),
+ (r'(\.probe)(\s*=\s*)(\{)',
+ bygroups(Name.Attribute, Operator, Punctuation), 'probe'),
+ (r'(\.\w+\b)(\s*=\s*)([^;]*)(\s*;)',
+ bygroups(Name.Attribute, Operator, using(this), Punctuation)),
+ (r'\{', Punctuation, '#push'),
+ (r'\}', Punctuation, '#pop'),
+ ],
+ 'statements': [
+            (r'(\d\.)?\d+ms', Literal.Date),
+            (r'(\d\.)?\d+[sdwhmy]', Literal.Date),
+ (r'(vcl_pass|vcl_hash|vcl_hit|vcl_init|vcl_backend_fetch|vcl_pipe|'
+ r'vcl_backend_response|vcl_synth|vcl_deliver|vcl_backend_error|'
+ r'vcl_fini|vcl_recv|vcl_purge|vcl_miss)\b', Name.Function),
+ (r'(pipe|retry|hash|synth|deliver|purge|abandon|lookup|pass|fail|ok|'
+ r'miss|fetch|restart)\b', Name.Constant),
+ (r'(beresp|obj|resp|req|req_top|bereq)\.http\.[a-zA-Z_-]+\b', Name.Variable),
+ (words((
+ 'obj.status', 'req.hash_always_miss', 'beresp.backend', 'req.esi_level',
+ 'req.can_gzip', 'beresp.ttl', 'obj.uncacheable', 'req.ttl', 'obj.hits',
+ 'client.identity', 'req.hash_ignore_busy', 'obj.reason', 'req.xid',
+ 'req_top.proto', 'beresp.age', 'obj.proto', 'obj.age', 'local.ip',
+ 'beresp.uncacheable', 'req.method', 'beresp.backend.ip', 'now',
+ 'obj.grace', 'req.restarts', 'beresp.keep', 'req.proto', 'resp.proto',
+ 'bereq.xid', 'bereq.between_bytes_timeout', 'req.esi',
+ 'bereq.first_byte_timeout', 'bereq.method', 'bereq.connect_timeout',
+ 'beresp.do_gzip', 'resp.status', 'beresp.do_gunzip',
+ 'beresp.storage_hint', 'resp.is_streaming', 'beresp.do_stream',
+ 'req_top.method', 'bereq.backend', 'beresp.backend.name', 'beresp.status',
+ 'req.url', 'obj.keep', 'obj.ttl', 'beresp.reason', 'bereq.retries',
+ 'resp.reason', 'bereq.url', 'beresp.do_esi', 'beresp.proto', 'client.ip',
+ 'bereq.proto', 'server.hostname', 'remote.ip', 'req.backend_hint',
+ 'server.identity', 'req_top.url', 'beresp.grace', 'beresp.was_304',
+                'server.ip', 'bereq.uncacheable'), suffix=r'\b'),
+ Name.Variable),
+ (r'[!%&+*\-,/<.}{>=|~]+', Operator),
+ (r'[();]', Punctuation),
+
+ (r'[,]+', Punctuation),
+            (words(('include', 'hash_data', 'regsub', 'regsuball', 'if', 'else',
+                    'elsif', 'elif', 'synth', 'synthetic', 'ban',
+                    'return', 'set', 'unset', 'import', 'new',
+                    'rollback', 'call'), suffix=r'\b'),
+             Keyword),
+ (r'storage\.\w+\.\w+\b', Name.Variable),
+            (words(('true', 'false'), suffix=r'\b'), Name.Builtin),
+ (r'\d+\b', Number),
+ (r'(backend)(\s+\w+)(\s*\{)',
+ bygroups(Keyword, Name.Variable.Global, Punctuation), 'backend'),
+ (r'(probe\s)(\s*\w+\s)(\{)',
+ bygroups(Keyword, Name.Variable.Global, Punctuation), 'probe'),
+ (r'(acl\s)(\s*\w+\s)(\{)',
+ bygroups(Keyword, Name.Variable.Global, Punctuation), 'acl'),
+            (r'(vcl )(4\.0)(;)$',
+ bygroups(Keyword.Reserved, Name.Constant, Punctuation)),
+ (r'(sub\s+)([a-zA-Z]\w*)(\s*\{)',
+ bygroups(Keyword, Name.Function, Punctuation)),
+ (r'([a-zA-Z_]\w*)'
+ r'(\.)'
+ r'([a-zA-Z_]\w*)'
+ r'(\s*\(.*\))',
+ bygroups(Name.Function, Punctuation, Name.Function, using(this))),
+            (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'comments': [
+ (r'#.*$', Comment),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'//.*$', Comment),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'[^"\n]+', String), # all other characters
+ ],
+ 'multistring': [
+ (r'[^"}]', String),
+ (r'"\}', String, '#pop'),
+ (r'["}]', String),
+ ],
+ 'whitespace': [
+ (r'L?"', String, 'string'),
+ (r'\{"', String, 'multistring'),
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text), # line continuation
+ ],
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+ include('statements'),
+ (r'\s+', Text),
+ ],
+ }
+
+
+class VCLSnippetLexer(VCLLexer):
+ """
+ For Varnish Configuration Language snippets.
+
+ .. versionadded:: 2.2
+ """
+ name = 'VCLSnippets'
+ aliases = ['vclsnippets', 'vclsnippet']
+ mimetypes = ['text/x-vclsnippet']
+ filenames = []
+
+ def analyse_text(text):
+ # override method inherited from VCLLexer
+ return 0
+
+ tokens = {
+ 'snippetspre': [
+ (r'\.\.\.+', Comment),
+ (r'(bereq|req|req_top|resp|beresp|obj|client|server|local|remote|'
+ r'storage)($|\.\*)', Name.Variable),
+ ],
+ 'snippetspost': [
+ (r'(backend)\b', Keyword.Reserved),
+ ],
+ 'root': [
+ include('snippetspre'),
+ inherit,
+ include('snippetspost'),
+ ],
+ }
diff --git a/pygments/lexers/verification.py b/pygments/lexers/verification.py
new file mode 100644
index 00000000..2391eb49
--- /dev/null
+++ b/pygments/lexers/verification.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.verification
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Intermediate Verification Languages (IVLs).
+
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Operator, Keyword, Name, Number, \
+ Punctuation, Whitespace
+
+__all__ = ['BoogieLexer', 'SilverLexer']
+
+
+class BoogieLexer(RegexLexer):
+ """
+ For `Boogie <https://boogie.codeplex.com/>`_ source code.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Boogie'
+ aliases = ['boogie']
+ filenames = ['*.bpl']
+
+ tokens = {
+ 'root': [
+ # Whitespace and Comments
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'//[/!](.*?)\n', Comment.Doc),
+ (r'//(.*?)\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+
+ (words((
+ 'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
+ 'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
+ 'then', 'var', 'while'),
+ suffix=r'\b'), Keyword),
+ (words(('const',), suffix=r'\b'), Keyword.Reserved),
+
+ (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
+ include('numbers'),
+ (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
+ (r"([{}():;,.])", Punctuation),
+ # Identifier
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'numbers': [
+ (r'[0-9]+', Number.Integer),
+ ],
+ }
+
+
+class SilverLexer(RegexLexer):
+ """
+ For `Silver <https://bitbucket.org/viperproject/silver>`_ source code.
+
+ .. versionadded:: 2.2
+ """
+ name = 'Silver'
+ aliases = ['silver']
+ filenames = ['*.sil']
+
+ tokens = {
+ 'root': [
+ # Whitespace and Comments
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'//[/!](.*?)\n', Comment.Doc),
+ (r'//(.*?)\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+
+ (words((
+ 'result', 'true', 'false', 'null', 'method', 'function',
+ 'predicate', 'program', 'domain', 'axiom', 'var', 'returns',
+ 'field', 'define', 'requires', 'ensures', 'invariant',
+ 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert',
+ 'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh',
+ 'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection',
+ 'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists',
+ 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique'),
+ suffix=r'\b'), Keyword),
+ (words(('Int', 'Perm', 'Bool', 'Ref'), suffix=r'\b'), Keyword.Type),
+ include('numbers'),
+
+ (r'[!%&*+=|?:<>/-]', Operator),
+ (r"([{}():;,.])", Punctuation),
+ # Identifier
+ (r'[a-zA-Z_$0-9]\w*', Name),
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'numbers': [
+ (r'[0-9]+', Number.Integer),
+ ],
+ }
diff --git a/pygments/lexers/webmisc.py b/pygments/lexers/webmisc.py
index def11dba..551846c2 100644
--- a/pygments/lexers/webmisc.py
+++ b/pygments/lexers/webmisc.py
@@ -191,6 +191,14 @@ class XQueryLexer(ExtendedRegexLexer):
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
+ def pushstate_operator_map_callback(lexer, match, ctx):
+ yield match.start(), Keyword, match.group(1)
+ yield match.start(), Text, match.group(2)
+ yield match.start(), Punctuation, match.group(3)
+ ctx.stack = ['root']
+ lexer.xquery_parse_state.append('operator')
+ ctx.pos = match.end()
+
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
@@ -338,11 +346,11 @@ class XQueryLexer(ExtendedRegexLexer):
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
- (r'return|satisfies|to|union|where|preserve\s+strip',
+ (r'return|satisfies|to|union|where|count|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)',
operator_root_callback),
- (r'(::|;|\[|//|/|,)',
+ (r'(::|:|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
@@ -356,13 +364,18 @@ class XQueryLexer(ExtendedRegexLexer):
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
- (r'(for|let)(\s+)(\$)',
+ (r'(for|let|previous|next)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
+ (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
+ bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
# (r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
+ (r'(allowing)(\s+)(empty)', bygroups(Keyword, Text, Keyword)),
(r'external', Keyword),
+ (r'(start|when|end)', Keyword, 'root'),
+ (r'(only)(\s+)(end)', bygroups(Keyword, Text, Keyword), 'root'),
(r'collation', Keyword, 'uritooperator'),
# eXist specific XQUF
@@ -421,6 +434,7 @@ class XQueryLexer(ExtendedRegexLexer):
(r'(' + qname + ')(\()?', bygroups(Name, Punctuation), 'operator'),
],
'singletype': [
+ include('whitespace'),
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
@@ -437,7 +451,7 @@ class XQueryLexer(ExtendedRegexLexer):
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
- ('occurrenceindicator', 'kindtestforpi')),
+ ('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
@@ -448,7 +462,7 @@ class XQueryLexer(ExtendedRegexLexer):
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
- (r'except|intersect|in|is|return|satisfies|to|union|where',
+ (r'except|intersect|in|is|return|satisfies|to|union|where|count',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
@@ -464,7 +478,7 @@ class XQueryLexer(ExtendedRegexLexer):
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
- (r'(function)(\()', bygroups(Keyword.Type, Punctuation)),
+ (r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
@@ -555,6 +569,7 @@ class XQueryLexer(ExtendedRegexLexer):
(qname, Name.Tag),
],
'xmlspace_decl': [
+ include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
@@ -617,13 +632,15 @@ class XQueryLexer(ExtendedRegexLexer):
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
+ (r'(declare)(\s+)(context)(\s+)(item)',
+ bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
- (r'(\})', popstate_callback),
+ (r'(\}|\])', popstate_callback),
# NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
@@ -644,6 +661,8 @@ class XQueryLexer(ExtendedRegexLexer):
# VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
+ (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
+ bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
@@ -677,8 +696,8 @@ class XQueryLexer(ExtendedRegexLexer):
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
- (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
- (r'(element|attribute)(\s*)(\{)',
+ (r'(switch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
+ (r'(element|attribute|namespace)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
@@ -690,18 +709,21 @@ class XQueryLexer(ExtendedRegexLexer):
(r'(element)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'element_qname'),
# PROCESSING_INSTRUCTION
- (r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
+ (r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration)),
- (r'(\{)', pushstate_operator_root_callback),
+ (r'(\{|\[)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
+ (r'(map|array)(\s*)(\{)',
+ pushstate_operator_map_callback),
+
(r'(declare)(\s+)(ordering)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'declareordering'),
@@ -739,10 +761,11 @@ class XQueryLexer(ExtendedRegexLexer):
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
- (r'(@'+qname+')', Name.Attribute),
- (r'(@'+ncname+')', Name.Attribute),
- (r'@\*:'+ncname, Name.Attribute),
- (r'(@)', Name.Attribute),
+ (r'(@'+qname+')', Name.Attribute, 'operator'),
+ (r'(@'+ncname+')', Name.Attribute, 'operator'),
+ (r'@\*:'+ncname, Name.Attribute, 'operator'),
+ (r'@\*', Name.Attribute, 'operator'),
+ (r'(@)', Name.Attribute, 'operator'),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
@@ -833,10 +856,11 @@ class CirruLexer(RegexLexer):
Syntax rules of Cirru can be found at:
http://cirru.org/
- * using ``()`` to markup blocks, but limited in the same line
- * using ``""`` to markup strings, allow ``\`` to escape
- * using ``$`` as a shorthand for ``()`` till indentation end or ``)``
- * using indentations for create nesting
+ * using ``()`` for expressions, but restricted in a same line
+ * using ``""`` for strings, with ``\`` for escaping chars
+ * using ``$`` as folding operator
+ * using ``,`` as unfolding operator
+ * using indentations for nested blocks
.. versionadded:: 2.0
"""
@@ -857,16 +881,16 @@ class CirruLexer(RegexLexer):
(r'.', String.Escape, '#pop'),
],
'function': [
+ (r'\,', Operator, '#pop'),
(r'[^\s"()]+', Name.Function, '#pop'),
(r'\)', Operator, '#pop'),
(r'(?=\n)', Text, '#pop'),
(r'\(', Operator, '#push'),
(r'"', String, ('#pop', 'string')),
(r'[ ]+', Text.Whitespace),
- (r'\,', Operator, '#pop'),
],
'line': [
- (r'\$', Operator, 'function'),
+ (r'(?<!\w)\$(?!\w)', Operator, 'function'),
(r'\(', Operator, 'function'),
(r'\)', Operator),
(r'\n', Text, '#pop'),
diff --git a/pygments/scanner.py b/pygments/scanner.py
index 35dbbadd..3ff11e4a 100644
--- a/pygments/scanner.py
+++ b/pygments/scanner.py
@@ -66,7 +66,8 @@ class Scanner(object):
def test(self, pattern):
"""Apply a pattern on the current position and check
- if it patches. Doesn't touch pos."""
+ if it patches. Doesn't touch pos.
+ """
return self.check(pattern) is not None
def scan(self, pattern):
diff --git a/pygments/sphinxext.py b/pygments/sphinxext.py
index 2dc9810f..de8cd73b 100644
--- a/pygments/sphinxext.py
+++ b/pygments/sphinxext.py
@@ -57,6 +57,7 @@ FILTERDOC = '''
'''
+
class PygmentsDoc(Directive):
"""
A directive to collect all lexers/formatters/filters and generate
diff --git a/pygments/style.py b/pygments/style.py
index b2b990ea..68ee3a19 100644
--- a/pygments/style.py
+++ b/pygments/style.py
@@ -12,6 +12,29 @@
from pygments.token import Token, STANDARD_TYPES
from pygments.util import add_metaclass
+# Default mapping of #ansixxx to RGB colors.
+_ansimap = {
+ # dark
+ '#ansiblack': '000000',
+ '#ansidarkred': '7f0000',
+ '#ansidarkgreen': '007f00',
+    '#ansibrown': '7f7f00',
+ '#ansidarkblue': '00007f',
+ '#ansipurple': '7f007f',
+ '#ansiteal': '007f7f',
+ '#ansilightgray': 'e5e5e5',
+ # normal
+ '#ansidarkgray': '555555',
+ '#ansired': 'ff0000',
+ '#ansigreen': '00ff00',
+ '#ansiyellow': 'ffff00',
+ '#ansiblue': '0000ff',
+ '#ansifuchsia': 'ff00ff',
+ '#ansiturquoise': '00ffff',
+ '#ansiwhite': 'ffffff',
+}
+ansicolors = set(_ansimap)
+
class StyleMeta(type):
@@ -22,6 +45,8 @@ class StyleMeta(type):
obj.styles[token] = ''
def colorformat(text):
+ if text in ansicolors:
+ return text
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
@@ -79,16 +104,28 @@ class StyleMeta(type):
def style_for_token(cls, token):
t = cls._styles[token]
+ ansicolor = bgansicolor = None
+ color = t[0]
+ if color.startswith('#ansi'):
+ ansicolor = color
+ color = _ansimap[color]
+ bgcolor = t[4]
+ if bgcolor.startswith('#ansi'):
+ bgansicolor = bgcolor
+ bgcolor = _ansimap[bgcolor]
+
return {
- 'color': t[0] or None,
+ 'color': color or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
- 'bgcolor': t[4] or None,
+ 'bgcolor': bgcolor or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
+ 'ansicolor': ansicolor,
+ 'bgansicolor': bgansicolor,
}
def list_styles(cls):
diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py
index d7a0564a..4efd196e 100644
--- a/pygments/styles/__init__.py
+++ b/pygments/styles/__init__.py
@@ -41,6 +41,7 @@ STYLE_MAP = {
'lovelace': 'lovelace::LovelaceStyle',
'algol': 'algol::AlgolStyle',
'algol_nu': 'algol_nu::Algol_NuStyle',
+    'arduino': 'arduino::ArduinoStyle',
}
diff --git a/pygments/styles/arduino.py b/pygments/styles/arduino.py
index cb4d17b0..1bf2103c 100644
--- a/pygments/styles/arduino.py
+++ b/pygments/styles/arduino.py
@@ -29,7 +29,7 @@ class ArduinoStyle(Style):
Comment: "#95a5a6", # class: 'c'
Comment.Multiline: "", # class: 'cm'
- Comment.Preproc: "#434f54", # class: 'cp'
+ Comment.Preproc: "#728E00", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
@@ -38,15 +38,15 @@ class ArduinoStyle(Style):
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: "", # class: 'kn'
Keyword.Pseudo: "#00979D", # class: 'kp'
- Keyword.Reserved: "", # class: 'kr'
+ Keyword.Reserved: "#00979D", # class: 'kr'
Keyword.Type: "#00979D", # class: 'kt'
- Operator: "#434f54", # class: 'o'
+ Operator: "#728E00", # class: 'o'
Operator.Word: "", # class: 'ow'
Name: "#434f54", # class: 'n'
Name.Attribute: "", # class: 'na'
- Name.Builtin: "", # class: 'nb'
+ Name.Builtin: "#728E00", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: "", # class: 'nc'
Name.Constant: "", # class: 'no'
@@ -64,7 +64,7 @@ class ArduinoStyle(Style):
Name.Variable.Global: "", # class: 'vg'
Name.Variable.Instance: "", # class: 'vi'
- Number: "#434f54", # class: 'm'
+ Number: "#8A7B52", # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
diff --git a/pygments/styles/lovelace.py b/pygments/styles/lovelace.py
index 31bd5505..236dde9b 100644
--- a/pygments/styles/lovelace.py
+++ b/pygments/styles/lovelace.py
@@ -8,6 +8,9 @@
Pygments style by Miikka Salminen (https://github.com/miikkas)
A desaturated, somewhat subdued style created for the Lovelace interactive
learning environment.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
"""
from pygments.style import Style
@@ -59,14 +62,18 @@ class LovelaceStyle(Style):
Name.Entity: _ESCAPE_LIME,
Name.Exception: _EXCEPT_YELLOW,
Name.Function: _FUN_BROWN,
+ Name.Function.Magic: _DOC_ORANGE,
Name.Label: _LABEL_CYAN,
Name.Namespace: _LABEL_CYAN,
Name.Tag: _KW_BLUE,
Name.Variable: '#b04040',
Name.Variable.Global:_EXCEPT_YELLOW,
+ Name.Variable.Magic: _DOC_ORANGE,
String: _STR_RED,
+ String.Affix: '#444444',
String.Char: _OW_PURPLE,
+ String.Delimiter: _DOC_ORANGE,
String.Doc: 'italic '+_DOC_ORANGE,
String.Escape: _ESCAPE_LIME,
String.Interpol: 'underline',
diff --git a/pygments/styles/perldoc.py b/pygments/styles/perldoc.py
index 47a097ca..eae6170d 100644
--- a/pygments/styles/perldoc.py
+++ b/pygments/styles/perldoc.py
@@ -41,7 +41,7 @@ class PerldocStyle(Style):
Operator.Word: '#8B008B',
Keyword: '#8B008B bold',
- Keyword.Type: '#a7a7a7',
+ Keyword.Type: '#00688B',
Name.Class: '#008b45 bold',
Name.Exception: '#008b45 bold',
diff --git a/pygments/token.py b/pygments/token.py
index f31625ed..fbd5b805 100644
--- a/pygments/token.py
+++ b/pygments/token.py
@@ -9,6 +9,7 @@
:license: BSD, see LICENSE for details.
"""
+
class _TokenType(tuple):
parent = None
@@ -43,31 +44,39 @@ class _TokenType(tuple):
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
+ def __copy__(self):
+ # These instances are supposed to be singletons
+ return self
+
+ def __deepcopy__(self, memo):
+ # These instances are supposed to be singletons
+ return self
+
-Token = _TokenType()
+Token = _TokenType()
# Special token types
-Text = Token.Text
-Whitespace = Text.Whitespace
-Escape = Token.Escape
-Error = Token.Error
+Text = Token.Text
+Whitespace = Text.Whitespace
+Escape = Token.Escape
+Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
-Other = Token.Other
+Other = Token.Other
# Common token types for source code
-Keyword = Token.Keyword
-Name = Token.Name
-Literal = Token.Literal
-String = Literal.String
-Number = Literal.Number
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
Punctuation = Token.Punctuation
-Operator = Token.Operator
-Comment = Token.Comment
+Operator = Token.Operator
+Comment = Token.Comment
# Generic types for non-source code
-Generic = Token.Generic
+Generic = Token.Generic
-# String and some others are not direct childs of Token.
+# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
@@ -139,6 +148,7 @@ STANDARD_TYPES = {
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
+ Name.Function.Magic: 'fm',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
@@ -148,13 +158,16 @@ STANDARD_TYPES = {
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
+ Name.Variable.Magic: 'vm',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
+ String.Affix: 'sa',
String.Backtick: 'sb',
String.Char: 'sc',
+ String.Delimiter: 'dl',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
diff --git a/scripts/debug_lexer.py b/scripts/debug_lexer.py
index cedd0988..4b7db41a 100755
--- a/scripts/debug_lexer.py
+++ b/scripts/debug_lexer.py
@@ -109,6 +109,8 @@ def main(fn, lexer=None, options={}):
lxcls = find_lexer_class(name)
if lxcls is None:
raise AssertionError('no lexer found for file %r' % fn)
+ print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
+ lxcls.__name__))
debug_lexer = False
# if profile:
# # does not work for e.g. ExtendedRegexLexers
diff --git a/setup.cfg b/setup.cfg
index abca6bcc..17eb2173 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,3 +5,6 @@ tag_date = true
[aliases]
release = egg_info -RDb ''
upload = upload --sign --identity=36580288
+
+[bdist_wheel]
+universal = 1 \ No newline at end of file
diff --git a/setup.py b/setup.py
index 951404e5..a07ac055 100755
--- a/setup.py
+++ b/setup.py
@@ -54,7 +54,7 @@ else:
setup(
name = 'Pygments',
- version = '2.1a0',
+ version = '2.2',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
diff --git a/tests/examplefiles/99_bottles_of_beer.chpl b/tests/examplefiles/99_bottles_of_beer.chpl
index 3629028d..cdc1e650 100644
--- a/tests/examplefiles/99_bottles_of_beer.chpl
+++ b/tests/examplefiles/99_bottles_of_beer.chpl
@@ -4,7 +4,7 @@
* by Brad Chamberlain and Steve Deitz
* 07/13/2006 in Knoxville airport while waiting for flight home from
* HPLS workshop
- * compiles and runs with chpl compiler version 1.7.0
+ * compiles and runs with chpl compiler version 1.12.0
* for more information, contact: chapel_info@cray.com
*
*
@@ -71,10 +71,13 @@ proc computeAction(bottleNum) {
// Modules...
module M1 {
var x = 10;
+
+ var y = 13.0;
}
module M2 {
- use M1;
+ use M1 except y;
+ use M1 only y;
proc main() {
writeln("M2 -> M1 -> x " + x);
}
@@ -148,10 +151,10 @@ class IntPair {
var ip = new IntPair(17,2);
write(ip);
-var targetDom: {1..10},
+var targetDom = {1..10},
target: [targetDom] int;
coforall i in targetDom with (ref target) {
- targetDom[i] = i ** 3;
+ target[i] = i ** 3;
}
var wideOpen = 0o777,
@@ -166,9 +169,11 @@ private module M3 {
}
private iter bar() {
-
+ for i in 1..10 {
+ yield i;
+ }
}
private var x: int;
-} \ No newline at end of file
+}
diff --git a/tests/examplefiles/StdGeneric.icl b/tests/examplefiles/StdGeneric.icl
new file mode 100644
index 00000000..2e6c3931
--- /dev/null
+++ b/tests/examplefiles/StdGeneric.icl
@@ -0,0 +1,92 @@
+implementation module StdGeneric
+
+import StdInt, StdMisc, StdClass, StdFunc
+
+generic bimap a b :: Bimap .a .b
+
+bimapId :: Bimap .a .a
+bimapId = { map_to = id, map_from = id }
+
+bimap{|c|} = { map_to = id, map_from = id }
+
+bimap{|PAIR|} bx by = { map_to= map_to, map_from=map_from }
+where
+ map_to (PAIR x y) = PAIR (bx.map_to x) (by.map_to y)
+ map_from (PAIR x y) = PAIR (bx.map_from x) (by.map_from y)
+bimap{|EITHER|} bl br = { map_to= map_to, map_from=map_from }
+where
+ map_to (LEFT x) = LEFT (bl.map_to x)
+ map_to (RIGHT x) = RIGHT (br.map_to x)
+ map_from (LEFT x) = LEFT (bl.map_from x)
+ map_from (RIGHT x) = RIGHT (br.map_from x)
+
+bimap{|(->)|} barg bres = { map_to = map_to, map_from = map_from }
+where
+ map_to f = comp3 bres.map_to f barg.map_from
+ map_from f = comp3 bres.map_from f barg.map_to
+
+bimap{|CONS|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (CONS x) = CONS (barg.map_to x)
+ map_from (CONS x) = CONS (barg.map_from x)
+
+bimap{|FIELD|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (FIELD x) = FIELD (barg.map_to x)
+ map_from (FIELD x) = FIELD (barg.map_from x)
+
+bimap{|OBJECT|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (OBJECT x) = OBJECT (barg.map_to x)
+ map_from (OBJECT x) = OBJECT (barg.map_from x)
+
+bimap{|Bimap|} x y = {map_to = map_to, map_from = map_from}
+where
+ map_to {map_to, map_from} =
+ { map_to = comp3 y.map_to map_to x.map_from
+ , map_from = comp3 x.map_to map_from y.map_from
+ }
+ map_from {map_to, map_from} =
+ { map_to = comp3 y.map_from map_to x.map_to
+ , map_from = comp3 x.map_from map_from y.map_to
+ }
+
+comp3 :: !(.a -> .b) u:(.c -> .a) !(.d -> .c) -> u:(.d -> .b)
+comp3 f g h
+ | is_id f
+ | is_id h
+ = cast g
+ = cast (\x -> g (h x))
+ | is_id h
+ = cast (\x -> f (g x))
+ = \x -> f (g (h x))
+where
+ is_id :: !.(.a -> .b) -> Bool
+ is_id f = code inline
+ {
+ eq_desc e_StdFunc_did 0 0
+ pop_a 1
+ }
+
+ cast :: !u:a -> u:b
+ cast f = code inline
+ {
+ pop_a 0
+ }
+
+getConsPath :: !GenericConsDescriptor -> [ConsPos]
+getConsPath {gcd_index, gcd_type_def={gtd_num_conses}}
+ = doit gcd_index gtd_num_conses
+where
+ doit i n
+ | n == 0
+ = abort "getConsPath: zero conses\n"
+ | i >= n
+ = abort "getConsPath: cons index >= number of conses"
+ | n == 1
+ = []
+ | i < (n/2)
+ = [ ConsLeft : doit i (n/2) ]
+ | otherwise
+ = [ ConsRight : doit (i - (n/2)) (n - (n/2)) ]
+ \ No newline at end of file
diff --git a/tests/examplefiles/abnf_example1.abnf b/tests/examplefiles/abnf_example1.abnf
new file mode 100644
index 00000000..5cd9cd25
--- /dev/null
+++ b/tests/examplefiles/abnf_example1.abnf
@@ -0,0 +1,22 @@
+; This examples from WikiPedia <https://en.wikipedia.org/wiki/Augmented_Backus%E2%80%93Naur_Form>.
+
+ postal-address = name-part street zip-part
+
+ name-part = *(personal-part SP) last-name [SP suffix] CRLF
+ name-part =/ personal-part CRLF
+
+ personal-part = first-name / (initial ".")
+ first-name = *ALPHA
+ initial = ALPHA
+ last-name = *ALPHA
+ suffix = ("Jr." / "Sr." / 1*("I" / "V" / "X"))
+
+ street = [apt SP] house-num SP street-name CRLF
+ apt = 1*4DIGIT
+ house-num = 1*8(DIGIT / ALPHA)
+ street-name = 1*VCHAR
+
+ zip-part = town-name "," SP state 1*2SP zip-code CRLF
+ town-name = 1*(ALPHA / SP)
+ state = 2ALPHA
+ zip-code = 5DIGIT ["-" 4DIGIT]
diff --git a/tests/examplefiles/abnf_example2.abnf b/tests/examplefiles/abnf_example2.abnf
new file mode 100644
index 00000000..8781adfb
--- /dev/null
+++ b/tests/examplefiles/abnf_example2.abnf
@@ -0,0 +1,9 @@
+crlf = %d13.10
+
+command = "command string"
+
+char-line = %x0D.0A *(%x20-7E) %x0D.0A
+
+without-ws-and-ctl = %d1-8 / %d11 / %d12 / %d14-31 / %d127
+
+three-blank-lines = %x0D.0A.0D.0A.0D.0A
diff --git a/tests/examplefiles/bnf_example1.bnf b/tests/examplefiles/bnf_example1.bnf
new file mode 100644
index 00000000..fe041a6e
--- /dev/null
+++ b/tests/examplefiles/bnf_example1.bnf
@@ -0,0 +1,15 @@
+; This examples from WikiPedia <https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form>.
+
+ <postal-address> ::= <name-part> <street-address> <zip-part>
+
+ <name-part> ::= <personal-part> <last-name> <opt-suffix-part> <EOL>
+ | <personal-part> <name-part>
+
+ <personal-part> ::= <initial> "." | <first-name>
+
+ <street-address> ::= <house-num> <street-name> <opt-apt-num> <EOL>
+
+ <zip-part> ::= <town-name> "," <state-code> <ZIP-code> <EOL>
+
+ <opt-suffix-part> ::= "Sr." | "Jr." | <roman-numeral> | ""
+ <opt-apt-num> ::= <apt-num> | ""
diff --git a/tests/examplefiles/example.bc b/tests/examplefiles/example.bc
new file mode 100644
index 00000000..6604cd31
--- /dev/null
+++ b/tests/examplefiles/example.bc
@@ -0,0 +1,53 @@
+/*
+ * Calculate the Greatest Common Divisor of a and b.
+ */
+define gcd(a, b) {
+ auto tmp;
+
+ /*
+ * Euclidean algorithm
+ */
+ while (b != 0) {
+ tmp = a % b;
+ a = b;
+ b = tmp;
+ }
+ return a;
+}
+"gcd(225, 150) = " ; gcd(225, 150)
+
+/* assign operators */
+a = 10
+a += 1
+a++
+++a
+a--
+--a
+a += 5
+a -= 5
+a *= 2
+a /= 3
+a ^= 2
+a %= 2
+
+/* comparison */
+if (a > 2) {
+}
+if (a >= 2) {
+}
+if (a == 2) {
+}
+if (a != 2) {
+}
+if (a <= 2) {
+}
+if (a < 2) {
+}
+
+a /* /*/ * 2 /* == a * 2 */
+a //* /*/ 1.5 /* == a / 1.5 */
+a /*/*/ * 3 /* == a * 3 */
+a * 3 /**/ * 4 /* == a * 3 * 4 */
+a / 3 //*//*/ .4 /* == a / 3 / 0.4 */
+a / 3 //*//*/ 1.3 /* == a / 3 / 1.4 */
+a / 3 /*//*// 1.3 /* == a / 3 / 1.4 */
diff --git a/tests/examplefiles/example2.cpp b/tests/examplefiles/example2.cpp
new file mode 100644
index 00000000..ccd99383
--- /dev/null
+++ b/tests/examplefiles/example2.cpp
@@ -0,0 +1,20 @@
+/*
+ * A Test file for the different string literals.
+ */
+
+#include <iostream>
+
+int main() {
+ char *_str = "a normal string";
+ wchar_t *L_str = L"a wide string";
+ char *u8_str = u8"utf-8 string";
+ char16_t *u_str = u"utf-16 string";
+ char32_t *U_str = U"utf-32 string";
+ char *R_str = R""""(raw string with
+"""
+as a delimiter)"""";
+
+ std::cout << R_str << std::endl;
+
+ return 0;
+}
diff --git a/tests/examplefiles/flatline_example b/tests/examplefiles/flatline_example
new file mode 100644
index 00000000..5ea73408
--- /dev/null
+++ b/tests/examplefiles/flatline_example
@@ -0,0 +1,186 @@
+(field "another field" 2)
+(f "000001" -2)
+
+(missing? "a field" 23)
+
+(random-value "age")
+(weighted-random-value "000001")
+
+(if (missing? "00000") (random-value "000000") (f "000000"))
+
+(ensure-value "000000")
+(ensure-weighted-value "000000")
+
+(normalize "000001")
+(normalize "length" 8 23)
+
+(z-score "a numeric field")
+(z-score 23)
+
+(field-prop string "00023" name)
+(field-prop numeric "00023" summary missing_count)
+
+(category-count "species" "Iris-versicolor")
+(category-count "species" (f "000004"))
+(bin-count "age" (f "bin-selector"))
+(bin-center "000003" 3)
+(bin-center (field "field-selector") 4)
+
+(let (v (f "age"))
+ (cond (< v 2) "baby"
+ (< v 10) "child"
+ (< v 20) "teenager"
+ "adult"))
+
+(segment-label "000000" "baby" 2 "child" 10 "teenager" 20 "adult")
+(segment-label 0 "1st fourth" "2nd fourth" "3rd fourth" "4th fourth")
+
+(let (max (maximum 0)
+ min (minimum 0)
+ step (/ (- max min) 4))
+ (segment-label 0 "1st fourth" (+ min step)
+ "2nd fourth" (+ min step step)
+ "3rd fourth" (+ min step step step)
+ "4th fourth"))
+
+(contains-items? "000000" "blue" "green" "darkblue")
+
+(<= (percentile "age" 0.5) (f "age") (percentile "age" 0.95))
+
+(within-percentiles? "age" 0.5 0.95)
+
+(percentile-label "000023" "1st" "2nd" "3rd" "4th")
+
+(cond (within-percentiles? "000023" 0 0.25) "1st"
+ (within-percentiles? "000023" 0.25 0.5) "2nd"
+ (within-percentiles? "000023" 0.5 0.75) "3rd"
+ "4th")
+
+(str 1 "hello " (field "a"))
+(str "value_" (+ 3 4) "/" (name "000001"))
+
+(length "abc")
+(length "")
+
+(levenshtein (f 0) "a random string")
+(if (< (levenshtein (f 0) "bluething") 5) "bluething" (f 0))
+
+(occurrences "howdy woman, howdy" "howdy")
+(occurrences "howdy woman" "Man" true)
+(occurrences "howdy man" "Man" true)
+(occurrences "hola, Holas" "hola" true "es")
+
+(md5 "a text")
+(sha1 "a text")
+(sha256 "")
+
+(matches? (field "name") ".*\\sHal\\s.*")
+(matches? (field "name") "(?i).*\\shal\\s.*")
+
+(if (matches? (f "result") (re-quote (f "target"))) "GOOD" "MISS")
+(matches? (f "name") (str "^" (re-quote (f "salutation")) "\\s *$"))
+
+(replace "Almost Pig Latin" "\\b(\\w)(\\w+)\\b" "$2$1ay")
+(replace-first "swap first two words" "(\\w+)(\\s+)(\\w+)" "$3$2$1")
+
+(language "this is an English phrase")
+
+(< (field 0) (field 1))
+(<= (field 0 -1) (field 0) (field 0 1))
+(> (field "date") "07-14-1969")
+(>= 23 (f "000004" -2))
+
+(= "Dante" (field "Author"))
+(= 1300 (field "Year"))
+(= (field "Year" -2) (field "Year" -1) (field "Year"))
+(!= (field "00033" -1) (field "00033" 1))
+
+(and (= 3 (field 1)) (= "meh" (f "a")) (< (f "pregnancies") 5))
+(not true)
+
+(linear-regression 1 1 2 2 3 3 4 4)
+(linear-regression 2.0 3.1 2.3 3.3 24.3 45.2)
+
+(epoch-fields (f "milliseconds"))
+(epoch-year (* 1000 (f "seconds")))
+
+(/ (f "a-datetime-string") 1000)
+(/ (epoch (f "a-datetime-string")) 1000)
+
+(epoch-fields (epoch "1969-14-07T06:00:12"))
+(epoch-hour (epoch "11~22~30" "hh~mm~ss"))
+
+(let (x (+ (window "a" -10 10))
+ a (/ (* x 3) 4.34)
+ y (if (< a 10) "Good" "Bad"))
+ (list x (str (f 10) "-" y) a y))
+
+(list (let (z (f 0)) (* 2 (* z z) (log z)))
+ (let (pi 3.141592653589793 r (f "radius")) (* 4 pi r r)))
+
+(if (< (field "age") 18) "non-adult" "adult")
+
+(if (= "oh" (field "000000")) "OH")
+
+(if (> (field "000001") (mean "000001"))
+ "above average"
+ (if (< (field "000001") (mean "000001"))
+ "below average"
+ "mediocre"))
+
+(cond (> (f "000001") (mean "000001")) "above average"
+ (= (f "000001") (mean "000001")) "below average"
+ "mediocre")
+
+(cond (or (= "a" (f 0)) (= "a+" (f 0))) 1
+ (or (= "b" (f 0)) (= "b+" (f 0))) 0
+ (or (= "c" (f 0)) (= "c+" (f 0))) -1)
+
+(cond (< (f "age") 2) "baby"
+ (and (<= 2 (f "age") 10) (= "F" (f "sex"))) "girl"
+ (and (<= 2 (f "age") 10) (= "M" (f "sex"))) "boy"
+ (< 10 (f "age") 20) "teenager"
+ "adult")
+
+(list (field "age")
+ (field "weight" -1)
+ (population "age"))
+
+(list 1.23
+ (if (< (field "age") 10) "child" "adult")
+ (field 3))
+
+(head (cons x lst))
+(tail (cons x lst))
+
+(count (list (f 1) (f 2)))
+(mode (list a b b c b a c c c))
+(max (list -1 2 -2 0.38))
+(min (list -1.3 2 1))
+(avg (list -1 -2 1 2 0.8 -0.8))
+
+(in 3 (1 2 3 2))
+(in "abc" (1 2 3))
+(in (f "size") ("X" "XXL"))
+
+(< _ 3)
+(+ (f "000001" _) 3)
+(< -18 _ (f 3))
+
+(map (* 2 _) (list (f 0 -1) (f 0) (f 0 1)))
+
+(all-but "id" "000023")
+(fields "000003" 3 "a field" "another" "0002a3b-3")
+
+(all-with-defaults "species" "Iris-versicolor"
+ "petal-width" 2.8
+ "000002" 0)
+
+(all-with-numeric-default "median")
+(all-with-numeric-default 0)
+
+(window "000001" -1 2)
+(filter (< _ 99.9) (map (+ 32 (* 1.8 _)) (window "Temp" -2 0)))
+
+(let (now (f "epoch"))
+ (avg (cond-window "temperature" (< (- (f "epoch") now) 240))))
diff --git a/tests/examplefiles/inform6_example b/tests/examplefiles/inform6_example
index 73cdd087..6fa1fe5b 100644
--- a/tests/examplefiles/inform6_example
+++ b/tests/examplefiles/inform6_example
@@ -8,8 +8,8 @@ Switches d2SDq;
Constant Story "Informal Testing";
Constant Headline "^Not a game.^";!% This is a comment, not ICL.
-Release 2;
-Serial "140308";
+Release 3;
+Serial "151213";
Version 5;
Ifndef TARGET_ZCODE;
@@ -174,7 +174,8 @@ Extend 'wave' replace * -> NewWave;
Extend only 'feel' 'touch' replace * noun -> Feel;
-[ TestSub a b o;
+[ TestSub "a\
+ " b o "@@98"; ! Not an escape sequence.
string 25 low_string;
print "Test what?> ";
table->0 = 260;
diff --git a/tests/examplefiles/pacman.conf b/tests/examplefiles/pacman.conf
new file mode 100644
index 00000000..78dbf5e1
--- /dev/null
+++ b/tests/examplefiles/pacman.conf
@@ -0,0 +1,49 @@
+#
+# /etc/pacman.conf
+#
+# This example file has no relation to `pacman.ijs`
+# but is of configuration of Arch Linux's package manager `pacman`.
+#
+
+#
+# GENERAL OPTIONS
+#
+[options]
+RootDir = /opt/local/site-private
+#DBPath = /var/lib/pacman/
+#CacheDir = /var/cache/pacman/pkg/
+LogFile = /opt/local/site-private/var/log/pacman.log
+#GPGDir = /etc/pacman.d/gnupg/
+HoldPkg = pacman
+#XferCommand = /usr/bin/curl -C - -f %u > %o
+XferCommand = /usr/local/bin/wget --passive-ftp -c -O %o %u
+#CleanMethod = KeepInstalled
+#UseDelta = 0.7
+Architecture = auto
+
+#IgnorePkg =
+#IgnoreGroup =
+
+NoUpgrade = etc/passwd etc/group etc/shadow
+NoUpgrade = etc/fstab
+#NoExtract =
+
+#UseSyslog
+Color
+#TotalDownload
+CheckSpace
+#VerbosePkgLists
+
+#SigLevel = Never
+SigLevel = Required DatabaseOptional
+LocalFileSigLevel = Optional
+RemoteFileSigLevel = Required
+
+Server = ftp://ftp9.yaphatchpotchgen.net/$repo/os/$arch
+
+[fubar32]
+Include = /etc/pacman.d/mirrorlist.fubar32 # comment is allowed here
+
+#[custom]
+#SigLevel = Optional TrustAll
+#Server = file:///home/custompkgs
diff --git a/tests/examplefiles/pkgconfig_example.pc b/tests/examplefiles/pkgconfig_example.pc
new file mode 100644
index 00000000..2a59204e
--- /dev/null
+++ b/tests/examplefiles/pkgconfig_example.pc
@@ -0,0 +1,18 @@
+# This is for a fictional package `yet another portable hatchpotch generator'.
+prefix=/usr/local/opt/site/private # define variable `prefix`
+exec_prefix=${prefix} # using variable reference
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+just_for_test=$${this is not a part of variable reference} # escape with `$$`
+
+Name: YAPHatchPotchGen
+Description: Yet Another Portable HatchPotch GENerator.
+Version: 352.9.3
+URL: http://www9.yaphatchpotchgen.net # Don't access.
+Requires: piyohogelib-9.0 = 9.5.3
+Requires.private: nyorolib-3.0 = 3.0.9
+Conflicts: apiyohoge <= 8.3
+Libs: -L${libdir} -lyaphatchpotchgen-352.9 # using variable reference
+Libs.private: -ll -ly
+Cflags: -I${includedir}/piyohogelib-9.0 -I${libdir}/yaphatchpotchgen/include
+
diff --git a/tests/examplefiles/postgresql_test.txt b/tests/examplefiles/postgresql_test.txt
index 190d184f..28db5ee3 100644
--- a/tests/examplefiles/postgresql_test.txt
+++ b/tests/examplefiles/postgresql_test.txt
@@ -45,3 +45,37 @@ $$;
SELECT U&'\0441\043B\043E\043D'
FROM U&"\0441\043B\043E\043D";
+-- Escapes
+SELECT E'1\n2\n3';
+
+-- DO example from postgresql documentation
+/*
+ * PostgreSQL is Copyright © 1996-2016 by the PostgreSQL Global Development Group.
+ *
+ * Postgres95 is Copyright © 1994-5 by the Regents of the University of California.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this paragraph
+ * and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
+ * EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS-IS" BASIS,
+ * AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
+ * SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+DO $$DECLARE r record;
+BEGIN
+ FOR r IN SELECT table_schema, table_name FROM information_schema.tables
+ WHERE table_type = 'VIEW' AND table_schema = 'public'
+ LOOP
+ EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser';
+ END LOOP;
+END$$;
diff --git a/tests/examplefiles/scope.cirru b/tests/examplefiles/scope.cirru
index d3bd8f16..c3d1a2c6 100644
--- a/tests/examplefiles/scope.cirru
+++ b/tests/examplefiles/scope.cirru
@@ -209,3 +209,29 @@ for (a x i) (.log console x i)
set a 0
while (< a 10) (+= a 1) (.log console a)
+
+-- WebAssembly variable names
+
+-- ":(c) 2015 Andreas Rossberg"
+
+module
+ export :even $even
+ export "odd" $odd
+
+ func $even (param $n i32) (result i32)
+ if (i32.eq (get_local $n) (i32.const 0))
+ i32.const 1
+ call $odd (i32.sub (get_local $n) (i32.const 1))
+
+ func $odd (param $n i32) (result i32)
+ store_global $scratch (get_local $n)
+ if (i32.eq (get_local $n) (i32.const 0)
+ i32.const 0
+ call $even (i32.sub (get_local $n) (i32.const 1))
+
+ global $scratch i32
+
+assert_eq (invoke :even (i32.const 13)) (i32.const 0)
+assert_eq (invoke :even (i32.const 20)) (i32.const 1)
+assert_eq (invoke :odd (i32.const 13)) (i32.const 1)
+assert_eq (invoke :odd (i32.const 20)) (i32.const 0)
diff --git a/tests/examplefiles/sparql.rq b/tests/examplefiles/sparql.rq
index 70b594e1..d979d203 100644
--- a/tests/examplefiles/sparql.rq
+++ b/tests/examplefiles/sparql.rq
@@ -29,8 +29,8 @@ SELECT ?person (COUNT(?nick) AS ?nickCount) {
ex:float5 .0e1 ;
ex:float6 5e11 ;
ex:float7 1. ;
- ex:À "" ;
- ex:豈 "" ;
+ ex:aUnicodeÀExample "somestring" ;
+ ex:catName "Kitty", "Kitty_" ; # object list
ex:escape "\n\u00c0\U00010000";
ex:catAge ?catage ;
dcterms:description "Someone with a cat called \"cat\"."@en . # language tag
diff --git a/tests/examplefiles/termcap b/tests/examplefiles/termcap
new file mode 100644
index 00000000..e20adaba
--- /dev/null
+++ b/tests/examplefiles/termcap
@@ -0,0 +1,1340 @@
+######## This example from excerpt of <http://www.catb.org/esr/terminfo/>:
+#
+# Version 11.0.1
+# $Date: 2000/03/02 15:51:11 $
+# termcap syntax
+#
+
+######## ANSI, UNIX CONSOLE, AND SPECIAL TYPES
+#
+# This section describes terminal classes and brands that are still
+# quite common.
+#
+
+#### Specials
+#
+# Special "terminals". These are used to label tty lines when you don't
+# know what kind of terminal is on it. The characteristics of an unknown
+# terminal are the lowest common denominator - they look about like a ti 700.
+#
+
+dumb|80-column dumb tty:\
+ :am:\
+ :co#80:\
+ :bl=^G:cr=^M:do=^J:sf=^J:
+unknown|unknown terminal type:\
+ :gn:tc=dumb:
+lpr|printer|line printer:\
+ :bs:hc:os:\
+ :co#132:li#66:\
+ :bl=^G:cr=^M:do=^J:ff=^L:le=^H:sf=^J:
+glasstty|classic glass tty interpreting ASCII control characters:\
+ :am:bs:\
+ :co#80:\
+ :bl=^G:cl=^L:cr=^M:do=^J:kd=^J:kl=^H:le=^H:nw=^M^J:ta=^I:
+vanilla:\
+ :bs:\
+ :bl=^G:cr=^M:do=^J:sf=^J:
+
+#### ANSI.SYS/ISO 6429/ECMA-48 Capabilities
+#
+# See the end-of-file comment for more on these.
+#
+
+# ANSI capabilities are broken up into pieces, so that a terminal
+# implementing some ANSI subset can use many of them.
+ansi+local1:\
+ :do=\E[B:le=\E[D:nd=\E[C:up=\E[A:
+ansi+local:\
+ :DO=\E[%dB:LE=\E[%dD:RI=\E[%dC:UP=\E[%dA:tc=ansi+local1:
+ansi+tabs:\
+ :bt=\E[Z:ct=\E[2g:st=\EH:ta=^I:
+ansi+inittabs:\
+ :it#8:tc=ansi+tabs:
+ansi+erase:\
+ :cd=\E[J:ce=\E[K:cl=\E[H\E[J:
+ansi+rca:\
+ :ch=\E[%+^AG:cv=\E[%+^Ad:
+ansi+cup:\
+ :cm=\E[%i%d;%dH:ho=\E[H:
+ansi+rep:\
+ :..rp=%p1%c\E[%p2%{1}%-%db:
+ansi+idl1:\
+ :al=\E[L:dl=\E[M:
+ansi+idl:\
+ :AL=\E[%dL:DL=\E[%dM:tc=ansi+idl1:
+ansi+idc:\
+ :IC=\E[%d@:dc=\E[P:ei=\E6:ic=\E[@:im=\E6:
+ansi+arrows:\
+ :kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:ku=\E[A:
+ansi+sgr|ansi graphic renditions:\
+ :mb=\E[5m:me=\E[0m:mk=\E[8m:mr=\E[7m:
+ansi+sgrso|ansi standout only:\
+ :se=\E[m:so=\E[7m:
+ansi+sgrul|ansi underline only:\
+ :ue=\E[m:us=\E[4m:
+ansi+sgrbold|ansi graphic renditions; assuming terminal has bold; not dim:\
+ :md=\E[1m:\
+ :..sa=\E[%?%p1%t7;%;%?%p2%t4;%;%?%p3%t7;%;%?%p4%t5;%;%?%p6%t1;%;m:tc=ansi+sgr:tc=ansi+sgrso:tc=ansi+sgrul:
+ansi+sgrdim|ansi graphic renditions; assuming terminal has dim; not bold:\
+ :mh=\E[2m:\
+ :..sa=\E[%?%p1%t7;%;%?%p2%t4;%;%?%p3%t7;%;%?%p4%t5;%;%?%p5%t2;%;m:tc=ansi+sgr:tc=ansi+sgrso:tc=ansi+sgrul:
+ansi+pp|ansi printer port:\
+ :pf=\E[4i:po=\E[5i:ps=\E[0i:
+ansi+csr|ansi scroll-region plus cursor save & restore:\
+ :cs=\E[%i%d;%dr:rc=\E8:sc=\E7:
+
+# The IBM PC alternate character set. Plug this into any Intel console entry.
+# We use \E[11m for rmacs rather than \E[12m so the <acsc> string can use the
+# ROM graphics for control characters such as the diamond, up- and down-arrow.
+# This works with the System V, Linux, and BSDI consoles. It's a safe bet this
+# will work with any Intel console, they all seem to have inherited \E[11m
+# from the ANSI.SYS de-facto standard.
+klone+acs|alternate character set for ansi.sys displays:\
+ :ac=+\020\054\021-\030.^Y0\333`\004a\261f\370g\361h\260j\331k\277l\332m\300n\305o~p\304q\304r\304s_t\303u\264v\301w\302x\263y\363z\362{\343|\330}\234~\376:\
+ :ae=\E[10m:as=\E[11m:
+
+# Highlight controls corresponding to the ANSI.SYS standard. Most
+# console drivers for Intel boxes obey these. Makes the same assumption
+# about \E[11m as klone+acs. True ANSI/ECMA-48 would have :se=\E[27m:,
+# :ue=\E[24m:, but this isn't a documented feature of ANSI.SYS.
+klone+sgr|attribute control for ansi.sys displays:\
+ :S2=\E[11m:S3=\E[10m:mb=\E[5m:md=\E[1m:me=\E[0;10m:\
+ :mk=\E[8m:mr=\E[7m:\
+ :..sa=\E[0;10%?%p1%t;7%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m:\
+ :se=\E[m:so=\E[7m:ue=\E[m:us=\E[4m:\
+ :tc=klone+acs:
+
+# Highlight controls corresponding to the ANSI.SYS standard. *All*
+# console drivers for Intel boxes obey these. Does not assume \E[11m will
+# work; uses \E[12m instead, which is pretty bulletproof but loses you the ACS
+# diamond and arrow characters under curses.
+klone+sgr-dumb|attribute control for ansi.sys displays (no ESC [ 11 m):\
+ :as=\E[12m:mb=\E[5m:md=\E[1m:me=\E[0;10m:mk=\E[8m:\
+ :mr=\E[7m:\
+ :..sa=\E[0;10%?%p1%t;7%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;12%;m:\
+ :se=\E[m:so=\E[7m:ue=\E[m:us=\E[4m:\
+ :tc=klone+acs:
+
+# KOI8-R (RFC1489) acs (alternate character set)
+# From: Qing Long <qinglong@Bolizm.ihep.su>, 24 Feb 1996.
+klone+koi8acs|alternate character set for ansi.sys displays with KOI8 charset:\
+ :ac=+\020\054\021-\036.^_0\215`\004a\237f\234g\232h\222i\220j\205k\203l\202m\204n\212o\213p\216q\0r\217s\214t\206u\207v\210w\211x\201y\230z\231{\267|\274}L~\225:\
+ :ae=\E[10m:as=\E[11m:
+
+# ANSI.SYS color control. The setab/setaf caps depend on the coincidence
+# between SVr4/XPG4's color numbers and ANSI.SYS attributes. Here are longer
+# but equivalent strings that don't rely on that coincidence:
+# setb=\E[4%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m,
+# setf=\E[3%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m,
+# The DOS 5 manual asserts that these sequences meet the ISO 6429 standard.
+# They match a subset of ECMA-48.
+klone+color|color control for ansi.sys and ISO6429-compatible displays:\
+ :Co#8:NC#3:pa#64:\
+ :AB=\E[4%p1%dm:AF=\E[3%p1%dm:op=\E[37;40m:
+
+# This is better than klone+color, it doesn't assume white-on-black as the
+# default color pair, but many `ANSI' terminals don't grok the <op> cap.
+ecma+color|color control for ECMA-48-compatible terminals:\
+ :Co#8:NC#3:pa#64:\
+ :AB=\E[4%p1%dm:AF=\E[3%p1%dm:op=\E[39;49m:
+
+# Attribute control for ECMA-48-compatible terminals
+ecma+sgr|attribute capabilities for true ECMA-48 terminals:\
+ :se=\E[27m:ue=\E[24m:\
+ :tc=klone+sgr:
+
+# For comparison, here are all the capabilities implied by the Intel
+# Binary Compatibility Standard (level 2) that fit within terminfo.
+# For more detail on this rather pathetic standard, see the comments
+# near the end of this file.
+ibcs2|Intel Binary Compatibility Standard prescriptions:\
+ :AL=\E[%dL:DC=\E[%dP:DO=\E[%dB:IC=\E[%d@:LE=\E[%dD:\
+ :RA=\E[?7l:RI=\E[%dC:S1=\E=%p1%dg:SA=\E[?7h:SF=\E[%dS:\
+ :SR=\E[%dT:UP=\E[%dA:bt=\E[Z:ch=\E[%i%dG:cl=\Ec:\
+ :cm=\E[%i%d;%dH:ct=\E[g:cv=\E[%i%dd:ec=\E[%dX:ei=:im=:\
+ :rc=\E7:sc=\E7:st=\EH:
+
+#### ANSI/ECMA-48 terminals and terminal emulators
+#
+# See near the end of this file for details on ANSI conformance.
+# Don't mess with these entries! Lots of other entries depend on them!
+#
+# This section lists entries in a least-capable to most-capable order.
+# if you're in doubt about what `ANSI' matches yours, try them in that
+# order and back off from the first that breaks.
+
+# ansi-mr is for ANSI terminals with ONLY relative cursor addressing
+# and more than one page of memory. It uses local motions instead of
+# direct cursor addressing, and makes almost no assumptions. It does
+# assume auto margins, no padding and/or xon/xoff, and a 24x80 screen.
+ansi-mr|mem rel cup ansi:\
+ :am:xo:\
+ :co#80:li#24:tc=vanilla:tc=ansi+erase:tc=ansi+local1:
+
+# ansi-mini is a bare minimum ANSI terminal. This should work on anything, but
+# beware of screen size problems and memory relative cursor addressing.
+ansi-mini|minimum ansi standard terminal:\
+ :am:xo:\
+ :co#80:li#24:tc=vanilla:tc=ansi+cup:tc=ansi+erase:
+
+# ansi-mtabs adds relative addressing and minimal tab support
+ansi-mtabs|any ansi terminal with pessimistic assumptions:\
+ :it#8:\
+ :ta=^I:tc=ansi+local1:tc=ansi-mini:
+
+# ANSI X3.64 from emory!mlhhh (Hugh Hansard) via BRL
+#
+# The following is an entry for the full ANSI 3.64 (1977). It lacks
+# padding, but most terminals using the standard are "fast" enough
+# not to require any -- even at 9600 bps. If you encounter problems,
+# try including the padding specifications.
+#
+# Note: the :as: and :ae: specifications are not implemented here, for
+# the available termcap documentation does not make clear WHICH alternate
+# character set to specify. ANSI 3.64 seems to make allowances for several.
+# Please make the appropriate adjustments to fit your needs -- that is
+# if you will be using alternate character sets.
+#
+# There are very few terminals running the full ANSI 3.64 standard,
+# so I could only test this entry on one verified terminal (Visual 102).
+# I would appreciate the results on other terminals sent to me.
+#
+# Please report comments, changes, and problems to:
+#
+# U.S. MAIL: Hugh Hansard
+# Box: 22830
+# Emory University
+# Atlanta, GA. 30322.
+#
+# USENET {akgua,msdc,sb1,sb6,gatech}!emory!mlhhh.
+#
+# (Added vt100 :rc:,:sc: to quiet a tic warning --esr)
+ansi77|ansi 3.64 standard 1977 version:\
+ :am:bs:mi:\
+ :co#80:it#8:li#24:\
+ :al=5*\E[L:bl=^G:cd=\E[J:ce=\E[K:cl=\E[;H\E[2J:\
+ :cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:dc=\E[P:dl=5*\E[M:\
+ :do=\E[B:ei=\E[4l:ho=\E[H:im=\E[4h:k1=\EOP:k2=\EOR:k4=\EOS:\
+ :kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:\
+ :nd=\E[C:nw=^M\ED:rc=\E8:sc=\E7:se=\E[m:sf=\ED:so=\E[7m:\
+ :sr=\EM:ta=^I:ue=\E[m:up=\E[A:us=\E[4m:
+
+# Procomm and some other ANSI emulations don't recognize all of the ANSI-
+# standard capabilities. This entry deletes :UP:, :RI:, :DO:, :LE:, and
+# <vpa>/<hpa> capabilities, forcing curses to use repetitions of :up:,
+# :nd:, :do: and :le:. Also deleted :IC: and :ic:, as QModem up to
+# 5.03 doesn't recognize these. Finally, we delete :rp: and :sr:, which seem
+# to confuse many emulators. On the other hand, we can count on these programs
+# doing :ae:/:as:/:sa:. Older versions of this entry featured
+# <invis=\E[9m>, but <invis=\E[8m> now seems to be more common under
+# ANSI.SYS influence.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Oct 30 1995
+pcansi-m|pcansi-mono|ibm-pc terminal programs claiming to be ansi (mono mode):\
+ :am:bs:mi:ms:\
+ :co#80:it#8:li#24:\
+ :al=\E[L:bl=^G:bt=\E[Z:cd=\E[J:ce=\E[K:cl=\E[H\E[J:\
+ :cm=\E[%i%d;%dH:cr=^M:ct=\E[2g:dc=\E[P:dl=\E[M:do=\E[B:\
+ :ho=\E[H:kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:ku=\E[A:\
+ :le=\E[D:nd=\E[C:sf=^J:st=\EH:ta=^I:up=\E[A:\
+ :tc=klone+sgr-dumb:
+pcansi-25-m|pcansi25m|ibm-pc terminal programs with 25 lines (mono mode):\
+ :li#25:tc=pcansi-m:
+pcansi-33-m|pcansi33m|ibm-pc terminal programs with 33 lines (mono mode):\
+ :li#33:tc=pcansi-m:
+pcansi-43-m|ansi43m|ibm-pc terminal programs with 43 lines (mono mode):\
+ :li#43:tc=pcansi-m:
+# The color versions. All PC emulators do color...
+pcansi|ibm-pc terminal programs claiming to be ansi:\
+ :tc=klone+color:tc=pcansi-m:
+pcansi-25|pcansi25|ibm-pc terminal programs with 25 lines:\
+ :li#25:tc=pcansi:
+pcansi-33|pcansi33|ibm-pc terminal programs with 33 lines:\
+ :li#33:tc=pcansi:
+pcansi-43|pcansi43|ibm-pc terminal programs with 43 lines:\
+ :li#43:tc=pcansi:
+
+# ansi-m -- full ANSI X3.64 with ANSI.SYS-compatible attributes, no color.
+# If you want pound signs rather than dollars, replace `B' with `A'
+# in the <s0ds>, <s1ds>, <s2ds>, and <s3ds> capabilities.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Nov 6 1995
+ansi-m|ansi-mono|ANSI X3.64-1979 terminal with ANSI.SYS compatible attributes:\
+ :5i:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :LE=\E[%dD:RI=\E[%dC:SF=\E[%dS:SR=\E[%dT:UP=\E[%dA:\
+ :cb=\E[1K:ch=\E[%i%dG:ct=\E[2g:cv=\E[%i%dd:ec=\E[%dX:ei=:\
+ :im=:kB=\E[Z:kI=\E[L:kb=^H:kd=\E[B:kl=\E[D:kr=\E[C:ku=\E[A:\
+ :nw=\r\E[S:pf=\E[4i:po=\E[5i:..rp=%p1%c\E[%p2%{1}%-%db:\
+ :s0=\E(B:s1=\E)B:s2=\E*B:s3=\E+B:ta=\E[I:\
+ :tc=pcansi-m:
+
+# ansi -- this terminfo expresses the largest subset of X3.64 that will fit in
+# standard terminfo. Assumes ANSI.SYS-compatible attributes and color.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Nov 6 1995
+ansi|ansi/pc-term compatible with color:\
+ :u6=\E[%i%d;%dR:u7=\E[6n:..u8=\E[?%[;0123456789]c:\
+ :u9=\E[c:\
+ :tc=ecma+color:tc=klone+sgr:tc=ansi-m:
+
+# ansi-generic is a vanilla ANSI terminal. This is assumed to implement
+# all the normal ANSI stuff with no extensions. It assumes
+# insert/delete line/char is there, so it won't work with
+# vt100 clones. It assumes video attributes for bold, blink,
+# underline, and reverse, which won't matter much if the terminal
+# can't do some of those. Padding is assumed to be zero, which
+# shouldn't hurt since xon/xoff is assumed.
+ansi-generic|generic ansi standard terminal:\
+ :am:xo:\
+ :co#80:li#24:tc=vanilla:tc=ansi+csr:tc=ansi+cup:\
+ :tc=ansi+rca:tc=ansi+erase:tc=ansi+tabs:tc=ansi+local:\
+ :tc=ansi+idc:tc=ansi+idl:tc=ansi+rep:tc=ansi+sgrbold:\
+ :tc=ansi+arrows:
+
+#### Linux consoles
+#
+
+# This entry is good for the 1.2.13 or later version of the Linux console.
+#
+# ***************************************************************************
+# * *
+# * WARNING: *
+# * Linuxes come with a default keyboard mapping kcbt=^I. This entry, in *
+# * response to user requests, assumes kcbt=\E[Z, the ANSI/ECMA reverse-tab *
+# * character. Here are the keymap replacement lines that will set this up: *
+# * *
+# keycode 15 = Tab Tab
+# alt keycode 15 = Meta_Tab
+# shift keycode 15 = F26
+# string F26 ="\033[Z"
+# * *
+# * This has to use a key slot which is unfortunate (any unused one will *
+# * do, F26 is the higher-numbered one). The change ought to be built *
+# * into the kernel tables. *
+# * *
+# ***************************************************************************
+#
+# The 1.3.x kernels add color-change capabilities; if yours doesn't have this
+# and it matters, turn off <ccc>. The %02x escape used to implement this is
+# not back-portable to SV curses and not supported in ncurses versions before
+# 1.9.9. All linux kernels since 1.2.13 (at least) set the screen size
+# themselves; this entry assumes that capability.
+#
+# The 2.2.x kernels add a private mode that sets the cursor type; use that to
+# get a block cursor for cvvis.
+# reported by Frank Heckenbach <frank@g-n-u.de>.
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+# (sgr removed to fit entry within 1023 bytes)
+# (terminfo-only capabilities suppressed to fit entry within 1023 bytes)
+linux|linux console:\
+ :am:eo:mi:ms:xn:xo:\
+ :it#8:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:IC=\E[%d@:K2=\E[G:al=\E[L:\
+ :bl=^G:cd=\E[J:ce=\E[K:cl=\E[H\E[J:cm=\E[%i%d;%dH:cr=^M:\
+ :cs=\E[%i%d;%dr:ct=\E[3g:dc=\E[P:dl=\E[M:do=^J:ec=\E[%dX:\
+ :ei=\E[4l:ho=\E[H:ic=\E[@:im=\E[4h:k1=\E[[A:k2=\E[[B:\
+ :k3=\E[[C:k4=\E[[D:k5=\E[[E:k6=\E[17~:k7=\E[18~:k8=\E[19~:\
+ :k9=\E[20~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:kb=\177:\
+ :kd=\E[B:kh=\E[1~:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:mh=\E[2m:\
+ :mr=\E[7m:nd=\E[C:nw=^M^J:rc=\E8:sc=\E7:se=\E[27m:sf=^J:\
+ :sr=\EM:st=\EH:ta=^I:ue=\E[24m:up=\E[A:us=\E[4m:\
+ :vb=200\E[?5h\E[?5l:ve=\E[?25h\E[?0c:vi=\E[?25l\E[?1c:\
+ :vs=\E[?25h\E[?8c:\
+ :tc=klone+sgr:tc=ecma+color:
+linux-m|Linux console no color:\
+ :Co@:pa@:\
+ :AB@:AF@:Sb@:Sf@:tc=linux:
+linux-c-nc|linux console 1.3.x hack for ncurses only:\
+ :cc:\
+ :..Ic=\E]P%p1%x%p2%{255}%*%{1000}%/%02x%p3%{255}%*%{1000}%/%02x%p4%{255}%*%{1000}%/%02x:\
+ :oc=\E]R:\
+ :tc=linux:
+# From: Dennis Henriksen <opus@osrl.dk>, 9 July 1996
+linux-c|linux console 1.3.6+ with private palette for each virtual console:\
+ :cc:\
+ :Co#8:pa#64:\
+ :..Ic=\E]P%?%p1%{9}%>%t%p1%{10}%-%'a'%+%c%e%p1%d%;%p2%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%p3%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%p4%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;:\
+ :oc=\E]R:\
+ :tc=linux:
+
+# See the note on ICH/ICH1 VERSUS RMIR/SMIR near the end of file
+linux-nic|linux with ich/ich1 suppressed for non-curses programs:\
+ :IC@:ei=:ic@:im=:\
+ :tc=linux:
+
+# This assumes you have used setfont(8) to load one of the Linux koi8-r fonts.
+# acsc entry from Pavel Roskin <pavel@absolute.spb.su>, 29 Sep 1997.
+linux-koi8|linux with koi8 alternate character set:\
+ :ac=+\020\054\021-\030.^Y0\215`\004a\221f\234g\237h\220i\276j\205k\203l\202m\204n\212o~p\0q\0r\0s_t\206u\207v\211w\210x\201y\230z\231{\267|\274~\224:tc=linux:\
+ :tc=klone+koi8acs:
+
+# Another entry for KOI8-r with Qing Long's acsc.
+# (which one better complies with the standard?)
+linux-koi8r|linux with koi8-r alternate character set:\
+ :tc=linux:tc=klone+koi8acs:
+
+# Entry for the latin1 and latin2 fonts
+linux-lat|linux with latin1 or latin2 alternate character set:\
+ :ac=+\020\054\021-\030.^Y0\333`\004a\013f\370g\361h\260i\316j\211k\214l\206m\203n\305o~p\304q\212r\304s_t\207u\215v\301w\302x\205y\363z\362{\343|\330}\234~\376:\
+ :tc=linux:
+
+#### NetBSD consoles
+#
+# pcvt termcap database entries (corresponding to release 3.31)
+# Author's last edit-date: [Fri Sep 15 20:29:10 1995]
+#
+# (For the terminfo master file, I translated these into terminfo syntax.
+# Then I dropped all the pseudo-HP entries. we don't want and can't use
+# the :Xs: flag. Then I split :is: into a size-independent :i1: and a
+# size-dependent :is:. Finally, I added <rmam>/<smam> -- esr)
+
+# NOTE: :ic: has been taken out of this entry. For reference, it should
+# be <ich1=\E[@>. For discussion, see ICH/ICH1 VERSUS RMIR/SMIR below.
+# (esr: added :vi: and :ve: to resolve NetBSD Problem Report #4583)
+pcvtXX|pcvt vt200 emulator (DEC VT220):\
+ :am:km:mi:ms:xn:\
+ :it#8:vt#3:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :LE=\E[%dD:RA=\E[?7l:RI=\E[%dC:SA=\E[?7h:SF=\E[%dS:\
+ :SR=\E[%dT:UP=\E[%dA:\
+ :ac=++\054\054--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz~~:\
+ :ae=\E(B:al=\E[L:as=\E(0:bl=^G:cb=\E[1K:cd=\E[J:ce=\E[K:\
+ :cl=\E[H\E[J:cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :ct=\E[3g:dc=\E[P:dl=\E[M:do=\E[B:ei=\E[4l:ho=\E[H:\
+ :i1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:im=\E[4h:\
+ :k1=\E[17~:k2=\E[18~:k3=\E[19~:k4=\E[20~:k5=\E[21~:\
+ :k6=\E[23~:k7=\E[24~:k8=\E[25~:kD=\E[3~:kH=\E[4~:kI=\E[2~:\
+ :kN=\E[6~:kP=\E[5~:kb=\177:kd=\EOB:ke=\E[?1l\E>:kh=\E[1~:\
+ :kl=\EOD:kr=\EOC:ks=\E[?1h\E=:ku=\EOA:le=^H:mb=\E[5m:\
+ :md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:nw=\EE:\
+ :r1=\Ec\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:rc=\E8:\
+ :rf=/usr/share/tabset/vt100:sc=\E7:se=\E[27m:sf=\ED:\
+ :so=\E[7m:sr=\EM:st=\EH:ta=^I:ue=\E[24m:up=\E[A:us=\E[4m:\
+ :ve=\E[?25h:vi=\E[?25l:
+
+# NetBSD/FreeBSD vt220 terminal emulator console (pc keyboard & monitor)
+# termcap entries for pure VT220-Emulation and 25, 28, 35, 40, 43 and
+# 50 lines entries; 80 columns
+pcvt25|dec vt220 emulation with 25 lines:\
+ :co#80:li#25:\
+ :is=\E[1;25r\E[25;1H:tc=pcvtXX:
+pcvt28|dec vt220 emulation with 28 lines:\
+ :co#80:li#28:\
+ :is=\E[1;28r\E[28;1H:tc=pcvtXX:
+pcvt35|dec vt220 emulation with 35 lines:\
+ :co#80:li#35:\
+ :is=\E[1;35r\E[35;1H:tc=pcvtXX:
+pcvt40|dec vt220 emulation with 40 lines:\
+ :co#80:li#40:\
+ :is=\E[1;40r\E[40;1H:tc=pcvtXX:
+pcvt43|dec vt220 emulation with 43 lines:\
+ :co#80:li#43:\
+ :is=\E[1;43r\E[43;1H:tc=pcvtXX:
+pcvt50|dec vt220 emulation with 50 lines:\
+ :co#80:li#50:\
+ :is=\E[1;50r\E[50;1H:tc=pcvtXX:
+
+# NetBSD/FreeBSD vt220 terminal emulator console (pc keyboard & monitor)
+# termcap entries for pure VT220-Emulation and 25, 28, 35, 40, 43 and
+# 50 lines entries; 132 columns
+pcvt25w|dec vt220 emulation with 25 lines and 132 cols:\
+ :co#132:li#25:\
+ :is=\E[1;25r\E[25;1H:tc=pcvtXX:
+pcvt28w|dec vt220 emulation with 28 lines and 132 cols:\
+ :co#132:li#28:\
+ :is=\E[1;28r\E[28;1H:tc=pcvtXX:
+pcvt35w|dec vt220 emulation with 35 lines and 132 cols:\
+ :co#132:li#35:\
+ :is=\E[1;35r\E[35;1H:tc=pcvtXX:
+pcvt40w|dec vt220 emulation with 40 lines and 132 cols:\
+ :co#132:li#40:\
+ :is=\E[1;40r\E[40;1H:tc=pcvtXX:
+pcvt43w|dec vt220 emulation with 43 lines and 132 cols:\
+ :co#132:li#43:\
+ :is=\E[1;43r\E[43;1H:tc=pcvtXX:
+pcvt50w|dec vt220 emulation with 50 lines and 132 cols:\
+ :co#132:li#50:\
+ :is=\E[1;50r\E[50;1H:tc=pcvtXX:
+
+# Terminfo entries to enable the use of the ncurses library in colour on a
+# NetBSD-arm32 console (only tested on a RiscPC).
+# Created by Dave Millen <dmill@globalnet.co.uk> 22.07.98
+# modified codes for setf/setb to setaf/setab, then to klone+color, corrected
+# typo in invis - TD
+arm100|arm100-am|Arm(RiscPC) ncurses compatible (for 640x480):\
+ :am:ms:ut:xn:xo:\
+ :co#80:it#8:li#30:\
+ :@8=\E[M:DO=\E[%dB:K1=\E[q:K2=\E[r:K3=\E[s:K4=\E[p:K5=\E[n:\
+ :LE=\E[%dD:RA=\E[?7l:RI=\E[%dC:SA=\E[?7h:UP=\E[%dA:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=^O:as=^N:bl=^G:cb=\E[1K:cd=\E[J:ce=\E[K:cl=\E[H\E[J:\
+ :cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:ct=\E[3g:do=^J:\
+ :eA=\E(B\E)0:ho=\E[H:k0=\E[y:k1=\E[P:k2=\E[Q:k3=\E[R:\
+ :k4=\E[S:k5=\E[t:k6=\E[u:k7=\E[v:k8=\E[l:k9=\E[w:k;=\E[x:\
+ :kb=^H:kd=\E[B:ke=\E[?1l\E>:kl=\E[D:kr=\E[C:ks=\E[?1h\E=:\
+ :ku=\E[A:le=^H:mb=\E[5m:md=\E[1m:me=\E[m\017:mk=\E[8m:\
+ :mr=\E[6m:nd=\E[C:r2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:\
+ :rc=\E8:\
+ :..sa=\E[0%?%p1%p6%|%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;m%?%p9%t\016%e\017%;:\
+ :sc=\E7:se=\E[m:sf=^J:so=\E[7m:sr=\EM:st=\EH:ta=^I:ue=\E[m:\
+ :up=\E[A:us=\E[4m:\
+ :tc=ecma+sgr:tc=klone+color:
+arm100-w|arm100-wam|Arm(RiscPC) ncurses compatible (for 1024x768):\
+ :co#132:li#50:tc=arm100:
+
+# NetBSD/x68k console vt200 emulator. This port runs on a 68K machine
+# manufactured by Sharp for the Japanese market.
+# From Minoura Makoto <minoura@netlaputa.or.jp>, 12 May 1996
+x68k|x68k-ite|NetBSD/x68k ITE:\
+ :co#96:li#32:\
+ :%1=\E[28~:kC=\E[9~:tc=vt220:
+
+# <tv@pobox.com>:
+# Entry for the DNARD OpenFirmware console, close to ANSI but not quite.
+#
+# (still unfinished, but good enough so far.)
+ofcons:\
+ :bw:\
+ :co#80:li#30:\
+ :AL=\233%dL:DC=\233%dP:DL=\233%dM:DO=\233%dB:IC=\233%d@:\
+ :LE=\233%dD:RI=\233%dC:UP=\233%dA:al=\233L:bl=^G:cd=\233J:\
+ :ce=\233K:cl=^L:cm=\233%i%d;%dH:cr=^M:dc=\233P:dl=\233M:\
+ :do=\233B:ei=:ic=\233@:im=:k1=\2330P:k2=\2330Q:k3=\2330W:\
+ :k4=\2330x:k5=\2330t:k6=\2330u:k7=\2330q:k8=\2330r:\
+ :k9=\2330p:k;=\2330M:kD=\233P:kN=\233/:kP=\233?:kb=^H:\
+ :kd=\233B:kl=\233D:kr=\233C:ku=\233A:le=\233D:mb=\2337;2m:\
+ :md=\2331m:me=\2330m:mh=\2332m:mk=\2338m:mr=\2337m:\
+ :nd=\233C:nw=^M^J:se=\2330m:sf=^J:ta=^I:ue=\2330m:up=\233A:\
+ :vb=^G:
+
+# NetBSD "wscons" emulator in vt220 mode
+# These are micro-minimal and probably need to be redone for real
+# after the manner of the pcvt entries.
+wsvt25|NetBSD wscons in 25 line DEC VT220 mode:\
+ :co#80:li#25:tc=vt220:
+
+wsvt25m|NetBSD wscons in 25 line DEC VT220 mode with Meta:\
+ :km:\
+ :co#80:li#25:tc=vt220:
+
+# `rasterconsole' provided by 4.4BSD, NetBSD and OpenBSD on SPARC, and
+# DECstation/pmax.
+rcons|BSD rasterconsole:\
+ :tc=sun-il:
+# Color version of above. Color currently only provided by NetBSD.
+rcons-color|BSD rasterconsole with ANSI color:\
+ :ut:\
+ :Co#8:pa#64:\
+ :AB=\E[4%dm:AF=\E[3%dm:op=\E[m:tc=rcons:
+
+#### FreeBSD console entries
+#
+# From: Andrey Chernov <ache@astral.msk.su> 29 Mar 1996
+# Andrey Chernov maintains the FreeBSD termcap distributions.
+#
+# Note: Users of FreeBSD 2.1.0 and older versions must either upgrade
+# or comment out the :cb: capability in the console entry.
+#
+# Alexander Lukyanov reports:
+# I have seen FreeBSD-2.1.5R... The old el1 bug changed, but it is still there.
+# Now el1 clears not only to the line beginning, but also a large chunk
+# of previous line. But there is another bug - ech does not work at all.
+#
+
+# for syscons
+# common entry without semigraphics
+# Bug: The <op> capability resets attributes.
+# Bug? The ech and el1 attributes appear to move the cursor in some cases; for
+# instance el1 does if the cursor is moved to the right margin first. Removed
+# by T.Dickey 97/5/3 (ech=\E[%p1%dX, el1=\E[1K)
+#
+# Setting colors turns off reverse; we cannot guarantee order, so use ncv.
+# Note that this disables standout with color.
+cons25w|ansiw|ansi80x25-raw|freebsd console (25-line raw mode):\
+ :NP:am:bw:eo:ms:ut:\
+ :Co#8:NC#21:co#80:it#8:li#25:pa#64:\
+ :@7=\E[F:AB=\E[4%p1%dm:AF=\E[3%p1%dm:AL=\E[%dL:DC=\E[%dP:\
+ :DL=\E[%dM:DO=\E[%dB:F1=\E[W:F2=\E[X:IC=\E[%d@:K2=\E[E:\
+ :LE=\E[%dD:RI=\E[%dC:SF=\E[%dS:SR=\E[%dT:UP=\E[%dA:\
+ :al=\E[L:bl=^G:bt=\E[Z:cd=\E[J:ce=\E[K:ch=\E[%i%d`:\
+ :cl=\E[H\E[J:cm=\E[%i%d;%dH:cr=^M:cv=\E[%i%dd:dc=\E[P:\
+ :dl=\E[M:do=\E[B:ei=:ho=\E[H:ic=\E[@:im=:k1=\E[M:k2=\E[N:\
+ :k3=\E[O:k4=\E[P:k5=\E[Q:k6=\E[R:k7=\E[S:k8=\E[T:k9=\E[U:\
+ :k;=\E[V:kB=\E[Z:kD=\177:kI=\E[L:kN=\E[G:kP=\E[I:kb=^H:\
+ :kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:mb=\E[5m:\
+ :md=\E[1m:me=\E[m:mh=\E[30;1m:mr=\E[7m:nd=\E[C:nw=\E[E:\
+ :op=\E[x:r1=\E[x\E[m\Ec:se=\E[m:sf=\E[S:so=\E[7m:sr=\E[T:\
+ :ta=^I:up=\E[A:ve=\E[=0C:vs=\E[=1C:
+cons25|ansis|ansi80x25|freebsd console (25-line ansi mode):\
+ :ac=-\030.^Y0\333`\004a\260f\370g\361h\261i\025j\331k\277l\332m\300n\305q\304t\303u\264v\301w\302x\263y\363z\362~\371:\
+ :tc=cons25w:
+cons25-m|ansis-mono|ansi80x25-mono|freebsd console (25-line mono ansi mode):\
+ :Co@:pa@:\
+ :AB@:AF@:md@:mh@:op@:ue=\E[m:us=\E[4m:tc=cons25:
+cons30|ansi80x30|freebsd console (30-line ansi mode):\
+ :li#30:tc=cons25:
+cons30-m|ansi80x30-mono|freebsd console (30-line mono ansi mode):\
+ :li#30:tc=cons25-m:
+cons43|ansi80x43|freebsd console (43-line ansi mode):\
+ :li#43:tc=cons25:
+cons43-m|ansi80x43-mono|freebsd console (43-line mono ansi mode):\
+ :li#43:tc=cons25-m:
+cons50|ansil|ansi80x50|freebsd console (50-line ansi mode):\
+ :li#50:tc=cons25:
+cons50-m|ansil-mono|ansi80x50-mono|freebsd console (50-line mono ansi mode):\
+ :li#50:tc=cons25-m:
+cons60|ansi80x60|freebsd console (60-line ansi mode):\
+ :li#60:tc=cons25:
+cons60-m|ansi80x60-mono|freebsd console (60-line mono ansi mode):\
+ :li#60:tc=cons25-m:
+cons25r|pc3r|ibmpc3r|cons25-koi8-r|freebsd console w/koi8-r cyrillic:\
+ :ac=-\030.^Y0\215`\004a\220f\234h\221i\025j\205k\203l\202m\204n\212q\0t\206u\207v\211w\210x\201y\230z\231~\225:\
+ :tc=cons25w:
+cons25r-m|pc3r-m|ibmpc3r-mono|cons25-koi8r-m|freebsd console w/koi8-r cyrillic (mono):\
+ :Co@:pa@:\
+ :AB@:AF@:op@:ue=\E[m:us=\E[4m:tc=cons25r:
+cons50r|cons50-koi8r|freebsd console w/koi8-r cyrillic (50 lines):\
+ :li#50:tc=cons25r:
+cons50r-m|cons50-koi8r-m|freebsd console w/koi8-r cyrillic (50-line mono):\
+ :li#50:tc=cons25r-m:
+cons60r|cons60-koi8r|freebsd console w/koi8-r cyrillic (60 lines):\
+ :li#60:tc=cons25r:
+cons60r-m|cons60-koi8r-m|freebsd console w/koi8-r cyrillic (60-line mono):\
+ :li#60:tc=cons25r-m:
+# ISO 8859-1 FreeBSD console
+cons25l1|cons25-iso8859|freebsd console w/iso 8859-1 chars:\
+ :ac=+\253\054\273-\030.\031`\201a\202f\207g\210i\247j\213k\214l\215m\216n\217o\220p\221q\222r\223s\224t\225u\226v\227w\230x\231y\232z\233~\237:\
+ :tc=cons25w:
+cons25l1-m|cons25-iso-m|freebsd console w/iso 8859-1 chars (mono):\
+ :Co@:pa@:\
+ :AB@:AF@:md@:mh@:op@:ue=\E[m:us=\E[4m:tc=cons25l1:
+cons50l1|cons50-iso8859|freebsd console w/iso 8859-1 chars (50 lines):\
+ :li#50:tc=cons25l1:
+cons50l1-m|cons50-iso-m|freebsd console w/iso 8859-1 chars (50-line mono):\
+ :li#50:tc=cons25l1-m:
+cons60l1|cons60-iso|freebsd console w/iso 8859-1 chars (60 lines):\
+ :li#60:tc=cons25l1:
+cons60l1-m|cons60-iso-m|freebsd console w/iso 8859-1 chars (60-line mono):\
+ :li#60:tc=cons25l1-m:
+
+#### 386BSD and BSD/OS Consoles
+#
+
+# This was the original 386BSD console entry (I think).
+# Some places it's named oldpc3|oldibmpc3.
+# From: Alex R.N. Wetmore <aw2t@andrew.cmu.edu>
+origpc3|origibmpc3|IBM PC 386BSD Console:\
+ :am:bw:eo:xo:\
+ :co#80:li#25:\
+ :ac=j\331k\277l\332m\300n\305q\304t\303u\264v\301w\302x\263:\
+ :cd=\E[J:ce=\E[K:cl=\Ec:cm=\E[%i%2;%2H:do=\E[B:ho=\E[H:\
+ :kd=\E[B:kh=\E[Y:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:md=\E[7m:\
+ :me=\E[m\E[1;0x\E[2;7x:nd=\E[C:se=\E[1;0x\E[2;7x:\
+ :sf=\E[S:so=\E[1;7x\E[2;0x:sr=\E[T:ue=\E[1;0x\E[2;7x:\
+ :up=\E[A:us=\E[1;7x\E[2;0x:
+
+# description of BSD/386 console emulator in version 1.0 (supplied by BSDI)
+oldpc3|oldibmpc3|old IBM PC BSD/386 Console:\
+ :km:\
+ :li#25:\
+ :al=\E[L:bl=^G:cr=^M:dl=\E[M:do=^J:kH=\E[F:kI=\E[L:kN=\E[G:\
+ :kP=\E[I:kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:ku=\E[A:\
+ :md=\E[=15F:me=\E[=R:mh=\E[=8F:nw=^M^J:sf=^J:ta=^I:
+
+# Description of BSD/OS console emulator in version 1.1, 2.0, 2.1
+# Note, the emulator supports many of the additional console features
+# listed in the iBCS2 (e.g. character-set selection) though not all
+# are described here. This entry really ought to be upgraded.
+# Also note, the console will also work with fewer lines after doing
+# "stty rows NN", e.g. to use 24 lines.
+# (Color support from Kevin Rosenberg <kevin@cyberport.com>, 2 May 1996)
+# Bug: The <op> capability resets attributes.
+bsdos-pc-nobold|BSD/OS PC console w/o bold:\
+ :am:eo:km:xo:\
+ :co#80:it#8:li#25:\
+ :AL=\E[%dL:DL=\E[%dM:DO=\E[%dB:LE=\E[%dD:RI=\E[%dC:\
+ :UP=\E[%dA:al=\E[L:bl=^G:cd=\E[J:ce=\E[K:cl=\Ec:\
+ :cm=\E[%i%d;%dH:cr=^M:dl=\E[M:do=^J:ho=\E[H:kH=\E[F:\
+ :kI=\E[L:kN=\E[G:kP=\E[I:kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:\
+ :kr=\E[C:ku=\E[A:le=^H:nd=\E[C:nw=^M^J:rc=\E8:\
+ :..sa=\E[0;10%?%p1%t;7%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m%?%p5%t\E[=8F%;:\
+ :sc=\E7:sf=^J:ta=^I:up=\E[A:\
+ :tc=klone+sgr:tc=klone+color:
+bsdos-pc|IBM PC BSD/OS Console:\
+ :..sa=\E[0;10%?%p1%t;7%;%?%p2%t;1%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m:tc=bsdos-pc-nobold:
+
+# Old names for BSD/OS PC console used in releases before 4.1.
+pc3|BSD/OS on the PC Console:\
+ :tc=bsdos-pc-nobold:
+ibmpc3|pc3-bold|BSD/OS on the PC Console with bold instead of underline:\
+ :tc=bsdos-pc:
+
+# BSD/OS on the SPARC
+bsdos-sparc|Sun SPARC BSD/OS Console:\
+ :tc=sun:
+
+# BSD/OS on the PowerPC
+bsdos-ppc|PowerPC BSD/OS Console:\
+ :tc=bsdos-pc:
+
+#### DEC VT100 and compatibles
+#
+# DEC terminals from the vt100 forward are collected here. Older DEC terminals
+# and micro consoles can be found in the `obsolete' section. More details on
+# the relationship between the VT100 and ANSI X3.64/ISO 6429/ECMA-48 may be
+# found near the end of this file.
+#
+# Except where noted, these entries are DEC's official terminfos.
+# Contact Bill Hedberg <hedberg@hannah.enet.dec.com> of Terminal Support
+# Engineering for more information. Updated terminfos and termcaps
+# are kept available at ftp://gatekeeper.dec.com/pub/DEC/termcaps.
+#
+# In October 1995 DEC sold its terminals business, including the VT and Dorio
+# line and trademark, to SunRiver Data Systems. SunRiver has since changed
+# its name to Boundless Technologies; see http://www.boundless.com.
+#
+
+# NOTE: Any VT100 emulation, whether in hardware or software, almost
+# certainly includes what DEC called the `Level 1 editing extension' codes;
+# only the very oldest VT100s lacked these and there probably aren't any of
+# those left alive. To capture these, use one of the VT102 entries.
+#
+# Note that the :xn: glitch in vt100 is not quite the same as on the Concept,
+# since the cursor is left in a different position while in the
+# weird state (concept at beginning of next line, vt100 at end
+# of this line) so all versions of vi before 3.7 don't handle
+# :xn: right on vt100. The correct way to handle :xn: is when
+# you output the char in column 80, immediately output CR LF
+# and then assume you are in column 1 of the next line. If :xn:
+# is on, am should be on too.
+#
+# I assume you have smooth scroll off or are at a slow enough baud
+# rate that it doesn't matter (1200? or less). Also this assumes
+# that you set auto-nl to "on", if you set it off use vt100-nam
+# below.
+#
+# The padding requirements listed here are guesses. It is strongly
+# recommended that xon/xoff be enabled, as this is assumed here.
+#
+# The vt100 uses <rs2> and <rf> rather than :is:/:ct:/:st: because the
+# tab settings are in non-volatile memory and don't need to be
+# reset upon login. Also setting the number of columns glitches
+# the screen annoyingly. You can type "reset" to get them set.
+#
+# The VT100 series terminals have cursor ("arrows") keys which can operate
+# in two different modes: Cursor Mode and Application Mode. Cursor Mode
+# is the reset state, and is assumed to be the normal state. Application
+# Mode is the "set" state. In Cursor Mode, the cursor keys transmit
+# "Esc [ {code}" sequences, conforming to ANSI standards. In Application
+# Mode, the cursor keys transmit "Esc O <code>" sequences. Application Mode
+# was provided primarily as an aid to the porting of VT52 applications. It is
+# assumed that the cursor keys are normally in Cursor Mode, and expected that
+# applications such as vi will always transmit the :ks: string. Therefore,
+# the definitions for the cursor keys are made to match what the terminal
+# transmits after the :ks: string is transmitted. If the :ks: string
+# is a null string or is not defined, then cursor keys are assumed to be in
+# "Cursor Mode", and the cursor keys definitions should match that assumption,
+# else the application may fail. It is also expected that applications will
+# always transmit the :ke: string to the terminal before they exit.
+#
+# The VT100 series terminals have an auxiliary keypad, commonly referred to as
+# the "Numeric Keypad", because it is a cluster of numeric and function keys.
+# The Numeric Keypad can operate in two different modes: Numeric Mode and
+# Application Mode. Numeric Mode is the reset state, and is assumed to be
+# the normal state. Application Mode is the "set" state. In Numeric Mode,
+# the numeric and punctuation keys transmit ASCII 7-bit characters, and the
+# Enter key transmits the same as the Return key (Note: the Return key
+# can be configured to send either LF (\015) or CR LF). In Application Mode,
+# all the keypad keys transmit "Esc O {code}" sequences. The PF1 - PF4 keys
+# always send the same "Esc O {code}" sequences. It is assumed that the keypad
+# is normally in Numeric Mode. If an application requires that the keypad be
+# in Application Mode then it is expected that the user, or the application,
+# will set the TERM environment variable to point to a terminfo entry which has
+# defined the :ks: string to include the codes that switch the keypad into
+# Application Mode, and the terminfo entry will also define function key
+# fields to match the Application Mode control codes. If the :ks: string
+# is a null string or is not defined, then the keypad is assumed to be in
+# Numeric Mode. If the :ks: string switches the keypad into Application
+# Mode, it is expected that the :ke: string will contain the control codes
+# necessary to reset the keypad to "Normal" mode, and it is also expected that
+# applications which transmit the :ks: string will also always transmit the
+# :ke: string to the terminal before they exit.
+#
+# Here's a diagram of the VT100 keypad keys with their bindings.
+# The top line is the name of the key (some DEC keyboards have the keys
+# labelled somewhat differently, like GOLD instead of PF1, but this is
+# the most "official" name). The second line is the escape sequence it
+# generates in Application Keypad mode (where "$" means the ESC
+# character). The third line contains two items, first the mapping of
+# the key in terminfo, and then in termcap.
+# _______________________________________
+# | PF1 | PF2 | PF3 | PF4 |
+# | $OP | $OQ | $OR | $OS |
+# |_kf1__k1_|_kf2__k2_|_kf3__k3_|_kf4__k4_|
+# | 7 8 9 - |
+# | $Ow | $Ox | $Oy | $Om |
+# |_kf9__k9_|_kf10_k;_|_kf0__k0_|_________|
+# | 4 | 5 | 6 | , |
+# | $Ot | $Ou | $Ov | $Ol |
+# |_kf5__k5_|_kf6__k6_|_kf7__k7_|_kf8__k8_|
+# | 1 | 2 | 3 | |
+# | $Oq | $Or | $Os | enter |
+# |_ka1__K1_|_kb2__K2_|_ka3__K3_| $OM |
+# | 0 | . | |
+# | $Op | $On | |
+# |___kc1_______K4____|_kc3__K5_|_kent_@8_|
+#
+# And here, for those of you with orphaned VT100s lacking documentation, is
+# a description of the soft switches invoked when you do `Set Up'.
+#
+# Scroll 0-Jump Shifted 3 0-#
+# | 1-Smooth | 1-British pound sign
+# | Autorepeat 0-Off | Wrap Around 0-Off
+# | | 1-On | | 1-On
+# | | Screen 0-Dark Bkg | | New Line 0-Off
+# | | | 1-Light Bkg | | | 1-On
+# | | | Cursor 0-Underline | | | Interlace 0-Off
+# | | | | 1-Block | | | | 1-On
+# | | | | | | | |
+# 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 <--Standard Settings
+# | | | | | | | |
+# | | | Auto XON/XOFF 0-Off | | | Power 0-60 Hz
+# | | | 1-On | | | 1-50 Hz
+# | | Ansi/VT52 0-VT52 | | Bits Per Char. 0-7 Bits
+# | | 1-ANSI | | 1-8 Bits
+# | Keyclick 0-Off | Parity 0-Off
+# | 1-On | 1-On
+# Margin Bell 0-Off Parity Sense 0-Odd
+# 1-On 1-Even
+#
+# The following SET-UP modes are assumed for normal operation:
+# ANSI_MODE AUTO_XON/XOFF_ON NEWLINE_OFF 80_COLUMNS
+# WRAP_AROUND_ON JUMP_SCROLL_OFF
+# Other SET-UP modes may be set for operator convenience or communication
+# requirements; I recommend
+# AUTOREPEAT_ON BLOCK_CURSOR MARGIN_BELL_OFF SHIFTED_3_#
+# Unless you have a graphics add-on such as Digital Engineering's VT640
+# (and even then, whenever it can be arranged!) you should set
+# INTERLACE_OFF
+#
+# (vt100: I added <rmam>/<smam> based on the init string, also :bs:. -- esr)
+vt100|vt100-am|dec vt100 (w/advanced video):\
+ :am:bs:ms:xn:xo:\
+ :co#80:it#8:li#24:vt#3:\
+ :@8=\EOM:DO=\E[%dB:K1=\EOq:K2=\EOr:K3=\EOs:K4=\EOp:K5=\EOn:\
+ :LE=\E[%dD:RA=\E[?7l:RI=\E[%dC:SA=\E[?7h:UP=\E[%dA:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=^O:as=^N:bl=^G:cb=\E[1K:cd=\E[J:ce=\E[K:cl=\E[H\E[J:\
+ :cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:ct=\E[3g:do=^J:\
+ :eA=\E(B\E)0:ho=\E[H:k0=\EOy:k1=\EOP:k2=\EOQ:k3=\EOR:\
+ :k4=\EOS:k5=\EOt:k6=\EOu:k7=\EOv:k8=\EOl:k9=\EOw:k;=\EOx:\
+ :kb=^H:kd=\EOB:ke=\E[?1l\E>:kl=\EOD:kr=\EOC:ks=\E[?1h\E=:\
+ :ku=\EOA:le=^H:mb=\E[5m:md=\E[1m:me=\E[m\017:mr=\E[7m:\
+ :nd=\E[C:r2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:rc=\E8:\
+ :..sa=\E[0%?%p1%p6%|%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;m%?%p9%t\016%e\017%;:\
+ :sc=\E7:se=\E[m:sf=^J:so=\E[7m:sr=\EM:st=\EH:ta=^I:ue=\E[m:\
+ :up=\E[A:us=\E[4m:
+vt100nam|vt100-nam|vt100 no automargins:\
+ :am@:xn@:tc=vt100-am:
+vt100-vb|dec vt100 (w/advanced video) & no beep:\
+ :bl@:vb=\E[?5h\E[?5l:tc=vt100:
+
+# Ordinary vt100 in 132 column ("wide") mode.
+vt100-w|vt100-w-am|dec vt100 132 cols (w/advanced video):\
+ :co#132:li#24:\
+ :r2=\E>\E[?3h\E[?4l\E[?5l\E[?8h:tc=vt100-am:
+vt100-w-nam|vt100-nam-w|dec vt100 132 cols (w/advanced video no automargin):\
+ :co#132:li#14:vt@:\
+ :r2=\E>\E[?3h\E[?4l\E[?5l\E[?8h:tc=vt100-nam:
+
+# vt100 with no advanced video.
+vt100-nav|vt100 without advanced video option:\
+ :sg#1:\
+ :mb@:md@:me@:mr@:sa@:se=\E[m:so=\E[7m:ue@:us@:tc=vt100:
+vt100-nav-w|vt100-w-nav|dec vt100 132 cols 14 lines (no advanced video option):\
+ :co#132:li#14:tc=vt100-nav:
+
+# vt100 with one of the 24 lines used as a status line.
+# We put the status line on the top.
+vt100-s|vt100-s-top|vt100-top-s|vt100 for use with top sysline:\
+ :es:hs:\
+ :li#23:\
+ :cl=\E[2;1H\E[J:cm=\E[%i%+^A;%dH:cs=\E[%i%i%d;%dr:\
+ :ds=\E7\E[1;24r\E8:fs=\E8:ho=\E[2;1H:is=\E7\E[2;24r\E8:\
+ :ts=\E7\E[1;%p1%dH\E[1K:\
+ :tc=vt100-am:
+
+# Status line at bottom.
+# Clearing the screen will clobber status line.
+vt100-s-bot|vt100-bot-s|vt100 for use with bottom sysline:\
+ :es:hs:\
+ :li#23:\
+ :ds=\E7\E[1;24r\E8:fs=\E8:is=\E[1;23r\E[23;1H:\
+ :ts=\E7\E[24;%p1%dH\E[1K:\
+ :tc=vt100-am:
+
+# Most of the `vt100' emulators out there actually emulate a vt102
+# This entry (or vt102-nsgr) is probably the right thing to use for
+# these.
+vt102|dec vt102:\
+ :mi:\
+ :al=\E[L:dc=\E[P:dl=\E[M:ei=\E[4l:im=\E[4h:tc=vt100:
+vt102-w|dec vt102 in wide mode:\
+ :co#132:\
+ :r3=\E[?3h:tc=vt102:
+
+# Many brain-dead PC comm programs that pretend to be `vt100-compatible'
+# fail to interpret the ^O and ^N escapes properly. Symptom: the :me:
+# string in the canonical vt100 entry above leaves the screen littered
+# with little snowflake or star characters (IBM PC ROM character \017 = ^O)
+# after highlight turnoffs. This entry should fix that, and even leave
+# ACS support working, at the cost of making multiple-highlight changes
+# slightly more expensive.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> July 22 1995
+vt102-nsgr|vt102 no sgr (use if you see snowflakes after highlight changes):\
+ :me=\E[m:sa@:\
+ :tc=vt102:
+
+# VT125 Graphics CRT. Clear screen also erases graphics
+vt125|vt125 graphics terminal:\
+ :cl=\E[H\E[2J\EPpS(E)\E\:tc=vt100:
+
+# This isn't a DEC entry, it came from University of Wisconsin.
+# (vt131: I added <rmam>/<smam> based on the init string, also :bs: -- esr)
+vt131|dec vt131:\
+ :am:bs:xn:\
+ :co#80:it#8:li#24:vt#3:\
+ :RA=\E[?7h:SA=\E[?7h:bl=^G:cd=50\E[J:ce=3\E[K:\
+ :cl=50\E[;H\E[2J:cm=5\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :do=^J:ho=\E[H:is=\E[1;24r\E[24;1H:k1=\EOP:k2=\EOQ:\
+ :k3=\EOR:k4=\EOS:kb=^H:kd=\EOB:ke=\E[?1l\E>:kl=\EOD:\
+ :kr=\EOC:ks=\E[?1h\E=:ku=\EOA:le=^H:mb=2\E[5m:md=2\E[1m:\
+ :me=2\E[m:mr=2\E[7m:nd=2\E[C:nw=^M^J:\
+ :r1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:rc=\E8:sc=\E7:\
+ :se=2\E[m:so=2\E[7m:sr=5\EM:ta=^I:ue=2\E[m:up=2\E[A:\
+ :us=2\E[4m:
+
+# vt132 - like vt100 but slower and has ins/del line and such.
+# I'm told that :im:/:ei: are backwards in the terminal from the
+# manual and from the ANSI standard, this describes the actual
+# terminal. I've never actually used a vt132 myself, so this
+# is untested.
+#
+vt132|DEC vt132:\
+ :xn:\
+ :al=\E[L:dc=\E[P:dl=\E[M:ei=\E[4h:im=\E[4l:ip=:sf=\n:tc=vt100:
+
+# This vt220 description maps F5--F9 to the second block of function keys
+# at the top of the keyboard. The "DO" key is used as F10 to avoid conflict
+# with the key marked (ESC) on the vt220. See vt220d for an alternate mapping.
+# PF1--PF4 are used as F1--F4.
+#
+vt220-old|vt200-old|DEC VT220 in vt100 emulation mode:\
+ :am:bs:mi:pt:xn:xo:\
+ :co#80:li#24:vt#3:\
+ :@7=\E[4~:RA=\E[?7l:SA=\E[?7h:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=\E(B:al=\E[L:as=\E(0:bl=^G:cd=\E[J:ce=\E[K:\
+ :cl=\E[H\E[2J:cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :dc=\E[P:dl=\E[M:do=\E[B:ei=\E[4l:ho=\E[H:\
+ :if=/usr/share/tabset/vt100:im=\E[4h:\
+ :is=\E[1;24r\E[24;1H:k1=\EOP:k2=\EOQ:k3=\EOR:k4=\EOS:\
+ :k5=\E[17~:k6=\E[18~:k7=\E[19~:k8=\E[20~:k9=\E[21~:\
+ :k;=\E[29~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:kb=^H:\
+ :kd=\E[B:kh=\E[1~:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:mb=\E[5m:\
+ :md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:nl=^J:\
+ :r2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:rc=\E8:\
+ :rf=/usr/share/tabset/vt100:\
+ :..sa=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;:\
+ :sc=\E7:se=\E[27m:sf=20\ED:so=\E[7m:sr=14\EM:ta=^I:\
+ :ue=\E[24m:up=\E[A:us=\E[4m:ve=\E[?25h:vi=\E[?25l:
+
+# A much better description of the VT200/220; used to be vt220-8
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+# (sgr removed to fit entry within 1023 bytes)
+# (terminfo-only capabilities suppressed to fit entry within 1023 bytes)
+vt220|vt200|dec vt220:\
+ :am:bs:mi:ms:xn:xo:\
+ :co#80:it#8:li#24:vt#3:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :LE=\E[%dD:RI=\E[%dC:UP=\E[%dA:ae=^O:al=\E[L:as=^N:bl=^G:\
+ :cd=\E[J:ce=\E[K:cl=\E[H\E[J:cm=\E[%i%d;%dH:cr=^M:\
+ :cs=\E[%i%d;%dr:ct=\E[3g:dc=\E[P:dl=\E[M:do=^J:ec=\E[%dX:\
+ :ei=\E[4l:ho=\E[H:if=/usr/share/tabset/vt100:im=\E[4h:\
+ :is=\E[?7h\E[>\E[?1h\E F\E[?4l:k1=\EOP:k2=\EOQ:k3=\EOR:\
+ :k4=\EOS:k6=\E[17~:k7=\E[18~:k8=\E[19~:k9=\E[20~:kI=\E[2~:\
+ :kN=\E[6~:kP=\E[5~:kb=^H:kd=\E[B:kh=\E[H:kl=\E[D:kr=\E[C:\
+ :ku=\E[A:le=^H:mb=\E[5m:md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:\
+ :nw=\EE:rc=\E8:sc=\E7:se=\E[27m:sf=\ED:so=\E[7m:sr=\EM:\
+ :st=\EH:ta=^I:ue=\E[24m:up=\E[A:us=\E[4m:vb=\E[?5h\E[?5l:
+vt220-w|vt200-w|DEC vt220 in wide mode:\
+ :co#132:\
+ :r3=\E[?3h:tc=vt220:
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+# (sgr removed to fit entry within 1023 bytes)
+# (terminfo-only capabilities suppressed to fit entry within 1023 bytes)
+vt220-8bit|vt220-8|vt200-8bit|vt200-8|dec vt220/200 in 8-bit mode:\
+ :am:bs:mi:ms:xn:xo:\
+ :co#80:it#8:li#24:vt#3:\
+ :AL=\233%dL:DC=\233%dP:DL=\233%dM:DO=\233%dB:IC=\233%d@:\
+ :LE=\233%dD:RI=\233%dC:UP=\233%dA:ae=^O:al=\233L:as=^N:\
+ :bl=^G:cd=\233J:ce=\233K:cl=\233H\233J:cm=\233%i%d;%dH:\
+ :cr=^M:cs=\233%i%d;%dr:ct=\2333g:dc=\233P:dl=\233M:do=^J:\
+ :ec=\233%dX:ei=\2334l:ho=\233H:\
+ :if=/usr/share/tabset/vt100:im=\2334h:\
+ :is=\233?7h\233>\233?1h\E F\233?4l:k1=\EOP:k2=\EOQ:\
+ :k3=\EOR:k4=\EOS:k6=\23317~:k7=\23318~:k8=\23319~:\
+ :k9=\23320~:kI=\2332~:kN=\2336~:kP=\2335~:kb=^H:kd=\233B:\
+ :kh=\233H:kl=\233D:kr=\233C:ku=\233A:le=^H:mb=\2335m:\
+ :md=\2331m:me=\233m:mr=\2337m:nd=\233C:nw=\EE:rc=\E8:\
+ :sc=\E7:se=\23327m:sf=\ED:so=\2337m:sr=\EM:st=\EH:ta=^I:\
+ :ue=\23324m:up=\233A:us=\2334m:vb=\233?5h\233?5l:
+
+#
+# vt220d:
+# This vt220 description regards F6--F10 as the second block of function keys
+# at the top of the keyboard. This mapping follows the description given
+# in the VT220 Programmer Reference Manual and agrees with the labeling
+# on some terminals that emulate the vt220. There is no support for an F5.
+# See vt220 for an alternate mapping.
+#
+vt220d|DEC VT220 in vt100 mode with DEC function key labeling:\
+ :F1=\E[23~:F2=\E[24~:F3=\E[25~:F4=\E[26~:F5=\E[28~:\
+ :F6=\E[29~:F7=\E[31~:F8=\E[32~:F9=\E[33~:FA=\E[34~:k5@:\
+ :k6=\E[17~:k7=\E[18~:k8=\E[19~:k9=\E[20~:k;=\E[21~:\
+ :tc=vt220-old:
+
+vt220-nam|v200-nam|VT220 in vt100 mode with no auto margins:\
+ :am@:\
+ :r2=\E>\E[?3l\E[?4l\E[?5l\E[?7l\E[?8h:tc=vt220:
+
+# vt220 termcap written Tue Oct 25 20:41:10 1988 by Alex Latzko
+# (not an official DEC entry!)
+# The problem with real vt220 terminals is they don't send escapes when
+# in vt220 mode. This can be gotten around two ways. 1> don't send
+# escapes or 2> put the vt220 into vt100 mode and use all the nifty
+# features of vt100 advanced video which it then has.
+#
+# This entry takes the view of putting a vt220 into vt100 mode so
+# you can use the escape key in emacs and everything else which needs it.
+#
+# You probably don't want to use this on a VMS machine since VMS will think
+# it has a vt220 and will get fouled up coming out of emacs
+#
+# From: Alexander Latzko <latzko@marsenius.rutgers.edu>, 30 Dec 1996
+# (Added vt100 :rc:,:sc: to quiet a tic warning -- esr)
+vt200-js|vt220-js|dec vt200 series with jump scroll:\
+ :am:\
+ :co#80:\
+ :al=\E[L:bl=^G:cd=\E[J:ce=\E[K:cl=\E[H\E[J:cm=\E[%i%d;%dH:\
+ :cr=^M:cs=\E[%i%d;%dr:dc=\E[P:dl=\E[M:dm=:do=^J:ed=:\
+ :ei=\E[4l:ho=\E[H:im=\E[4h:\
+ :is=\E[61"p\E[H\E[?3l\E[?4l\E[?1l\E[?5l\E[?6l\E[?7h\E[?8h\E[?25h\E>\E[m:\
+ :k1=\EOP:k2=\EOQ:k3=\EOR:k4=\EOS:kb=^H:kd=\EOB:\
+ :ke=\E[?1l\E>:kl=\EOD:kr=\EOC:ks=\E[?1h\E=:ku=\EOA:le=^H:\
+ :nw=^M\ED:r1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:rc=\E8:\
+ :rf=/usr/lib/tabset/vt100:sc=\E7:se=5\E[27m:sf=\ED:\
+ :so=5\E[7m:sr=\EM:ta=^I:ue=\E[24m:up=\E[A:us=\E[4m:
+
+
+# This was DEC's vt320. Use the purpose-built one below instead
+#vt320|DEC VT320 in vt100 emulation mode,
+# use=vt220,
+
+#
+# Use v320n for SCO's LYRIX. Otherwise, use Adam Thompson's vt320-nam.
+#
+vt320nam|v320n|DEC VT320 in vt100 emul. mode with NO AUTO WRAP mode:\
+ :am@:\
+ :r2=\E>\E[?3l\E[?4l\E[?5l\E[?7l\E[?8h:tc=vt220:
+
+# These entries are not DEC's official ones, they were purpose-built for the
+# VT320. Here are the designer's notes:
+# <kel> is end on a PC kbd. Actually 'select' on a VT. Mapped to
+# 'Erase to End of Field'... since nothing seems to use 'end' anyways...
+# khome is Home on a PC kbd. Actually 'FIND' on a VT.
+# Things that use <knxt> usually use tab anyways... and things that don't use
+# tab usually use <knxt> instead...
+# kprv is same as tab - Backtab is useless...
+# I left out :sa: because of its RIDICULOUS complexity,
+# and the resulting fact that it causes the termcap translation of the entry
+# to SMASH the 1k-barrier...
+# From: Adam Thompson <athompso@pangea.ca> Sept 10 1995
+# (vt320: uncommented :fs:, commented out <kslt> to avoid a conflict --esr)
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+# (sgr removed to fit entry within 1023 bytes)
+# (terminfo-only capabilities suppressed to fit entry within 1023 bytes)
+vt320|vt300|dec vt320 7 bit terminal:\
+ :am:es:hs:mi:ms:xn:\
+ :co#80:li#24:ws#80:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :K1=\EOw:K2=\EOu:K3=\EOy:K4=\EOq:K5=\EOs:LE=\E[%dD:\
+ :RI=\E[%dC:UP=\E[%dA:ae=\E(B:al=\E[L:as=\E(0:bl=^G:cd=\E[J:\
+ :ce=\E[K:cl=\E[H\E[2J:cm=\E[%i%d;%dH:cr=^M:\
+ :cs=\E[%i%d;%dr:ct=\E[3g:dc=\E[P:dl=\E[M:do=^J:ec=\E[%dX:\
+ :ei=\E[4l:fs=\E[0$}:ho=\E[H:im=\E[4h:\
+ :is=\E>\E[?3l\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H:\
+ :k1=\EOP:k2=\EOQ:k3=\EOR:k4=\EOS:k6=\E[17~:k7=\E[18~:\
+ :k8=\E[19~:k9=\E[20~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:\
+ :kb=\177:kd=\EOB:ke=\E[?1l\E>:kh=\E[1~:kl=\EOD:kr=\EOC:\
+ :ks=\E[?1h\E=:ku=\EOA:le=^H:mb=\E[5m:md=\E[1m:me=\E[m:\
+ :mr=\E[7m:nd=\E[C:nw=\EE:rc=\E8:sc=\E7:se=\E[m:sf=\ED:\
+ :so=\E[7m:sr=\EM:st=\EH:ta=^I:ts=\E[1$}\E[H\E[K:ue=\E[m:\
+ :up=\E[A:us=\E[4m:ve=\E[?25h:vi=\E[?25l:
+vt320-nam|vt300-nam|dec vt320 7 bit terminal with no am to make SAS happy:\
+ :am@:\
+ :is=\E>\E[?3l\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H:\
+ :r2=\E>\E[?3l\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H:\
+ :tc=vt320:
+# We have to init 132-col mode, not 80-col mode.
+vt320-w|vt300-w|dec vt320 wide 7 bit terminal:\
+ :co#132:ws#132:\
+ :is=\E>\E[?3h\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H:\
+ :r2=\E>\E[?3h\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H:\
+ :tc=vt320:
+vt320-w-nam|vt300-w-nam|dec vt320 wide 7 bit terminal with no am:\
+ :am@:\
+ :is=\E>\E[?3h\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H:\
+ :r2=\E>\E[?3h\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H:\
+ :tc=vt320-w:
+
+# VT330 and VT340 -- These are ReGIS and SIXEL graphics terminals
+# which are pretty much a superset of the VT320. They have the
+# host writable status line, yet another different DRCS matrix size,
+# and such, but they add the DEC Technical character set, Multiple text
+# pages, selectable length pages, and the like. The difference between
+# the vt330 and vt340 is that the latter has only 2 planes and a monochrome
+# monitor, the former has 4 planes and a color monitor. These terminals
+# support VT131 and ANSI block mode, but as with much of these things,
+# termcap/terminfo doesn't deal with these features.
+#
+# Note that this entry is set up in what was the standard way for GNU
+# Emacs v18 terminal modes to deal with the cursor keys in that the arrow
+# keys were switched into application mode at the same time the numeric pad
+# is switched into application mode. This changes the definitions of the
+# arrow keys. Emacs v19 is smarter and mines its keys directly out of
+# your termcap or terminfo entry.
+#
+# From: Daniel Glasser <dag@persoft.persoft.com>, 13 Oct 1993
+# (vt340: string capability "sb=\E[M" corrected to "sr";
+# also, added <rmam>/<smam> based on the init string -- esr)
+vt340|dec-vt340|vt330|dec-vt330|dec vt340 graphics terminal with 24 line page:\
+ :am:es:hs:mi:ms:xn:xo:\
+ :co#80:it#8:li#24:vt#3:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :LE=\E[%dD:RA=\E[?7l:RI=\E[%dC:SA=\E[?7h:UP=\E[%dA:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=^O:al=\E[L:as=^N:cd=\E[J:ce=\E[K:cl=\E[H\E[J:\
+ :cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:ct=\E[3g:dc=\E[P:\
+ :dl=\E[M:do=^J:ds=\E[2$~\r\E[1$}\E[K\E[$}:ei=\E[4l:\
+ :fs=\E[$}:ho=\E[H:im=\E[4h:\
+ :is=\E<\E F\E>\E[?1h\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h\E[1;24r\E[24;1H:\
+ :k1=\EOP:k2=\EOQ:k3=\EOR:k4=\EOS:k6=\E[17~:k7=\E[18~:\
+ :k8=\E[19~:k9=\E[20~:kb=^H:kd=\EOB:ke=\E[?1l\E>:kl=\EOD:\
+ :kr=\EOC:ks=\E[?1h\E=:ku=\EOA:l1=pf1:l2=pf2:l3=pf3:l4=pf4:\
+ :le=^H:mb=\E[5m:md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:nw=^M\ED:\
+ :r1=\E[?3l:rc=\E8:rf=/usr/share/tabset/vt300:sc=\E7:\
+ :se=\E[27m:sf=\ED:so=\E[7m:sr=\EM:st=\EH:ta=^I:\
+ :ts=\E[2$~\E[1$}\E[1;%dH:ue=\E[24m:up=\E[A:us=\E[4m:\
+ :vb=200\E[?5h\E[?5l:ve=\E[?25h:vi=\E[?25l:vs=\E[?25h:
+
+# DEC doesn't supply a vt400 description, so we add Daniel Glasser's
+# (originally written with vt420 as its primary name, and usable for it).
+#
+# VT400/420 -- This terminal is a superset of the vt320. It adds the multiple
+# text pages and long text pages with selectable length of the vt340, along
+# with left and right margins, rectangular area text copy, fill, and erase
+# operations, selected region character attribute change operations,
+# page memory and rectangle checksums, insert/delete column, reception
+# macros, and other features too numerous to remember right now. TERMCAP
+# can only take advantage of a few of these added features.
+#
+# Note that this entry is set up in what was the standard way for GNU
+# Emacs v18 terminal modes to deal with the cursor keys in that the arrow
+# keys were switched into application mode at the same time the numeric pad
+# is switched into application mode. This changes the definitions of the
+# arrow keys. Emacs v19 is smarter and mines its keys directly out of
+# your termcap entry.
+#
+# From: Daniel Glasser <dag@persoft.persoft.com>, 13 Oct 1993
+# (vt400: string capability ":sb=\E[M:" corrected to ":sr=\E[M:";
+# also, added <rmam>/<smam> based on the init string -- esr)
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+# (sgr removed to fit entry within 1023 bytes)
+# (terminfo-only capabilities suppressed to fit entry within 1023 bytes)
+vt400|vt400-24|dec-vt400|dec vt400 24x80 column autowrap:\
+ :am:es:hs:mi:ms:xn:xo:\
+ :co#80:it#8:li#24:vt#3:\
+ :AL=\E[%dL:DC=\E[%dP:DL=\E[%dM:DO=\E[%dB:IC=\E[%d@:\
+ :LE=\E[%dD:RI=\E[%dC:UP=\E[%dA:ae=^O:al=\E[L:as=^N:\
+ :cd=10\E[J:ce=4\E[K:cl=10\E[H\E[J:cm=\E[%i%d;%dH:cr=^M:\
+ :cs=\E[%i%d;%dr:ct=\E[3g:dc=\E[P:dl=\E[M:do=^J:\
+ :ds=\E[2$~\r\E[1$}\E[K\E[$}:ei=\E[4l:fs=\E[$}:ho=\E[H:\
+ :ic=\E[@:im=\E[4h:\
+ :is=\E<\E F\E>\E[?1h\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h\E[1;24r\E[24;1H:\
+ :k1=\EOP:k2=\EOQ:k3=\EOR:k4=\EOS:k6=\E[17~:k7=\E[18~:\
+ :k8=\E[19~:k9=\E[20~:kb=^H:kd=\EOB:ke=\E[?1l\E>:kl=\EOD:\
+ :kr=\EOC:ks=\E[?1h\E=:ku=\EOA:le=^H:mb=\E[5m:md=\E[1m:\
+ :me=\E[m:mr=\E[7m:nd=\E[C:nw=^M\ED:rc=\E8:sc=\E7:se=\E[27m:\
+ :sf=\ED:so=\E[7m:sr=\EM:st=\EH:ta=^I:\
+ :ts=\E[2$~\E[1$}\E[1;%dH:ue=\E[24m:up=\E[A:us=\E[4m:\
+ :vb=200\E[?5h\E[?5l:ve=\E[?25h:vi=\E[?25l:vs=\E[?25h:
+
+# (vt420: I removed :k0:, it collided with <kf10>. I also restored
+# a missing :sc: -- esr)
+vt420|DEC VT420:\
+ :am:mi:xn:xo:\
+ :co#80:li#24:vt#3:\
+ :*6=\E[4~:@0=\E[1~:RA=\E[?7l:\
+ :S5=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:\
+ :SA=\E[?7h:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=\E(B:al=\E[L:as=\E(0:bl=^G:cd=\E[J:ce=\E[K:\
+ :cl=\E[H\E[2J:cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :dc=\E[P:dl=\E[M:do=\E[B:ei=\E[4l:ho=\E[H:\
+ :i2=\E[?67h\E[64;1"p:if=/usr/share/tabset/vt300:\
+ :im=\E[4h:is=\E[1;24r\E[24;1H:k1=\EOP:k2=\EOQ:k3=\EOR:\
+ :k4=\EOS:k5=\E[17~:k6=\E[18~:k7=\E[19~:k8=\E[20~:\
+ :k9=\E[21~:k;=\E[29~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:\
+ :kb=^H:kd=\E[B:ke=\E>:kl=\E[D:kr=\E[C:ks=\E=:ku=\E[A:le=^H:\
+ :mb=\E[5m:md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:\
+ :r3=\E[?67h\E[64;1"p:rc=\E8:rf=/usr/share/tabset/vt300:\
+ :..sa=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;:\
+ :sc=\E7:se=\E[m:sf=\ED:so=\E[7m:sr=\EM:ta=^I:ue=\E[m:\
+ :up=\E[A:us=\E[4m:
+
+#
+# DEC VT220 and up support DECUDK (user-defined keys). DECUDK (i.e., pfx)
+# takes two parameters, the key and the string. Translating the key is
+# straightforward (keys 1-5 are not defined on real terminals, though some
+# emulators define these):
+#
+# if (key < 16) then value = key;
+# else if (key < 21) then value = key + 1;
+# else if (key < 25) then value = key + 2;
+# else if (key < 27) then value = key + 3;
+# else if (key < 30) then value = key + 4;
+# else value = key + 5;
+#
+# The string must be the hexadecimal equivalent, e.g., "5052494E" for "PRINT".
+# There's no provision in terminfo for emitting a string in this format, so the
+# application has to know it.
+#
+vt420pc|DEC VT420 w/PC keyboard:\
+ :@7=\E[4~:F1=\E[23~:F2=\E[24~:F3=\E[11;2~:F4=\E[12;2~:\
+ :F5=\E[13;2~:F6=\E[14;2~:F7=\E[15;2~:F8=\E[17;2~:\
+ :F9=\E[18;2~:FA=\E[19;2~:FB=\E[20;2~:FC=\E[21;2~:\
+ :FD=\E[23;2~:FE=\E[24;2~:FF=\E[23~:FG=\E[24~:FH=\E[25~:\
+ :FI=\E[26~:FJ=\E[28~:FK=\E[29~:FL=\E[31~:FM=\E[32~:\
+ :FN=\E[33~:FO=\E[34~:FP=\E[35~:FQ=\E[36~:FR=\E[23;2~:\
+ :FS=\E[24;2~:FT=\E[25;2~:FU=\E[26;2~:FV=\E[28;2~:\
+ :FW=\E[29;2~:FX=\E[31;2~:FY=\E[32;2~:FZ=\E[33;2~:\
+ :Fa=\E[34;2~:Fb=\E[35;2~:Fc=\E[36;2~:\
+ :S6=USR_TERM\072vt420pcdos\072:k1=\E[11~:k2=\E[12~:\
+ :k3=\E[13~:k4=\E[14~:k5=\E[15~:k6=\E[17~:k7=\E[18~:\
+ :k8=\E[19~:k9=\E[20~:k;=\E[21~:kD=\177:kh=\E[H:\
+ :..px=\EP1;1|%?%{16}%p1%>%t%{0}%e%{21}%p1%>%t%{1}%e%{25}%p1%>%t%{2}%e%{27}%p1%>%t%{3}%e%{30}%p1%>%t%{4}%e%{5}%;%p1%+%d/%p2%s\E\:tc=vt420:
+
+vt420pcdos|DEC VT420 w/PC for DOS Merge:\
+ :li#25:\
+ :S1=%?%p2%{19}%=%t\E\023\021%e%p2%{32}%<%t\E%p2%c%e%p2%{127}%=%t\E\177%e%p2%c%;:\
+ :S4=\E[?1;2r\E[34h:\
+ :S5=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:S6@:\
+ :me=\E[m:sa@:\
+ :tc=vt420pc:
+
+vt420f|DEC VT420 with VT kbd; VT400 mode; F1-F5 used as Fkeys:\
+ :F1=\E[23~:F2=\E[24~:F3=\E[25~:F4=\E[26~:F5=\E[28~:\
+ :F6=\E[29~:F7=\E[31~:F8=\E[32~:F9=\E[33~:FA=\E[34~:\
+ :k1=\E[11~:k2=\E[12~:k3=\E[13~:k4=\E[14~:k5=\E[15~:\
+ :k6=\E[17~:k7=\E[18~:k8=\E[19~:k9=\E[20~:k;=\E[21~:\
+ :kD=\177:kh=\E[H:l1=\EOP:l2=\EOQ:l3=\EOR:l4=\EOS:\
+ :tc=vt420:
+
+vt510|DEC VT510:\
+ :tc=vt420:
+vt510pc|DEC VT510 w/PC keyboard:\
+ :tc=vt420pc:
+vt510pcdos|DEC VT510 w/PC for DOS Merge:\
+ :tc=vt420pcdos:
+
+# VT520/VT525
+#
+# The VT520 is a monochrome text terminal capable of managing up to
+# four independent sessions in the terminal. It has multiple ANSI
+# emulations (VT520, VT420, VT320, VT220, VT100, VT PCTerm, SCO Console)
+# and ASCII emulations (WY160/60, PCTerm, 50/50+, 150/120, TVI 950,
+# 925 910+, ADDS A2). This terminfo data is for the ANSI emulations only.
+#
+# Terminal Set-Up is entered by pressing [F3], [Caps Lock]/[F3] or
+# [Alt]/[Print Screen] depending upon which keyboard and which
+# terminal mode is being used. If Set-Up has been disabled or
+# assigned to an unknown key, Set-Up may be entered by pressing
+# [F3] as the first key after power up, regardless of keyboard type.
+# (vt520: I added <rmam>/<smam> based on the init string, also :sc: -- esr)
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+vt520|DEC VT520:\
+ :am:mi:xn:xo:\
+ :co#80:li#24:vt#3:\
+ :*6=\E[4~:@0=\E[1~:RA=\E[?7l:\
+ :S5=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:\
+ :SA=\E[?7h:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=\E(B:al=\E[L:as=\E(0:bl=^G:cd=\E[J:ce=\E[K:\
+ :cl=\E[H\E[2J:cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :dc=\E[P:dl=\E[M:do=\E[B:ei=\E[4l:ho=\E[H:\
+ :i2=\E[?67h\E[64;1"p:if=/usr/share/tabset/vt300:\
+ :im=\E[4h:is=\E[1;24r\E[24;1H:k0=\E[29~:k1=\EOP:k2=\EOQ:\
+ :k3=\EOR:k4=\EOS:k5=\E[17~:k6=\E[18~:k7=\E[19~:k8=\E[20~:\
+ :k9=\E[21~:k;=\E[29~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:\
+ :kb=^H:kd=\E[B:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:mb=\E[5m:\
+ :md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:r3=\E[?67h\E[64;1"p:\
+ :rc=\E8:rf=/usr/share/tabset/vt300:sc=\E7:se=\E[m:sf=\ED:\
+ :so=\E[7m:sr=\EM:ta=^I:ue=\E[m:up=\E[A:us=\E[4m:
+
+# (vt525: I added <rmam>/<smam> based on the init string;
+# removed :se:=\E[m, :ue:=\E[m, added :sc: -- esr)
+# (untranslatable capabilities removed to fit entry within 1023 bytes)
+vt525|DEC VT525:\
+ :am:mi:xn:xo:\
+ :co#80:li#24:vt#3:\
+ :*6=\E[4~:@0=\E[1~:RA=\E[?7l:\
+ :S5=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h:\
+ :SA=\E[?7h:\
+ :ac=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~:\
+ :ae=\E(B:al=\E[L:as=\E(0:bl=^G:cd=\E[J:ce=\E[K:\
+ :cl=\E[H\E[2J:cm=\E[%i%d;%dH:cr=^M:cs=\E[%i%d;%dr:\
+ :dc=\E[P:dl=\E[M:do=\E[B:ei=\E[4l:ho=\E[H:\
+ :i2=\E[?67h\E[64;1"p:if=/usr/share/tabset/vt300:\
+ :im=\E[4h:is=\E[1;24r\E[24;1H:k0=\E[29~:k1=\EOP:k2=\EOQ:\
+ :k3=\EOR:k4=\EOS:k5=\E[17~:k6=\E[18~:k7=\E[19~:k8=\E[20~:\
+ :k9=\E[21~:k;=\E[29~:kD=\E[3~:kI=\E[2~:kN=\E[6~:kP=\E[5~:\
+ :kb=^H:kd=\E[B:kl=\E[D:kr=\E[C:ku=\E[A:le=^H:mb=\E[5m:\
+ :md=\E[1m:me=\E[m:mr=\E[7m:nd=\E[C:r3=\E[?67h\E[64;1"p:\
+ :rc=\E8:rf=/usr/share/tabset/vt300:sc=\E7:se=\E[m:sf=\ED:\
+ :so=\E[7m:sr=\EM:ta=^I:ue=\E[m:up=\E[A:us=\E[4m:
diff --git a/tests/examplefiles/terminfo b/tests/examplefiles/terminfo
new file mode 100644
index 00000000..2b68d035
--- /dev/null
+++ b/tests/examplefiles/terminfo
@@ -0,0 +1,1445 @@
+######## This example from excerpt of <http://www.catb.org/esr/terminfo/>:
+#
+# Version 11.0.1
+# $Date: 2000/03/02 15:51:11 $
+# terminfo syntax
+#
+
+######## ANSI, UNIX CONSOLE, AND SPECIAL TYPES
+#
+# This section describes terminal classes and brands that are still
+# quite common.
+#
+
+#### Specials
+#
+# Special "terminals". These are used to label tty lines when you don't
+# know what kind of terminal is on it. The characteristics of an unknown
+# terminal are the lowest common denominator - they look about like a ti 700.
+#
+
+dumb|80-column dumb tty,
+ am,
+ cols#80,
+ bel=^G, cr=^M, cud1=^J, ind=^J,
+unknown|unknown terminal type,
+ gn, use=dumb,
+lpr|printer|line printer,
+ hc, os,
+ cols#132, lines#66,
+ bel=^G, cr=^M, cub1=^H, cud1=^J, ff=^L, ind=^J,
+glasstty|classic glass tty interpreting ASCII control characters,
+ am,
+ cols#80,
+ bel=^G, clear=^L, cr=^M, cub1=^H, cud1=^J, ht=^I, kcub1=^H,
+ kcud1=^J, nel=^M^J,
+vanilla,
+ bel=^G, cr=^M, cud1=^J, ind=^J,
+
+#### ANSI.SYS/ISO 6429/ECMA-48 Capabilities
+#
+# See the end-of-file comment for more on these.
+#
+
+# ANSI capabilities are broken up into pieces, so that a terminal
+# implementing some ANSI subset can use many of them.
+ansi+local1,
+ cub1=\E[D, cud1=\E[B, cuf1=\E[C, cuu1=\E[A,
+ansi+local,
+ cub=\E[%p1%dD, cud=\E[%p1%dB, cuf=\E[%p1%dC,
+ cuu=\E[%p1%dA,
+ use=ansi+local1,
+ansi+tabs,
+ cbt=\E[Z, ht=^I, hts=\EH, tbc=\E[2g,
+ansi+inittabs,
+ it#8, use=ansi+tabs,
+ansi+erase,
+ clear=\E[H\E[J, ed=\E[J, el=\E[K,
+ansi+rca,
+ hpa=\E[%p1%{1}%+%dG, vpa=\E[%p1%{1}%+%dd,
+ansi+cup,
+ cup=\E[%i%p1%d;%p2%dH, home=\E[H,
+ansi+rep,
+ rep=%p1%c\E[%p2%{1}%-%db,
+ansi+idl1,
+ dl1=\E[M, il1=\E[L,
+ansi+idl,
+ dl=\E[%p1%dM, il=\E[%p1%dL, use=ansi+idl1,
+ansi+idc,
+ dch1=\E[P, ich=\E[%p1%d@, ich1=\E[@, rmir=\E6, smir=\E6,
+ansi+arrows,
+ kbs=^H, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ khome=\E[H,
+ansi+sgr|ansi graphic renditions,
+ blink=\E[5m, invis=\E[8m, rev=\E[7m, sgr0=\E[0m,
+ansi+sgrso|ansi standout only,
+ rmso=\E[m, smso=\E[7m,
+ansi+sgrul|ansi underline only,
+ rmul=\E[m, smul=\E[4m,
+ansi+sgrbold|ansi graphic renditions; assuming terminal has bold; not dim,
+ bold=\E[1m,
+ sgr=\E[%?%p1%t7;%;%?%p2%t4;%;%?%p3%t7;%;%?%p4%t5;%;%?%p6%t1;%;m, use=ansi+sgr, use=ansi+sgrso, use=ansi+sgrul,
+ansi+sgrdim|ansi graphic renditions; assuming terminal has dim; not bold,
+ dim=\E[2m,
+ sgr=\E[%?%p1%t7;%;%?%p2%t4;%;%?%p3%t7;%;%?%p4%t5;%;%?%p5%t2;%;m, use=ansi+sgr, use=ansi+sgrso, use=ansi+sgrul,
+ansi+pp|ansi printer port,
+ mc0=\E[0i, mc4=\E[4i, mc5=\E[5i,
+ansi+csr|ansi scroll-region plus cursor save & restore,
+ csr=\E[%i%p1%d;%p2%dr, rc=\E8, sc=\E7,
+
+# The IBM PC alternate character set. Plug this into any Intel console entry.
+# We use \E[11m for rmacs rather than \E[12m so the <acsc> string can use the
+# ROM graphics for control characters such as the diamond, up- and down-arrow.
+# This works with the System V, Linux, and BSDI consoles. It's a safe bet this
+# will work with any Intel console, they all seem to have inherited \E[11m
+# from the ANSI.SYS de-facto standard.
+klone+acs|alternate character set for ansi.sys displays,
+ acsc=+\020\,\021-\030.^Y0\333`\004a\261f\370g\361h\260j\331k\277l\332m\300n\305o~p\304q\304r\304s_t\303u\264v\301w\302x\263y\363z\362{\343|\330}\234~\376,
+ rmacs=\E[10m, smacs=\E[11m,
+
+# Highlight controls corresponding to the ANSI.SYS standard. Most
+# console drivers for Intel boxes obey these. Makes the same assumption
+# about \E[11m as klone+acs. True ANSI/ECMA-48 would have <rmso=\E[27m>,
+# <rmul=\E[24m>, but this isn't a documented feature of ANSI.SYS.
+klone+sgr|attribute control for ansi.sys displays,
+ blink=\E[5m, bold=\E[1m, invis=\E[8m, rev=\E[7m,
+ rmpch=\E[10m, rmso=\E[m, rmul=\E[m,
+ sgr=\E[0;10%?%p1%t;7%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m,
+ sgr0=\E[0;10m, smpch=\E[11m, smso=\E[7m, smul=\E[4m,
+ use=klone+acs,
+
+# Highlight controls corresponding to the ANSI.SYS standard. *All*
+# console drivers for Intel boxes obey these. Does not assume \E[11m will
+# work; uses \E[12m instead, which is pretty bulletproof but loses you the ACS
+# diamond and arrow characters under curses.
+klone+sgr-dumb|attribute control for ansi.sys displays (no ESC [ 11 m),
+ blink=\E[5m, bold=\E[1m, invis=\E[8m, rev=\E[7m, rmso=\E[m,
+ rmul=\E[m,
+ sgr=\E[0;10%?%p1%t;7%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;12%;m,
+ sgr0=\E[0;10m, smacs=\E[12m, smso=\E[7m, smul=\E[4m,
+ use=klone+acs,
+
+# KOI8-R (RFC1489) acs (alternate character set)
+# From: Qing Long <qinglong@Bolizm.ihep.su>, 24 Feb 1996.
+klone+koi8acs|alternate character set for ansi.sys displays with KOI8 charset,
+ acsc=+\020\,\021-\036.^_0\215`\004a\237f\234g\232h\222i\220j\205k\203l\202m\204n\212o\213p\216q\0r\217s\214t\206u\207v\210w\211x\201y\230z\231{\267|\274}L~\225,
+ rmacs=\E[10m, smacs=\E[11m,
+
+# ANSI.SYS color control. The setab/setaf caps depend on the coincidence
+# between SVr4/XPG4's color numbers and ANSI.SYS attributes. Here are longer
+# but equivalent strings that don't rely on that coincidence:
+# setb=\E[4%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m,
+# setf=\E[3%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m,
+# The DOS 5 manual asserts that these sequences meet the ISO 6429 standard.
+# They match a subset of ECMA-48.
+klone+color|color control for ansi.sys and ISO6429-compatible displays,
+ colors#8, ncv#3, pairs#64,
+ op=\E[37;40m, setab=\E[4%p1%dm, setaf=\E[3%p1%dm,
+
+# This is better than klone+color, it doesn't assume white-on-black as the
+# default color pair, but many `ANSI' terminals don't grok the <op> cap.
+ecma+color|color control for ECMA-48-compatible terminals,
+ colors#8, ncv#3, pairs#64,
+ op=\E[39;49m, setab=\E[4%p1%dm, setaf=\E[3%p1%dm,
+
+# Attribute control for ECMA-48-compatible terminals
+ecma+sgr|attribute capabilities for true ECMA-48 terminals,
+ rmso=\E[27m, rmul=\E[24m,
+ use=klone+sgr,
+
+# For comparison, here are all the capabilities implied by the Intel
+# Binary Compatibility Standard (level 2) that fit within terminfo.
+# For more detail on this rather pathetic standard, see the comments
+# near the end of this file.
+ibcs2|Intel Binary Compatibility Standard prescriptions,
+ cbt=\E[Z, clear=\Ec, cub=\E[%p1%dD, cud=\E[%p1%dB,
+ cuf=\E[%p1%dC, cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA,
+ dch=\E[%p1%dP, dispc=\E=%p1%dg, ech=\E[%p1%dX,
+ hpa=\E[%i%p1%dG, hts=\EH, ich=\E[%p1%d@, il=\E[%p1%dL,
+ indn=\E[%p1%dS, rc=\E7, rin=\E[%p1%dT, rmam=\E[?7l, sc=\E7,
+ smam=\E[?7h, tbc=\E[g, vpa=\E[%i%p1%dd,
+
+#### ANSI/ECMA-48 terminals and terminal emulators
+#
+# See near the end of this file for details on ANSI conformance.
+# Don't mess with these entries! Lots of other entries depend on them!
+#
+# This section lists entries in a least-capable to most-capable order.
+# if you're in doubt about what `ANSI' matches yours, try them in that
+# order and back off from the first that breaks.
+
+# ansi-mr is for ANSI terminals with ONLY relative cursor addressing
+# and more than one page of memory. It uses local motions instead of
+# direct cursor addressing, and makes almost no assumptions. It does
+# assume auto margins, no padding and/or xon/xoff, and a 24x80 screen.
+ansi-mr|mem rel cup ansi,
+ am, xon,
+ cols#80, lines#24, use=vanilla, use=ansi+erase,
+ use=ansi+local1,
+
+# ansi-mini is a bare minimum ANSI terminal. This should work on anything, but
+# beware of screen size problems and memory relative cursor addressing.
+ansi-mini|minimum ansi standard terminal,
+ am, xon,
+ cols#80, lines#24, use=vanilla, use=ansi+cup,
+ use=ansi+erase,
+
+# ansi-mtabs adds relative addressing and minimal tab support
+ansi-mtabs|any ansi terminal with pessimistic assumptions,
+ it#8,
+ ht=^I, use=ansi+local1, use=ansi-mini,
+
+# ANSI X3.64 from emory!mlhhh (Hugh Hansard) via BRL
+#
+# The following is an entry for the full ANSI 3.64 (1977). It lacks
+# padding, but most terminals using the standard are "fast" enough
+# not to require any -- even at 9600 bps. If you encounter problems,
+# try including the padding specifications.
+#
+# Note: the :as: and :ae: specifications are not implemented here, for
+# the available termcap documentation does not make clear WHICH alternate
+# character set to specify. ANSI 3.64 seems to make allowances for several.
+# Please make the appropriate adjustments to fit your needs -- that is
+# if you will be using alternate character sets.
+#
+# There are very few terminals running the full ANSI 3.64 standard,
+# so I could only test this entry on one verified terminal (Visual 102).
+# I would appreciate the results on other terminals sent to me.
+#
+# Please report comments, changes, and problems to:
+#
+# U.S. MAIL: Hugh Hansard
+# Box: 22830
+# Emory University
+# Atlanta, GA. 30322.
+#
+# USENET {akgua,msdc,sb1,sb6,gatech}!emory!mlhhh.
+#
+# (Added vt100 <rc>,<sc> to quiet a tic warning --esr)
+ansi77|ansi 3.64 standard 1977 version,
+ am, mir,
+ cols#80, it#8, lines#24,
+ bel=^G, clear=\E[;H\E[2J, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=\E[B, cuf1=\E[C, cup=\E[%i%p1%d;%p2%dH,
+ cuu1=\E[A, dch1=\E[P, dl1=\E[M$<5*/>, ed=\E[J, el=\E[K,
+ home=\E[H, ht=^I, il1=\E[L$<5*/>, ind=\ED, kbs=^H,
+ kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A, kf1=\EOP,
+ kf2=\EOR, kf4=\EOS, khome=\E[H, nel=^M\ED, rc=\E8, ri=\EM,
+ rmir=\E[4l, rmso=\E[m, rmul=\E[m, sc=\E7, smir=\E[4h,
+ smso=\E[7m, smul=\E[4m,
+
+# Procomm and some other ANSI emulations don't recognize all of the ANSI-
+# standard capabilities. This entry deletes <cuu>, <cuf>, <cud>, <cub>, and
+# <vpa>/<hpa> capabilities, forcing curses to use repetitions of <cuu1>,
+# <cuf1>, <cud1> and <cub1>. Also deleted <ich> and <ich1>, as QModem up to
+# 5.03 doesn't recognize these. Finally, we delete <rep> and <ri>, which seem
+# to confuse many emulators. On the other hand, we can count on these programs
+# doing <rmacs>/<smacs>/<sgr>. Older versions of this entry featured
+# <invis=\E[9m>, but <invis=\E[8m> now seems to be more common under
+# ANSI.SYS influence.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Oct 30 1995
+pcansi-m|pcansi-mono|ibm-pc terminal programs claiming to be ansi (mono mode),
+ am, mir, msgr,
+ cols#80, it#8, lines#24,
+ bel=^G, cbt=\E[Z, clear=\E[H\E[J, cr=^M, cub1=\E[D,
+ cud1=\E[B, cuf1=\E[C, cup=\E[%i%p1%d;%p2%dH, cuu1=\E[A,
+ dch1=\E[P, dl1=\E[M, ed=\E[J, el=\E[K, home=\E[H, ht=^I,
+ hts=\EH, il1=\E[L, ind=^J, kbs=^H, kcub1=\E[D, kcud1=\E[B,
+ kcuf1=\E[C, kcuu1=\E[A, khome=\E[H, tbc=\E[2g,
+ use=klone+sgr-dumb,
+pcansi-25-m|pcansi25m|ibm-pc terminal programs with 25 lines (mono mode),
+ lines#25, use=pcansi-m,
+pcansi-33-m|pcansi33m|ibm-pc terminal programs with 33 lines (mono mode),
+ lines#33, use=pcansi-m,
+pcansi-43-m|ansi43m|ibm-pc terminal programs with 43 lines (mono mode),
+ lines#43, use=pcansi-m,
+# The color versions. All PC emulators do color...
+pcansi|ibm-pc terminal programs claiming to be ansi,
+ use=klone+color, use=pcansi-m,
+pcansi-25|pcansi25|ibm-pc terminal programs with 25 lines,
+ lines#25, use=pcansi,
+pcansi-33|pcansi33|ibm-pc terminal programs with 33 lines,
+ lines#33, use=pcansi,
+pcansi-43|pcansi43|ibm-pc terminal programs with 43 lines,
+ lines#43, use=pcansi,
+
+# ansi-m -- full ANSI X3.64 with ANSI.SYS-compatible attributes, no color.
+# If you want pound signs rather than dollars, replace `B' with `A'
+# in the <s0ds>, <s1ds>, <s2ds>, and <s3ds> capabilities.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Nov 6 1995
+ansi-m|ansi-mono|ANSI X3.64-1979 terminal with ANSI.SYS compatible attributes,
+ mc5i,
+ cub=\E[%p1%dD, cud=\E[%p1%dB, cuf=\E[%p1%dC,
+ cuu=\E[%p1%dA, dch=\E[%p1%dP, dl=\E[%p1%dM,
+ ech=\E[%p1%dX, el1=\E[1K, hpa=\E[%i%p1%dG, ht=\E[I,
+ ich=\E[%p1%d@, il=\E[%p1%dL, indn=\E[%p1%dS, kbs=^H,
+ kcbt=\E[Z, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ kich1=\E[L, mc4=\E[4i, mc5=\E[5i, nel=\r\E[S,
+ rep=%p1%c\E[%p2%{1}%-%db, rin=\E[%p1%dT, s0ds=\E(B,
+ s1ds=\E)B, s2ds=\E*B, s3ds=\E+B, tbc=\E[2g,
+ vpa=\E[%i%p1%dd, use=pcansi-m,
+
+# ansi -- this terminfo expresses the largest subset of X3.64 that will fit in
+# standard terminfo. Assumes ANSI.SYS-compatible attributes and color.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> Nov 6 1995
+ansi|ansi/pc-term compatible with color,
+ u6=\E[%i%d;%dR, u7=\E[6n, u8=\E[?%[;0123456789]c,
+ u9=\E[c,
+ use=ecma+color, use=klone+sgr, use=ansi-m,
+
+# ansi-generic is a vanilla ANSI terminal. This is assumed to implement
+# all the normal ANSI stuff with no extensions. It assumes
+# insert/delete line/char is there, so it won't work with
+# vt100 clones. It assumes video attributes for bold, blink,
+# underline, and reverse, which won't matter much if the terminal
+# can't do some of those. Padding is assumed to be zero, which
+# shouldn't hurt since xon/xoff is assumed.
+ansi-generic|generic ansi standard terminal,
+ am, xon,
+ cols#80, lines#24, use=vanilla, use=ansi+csr, use=ansi+cup,
+ use=ansi+rca, use=ansi+erase, use=ansi+tabs,
+ use=ansi+local, use=ansi+idc, use=ansi+idl, use=ansi+rep,
+ use=ansi+sgrbold, use=ansi+arrows,
+
+#### Linux consoles
+#
+
+# This entry is good for the 1.2.13 or later version of the Linux console.
+#
+# ***************************************************************************
+# * *
+# * WARNING: *
+# * Linuxes come with a default keyboard mapping kcbt=^I. This entry, in *
+# * response to user requests, assumes kcbt=\E[Z, the ANSI/ECMA reverse-tab *
+# * character. Here are the keymap replacement lines that will set this up: *
+# * *
+# keycode 15 = Tab Tab
+# alt keycode 15 = Meta_Tab
+# shift keycode 15 = F26
+# string F26 ="\033[Z"
+# * *
+# * This has to use a key slot which is unfortunate (any unused one will *
+# * do, F26 is the higher-numbered one). The change ought to be built *
+# * into the kernel tables. *
+# * *
+# ***************************************************************************
+#
+# The 1.3.x kernels add color-change capabilities; if yours doesn't have this
+# and it matters, turn off <ccc>. The %02x escape used to implement this is
+# not back-portable to SV curses and not supported in ncurses versions before
+# 1.9.9. All linux kernels since 1.2.13 (at least) set the screen size
+# themselves; this entry assumes that capability.
+#
+# The 2.2.x kernels add a private mode that sets the cursor type; use that to
+# get a block cursor for cvvis.
+# reported by Frank Heckenbach <frank@g-n-u.de>.
+linux|linux console,
+ am, bce, eo, mir, msgr, xenl, xon,
+ it#8, ncv#2,
+ acsc=+\020\,\021-\030.^Y0\333`\004a\261f\370g\361h\260i\316j\331k\277l\332m\300n\305o~p\304q\304r\304s_t\303u\264v\301w\302x\263y\363z\362{\343|\330}\234~\376,
+ bel=^G, civis=\E[?25l\E[?1c, clear=\E[H\E[J,
+ cnorm=\E[?25h\E[?0c, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=^J, cuf1=\E[C, cup=\E[%i%p1%d;%p2%dH,
+ cuu1=\E[A, cvvis=\E[?25h\E[?8c, dch=\E[%p1%dP, dch1=\E[P,
+ dim=\E[2m, dl=\E[%p1%dM, dl1=\E[M, ech=\E[%p1%dX, ed=\E[J,
+ el=\E[K, el1=\E[1K, flash=\E[?5h\E[?5l$<200/>, home=\E[H,
+ hpa=\E[%i%p1%dG, ht=^I, hts=\EH, ich=\E[%p1%d@, ich1=\E[@,
+ il=\E[%p1%dL, il1=\E[L, ind=^J, kb2=\E[G, kbs=\177,
+ kcbt=\E[Z, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ kdch1=\E[3~, kend=\E[4~, kf1=\E[[A, kf10=\E[21~,
+ kf11=\E[23~, kf12=\E[24~, kf13=\E[25~, kf14=\E[26~,
+ kf15=\E[28~, kf16=\E[29~, kf17=\E[31~, kf18=\E[32~,
+ kf19=\E[33~, kf2=\E[[B, kf20=\E[34~, kf3=\E[[C, kf4=\E[[D,
+ kf5=\E[[E, kf6=\E[17~, kf7=\E[18~, kf8=\E[19~, kf9=\E[20~,
+ khome=\E[1~, kich1=\E[2~, knp=\E[6~, kpp=\E[5~, kspd=^Z,
+ nel=^M^J, rc=\E8, rev=\E[7m, ri=\EM, rmir=\E[4l, rmso=\E[27m,
+ rmul=\E[24m, rs1=\Ec\E]R, sc=\E7,
+ sgr=\E[0;10%?%p1%t;7%;%?%p2%t;4%;%?%p3%t;7%;%?%p4%t;5%;%?%p5%t;2%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m,
+ smir=\E[4h, smul=\E[4m, tbc=\E[3g, u6=\E[%i%d;%dR,
+ u7=\E[6n, u8=\E[?6c, u9=\E[c, vpa=\E[%i%p1%dd,
+ use=klone+sgr, use=ecma+color,
+linux-m|Linux console no color,
+ colors@, pairs@,
+ setab@, setaf@, setb@, setf@, use=linux,
+linux-c-nc|linux console 1.3.x hack for ncurses only,
+ ccc,
+ initc=\E]P%p1%x%p2%{255}%*%{1000}%/%02x%p3%{255}%*%{1000}%/%02x%p4%{255}%*%{1000}%/%02x,
+ oc=\E]R,
+ use=linux,
+# From: Dennis Henriksen <opus@osrl.dk>, 9 July 1996
+linux-c|linux console 1.3.6+ with private palette for each virtual console,
+ ccc,
+ colors#8, pairs#64,
+ initc=\E]P%?%p1%{9}%>%t%p1%{10}%-%'a'%+%c%e%p1%d%;%p2%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%p3%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%p4%{255}%&%Pr%gr%{16}%/%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;%gr%{15}%&%Px%?%gx%{9}%>%t%gx%{10}%-%'A'%+%c%e%gx%d%;,
+ oc=\E]R,
+ use=linux,
+
+# See the note on ICH/ICH1 VERSUS RMIR/SMIR near the end of file
+linux-nic|linux with ich/ich1 suppressed for non-curses programs,
+ ich@, ich1@,
+ use=linux,
+
+# This assumes you have used setfont(8) to load one of the Linux koi8-r fonts.
+# acsc entry from Pavel Roskin" <pavel@absolute.spb.su>, 29 Sep 1997.
+linux-koi8|linux with koi8 alternate character set,
+ acsc=+\020\,\021-\030.^Y0\215`\004a\221f\234g\237h\220i\276j\205k\203l\202m\204n\212o~p\0q\0r\0s_t\206u\207v\211w\210x\201y\230z\231{\267|\274~\224,
+ use=linux, use=klone+koi8acs,
+
+# Another entry for KOI8-r with Qing Long's acsc.
+# (which one better complies with the standard?)
+linux-koi8r|linux with koi8-r alternate character set,
+ use=linux, use=klone+koi8acs,
+
+# Entry for the latin1 and latin2 fonts
+linux-lat|linux with latin1 or latin2 alternate character set,
+ acsc=+\020\,\021-\030.^Y0\333`\004a\013f\370g\361h\260i\316j\211k\214l\206m\203n\305o~p\304q\212r\304s_t\207u\215v\301w\302x\205y\363z\362{\343|\330}\234~\376,
+ use=linux,
+
+#### NetBSD consoles
+#
+# pcvt termcap database entries (corresponding to release 3.31)
+# Author's last edit-date: [Fri Sep 15 20:29:10 1995]
+#
+# (For the terminfo master file, I translated these into terminfo syntax.
+# Then I dropped all the pseudo-HP entries. We don't want and can't use
+# the :Xs: flag. Then I split :is: into a size-independent <is1> and a
+# size-dependent <is2>. Finally, I added <rmam>/<smam> -- esr)
+
+# NOTE: <ich1> has been taken out of this entry. For reference, it should
+# be <ich1=\E[@>. For discussion, see ICH/ICH1 VERSUS RMIR/SMIR below.
+# (esr: added <civis> and <cnorm> to resolve NetBSD Problem Report #4583)
+pcvtXX|pcvt vt200 emulator (DEC VT220),
+ am, km, mir, msgr, xenl,
+ it#8, vt#3,
+ acsc=++\,\,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz~~,
+ bel=^G, blink=\E[5m, bold=\E[1m, civis=\E[?25l,
+ clear=\E[H\E[J, cnorm=\E[?25h, cr=^M,
+ csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
+ cud=\E[%p1%dB, cud1=\E[B, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ dch=\E[%p1%dP, dch1=\E[P, dl=\E[%p1%dM, dl1=\E[M, ed=\E[J,
+ el=\E[K, el1=\E[1K, home=\E[H, ht=^I, hts=\EH, ich=\E[%p1%d@,
+ il=\E[%p1%dL, il1=\E[L, ind=\ED, indn=\E[%p1%dS,
+ is1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, kbs=\177,
+ kcub1=\EOD, kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA,
+ kdch1=\E[3~, kf1=\E[17~, kf2=\E[18~, kf3=\E[19~,
+ kf4=\E[20~, kf5=\E[21~, kf6=\E[23~, kf7=\E[24~, kf8=\E[25~,
+ khome=\E[1~, kich1=\E[2~, kll=\E[4~, knp=\E[6~, kpp=\E[5~,
+ nel=\EE, rc=\E8, rev=\E[7m, rf=/usr/share/tabset/vt100,
+ ri=\EM, rin=\E[%p1%dT, rmacs=\E(B, rmam=\E[?7l, rmir=\E[4l,
+ rmkx=\E[?1l\E>, rmso=\E[27m, rmul=\E[24m,
+ rs1=\Ec\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sc=\E7,
+ sgr0=\E[m, smacs=\E(0, smam=\E[?7h, smir=\E[4h,
+ smkx=\E[?1h\E=, smso=\E[7m, smul=\E[4m, tbc=\E[3g,
+
+# NetBSD/FreeBSD vt220 terminal emulator console (pc keyboard & monitor)
+# termcap entries for pure VT220-Emulation and 25, 28, 35, 40, 43 and
+# 50 lines entries; 80 columns
+pcvt25|dec vt220 emulation with 25 lines,
+ cols#80, lines#25,
+ is2=\E[1;25r\E[25;1H, use=pcvtXX,
+pcvt28|dec vt220 emulation with 28 lines,
+ cols#80, lines#28,
+ is2=\E[1;28r\E[28;1H, use=pcvtXX,
+pcvt35|dec vt220 emulation with 35 lines,
+ cols#80, lines#35,
+ is2=\E[1;35r\E[35;1H, use=pcvtXX,
+pcvt40|dec vt220 emulation with 40 lines,
+ cols#80, lines#40,
+ is2=\E[1;40r\E[40;1H, use=pcvtXX,
+pcvt43|dec vt220 emulation with 43 lines,
+ cols#80, lines#43,
+ is2=\E[1;43r\E[43;1H, use=pcvtXX,
+pcvt50|dec vt220 emulation with 50 lines,
+ cols#80, lines#50,
+ is2=\E[1;50r\E[50;1H, use=pcvtXX,
+
+# NetBSD/FreeBSD vt220 terminal emulator console (pc keyboard & monitor)
+# termcap entries for pure VT220-Emulation and 25, 28, 35, 40, 43 and
+# 50 lines entries; 132 columns
+pcvt25w|dec vt220 emulation with 25 lines and 132 cols,
+ cols#132, lines#25,
+ is2=\E[1;25r\E[25;1H, use=pcvtXX,
+pcvt28w|dec vt220 emulation with 28 lines and 132 cols,
+ cols#132, lines#28,
+ is2=\E[1;28r\E[28;1H, use=pcvtXX,
+pcvt35w|dec vt220 emulation with 35 lines and 132 cols,
+ cols#132, lines#35,
+ is2=\E[1;35r\E[35;1H, use=pcvtXX,
+pcvt40w|dec vt220 emulation with 40 lines and 132 cols,
+ cols#132, lines#40,
+ is2=\E[1;40r\E[40;1H, use=pcvtXX,
+pcvt43w|dec vt220 emulation with 43 lines and 132 cols,
+ cols#132, lines#43,
+ is2=\E[1;43r\E[43;1H, use=pcvtXX,
+pcvt50w|dec vt220 emulation with 50 lines and 132 cols,
+ cols#132, lines#50,
+ is2=\E[1;50r\E[50;1H, use=pcvtXX,
+
+# Terminfo entries to enable the use of the ncurses library in colour on a
+# NetBSD-arm32 console (only tested on a RiscPC).
+# Created by Dave Millen <dmill@globalnet.co.uk> 22.07.98
+# modified codes for setf/setb to setaf/setab, then to klone+color, corrected
+# typo in invis - TD
+arm100|arm100-am|Arm(RiscPC) ncurses compatible (for 640x480),
+ am, bce, msgr, xenl, xon,
+ cols#80, it#8, lines#30,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>,
+ clear=\E[H\E[J$<50>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub=\E[%p1%dD, cub1=^H, cud=\E[%p1%dB, cud1=^J,
+ cuf=\E[%p1%dC, cuf1=\E[C$<2>,
+ cup=\E[%i%p1%d;%p2%dH$<5>, cuu=\E[%p1%dA,
+ cuu1=\E[A$<2>, ed=\E[J$<50>, el=\E[K$<3>, el1=\E[1K$<3>,
+ enacs=\E(B\E)0, home=\E[H, ht=^I, hts=\EH, ind=^J,
+ invis=\E[8m$<2>, ka1=\E[q, ka3=\E[s, kb2=\E[r, kbs=^H,
+ kc1=\E[p, kc3=\E[n, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C,
+ kcuu1=\E[A, kent=\E[M, kf0=\E[y, kf1=\E[P, kf10=\E[x,
+ kf2=\E[Q, kf3=\E[R, kf4=\E[S, kf5=\E[t, kf6=\E[u, kf7=\E[v,
+ kf8=\E[l, kf9=\E[w, rc=\E8, rev=\E[6m$<2>, ri=\EM$<5>,
+ rmacs=^O, rmam=\E[?7l, rmkx=\E[?1l\E>, rmso=\E[m$<2>,
+ rmul=\E[m$<2>, rs2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h,
+ sc=\E7,
+ sgr=\E[0%?%p1%p6%|%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;m%?%p9%t\016%e\017%;,
+ sgr0=\E[m\017$<2>, smacs=^N, smam=\E[?7h, smkx=\E[?1h\E=,
+ smso=\E[7m$<2>, smul=\E[4m$<2>, tbc=\E[3g,
+ use=ecma+sgr, use=klone+color,
+arm100-w|arm100-wam|Arm(RiscPC) ncurses compatible (for 1024x768),
+ cols#132, lines#50, use=arm100,
+
+# NetBSD/x68k console vt200 emulator. This port runs on a 68K machine
+# manufactured by Sharp for the Japanese market.
+# From Minoura Makoto <minoura@netlaputa.or.jp>, 12 May 1996
+x68k|x68k-ite|NetBSD/x68k ITE,
+ cols#96, lines#32,
+ kclr=\E[9~, khlp=\E[28~, use=vt220,
+
+# <tv@pobox.com>:
+# Entry for the DNARD OpenFirmware console, close to ANSI but not quite.
+#
+# (still unfinished, but good enough so far.)
+ofcons,
+ bw,
+ cols#80, lines#30,
+ bel=^G, blink=\2337;2m, bold=\2331m, clear=^L, cr=^M,
+ cub=\233%p1%dD, cub1=\233D, cud=\233%p1%dB, cud1=\233B,
+ cuf=\233%p1%dC, cuf1=\233C, cup=\233%i%p1%d;%p2%dH,
+ cuu=\233%p1%dA, cuu1=\233A, dch=\233%p1%dP, dch1=\233P,
+ dim=\2332m, dl=\233%p1%dM, dl1=\233M, ed=\233J, el=\233K,
+ flash=^G, ht=^I, ich=\233%p1%d@, ich1=\233@, il=\233%p1%dL,
+ il1=\233L, ind=^J, invis=\2338m, kbs=^H, kcub1=\233D,
+ kcud1=\233B, kcuf1=\233C, kcuu1=\233A, kdch1=\233P,
+ kf1=\2330P, kf10=\2330M, kf2=\2330Q, kf3=\2330W,
+ kf4=\2330x, kf5=\2330t, kf6=\2330u, kf7=\2330q, kf8=\2330r,
+ kf9=\2330p, knp=\233/, kpp=\233?, nel=^M^J, rev=\2337m,
+ rmso=\2330m, rmul=\2330m, sgr0=\2330m,
+
+# NetBSD "wscons" emulator in vt220 mode
+# These are micro-minimal and probably need to be redone for real
+# after the manner of the pcvt entries.
+wsvt25|NetBSD wscons in 25 line DEC VT220 mode,
+ cols#80, lines#25, use=vt220,
+
+wsvt25m|NetBSD wscons in 25 line DEC VT220 mode with Meta,
+ km,
+ cols#80, lines#25, use=vt220,
+
+# `rasterconsole' provided by 4.4BSD, NetBSD and OpenBSD on SPARC, and
+# DECstation/pmax.
+rcons|BSD rasterconsole,
+ use=sun-il,
+# Color version of above. Color currently only provided by NetBSD.
+rcons-color|BSD rasterconsole with ANSI color,
+ bce,
+ colors#8, pairs#64,
+ op=\E[m, setab=\E[4%dm, setaf=\E[3%dm, use=rcons,
+
+#### FreeBSD console entries
+#
+# From: Andrey Chernov <ache@astral.msk.su> 29 Mar 1996
+# Andrey Chernov maintains the FreeBSD termcap distributions.
+#
+# Note: Users of FreeBSD 2.1.0 and older versions must either upgrade
+# or comment out the :cb: capability in the console entry.
+#
+# Alexander Lukyanov reports:
+# I have seen FreeBSD-2.1.5R... The old el1 bug changed, but it is still there.
+# Now el1 clears not only to the line beginning, but also a large chunk
+# of previous line. But there is another bug - ech does not work at all.
+#
+
+# for syscons
+# common entry without semigraphics
+# Bug: The <op> capability resets attributes.
+# Bug? The ech and el1 attributes appear to move the cursor in some cases; for
+# instance el1 does if the cursor is moved to the right margin first. Removed
+# by T.Dickey 97/5/3 (ech=\E[%p1%dX, el1=\E[1K)
+#
+# Setting colors turns off reverse; we cannot guarantee order, so use ncv.
+# Note that this disables standout with color.
+cons25w|ansiw|ansi80x25-raw|freebsd console (25-line raw mode),
+ am, bce, bw, eo, msgr, npc,
+ colors#8, cols#80, it#8, lines#25, ncv#21, pairs#64,
+ bel=^G, blink=\E[5m, bold=\E[1m, cbt=\E[Z, clear=\E[H\E[J,
+ cnorm=\E[=0C, cr=^M, cub=\E[%p1%dD, cub1=^H, cud=\E[%p1%dB,
+ cud1=\E[B, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ cvvis=\E[=1C, dch=\E[%p1%dP, dch1=\E[P, dim=\E[30;1m,
+ dl=\E[%p1%dM, dl1=\E[M, ed=\E[J, el=\E[K, home=\E[H,
+ hpa=\E[%i%p1%d`, ht=^I, ich=\E[%p1%d@, ich1=\E[@,
+ il=\E[%p1%dL, il1=\E[L, ind=\E[S, indn=\E[%p1%dS, kb2=\E[E,
+ kbs=^H, kcbt=\E[Z, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C,
+ kcuu1=\E[A, kdch1=\177, kend=\E[F, kf1=\E[M, kf10=\E[V,
+ kf11=\E[W, kf12=\E[X, kf2=\E[N, kf3=\E[O, kf4=\E[P, kf5=\E[Q,
+ kf6=\E[R, kf7=\E[S, kf8=\E[T, kf9=\E[U, khome=\E[H,
+ kich1=\E[L, knp=\E[G, kpp=\E[I, nel=\E[E, op=\E[x, rev=\E[7m,
+ ri=\E[T, rin=\E[%p1%dT, rmso=\E[m, rs1=\E[x\E[m\Ec,
+ setab=\E[4%p1%dm, setaf=\E[3%p1%dm, sgr0=\E[m,
+ smso=\E[7m, vpa=\E[%i%p1%dd,
+cons25|ansis|ansi80x25|freebsd console (25-line ansi mode),
+ acsc=-\030.^Y0\333`\004a\260f\370g\361h\261i\025j\331k\277l\332m\300n\305q\304t\303u\264v\301w\302x\263y\363z\362~\371,
+ use=cons25w,
+cons25-m|ansis-mono|ansi80x25-mono|freebsd console (25-line mono ansi mode),
+ colors@, pairs@,
+ bold@, dim@, op@, rmul=\E[m, setab@, setaf@, smul=\E[4m, use=cons25,
+cons30|ansi80x30|freebsd console (30-line ansi mode),
+ lines#30, use=cons25,
+cons30-m|ansi80x30-mono|freebsd console (30-line mono ansi mode),
+ lines#30, use=cons25-m,
+cons43|ansi80x43|freebsd console (43-line ansi mode),
+ lines#43, use=cons25,
+cons43-m|ansi80x43-mono|freebsd console (43-line mono ansi mode),
+ lines#43, use=cons25-m,
+cons50|ansil|ansi80x50|freebsd console (50-line ansi mode),
+ lines#50, use=cons25,
+cons50-m|ansil-mono|ansi80x50-mono|freebsd console (50-line mono ansi mode),
+ lines#50, use=cons25-m,
+cons60|ansi80x60|freebsd console (60-line ansi mode),
+ lines#60, use=cons25,
+cons60-m|ansi80x60-mono|freebsd console (60-line mono ansi mode),
+ lines#60, use=cons25-m,
+cons25r|pc3r|ibmpc3r|cons25-koi8-r|freebsd console w/koi8-r cyrillic,
+ acsc=-\030.^Y0\215`\004a\220f\234h\221i\025j\205k\203l\202m\204n\212q\0t\206u\207v\211w\210x\201y\230z\231~\225,
+ use=cons25w,
+cons25r-m|pc3r-m|ibmpc3r-mono|cons25-koi8r-m|freebsd console w/koi8-r cyrillic (mono),
+ colors@, pairs@,
+ op@, rmul=\E[m, setab@, setaf@, smul=\E[4m, use=cons25r,
+cons50r|cons50-koi8r|freebsd console w/koi8-r cyrillic (50 lines),
+ lines#50, use=cons25r,
+cons50r-m|cons50-koi8r-m|freebsd console w/koi8-r cyrillic (50-line mono),
+ lines#50, use=cons25r-m,
+cons60r|cons60-koi8r|freebsd console w/koi8-r cyrillic (60 lines),
+ lines#60, use=cons25r,
+cons60r-m|cons60-koi8r-m|freebsd console w/koi8-r cyrillic (60-line mono),
+ lines#60, use=cons25r-m,
+# ISO 8859-1 FreeBSD console
+cons25l1|cons25-iso8859|freebsd console w/iso 8859-1 chars,
+ acsc=+\253\,\273-\030.\031`\201a\202f\207g\210i\247j\213k\214l\215m\216n\217o\220p\221q\222r\223s\224t\225u\226v\227w\230x\231y\232z\233~\237,
+ use=cons25w,
+cons25l1-m|cons25-iso-m|freebsd console w/iso 8859-1 chars (mono),
+ colors@, pairs@,
+ bold@, dim@, op@, rmul=\E[m, setab@, setaf@, smul=\E[4m, use=cons25l1,
+cons50l1|cons50-iso8859|freebsd console w/iso 8859-1 chars (50 lines),
+ lines#50, use=cons25l1,
+cons50l1-m|cons50-iso-m|freebsd console w/iso 8859-1 chars (50-line mono),
+ lines#50, use=cons25l1-m,
+cons60l1|cons60-iso|freebsd console w/iso 8859-1 chars (60 lines),
+ lines#60, use=cons25l1,
+cons60l1-m|cons60-iso-m|freebsd console w/iso 8859-1 chars (60-line mono),
+ lines#60, use=cons25l1-m,
+
+#### 386BSD and BSD/OS Consoles
+#
+
+# This was the original 386BSD console entry (I think).
+# Some places it's named oldpc3|oldibmpc3.
+# From: Alex R.N. Wetmore <aw2t@andrew.cmu.edu>
+origpc3|origibmpc3|IBM PC 386BSD Console,
+ am, bw, eo, xon,
+ cols#80, lines#25,
+ acsc=j\331k\277l\332m\300n\305q\304t\303u\264v\301w\302x\263,
+ bold=\E[7m, clear=\Ec, cub1=^H, cud1=\E[B, cuf1=\E[C,
+ cup=\E[%i%p1%2d;%p2%2dH, cuu1=\E[A, ed=\E[J, el=\E[K,
+ home=\E[H, ind=\E[S, kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C,
+ kcuu1=\E[A, khome=\E[Y, ri=\E[T, rmso=\E[1;0x\E[2;7x,
+ rmul=\E[1;0x\E[2;7x, sgr0=\E[m\E[1;0x\E[2;7x,
+ smso=\E[1;7x\E[2;0x, smul=\E[1;7x\E[2;0x,
+
+# description of BSD/386 console emulator in version 1.0 (supplied by BSDI)
+oldpc3|oldibmpc3|old IBM PC BSD/386 Console,
+ km,
+ lines#25,
+ bel=^G, bold=\E[=15F, cr=^M, cud1=^J, dim=\E[=8F, dl1=\E[M,
+ ht=^I, il1=\E[L, ind=^J, kbs=^H, kcub1=\E[D, kcud1=\E[B,
+ kcuf1=\E[C, kcuu1=\E[A, khome=\E[H, kich1=\E[L, kll=\E[F,
+ knp=\E[G, kpp=\E[I, nel=^M^J, sgr0=\E[=R,
+
+# Description of BSD/OS console emulator in version 1.1, 2.0, 2.1
+# Note, the emulator supports many of the additional console features
+# listed in the iBCS2 (e.g. character-set selection) though not all
+# are described here. This entry really ought to be upgraded.
+# Also note, the console will also work with fewer lines after doing
+# "stty rows NN", e.g. to use 24 lines.
+# (Color support from Kevin Rosenberg <kevin@cyberport.com>, 2 May 1996)
+# Bug: The <op> capability resets attributes.
+bsdos-pc-nobold|BSD/OS PC console w/o bold,
+ am, eo, km, xon,
+ cols#80, it#8, lines#25,
+ bel=^G, clear=\Ec, cr=^M, cub=\E[%p1%dD, cub1=^H,
+ cud=\E[%p1%dB, cud1=^J, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ dl=\E[%p1%dM, dl1=\E[M, ed=\E[J, el=\E[K, home=\E[H, ht=^I,
+ il=\E[%p1%dL, il1=\E[L, ind=^J, kbs=^H, kcub1=\E[D,
+ kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A, khome=\E[H, kich1=\E[L,
+ kll=\E[F, knp=\E[G, kpp=\E[I, nel=^M^J, rc=\E8, sc=\E7,
+ sgr=\E[0;10%?%p1%t;7%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m%?%p5%t\E[=8F%;,
+ use=klone+sgr, use=klone+color,
+bsdos-pc|IBM PC BSD/OS Console,
+ sgr=\E[0;10%?%p1%t;7%;%?%p2%t;1%;%?%p3%t;7%;%?%p4%t;5%;%?%p6%t;1%;%?%p7%t;8%;%?%p9%t;11%;m, use=bsdos-pc-nobold,
+
+# Old names for BSD/OS PC console used in releases before 4.1.
+pc3|BSD/OS on the PC Console,
+ use=bsdos-pc-nobold,
+ibmpc3|pc3-bold|BSD/OS on the PC Console with bold instead of underline,
+ use=bsdos-pc,
+
+# BSD/OS on the SPARC
+bsdos-sparc|Sun SPARC BSD/OS Console,
+ use=sun,
+
+# BSD/OS on the PowerPC
+bsdos-ppc|PowerPC BSD/OS Console,
+ use=bsdos-pc,
+
+#### DEC VT100 and compatibles
+#
+# DEC terminals from the vt100 forward are collected here. Older DEC terminals
+# and micro consoles can be found in the `obsolete' section. More details on
+# the relationship between the VT100 and ANSI X3.64/ISO 6429/ECMA-48 may be
+# found near the end of this file.
+#
+# Except where noted, these entries are DEC's official terminfos.
+# Contact Bill Hedberg <hedberg@hannah.enet.dec.com> of Terminal Support
+# Engineering for more information. Updated terminfos and termcaps
+# are kept available at ftp://gatekeeper.dec.com/pub/DEC/termcaps.
+#
+# In October 1995 DEC sold its terminals business, including the VT and Dorio
+# line and trademark, to SunRiver Data Systems. SunRiver has since changed
+# its name to Boundless Technologies; see http://www.boundless.com.
+#
+
+# NOTE: Any VT100 emulation, whether in hardware or software, almost
+# certainly includes what DEC called the `Level 1 editing extension' codes;
+# only the very oldest VT100s lacked these and there probably aren't any of
+# those left alive. To capture these, use one of the VT102 entries.
+#
+# Note that the <xenl> glitch in vt100 is not quite the same as on the Concept,
+# since the cursor is left in a different position while in the
+# weird state (concept at beginning of next line, vt100 at end
+# of this line) so all versions of vi before 3.7 don't handle
+# <xenl> right on vt100. The correct way to handle <xenl> is when
+# you output the char in column 80, immediately output CR LF
+# and then assume you are in column 1 of the next line. If <xenl>
+# is on, am should be on too.
+#
+# I assume you have smooth scroll off or are at a slow enough baud
+# rate that it doesn't matter (1200? or less). Also this assumes
+# that you set auto-nl to "on", if you set it off use vt100-nam
+# below.
+#
+# The padding requirements listed here are guesses. It is strongly
+# recommended that xon/xoff be enabled, as this is assumed here.
+#
+# The vt100 uses <rs2> and <rf> rather than <is2>/<tbc>/<hts> because the
+# tab settings are in non-volatile memory and don't need to be
+# reset upon login. Also setting the number of columns glitches
+# the screen annoyingly. You can type "reset" to get them set.
+#
+# The VT100 series terminals have cursor ("arrows") keys which can operate
+# in two different modes: Cursor Mode and Application Mode. Cursor Mode
+# is the reset state, and is assumed to be the normal state. Application
+# Mode is the "set" state. In Cursor Mode, the cursor keys transmit
+# "Esc [ {code}" sequences, conforming to ANSI standards. In Application
+# Mode, the cursor keys transmit "Esc O <code>" sequences. Application Mode
+# was provided primarily as an aid to the porting of VT52 applications. It is
+# assumed that the cursor keys are normally in Cursor Mode, and expected that
+# applications such as vi will always transmit the <smkx> string. Therefore,
+# the definitions for the cursor keys are made to match what the terminal
+# transmits after the <smkx> string is transmitted. If the <smkx> string
+# is a null string or is not defined, then cursor keys are assumed to be in
+# "Cursor Mode", and the cursor keys definitions should match that assumption,
+# else the appication may fail. It is also expected that applications will
+# always transmit the <rmkx> string to the terminal before they exit.
+#
+#
+# The VT100 series terminals have an auxiliary keypad, commonly referred to as
+# the "Numeric Keypad", because it is a cluster of numeric and function keys.
+# Application Mode. Numeric Mode is the reset state, and is assumed to be
+# the normal state. Application Mode is the "set" state. In Numeric Mode,
+# the numeric and punctuation keys transmit ASCII 7-bit characters, and the
+# Enter key transmits the same as the Return key (Note: the Return key
+# can be configured to send either LF (\015) or CR LF). In Application Mode,
+# all the keypad keys transmit "Esc O {code}" sequences. The PF1 - PF4 keys
+# always send the same "Esc O {code}" sequences. It is assumed that the keypad
+# is normally in Numeric Mode. If an application requires that the keypad be
+# in Application Mode then it is expected that the user, or the application,
+# will set the TERM environment variable to point to a terminfo entry which has
+# defined the <smkx> string to include the codes that switch the keypad into
+# Application Mode, and the terminfo entry will also define function key
+# fields to match the Application Mode control codes. If the <smkx> string
+# is a null string or is not defined, then the keypad is assumed to be in
+# Numeric Mode. If the <smkx> string switches the keypad into Application
+# Mode, it is expected that the <rmkx> string will contain the control codes
+# necessary to reset the keypad to "Normal" mode, and it is also expected that
+# applications which transmit the <smkx> string will also always transmit the
+# <rmkx> string to the terminal before they exit.
+#
+# Here's a diagram of the VT100 keypad keys with their bindings.
+# The top line is the name of the key (some DEC keyboards have the keys
+# labelled somewhat differently, like GOLD instead of PF1, but this is
+# the most "official" name). The second line is the escape sequence it
+# generates in Application Keypad mode (where "$" means the ESC
+# character). The third line contains two items, first the mapping of
+# the key in terminfo, and then in termcap.
+# _______________________________________
+# | PF1 | PF2 | PF3 | PF4 |
+# | $OP | $OQ | $OR | $OS |
+# |_kf1__k1_|_kf2__k2_|_kf3__k3_|_kf4__k4_|
+# | 7 8 9 - |
+# | $Ow | $Ox | $Oy | $Om |
+# |_kf9__k9_|_kf10_k;_|_kf0__k0_|_________|
+# | 4 | 5 | 6 | , |
+# | $Ot | $Ou | $Ov | $Ol |
+# |_kf5__k5_|_kf6__k6_|_kf7__k7_|_kf8__k8_|
+# | 1 | 2 | 3 | |
+# | $Oq | $Or | $Os | enter |
+# |_ka1__K1_|_kb2__K2_|_ka3__K3_| $OM |
+# | 0 | . | |
+# | $Op | $On | |
+# |___kc1_______K4____|_kc3__K5_|_kent_@8_|
+#
+# And here, for those of you with orphaned VT100s lacking documentation, is
+# a description of the soft switches invoked when you do `Set Up'.
+#
+# Scroll 0-Jump Shifted 3 0-#
+# | 1-Smooth | 1-British pound sign
+# | Autorepeat 0-Off | Wrap Around 0-Off
+# | | 1-On | | 1-On
+# | | Screen 0-Dark Bkg | | New Line 0-Off
+# | | | 1-Light Bkg | | | 1-On
+# | | | Cursor 0-Underline | | | Interlace 0-Off
+# | | | | 1-Block | | | | 1-On
+# | | | | | | | |
+# 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 <--Standard Settings
+# | | | | | | | |
+# | | | Auto XON/XOFF 0-Off | | | Power 0-60 Hz
+# | | | 1-On | | | 1-50 Hz
+# | | Ansi/VT52 0-VT52 | | Bits Per Char. 0-7 Bits
+# | | 1-ANSI | | 1-8 Bits
+# | Keyclick 0-Off | Parity 0-Off
+# | 1-On | 1-On
+# Margin Bell 0-Off Parity Sense 0-Odd
+# 1-On 1-Even
+#
+# The following SET-UP modes are assumed for normal operation:
+# ANSI_MODE AUTO_XON/XOFF_ON NEWLINE_OFF 80_COLUMNS
+# WRAP_AROUND_ON JUMP_SCROLL_OFF
+# Other SET-UP modes may be set for operator convenience or communication
+# requirements; I recommend
+# AUTOREPEAT_ON BLOCK_CURSOR MARGIN_BELL_OFF SHIFTED_3_#
+# Unless you have a graphics add-on such as Digital Engineering's VT640
+# (and even then, whenever it can be arranged!) you should set
+# INTERLACE_OFF
+#
+# (vt100: I added <rmam>/<smam> based on the init string, also <OTbs>. -- esr)
+vt100|vt100-am|dec vt100 (w/advanced video),
+ am, msgr, xenl, xon,
+ cols#80, it#8, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>,
+ clear=\E[H\E[J$<50>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub=\E[%p1%dD, cub1=^H, cud=\E[%p1%dB, cud1=^J,
+ cuf=\E[%p1%dC, cuf1=\E[C$<2>,
+ cup=\E[%i%p1%d;%p2%dH$<5>, cuu=\E[%p1%dA,
+ cuu1=\E[A$<2>, ed=\E[J$<50>, el=\E[K$<3>, el1=\E[1K$<3>,
+ enacs=\E(B\E)0, home=\E[H, ht=^I, hts=\EH, ind=^J, ka1=\EOq,
+ ka3=\EOs, kb2=\EOr, kbs=^H, kc1=\EOp, kc3=\EOn, kcub1=\EOD,
+ kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA, kent=\EOM, kf0=\EOy,
+ kf1=\EOP, kf10=\EOx, kf2=\EOQ, kf3=\EOR, kf4=\EOS, kf5=\EOt,
+ kf6=\EOu, kf7=\EOv, kf8=\EOl, kf9=\EOw, rc=\E8,
+ rev=\E[7m$<2>, ri=\EM$<5>, rmacs=^O, rmam=\E[?7l,
+ rmkx=\E[?1l\E>, rmso=\E[m$<2>, rmul=\E[m$<2>,
+ rs2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sc=\E7,
+ sgr=\E[0%?%p1%p6%|%t;1%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;m%?%p9%t\016%e\017%;,
+ sgr0=\E[m\017$<2>, smacs=^N, smam=\E[?7h, smkx=\E[?1h\E=,
+ smso=\E[7m$<2>, smul=\E[4m$<2>, tbc=\E[3g,
+vt100nam|vt100-nam|vt100 no automargins,
+ am@, xenl@, use=vt100-am,
+vt100-vb|dec vt100 (w/advanced video) & no beep,
+ bel@, flash=\E[?5h\E[?5l, use=vt100,
+
+# Ordinary vt100 in 132 column ("wide") mode.
+vt100-w|vt100-w-am|dec vt100 132 cols (w/advanced video),
+ cols#132, lines#24,
+ rs2=\E>\E[?3h\E[?4l\E[?5l\E[?8h, use=vt100-am,
+vt100-w-nam|vt100-nam-w|dec vt100 132 cols (w/advanced video no automargin),
+ cols#132, lines#14, vt@,
+ rs2=\E>\E[?3h\E[?4l\E[?5l\E[?8h, use=vt100-nam,
+
+# vt100 with no advanced video.
+vt100-nav|vt100 without advanced video option,
+ xmc#1,
+ blink@, bold@, rev@, rmso=\E[m, rmul@, sgr@, sgr0@, smso=\E[7m,
+ smul@,
+ use=vt100,
+vt100-nav-w|vt100-w-nav|dec vt100 132 cols 14 lines (no advanced video option),
+ cols#132, lines#14, use=vt100-nav,
+
+# vt100 with one of the 24 lines used as a status line.
+# We put the status line on the top.
+vt100-s|vt100-s-top|vt100-top-s|vt100 for use with top sysline,
+ eslok, hs,
+ lines#23,
+ clear=\E[2;1H\E[J$<50>, csr=\E[%i%i%p1%d;%p2%dr,
+ cup=\E[%i%p1%{1}%+%d;%p2%dH$<5>, dsl=\E7\E[1;24r\E8,
+ fsl=\E8, home=\E[2;1H, is2=\E7\E[2;24r\E8,
+ tsl=\E7\E[1;%p1%dH\E[1K, use=vt100-am,
+
+# Status line at bottom.
+# Clearing the screen will clobber status line.
+vt100-s-bot|vt100-bot-s|vt100 for use with bottom sysline,
+ eslok, hs,
+ lines#23,
+ dsl=\E7\E[1;24r\E8, fsl=\E8, is2=\E[1;23r\E[23;1H,
+ tsl=\E7\E[24;%p1%dH\E[1K,
+ use=vt100-am,
+
+# Most of the `vt100' emulators out there actually emulate a vt102
+# This entry (or vt102-nsgr) is probably the right thing to use for
+# these.
+vt102|dec vt102,
+ mir,
+ dch1=\E[P, dl1=\E[M, il1=\E[L, rmir=\E[4l, smir=\E[4h, use=vt100,
+vt102-w|dec vt102 in wide mode,
+ cols#132,
+ rs3=\E[?3h, use=vt102,
+
+# Many brain-dead PC comm programs that pretend to be `vt100-compatible'
+# fail to interpret the ^O and ^N escapes properly. Symptom: the <sgr0>
+# string in the canonical vt100 entry above leaves the screen littered
+# with little snowflake or star characters (IBM PC ROM character \017 = ^O)
+# after highlight turnoffs. This entry should fix that, and even leave
+# ACS support working, at the cost of making multiple-highlight changes
+# slightly more expensive.
+# From: Eric S. Raymond <esr@snark.thyrsus.com> July 22 1995
+vt102-nsgr|vt102 no sgr (use if you see snowflakes after highlight changes),
+ sgr@, sgr0=\E[m,
+ use=vt102,
+
+# VT125 Graphics CRT. Clear screen also erases graphics
+vt125|vt125 graphics terminal,
+ clear=\E[H\E[2J\EPpS(E)\E\\$<50>, use=vt100,
+
+# This isn't a DEC entry, it came from University of Wisconsin.
+# (vt131: I added <rmam>/<smam> based on the init string, also <OTbs> -- esr)
+vt131|dec vt131,
+ am, xenl,
+ cols#80, it#8, lines#24, vt#3,
+ bel=^G, blink=\E[5m$<2/>, bold=\E[1m$<2/>,
+ clear=\E[;H\E[2J$<50/>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=^J, cuf1=\E[C$<2/>,
+ cup=\E[%i%p1%d;%p2%dH$<5/>, cuu1=\E[A$<2/>,
+ ed=\E[J$<50/>, el=\E[K$<3/>, home=\E[H, ht=^I,
+ is2=\E[1;24r\E[24;1H, kbs=^H, kcub1=\EOD, kcud1=\EOB,
+ kcuf1=\EOC, kcuu1=\EOA, kf1=\EOP, kf2=\EOQ, kf3=\EOR,
+ kf4=\EOS, nel=^M^J, rc=\E8, rev=\E[7m$<2/>, ri=\EM$<5/>,
+ rmam=\E[?7h, rmkx=\E[?1l\E>, rmso=\E[m$<2/>,
+ rmul=\E[m$<2/>,
+ rs1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sc=\E7,
+ sgr0=\E[m$<2/>, smam=\E[?7h, smkx=\E[?1h\E=,
+ smso=\E[7m$<2/>, smul=\E[4m$<2/>,
+
+# vt132 - like vt100 but slower and has ins/del line and such.
+# I'm told that <smir>/<rmir> are backwards in the terminal from the
+# manual and from the ANSI standard, this describes the actual
+# terminal. I've never actually used a vt132 myself, so this
+# is untested.
+#
+vt132|DEC vt132,
+ xenl,
+ dch1=\E[P$<7>, dl1=\E[M$<99>, il1=\E[L$<99>, ind=\n$<30>,
+ ip=$<7>, rmir=\E[4h, smir=\E[4l,
+ use=vt100,
+
+# This vt220 description maps F5--F9 to the second block of function keys
+# at the top of the keyboard. The "DO" key is used as F10 to avoid conflict
+# with the key marked (ESC) on the vt220. See vt220d for an alternate mapping.
+# PF1--PF4 are used as F1--F4.
+#
+vt220-old|vt200-old|DEC VT220 in vt100 emulation mode,
+ am, mir, xenl, xon,
+ cols#80, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>, civis=\E[?25l,
+ clear=\E[H\E[2J$<50>, cnorm=\E[?25h, cr=^M,
+ csr=\E[%i%p1%d;%p2%dr, cub1=^H, cud1=\E[B, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH$<10>, cuu1=\E[A, dch1=\E[P,
+ dl1=\E[M, ed=\E[J$<50>, el=\E[K$<3>, home=\E[H, ht=^I,
+ if=/usr/share/tabset/vt100, il1=\E[L, ind=\ED$<20/>,
+ is2=\E[1;24r\E[24;1H, kbs=^H, kcub1=\E[D, kcud1=\E[B,
+ kcuf1=\E[C, kcuu1=\E[A, kdch1=\E[3~, kend=\E[4~, kf1=\EOP,
+ kf10=\E[29~, kf2=\EOQ, kf3=\EOR, kf4=\EOS, kf5=\E[17~,
+ kf6=\E[18~, kf7=\E[19~, kf8=\E[20~, kf9=\E[21~,
+ khome=\E[1~, kich1=\E[2~, knp=\E[6~, kpp=\E[5~, rc=\E8,
+ rev=\E[7m$<2>, rf=/usr/share/tabset/vt100,
+ ri=\EM$<14/>, rmacs=\E(B$<4>, rmam=\E[?7l, rmir=\E[4l,
+ rmso=\E[27m, rmul=\E[24m,
+ rs2=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sc=\E7,
+ sgr=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;,
+ sgr0=\E[m$<2>, smacs=\E(0$<2>, smam=\E[?7h, smir=\E[4h,
+ smso=\E[7m, smul=\E[4m,
+
+# A much better description of the VT200/220; used to be vt220-8
+vt220|vt200|dec vt220,
+ am, mc5i, mir, msgr, xenl, xon,
+ cols#80, it#8, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m, bold=\E[1m, clear=\E[H\E[J, cr=^M,
+ csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
+ cud=\E[%p1%dB, cud1=^J, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ dch=\E[%p1%dP, dch1=\E[P, dl=\E[%p1%dM, dl1=\E[M,
+ ech=\E[%p1%dX, ed=\E[J, el=\E[K, el1=\E[1K, enacs=\E)0,
+ flash=\E[?5h$<200/>\E[?5l, home=\E[H, ht=^I, hts=\EH,
+ ich=\E[%p1%d@, if=/usr/share/tabset/vt100,
+ il=\E[%p1%dL, il1=\E[L, ind=\ED,
+ is2=\E[?7h\E[>\E[?1h\E F\E[?4l, kbs=^H, kcub1=\E[D,
+ kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A, kf1=\EOP, kf10=\E[21~,
+ kf11=\E[23~, kf12=\E[24~, kf13=\E[25~, kf14=\E[26~,
+ kf17=\E[31~, kf18=\E[32~, kf19=\E[33~, kf2=\EOQ,
+ kf20=\E[34~, kf3=\EOR, kf4=\EOS, kf6=\E[17~, kf7=\E[18~,
+ kf8=\E[19~, kf9=\E[20~, kfnd=\E[1~, khlp=\E[28~,
+ khome=\E[H, kich1=\E[2~, knp=\E[6~, kpp=\E[5~, krdo=\E[29~,
+ kslt=\E[4~, lf1=pf1, lf2=pf2, lf3=pf3, lf4=pf4, mc0=\E[i,
+ mc4=\E[4i, mc5=\E[5i, nel=\EE, rc=\E8, rev=\E[7m, ri=\EM,
+ rmacs=^O, rmam=\E[?7l, rmir=\E[4l, rmso=\E[27m,
+ rmul=\E[24m, rs1=\E[?3l, sc=\E7, sgr0=\E[m, smacs=^N,
+ smam=\E[?7h, smir=\E[4h, smso=\E[7m, smul=\E[4m, tbc=\E[3g,
+vt220-w|vt200-w|DEC vt220 in wide mode,
+ cols#132,
+ rs3=\E[?3h, use=vt220,
+vt220-8bit|vt220-8|vt200-8bit|vt200-8|dec vt220/200 in 8-bit mode,
+ am, mc5i, mir, msgr, xenl, xon,
+ cols#80, it#8, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\2335m, bold=\2331m, clear=\233H\233J, cr=^M,
+ csr=\233%i%p1%d;%p2%dr, cub=\233%p1%dD, cub1=^H,
+ cud=\233%p1%dB, cud1=^J, cuf=\233%p1%dC, cuf1=\233C,
+ cup=\233%i%p1%d;%p2%dH, cuu=\233%p1%dA, cuu1=\233A,
+ dch=\233%p1%dP, dch1=\233P, dl=\233%p1%dM, dl1=\233M,
+ ech=\233%p1%dX, ed=\233J, el=\233K, el1=\2331K, enacs=\E)0,
+ flash=\233?5h$<200/>\233?5l, home=\233H, ht=^I, hts=\EH,
+ ich=\233%p1%d@, if=/usr/share/tabset/vt100,
+ il=\233%p1%dL, il1=\233L, ind=\ED,
+ is2=\233?7h\233>\233?1h\E F\233?4l, kbs=^H,
+ kcub1=\233D, kcud1=\233B, kcuf1=\233C, kcuu1=\233A,
+ kf1=\EOP, kf10=\23321~, kf11=\23323~, kf12=\23324~,
+ kf13=\23325~, kf14=\23326~, kf17=\23331~, kf18=\23332~,
+ kf19=\23333~, kf2=\EOQ, kf20=\23334~, kf3=\EOR, kf4=\EOS,
+ kf6=\23317~, kf7=\23318~, kf8=\23319~, kf9=\23320~,
+ kfnd=\2331~, khlp=\23328~, khome=\233H, kich1=\2332~,
+ knp=\2336~, kpp=\2335~, krdo=\23329~, kslt=\2334~, lf1=pf1,
+ lf2=pf2, lf3=pf3, lf4=pf4, mc0=\233i, mc4=\2334i, mc5=\2335i,
+ nel=\EE, rc=\E8, rev=\2337m, ri=\EM, rmacs=^O, rmam=\233?7l,
+ rmir=\2334l, rmso=\23327m, rmul=\23324m, rs1=\233?3l,
+ sc=\E7, sgr0=\233m, smacs=^N, smam=\233?7h, smir=\2334h,
+ smso=\2337m, smul=\2334m, tbc=\2333g,
+
+#
+# vt220d:
+# This vt220 description regards F6--F10 as the second block of function keys
+# at the top of the keyboard. This mapping follows the description given
+# in the VT220 Programmer Reference Manual and agrees with the labeling
+# on some terminals that emulate the vt220. There is no support for an F5.
+# See vt220 for an alternate mapping.
+#
+vt220d|DEC VT220 in vt100 mode with DEC function key labeling,
+ kf10=\E[21~, kf11=\E[23~, kf12=\E[24~, kf13=\E[25~,
+ kf14=\E[26~, kf15=\E[28~, kf16=\E[29~, kf17=\E[31~,
+ kf18=\E[32~, kf19=\E[33~, kf20=\E[34~, kf5@, kf6=\E[17~,
+ kf7=\E[18~, kf8=\E[19~, kf9=\E[20~,
+ use=vt220-old,
+
+vt220-nam|v200-nam|VT220 in vt100 mode with no auto margins,
+ am@,
+ rs2=\E>\E[?3l\E[?4l\E[?5l\E[?7l\E[?8h, use=vt220,
+
+# vt220 termcap written Tue Oct 25 20:41:10 1988 by Alex Latzko
+# (not an official DEC entry!)
+# The problem with real vt220 terminals is they don't send escapes when
+# in vt220 mode. This can be gotten around two ways. 1> don't send
+# escapes or 2> put the vt220 into vt100 mode and use all the nifty
+# features of vt100 advanced video which it then has.
+#
+# This entry takes the view of putting a vt220 into vt100 mode so
+# you can use the escape key in emacs and everything else which needs it.
+#
+# You probably don't want to use this on a VMS machine since VMS will think
+# it has a vt220 and will get fouled up coming out of emacs
+#
+# From: Alexander Latzko <latzko@marsenius.rutgers.edu>, 30 Dec 1996
+# (Added vt100 <rc>,<sc> to quiet a tic warning -- esr)
+vt200-js|vt220-js|dec vt200 series with jump scroll,
+ am,
+ cols#80,
+ bel=^G, clear=\E[H\E[J, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=^J, cup=\E[%i%p1%d;%p2%dH, cuu1=\E[A,
+ dch1=\E[P, dl1=\E[M, ed=\E[J, el=\E[K, home=\E[H, ht=^I,
+ il1=\E[L, ind=\ED,
+ is2=\E[61"p\E[H\E[?3l\E[?4l\E[?1l\E[?5l\E[?6l\E[?7h\E[?8h\E[?25h\E>\E[m,
+ kbs=^H, kcub1=\EOD, kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA,
+ kf1=\EOP, kf2=\EOQ, kf3=\EOR, kf4=\EOS, nel=^M\ED, rc=\E8,
+ rf=/usr/lib/tabset/vt100, ri=\EM, rmdc=, rmir=\E[4l,
+ rmkx=\E[?1l\E>, rmso=\E[27m$<5/>, rmul=\E[24m,
+ rs1=\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sc=\E7, smdc=,
+ smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m$<5/>, smul=\E[4m,
+
+
+# This was DEC's vt320. Use the purpose-built one below instead
+#vt320|DEC VT320 in vt100 emulation mode,
+# use=vt220,
+
+#
+# Use v320n for SCO's LYRIX. Otherwise, use Adam Thompson's vt320-nam.
+#
+vt320nam|v320n|DEC VT320 in vt100 emul. mode with NO AUTO WRAP mode,
+ am@,
+ rs2=\E>\E[?3l\E[?4l\E[?5l\E[?7l\E[?8h, use=vt220,
+
+# These entries are not DEC's official ones, they were purpose-built for the
+# VT320. Here are the designer's notes:
+# <kel> is end on a PC kbd. Actually 'select' on a VT. Mapped to
+# 'Erase to End of Field'... since nothing seems to use 'end' anyways...
+# khome is Home on a PC kbd. Actually 'FIND' on a VT.
+# Things that use <knxt> usually use tab anyways... and things that don't use
+# tab usually use <knxt> instead...
+# kprv is same as tab - Backtab is useless...
+# I left out <sgr> because of its RIDICULOUS complexity,
+# and the resulting fact that it causes the termcap translation of the entry
+# to SMASH the 1k-barrier...
+# From: Adam Thompson <athompso@pangea.ca> Sept 10 1995
+# (vt320: uncommented <fsl>, commented out <kslt> to avoid a conflict --esr)
+vt320|vt300|dec vt320 7 bit terminal,
+ am, eslok, hs, mir, msgr, xenl,
+ cols#80, lines#24, wsl#80,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m, bold=\E[1m, civis=\E[?25l,
+ clear=\E[H\E[2J, cnorm=\E[?25h, cr=^M,
+ csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
+ cud=\E[%p1%dB, cud1=^J, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ dch=\E[%p1%dP, dch1=\E[P, dl=\E[%p1%dM, dl1=\E[M,
+ ech=\E[%p1%dX, ed=\E[J, el=\E[K, el1=\E[1K, fsl=\E[0$},
+ home=\E[H, ht=^I, hts=\EH, ich=\E[%p1%d@, il=\E[%p1%dL,
+ il1=\E[L, ind=\ED,
+ is2=\E>\E[?3l\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ ka1=\EOw, ka3=\EOy, kb2=\EOu, kbs=\177, kc1=\EOq, kc3=\EOs,
+ kcub1=\EOD, kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA,
+ kdch1=\E[3~, kel=\E[4~, kent=\EOM, kf1=\EOP, kf10=\E[21~,
+ kf11=\E[23~, kf12=\E[24~, kf13=\E[25~, kf14=\E[26~,
+ kf15=\E[28~, kf16=\E[29~, kf17=\E[31~, kf18=\E[32~,
+ kf19=\E[33~, kf2=\EOQ, kf20=\E[34~, kf3=\EOR, kf4=\EOS,
+ kf6=\E[17~, kf7=\E[18~, kf8=\E[19~, kf9=\E[20~,
+ khome=\E[1~, kich1=\E[2~, knp=\E[6~, knxt=^I, kpp=\E[5~,
+ kprv=\E[Z, mc0=\E[i, mc4=\E[?4i, mc5=\E[?5i, nel=\EE, rc=\E8,
+ rev=\E[7m, rf=/usr/share/tabset/vt300, ri=\EM,
+ rmacs=\E(B, rmam=\E[?7l, rmir=\E[4l, rmkx=\E[?1l\E>,
+ rmso=\E[m, rmul=\E[m,
+ rs2=\E>\E[?3l\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ sc=\E7, sgr0=\E[m, smacs=\E(0, smam=\E[?7h, smir=\E[4h,
+ smkx=\E[?1h\E=, smso=\E[7m, smul=\E[4m, tbc=\E[3g,
+ tsl=\E[1$}\E[H\E[K,
+vt320-nam|vt300-nam|dec vt320 7 bit terminal with no am to make SAS happy,
+ am@,
+ is2=\E>\E[?3l\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H,
+ rs2=\E>\E[?3l\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H,
+ use=vt320,
+# We have to init 132-col mode, not 80-col mode.
+vt320-w|vt300-w|dec vt320 wide 7 bit terminal,
+ cols#132, wsl#132,
+ is2=\E>\E[?3h\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ rs2=\E>\E[?3h\E[?4l\E[5?l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ use=vt320,
+vt320-w-nam|vt300-w-nam|dec vt320 wide 7 bit terminal with no am,
+ am@,
+ is2=\E>\E[?3h\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H,
+ rs2=\E>\E[?3h\E[?4l\E[5?l\E[?7l\E[?8h\E[1;24r\E[24;1H,
+ use=vt320-w,
+
+# VT330 and VT340 -- These are ReGIS and SIXEL graphics terminals
+# which are pretty much a superset of the VT320. They have the
+# host writable status line, yet another different DRCS matrix size,
+# and such, but they add the DEC Technical character set, Multiple text
+# pages, selectable length pages, and the like. The difference between
+# the vt330 and vt340 is that the latter has only 2 planes and a monochrome
+# monitor, the former has 4 planes and a color monitor. These terminals
+# support VT131 and ANSI block mode, but as with much of these things,
+# termcap/terminfo doesn't deal with these features.
+#
+# Note that this entry is set up in what was the standard way for GNU
+# Emacs v18 terminal modes to deal with the cursor keys in that the arrow
+# keys were switched into application mode at the same time the numeric pad
+# is switched into application mode. This changes the definitions of the
+# arrow keys. Emacs v19 is smarter and mines its keys directly out of
+# your termcap or terminfo entry,
+#
+# From: Daniel Glasser <dag@persoft.persoft.com>, 13 Oct 1993
+# (vt340: string capability "sb=\E[M" corrected to "sr";
+# also, added <rmam>/<smam> based on the init string -- esr)
+vt340|dec-vt340|vt330|dec-vt330|dec vt340 graphics terminal with 24 line page,
+ am, eslok, hs, mir, msgr, xenl, xon,
+ cols#80, it#8, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ blink=\E[5m, bold=\E[1m, civis=\E[?25l, clear=\E[H\E[J,
+ cnorm=\E[?25h, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub=\E[%p1%dD, cub1=^H, cud=\E[%p1%dB, cud1=^J,
+ cuf=\E[%p1%dC, cuf1=\E[C, cup=\E[%i%p1%d;%p2%dH,
+ cuu=\E[%p1%dA, cuu1=\E[A, cvvis=\E[?25h, dch=\E[%p1%dP,
+ dch1=\E[P, dl=\E[%p1%dM, dl1=\E[M,
+ dsl=\E[2$~\r\E[1$}\E[K\E[$}, ed=\E[J, el=\E[K,
+ flash=\E[?5h\E[?5l$<200/>, fsl=\E[$}, home=\E[H, ht=^I,
+ hts=\EH, ich=\E[%p1%d@, il=\E[%p1%dL, il1=\E[L, ind=\ED,
+ is2=\E<\E F\E>\E[?1h\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ kbs=^H, kcub1=\EOD, kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA,
+ kf1=\EOP, kf2=\EOQ, kf3=\EOR, kf4=\EOS, kf6=\E[17~,
+ kf7=\E[18~, kf8=\E[19~, kf9=\E[20~, lf1=pf1, lf2=pf2,
+ lf3=pf3, lf4=pf4, nel=^M\ED, rc=\E8, rev=\E[7m,
+ rf=/usr/share/tabset/vt300, ri=\EM, rmacs=^O,
+ rmam=\E[?7l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
+ rmul=\E[24m, rs1=\E[?3l, sc=\E7, sgr0=\E[m, smacs=^N,
+ smam=\E[?7h, smir=\E[4h, smkx=\E[?1h\E=, smso=\E[7m,
+ smul=\E[4m, tbc=\E[3g, tsl=\E[2$~\E[1$}\E[1;%dH,
+
+# DEC doesn't supply a vt400 description, so we add Daniel Glasser's
+# (originally written with vt420 as its primary name, and usable for it).
+#
+# VT400/420 -- This terminal is a superset of the vt320. It adds the multiple
+# text pages and long text pages with selectable length of the vt340, along
+# with left and right margins, rectangular area text copy, fill, and erase
+# operations, selected region character attribute change operations,
+# page memory and rectangle checksums, insert/delete column, reception
+# macros, and other features too numerous to remember right now. TERMCAP
+# can only take advantage of a few of these added features.
+#
+# Note that this entry is set up in what was the standard way for GNU
+# Emacs v18 terminal modes to deal with the cursor keys in that the arrow
+# keys were switched into application mode at the same time the numeric pad
+# is switched into application mode. This changes the definitions of the
+# arrow keys. Emacs v19 is smarter and mines its keys directly out of
+# your termcap entry,
+#
+# From: Daniel Glasser <dag@persoft.persoft.com>, 13 Oct 1993
+# (vt400: string capability ":sb=\E[M:" corrected to ":sr=\E[M:";
+# also, added <rmam>/<smam> based on the init string -- esr)
+vt400|vt400-24|dec-vt400|dec vt400 24x80 column autowrap,
+ am, eslok, hs, mir, msgr, xenl, xon,
+ cols#80, it#8, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ blink=\E[5m, bold=\E[1m, civis=\E[?25l,
+ clear=\E[H\E[J$<10/>, cnorm=\E[?25h, cr=^M,
+ csr=\E[%i%p1%d;%p2%dr, cub=\E[%p1%dD, cub1=^H,
+ cud=\E[%p1%dB, cud1=^J, cuf=\E[%p1%dC, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH, cuu=\E[%p1%dA, cuu1=\E[A,
+ cvvis=\E[?25h, dch=\E[%p1%dP, dch1=\E[P, dl=\E[%p1%dM,
+ dl1=\E[M, dsl=\E[2$~\r\E[1$}\E[K\E[$}, ed=\E[J$<10/>,
+ el=\E[K$<4/>, flash=\E[?5h\E[?5l$<200/>, fsl=\E[$},
+ home=\E[H, ht=^I, hts=\EH, ich=\E[%p1%d@, ich1=\E[@,
+ il=\E[%p1%dL, il1=\E[L, ind=\ED,
+ is2=\E<\E F\E>\E[?1h\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h\E[1;24r\E[24;1H,
+ kbs=^H, kcub1=\EOD, kcud1=\EOB, kcuf1=\EOC, kcuu1=\EOA,
+ kf1=\EOP, kf2=\EOQ, kf3=\EOR, kf4=\EOS, kf6=\E[17~,
+ kf7=\E[18~, kf8=\E[19~, kf9=\E[20~, lf1=pf1, lf2=pf2,
+ lf3=pf3, lf4=pf4, nel=^M\ED, rc=\E8, rev=\E[7m,
+ rf=/usr/share/tabset/vt300, ri=\EM, rmacs=^O,
+ rmam=\E[?7l, rmir=\E[4l, rmkx=\E[?1l\E>, rmso=\E[27m,
+ rmul=\E[24m, rs1=\E<\E[?3l\E[!p\E[?7h, sc=\E7, sgr0=\E[m,
+ smacs=^N, smam=\E[?7h, smir=\E[4h, smkx=\E[?1h\E=,
+ smso=\E[7m, smul=\E[4m, tbc=\E[3g,
+ tsl=\E[2$~\E[1$}\E[1;%dH,
+
+# (vt420: I removed <kf0>, it collided with <kf10>. I also restored
+# a missing <sc> -- esr)
+vt420|DEC VT420,
+ am, mir, xenl, xon,
+ cols#80, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>,
+ clear=\E[H\E[2J$<50>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=\E[B, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH$<10>, cuu1=\E[A, dch1=\E[P,
+ dl1=\E[M, ed=\E[J$<50>, el=\E[K$<3>, home=\E[H, ht=^I,
+ if=/usr/share/tabset/vt300, il1=\E[L, ind=\ED,
+ is2=\E[1;24r\E[24;1H, is3=\E[?67h\E[64;1"p, kbs=^H,
+ kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ kdch1=\E[3~, kf1=\EOP, kf10=\E[29~, kf2=\EOQ, kf3=\EOR,
+ kf4=\EOS, kf5=\E[17~, kf6=\E[18~, kf7=\E[19~, kf8=\E[20~,
+ kf9=\E[21~, kfnd=\E[1~, kich1=\E[2~, knp=\E[6~, kpp=\E[5~,
+ kslt=\E[4~, rc=\E8, rev=\E[7m$<2>,
+ rf=/usr/share/tabset/vt300, ri=\EM, rmacs=\E(B$<4>,
+ rmam=\E[?7l, rmir=\E[4l, rmkx=\E>,
+ rmsc=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h,
+ rmso=\E[m, rmul=\E[m, rs3=\E[?67h\E[64;1"p, sc=\E7,
+ sgr=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;,
+ sgr0=\E[m$<2>, smacs=\E(0$<2>, smam=\E[?7h, smir=\E[4h,
+ smkx=\E=, smso=\E[7m, smul=\E[4m,
+
+#
+# DEC VT220 and up support DECUDK (user-defined keys). DECUDK (i.e., pfx)
+# takes two parameters, the key and the string. Translating the key is
+# straightforward (keys 1-5 are not defined on real terminals, though some
+# emulators define these):
+#
+# if (key < 16) then value = key;
+# else if (key < 21) then value = key + 1;
+# else if (key < 25) then value = key + 2;
+# else if (key < 27) then value = key + 3;
+# else if (key < 30) then value = key + 4;
+# else value = key + 5;
+#
+# The string must be the hexadecimal equivalent, e.g., "5052494E" for "PRINT".
+# There's no provision in terminfo for emitting a string in this format, so the
+# application has to know it.
+#
+vt420pc|DEC VT420 w/PC keyboard,
+ kdch1=\177, kend=\E[4~, kf1=\E[11~, kf10=\E[21~,
+ kf11=\E[23~, kf12=\E[24~, kf13=\E[11;2~, kf14=\E[12;2~,
+ kf15=\E[13;2~, kf16=\E[14;2~, kf17=\E[15;2~,
+ kf18=\E[17;2~, kf19=\E[18;2~, kf2=\E[12~, kf20=\E[19;2~,
+ kf21=\E[20;2~, kf22=\E[21;2~, kf23=\E[23;2~,
+ kf24=\E[24;2~, kf25=\E[23~, kf26=\E[24~, kf27=\E[25~,
+ kf28=\E[26~, kf29=\E[28~, kf3=\E[13~, kf30=\E[29~,
+ kf31=\E[31~, kf32=\E[32~, kf33=\E[33~, kf34=\E[34~,
+ kf35=\E[35~, kf36=\E[36~, kf37=\E[23;2~, kf38=\E[24;2~,
+ kf39=\E[25;2~, kf4=\E[14~, kf40=\E[26;2~, kf41=\E[28;2~,
+ kf42=\E[29;2~, kf43=\E[31;2~, kf44=\E[32;2~,
+ kf45=\E[33;2~, kf46=\E[34;2~, kf47=\E[35;2~,
+ kf48=\E[36;2~, kf5=\E[15~, kf6=\E[17~, kf7=\E[18~,
+ kf8=\E[19~, kf9=\E[20~, khome=\E[H,
+ pctrm=USR_TERM\:vt420pcdos\:,
+ pfx=\EP1;1|%?%{16}%p1%>%t%{0}%e%{21}%p1%>%t%{1}%e%{25}%p1%>%t%{2}%e%{27}%p1%>%t%{3}%e%{30}%p1%>%t%{4}%e%{5}%;%p1%+%d/%p2%s\E\\, use=vt420,
+
+vt420pcdos|DEC VT420 w/PC for DOS Merge,
+ lines#25,
+ dispc=%?%p2%{19}%=%t\E\023\021%e%p2%{32}%<%t\E%p2%c%e%p2%{127}%=%t\E\177%e%p2%c%;,
+ pctrm@,
+ rmsc=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h, sgr@,
+ sgr0=\E[m, smsc=\E[?1;2r\E[34h,
+ use=vt420pc,
+
+vt420f|DEC VT420 with VT kbd; VT400 mode; F1-F5 used as Fkeys,
+ kdch1=\177, kf1=\E[11~, kf10=\E[21~, kf11=\E[23~,
+ kf12=\E[24~, kf13=\E[25~, kf14=\E[26~, kf15=\E[28~,
+ kf16=\E[29~, kf17=\E[31~, kf18=\E[32~, kf19=\E[33~,
+ kf2=\E[12~, kf20=\E[34~, kf3=\E[13~, kf4=\E[14~,
+ kf5=\E[15~, kf6=\E[17~, kf7=\E[18~, kf8=\E[19~, kf9=\E[20~,
+ khome=\E[H, lf1=\EOP, lf2=\EOQ, lf3=\EOR, lf4=\EOS,
+ use=vt420,
+
+vt510|DEC VT510,
+ use=vt420,
+vt510pc|DEC VT510 w/PC keyboard,
+ use=vt420pc,
+vt510pcdos|DEC VT510 w/PC for DOS Merge,
+ use=vt420pcdos,
+
+# VT520/VT525
+#
+# The VT520 is a monochrome text terminal capable of managing up to
+# four independent sessions in the terminal. It has multiple ANSI
+# emulations (VT520, VT420, VT320, VT220, VT100, VT PCTerm, SCO Console)
+# and ASCII emulations (WY160/60, PCTerm, 50/50+, 150/120, TVI 950,
+# 925 910+, ADDS A2). This terminfo data is for the ANSI emulations only.
+#
+# Terminal Set-Up is entered by pressing [F3], [Caps Lock]/[F3] or
+# [Alt]/[Print Screen] depending upon which keyboard and which
+# terminal mode is being used. If Set-Up has been disabled or
+# assigned to an unknown key, Set-Up may be entered by pressing
+# [F3] as the first key after power up, regardless of keyboard type.
+# (vt520: I added <rmam>/<smam> based on the init string, also <sc> -- esr)
+vt520|DEC VT520,
+ am, mir, xenl, xon,
+ cols#80, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>,
+ clear=\E[H\E[2J$<50>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=\E[B, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH$<10>, cuu1=\E[A, dch1=\E[P,
+ dl1=\E[M, ed=\E[J$<50>, el=\E[K$<3>, home=\E[H, ht=^I,
+ if=/usr/share/tabset/vt300, il1=\E[L, ind=\ED,
+ is2=\E[1;24r\E[24;1H, is3=\E[?67h\E[64;1"p, kbs=^H,
+ kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ kdch1=\E[3~, kf0=\E[29~, kf1=\EOP, kf10=\E[29~, kf2=\EOQ,
+ kf3=\EOR, kf4=\EOS, kf5=\E[17~, kf6=\E[18~, kf7=\E[19~,
+ kf8=\E[20~, kf9=\E[21~, kfnd=\E[1~, kich1=\E[2~, knp=\E[6~,
+ kpp=\E[5~, kslt=\E[4~,
+ pfx=\EP1;1|%?%{16}%p1%>%t%{0}%e%{21}%p1%>%t%{1}%e%{25}%p1%>%t%{2}%e%{27}%p1%>%t%{3}%e%{30}%p1%>%t%{4}%e%{5}%;%p1%+%d/%p2%s\E\\,
+ rc=\E8, rev=\E[7m$<2>, rf=/usr/share/tabset/vt300,
+ ri=\EM, rmacs=\E(B$<4>, rmam=\E[?7l, rmir=\E[4l,
+ rmsc=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h,
+ rmso=\E[m, rmul=\E[m, rs3=\E[?67h\E[64;1"p, sc=\E7,
+ sgr=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;,
+ sgr0=\E[m$<2>, smacs=\E(0$<2>, smam=\E[?7h, smir=\E[4h,
+ smso=\E[7m, smul=\E[4m,
+
+# (vt525: I added <rmam>/<smam> based on the init string;
+# removed <rmso>=\E[m, <rmul>=\E[m, added <sc> -- esr)
+vt525|DEC VT525,
+ am, mir, xenl, xon,
+ cols#80, lines#24, vt#3,
+ acsc=``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~,
+ bel=^G, blink=\E[5m$<2>, bold=\E[1m$<2>,
+ clear=\E[H\E[2J$<50>, cr=^M, csr=\E[%i%p1%d;%p2%dr,
+ cub1=^H, cud1=\E[B, cuf1=\E[C,
+ cup=\E[%i%p1%d;%p2%dH$<10>, cuu1=\E[A, dch1=\E[P,
+ dl1=\E[M, ed=\E[J$<50>, el=\E[K$<3>, home=\E[H, ht=^I,
+ if=/usr/share/tabset/vt300, il1=\E[L, ind=\ED,
+ is2=\E[1;24r\E[24;1H, is3=\E[?67h\E[64;1"p, kbs=^H,
+ kcub1=\E[D, kcud1=\E[B, kcuf1=\E[C, kcuu1=\E[A,
+ kdch1=\E[3~, kf0=\E[29~, kf1=\EOP, kf10=\E[29~, kf2=\EOQ,
+ kf3=\EOR, kf4=\EOS, kf5=\E[17~, kf6=\E[18~, kf7=\E[19~,
+ kf8=\E[20~, kf9=\E[21~, kfnd=\E[1~, kich1=\E[2~, knp=\E[6~,
+ kpp=\E[5~, kslt=\E[4~,
+ pfx=\EP1;1|%?%{16}%p1%>%t%{0}%e%{21}%p1%>%t%{1}%e%{25}%p1%>%t%{2}%e%{27}%p1%>%t%{3}%e%{30}%p1%>%t%{4}%e%{5}%;%p1%+%d/%p2%s\E\\,
+ rc=\E8, rev=\E[7m$<2>, rf=/usr/share/tabset/vt300,
+ ri=\EM, rmacs=\E(B$<4>, rmam=\E[?7l, rmir=\E[4l,
+ rmsc=\E[?0;0r\E>\E[?3l\E[?4l\E[?5l\E[?7h\E[?8h,
+ rmso=\E[m, rmul=\E[m, rs3=\E[?67h\E[64;1"p, sc=\E7,
+ sgr=\E[0%?%p6%t;1%;%?%p2%t;4%;%?%p4%t;5%;%?%p1%p3%|%t;7%;m%?%p9%t\E(0%e\E(B%;,
+ sgr0=\E[m$<2>, smacs=\E(0$<2>, smam=\E[?7h, smir=\E[4h,
+ smso=\E[7m, smul=\E[4m,
diff --git a/tests/examplefiles/test.erl b/tests/examplefiles/test.erl
index 5b983e75..d4ab4825 100644
--- a/tests/examplefiles/test.erl
+++ b/tests/examplefiles/test.erl
@@ -152,6 +152,18 @@ a_binary() ->
a_list_comprehension() ->
[X*2 || X <- [1,2,3]].
+a_map() ->
+ M0 = #{ a => 1, b => 2 },
+ M1 = M0#{ b := 200 }.
+
+escape_sequences() ->
+ [ "\b\d\e\f\n\r\s\t\v\'\"\\"
+ , "\1\12\123" % octal
+ , "\x01" % short hex
+ , "\x{fff}" % long hex
+ , "\^a\^A" % control characters
+ ].
+
map(Fun, [H|T]) ->
[Fun(H) | map(Fun, T)];
diff --git a/tests/examplefiles/test.escript b/tests/examplefiles/test.escript
new file mode 100644
index 00000000..3fafb803
--- /dev/null
+++ b/tests/examplefiles/test.escript
@@ -0,0 +1,4 @@
+#!/usr/bin/env escript
+
+main(_Args) ->
+ ok.
diff --git a/tests/examplefiles/test.hsail b/tests/examplefiles/test.hsail
index 750fe04c..f9c25091 100644
--- a/tests/examplefiles/test.hsail
+++ b/tests/examplefiles/test.hsail
@@ -59,4 +59,4 @@ prog kernel &mmul2d(
st_global_f64 $d1, [$d0];
ret;
};
-
+
diff --git a/tests/examplefiles/test.php b/tests/examplefiles/test.php
index 2ce4023e..e8efdc6a 100644
--- a/tests/examplefiles/test.php
+++ b/tests/examplefiles/test.php
@@ -505,11 +505,40 @@ function &byref() {
return $x;
}
+// Test highlighting of magic methods and variables
+class MagicClass {
+ public $magic_str;
+ public $ordinary_str;
+
+ public function __construct($some_var) {
+ $this->magic_str = __FILE__;
+ $this->ordinary_str = $some_var;
+ }
+
+ public function __toString() {
+ return $this->magic_str;
+ }
+
+ public function nonMagic() {
+ return $this->ordinary_str;
+ }
+}
+
+$magic = new MagicClass(__DIR__);
+__toString();
+$magic->nonMagic();
+$magic->__toString();
+
echo <<<EOF
Test the heredocs...
EOF;
+echo <<<"some_delimiter"
+more heredoc testing
+continues on this line
+some_delimiter;
+
?>
diff --git a/tests/examplefiles/test.sco b/tests/examplefiles/test.sco
new file mode 100644
index 00000000..a0b39251
--- /dev/null
+++ b/tests/examplefiles/test.sco
@@ -0,0 +1,10 @@
+f 1 0 16384 10 1
+i "N_a_M_e_" 0 2
+i "TestOscillator" 2 2
+i "TestBitwiseNOT" 0 1
+i "TestBitwiseXOR" 0 1
+i "TestGoto" 0 1
+i "TestMacroPeriodSuffix" 4 1
+i "TestAt" 0 1
+i "MacroAbuse" 0 1
+e
diff --git a/tests/examplefiles/test.sil b/tests/examplefiles/test.sil
new file mode 100644
index 00000000..3bcee835
--- /dev/null
+++ b/tests/examplefiles/test.sil
@@ -0,0 +1,206 @@
+domain Option__Node {
+ unique function Option__Node__Some(): Option__Node
+ unique function Option__Node__None(): Option__Node
+
+ function variantOfOptionNode(self: Ref): Option__Node
+
+ function isOptionNode(self: Ref): Bool
+
+ axiom ax_variantOfOptionNodeChoices {
+ forall x: Ref :: { variantOfOptionNode(x) }
+ (variantOfOptionNode(x) == Option__Node__Some() || variantOfOptionNode(x) == Option__Node__None())
+ }
+
+ axiom ax_isCounterState {
+ forall x: Ref :: { variantOfOptionNode(x) }
+ isOptionNode(x) == (variantOfOptionNode(x) == Option__Node__Some() ||
+ variantOfOptionNode(x) == Option__Node__None())
+ }
+}
+
+predicate validOption(this: Ref) {
+ isOptionNode(this) &&
+ variantOfOptionNode(this) == Option__Node__Some() ==> (
+ acc(this.Option__Node__Some__1, write) &&
+ acc(validNode(this.Option__Node__Some__1))
+ )
+}
+
+field Option__Node__Some__1: Ref
+
+field Node__v: Int
+field Node__next: Ref
+
+predicate validNode(this: Ref) {
+ acc(this.Node__v) &&
+ acc(this.Node__next) &&
+ acc(validOption(this.Node__next))
+}
+
+
+function length(this: Ref): Int
+ requires acc(validNode(this), write)
+ ensures result >= 1
+{
+ (unfolding acc(validNode(this), write) in
+ unfolding acc(validOption(this.Node__next)) in
+ (variantOfOptionNode(this.Node__next) == Option__Node__None()) ?
+ 1 : 1 + length(this.Node__next.Option__Node__Some__1)
+ )
+}
+
+function itemAt(this: Ref, i: Int): Int
+ requires acc(validNode(this), write)
+ requires 0 <= i && i < length(this)
+{
+ unfolding acc(validNode(this), write) in unfolding acc(validOption(this.Node__next)) in (
+ (i == 0) ?
+ this.Node__v:
+ (variantOfOptionNode(this.Node__next) == Option__Node__Some()) ?
+ itemAt(this.Node__next.Option__Node__Some__1, i-1) : this.Node__v
+ )
+}
+
+function sum(this$1: Ref): Int
+ requires acc(validNode(this$1), write)
+{
+ (unfolding acc(validNode(this$1), write) in unfolding acc(validOption(this$1.Node__next)) in
+ (variantOfOptionNode(this$1.Node__next) == Option__Node__None()) ? this$1.Node__v : this$1.Node__v + sum(this$1.Node__next.Option__Node__Some__1))
+}
+
+method append(this: Ref, val: Int)
+ requires acc(validNode(this), write)
+ ensures acc(validNode(this), write) /* POST1 */
+ ensures length(this) == (old(length(this)) + 1) /* POST2 */
+ ensures (forall i: Int :: (0 <= i && i < old(length(this))) ==> (itemAt(this, i) == old(itemAt(this, i)))) /* POST3 */
+ ensures itemAt(this, length(this) - 1) == val /* POST4 */
+ ensures true ==> true
+{
+ var tmp_node: Ref
+ var tmp_option: Ref
+
+ unfold acc(validNode(this), write)
+ unfold acc(validOption(this.Node__next), write)
+
+ if (variantOfOptionNode(this.Node__next) == Option__Node__None()) {
+ tmp_node := new(Node__next, Node__v)
+ tmp_node.Node__next := null
+ tmp_node.Node__v := val
+
+ assume variantOfOptionNode(tmp_node.Node__next) == Option__Node__None()
+ fold acc(validOption(tmp_node.Node__next))
+ fold acc(validNode(tmp_node), write)
+
+ tmp_option := new(Option__Node__Some__1)
+ tmp_option.Option__Node__Some__1 := tmp_node
+ assume variantOfOptionNode(tmp_option) == Option__Node__Some()
+ fold acc(validOption(tmp_option))
+
+ this.Node__next := tmp_option
+
+
+ unfold validOption(tmp_option)
+ assert length(tmp_node) == 1 /* TODO: Required by Silicon, POST2 fails otherwise */
+ assert itemAt(tmp_node, 0) == val /* TODO: Required by Silicon, POST4 fails otherwise */
+ fold validOption(tmp_option)
+ } else {
+ append(this.Node__next.Option__Node__Some__1, val)
+ fold acc(validOption(this.Node__next), write)
+ }
+
+ fold acc(validNode(this), write)
+}
+
+method prepend(tail: Ref, val: Int) returns (res: Ref)
+ requires acc(validNode(tail))
+ ensures acc(validNode(res))
+ //ensures acc(validNode(tail))
+ ensures length(res) == old(length(tail)) + 1
+
+ ensures (forall i: Int :: (1 <= i && i < length(res)) ==> (itemAt(res, i) == old(itemAt(tail, i-1)))) /* POST3 */
+ ensures itemAt(res, 0) == val
+{
+ var tmp_option: Ref
+
+ res := new(Node__v, Node__next)
+ res.Node__v := val
+
+ tmp_option := new(Option__Node__Some__1)
+ tmp_option.Option__Node__Some__1 := tail
+ assume variantOfOptionNode(tmp_option) == Option__Node__Some()
+
+ res.Node__next := tmp_option
+
+ assert acc(validNode(tail))
+ fold acc(validOption(res.Node__next))
+ fold acc(validNode(res))
+}
+
+method length_iter(list: Ref) returns (len: Int)
+ requires acc(validNode(list), write)
+ ensures old(length(list)) == len
+ // TODO we have to preserve this property
+ // ensures acc(validNode(list))
+{
+ var curr: Ref := list
+ var tmp: Ref := list
+
+ len := 1
+
+ unfold acc(validNode(curr))
+ unfold acc(validOption(curr.Node__next))
+ while(variantOfOptionNode(curr.Node__next) == Option__Node__Some())
+ invariant acc(curr.Node__v)
+ invariant acc(curr.Node__next)
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__Some() ==> (
+ acc(curr.Node__next.Option__Node__Some__1, write) &&
+ acc(validNode(curr.Node__next.Option__Node__Some__1))
+ ))
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__Some() ==> len + length(curr.Node__next.Option__Node__Some__1) == old(length(list)))
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__None() ==> len == old(length(list)))
+ {
+ assert acc(validNode(curr.Node__next.Option__Node__Some__1))
+ len := len + 1
+ tmp := curr
+ curr := curr.Node__next.Option__Node__Some__1
+ unfold acc(validNode(curr))
+ unfold acc(validOption(curr.Node__next))
+ }
+}
+
+method t1()
+{
+ var l: Ref
+
+ l := new(Node__v, Node__next)
+ l.Node__next := null
+ l.Node__v := 1
+ assume variantOfOptionNode(l.Node__next) == Option__Node__None()
+
+ fold validOption(l.Node__next)
+ fold validNode(l)
+
+ assert length(l) == 1
+ assert itemAt(l, 0) == 1
+
+ append(l, 7)
+ assert itemAt(l, 1) == 7
+ assert itemAt(l, 0) == 1
+ assert length(l) == 2
+
+ l := prepend(l, 10)
+ assert itemAt(l, 2) == 7
+ assert itemAt(l, 1) == 1
+ assert itemAt(l, 0) == 10
+ assert length(l) == 3
+
+ //assert sum(l) == 18
+}
+
+method t2(l: Ref) returns (res: Ref)
+ requires acc(validNode(l), write)
+ ensures acc(validNode(res), write)
+ ensures length(res) > old(length(l))
+{
+ res := prepend(l, 10)
+}
diff --git a/tests/examplefiles/example.ts b/tests/examplefiles/typescript_example
index 760e2543..760e2543 100644
--- a/tests/examplefiles/example.ts
+++ b/tests/examplefiles/typescript_example
diff --git a/tests/examplefiles/typoscript_example b/tests/examplefiles/typoscript_example
new file mode 100644
index 00000000..e2fccf5d
--- /dev/null
+++ b/tests/examplefiles/typoscript_example
@@ -0,0 +1,1930 @@
+# ***************************************************************************
+# Notice: "styles." (and "temp.") objects are UNSET after template parsing!
+# Use "lib." for persisting storage of objects.
+# ***************************************************************************
+
+<INCLUDE_TYPOSCRIPT: source="FILE: EXT:www_tue_nl/Configuration/TypoScript/Setup/Root.ts">
+
+page.80 = RECORDS
+page.80 {
+ source = 1
+ tables = tt_address
+ conf.tt_address = COA
+ conf.tt_address {
+ 20 = TEXT
+ 20.field = email
+ 20.typolink.parameter.field = email
+ }
+}
+
+ /*
+page.200 = PHP_SCRIPT_EXT
+page.200 {
+ 1 = TMENU
+ 1.wrap = <div style="width:200px; border: 1px solid;">|</div>
+ 1.expAll = 1
+ 1.submenuObjSuffixes = a |*| |*| b
+ 1.NO.allWrap = <b>|</b><br/>
+
+ 2 = TMENU
+ 2.NO.allWrap = <div style="background:red;">|</div>
+
+ 2a = TMENU
+ 2a.NO.allWrap = <div style="background:yellow;">|</div>
+*
+ 2b = TMENU
+ 2b.NO.allWrap = <div style="background:green;">|</div>
+}
+*/
+
+ # Add the CSS and JS files
+page {
+ includeCSS { # comment at the end of a line
+ file99 = fileadmin/your-fancybox.css
+ }
+ includeJSFooter {
+ fancybox = fileadmin/your-fancybox.js
+ }
+}
+
+ # Change the default rendering of images to match lightbox requirements
+tt_content.image.20.1.imageLinkWrap {
+ JSwindow = 0
+ test = MyExtension\Path\To\Class
+
+ directImageLink = 1
+ linkParams.ATagParams {
+ dataWrap = class= "lightbox" rel="fancybox{field:uid}"
+ }
+}
+
+tt_content.image.20.1.imageLinkWrap >
+tt_content.image.20.1.imageLinkWrap = 1
+tt_content.image.20.1.imageLinkWrap {
+ enable = 1
+ typolink {
+ # directly link to the recent image
+ parameter.cObject = IMG_RESOURCE
+ parameter.cObject.file.import.data = TSFE:lastImageInfo|origFile
+ parameter.cObject.file.maxW = {$styles.content.imgtext.maxW}
+ parameter.override.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ title.field = imagecaption // title
+ title.split.token.char = 10
+ title.if.isTrue.field = imagecaption // header
+ title.split.token.char = 10
+ title.split.returnKey.data = register : IMAGE_NUM_CURRENT
+ parameter.cObject = IMG_RESOURCE
+ parameter.cObject.file.import.data = TSFE:lastImageInfo|origFile
+ ATagParams = target="_blank"
+ }
+}
+
+10 = IMAGE
+10 {
+ # point to the image
+ file = fileadmin/demo/lorem_ipsum/images/a4.jpg
+ # make it rather small
+ file.width = 80
+ # add a link to tx_cms_showpic.php that shows the original image
+ imageLinkWrap = 1
+ imageLinkWrap {
+ enable = 1
+ # JSwindow = 1
+ }
+}
+
+# Clear out any constants in this reserved room!
+styles.content >
+
+# get content
+styles.content.get = CONTENT
+styles.content.get {
+ table = tt_content
+ select.orderBy = sorting
+ select.where = colPos=0
+ select.languageField = sys_language_uid
+}
+
+# get content, left
+styles.content.getLeft < styles.content.get
+styles.content.getLeft.select.where = colPos=1
+
+# get content, right
+styles.content.getRight < styles.content.get
+styles.content.getRight.select.where = colPos=2
+
+# get content, margin
+styles.content.getBorder < styles.content.get
+styles.content.getBorder.select.where = colPos=3
+
+# get news
+styles.content.getNews < styles.content.get
+styles.content.getNews.select.pidInList = {$styles.content.getNews.newsPid}
+
+# Edit page object:
+styles.content.editPanelPage = COA
+styles.content.editPanelPage {
+ 10 = EDITPANEL
+ 10 {
+ allow = toolbar,move,hide
+ label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.page
+ label.wrap = |&nbsp;<b>%s</b>
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+# *********************************************************************
+# "lib." objects are preserved from unsetting after template parsing
+# *********************************************************************
+
+# Creates persistent ParseFunc setup for non-HTML content. This is recommended to use (as a reference!)
+lib.parseFunc {
+ makelinks = 1
+ makelinks.http.keep = {$styles.content.links.keep}
+ makelinks.http.extTarget = {$styles.content.links.extTarget}
+ makelinks.mailto.keep = path
+ tags {
+ link = TEXT
+ link {
+ current = 1
+ typolink.parameter.data = parameters : allParams
+ typolink.extTarget = {$styles.content.links.extTarget}
+ typolink.target = {$styles.content.links.target}
+ parseFunc.constants =1
+ }
+ }
+ allowTags = {$styles.content.links.allowTags}
+ denyTags = *
+ sword = <span class="csc-sword">|</span>
+ constants = 1
+
+ nonTypoTagStdWrap.HTMLparser = 1
+ nonTypoTagStdWrap.HTMLparser {
+ keepNonMatchedTags = 1
+ htmlSpecialChars = 2
+ }
+}
+
+# good old parsefunc in "styles.content.parseFunc" is created for backwards compatibility. Don't use it, just ignore.
+styles.content.parseFunc < lib.parseFunc
+
+# Creates persistent ParseFunc setup for RTE content (which is mainly HTML) based on the "ts_css" transformation.
+lib.parseFunc_RTE < lib.parseFunc
+lib.parseFunc_RTE {
+ // makelinks >
+ # Processing <table> and <blockquote> blocks separately
+ externalBlocks = table, blockquote, dd, dl, ol, ul, div
+ externalBlocks {
+ # The blockquote content is passed into parseFunc again...
+ blockquote.stripNL=1
+ blockquote.callRecursive=1
+ blockquote.callRecursive.tagStdWrap.HTMLparser = 1
+ blockquote.callRecursive.tagStdWrap.HTMLparser.tags.blockquote.overrideAttribs = style="margin-bottom:0;margin-top:0;"
+
+ ol.stripNL=1
+ ol.stdWrap.parseFunc = < lib.parseFunc
+
+ ul.stripNL=1
+ ul.stdWrap.parseFunc = < lib.parseFunc
+
+ table.stripNL=1
+ table.stdWrap.HTMLparser = 1
+ table.stdWrap.HTMLparser.tags.table.fixAttrib.class {
+ default = contenttable
+ always = 1
+ list = contenttable
+ }
+ table.stdWrap.HTMLparser.keepNonMatchedTags = 1
+ table.HTMLtableCells=1
+ table.HTMLtableCells {
+ default.callRecursive=1
+ addChr10BetweenParagraphs=1
+ }
+ div.stripNL = 1
+ div.callRecursive = 1
+
+ # Definition list processing
+ dl < .div
+ dd < .div
+ }
+ nonTypoTagStdWrap.encapsLines {
+ encapsTagList = p,pre,h1,h2,h3,h4,h5,h6,hr,dt
+ remapTag.DIV = P
+ nonWrappedTag = P
+ innerStdWrap_all.ifBlank = &nbsp;
+ addAttributes.P.class = bodytext
+ addAttributes.P.class.setOnly=blank
+ }
+ nonTypoTagStdWrap.HTMLparser = 1
+ nonTypoTagStdWrap.HTMLparser {
+ keepNonMatchedTags = 1
+ htmlSpecialChars = 2
+ }
+}
+
+
+# Content header:
+lib.stdheader = COA
+lib.stdheader {
+
+ # Create align style-attribute for <Hx> tags
+ 2 = LOAD_REGISTER
+ 2.headerStyle.field = header_position
+ 2.headerStyle.required = 1
+ 2.headerStyle.noTrimWrap = | style="text-align:|;"|
+
+ # Create class="csc-firstHeader" attribute for <Hx> tags
+ 3 = LOAD_REGISTER
+ 3.headerClass = csc-firstHeader
+ 3.headerClass.if.value=1
+ 3.headerClass.if.equals.data = cObj:parentRecordNumber
+ 3.headerClass.noTrimWrap = | class="|"|
+
+ # Date format:
+ 5 = TEXT
+ 5.field = date
+ 5.if.isTrue.field = date
+ 5.strftime = %x
+ 5.wrap = <p class="csc-header-date">|</p>
+ 5.prefixComment = 2 | Header date:
+
+ # This CASE cObject renders the header content:
+ # currentValue is set to the header data, possibly wrapped in link-tags.
+ 10 = CASE
+ 10.setCurrent {
+ field = header
+ htmlSpecialChars = 1
+ typolink.parameter.field = header_link
+ }
+ 10.key.field = header_layout
+ 10.key.ifEmpty = {$content.defaultHeaderType}
+ 10.key.ifEmpty.override.data = register: defaultHeaderType
+
+ 10.1 = TEXT
+ 10.1.current = 1
+ 10.1.dataWrap = <h1{register:headerStyle}{register:headerClass}>|</h1>
+
+ 10.2 < .10.1
+ 10.2.dataWrap = <h2{register:headerStyle}{register:headerClass}>|</h2>
+
+ 10.3 < .10.1
+ 10.3.dataWrap = <h3{register:headerStyle}{register:headerClass}>|</h3>
+
+ 10.4 < .10.1
+ 10.4.dataWrap = <h4{register:headerStyle}{register:headerClass}>|</h4>
+
+ 10.5 < .10.1
+ 10.5.dataWrap = <h5{register:headerStyle}{register:headerClass}>|</h5>
+
+ # Pops the used registers off the stack:
+ 98 = RESTORE_REGISTER
+ 99 = RESTORE_REGISTER
+
+ # Post-processing:
+ stdWrap.fieldRequired = header
+ stdWrap.if {
+ equals.field = header_layout
+ value = 100
+ negate = 1
+ }
+
+ stdWrap.editIcons = tt_content : header, [header_layout | header_position], [header_link|date]
+ stdWrap.editIcons.beforeLastTag = 1
+ stdWrap.editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.header
+
+ stdWrap.dataWrap = <div class="csc-header csc-header-n{cObj:parentRecordNumber}">|</div>
+ stdWrap.prefixComment = 2 | Header:
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#******************************************************
+# Including library for processing of some elements:
+#******************************************************
+includeLibs.tx_cssstyledcontent_pi1 = EXT:css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+
+
+#**********************************
+# tt_content is started
+#**********************************
+tt_content >
+tt_content = CASE
+tt_content.key.field = CType
+tt_content.stdWrap {
+ innerWrap.cObject = CASE
+ innerWrap.cObject {
+ key.field = section_frame
+
+ default = COA
+ default {
+ 10 = TEXT
+ 10 {
+ value = <div id="c{field:uid}"
+ override.cObject = TEXT
+ override.cObject {
+ value = <div
+ if.value = div
+ if.equals.field = CType
+ }
+ insertData = 1
+ }
+
+ 15 = TEXT
+ 15 {
+ value = csc-default
+ noTrimWrap = | class="|" |
+ required = 1
+ }
+
+ 20 = COA
+ 20 {
+ 10 = COA
+ 10 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceBefore}
+ wrap = |+
+ if.isTrue = {$content.spaceBefore}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceBefore
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-top:|px;
+ required = 1
+ ifEmpty.value =
+ }
+ }
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceAfter}
+ wrap = |+
+ if.isTrue = {$content.spaceAfter}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceAfter
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-bottom:|px;
+ required = 1
+ ifEmpty.value =
+ }
+ }
+
+ stdWrap.noTrimWrap = | style="|" |
+ stdWrap.required = 1
+ }
+ 30 = TEXT
+ 30.value = >|</div>
+ }
+
+ 1 =< tt_content.stdWrap.innerWrap.cObject.default
+ 1.15.value = csc-frame csc-frame-invisible
+
+ 5 =< tt_content.stdWrap.innerWrap.cObject.default
+ 5.15.value = csc-frame csc-frame-rulerBefore
+
+ 6 =< tt_content.stdWrap.innerWrap.cObject.default
+ 6.15.value = csc-frame csc-frame-rulerAfter
+
+ 10 =< tt_content.stdWrap.innerWrap.cObject.default
+ 10.15.value = csc-frame csc-frame-indent
+
+ 11 =< tt_content.stdWrap.innerWrap.cObject.default
+ 11.15.value = csc-frame csc-frame-indent3366
+
+ 12 =< tt_content.stdWrap.innerWrap.cObject.default
+ 12.15.value = csc-frame csc-frame-indent6633
+
+ 20 =< tt_content.stdWrap.innerWrap.cObject.default
+ 20.15.value = csc-frame csc-frame-frame1
+
+ 21 =< tt_content.stdWrap.innerWrap.cObject.default
+ 21.15.value = csc-frame csc-frame-frame2
+
+ 66 = COA
+ 66 {
+ 10 = TEXT
+ 10 {
+ value = <a id="c{field:uid}"></a>
+ insertData = 1
+ }
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceBefore}
+ wrap = |+
+ if.isTrue = {$content.spaceBefore}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceBefore
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-top:|px;
+ required = 1
+ ifEmpty.value =
+ wrap2 = <div style="|"></div>
+ }
+ }
+
+ 30 = TEXT
+ 30 {
+ value = |
+ }
+
+ 40 < .20
+ 40 {
+ 10 {
+ value = {$content.spaceAfter}
+ if.isTrue = {$content.spaceAfter}
+ }
+ 20.field = spaceAfter
+ stdWrap.wrap = margin-bottom:|px;
+ }
+ }
+
+ }
+
+ innerWrap2 = | <p class="csc-linkToTop"><a href="#">{LLL:EXT:css_styled_content/pi1/locallang.xml:label.toTop}</a></p>
+ innerWrap2.insertData = 1
+ innerWrap2.fieldRequired = linkToTop
+
+ prepend = TEXT
+ prepend.dataWrap = <a id="c{field:_LOCALIZED_UID}"></a>
+ prepend.if.isTrue.field = _LOCALIZED_UID
+
+ editPanel = 1
+ editPanel {
+ allow = move,new,edit,hide,delete
+ line = 5
+ label = %s
+ onlyCurrentPid = 1
+ previewBorder = 4
+ edit.displayRecord = 1
+ }
+
+ prefixComment = 1 | CONTENT ELEMENT, uid:{field:uid}/{field:CType}
+}
+
+
+
+# *****************
+# CType: header
+# *****************
+# See Object path "lib.stdheader"
+tt_content.header = COA
+tt_content.header {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = subheader
+ required = 1
+
+ dataWrap = <p class="csc-subheader csc-subheader-{field:layout}">|</p>
+ htmlSpecialChars = 1
+
+ editIcons = tt_content:subheader,layout
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.subheader
+
+ prefixComment = 2 | Subheader:
+ }
+}
+
+
+
+# *****************
+# CType: text
+# *****************
+tt_content.text = COA
+tt_content.text {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = bodytext
+ required = 1
+
+ parseFunc = < lib.parseFunc_RTE
+
+ editIcons = tt_content:bodytext, rte_enabled
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.bodytext
+
+ prefixComment = 2 | Text:
+ }
+}
+
+
+
+# *****************
+# CType: image
+# *****************
+# (also used for rendering 'textpic' type):
+tt_content.image = COA
+tt_content.image.10 = < lib.stdheader
+tt_content.image.20 = USER
+tt_content.image.20 {
+ userFunc = tx_cssstyledcontent_pi1->render_textpic
+
+ # Image source
+ imgList.field = image
+ imgPath = uploads/pics/
+
+ # Single image rendering
+ imgObjNum = 1
+ 1 {
+ file.import.current = 1
+ file.width.field = imagewidth
+ imageLinkWrap = 1
+ imageLinkWrap {
+ bodyTag = <body style="margin:0; background:#fff;">
+ wrap = <a href="javascript:close();"> | </a>
+ width = {$styles.content.imgtext.linkWrap.width}
+ height = {$styles.content.imgtext.linkWrap.height}
+ effects = {$styles.content.imgtext.linkWrap.effects}
+
+ JSwindow = 1
+ JSwindow.newWindow = {$styles.content.imgtext.linkWrap.newWindow}
+ JSwindow.if.isFalse = {$styles.content.imgtext.linkWrap.lightboxEnabled}
+
+ directImageLink = {$styles.content.imgtext.linkWrap.lightboxEnabled}
+
+ enable.field = image_zoom
+ enable.ifEmpty.typolink.parameter.field = image_link
+ enable.ifEmpty.typolink.parameter.listNum.splitChar = 10
+ enable.ifEmpty.typolink.parameter.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ enable.ifEmpty.typolink.returnLast = url
+
+ typolink.parameter.field = image_link
+ typolink.parameter.listNum.splitChar = 10
+ typolink.parameter.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ typolink.target = {$styles.content.links.target}
+ typolink.extTarget = {$styles.content.links.extTarget}
+
+ linkParams.ATagParams.dataWrap = class="{$styles.content.imgtext.linkWrap.lightboxCssClass}" rel="{$styles.content.imgtext.linkWrap.lightboxRelAttribute}"
+ }
+
+ altText = TEXT
+ altText {
+ field = altText
+ stripHtml = 1
+ split.token.char = 10
+ split.token.if.isTrue = {$styles.content.imgtext.imageTextSplit}
+ split.returnKey.data = register : IMAGE_NUM_CURRENT
+ }
+
+ titleText < .altText
+ titleText.field = titleText
+
+ longdescURL < .altText
+ longdescURL.field = longdescURL
+
+ emptyTitleHandling = {$styles.content.imgtext.emptyTitleHandling}
+ titleInLink = {$styles.content.imgtext.titleInLink}
+ titleInLinkAndImg = {$styles.content.imgtext.titleInLinkAndImg}
+ }
+
+ textPos.field = imageorient
+ maxW = {$styles.content.imgtext.maxW}
+ maxW.override.data = register:maxImageWidth
+ maxWInText = {$styles.content.imgtext.maxWInText}
+ maxWInText.override.data = register:maxImageWidthInText
+
+ equalH.field = imageheight
+
+ image_compression.field = image_compression
+ image_effects.field = image_effects
+
+ noRows.field = image_noRows
+
+ cols.field = imagecols
+ border.field = imageborder
+
+ caption {
+ 1 = TEXT
+ 1 {
+ field = imagecaption
+ required = 1
+ parseFunc =< lib.parseFunc
+ br = 1
+ split.token.char = 10
+ split.token.if.isPositive = {$styles.content.imgtext.imageTextSplit} + {$styles.content.imgtext.captionSplit}
+ split.returnKey.data = register : IMAGE_NUM_CURRENT
+ }
+ }
+ # captionSplit is deprecated, use imageTextSplit instead
+ captionSplit = {$styles.content.imgtext.captionSplit}
+ captionAlign.field = imagecaption_position
+ # caption/alttext/title/longdescURL splitting
+ imageTextSplit = {$styles.content.imgtext.imageTextSplit}
+
+ borderCol = {$styles.content.imgtext.borderColor}
+ borderThick = {$styles.content.imgtext.borderThick}
+ borderClass = {$styles.content.imgtext.borderClass}
+ colSpace = {$styles.content.imgtext.colSpace}
+ rowSpace = {$styles.content.imgtext.rowSpace}
+ textMargin = {$styles.content.imgtext.textMargin}
+
+ borderSpace = {$styles.content.imgtext.borderSpace}
+ separateRows = {$styles.content.imgtext.separateRows}
+ addClasses =
+ addClassesImage =
+ addClassesImage.ifEmpty = csc-textpic-firstcol csc-textpic-lastcol
+ addClassesImage.override = csc-textpic-firstcol |*| |*| csc-textpic-lastcol
+ addClassesImage.override.if {
+ isGreaterThan.field = imagecols
+ value = 1
+ }
+
+ #
+ imageStdWrap.dataWrap = <div class="csc-textpic-imagewrap" style="width:{register:totalwidth}px;"> | </div>
+ imageStdWrapNoWidth.wrap = <div class="csc-textpic-imagewrap"> | </div>
+
+ # if noRows is set, wrap around each column:
+ imageColumnStdWrap.dataWrap = <div class="csc-textpic-imagecolumn" style="width:{register:columnwidth}px;"> | </div>
+
+ layout = CASE
+ layout {
+ key.field = imageorient
+ # above-center
+ default = TEXT
+ default.value = <div class="csc-textpic csc-textpic-center csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # above-right
+ 1 = TEXT
+ 1.value = <div class="csc-textpic csc-textpic-right csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # above-left
+ 2 = TEXT
+ 2.value = <div class="csc-textpic csc-textpic-left csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-center
+ 8 = TEXT
+ 8.value = <div class="csc-textpic csc-textpic-center csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-right
+ 9 = TEXT
+ 9.value = <div class="csc-textpic csc-textpic-right csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-left
+ 10 = TEXT
+ 10.value = <div class="csc-textpic csc-textpic-left csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # intext-right
+ 17 = TEXT
+ 17.value = <div class="csc-textpic csc-textpic-intext-right###CLASSES###">###IMAGES######TEXT###</div>
+ 17.override = <div class="csc-textpic csc-textpic-intext-right###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ 17.override.if.isTrue = {$styles.content.imgtext.addIntextClearer}
+ # intext-left
+ 18 = TEXT
+ 18.value = <div class="csc-textpic csc-textpic-intext-left###CLASSES###">###IMAGES######TEXT###</div>
+ 18.override = <div class="csc-textpic csc-textpic-intext-left###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ 18.override.if.isTrue = {$styles.content.imgtext.addIntextClearer}
+ # intext-right-nowrap
+ 25 = TEXT
+ 25.value = <div class="csc-textpic csc-textpic-intext-right-nowrap###CLASSES###">###IMAGES###<div style="margin-right:{register:rowWidthPlusTextMargin}px;">###TEXT###</div></div><div class="csc-textpic-clear"><!-- --></div>
+ 25.insertData = 1
+ # intext-left-nowrap
+ 26 = TEXT
+ 26.value = <div class="csc-textpic csc-textpic-intext-left-nowrap###CLASSES###">###IMAGES###<div style="margin-left:{register:rowWidthPlusTextMargin}px;">###TEXT###</div></div><div class="csc-textpic-clear"><!-- --></div>
+ 26.insertData = 1
+ }
+
+ rendering {
+ dl {
+ # Choose another rendering for special edge cases
+ fallbackRendering = COA
+ fallbackRendering {
+ # Just one image without a caption => don't need the dl-overhead, use the "simple" rendering
+ 10 = TEXT
+ 10 {
+ if {
+ isFalse.field = imagecaption
+ value = 1
+ equals.data = register:imageCount
+ }
+ value = simple
+ }
+
+ # Multiple images and one global caption => "ul"
+ 20 = TEXT
+ 20 {
+ if {
+ value = 1
+ isGreaterThan.data = register:imageCount
+ isTrue.if.isTrue.data = register:renderGlobalCaption
+ isTrue.field = imagecaption
+ }
+ value = ul
+ }
+
+ # Multiple images and no caption at all => "ul"
+ 30 = TEXT
+ 30 {
+ if {
+ value = 1
+ isGreaterThan.data = register:imageCount
+ isFalse.field = imagecaption
+ }
+ value = ul
+ }
+ }
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"> | </div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"> | </div>
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = <dl class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </dl>
+ imgTagStdWrap.wrap = <dt> | </dt>
+ editIconsStdWrap.wrap = <dd> | </dd>
+ caption {
+ required = 1
+ wrap = <dd class="csc-textpic-caption"> | </dd>
+ }
+ }
+ ul {
+ # Just one image without a caption => don't need the ul-overhead, use the "simple" rendering
+ fallbackRendering < tt_content.image.20.rendering.dl.fallbackRendering.10
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"><ul> | </ul></div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"><ul> | </ul></div>
+ noRowsStdWrap.wrap = <ul> | </ul>
+ oneImageStdWrap.dataWrap = <li class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </li>
+ imgTagStdWrap.wrap =
+ editIconsStdWrap.wrap = <div> | </div>
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ }
+ div {
+ # Just one image without a caption => don't need the div-overhead, use the "simple" rendering
+ fallbackRendering < tt_content.image.20.rendering.dl.fallbackRendering.10
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"> | </div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"> | </div>
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = <div class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </div>
+ imgTagStdWrap.wrap = <div> | </div>
+ editIconsStdWrap.wrap = <div> | </div>
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ }
+ simple {
+ imageRowStdWrap.dataWrap = |
+ imageLastRowStdWrap.dataWrap = |
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = |
+ imgTagStdWrap.wrap = |
+ editIconsStdWrap.wrap = |
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ imageStdWrap.dataWrap = <div class="csc-textpic-imagewrap csc-textpic-single-image" style="width:{register:totalwidth}px;"> | </div>
+ imageStdWrapNoWidth.wrap = <div class="csc-textpic-imagewrap csc-textpic-single-image"> | </div>
+ }
+ }
+ renderMethod = dl
+
+ editIcons = tt_content : image [imageorient|imagewidth|imageheight], [imagecols|image_noRows|imageborder],[image_link|image_zoom],[image_compression|image_effects|image_frames],imagecaption[imagecaption_position]
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.images
+
+ caption.editIcons = tt_content : imagecaption[imagecaption_position]
+ caption.editIcons.beforeLastTag=1
+ caption.editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.caption
+
+ stdWrap.prefixComment = 2 | Image block:
+}
+
+# *****************
+# CType: textpic
+# *****************
+tt_content.textpic = COA
+tt_content.textpic {
+ 10 = COA
+ 10.if.value = 25
+ 10.if.isLessThan.field = imageorient
+ 10.10 = < lib.stdheader
+
+ 20 = < tt_content.image.20
+ 20 {
+ text.10 = COA
+ text.10 {
+ if.value = 24
+ if.isGreaterThan.field = imageorient
+ 10 = < lib.stdheader
+ 10.stdWrap.dataWrap = <div class="csc-textpicHeader csc-textpicHeader-{field:imageorient}">|</div>
+ }
+ text.20 = < tt_content.text.20
+ text.wrap = <div class="csc-textpic-text"> | </div>
+ }
+}
+
+
+
+# *****************
+# CType: bullets
+# *****************
+tt_content.bullets = COA
+tt_content.bullets {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = bodytext
+ trim = 1
+ split{
+ token.char = 10
+ cObjNum = |*|1|| 2|*|
+ 1.current = 1
+ 1.parseFunc =< lib.parseFunc
+ 1.wrap = <li class="odd">|</li>
+
+ 2.current = 1
+ 2.parseFunc =< lib.parseFunc
+ 2.wrap = <li class="even">|</li>
+ }
+ dataWrap = <ul class="csc-bulletlist csc-bulletlist-{field:layout}">|</ul>
+ editIcons = tt_content: bodytext, [layout]
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.php:eIcon.bullets
+
+ prefixComment = 2 | Bullet list:
+ }
+}
+
+
+
+# *****************
+# CType: table
+# *****************
+# Rendered by a PHP function specifically written to handle CE tables. See css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+tt_content.table = COA
+tt_content.table {
+ 10 = < lib.stdheader
+
+ 20 = USER
+ 20.userFunc = tx_cssstyledcontent_pi1->render_table
+ 20.field = bodytext
+
+ 20.color {
+ default =
+ 1 = #EDEBF1
+ 2 = #F5FFAA
+ }
+ 20.tableParams_0 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_1 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_2 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_3 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.innerStdWrap.wrap = |
+ 20.innerStdWrap.parseFunc = < lib.parseFunc
+
+ 20.stdWrap {
+ editIcons = tt_content: cols, bodytext, [layout], [table_bgColor|table_border|table_cellspacing|table_cellpadding]
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.table
+
+ prefixComment = 2 | Table:
+ }
+}
+
+
+# *****************
+# CType: uploads
+# *****************
+# Rendered by a PHP function specifically written to handle CE filelists. See css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+tt_content.uploads = COA
+tt_content.uploads {
+ 10 = < lib.stdheader
+
+ 20 = USER
+ 20.userFunc = tx_cssstyledcontent_pi1->render_uploads
+ 20.field = media
+ 20.filePath.field = select_key
+
+ 20 {
+ # Rendering for each file (e.g. rows of the table) as a cObject
+ itemRendering = COA
+ itemRendering {
+ wrap = <tr class="tr-odd tr-first">|</tr> |*| <tr class="tr-even">|</tr> || <tr class="tr-odd">|</tr> |*|
+
+ 10 = TEXT
+ 10.data = register:linkedIcon
+ 10.wrap = <td class="csc-uploads-icon">|</td>
+ 10.if.isPositive.field = layout
+
+ 20 = COA
+ 20.wrap = <td class="csc-uploads-fileName">|</td>
+ 20.1 = TEXT
+ 20.1 {
+ data = register:linkedLabel
+ wrap = <p>|</p>
+ }
+ 20.2 = TEXT
+ 20.2 {
+ data = register:description
+ wrap = <p class="csc-uploads-description">|</p>
+ required = 1
+ htmlSpecialChars = 1
+ }
+
+ 30 = TEXT
+ 30.if.isTrue.field = filelink_size
+ 30.data = register:fileSize
+ 30.wrap = <td class="csc-uploads-fileSize">|</td>
+ 30.bytes = 1
+ 30.bytes.labels = {$styles.content.uploads.filesizeBytesLabels}
+ }
+ useSpacesInLinkText = 0
+ stripFileExtensionFromLinkText = 0
+ }
+
+ 20.color {
+ default =
+ 1 = #EDEBF1
+ 2 = #F5FFAA
+ }
+ 20.tableParams_0 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_1 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_2 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_3 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+
+ 20.linkProc {
+ target = _blank
+ jumpurl = {$styles.content.uploads.jumpurl}
+ jumpurl.secure = {$styles.content.uploads.jumpurl_secure}
+ jumpurl.secure.mimeTypes = {$styles.content.uploads.jumpurl_secure_mimeTypes}
+ removePrependedNumbers = 1
+
+ iconCObject = IMAGE
+ iconCObject.file.import.data = register : ICON_REL_PATH
+ iconCObject.file.width = 150
+ }
+
+ 20.filesize {
+ bytes = 1
+ bytes.labels = {$styles.content.uploads.filesizeBytesLabels}
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: media, layout [table_bgColor|table_border|table_cellspacing|table_cellpadding], filelink_size, imagecaption
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.filelist
+
+ prefixComment = 2 | File list:
+ }
+}
+
+
+# ******************
+# CType: multimedia
+# ******************
+tt_content.multimedia = COA
+tt_content.multimedia {
+ 10 = < lib.stdheader
+
+ 20 = MULTIMEDIA
+ 20.file.field = multimedia
+ 20.file.wrap = uploads/media/
+ 20.file.listNum = 0
+ 20.params.field = bodytext
+
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | Multimedia element:
+ }
+}
+
+# *****************
+# CType: swfobject
+# *****************
+tt_content.swfobject = COA
+tt_content.swfobject {
+ 10 = < lib.stdheader
+
+ 20 = SWFOBJECT
+ 20 {
+ file =
+ width =
+ height =
+
+ flexParams.field = pi_flexform
+
+ alternativeContent.field = bodytext
+
+ layout = ###SWFOBJECT###
+
+ video {
+ player = {$styles.content.media.videoPlayer}
+
+ defaultWidth = {$styles.content.media.defaultVideoWidth}
+ defaultHeight = {$styles.content.media.defaultVideoHeight}
+
+ default {
+ params.quality = high
+ params.menu = false
+ params.allowScriptAccess = sameDomain
+ params.allowFullScreen = true
+ }
+ mapping {
+
+ }
+ }
+
+ audio {
+ player = {$styles.content.media.audioPlayer}
+
+ defaultWidth = {$styles.content.media.defaultAudioWidth}
+ defaultHeight = {$styles.content.media.defaultAudioHeight}
+
+ default {
+ params.quality = high
+ params.allowScriptAccess = sameDomain
+ params.menu = false
+ }
+ mapping {
+ flashvars.file = soundFile
+ }
+ }
+
+ }
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, imagewidth, imageheight, pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | SWFobject element:
+ }
+}
+
+# *****************
+# CType: qtobject
+# *****************
+tt_content.qtobject = COA
+tt_content.qtobject {
+ 10 = < lib.stdheader
+
+ 20 = QTOBJECT
+ 20 {
+ file =
+ width =
+ height =
+
+ flexParams.field = pi_flexform
+
+ alternativeContent.field = bodytext
+
+ layout = ###QTOBJECT###
+
+ video {
+ player = {$styles.content.media.videoPlayer}
+
+ defaultWidth = {$styles.content.media.defaultVideoWidth}
+ defaultHeight = {$styles.content.media.defaultVideoHeight}
+
+ default {
+ params.quality = high
+ params.menu = false
+ params.allowScriptAccess = sameDomain
+ params.allowFullScreen = true
+ }
+ mapping {
+
+ }
+ }
+
+ audio {
+ player = {$styles.content.media.audioPlayer}
+
+ defaultWidth = {$styles.content.media.defaultAudioWidth}
+ defaultHeight = {$styles.content.media.defaultAudioHeight}
+
+ default {
+ params.quality = high
+ params.allowScriptAccess = sameDomain
+ params.menu = false
+ }
+ mapping {
+ flashvars.file = soundFile
+ }
+ }
+ }
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, imagewidth, imageheight, pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | QTobject element:
+ }
+}
+
+# *****************
+# CType: media
+# *****************
+tt_content.media = COA
+tt_content.media {
+ 10 = < lib.stdheader
+
+ 20 = MEDIA
+ 20 {
+
+ flexParams.field = pi_flexform
+ alternativeContent < tt_content.text.20
+ alternativeContent.field = bodytext
+
+ type = video
+ renderType = auto
+ allowEmptyUrl = 0
+ forcePlayer = 1
+
+ fileExtHandler {
+ default = MEDIA
+ avi = MEDIA
+ asf = MEDIA
+ class = MEDIA
+ wmv = MEDIA
+ mp3 = SWF
+ mp4 = SWF
+ m4v = SWF
+ swa = SWF
+ flv = SWF
+ swf = SWF
+ mov = QT
+ m4v = QT
+ m4a = QT
+ }
+
+ mimeConf.swfobject < tt_content.swfobject.20
+ mimeConf.qtobject < tt_content.qtobject.20
+
+ }
+ 20.stdWrap {
+ editIcons = tt_content: pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | Media element:
+ }
+}
+
+# ******************
+# CType: mailform
+# ******************
+tt_content.mailform = COA
+tt_content.mailform.10 = < lib.stdheader
+tt_content.mailform.20 = FORM
+tt_content.mailform.20 {
+ accessibility = 1
+ noWrapAttr=1
+ formName = mailform
+ dontMd5FieldNames = 1
+ layout = <div class="csc-mailform-field">###LABEL### ###FIELD###</div>
+ labelWrap.wrap = |
+ commentWrap.wrap = |
+ radioWrap.wrap = |<br />
+ radioWrap.accessibilityWrap = <fieldset###RADIO_FIELD_ID###><legend>###RADIO_GROUP_LABEL###</legend>|</fieldset>
+ REQ = 1
+ REQ.labelWrap.wrap = |
+ COMMENT.layout = <div class="csc-mailform-label">###LABEL###</div>
+ RADIO.layout = <div class="csc-mailform-field">###LABEL### <span class="csc-mailform-radio">###FIELD###</span></div>
+ LABEL.layout = <div class="csc-mailform-field">###LABEL### <span class="csc-mailform-label">###FIELD###</span></div>
+ target = {$styles.content.mailform.target}
+ goodMess = {$styles.content.mailform.goodMess}
+ badMess = {$styles.content.mailform.badMess}
+ redirect.field = pages
+ redirect.listNum = 0
+ recipient.field = subheader
+ data.field = bodytext
+ locationData = 1
+ hiddenFields.stdWrap.wrap = <div style="display:none;">|</div>
+
+ params.radio = class="csc-mailform-radio"
+ params.check = class="csc-mailform-check"
+ params.submit = class="csc-mailform-submit"
+
+ stdWrap.wrap = <fieldset class="csc-mailform"> | </fieldset>
+ stdWrap {
+ editIcons = tt_content: bodytext, pages, subheader
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.form
+
+ prefixComment = 2 | Mail form inserted:
+ }
+}
+
+
+# ******************
+# CType: search
+# ******************
+tt_content.search = COA
+tt_content.search.10 = < lib.stdheader
+# Result:
+tt_content.search.20 = SEARCHRESULT
+tt_content.search.20 {
+ allowedCols = pages.title-subtitle-keywords-description : tt_content.header-bodytext-imagecaption : tt_address.name-title-address-email-company-city-country : tt_links.title-note-note2-url : tt_board.subject-message-author-email : tt_calender.title-note : tt_products.title-note-itemnumber
+ languageField.tt_content = sys_language_uid
+ renderObj = COA
+ renderObj {
+
+ 10 = TEXT
+ 10.field = pages_title
+ 10.htmlSpecialChars = 1
+ 10.typolink {
+ parameter.field = uid
+ target = {$styles.content.searchresult.resultTarget}
+ additionalParams.data = register:SWORD_PARAMS
+ additionalParams.required = 1
+ additionalParams.wrap = &no_cache=1
+ }
+ 10.htmlSpecialChars = 1
+ 10.wrap = <h3 class="csc-searchResultHeader">|</h3>
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10.field = tt_content_bodytext
+ 10.stripHtml = 1
+ 10.htmlSpecialChars = 1
+ }
+ 20.stdWrap.crop = 200 | ...
+ 20.stdWrap.wrap = <p class="csc-searchResult">|</p>
+ }
+
+ layout = COA
+ layout {
+ wrap = <table border="0" cellspacing="0" cellpadding="2" class="csc-searchResultInfo"><tr> | </tr></table> ###RESULT###
+
+ 10 = TEXT
+ 10.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.resultRange
+ 10.wrap = <td class="csc-searchResultRange"><p>|</p></td>
+
+ 20 = TEXT
+ 20.value = ###PREV###&nbsp;&nbsp;&nbsp;###NEXT###
+ 20.wrap = <td class="csc-searchResultPrevNext"><p>|</p></td>
+ }
+
+ noResultObj = COA
+ noResultObj {
+ 10 = TEXT
+ 10.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.emptySearch
+ 10.wrap = <h3 class="csc-noSearchResultMsg">|</h3>
+ }
+
+ next = TEXT
+ next.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchResultNext
+
+ prev = TEXT
+ prev.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchResultPrev
+
+ target = {$styles.content.searchresult.target}
+ range = 20
+
+ stdWrap.prefixComment = 2 | Search result:
+}
+
+# Form:
+tt_content.search.30 < tt_content.mailform.20
+tt_content.search.30 {
+ goodMess = {$styles.content.searchform.goodMess}
+ redirect >
+ recipient >
+ data >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchWord
+ 10.type = sword=input
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchIn
+ 20.type = scols=select
+ 20.valueArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.headersKeywords
+ 10.value = pages.title-subtitle-keywords-description:tt_content.header
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.pageContent
+ 20.value = tt_content.header-bodytext-imagecaption
+ }
+ 30.type = stype=hidden
+ 30.value = L0
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchButton
+ }
+ type.field = pages
+ type.listNum = 0
+ locationData = HTTP_POST_VARS
+ no_cache = 1
+
+ stdWrap.wrap = <table border="0" cellspacing="1" cellpadding="1" class="csc-searchform"> | </table>
+ stdWrap {
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.search
+
+ prefixComment = 2 | Search form inserted:
+ }
+}
+
+
+# ******************
+# CType: login
+# ******************
+tt_content.login < tt_content.mailform
+tt_content.login.10 = < lib.stdheader
+tt_content.login.20 {
+ goodMess = {$styles.content.loginform.goodMess}
+ redirect >
+ recipient >
+ data >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.username
+ 10.type = *user=input
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.password
+ 20.type = *pass=password
+ 30.type = logintype=hidden
+ 30.value = login
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.login
+ }
+ type.field = pages
+ type.listNum = 0
+ target = {$styles.content.loginform.target}
+ locationData = 0
+ hiddenFields.pid = TEXT
+ hiddenFields.pid {
+ value = {$styles.content.loginform.pid}
+ override.field = pages
+ override.listNum = 1
+ }
+
+ stdWrap.wrap = <div class="csc-loginform"> | </div>
+ stdWrap {
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.login
+
+ prefixComment = 2 | Login/Logout form:
+ }
+}
+[loginUser = *]
+tt_content.login.20 {
+ dataArray >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.username
+ 10.label.wrap = |&nbsp;<!--###USERNAME###-->
+ 30.type = logintype=hidden
+ 30.value = logout
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.logout
+ }
+}
+[global]
+
+
+# ******************
+# CType: splash
+# ******************
+# Deprecated element.
+# Still here for backwards compatibility with plugins using the "text box" type.
+tt_content.splash = CASE
+tt_content.splash.key.field = splash_layout
+tt_content.splash.stdWrap {
+ prefixComment = 2 | Textbox inserted (Deprecated)
+}
+tt_content.splash.default = COA
+tt_content.splash.default {
+ 20 = CTABLE
+ 20 {
+ c.1 = < tt_content.text
+ lm.1 = IMAGE
+ lm.1.file {
+ import = uploads/pics/
+ import.field = image
+ import.listNum = 0
+ maxW.field = imagewidth
+ maxW.ifEmpty = 200
+ }
+ cMargins = 30,0,0,0
+ }
+}
+tt_content.splash.1 < tt_content.splash.default
+tt_content.splash.1.20.lm.1.file >
+tt_content.splash.1.20.lm.1.file = GIFBUILDER
+tt_content.splash.1.20.lm.1.file {
+ XY = [10.w]+10,[10.h]+10
+ backColor = {$content.splash.bgCol}
+ backColor.override.data = register:pageColor
+ format = jpg
+ 5 = BOX
+ 5.dimensions = 3,3,[10.w],[10.h]
+ 5.color = #333333
+ 7 = EFFECT
+ 7.value = blur=99|blur=99|blur=99|blur=99|blur=99|blur=99|blur=99
+ 10 = IMAGE
+ 10.file {
+ import = uploads/pics/
+ import.field = image
+ import.listNum = 0
+ maxW.field = imagewidth
+ maxW.ifEmpty = 200
+ }
+}
+// The image frames are not available unless TypoScript code from styles.content.imgFrames.x is provided manually:
+tt_content.splash.2 < tt_content.splash.default
+#tt_content.splash.2.20.lm.1.file.m < styles.content.imgFrames.1
+tt_content.splash.3 < tt_content.splash.default
+#tt_content.splash.3.20.lm.1.file.m < styles.content.imgFrames.2
+
+// From plugin.postit1, if included:
+tt_content.splash.20 = < plugin.postit1
+
+
+
+# ****************
+# CType: menu
+# ****************
+tt_content.menu = COA
+tt_content.menu {
+ 10 = < lib.stdheader
+
+ 20 = CASE
+ 20 {
+ key.field = menu_type
+
+ # "Menu of these pages"
+ default = HMENU
+ default {
+ special = list
+ special.value.field = pages
+ wrap = <ul class="csc-menu csc-menu-def">|</ul>
+ 1 = TMENU
+ 1 {
+ target = {$PAGE_TARGET}
+ NO {
+ stdWrap.htmlSpecialChars = 1
+ wrapItemAndSub = <li>|</li>
+ ATagTitle.field = description // title
+ }
+ noBlur = 1
+ }
+ }
+
+ # "Menu of subpages to these pages"
+ 1 < .default
+ 1 {
+ special = directory
+ wrap = <ul class="csc-menu csc-menu-1">|</ul>
+ }
+
+ # "Sitemap - liststyle"
+ 2 = HMENU
+ 2 {
+ wrap = <div class="csc-sitemap">|</div>
+ 1 = TMENU
+ 1 {
+ target = {$PAGE_TARGET}
+ noBlur = 1
+ expAll = 1
+ wrap = <ul>|</ul>
+ NO {
+ stdWrap.htmlSpecialChars = 1
+ wrapItemAndSub = <li>|</li>
+ ATagTitle.field = description // title
+ }
+ }
+ 2 < .1
+ 3 < .1
+ 4 < .1
+ 5 < .1
+ 6 < .1
+ 7 < .1
+ }
+
+ # "Section index (pagecontent w/Index checked - liststyle)"
+ 3 < styles.content.get
+ 3 {
+ wrap = <ul class="csc-menu csc-menu-3">|</ul>
+ select.andWhere = sectionIndex!=0
+ select.pidInList.override.field = pages
+ renderObj = TEXT
+ renderObj {
+ fieldRequired = header
+ trim = 1
+ field = header
+ htmlSpecialChars = 1
+ noBlur = 1
+ wrap = <li class="csc-section">|</li>
+ typolink.parameter.field = pid
+ typolink.section.field = uid
+ }
+ }
+
+ # "Menu of subpages to these pages (with abstract)"
+ 4 < .1
+ 4 {
+ wrap = <dl class="csc-menu csc-menu-4">|</dl>
+ 1.NO {
+ wrapItemAndSub >
+ linkWrap = <dt>|</dt>
+ after {
+ data = field : abstract // field : description // field : subtitle
+ required = 1
+ htmlSpecialChars = 1
+ wrap = <dd>|</dd>
+ }
+ ATagTitle.field = description // title
+ }
+ }
+
+ # "Recently updated pages"
+ 5 < .default
+ 5 {
+ wrap = <ul class="csc-menu csc-menu-5">|</ul>
+ special = updated
+ special {
+ maxAge = 3600*24*7
+ excludeNoSearchPages = 1
+ }
+ }
+
+ # "Related pages (based on keywords)"
+ 6 < .default
+ 6 {
+ wrap = <ul class="csc-menu csc-menu-6">|</ul>
+ special = keywords
+ special {
+ excludeNoSearchPages = 1
+ }
+ }
+
+ # "Menu of subpages to these pages + sections - liststyle"
+ 7 < .1
+ 7 {
+ wrap = <ul class="csc-menu csc-menu-7">|</ul>
+ 1.expAll = 1
+ 2 < .1
+ 2 {
+ sectionIndex = 1
+ sectionIndex.type = header
+ wrap = <ul>|</ul>
+ NO.wrapItemAndSub = <li class="csc-section">|</li>
+ }
+ }
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: menu_type, pages
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.menuSitemap
+
+ prefixComment = 2 | Menu/Sitemap element:
+ }
+}
+
+
+
+# ****************
+# CType: shortcut
+# ****************
+# Should be a complete copy from the old static template "content (default)"
+tt_content.shortcut = COA
+tt_content.shortcut {
+ 20 = CASE
+ 20.key.field = layout
+ 20.0= RECORDS
+ 20.0 {
+ source.field = records
+ tables = {$content.shortcut.tables}
+    # THESE are OLD plugins. Modern plugins register themselves automatically!
+ conf.tt_content = < tt_content
+ conf.tt_address = < tt_address
+ conf.tt_links = < tt_links
+ conf.tt_guest = < tt_guest
+ conf.tt_board = < tt_board
+ conf.tt_calender = < tt_calender
+ conf.tt_rating < tt_rating
+ conf.tt_products = < tt_products
+ conf.tt_news = < tt_news
+ conf.tt_poll = < plugin.tt_poll
+ }
+ 20.1= RECORDS
+ 20.1 {
+ source.field = records
+ tables = {$content.shortcut.tables}
+ conf.tt_poll = < plugin.tt_poll
+ conf.tt_poll.code = RESULT,SUBMITTEDVOTE
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: records
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.recordList
+
+ prefixComment = 2 | Inclusion of other records (by reference):
+ }
+}
+
+
+# ****************
+# CType: list
+# ****************
+# Should be a complete copy from the old static template "content (default)" (except "lib.stdheader")
+tt_content.list = COA
+tt_content.list {
+ 10 = < lib.stdheader
+
+ 20 = CASE
+ 20.key.field = list_type
+ 20 {
+ # LIST element references (NOT copy of objects!)
+    # THESE are OLD plugins. Modern plugins register themselves automatically!
+ 3 = CASE
+ 3.key.field = layout
+ 3.0 = < plugin.tt_guest
+
+ 4 = CASE
+ 4.key.field = layout
+ 4.0 = < plugin.tt_board_list
+ 4.1 = < plugin.tt_board_tree
+
+ 2 = CASE
+ 2.key.field = layout
+ 2.0 = < plugin.tt_board_tree
+
+ 5 = CASE
+ 5.key.field = layout
+ 5.0 = < plugin.tt_products
+
+ 7 = CASE
+ 7.key.field = layout
+ 7.0 = < plugin.tt_calender
+
+ 8 = CASE
+ 8.key.field = layout
+ 8.0 = < plugin.tt_rating
+
+ 9 = CASE
+ 9.key.field = layout
+ 9.0 = < plugin.tt_news
+
+ 11 = CASE
+ 11.key.field = layout
+ 11.0 = < plugin.tipafriend
+
+ 20 = CASE
+ 20.key.field = layout
+ 20.0 = < plugin.feadmin.fe_users
+
+ 21 = CASE
+ 21.key.field = layout
+ 21.0 = < plugin.feadmin.dmailsubscription
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: list_type, layout, select_key, pages [recursive]
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.plugin
+
+ prefixComment = 2 | Plugin inserted:
+ }
+}
+
+
+# ****************
+# CType: script
+# ****************
+# OBSOLETE! Please make extensions instead. The "script" content element was meant for these custom purposes in the past. Today extensions will do the job better.
+tt_content.script = TEXT
+tt_content.script {
+ value =
+
+ prefixComment = 2 | Script element (Deprecated)
+}
+
+
+# ****************
+# CType: div
+# ****************
+tt_content.div = TEXT
+tt_content.div {
+ value = <hr />
+ wrap = <div class="divider">|</div>
+ prefixComment = 2 | Div element
+}
+
+
+# ****************
+# CType: html
+# ****************
+# This truly IS a content object, launched from inside the PHP class of course.
+# Should be a complete copy from the old static template "content (default)"
+tt_content.html = TEXT
+tt_content.html {
+ field = bodytext
+
+ editIcons = tt_content: pages
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.html
+
+ prefixComment = 2 | Raw HTML content:
+}
+
+
+# ****************
+# Default error msg:
+# ****************
+tt_content.default = TEXT
+tt_content.default {
+ field = CType
+ wrap = <p style="background-color: yellow;"><b>ERROR:</b> Content Element type "|" has no rendering definition!</p>
+
+ prefixComment = 2 | Unknown element message:
+}
+
+# *********************************************************************
+# ACCESSIBILITY MODE
+# *********************************************************************
+
+
+
+
+
+
+
+plugin.tx_cssstyledcontent._CSS_DEFAULT_STYLE (
+ /* Captions */
+ DIV.csc-textpic-caption-c .csc-textpic-caption { text-align: center; }
+ DIV.csc-textpic-caption-r .csc-textpic-caption { text-align: right; }
+ DIV.csc-textpic-caption-l .csc-textpic-caption { text-align: left; }
+
+ /* Needed for noRows setting */
+ DIV.csc-textpic DIV.csc-textpic-imagecolumn { float: left; display: inline; }
+
+ /* Border just around the image */
+ {$styles.content.imgtext.borderSelector} {
+ border: {$styles.content.imgtext.borderThick}px solid {$styles.content.imgtext.borderColor};
+ padding: {$styles.content.imgtext.borderSpace}px {$styles.content.imgtext.borderSpace}px;
+ }
+
+ DIV.csc-textpic-imagewrap { padding: 0; }
+
+ DIV.csc-textpic IMG { border: none; }
+
+ /* DIV: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DIV.csc-textpic-image { float: left; }
+
+ /* UL: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap UL { list-style: none; margin: 0; padding: 0; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap UL LI { float: left; margin: 0; padding: 0; }
+
+ /* DL: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image { float: left; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DT { float: none; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DD { float: none; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DD IMG { border: none; } /* FE-Editing Icons */
+ DL.csc-textpic-image { margin: 0; }
+ DL.csc-textpic-image DT { margin: 0; display: inline; }
+ DL.csc-textpic-image DD { margin: 0; }
+
+ /* Clearer */
+ DIV.csc-textpic-clear { clear: both; }
+
+ /* Margins around images: */
+
+ /* Pictures on left, add margin on right */
+ DIV.csc-textpic-left DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-right: {$styles.content.imgtext.colSpace}px;
+ }
+
+ /* Pictures on right, add margin on left */
+ DIV.csc-textpic-right DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-left: {$styles.content.imgtext.colSpace}px;
+ }
+
+ /* Pictures centered, add margin on left */
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-left: {$styles.content.imgtext.colSpace}px;
+ }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image .csc-textpic-caption { margin: 0; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image IMG { margin: 0; vertical-align:bottom; }
+
+ /* Space below each image (also in-between rows) */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+ DIV.csc-textpic-equalheight DIV.csc-textpic-imagerow { margin-bottom: {$styles.content.imgtext.rowSpace}px; display: block; }
+ DIV.csc-textpic DIV.csc-textpic-imagerow { clear: both; }
+ DIV.csc-textpic DIV.csc-textpic-single-image IMG { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+
+ /* IE7 hack for margin between image rows */
+ *+html DIV.csc-textpic DIV.csc-textpic-imagerow .csc-textpic-image { margin-bottom: 0; }
+ *+html DIV.csc-textpic DIV.csc-textpic-imagerow { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+
+ /* No margins around the whole image-block */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-firstcol { margin-left: 0px !important; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-lastcol { margin-right: 0px !important; }
+
+ /* Add margin from image-block to text (in case of "Text w/ images") */
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap,
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap {
+ margin-right: {$styles.content.imgtext.textMargin}px !important;
+ }
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap,
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap {
+ margin-left: {$styles.content.imgtext.textMargin}px !important;
+ }
+
+ /* Positioning of images: */
+
+ /* Above */
+ DIV.csc-textpic-above DIV.csc-textpic-text { clear: both; }
+
+ /* Center (above or below) */
+ DIV.csc-textpic-center { text-align: center; /* IE-hack */ }
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap { margin: 0 auto; }
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap .csc-textpic-image { text-align: left; /* Remove IE-hack */ }
+ DIV.csc-textpic-center DIV.csc-textpic-text { text-align: left; /* Remove IE-hack */ }
+
+ /* Right (above or below) */
+ DIV.csc-textpic-right DIV.csc-textpic-imagewrap { float: right; }
+ DIV.csc-textpic-right DIV.csc-textpic-text { clear: right; }
+
+ /* Left (above or below) */
+ DIV.csc-textpic-left DIV.csc-textpic-imagewrap { float: left; }
+ DIV.csc-textpic-left DIV.csc-textpic-text { clear: left; }
+
+ /* Left (in text) */
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap { float: left; }
+
+ /* Right (in text) */
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap { float: right; }
+
+ /* Right (in text, no wrap around) */
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap { float: right; clear: both; }
+ /* Hide from IE5-mac. Only IE-win sees this. \*/
+ * html DIV.csc-textpic-intext-right-nowrap .csc-textpic-text { height: 1%; }
+ /* End hide from IE5/mac */
+
+ /* Left (in text, no wrap around) */
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap { float: left; clear: both; }
+ /* Hide from IE5-mac. Only IE-win sees this. \*/
+ * html DIV.csc-textpic-intext-left-nowrap .csc-textpic-text,
+ * html .csc-textpic-intext-left ol,
+ * html .csc-textpic-intext-left ul { height: 1%; }
+ /* End hide from IE5/mac */
+
+ DIV.csc-textpic DIV.csc-textpic-imagerow-last { margin-bottom: 0; }
+
+ /* Browser fixes: */
+
+ /* Fix for unordered and ordered list with image "In text, left" */
+ .csc-textpic-intext-left ol, .csc-textpic-intext-left ul {padding-left: 40px; overflow: auto; }
+)
+
+# TYPO3 SVN ID: $Id$
+
diff --git a/tests/examplefiles/varnish.vcl b/tests/examplefiles/varnish.vcl
new file mode 100644
index 00000000..6258c313
--- /dev/null
+++ b/tests/examplefiles/varnish.vcl
@@ -0,0 +1,187 @@
+# This is the VCL configuration Varnish will automatically append to your VCL
+# file during compilation/loading. See the vcl(7) man page for details on syntax
+# and semantics.
+# New users are recommended to use the example.vcl file as a starting point.
+
+vcl 4.0;
+
+backend foo { .host = "192.168.1.1"; }
+
+probe blatti { .url = "foo"; }
+probe fooy {
+ .url = "beh";
+
+}
+
+acl foo {
+ "192.168.1.1";
+ "192.168.0.0"/24;
+ ! "192.168.0.1";
+}
+
+include "foo.vcl";
+
+import std;
+
+sub vcl_init {
+ new b = director.foo();
+}
+
+sub vcl_recv {
+ ban(req.url ~ "foo");
+ rollback();
+}
+sub vcl_recv {
+ if (req.method == "PRI") {
+ /* We do not support SPDY or HTTP/2.0 */
+ return (synth(405));
+ }
+ if (req.method != "GET" &&
+ req.method != "HEAD" &&
+ req.method != "PUT" &&
+ req.method != "POST" &&
+ req.method != "TRACE" &&
+ req.method != "OPTIONS" &&
+ req.method != "DELETE") {
+ /* Non-RFC2616 or CONNECT which is weird. */
+ return (pipe);
+ }
+
+ if (req.method != "GET" && req.method != "HEAD") {
+ /* We only deal with GET and HEAD by default */
+ return (pass);
+ }
+ if (req.http.Authorization || req.http.Cookie) {
+ /* Not cacheable by default */
+ return (pass);
+ }
+ return (hash);
+}
+
+sub vcl_pipe {
+ # By default Connection: close is set on all piped requests, to stop
+ # connection reuse from sending future requests directly to the
+ # (potentially) wrong backend. If you do want this to happen, you can undo
+ # it here.
+ # unset bereq.http.connection;
+ return (pipe);
+}
+
+sub vcl_pass {
+ return (fetch);
+}
+
+sub vcl_hash {
+ hash_data(req.url);
+ if (req.http.host) {
+ hash_data(req.http.host);
+ } else {
+ hash_data(server.ip);
+ }
+ return (lookup);
+}
+
+sub vcl_purge {
+ return (synth(200, "Purged"));
+}
+
+sub vcl_hit {
+ if (obj.ttl >= 0s) {
+ // A pure unadultered hit, deliver it
+ return (deliver);
+ }
+ if (obj.ttl + obj.grace > 0s) {
+ // Object is in grace, deliver it
+ // Automatically triggers a background fetch
+ return (deliver);
+ }
+ // fetch & deliver once we get the result
+ return (miss);
+}
+
+sub vcl_miss {
+ return (fetch);
+}
+
+sub vcl_deliver {
+ set resp.http.x-storage = storage.s0.free;
+ return (deliver);
+}
+
+/*
+ * We can come here "invisibly" with the following errors: 413, 417 & 503
+ */
+sub vcl_synth {
+ set resp.http.Content-Type = "text/html; charset=utf-8";
+ set resp.http.Retry-After = "5";
+ synthetic( {"<!DOCTYPE html>
+<html>
+ <head>
+ <title>"} + resp.status + " " + resp.reason + {"</title>
+ </head>
+ <body>
+ <h1>Error "} + resp.status + " " + resp.reason + {"</h1>
+ <p>"} + resp.reason + {"</p>
+ <h3>Guru Meditation:</h3>
+ <p>XID: "} + req.xid + {"</p>
+ <hr>
+ <p>Varnish cache server</p>
+ </body>
+</html>
+"} );
+ return (deliver);
+}
+
+#######################################################################
+# Backend Fetch
+
+sub vcl_backend_fetch {
+ return (fetch);
+}
+
+sub vcl_backend_response {
+ if (beresp.ttl <= 0s ||
+ beresp.http.Set-Cookie ||
+ beresp.http.Surrogate-control ~ "no-store" ||
+ (!beresp.http.Surrogate-Control &&
+ beresp.http.Cache-Control ~ "no-cache|no-store|private") ||
+ beresp.http.Vary == "*") {
+ /*
+ * Mark as "Hit-For-Pass" for the next 2 minutes
+ */
+ set beresp.ttl = 120s;
+ set beresp.uncacheable = true;
+ }
+ return (deliver);
+}
+
+sub vcl_backend_error {
+ set beresp.http.Content-Type = "text/html; charset=utf-8";
+ set beresp.http.Retry-After = "5";
+ synthetic( {"<!DOCTYPE html>
+<html>
+ <head>
+ <title>"} + beresp.status + " " + beresp.reason + {"</title>
+ </head>
+ <body>
+ <h1>Error "} + beresp.status + " " + beresp.reason + {"</h1>
+ <p>"} + beresp.reason + {"</p>
+ <h3>Guru Meditation:</h3>
+ <p>XID: "} + bereq.xid + {"</p>
+ <hr>
+ <p>Varnish cache server</p>
+ </body>
+</html>
+"} );
+ return (deliver);
+}
+
+#######################################################################
+# Housekeeping
+
+sub vcl_init {
+}
+
+sub vcl_fini {
+ return (ok);
+}
diff --git a/tests/examplefiles/wdiff_example1.wdiff b/tests/examplefiles/wdiff_example1.wdiff
new file mode 100644
index 00000000..ca760812
--- /dev/null
+++ b/tests/examplefiles/wdiff_example1.wdiff
@@ -0,0 +1,731 @@
+.. -*- mode: rst -*-
+
+{+.. highlight:: python+}
+
+====================
+Write your own lexer
+====================
+
+If a lexer for your favorite language is missing in the Pygments package, you
+can easily write your own and extend Pygments.
+
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation <api>`, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to [-parse.-] {+lex.+}
+
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
+containing tuples in the form ``(index, token, value)``. Normally you don't
+need to do this since there are [-numerous-] base lexers {+that do most of the work and that+}
+you can subclass.
+
+
+RegexLexer
+==========
+
+[-A very powerful (but quite easy to use)-]
+
+{+The+} lexer {+base class used by almost all of Pygments' lexers+} is the
+:class:`RegexLexer`. This
+[-lexer base-] class allows you to define lexing rules in terms of
+*regular expressions* for different *states*.
+
+States are groups of regular expressions that are matched against the input
+string at the *current position*. If one of these expressions matches, a
+corresponding action is performed [-(normally-] {+(such as+} yielding a token with a specific
+[-type),-]
+{+type, or changing state),+} the current position is set to where the last match
+ended and the matching process continues with the first regex of the current
+state.
+
+Lexer states are kept [-in-] {+on+} a [-state-] stack: each time a new state is entered, the new
+state is pushed onto the stack. The most basic lexers (like the `DiffLexer`)
+just need one state.
+
+Each state is defined as a list of tuples in the form (`regex`, `action`,
+`new_state`) where the last item is optional. In the most basic form, `action`
+is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a
+token with the match text and type `tokentype` and push `new_state` on the state
+stack. If the new state is ``'#pop'``, the topmost state is popped from the
+stack instead. [-(To-] {+To+} pop more than one state, use ``'#pop:2'`` and so [-on.)-] {+on.+}
+``'#push'`` is a synonym for pushing the current state on the stack.
+
+The following example shows the `DiffLexer` from the builtin lexers. Note that
+it contains some additional attributes `name`, `aliases` and `filenames` which
+aren't required for a lexer. They are used by the builtin lexer lookup
+functions.
+
+[-.. sourcecode:: python-] {+::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class DiffLexer(RegexLexer):
+ name = 'Diff'
+ aliases = ['diff']
+ filenames = ['*.diff']
+
+ tokens = {
+ 'root': [
+ (r' .*\n', Text),
+ (r'\+.*\n', Generic.Inserted),
+ (r'-.*\n', Generic.Deleted),
+ (r'@.*\n', Generic.Subheading),
+ (r'Index.*\n', Generic.Heading),
+ (r'=.*\n', Generic.Heading),
+ (r'.*\n', Text),
+ ]
+ }
+
+As you can see this lexer only uses one state. When the lexer starts scanning
+the text, it first checks if the current character is a space. If this is true
+it scans everything until newline and returns the [-parsed-] data as {+a+} `Text` [-token.-] {+token (which
+is the "no special highlighting" token).+}
+
+If this rule doesn't match, it checks if the current char is a plus sign. And
+so on.
+
+If no rule matches at the current position, the current char is emitted as an
+`Error` token that indicates a [-parsing-] {+lexing+} error, and the position is increased by
+[-1.-]
+{+one.+}
+
+
+Adding and testing a new lexer
+==============================
+
+To make [-pygments-] {+Pygments+} aware of your new lexer, you have to perform the following
+steps:
+
+First, change to the current directory containing the [-pygments-] {+Pygments+} source code:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ cd .../pygments-main
+
+{+Select a matching module under ``pygments/lexers``, or create a new module for
+your lexer class.+}
+
+Next, make sure the lexer is known from outside of the module. All modules in
+the ``pygments.lexers`` specify ``__all__``. For example, [-``other.py`` sets:
+
+.. sourcecode:: python-] {+``esoteric.py`` sets::+}
+
+ __all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+Simply add the name of your lexer class to this list.
+
+Finally the lexer can be made [-publically-] {+publicly+} known by rebuilding the lexer mapping:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ make mapfiles
+
+To test the new lexer, store an example file with the proper extension in
+``tests/examplefiles``. For example, to test your ``DiffLexer``, add a
+``tests/examplefiles/example.diff`` containing a sample diff output.
+
+Now you can use pygmentize to render your example to HTML:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff
+
+Note that this [-explicitely-] {+explicitly+} calls the ``pygmentize`` in the current directory
+by preceding it with ``./``. This ensures your modifications are used.
+Otherwise a possibly already installed, unmodified version without your new
+lexer would have been called from the system search path (``$PATH``).
+
+To view the result, open ``/tmp/example.html`` in your browser.
+
+Once the example renders as expected, you should run the complete test suite:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ make test
+
+{+It also tests that your lexer fulfills the lexer API and certain invariants,
+such as that the concatenation of all token text is the same as the input text.+}
+
+
+Regex Flags
+===========
+
+You can either define regex flags {+locally+} in the regex (``r'(?x)foo bar'``) or
+{+globally+} by adding a `flags` attribute to your lexer class. If no attribute is
+defined, it defaults to `re.MULTILINE`. For more [-informations-] {+information+} about regular
+expression flags see the {+page about+} `regular expressions`_ [-help page-] in the [-python-] {+Python+}
+documentation.
+
+.. _regular expressions: [-http://docs.python.org/lib/re-syntax.html-] {+http://docs.python.org/library/re.html#regular-expression-syntax+}
+
+
+Scanning multiple tokens at once
+================================
+
+{+So far, the `action` element in the rule tuple of regex, action and state has
+been a single token type. Now we look at the first of several other possible
+values.+}
+
+Here is a more complex lexer that highlights INI files. INI files consist of
+sections, comments and [-key-] {+``key+} = [-value pairs:
+
+.. sourcecode:: python-] {+value`` pairs::+}
+
+ from pygments.lexer import RegexLexer, bygroups
+ from pygments.token import *
+
+ class IniLexer(RegexLexer):
+ name = 'INI'
+ aliases = ['ini', 'cfg']
+ filenames = ['*.ini', '*.cfg']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r';.*?$', Comment),
+ (r'\[.*?\]$', Keyword),
+ (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+ bygroups(Name.Attribute, Text, Operator, Text, String))
+ ]
+ }
+
+The lexer first looks for whitespace, comments and section names. [-And later-] {+Later+} it
+looks for a line that looks like a key, value pair, separated by an ``'='``
+sign, and optional whitespace.
+
+The `bygroups` helper [-makes sure that-] {+yields+} each {+capturing+} group [-is yielded-] {+in the regex+} with a different
+token type. First the `Name.Attribute` token, then a `Text` token for the
+optional whitespace, after that a `Operator` token for the equals sign. Then a
+`Text` token for the whitespace again. The rest of the line is returned as
+`String`.
+
+Note that for this to work, every part of the match must be inside a capturing
+group (a ``(...)``), and there must not be any nested capturing groups. If you
+nevertheless need a group, use a non-capturing group defined using this syntax:
+[-``r'(?:some|words|here)'``-]
+{+``(?:some|words|here)``+} (note the ``?:`` after the beginning parenthesis).
+
+If you find yourself needing a capturing group inside the regex which shouldn't
+be part of the output but is used in the regular expressions for backreferencing
+(eg: ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None` to the bygroups
+function and [-it will skip-] that group will be skipped in the output.
+
+
+Changing states
+===============
+
+Many lexers need multiple states to work as expected. For example, some
+languages allow multiline comments to be nested. Since this is a recursive
+pattern it's impossible to lex just using regular expressions.
+
+Here is [-the solution:
+
+.. sourcecode:: python-] {+a lexer that recognizes C++ style comments (multi-line with ``/* */``
+and single-line with ``//`` until end of line)::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class [-ExampleLexer(RegexLexer):-] {+CppCommentLexer(RegexLexer):+}
+ name = 'Example Lexer with states'
+
+ tokens = {
+ 'root': [
+ (r'[^/]+', Text),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'//.*?$', Comment.Singleline),
+ (r'/', Text)
+ ],
+ 'comment': [
+ (r'[^*/]', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ]
+ }
+
+This lexer starts lexing in the ``'root'`` state. It tries to match as much as
+possible until it finds a slash (``'/'``). If the next character after the slash
+is [-a star-] {+an asterisk+} (``'*'``) the `RegexLexer` sends those two characters to the
+output stream marked as `Comment.Multiline` and continues [-parsing-] {+lexing+} with the rules
+defined in the ``'comment'`` state.
+
+If there wasn't [-a star-] {+an asterisk+} after the slash, the `RegexLexer` checks if it's a
+[-singleline-]
+{+Singleline+} comment [-(eg:-] {+(i.e.+} followed by a second slash). If this also wasn't the
+case it must be a single [-slash-] {+slash, which is not a comment starter+} (the separate
+regex for a single slash must also be given, else the slash would be marked as
+an error token).
+
+Inside the ``'comment'`` state, we do the same thing again. Scan until the
+lexer finds a star or slash. If it's the opening of a multiline comment, push
+the ``'comment'`` state on the stack and continue scanning, again in the
+``'comment'`` state. Else, check if it's the end of the multiline comment. If
+yes, pop one state from the stack.
+
+Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
+easy way to prevent this from happening: don't ``'#pop'`` in the root state).
+
+If the `RegexLexer` encounters a newline that is flagged as an error token, the
+stack is emptied and the lexer continues scanning in the ``'root'`` state. This
+[-helps-]
+{+can help+} producing error-tolerant highlighting for erroneous input, e.g. when a
+single-line string is not closed.
+
+
+Advanced state tricks
+=====================
+
+There are a few more things you can do with states:
+
+- You can push multiple states onto the stack if you give a tuple instead of a
+ simple string as the third item in a rule tuple. For example, if you want to
+ match a comment containing a directive, something [-like::-] {+like:
+
+ .. code-block:: text+}
+
+ /* <processing directive> rest of comment */
+
+ you can use this [-rule:
+
+ .. sourcecode:: python-] {+rule::+}
+
+ tokens = {
+ 'root': [
+ (r'/\* <', Comment, ('comment', 'directive')),
+ ...
+ ],
+ 'directive': [
+ (r'[^>]*', Comment.Directive),
+ (r'>', Comment, '#pop'),
+ ],
+ 'comment': [
+ (r'[^*]+', Comment),
+ (r'\*/', Comment, '#pop'),
+ (r'\*', Comment),
+ ]
+ }
+
+ When this encounters the above sample, first ``'comment'`` and ``'directive'``
+ are pushed onto the stack, then the lexer continues in the directive state
+ until it finds the closing ``>``, then it continues in the comment state until
+ the closing ``*/``. Then, both states are popped from the stack again and
+ lexing continues in the root state.
+
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
+
+
+- You can include the rules of a state in the definition of another. This is
+ done by using `include` from [-`pygments.lexer`:
+
+ .. sourcecode:: python-] {+`pygments.lexer`::+}
+
+ from pygments.lexer import RegexLexer, bygroups, include
+ from pygments.token import *
+
+ class ExampleLexer(RegexLexer):
+ tokens = {
+ 'comments': [
+ (r'/\*.*?\*/', Comment),
+ (r'//.*?\n', Comment),
+ ],
+ 'root': [
+ include('comments'),
+ (r'(function )(\w+)( {)',
+ bygroups(Keyword, Name, Keyword), 'function'),
+ (r'.', Text),
+ ],
+ 'function': [
+ (r'[^}/]+', Text),
+ include('comments'),
+ (r'/', Text),
+ [-(r'}',-]
+ {+(r'\}',+} Keyword, '#pop'),
+ ]
+ }
+
+ This is a hypothetical lexer for a language that consist of functions and
+ comments. Because comments can occur at toplevel and in functions, we need
+ rules for comments in both states. As you can see, the `include` helper saves
+ repeating rules that occur more than once (in this example, the state
+ ``'comment'`` will never be entered by the lexer, as it's only there to be
+ included in ``'root'`` and ``'function'``).
+
+- Sometimes, you may want to "combine" a state from existing ones. This is
+ possible with the [-`combine`-] {+`combined`+} helper from `pygments.lexer`.
+
+ If you, instead of a new state, write ``combined('state1', 'state2')`` as the
+ third item of a rule tuple, a new anonymous state will be formed from state1
+ and state2 and if the rule matches, the lexer will enter this state.
+
+ This is not used very often, but can be helpful in some cases, such as the
+ `PythonLexer`'s string literal processing.
+
+- If you want your lexer to start lexing in a different state you can modify the
+ stack by [-overloading-] {+overriding+} the `get_tokens_unprocessed()` [-method:
+
+ .. sourcecode:: python-] {+method::+}
+
+ from pygments.lexer import RegexLexer
+
+ class [-MyLexer(RegexLexer):-] {+ExampleLexer(RegexLexer):+}
+ tokens = {...}
+
+ def get_tokens_unprocessed(self, [-text):
+ stack = ['root', 'otherstate']-] {+text, stack=('root', 'otherstate')):+}
+ for item in RegexLexer.get_tokens_unprocessed(text, stack):
+ yield item
+
+ Some lexers like the `PhpLexer` use this to make the leading ``<?php``
+ preprocessor comments optional. Note that you can crash the lexer easily by
+ putting values into the stack that don't exist in the token map. Also
+ removing ``'root'`` from the stack can result in strange errors!
+
+- [-An-] {+In some lexers, a state should be popped if anything is encountered that isn't
+ matched by a rule in the state. You could use an+} empty regex at the end of [-a-]
+ {+the+} state list, [-combined with ``'#pop'``, can
+ act as-] {+but Pygments provides+} a [-return point-] {+more obvious way of spelling that:
+ ``default('#pop')`` is equivalent to ``('', Text, '#pop')``.
+
+ .. versionadded:: 2.0
+
+
+Subclassing lexers derived+} from {+RegexLexer
+==========================================
+
+.. versionadded:: 1.6
+
+Sometimes multiple languages are very similar, but should still be lexed by
+different lexer classes.
+
+When subclassing+} a {+lexer derived from RegexLexer, the ``tokens`` dictionaries
+defined in the parent and child class are merged. For example::
+
+ from pygments.lexer import RegexLexer, inherit
+ from pygments.token import *
+
+ class BaseLexer(RegexLexer):
+ tokens = {
+ 'root': [
+ ('[a-z]+', Name),
+ (r'/\*', Comment, 'comment'),
+ ('"', String, 'string'),
+ ('\s+', Text),
+ ],
+ 'string': [
+ ('[^"]+', String),
+ ('"', String, '#pop'),
+ ],
+ 'comment': [
+ ...
+ ],
+ }
+
+ class DerivedLexer(BaseLexer):
+ tokens = {
+ 'root': [
+ ('[0-9]+', Number),
+ inherit,
+ ],
+ 'string': [
+ (r'[^"\\]+', String),
+ (r'\\.', String.Escape),
+ ('"', String, '#pop'),
+ ],
+ }
+
+The `BaseLexer` defines two states, lexing names and strings. The
+`DerivedLexer` defines its own tokens dictionary, which extends the definitions
+of the base lexer:
+
+* The "root"+} state {+has an additional rule and then the special object `inherit`,
+ which tells Pygments to insert the token definitions of the parent class at+}
+ that [-doesn't have a clear end marker.-] {+point.
+
+* The "string" state is replaced entirely, since there is not `inherit` rule.
+
+* The "comment" state is inherited entirely.+}
+
+
+Using multiple lexers
+=====================
+
+Using multiple lexers for the same input can be tricky. One of the easiest
+combination techniques is shown here: You can replace the [-token type-] {+action+} entry in a rule
+tuple [-(the second item)-] with a lexer class. The matched text will then be lexed with that lexer,
+and the resulting tokens will be yielded.
+
+For example, look at this stripped-down HTML [-lexer:
+
+.. sourcecode:: python-] {+lexer::+}
+
+ from pygments.lexer import RegexLexer, bygroups, using
+ from pygments.token import *
+ from [-pygments.lexers.web-] {+pygments.lexers.javascript+} import JavascriptLexer
+
+ class HtmlLexer(RegexLexer):
+ name = 'HTML'
+ aliases = ['html']
+ filenames = ['*.html', '*.htm']
+
+ flags = re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ ('[^<&]+', Text),
+ ('&.*?;', Name.Entity),
+ (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+ (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+ ],
+ 'script-content': [
+ (r'(.+?)(<\s*/\s*script\s*>)',
+ bygroups(using(JavascriptLexer), Name.Tag),
+ '#pop'),
+ ]
+ }
+
+Here the content of a ``<script>`` tag is passed to a newly created instance of
+a `JavascriptLexer` and not processed by the `HtmlLexer`. This is done using
+the `using` helper that takes the other lexer class as its parameter.
+
+Note the combination of `bygroups` and `using`. This makes sure that the
+content up to the ``</script>`` end tag is processed by the `JavascriptLexer`,
+while the end tag is yielded as a normal token with the `Name.Tag` type.
+
+[-As an additional goodie, if the lexer class is replaced by `this` (imported from
+`pygments.lexer`), the "other" lexer will be the current one (because you cannot
+refer to the current class within the code that runs at class definition time).-]
+
+Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
+Here, two states are pushed onto the state stack, ``'script-content'`` and
+``'tag'``. That means that first ``'tag'`` is processed, which will [-parse-] {+lex+}
+attributes and the closing ``>``, then the ``'tag'`` state is popped and the
+next state on top of the stack will be ``'script-content'``.
+
+{+Since you cannot refer to the class currently being defined, use `this`
+(imported from `pygments.lexer`) to refer to the current lexer class, i.e.
+``using(this)``. This construct may seem unnecessary, but this is often the
+most obvious way of lexing arbitrary syntax between fixed delimiters without
+introducing deeply nested states.+}
+
+The `using()` helper has a special keyword argument, `state`, which works as
+follows: if given, the lexer to use initially is not in the ``"root"`` state,
+but in the state given by this argument. This [-*only* works-] {+does not work+} with [-a `RegexLexer`.-] {+advanced
+`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below).+}
+
+Any other keywords arguments passed to `using()` are added to the keyword
+arguments used to create the lexer.
+
+
+Delegating Lexer
+================
+
+Another approach for nested lexers is the `DelegatingLexer` which is for example
+used for the template engine lexers. It takes two lexers as arguments on
+initialisation: a `root_lexer` and a `language_lexer`.
+
+The input is processed as follows: First, the whole text is lexed with the
+`language_lexer`. All tokens yielded with [-a-] {+the special+} type of ``Other`` are
+then concatenated and given to the `root_lexer`. The language tokens of the
+`language_lexer` are then inserted into the `root_lexer`'s token stream at the
+appropriate positions.
+
+[-.. sourcecode:: python-] {+::+}
+
+ from pygments.lexer import DelegatingLexer
+ from pygments.lexers.web import HtmlLexer, PhpLexer
+
+ class HtmlPhpLexer(DelegatingLexer):
+ def __init__(self, **options):
+ super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
+
+This procedure ensures that e.g. HTML with template tags in it is highlighted
+correctly even if the template tags are put into HTML tags or attributes.
+
+If you want to change the needle token ``Other`` to something else, you can give
+the lexer another token type as the third [-parameter:
+
+.. sourcecode:: python-] {+parameter::+}
+
+ DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
+
+
+Callbacks
+=========
+
+Sometimes the grammar of a language is so complex that a lexer would be unable
+to [-parse-] {+process+} it just by using regular expressions and stacks.
+
+For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
+of token types (`bygroups` and `using` are nothing else but preimplemented
+callbacks). The callback must be a function taking two arguments:
+
+* the lexer itself
+* the match object for the last matched rule
+
+The callback must then return an iterable of (or simply yield) ``(index,
+tokentype, value)`` tuples, which are then just passed through by
+`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
+the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
+and ``value`` the associated part of the input string.
+
+You can see an example [-here:
+
+.. sourcecode:: python-] {+here::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import Generic
+
+ class HypotheticLexer(RegexLexer):
+
+ def headline_callback(lexer, match):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+If the regex for the `headline_callback` matches, the function is called with
+the match object. Note that after the callback is done, processing continues
+normally, that is, after the end of the previous match. The callback has no
+possibility to influence the position.
+
+There are not really any simple examples for lexer callbacks, but you can see
+them in action e.g. in the [-`compiled.py`_ source code-] {+`SMLLexer` class+} in [-the `CLexer` and
+`JavaLexer` classes.-] {+`ml.py`_.+}
+
+.. [-_compiled.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/compiled.py-] {+_ml.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ml.py+}
+
+
+The ExtendedRegexLexer class
+============================
+
+The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
+the funky syntax rules of [-some-] languages [-that will go unnamed,-] such as Ruby.
+
+But fear not; even then you don't have to abandon the regular expression
+[-approach. For-]
+{+approach:+} Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
+All features known from RegexLexers are available here too, and the tokens are
+specified in exactly the same way, *except* for one detail:
+
+The `get_tokens_unprocessed()` method holds its internal state data not as local
+variables, but in an instance of the `pygments.lexer.LexerContext` class, and
+that instance is passed to callbacks as a third argument. This means that you
+can modify the lexer state in callbacks.
+
+The `LexerContext` class has the following members:
+
+* `text` -- the input text
+* `pos` -- the current starting position that is used for matching regexes
+* `stack` -- a list containing the state stack
+* `end` -- the maximum position to which regexes are matched, this defaults to
+ the length of `text`
+
+Additionally, the `get_tokens_unprocessed()` method can be given a
+`LexerContext` instead of a string and will then process this context instead of
+creating a new one for the string argument.
+
+Note that because you can set the current position to anything in the callback,
+it won't be automatically be set by the caller after the callback is finished.
+For example, this is how the hypothetical lexer above would be written with the
+[-`ExtendedRegexLexer`:
+
+.. sourcecode:: python-]
+{+`ExtendedRegexLexer`::+}
+
+ from pygments.lexer import ExtendedRegexLexer
+ from pygments.token import Generic
+
+ class ExHypotheticLexer(ExtendedRegexLexer):
+
+ def headline_callback(lexer, match, ctx):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+ ctx.pos = match.end()
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+This might sound confusing (and it can really be). But it is needed, and for an
+example look at the Ruby lexer in [-`agile.py`_.-] {+`ruby.py`_.+}
+
+.. [-_agile.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/agile.py
+
+
+Filtering-] {+_ruby.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ruby.py
+
+
+Handling Lists of Keywords
+==========================
+
+For a relatively short list (hundreds) you can construct an optimized regular
+expression directly using ``words()`` (longer lists, see next section). This
+function handles a few things for you automatically, including escaping
+metacharacters and Python's first-match rather than longest-match in
+alternations. Feel free to put the lists themselves in
+``pygments/lexers/_$lang_builtins.py`` (see examples there), and generated by
+code if possible.
+
+An example of using ``words()`` is something like::
+
+ from pygments.lexer import RegexLexer, words, Name
+
+ class MyLexer(RegexLexer):
+
+ tokens = {
+ 'root': [
+ (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+ (r'\w+', Name),
+ ],
+ }
+
+As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed
+regex.
+
+
+Modifying+} Token Streams
+=======================
+
+Some languages ship a lot of builtin functions (for example PHP). The total
+amount of those functions differs from system to system because not everybody
+has every extension installed. In the case of PHP there are over 3000 builtin
+functions. That's an [-incredible-] {+incredibly+} huge amount of functions, much more than you
+[-can-]
+{+want to+} put into a regular expression.
+
+But because only `Name` tokens can be function names [-it's-] {+this is+} solvable by
+overriding the ``get_tokens_unprocessed()`` method. The following lexer
+subclasses the `PythonLexer` so that it highlights some additional names as
+pseudo [-keywords:
+
+.. sourcecode:: python-] {+keywords::+}
+
+ from [-pygments.lexers.agile-] {+pygments.lexers.python+} import PythonLexer
+ from pygments.token import Name, Keyword
+
+ class MyPythonLexer(PythonLexer):
+ EXTRA_KEYWORDS = [-['foo',-] {+set(('foo',+} 'bar', 'foobar', 'barfoo', 'spam', [-'eggs']-] {+'eggs'))+}
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.EXTRA_KEYWORDS:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
+The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
+
+[-.. note:: Do not confuse this with the :doc:`filter <filters>` system.-]
diff --git a/tests/examplefiles/wdiff_example3.wdiff b/tests/examplefiles/wdiff_example3.wdiff
new file mode 100644
index 00000000..0bbd6d65
--- /dev/null
+++ b/tests/examplefiles/wdiff_example3.wdiff
@@ -0,0 +1,10 @@
+This example is unbalanced open-close.
+We can't treat these easily.
+
+{+ added? -]
+[- deleted? +}
+
+suddenly closed -]
+suddenly closed +}
+
+{+ added? [- deleted?
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index a82aaaf7..596d9fbc 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -116,7 +116,7 @@ class HtmlFormatterTest(unittest.TestCase):
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
- self.assertTrue(re.search("<pre><a name=\"foo-1\">", html))
+ self.assertTrue(re.search("<pre><span></span><a name=\"foo-1\">", html))
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
@@ -124,7 +124,7 @@ class HtmlFormatterTest(unittest.TestCase):
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
- self.assertTrue(re.search("<pre><a name=\"foo-5\">", html))
+ self.assertTrue(re.search("<pre><span></span><a name=\"foo-5\">", html))
def test_valid_output(self):
# test all available wrappers
@@ -192,3 +192,11 @@ class HtmlFormatterTest(unittest.TestCase):
fmt.format(tokensource, outfile)
self.assertTrue('<a href="test_html_formatter.py#L-165">test_ctags</a>'
in outfile.getvalue())
+
+ def test_filename(self):
+ optdict = dict(filename="test.py")
+ outfile = StringIO()
+ fmt = HtmlFormatter(**optdict)
+ fmt.format(tokensource, outfile)
+ html = outfile.getvalue()
+ self.assertTrue(re.search("<span class=\"filename\">test.py</span><pre>", html))
diff --git a/tests/test_java.py b/tests/test_java.py
index 33a64e99..f4096647 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -9,7 +9,7 @@
import unittest
-from pygments.token import Text, Name, Operator, Keyword
+from pygments.token import Text, Name, Operator, Keyword, Number
from pygments.lexers import JavaLexer
@@ -40,3 +40,39 @@ class JavaTest(unittest.TestCase):
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+ def testNumericLiterals(self):
+ fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
+ fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
+ tokens = [
+ (Number.Integer, '0'),
+ (Text, ' '),
+ (Number.Integer, '5L'),
+ (Text, ' '),
+ (Number.Integer, '9__542_72l'),
+ (Text, ' '),
+ (Number.Hex, '0xbEEf'),
+ (Text, ' '),
+ (Number.Hex, '0X9_A'),
+ (Text, ' '),
+ (Number.Oct, '0_35'),
+ (Text, ' '),
+ (Number.Oct, '01'),
+ (Text, ' '),
+ (Number.Bin, '0b0___101_0'),
+ (Text, ' '),
+ (Number.Float, '0.'),
+ (Text, ' '),
+ (Number.Float, '.7_17F'),
+ (Text, ' '),
+ (Number.Float, '3e-1_3d'),
+ (Text, ' '),
+ (Number.Float, '1f'),
+ (Text, ' '),
+ (Number.Float, '6_01.9e+3'),
+ (Text, ' '),
+ (Number.Float, '0x.1Fp3'),
+ (Text, ' '),
+ (Number.Float, '0XEP8D'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
index bb667c05..90d05ef8 100644
--- a/tests/test_lexers_other.py
+++ b/tests/test_lexers_other.py
@@ -13,6 +13,7 @@ import unittest
from pygments.lexers import guess_lexer
from pygments.lexers.scripting import EasytrieveLexer, JclLexer, RexxLexer
+
def _exampleFilePath(filename):
return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
@@ -28,8 +29,8 @@ class AnalyseTextTest(unittest.TestCase):
text = fp.read().decode('utf-8')
probability = lexer.analyse_text(text)
self.assertTrue(probability > 0,
- '%s must recognize %r' % (
- lexer.name, exampleFilePath))
+ '%s must recognize %r' % (
+ lexer.name, exampleFilePath))
guessedLexer = guess_lexer(text)
self.assertEqual(guessedLexer.name, lexer.name)
@@ -45,25 +46,24 @@ class AnalyseTextTest(unittest.TestCase):
class EasyTrieveLexerTest(unittest.TestCase):
def testCanGuessFromText(self):
- self.assertLess(0, EasytrieveLexer.analyse_text('MACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text('\nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(' \nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(' \n MACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text('*\nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(
+ self.assertTrue(EasytrieveLexer.analyse_text('MACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text('\nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(' \nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(' \n MACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text('*\nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(
'*\n *\n\n \n*\n MACRO'))
class RexxLexerTest(unittest.TestCase):
def testCanGuessFromText(self):
- self.assertAlmostEqual(0.01,
- RexxLexer.analyse_text('/* */'))
+ self.assertAlmostEqual(0.01, RexxLexer.analyse_text('/* */'))
self.assertAlmostEqual(1.0,
- RexxLexer.analyse_text('''/* Rexx */
+ RexxLexer.analyse_text('''/* Rexx */
say "hello world"'''))
val = RexxLexer.analyse_text('/* */\n'
- 'hello:pRoceduRe\n'
- ' say "hello world"')
+ 'hello:pRoceduRe\n'
+ ' say "hello world"')
self.assertTrue(val > 0.5, val)
val = RexxLexer.analyse_text('''/* */
if 1 > 0 then do
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 07337cd5..cb5c6c44 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -14,7 +14,13 @@ import re
from pygments.util import StringIO
from pygments.lexers.sql import PlPgsqlLexer
-from pygments.formatters import TerminalFormatter
+from pygments.formatters import TerminalFormatter, Terminal256Formatter, \
+ HtmlFormatter, LatexFormatter
+
+from pygments.style import Style
+from pygments.token import Token
+from pygments.lexers import Python3Lexer
+from pygments import highlight
DEMO_TEXT = '''\
-- comment
@@ -26,9 +32,11 @@ DEMO_TOKENS = list(DEMO_LEXER().get_tokens(DEMO_TEXT))
ANSI_RE = re.compile(r'\x1b[\w\W]*?m')
+
def strip_ansi(x):
return ANSI_RE.sub('', x)
+
class TerminalFormatterTest(unittest.TestCase):
def test_reasonable_output(self):
out = StringIO()
@@ -49,3 +57,46 @@ class TerminalFormatterTest(unittest.TestCase):
for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
self.assertTrue(a in b)
+
+
+class MyStyle(Style):
+ styles = {
+ Token.Comment: '#ansidarkgray',
+ Token.String: '#ansiblue bg:#ansidarkred',
+ Token.Number: '#ansigreen bg:#ansidarkgreen',
+ Token.Number.Hex: '#ansidarkgreen bg:#ansired',
+ }
+
+
+class Terminal256FormatterTest(unittest.TestCase):
+ code = '''
+# this should be a comment
+print("Hello World")
+async def function(a,b,c, *d, **kwarg:Bool)->Bool:
+ pass
+ return 123, 0xb3e3
+
+'''
+
+ def test_style_html(self):
+ style = HtmlFormatter(style=MyStyle).get_style_defs()
+ self.assertTrue('#555555' in style,
+ "ansigray for comment not html css style")
+
+ def test_others_work(self):
+ """check other formatters don't crash"""
+ highlight(self.code, Python3Lexer(), LatexFormatter(style=MyStyle))
+ highlight(self.code, Python3Lexer(), HtmlFormatter(style=MyStyle))
+
+ def test_256esc_seq(self):
+ """
+ test that a few escape sequences are actualy used when using #ansi<> color codes
+ """
+ def termtest(x):
+ return highlight(x, Python3Lexer(),
+ Terminal256Formatter(style=MyStyle))
+
+ self.assertTrue('32;41' in termtest('0x123'))
+ self.assertTrue('32;42' in termtest('123'))
+ self.assertTrue('30;01' in termtest('#comment'))
+ self.assertTrue('34;41' in termtest('"String"'))
diff --git a/tests/test_token.py b/tests/test_token.py
index c96bd9ef..0c6b02bf 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -7,6 +7,7 @@
:license: BSD, see LICENSE for details.
"""
+import copy
import unittest
from pygments import token
@@ -44,3 +45,10 @@ class TokenTest(unittest.TestCase):
for k, v in t.items():
if len(v) > 1:
self.fail("%r has more than one key: %r" % (k, v))
+
+ def test_copying(self):
+ # Token instances are supposed to be singletons, so copying or even
+ # deepcopying should return themselves
+ t = token.String
+ self.assertIs(t, copy.copy(t))
+ self.assertIs(t, copy.deepcopy(t))