summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnteru <bitbucket@ca.sh13.net>2019-05-21 16:32:04 +0000
committerAnteru <bitbucket@ca.sh13.net>2019-05-21 16:32:04 +0000
commit9c84bc6450663888e243268621dcf71eab18ed11 (patch)
tree15231faf42954c633413b5c06369da6b05949ed7
parentc0db1e260ea7e399a1f34bc9966914298e1b6b17 (diff)
parent8202e648945351366bd0c465d72953fae40f4783 (diff)
downloadpygments-9c84bc6450663888e243268621dcf71eab18ed11.tar.gz
Merged in lucatorella/pygments-main (pull request #813)
Add support for @import keyword in Objective-C
-rw-r--r--AUTHORS3
-rw-r--r--CHANGES31
-rw-r--r--LICENSE2
-rw-r--r--doc/languages.rst9
-rw-r--r--pygments/__init__.py2
-rw-r--r--pygments/console.py2
-rw-r--r--pygments/formatters/html.py44
-rw-r--r--pygments/lexer.py22
-rw-r--r--pygments/lexers/_mapping.py13
-rw-r--r--pygments/lexers/asm.py114
-rw-r--r--pygments/lexers/basic.py103
-rw-r--r--pygments/lexers/boa.py92
-rw-r--r--pygments/lexers/configs.py98
-rw-r--r--pygments/lexers/dotnet.py9
-rw-r--r--pygments/lexers/freefem.py967
-rw-r--r--pygments/lexers/javascript.py2
-rw-r--r--pygments/lexers/make.py4
-rw-r--r--pygments/lexers/pony.py94
-rw-r--r--pygments/lexers/prolog.py1
-rw-r--r--pygments/lexers/rdf.py7
-rw-r--r--pygments/lexers/rust.py2
-rw-r--r--pygments/lexers/shell.py2
-rw-r--r--pygments/lexers/slash.py187
-rw-r--r--pygments/lexers/teraterm.py158
-rw-r--r--pygments/lexers/theorem.py2
-rw-r--r--pygments/style.py4
-rwxr-xr-xsetup.py2
-rw-r--r--tests/examplefiles/apache2.conf5
-rw-r--r--tests/examplefiles/example.bbc156
-rw-r--r--tests/examplefiles/example.boa18
-rw-r--r--tests/examplefiles/example.pony18
-rw-r--r--tests/examplefiles/example.toml181
-rw-r--r--tests/examplefiles/freefem.edp94
-rw-r--r--tests/examplefiles/teraterm.ttl34
-rw-r--r--tests/test_asm.py30
-rw-r--r--tests/test_regexlexer.py24
-rw-r--r--tests/test_shell.py19
-rw-r--r--tests/test_terminal_formatter.py10
38 files changed, 2509 insertions, 56 deletions
diff --git a/AUTHORS b/AUTHORS
index ed9c547c..f7a7acad 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -132,6 +132,7 @@ Other contributors, listed alphabetically, are:
* Stephen McKamey -- Duel/JBST lexer
* Brian McKenna -- F# lexer
* Charles McLaughlin -- Puppet lexer
+* Kurt McKee -- Tera Term macro lexer
* Lukas Meuser -- BBCode formatter, Lua lexer
* Cat Miller -- Pig lexer
* Paul Miller -- LiveScript lexer
@@ -221,5 +222,7 @@ Other contributors, listed alphabetically, are:
* Rob Zimmerman -- Kal lexer
* Vincent Zurczak -- Roboconf lexer
* Rostyslav Golda -- FloScript lexer
+* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers
+* Simon Garnotel -- FreeFem++ lexer
Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
index 747716fd..d0da916d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,37 +6,66 @@ Issue numbers refer to the tracker at
pull request numbers to the requests at
<https://bitbucket.org/birkenfeld/pygments-main/pull-requests/merged>.
+Version 2.4.1
+-------------
+
+- Updated lexers:
+
+ * Coq (#1430)
+ * MSDOS Session (PR#734)
+ * Prolog (#1511)
+ * TypeScript (#1515)
+
+- Support CSS variables in stylesheets (PR#814)
+- Fix F# lexer name (PR#709)
+- Fix ``TerminalFormatter`` using bold for bright text (#1480)
+
Version 2.4.0
-------------
-(not released yet)
+(released May 8, 2019)
- Added lexers:
+ * Augeas (PR#807)
+ * BBC Basic (PR#806)
+ * Boa (PR#756)
* Charm++ CI (PR#788)
+ * DASM16 (PR#807)
* FloScript (PR#750)
+ * FreeFem++ (PR#785)
* Hspec (PR#790)
+ * Pony (PR#627)
* SGF (PR#780)
+ * Slash (PR#807)
* Slurm (PR#760)
+ * Tera Term Language (PR#749)
+ * TOML (PR#807)
* Unicon (PR#731)
* VBScript (PR#673)
- Updated lexers:
+ * Apache2 (PR#766)
* Cypher (PR#746)
* LLVM (PR#792)
+ * Makefiles (PR#766)
* PHP (#1482)
+ * Rust
* SQL (PR#672)
* Stan (PR#774)
* Stata (PR#800)
* Terraform (PR#787)
+ * YAML
- Add solarized style (PR#708)
- Add support for Markdown reference-style links (PR#753)
+- Add license information to generated HTML/CSS files (#1496)
- Change ANSI color names (PR#777)
- Fix catastrophic backtracking in the bash lexer (#1494)
- Fix documentation failing to build using Sphinx 2.0 (#1501)
- Fix incorrect links in the Lisp and R lexer documentation (PR#775)
- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
+- Fix lexers popping from an empty stack (#1506)
- TypoScript uses ``.typoscript`` now (#1498)
- Updated Trove classifiers and ``pip`` requirements (PR#799)
diff --git a/LICENSE b/LICENSE
index 21815527..13d1c74b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2006-2017 by the respective authors (see AUTHORS file).
+Copyright (c) 2006-2019 by the respective authors (see AUTHORS file).
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/doc/languages.rst b/doc/languages.rst
index 47e3363f..b06ccc55 100644
--- a/doc/languages.rst
+++ b/doc/languages.rst
@@ -14,8 +14,11 @@ Programming languages
* AppleScript
* Assembly (various)
* Asymptote
+* `Augeas <http://augeas.net>`_
* Awk
+* BBC Basic
* Befunge
+* `Boa <http://boa.cs.iastate.edu/docs/index.php>`_
* Boo
* BrainFuck
* C, C++
@@ -31,6 +34,7 @@ Programming languages
* `Cython <http://cython.org>`_
* `D <http://dlang.org>`_
* Dart
+* DCPU-16
* Delphi
* Dylan
* `Elm <http://elm-lang.org/>`_
@@ -41,6 +45,7 @@ Programming languages
* `Fennel <https://fennel-lang.org/>`_
* `FloScript <http://ioflo.com/>`_
* Fortran
+* `FreeFEM++ <https://freefem.org/>`_
* F#
* GAP
* Gherkin (Cucumber)
@@ -70,6 +75,7 @@ Programming languages
* OCaml
* PHP
* `Perl 5 <http://perl.org>`_ and `Perl 6 <https://perl6.org>`_
+* `Pony <https://www.ponylang.io/>`_
* PovRay
* PostScript
* PowerShell
@@ -85,10 +91,13 @@ Programming languages
* Scheme
* Scilab
* `SGF <https://www.red-bean.com/sgf/>`_
+* `Slash <https://github.com/arturadib/Slash-A>`_
* `Slurm <https://slurm.schedmd.com/overview.html>`_
* Smalltalk
* SNOBOL
* Tcl
+* `Tera Term language <https://ttssh2.osdn.jp/>`_
+* `TOML <https://github.com/toml-lang/toml>`_
* Vala
* Verilog
* VHDL
diff --git a/pygments/__init__.py b/pygments/__init__.py
index 4dd38fee..0da0649d 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -29,7 +29,7 @@ import sys
from pygments.util import StringIO, BytesIO
-__version__ = '2.3.1'
+__version__ = '2.4.0'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
diff --git a/pygments/console.py b/pygments/console.py
index e61744ce..e9f30ad8 100644
--- a/pygments/console.py
+++ b/pygments/console.py
@@ -30,7 +30,7 @@ light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brig
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
- codes[l] = esc + "%i;01m" % x
+ codes[l] = esc + "%im" % (60 + x)
x += 1
del d, l, x
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 7d7605eb..d65c09ce 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -5,7 +5,7 @@
Formatter for HTML output.
- :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -41,6 +41,11 @@ def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
+def webify(color):
+ if color.startswith('calc') or color.startswith('var'):
+ return color
+ else:
+ return '#' + color
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
@@ -55,6 +60,11 @@ def _get_ttype_class(ttype):
CSSFILE_TEMPLATE = '''\
+/*
+generated by Pygments <http://pygments.org>
+Copyright 2006-2019 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+*/
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
@@ -64,7 +74,11 @@ pre { line-height: 125%%; }
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
-
+<!--
+generated by Pygments <http://pygments.org>
+Copyright 2006-2019 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+-->
<html>
<head>
<title>%(title)s</title>
@@ -322,11 +336,17 @@ class HtmlFormatter(Formatter):
.. versionadded:: 1.6
`filename`
- A string used to generate a filename when rendering <pre> blocks,
+ A string used to generate a filename when rendering ``<pre>`` blocks,
for example if displaying source code.
.. versionadded:: 2.1
+ `wrapcode`
+ Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
+ by the HTML5 specification.
+
+ .. versionadded:: 2.4
+
**Subclassing the HTML formatter**
@@ -395,6 +415,7 @@ class HtmlFormatter(Formatter):
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
self.filename = self._decodeifneeded(options.get('filename', ''))
+ self.wrapcode = get_bool_opt(options, 'wrapcode', False)
if self.tagsfile:
if not ctags:
@@ -451,7 +472,7 @@ class HtmlFormatter(Formatter):
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
- style += 'color: #%s; ' % ndef['color']
+ style += 'color: %s; ' % webify(ndef['color'])
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
@@ -459,9 +480,9 @@ class HtmlFormatter(Formatter):
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
- style += 'background-color: #%s; ' % ndef['bgcolor']
+ style += 'background-color: %s; ' % webify(ndef['bgcolor'])
if ndef['border']:
- style += 'border: 1px solid #%s; ' % ndef['border']
+ style += 'border: 1px solid %s; ' % webify(ndef['border'])
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
@@ -708,6 +729,12 @@ class HtmlFormatter(Formatter):
yield tup
yield 0, '</pre>'
+ def _wrap_code(self, inner):
+ yield 0, '<code>'
+ for tup in inner:
+ yield tup
+ yield 0, '</code>'
+
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
@@ -814,7 +841,10 @@ class HtmlFormatter(Formatter):
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
- return self._wrap_div(self._wrap_pre(source))
+ if self.wrapcode:
+ return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+ else:
+ return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 90905ba5..62d66318 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -639,14 +639,20 @@ class RegexLexer(Lexer):
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
- statestack.pop()
+ if len(statestack) > 1:
+ statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
- # pop
- del statestack[new_state:]
+ # pop, but keep at least one state on the stack
+ # (random code leading to unexpected pops should
+ # not allow exceptions)
+ if abs(new_state) >= len(statestack):
+ del statestack[1:]
+ else:
+ del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
@@ -724,14 +730,18 @@ class ExtendedRegexLexer(RegexLexer):
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
- ctx.stack.pop()
+ if len(ctx.stack) > 1:
+ ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
- # pop
- del ctx.stack[new_state:]
+ # see RegexLexer for why this check is made
+ if abs(new_state) >= len(ctx.stack):
+                    del ctx.stack[1:]
+ else:
+ del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 6e0f728c..ce1b6dfc 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -44,9 +44,11 @@ LEXERS = {
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
+ 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
+ 'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
@@ -59,6 +61,7 @@ LEXERS = {
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
+ 'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
@@ -121,6 +124,7 @@ LEXERS = {
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
+ 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
@@ -149,7 +153,7 @@ LEXERS = {
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
- 'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+ 'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
@@ -162,6 +166,7 @@ LEXERS = {
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
@@ -322,6 +327,7 @@ LEXERS = {
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
+ 'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
@@ -386,6 +392,7 @@ LEXERS = {
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
+ 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
@@ -408,12 +415,14 @@ LEXERS = {
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
+ 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml',), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
+ 'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
@@ -433,7 +442,7 @@ LEXERS = {
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
- 'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', (), ('*.vbs', '*.VBS'), ()),
+ 'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 2f08d510..3d2933d6 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -20,7 +20,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'NasmLexer',
- 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer']
+ 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer', 'Dasm16Lexer']
class GasLexer(RegexLexer):
@@ -468,9 +468,11 @@ class NasmLexer(RegexLexer):
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
- directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
+ # Directives must be followed by whitespace, otherwise CPU will match
+ # cpuid for instance.
+ directives = (r'(?:BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
- r'EXPORT|LIBRARY|MODULE')
+ r'EXPORT|LIBRARY|MODULE)\s+')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
@@ -650,3 +652,109 @@ class Ca65Lexer(RegexLexer):
# comments in GAS start with "#"
if re.match(r'^\s*;', text, re.MULTILINE):
return 0.9
+
+
+class Dasm16Lexer(RegexLexer):
+ """
+ Simple lexer for DCPU-16 Assembly
+
+ Check http://0x10c.com/doc/dcpu-16.txt
+
+ .. versionadded:: 2.4
+ """
+ name = 'DASM16'
+ aliases = ['dasm16']
+ filenames = ['*.dasm16', '*.dasm']
+ mimetypes = ['text/x-dasm16']
+
+ INSTRUCTIONS = [
+ 'SET',
+ 'ADD', 'SUB',
+ 'MUL', 'MLI',
+ 'DIV', 'DVI',
+ 'MOD', 'MDI',
+ 'AND', 'BOR', 'XOR',
+ 'SHR', 'ASR', 'SHL',
+ 'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU',
+ 'ADX', 'SBX',
+ 'STI', 'STD',
+ 'JSR',
+ 'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI',
+ ]
+
+ REGISTERS = [
+ 'A', 'B', 'C',
+ 'X', 'Y', 'Z',
+ 'I', 'J',
+ 'SP', 'PC', 'EX',
+ 'POP', 'PEEK', 'PUSH'
+ ]
+
+ # Regexes yo
+ char = r'[a-zA-Z$._0-9@]'
+ identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
+ number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)'
+ binary_number = r'0b[01_]+'
+ instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')'
+ single_char = r"'\\?" + char + "'"
+ string = r'"(\\"|[^"])*"'
+
+ def guess_identifier(lexer, match):
+ ident = match.group(0)
+ klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label
+ yield match.start(), klass, ident
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ (':' + identifier, Name.Label),
+ (identifier + ':', Name.Label),
+ (instruction, Name.Function, 'instruction-args'),
+ (r'\.' + identifier, Name.Function, 'data-args'),
+ (r'[\r\n]+', Text)
+ ],
+
+ 'numeric' : [
+ (binary_number, Number.Integer),
+ (number, Number.Integer),
+ (single_char, String),
+ ],
+
+ 'arg' : [
+ (identifier, guess_identifier),
+ include('numeric')
+ ],
+
+ 'deref' : [
+ (r'\+', Punctuation),
+ (r'\]', Punctuation, '#pop'),
+ include('arg'),
+ include('whitespace')
+ ],
+
+ 'instruction-line' : [
+ (r'[\r\n]+', Text, '#pop'),
+ (r';.*?$', Comment, '#pop'),
+ include('whitespace')
+ ],
+
+ 'instruction-args': [
+ (r',', Punctuation),
+ (r'\[', Punctuation, 'deref'),
+ include('arg'),
+ include('instruction-line')
+ ],
+
+ 'data-args' : [
+ (r',', Punctuation),
+ include('numeric'),
+ (string, String),
+ include('instruction-line')
+ ],
+
+ 'whitespace': [
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r';.*?\n', Comment)
+ ],
+ }
diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py
index b0409386..f93d6d52 100644
--- a/pygments/lexers/basic.py
+++ b/pygments/lexers/basic.py
@@ -18,7 +18,8 @@ from pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
- 'QBasicLexer', 'VBScriptLexer']
+ 'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
+
class BlitzMaxLexer(RegexLexer):
@@ -509,7 +510,7 @@ class VBScriptLexer(RegexLexer):
.. versionadded:: 2.4
"""
name = 'VBScript'
- aliases = []
+ aliases = ['vbscript']
filenames = ['*.vbs', '*.VBS']
flags = re.IGNORECASE
@@ -549,7 +550,7 @@ class VBScriptLexer(RegexLexer):
(r'[a-z_][a-z0-9_]*', Name),
(r'\b_\n', Operator),
(words(r'(),.:'), Punctuation),
- ('.+(\n)?', Error)
+ (r'.+(\n)?', Error)
],
'dim_more': [
(r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)', bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
@@ -561,4 +562,98 @@ class VBScriptLexer(RegexLexer):
(r'"', String.Double, '#pop'),
(r'\n', Error, '#pop'), # Unterminated string
],
- } \ No newline at end of file
+ }
+
+
+class BBCBasicLexer(RegexLexer):
+ """
+ BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
+ It is also used by BBC Basic For Windows.
+
+ .. versionadded:: 2.4
+ """
+ base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
+ 'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
+ 'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
+ 'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
+ 'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
+ 'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
+ 'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
+ 'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
+ 'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
+ 'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
+ 'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
+ 'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
+ 'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
+ 'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
+ 'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
+ 'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
+ 'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
+
+ basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
+ 'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
+ 'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
+ 'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
+ 'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
+ 'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
+ 'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
+ 'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
+
+
+ name = 'BBC Basic'
+ aliases = ['bbcbasic']
+ filenames = ['*.bbc']
+
+ tokens = {
+ 'root': [
+ (r"[0-9]+", Name.Label),
+ (r"(\*)([^\n]*)",
+ bygroups(Keyword.Pseudo, Comment.Special)),
+ (r"", Whitespace, 'code'),
+ ],
+
+ 'code': [
+ (r"(REM)([^\n]*)",
+ bygroups(Keyword.Declaration, Comment.Single)),
+ (r'\n', Whitespace, 'root'),
+ (r'\s+', Whitespace),
+ (r':', Comment.Preproc),
+
+ # Some special cases to make functions come out nicer
+ (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
+ bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)),
+ (r'(FN|PROC)([A-Za-z_@][\w@]*)',
+ bygroups(Keyword, Name.Function)),
+
+ (r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
+ bygroups(Keyword, Whitespace, Name.Label)),
+
+ (r'(TRUE|FALSE)', Keyword.Constant),
+ (r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo),
+
+ (words(base_keywords), Keyword),
+ (words(basic5_keywords), Keyword),
+
+ ('"', String.Double, 'string'),
+
+ ('%[01]{1,32}', Number.Bin),
+ ('&[0-9a-f]{1,8}', Number.Hex),
+
+ (r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
+ (r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
+ (r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
+ (r'[+-]?\d+', Number.Integer),
+
+ (r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
+ (r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
+ ],
+ 'string': [
+ (r'[^"\n]+', String.Double),
+ (r'"', String.Double, '#pop'),
+ (r'\n', Error, 'root'), # Unterminated string
+ ],
+ }
+
+ def analyse_text(text):
+ if text.startswith('10REM >') or text.startswith('REM >'):
+ return 0.9
diff --git a/pygments/lexers/boa.py b/pygments/lexers/boa.py
new file mode 100644
index 00000000..dda31eb4
--- /dev/null
+++ b/pygments/lexers/boa.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.boa
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for the Boa language.
+
+ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import *
+
+__all__ = ['BoaLexer']
+
+line_re = re.compile('.*?\n')
+
+
+class BoaLexer(RegexLexer):
+ """
+ Lexer for the `Boa <http://boa.cs.iastate.edu/docs/>`_ language.
+
+ .. versionadded:: 2.4
+ """
+ name = 'Boa'
+ aliases = ['boa']
+ filenames = ['*.boa']
+
+ reserved = words(
+ ('input', 'output', 'of', 'weight', 'before', 'after', 'stop', 'ifall', 'foreach', 'exists', 'function',
+ 'break', 'switch', 'case', 'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
+ suffix=r'\b', prefix=r'\b')
+ keywords = words(
+ ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum', 'top', 'string', 'int', 'bool', 'float',
+ 'time', 'false', 'true', 'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
+ classes = words(
+ ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind', 'ChangedFile', 'FileKind', 'ASTRoot',
+ 'Namespace', 'Declaration', 'Type', 'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
+ 'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility', 'TypeKind', 'Person', 'ChangeKind'),
+ suffix=r'\b', prefix=r'\b')
+ operators = ('->', ':=', ':', '=', '<<', '!', '++', '||', '&&', '+', '-', '*', ">", "<")
+ string_sep = ('`', '\"')
+ built_in_functions = words(
+ (
+ # Array functions
+ 'new', 'sort',
+ # Date & Time functions
+ 'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now', 'addday', 'addmonth', 'addweek', 'addyear',
+ 'dayofmonth', 'dayofweek', 'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
+ 'trunctomonth', 'trunctosecond', 'trunctoyear',
+ # Map functions
+ 'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
+ # Math functions
+ 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'cos', 'cosh', 'exp', 'floor',
+ 'highbit', 'isfinite', 'isinf', 'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow', 'rand',
+ 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
+ # Other functions
+ 'def', 'hash', 'len',
+ # Set functions
+ 'add', 'contains', 'remove',
+ # String functions
+ 'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex', 'split', 'splitall', 'splitn',
+ 'strfind', 'strreplace', 'strrfind', 'substring', 'trim', 'uppercase',
+ # Type Conversion functions
+ 'bool', 'float', 'int', 'string', 'time',
+ # Domain-Specific functions
+ 'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind', 'isliteral',
+ ),
+ prefix=r'\b',
+ suffix=r'\(')
+
+ tokens = {
+ 'root': [
+ (r'#.*?$', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (reserved, Keyword.Reserved),
+ (built_in_functions, Name.Function),
+ (keywords, Keyword.Type),
+            (classes, Name.Class),
+ (words(operators), Operator),
+ (r'[][(),;{}\\.]', Punctuation),
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r'`(\\\\|\\`|[^`])*`', String),
+            (words(string_sep), String.Delimiter),
+ (r'[a-zA-Z_]+', Name.Variable),
+ (r'[0-9]+', Number.Integer),
+ (r'\s+?', Text), # Whitespace
+ ]
+ }
diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py
index 206ec360..1984bf36 100644
--- a/pygments/lexers/configs.py
+++ b/pygments/lexers/configs.py
@@ -21,7 +21,7 @@ __all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
- 'PkgConfigLexer', 'PacmanConfLexer']
+ 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer']
class IniLexer(RegexLexer):
@@ -301,7 +301,7 @@ class ApacheConfLexer(RegexLexer):
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
- (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
+ (r'(<[^\s>]+)(?:(\s+)(.*))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-z]\w*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
@@ -838,3 +838,97 @@ class PacmanConfLexer(RegexLexer):
(r'.', Text),
],
}
+
+
+class AugeasLexer(RegexLexer):
+ """
+ Lexer for `Augeas <http://augeas.net>`_.
+
+ .. versionadded:: 2.4
+ """
+ name = 'Augeas'
+ aliases = ['augeas']
+ filenames = ['*.aug']
+
+ tokens = {
+ 'root': [
+ (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
+ (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)),
+ (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)),
+ (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)),
+ (r'\(\*', Comment.Multiline, 'comment'),
+ (r'[*+\-.;=?|]', Operator),
+ (r'[()\[\]{}]', Operator),
+ (r'"', String.Double, 'string'),
+ (r'\/', String.Regex, 'regex'),
+ (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)),
+ (r'.', Name.Variable),
+ (r'\s', Text),
+ ],
+ 'string': [
+ (r'\\.', String.Escape),
+ (r'[^"]', String.Double),
+ (r'"', String.Double, '#pop'),
+ ],
+ 'regex': [
+ (r'\\.', String.Escape),
+ (r'[^/]', String.Regex),
+ (r'\/', String.Regex, '#pop'),
+ ],
+ 'comment': [
+ (r'[^*)]', Comment.Multiline),
+ (r'\(\*', Comment.Multiline, '#push'),
+ (r'\*\)', Comment.Multiline, '#pop'),
+ (r'[)*]', Comment.Multiline)
+ ],
+ }
+
+
+class TOMLLexer(RegexLexer):
+ """
+ Lexer for `TOML <https://github.com/toml-lang/toml>`_, a simple language
+ for config files.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'TOML'
+ aliases = ['toml']
+ filenames = ['*.toml']
+
+ tokens = {
+ 'root': [
+
+ # Basics, comments, strings
+ (r'\s+', Text),
+ (r'#.*?$', Comment.Single),
+ # Basic string
+ (r'"(\\\\|\\"|[^"])*"', String),
+ # Literal string
+ (r'\'\'\'(.*)\'\'\'', String),
+ (r'\'[^\']*\'', String),
+ (r'(true|false)\b', Keyword.Constant),
+ (r'[a-zA-Z_][\w\-]*', Name),
+
+ (r'\[.*?\]$', Keyword),
+ # Datetime
+ # TODO this needs to be expanded, as TOML is rather flexible:
+ # https://github.com/toml-lang/toml#offset-date-time
+ (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer),
+
+ # Numbers
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
+ (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
+ # Handle +-inf, +-infinity, +-nan
+ (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
+ (r'[+-]?\d+', Number.Integer),
+
+ # Punctuation
+ (r'[]{}:(),;[]', Punctuation),
+ (r'\.', Punctuation),
+
+ # Operators
+ (r'=', Operator)
+
+ ]
+ }
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 27ae77c5..1d3b1f38 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -541,16 +541,13 @@ class VbNetAspxLexer(DelegatingLexer):
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
- For the F# language (version 3.0).
-
- AAAAACK Strings
- http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
+ For the `F# language <https://fsharp.org/>`_ (version 3.0).
.. versionadded:: 1.5
"""
- name = 'FSharp'
- aliases = ['fsharp']
+ name = 'F#'
+ aliases = ['fsharp', 'f#']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
diff --git a/pygments/lexers/freefem.py b/pygments/lexers/freefem.py
new file mode 100644
index 00000000..fd534915
--- /dev/null
+++ b/pygments/lexers/freefem.py
@@ -0,0 +1,967 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.freefem
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for FreeFem++ language.
+
+ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \
+ default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+from pygments.lexers.c_cpp import CLexer, CppLexer
+from pygments.lexers import _mql_builtins
+
+__all__ = ['FreeFemLexer']
+
+
+class FreeFemLexer(CppLexer):
+ """
+ For `FreeFem++ <https://freefem.org/>`_ source.
+
+ This is an extension of the CppLexer, as the FreeFem Language is a superset
+ of C++
+ """
+
+ name = 'Freefem'
+ aliases = ['freefem']
+ filenames = ['*.edp']
+ mimetypes = ['text/x-freefem']
+
+ # Language operators
+ operators = set(('+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''))
+
+ # types
+ types = set((
+ 'bool',
+ 'border',
+ 'complex',
+ 'dmatrix',
+ 'fespace',
+ 'func',
+ 'gslspline',
+ 'ifstream',
+ 'int',
+ 'macro',
+ 'matrix',
+ 'mesh',
+ 'mesh3',
+ 'mpiComm',
+ 'mpiGroup',
+ 'mpiRequest',
+ 'NewMacro',
+ 'EndMacro',
+ 'ofstream',
+ 'Pmmap',
+ 'problem',
+ 'Psemaphore',
+ 'real',
+ 'solve',
+ 'string',
+ 'varf'
+ ))
+
+ # finite element spaces
+ fespaces = set((
+ 'BDM1',
+ 'BDM1Ortho',
+ 'Edge03d',
+ 'Edge13d',
+ 'Edge23d',
+ 'FEQF',
+ 'HCT',
+ 'P0',
+ 'P03d',
+ 'P0Edge',
+ 'P1',
+ 'P13d',
+ 'P1b',
+ 'P1b3d',
+ 'P1bl',
+ 'P1bl3d',
+ 'P1dc',
+ 'P1Edge',
+ 'P1nc',
+ 'P2',
+ 'P23d',
+ 'P2b',
+ 'P2BR',
+ 'P2dc',
+ 'P2Edge',
+ 'P2h',
+ 'P2Morley',
+ 'P2pnc',
+ 'P3',
+ 'P3dc',
+ 'P3Edge',
+ 'P4',
+ 'P4dc',
+ 'P4Edge',
+ 'P5Edge',
+ 'RT0',
+ 'RT03d',
+ 'RT0Ortho',
+ 'RT1',
+ 'RT1Ortho',
+ 'RT2',
+ 'RT2Ortho'
+ ))
+
+ # preprocessor
+ preprocessor = set((
+ 'ENDIFMACRO',
+ 'include',
+ 'IFMACRO',
+ 'load'
+ ))
+
+ # Language keywords
+ keywords = set((
+ 'adj',
+ 'append',
+ 'area',
+ 'ARGV',
+ 'be',
+ 'binary',
+ 'BoundaryEdge',
+ 'bordermeasure',
+ 'CG',
+ 'Cholesky',
+ 'cin',
+ 'cout',
+ 'Crout',
+ 'default',
+ 'diag',
+ 'edgeOrientation',
+ 'endl',
+ 'false',
+ 'ffind',
+ 'FILE',
+ 'find',
+ 'fixed',
+ 'flush',
+ 'GMRES',
+ 'good',
+ 'hTriangle',
+ 'im',
+ 'imax',
+ 'imin',
+ 'InternalEdge',
+ 'l1',
+ 'l2',
+ 'label',
+ 'lenEdge',
+ 'length',
+ 'LINE',
+ 'linfty',
+ 'LU',
+ 'm',
+ 'max',
+ 'measure',
+ 'min',
+ 'mpiAnySource',
+ 'mpiBAND',
+ 'mpiBXOR',
+ 'mpiCommWorld',
+ 'mpiLAND',
+ 'mpiLOR',
+ 'mpiLXOR',
+ 'mpiMAX',
+ 'mpiMIN',
+ 'mpiPROD',
+ 'mpirank',
+ 'mpisize',
+ 'mpiSUM',
+ 'mpiUndefined',
+ 'n',
+ 'N',
+ 'nbe',
+ 'ndof',
+ 'ndofK',
+ 'noshowbase',
+ 'noshowpos',
+ 'notaregion',
+ 'nt',
+ 'nTonEdge',
+ 'nuEdge',
+ 'nuTriangle',
+ 'nv',
+ 'P',
+ 'pi',
+ 'precision',
+ 'qf1pE',
+ 'qf1pElump',
+ 'qf1pT',
+ 'qf1pTlump',
+ 'qfV1',
+ 'qfV1lump',
+ 'qf2pE',
+ 'qf2pT',
+ 'qf2pT4P1',
+ 'qfV2',
+ 'qf3pE',
+ 'qf4pE',
+ 'qf5pE',
+ 'qf5pT',
+ 'qfV5',
+ 'qf7pT',
+ 'qf9pT',
+ 'qfnbpE',
+ 'quantile',
+ 're',
+ 'region',
+ 'rfind',
+ 'scientific',
+ 'searchMethod',
+ 'setw',
+ 'showbase',
+ 'showpos',
+ 'sparsesolver',
+ 'sum',
+ 'tellp',
+ 'true',
+ 'UMFPACK',
+ 'unused',
+ 'whoinElement',
+ 'verbosity',
+ 'version',
+ 'volume',
+ 'x',
+ 'y',
+ 'z'
+ ))
+
+ # Language shipped functions and class ( )
+ functions = set((
+ 'abs',
+ 'acos',
+ 'acosh',
+ 'adaptmesh',
+ 'adj',
+ 'AffineCG',
+ 'AffineGMRES',
+ 'arg',
+ 'asin',
+ 'asinh',
+ 'assert',
+ 'atan',
+ 'atan2',
+ 'atanh',
+ 'atof',
+ 'atoi',
+ 'BFGS',
+ 'broadcast',
+ 'buildlayers',
+ 'buildmesh',
+ 'ceil',
+ 'chi',
+ 'complexEigenValue',
+ 'copysign',
+ 'change',
+ 'checkmovemesh',
+ 'clock',
+ 'cmaes',
+ 'conj',
+ 'convect',
+ 'cos',
+ 'cosh',
+ 'cube',
+ 'd',
+ 'dd',
+ 'dfft',
+ 'diffnp',
+ 'diffpos',
+ 'dimKrylov',
+ 'dist',
+ 'dumptable',
+ 'dx',
+ 'dxx',
+ 'dxy',
+ 'dxz',
+ 'dy',
+ 'dyx',
+ 'dyy',
+ 'dyz',
+ 'dz',
+ 'dzx',
+ 'dzy',
+ 'dzz',
+ 'EigenValue',
+ 'emptymesh',
+ 'erf',
+ 'erfc',
+ 'exec',
+ 'exit',
+ 'exp',
+ 'fdim',
+ 'floor',
+ 'fmax',
+ 'fmin',
+ 'fmod',
+ 'freeyams',
+ 'getARGV',
+ 'getline',
+ 'gmshload',
+ 'gmshload3',
+ 'gslcdfugaussianP',
+ 'gslcdfugaussianQ',
+ 'gslcdfugaussianPinv',
+ 'gslcdfugaussianQinv',
+ 'gslcdfgaussianP',
+ 'gslcdfgaussianQ',
+ 'gslcdfgaussianPinv',
+ 'gslcdfgaussianQinv',
+ 'gslcdfgammaP',
+ 'gslcdfgammaQ',
+ 'gslcdfgammaPinv',
+ 'gslcdfgammaQinv',
+ 'gslcdfcauchyP',
+ 'gslcdfcauchyQ',
+ 'gslcdfcauchyPinv',
+ 'gslcdfcauchyQinv',
+ 'gslcdflaplaceP',
+ 'gslcdflaplaceQ',
+ 'gslcdflaplacePinv',
+ 'gslcdflaplaceQinv',
+ 'gslcdfrayleighP',
+ 'gslcdfrayleighQ',
+ 'gslcdfrayleighPinv',
+ 'gslcdfrayleighQinv',
+ 'gslcdfchisqP',
+ 'gslcdfchisqQ',
+ 'gslcdfchisqPinv',
+ 'gslcdfchisqQinv',
+ 'gslcdfexponentialP',
+ 'gslcdfexponentialQ',
+ 'gslcdfexponentialPinv',
+ 'gslcdfexponentialQinv',
+ 'gslcdfexppowP',
+ 'gslcdfexppowQ',
+ 'gslcdftdistP',
+ 'gslcdftdistQ',
+ 'gslcdftdistPinv',
+ 'gslcdftdistQinv',
+ 'gslcdffdistP',
+ 'gslcdffdistQ',
+ 'gslcdffdistPinv',
+ 'gslcdffdistQinv',
+ 'gslcdfbetaP',
+ 'gslcdfbetaQ',
+ 'gslcdfbetaPinv',
+ 'gslcdfbetaQinv',
+ 'gslcdfflatP',
+ 'gslcdfflatQ',
+ 'gslcdfflatPinv',
+ 'gslcdfflatQinv',
+ 'gslcdflognormalP',
+ 'gslcdflognormalQ',
+ 'gslcdflognormalPinv',
+ 'gslcdflognormalQinv',
+ 'gslcdfgumbel1P',
+ 'gslcdfgumbel1Q',
+ 'gslcdfgumbel1Pinv',
+ 'gslcdfgumbel1Qinv',
+ 'gslcdfgumbel2P',
+ 'gslcdfgumbel2Q',
+ 'gslcdfgumbel2Pinv',
+ 'gslcdfgumbel2Qinv',
+ 'gslcdfweibullP',
+ 'gslcdfweibullQ',
+ 'gslcdfweibullPinv',
+ 'gslcdfweibullQinv',
+ 'gslcdfparetoP',
+ 'gslcdfparetoQ',
+ 'gslcdfparetoPinv',
+ 'gslcdfparetoQinv',
+ 'gslcdflogisticP',
+ 'gslcdflogisticQ',
+ 'gslcdflogisticPinv',
+ 'gslcdflogisticQinv',
+ 'gslcdfbinomialP',
+ 'gslcdfbinomialQ',
+ 'gslcdfpoissonP',
+ 'gslcdfpoissonQ',
+ 'gslcdfgeometricP',
+ 'gslcdfgeometricQ',
+ 'gslcdfnegativebinomialP',
+ 'gslcdfnegativebinomialQ',
+ 'gslcdfpascalP',
+ 'gslcdfpascalQ',
+ 'gslinterpakima',
+ 'gslinterpakimaperiodic',
+ 'gslinterpcsplineperiodic',
+ 'gslinterpcspline',
+ 'gslinterpsteffen',
+ 'gslinterplinear',
+ 'gslinterppolynomial',
+ 'gslranbernoullipdf',
+ 'gslranbeta',
+ 'gslranbetapdf',
+ 'gslranbinomialpdf',
+ 'gslranexponential',
+ 'gslranexponentialpdf',
+ 'gslranexppow',
+ 'gslranexppowpdf',
+ 'gslrancauchy',
+ 'gslrancauchypdf',
+ 'gslranchisq',
+ 'gslranchisqpdf',
+ 'gslranerlang',
+ 'gslranerlangpdf',
+ 'gslranfdist',
+ 'gslranfdistpdf',
+ 'gslranflat',
+ 'gslranflatpdf',
+ 'gslrangamma',
+ 'gslrangammaint',
+ 'gslrangammapdf',
+ 'gslrangammamt',
+ 'gslrangammaknuth',
+ 'gslrangaussian',
+ 'gslrangaussianratiomethod',
+ 'gslrangaussianziggurat',
+ 'gslrangaussianpdf',
+ 'gslranugaussian',
+ 'gslranugaussianratiomethod',
+ 'gslranugaussianpdf',
+ 'gslrangaussiantail',
+ 'gslrangaussiantailpdf',
+ 'gslranugaussiantail',
+ 'gslranugaussiantailpdf',
+ 'gslranlandau',
+ 'gslranlandaupdf',
+ 'gslrangeometricpdf',
+ 'gslrangumbel1',
+ 'gslrangumbel1pdf',
+ 'gslrangumbel2',
+ 'gslrangumbel2pdf',
+ 'gslranlogistic',
+ 'gslranlogisticpdf',
+ 'gslranlognormal',
+ 'gslranlognormalpdf',
+ 'gslranlogarithmicpdf',
+ 'gslrannegativebinomialpdf',
+ 'gslranpascalpdf',
+ 'gslranpareto',
+ 'gslranparetopdf',
+ 'gslranpoissonpdf',
+ 'gslranrayleigh',
+ 'gslranrayleighpdf',
+ 'gslranrayleightail',
+ 'gslranrayleightailpdf',
+ 'gslrantdist',
+ 'gslrantdistpdf',
+ 'gslranlaplace',
+ 'gslranlaplacepdf',
+ 'gslranlevy',
+ 'gslranweibull',
+ 'gslranweibullpdf',
+ 'gslsfairyAi',
+ 'gslsfairyBi',
+ 'gslsfairyAiscaled',
+ 'gslsfairyBiscaled',
+ 'gslsfairyAideriv',
+ 'gslsfairyBideriv',
+ 'gslsfairyAiderivscaled',
+ 'gslsfairyBiderivscaled',
+ 'gslsfairyzeroAi',
+ 'gslsfairyzeroBi',
+ 'gslsfairyzeroAideriv',
+ 'gslsfairyzeroBideriv',
+ 'gslsfbesselJ0',
+ 'gslsfbesselJ1',
+ 'gslsfbesselJn',
+ 'gslsfbesselY0',
+ 'gslsfbesselY1',
+ 'gslsfbesselYn',
+ 'gslsfbesselI0',
+ 'gslsfbesselI1',
+ 'gslsfbesselIn',
+ 'gslsfbesselI0scaled',
+ 'gslsfbesselI1scaled',
+ 'gslsfbesselInscaled',
+ 'gslsfbesselK0',
+ 'gslsfbesselK1',
+ 'gslsfbesselKn',
+ 'gslsfbesselK0scaled',
+ 'gslsfbesselK1scaled',
+ 'gslsfbesselKnscaled',
+ 'gslsfbesselj0',
+ 'gslsfbesselj1',
+ 'gslsfbesselj2',
+ 'gslsfbesseljl',
+ 'gslsfbessely0',
+ 'gslsfbessely1',
+ 'gslsfbessely2',
+ 'gslsfbesselyl',
+ 'gslsfbesseli0scaled',
+ 'gslsfbesseli1scaled',
+ 'gslsfbesseli2scaled',
+ 'gslsfbesselilscaled',
+ 'gslsfbesselk0scaled',
+ 'gslsfbesselk1scaled',
+ 'gslsfbesselk2scaled',
+ 'gslsfbesselklscaled',
+ 'gslsfbesselJnu',
+ 'gslsfbesselYnu',
+ 'gslsfbesselInuscaled',
+ 'gslsfbesselInu',
+ 'gslsfbesselKnuscaled',
+ 'gslsfbesselKnu',
+ 'gslsfbessellnKnu',
+ 'gslsfbesselzeroJ0',
+ 'gslsfbesselzeroJ1',
+ 'gslsfbesselzeroJnu',
+ 'gslsfclausen',
+ 'gslsfhydrogenicR1',
+ 'gslsfdawson',
+ 'gslsfdebye1',
+ 'gslsfdebye2',
+ 'gslsfdebye3',
+ 'gslsfdebye4',
+ 'gslsfdebye5',
+ 'gslsfdebye6',
+ 'gslsfdilog',
+ 'gslsfmultiply',
+ 'gslsfellintKcomp',
+ 'gslsfellintEcomp',
+ 'gslsfellintPcomp',
+ 'gslsfellintDcomp',
+ 'gslsfellintF',
+ 'gslsfellintE',
+ 'gslsfellintRC',
+ 'gslsferfc',
+ 'gslsflogerfc',
+ 'gslsferf',
+ 'gslsferfZ',
+ 'gslsferfQ',
+ 'gslsfhazard',
+ 'gslsfexp',
+ 'gslsfexpmult',
+ 'gslsfexpm1',
+ 'gslsfexprel',
+ 'gslsfexprel2',
+ 'gslsfexpreln',
+ 'gslsfexpintE1',
+ 'gslsfexpintE2',
+ 'gslsfexpintEn',
+ 'gslsfexpintE1scaled',
+ 'gslsfexpintE2scaled',
+ 'gslsfexpintEnscaled',
+ 'gslsfexpintEi',
+ 'gslsfexpintEiscaled',
+ 'gslsfShi',
+ 'gslsfChi',
+ 'gslsfexpint3',
+ 'gslsfSi',
+ 'gslsfCi',
+ 'gslsfatanint',
+ 'gslsffermidiracm1',
+ 'gslsffermidirac0',
+ 'gslsffermidirac1',
+ 'gslsffermidirac2',
+ 'gslsffermidiracint',
+ 'gslsffermidiracmhalf',
+ 'gslsffermidirachalf',
+ 'gslsffermidirac3half',
+ 'gslsffermidiracinc0',
+ 'gslsflngamma',
+ 'gslsfgamma',
+ 'gslsfgammastar',
+ 'gslsfgammainv',
+ 'gslsftaylorcoeff',
+ 'gslsffact',
+ 'gslsfdoublefact',
+ 'gslsflnfact',
+ 'gslsflndoublefact',
+ 'gslsflnchoose',
+ 'gslsfchoose',
+ 'gslsflnpoch',
+ 'gslsfpoch',
+ 'gslsfpochrel',
+ 'gslsfgammaincQ',
+ 'gslsfgammaincP',
+ 'gslsfgammainc',
+ 'gslsflnbeta',
+ 'gslsfbeta',
+ 'gslsfbetainc',
+ 'gslsfgegenpoly1',
+ 'gslsfgegenpoly2',
+ 'gslsfgegenpoly3',
+ 'gslsfgegenpolyn',
+ 'gslsfhyperg0F1',
+ 'gslsfhyperg1F1int',
+ 'gslsfhyperg1F1',
+ 'gslsfhypergUint',
+ 'gslsfhypergU',
+ 'gslsfhyperg2F0',
+ 'gslsflaguerre1',
+ 'gslsflaguerre2',
+ 'gslsflaguerre3',
+ 'gslsflaguerren',
+ 'gslsflambertW0',
+ 'gslsflambertWm1',
+ 'gslsflegendrePl',
+ 'gslsflegendreP1',
+ 'gslsflegendreP2',
+ 'gslsflegendreP3',
+ 'gslsflegendreQ0',
+ 'gslsflegendreQ1',
+ 'gslsflegendreQl',
+ 'gslsflegendrePlm',
+ 'gslsflegendresphPlm',
+ 'gslsflegendrearraysize',
+ 'gslsfconicalPhalf',
+ 'gslsfconicalPmhalf',
+ 'gslsfconicalP0',
+ 'gslsfconicalP1',
+ 'gslsfconicalPsphreg',
+ 'gslsfconicalPcylreg',
+ 'gslsflegendreH3d0',
+ 'gslsflegendreH3d1',
+ 'gslsflegendreH3d',
+ 'gslsflog',
+ 'gslsflogabs',
+ 'gslsflog1plusx',
+ 'gslsflog1plusxmx',
+ 'gslsfpowint',
+ 'gslsfpsiint',
+ 'gslsfpsi',
+ 'gslsfpsi1piy',
+ 'gslsfpsi1int',
+ 'gslsfpsi1',
+ 'gslsfpsin',
+ 'gslsfsynchrotron1',
+ 'gslsfsynchrotron2',
+ 'gslsftransport2',
+ 'gslsftransport3',
+ 'gslsftransport4',
+ 'gslsftransport5',
+ 'gslsfsin',
+ 'gslsfcos',
+ 'gslsfhypot',
+ 'gslsfsinc',
+ 'gslsflnsinh',
+ 'gslsflncosh',
+ 'gslsfanglerestrictsymm',
+ 'gslsfanglerestrictpos',
+ 'gslsfzetaint',
+ 'gslsfzeta',
+ 'gslsfzetam1',
+ 'gslsfzetam1int',
+ 'gslsfhzeta',
+ 'gslsfetaint',
+ 'gslsfeta',
+ 'imag',
+ 'int1d',
+ 'int2d',
+ 'int3d',
+ 'intalledges',
+ 'intallfaces',
+ 'interpolate',
+ 'invdiff',
+ 'invdiffnp',
+ 'invdiffpos',
+ 'Isend',
+ 'isInf',
+ 'isNaN',
+ 'isoline',
+ 'Irecv',
+ 'j0',
+ 'j1',
+ 'jn',
+ 'jump',
+ 'lgamma',
+ 'LinearCG',
+ 'LinearGMRES',
+ 'log',
+ 'log10',
+ 'lrint',
+ 'lround',
+ 'max',
+ 'mean',
+ 'medit',
+ 'min',
+ 'mmg3d',
+ 'movemesh',
+ 'movemesh23',
+ 'mpiAlltoall',
+ 'mpiAlltoallv',
+ 'mpiAllgather',
+ 'mpiAllgatherv',
+ 'mpiAllReduce',
+ 'mpiBarrier',
+ 'mpiGather',
+ 'mpiGatherv',
+ 'mpiRank',
+ 'mpiReduce',
+ 'mpiScatter',
+ 'mpiScatterv',
+ 'mpiSize',
+ 'mpiWait',
+ 'mpiWaitAny',
+ 'mpiWtick',
+ 'mpiWtime',
+ 'mshmet',
+ 'NaN',
+ 'NLCG',
+ 'on',
+ 'plot',
+ 'polar',
+ 'Post',
+ 'pow',
+ 'processor',
+ 'processorblock',
+ 'projection',
+ 'randinit',
+ 'randint31',
+ 'randint32',
+ 'random',
+ 'randreal1',
+ 'randreal2',
+ 'randreal3',
+ 'randres53',
+ 'Read',
+ 'readmesh',
+ 'readmesh3',
+ 'Recv',
+ 'rint',
+ 'round',
+ 'savemesh',
+ 'savesol',
+ 'savevtk',
+ 'seekg',
+ 'Sent',
+ 'set',
+ 'sign',
+ 'signbit',
+ 'sin',
+ 'sinh',
+ 'sort',
+ 'splitComm',
+ 'splitmesh',
+ 'sqrt',
+ 'square',
+ 'srandom',
+ 'srandomdev',
+ 'Stringification',
+ 'swap',
+ 'system',
+ 'tan',
+ 'tanh',
+ 'tellg',
+ 'tetg',
+ 'tetgconvexhull',
+ 'tetgreconstruction',
+ 'tetgtransfo',
+ 'tgamma',
+ 'triangulate',
+ 'trunc',
+ 'Wait',
+ 'Write',
+ 'y0',
+ 'y1',
+ 'yn'
+ ))
+
+ # function parameters
+ parameters = set((
+ 'A',
+ 'A1',
+ 'abserror',
+ 'absolute',
+ 'aniso',
+ 'aspectratio',
+ 'B',
+ 'B1',
+ 'bb',
+ 'beginend',
+ 'bin',
+ 'boundary',
+ 'bw',
+ 'close',
+ 'cmm',
+ 'coef',
+ 'composante',
+ 'cutoff',
+ 'datafilename',
+ 'dataname',
+ 'dim',
+ 'distmax',
+ 'displacement',
+ 'doptions',
+ 'dparams',
+ 'eps',
+ 'err',
+ 'errg',
+ 'facemerge',
+ 'facetcl',
+ 'factorize',
+ 'file',
+ 'fill',
+ 'fixedborder',
+ 'flabel',
+ 'flags',
+ 'floatmesh',
+ 'floatsol',
+ 'fregion',
+ 'gradation',
+ 'grey',
+ 'hmax',
+ 'hmin',
+ 'holelist',
+ 'hsv',
+ 'init',
+ 'inquire',
+ 'inside',
+ 'IsMetric',
+ 'iso',
+ 'ivalue',
+ 'keepbackvertices',
+ 'label',
+ 'labeldown',
+ 'labelmid',
+ 'labelup',
+ 'levelset',
+ 'loptions',
+ 'lparams',
+ 'maxit',
+ 'maxsubdiv',
+ 'meditff',
+ 'mem',
+ 'memory',
+ 'metric',
+ 'mode',
+ 'nbarrow',
+ 'nbiso',
+ 'nbiter',
+ 'nbjacoby',
+ 'nboffacetcl',
+ 'nbofholes',
+ 'nbofregions',
+ 'nbregul',
+ 'nbsmooth',
+ 'nbvx',
+ 'ncv',
+ 'nev',
+ 'nomeshgeneration',
+ 'normalization',
+ 'omega',
+ 'op',
+ 'optimize',
+ 'option',
+ 'options',
+ 'order',
+ 'orientation',
+ 'periodic',
+ 'power',
+ 'precon',
+ 'prev',
+ 'ps',
+ 'ptmerge',
+ 'qfe',
+ 'qforder',
+ 'qft',
+ 'qfV',
+ 'ratio',
+ 'rawvector',
+ 'reffacelow',
+ 'reffacemid',
+ 'reffaceup',
+ 'refnum',
+ 'reftet',
+ 'reftri',
+ 'region',
+ 'regionlist',
+ 'renumv',
+ 'rescaling',
+ 'ridgeangle',
+ 'save',
+ 'sigma',
+ 'sizeofvolume',
+ 'smoothing',
+ 'solver',
+ 'sparams',
+ 'split',
+ 'splitin2',
+ 'splitpbedge',
+ 'stop',
+ 'strategy',
+ 'swap',
+ 'switch',
+ 'sym',
+ 't',
+ 'tgv',
+ 'thetamax',
+ 'tol',
+ 'tolpivot',
+ 'tolpivotsym',
+ 'transfo',
+ 'U2Vc',
+ 'value',
+ 'varrow',
+ 'vector',
+ 'veps',
+ 'viso',
+ 'wait',
+ 'width',
+ 'withsurfacemesh',
+ 'WindowIndex',
+ 'which',
+ 'zbound'
+ ))
+
+ # deprecated
+ deprecated = set((
+ 'fixeborder',
+ ))
+
+ # do not highlight
+ suppress_highlight = set((
+ 'alignof',
+ 'asm',
+ 'constexpr',
+ 'decltype',
+ 'div',
+ 'double',
+ 'grad',
+ 'mutable',
+ 'namespace',
+ 'noexcept',
+ 'restrict',
+ 'static_assert',
+ 'template',
+ 'this',
+ 'thread_local',
+ 'typeid',
+ 'typename',
+ 'using'
+ ))
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
+ if value in self.operators:
+ yield index, Operator, value
+ elif value in self.types:
+ yield index, Keyword.Type, value
+ elif value in self.fespaces:
+ yield index, Name.Class, value
+ elif value in self.preprocessor:
+ yield index, Comment.Preproc, value
+ elif value in self.keywords:
+ yield index, Keyword.Reserved, value
+ elif value in self.functions:
+ yield index, Name.Function, value
+ elif value in self.parameters:
+ yield index, Keyword.Pseudo, value
+ elif value in self.suppress_highlight:
+ yield index, Name, value
+ else:
+ yield index, token, value
diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
index 0507375f..57e1161e 100644
--- a/pygments/lexers/javascript.py
+++ b/pygments/lexers/javascript.py
@@ -482,7 +482,7 @@ class TypeScriptLexer(RegexLexer):
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
- r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+ r'throw|try|catch|finally|new|delete|typeof|instanceof|void|of|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
diff --git a/pygments/lexers/make.py b/pygments/lexers/make.py
index b222b672..c1e2f3a9 100644
--- a/pygments/lexers/make.py
+++ b/pygments/lexers/make.py
@@ -102,8 +102,8 @@ class BaseMakefileLexer(RegexLexer):
(r'\$\(', Keyword, 'expansion'),
],
'expansion': [
- (r'[^$a-zA-Z_()]+', Text),
- (r'[a-zA-Z_]+', Name.Variable),
+ (r'[^\w$().-]+', Text),
+ (r'[\w.-]+', Name.Variable),
(r'\$', Keyword),
(r'\(', Keyword, '#push'),
(r'\)', Keyword, '#pop'),
diff --git a/pygments/lexers/pony.py b/pygments/lexers/pony.py
new file mode 100644
index 00000000..13239047
--- /dev/null
+++ b/pygments/lexers/pony.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.pony
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Pony and related languages.
+
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation
+
+__all__ = ['PonyLexer']
+
+
+class PonyLexer(RegexLexer):
+ """
+ For Pony source code.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'Pony'
+ aliases = ['pony']
+ filenames = ['*.pony']
+
+ _caps = r'(iso|trn|ref|val|box|tag)'
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'[^\S\n]+', Text),
+ (r'//.*\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'nested_comment'),
+ (r'"""(?:.|\n)*?"""', String.Doc),
+ (r'"', String, 'string'),
+ (r'\'.*\'', String.Char),
+ (r'=>|[]{}:().~;,|&!^?[]', Punctuation),
+ (words((
+ 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt',
+ 'not', 'or'),
+ suffix=r'\b'),
+ Operator.Word),
+ (r'!=|==|<<|>>|[-+/*%=<>]', Operator),
+ (words((
+ 'box', 'break', 'compile_error', 'compile_intrinsic',
+ 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error',
+ 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match',
+ 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then',
+ 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where',
+ 'while', 'with', '#any', '#read', '#send', '#share'),
+ suffix=r'\b'),
+ Keyword),
+ (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)',
+ bygroups(Keyword, Text), 'typename'),
+ (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'),
+ (words((
+ 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128',
+ 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64',
+ 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String',
+ 'Iterator'),
+ suffix=r'\b'),
+ Name.Builtin.Type),
+ (r'_?[A-Z]\w*', Name.Type),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'\d+', Number.Integer),
+ (r'(true|false)\b', Name.Builtin),
+ (r'_\d*', Name),
+ (r'_?[a-z][\w\'_]*', Name)
+ ],
+ 'typename': [
+ (_caps + r'?((?:\s)*)(_?[A-Z]\w*)',
+ bygroups(Keyword, Text, Name.Class), '#pop')
+ ],
+ 'methodname': [
+ (_caps + r'?((?:\s)*)(_?[a-z]\w*)',
+ bygroups(Keyword, Text, Name.Function), '#pop')
+ ],
+ 'nested_comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\"', String),
+ (r'[^\\"]+', String)
+ ]
+ }
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
index 58e762b0..daf8db96 100644
--- a/pygments/lexers/prolog.py
+++ b/pygments/lexers/prolog.py
@@ -31,7 +31,6 @@ class PrologLexer(RegexLexer):
tokens = {
'root': [
- (r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
# character literal
diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py
index 27bbe154..72efdfad 100644
--- a/pygments/lexers/rdf.py
+++ b/pygments/lexers/rdf.py
@@ -268,3 +268,10 @@ class TurtleLexer(RegexLexer):
],
}
+
+ # Turtle and Tera Term macro files share the same file extension
+ # but each has a recognizable and distinct syntax.
+ def analyse_text(text):
+ for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '):
+ if re.search(r'^\s*%s' % t, text):
+ return 0.80
diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py
index 10097fba..b7b8cb7e 100644
--- a/pygments/lexers/rust.py
+++ b/pygments/lexers/rust.py
@@ -29,7 +29,7 @@ class RustLexer(RegexLexer):
keyword_types = (
words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64',
- 'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
+ 'i128', 'u128', 'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
suffix=r'\b'),
Keyword.Type)
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index 31bc7e94..9fd6bf20 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -540,7 +540,7 @@ class MSDOSSessionLexer(ShellSessionBaseLexer):
mimetypes = []
_innerLexerCls = BatchLexer
- _ps1rgx = r'^([^>]+>)(.*\n?)'
+ _ps1rgx = r'^([^>]*>)(.*\n?)'
_ps2 = 'More? '
diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py
new file mode 100644
index 00000000..bd73d463
--- /dev/null
+++ b/pygments/lexers/slash.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.slash
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming
+ language.
+
+ :copyright: Copyright 2012 by GitHub, Inc
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer
+from pygments.token import Name, Number, String, Comment, Punctuation, \
+ Other, Keyword, Operator, Whitespace
+
+__all__ = ['SlashLexer']
+
+
+class SlashLanguageLexer(ExtendedRegexLexer):
+ _nkw = r'(?=[^a-zA-Z_0-9])'
+
+ def move_state(new_state):
+ return ("#pop", new_state)
+
+ def right_angle_bracket(lexer, match, ctx):
+ if len(ctx.stack) > 1 and ctx.stack[-2] == "string":
+ ctx.stack.pop()
+ yield match.start(), String.Interpol, "}"
+ ctx.pos = match.end()
+ pass
+
+ tokens = {
+ "root": [
+ (r"<%=", Comment.Preproc, move_state("slash")),
+ (r"<%!!", Comment.Preproc, move_state("slash")),
+ (r"<%#.*?%>", Comment.Multiline),
+ (r"<%", Comment.Preproc, move_state("slash")),
+ (r".|\n", Other),
+ ],
+ "string": [
+ (r"\\", String.Escape, move_state("string_e")),
+ (r"\"", String, move_state("slash")),
+ (r"#\{", String.Interpol, "slash"),
+ (r'.|\n', String),
+ ],
+ "string_e": [
+ (r'n', String.Escape, move_state("string")),
+ (r't', String.Escape, move_state("string")),
+ (r'r', String.Escape, move_state("string")),
+ (r'e', String.Escape, move_state("string")),
+ (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")),
+ (r'.', String.Escape, move_state("string")),
+ ],
+ "regexp": [
+ (r'}[a-z]*', String.Regex, move_state("slash")),
+ (r'\\(.|\n)', String.Regex),
+ (r'{', String.Regex, "regexp_r"),
+ (r'.|\n', String.Regex),
+ ],
+ "regexp_r": [
+ (r'}[a-z]*', String.Regex, "#pop"),
+ (r'\\(.|\n)', String.Regex),
+ (r'{', String.Regex, "regexp_r"),
+ ],
+ "slash": [
+ (r"%>", Comment.Preproc, move_state("root")),
+ (r"\"", String, move_state("string")),
+ (r"'[a-zA-Z0-9_]+", String),
+ (r'%r{', String.Regex, move_state("regexp")),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r"(#|//).*?\n", Comment.Single),
+ (r'-?[0-9]+e[+-]?[0-9]+', Number.Float),
+ (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
+ (r'-?[0-9]+', Number.Integer),
+ (r'nil'+_nkw, Name.Builtin),
+ (r'true'+_nkw, Name.Builtin),
+ (r'false'+_nkw, Name.Builtin),
+ (r'self'+_nkw, Name.Builtin),
+ (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)',
+ bygroups(Keyword, Whitespace, Name.Class)),
+ (r'class'+_nkw, Keyword),
+ (r'extends'+_nkw, Keyword),
+ (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|\^|&&|&|\||\[\]=?)',
+ bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),
+ (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|\^|&&|&|\||\[\]=?)',
+ bygroups(Keyword, Whitespace, Name.Function)),
+ (r'def'+_nkw, Keyword),
+ (r'if'+_nkw, Keyword),
+ (r'elsif'+_nkw, Keyword),
+ (r'else'+_nkw, Keyword),
+ (r'unless'+_nkw, Keyword),
+ (r'for'+_nkw, Keyword),
+ (r'in'+_nkw, Keyword),
+ (r'while'+_nkw, Keyword),
+ (r'until'+_nkw, Keyword),
+ (r'and'+_nkw, Keyword),
+ (r'or'+_nkw, Keyword),
+ (r'not'+_nkw, Keyword),
+ (r'lambda'+_nkw, Keyword),
+ (r'try'+_nkw, Keyword),
+ (r'catch'+_nkw, Keyword),
+ (r'return'+_nkw, Keyword),
+ (r'next'+_nkw, Keyword),
+ (r'last'+_nkw, Keyword),
+ (r'throw'+_nkw, Keyword),
+ (r'use'+_nkw, Keyword),
+ (r'switch'+_nkw, Keyword),
+ (r'\\', Keyword),
+ (r'λ', Keyword),
+ (r'__FILE__'+_nkw, Name.Builtin.Pseudo),
+ (r'__LINE__'+_nkw, Name.Builtin.Pseudo),
+ (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant),
+ (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name),
+ (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance),
+ (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class),
+ (r'\(', Punctuation),
+ (r'\)', Punctuation),
+ (r'\[', Punctuation),
+ (r'\]', Punctuation),
+ (r'\{', Punctuation),
+ (r'\}', right_angle_bracket),
+ (r';', Punctuation),
+ (r',', Punctuation),
+ (r'<<=', Operator),
+ (r'>>=', Operator),
+ (r'<<', Operator),
+ (r'>>', Operator),
+ (r'==', Operator),
+ (r'!=', Operator),
+ (r'=>', Operator),
+ (r'=', Operator),
+ (r'<=>', Operator),
+ (r'<=', Operator),
+ (r'>=', Operator),
+ (r'<', Operator),
+ (r'>', Operator),
+ (r'\+\+', Operator),
+ (r'\+=', Operator),
+ (r'-=', Operator),
+ (r'\*\*=', Operator),
+ (r'\*=', Operator),
+ (r'\*\*', Operator),
+ (r'\*', Operator),
+ (r'/=', Operator),
+ (r'\+', Operator),
+ (r'-', Operator),
+ (r'/', Operator),
+ (r'%=', Operator),
+ (r'%', Operator),
+ (r'\^=', Operator),
+ (r'&&=', Operator),
+ (r'&=', Operator),
+ (r'&&', Operator),
+ (r'&', Operator),
+ (r'\|\|=', Operator),
+ (r'\|=', Operator),
+ (r'\|\|', Operator),
+ (r'\|', Operator),
+ (r'!', Operator),
+ (r'\.\.\.', Operator),
+ (r'\.\.', Operator),
+ (r'\.', Operator),
+ (r'::', Operator),
+ (r':', Operator),
+ (r'(\s|\n)+', Whitespace),
+ (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable),
+ ],
+ }
+
+
+class SlashLexer(DelegatingLexer):
+ """
+ Lexer for the Slash programming language.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'Slash'
+ aliases = ['slash']
+ filenames = ['*.sl']
+
+ def __init__(self, **options):
+ from pygments.lexers.web import HtmlLexer
+ super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options)
diff --git a/pygments/lexers/teraterm.py b/pygments/lexers/teraterm.py
new file mode 100644
index 00000000..100a89e0
--- /dev/null
+++ b/pygments/lexers/teraterm.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.teraterm
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Tera Term macro files.
+
+ :copyright: Copyright 2006-2018 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups
+from pygments.token import Text, Comment, Operator, Name, String, \
+ Number, Keyword
+
+__all__ = ['TeraTermLexer']
+
+
+class TeraTermLexer(RegexLexer):
+ """
+ For `Tera Term <https://ttssh2.osdn.jp/>`_ macro source code.
+
+ .. versionadded:: 2.4
+ """
+ name = 'Tera Term macro'
+ aliases = ['ttl', 'teraterm', 'teratermmacro']
+ filenames = ['*.ttl']
+ mimetypes = ['text/x-teratermmacro']
+
+ tokens = {
+ 'root': [
+ include('comments'),
+ include('labels'),
+ include('commands'),
+ include('builtin-variables'),
+ include('user-variables'),
+ include('operators'),
+ include('numeric-literals'),
+ include('string-literals'),
+ include('all-whitespace'),
+ (r'[^\s]', Text),
+ ],
+ 'comments': [
+ (r';[^\r\n]*', Comment.Single),
+ (r'/\*', Comment.Multiline, 'in-comment'),
+ ],
+ 'in-comment': [
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[^*/]+', Comment.Multiline),
+ (r'[*/]', Comment.Multiline)
+ ],
+ 'labels': [
+ (r'(?i)^(\s*)(:[0-9a-z_]+)', bygroups(Text, Name.Label)),
+ ],
+ 'commands': [
+ (
+ r'(?i)\b('
+ r'basename|beep|bplusrecv|bplussend|break|bringupbox|'
+ r'callmenu|changedir|checksum16|checksum16file|'
+ r'checksum32|checksum32file|checksum8|checksum8file|'
+ r'clearscreen|clipb2var|closesbox|closett|code2str|'
+ r'connect|continue|crc16|crc16file|crc32|crc32file|'
+ r'cygconnect|delpassword|dirname|dirnamebox|disconnect|'
+ r'dispstr|do|else|elseif|enablekeyb|end|endif|enduntil|'
+ r'endwhile|exec|execcmnd|exit|expandenv|fileclose|'
+ r'fileconcat|filecopy|filecreate|filedelete|filelock|'
+ r'filemarkptr|filenamebox|fileopen|fileread|filereadln|'
+ r'filerename|filesearch|fileseek|fileseekback|filestat|'
+ r'filestrseek|filestrseek2|filetruncate|fileunlock|'
+ r'filewrite|filewriteln|findclose|findfirst|findnext|'
+ r'flushrecv|foldercreate|folderdelete|foldersearch|for|'
+ r'getdate|getdir|getenv|getfileattr|gethostname|'
+ r'getipv4addr|getipv6addr|getmodemstatus|getpassword|'
+ r'getspecialfolder|gettime|gettitle|getttdir|getver|'
+ r'if|ifdefined|include|inputbox|int2str|intdim|'
+ r'ispassword|kmtfinish|kmtget|kmtrecv|kmtsend|listbox|'
+ r'loadkeymap|logautoclosemode|logclose|loginfo|logopen|'
+ r'logpause|logrotate|logstart|logwrite|loop|makepath|'
+ r'messagebox|mpause|next|passwordbox|pause|quickvanrecv|'
+ r'quickvansend|random|recvln|regexoption|restoresetup|'
+ r'return|rotateleft|rotateright|scprecv|scpsend|send|'
+ r'sendbreak|sendbroadcast|sendfile|sendkcode|sendln|'
+ r'sendlnbroadcast|sendlnmulticast|sendmulticast|setbaud|'
+ r'setdate|setdebug|setdir|setdlgpos|setdtr|setecho|'
+ r'setenv|setexitcode|setfileattr|setflowctrl|'
+ r'setmulticastname|setpassword|setrts|setsync|settime|'
+ r'settitle|show|showtt|sprintf|sprintf2|statusbox|'
+ r'str2code|str2int|strcompare|strconcat|strcopy|strdim|'
+ r'strinsert|strjoin|strlen|strmatch|strremove|'
+ r'strreplace|strscan|strspecial|strsplit|strtrim|'
+ r'testlink|then|tolower|toupper|unlink|until|uptime|'
+ r'var2clipb|wait|wait4all|waitevent|waitln|waitn|'
+ r'waitrecv|waitregex|while|xmodemrecv|xmodemsend|'
+ r'yesnobox|ymodemrecv|ymodemsend|zmodemrecv|zmodemsend'
+ r')\b',
+ Keyword,
+ ),
+ (
+ r'(?i)(call|goto)([ \t]+)([0-9a-z_]+)',
+ bygroups(Keyword, Text, Name.Label),
+ )
+ ],
+ 'builtin-variables': [
+ (
+ r'(?i)('
+ r'groupmatchstr1|groupmatchstr2|groupmatchstr3|'
+ r'groupmatchstr4|groupmatchstr5|groupmatchstr6|'
+ r'groupmatchstr7|groupmatchstr8|groupmatchstr9|'
+ r'param1|param2|param3|param4|param5|param6|'
+ r'param7|param8|param9|paramcnt|params|'
+ r'inputstr|matchstr|mtimeout|result|timeout'
+ r')\b',
+ Name.Builtin
+ ),
+ ],
+ 'user-variables': [
+ (r'(?i)[A-Z_][A-Z0-9_]*', Name.Variable),
+ ],
+ 'numeric-literals': [
+ (r'(-?)([0-9]+)', bygroups(Operator, Number.Integer)),
+ (r'(?i)\$[0-9a-f]+', Number.Hex),
+ ],
+ 'string-literals': [
+ (r'(?i)#(?:[0-9]+|\$[0-9a-f]+)', String.Char),
+ (r"'", String.Single, 'in-single-string'),
+ (r'"', String.Double, 'in-double-string'),
+ ],
+ 'in-general-string': [
+ (r'[\\][\\nt]', String.Escape), # Only three escapes are supported.
+ (r'.', String),
+ ],
+ 'in-single-string': [
+ (r"'", String.Single, '#pop'),
+ include('in-general-string'),
+ ],
+ 'in-double-string': [
+ (r'"', String.Double, '#pop'),
+ include('in-general-string'),
+ ],
+ 'operators': [
+ (r'and|not|or|xor', Operator.Word),
+ (r'[!%&*+<=>^~\|\/-]+', Operator),
+ (r'[()]', String.Symbol),
+ ],
+ 'all-whitespace': [
+ (r'[\s]+', Text),
+ ],
+ }
+
+ # Turtle and Tera Term macro files share the same file extension
+ # but each has a recognizable and distinct syntax.
+ def analyse_text(text):
+ result = 0.0
+ if re.search(TeraTermLexer.tokens['commands'][0][0], text):
+ result += 0.60
+ return result
diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py
index e84a398b..e7619c33 100644
--- a/pygments/lexers/theorem.py
+++ b/pygments/lexers/theorem.py
@@ -98,7 +98,6 @@ class CoqLexer(RegexLexer):
operators = r'[!$%&*+\./:<=>?@^|~-]'
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
- primitives = ('unit', 'nat', 'bool', 'string', 'ascii', 'list')
tokens = {
'root': [
@@ -115,7 +114,6 @@ class CoqLexer(RegexLexer):
(r'\b([A-Z][\w\']*)', Name),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
- (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
diff --git a/pygments/style.py b/pygments/style.py
index 89766d8c..aee23f96 100644
--- a/pygments/style.py
+++ b/pygments/style.py
@@ -73,9 +73,11 @@ class StyleMeta(type):
if len(col) == 6:
return col
elif len(col) == 3:
- return col[0]*2 + col[1]*2 + col[2]*2
+ return col[0] * 2 + col[1] * 2 + col[2] * 2
elif text == '':
return ''
+ elif text.startswith('var') or text.startswith('calc'):
+ return text
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
diff --git a/setup.py b/setup.py
index 52889227..bd54b0f3 100755
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@ else:
setup(
name = 'Pygments',
- version = '2.3.1',
+ version = '2.4.0',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
diff --git a/tests/examplefiles/apache2.conf b/tests/examplefiles/apache2.conf
index d0e838e0..5db66b10 100644
--- a/tests/examplefiles/apache2.conf
+++ b/tests/examplefiles/apache2.conf
@@ -391,3 +391,8 @@ BrowserMatch "^WebDAVFS/1.[012]" redirect-carefully
# Include the virtual host configurations:
Include /etc/apache2/sites-enabled/[^.#]*
+
+# From PR#766
+<IfVersion >= 2.4>
+ErrorLogFormat "%{cu}t %M"
+</IfVersion> \ No newline at end of file
diff --git a/tests/examplefiles/example.bbc b/tests/examplefiles/example.bbc
new file mode 100644
index 00000000..ebdb8537
--- /dev/null
+++ b/tests/examplefiles/example.bbc
@@ -0,0 +1,156 @@
+10REM >EIRC
+20REM The simplest IRC client you can write. Maybe.
+30REM (C) Justin Fletcher, 1998
+40:
+50END=PAGE+1024*16
+60REM Change these if you wish
+70host$="irc.stealth.net"
+80port=6667
+90nick$="eirc"
+100ourchan$="#acorn"
+110:
+120REM Start connecting to a host
+130SYS "ESocket_ConnectToHost",host$,port TO handle
+140REPEAT
+150 SYS "ESocket_CheckState",handle TO state
+160 IF state<-1 THENSYS "ESocket_Forget",handle:SYS "ESocket_DecodeState",state TO a$:ERROR 1,"Failed ("+a$+")"
+170UNTIL state=4
+180:
+190REM We are now connected
+200PRINT"Connected"
+210:
+220REM Log on to the server
+230SYS "ESocket_SendLine",handle,"USER "+nick$+" x x :"+nick$
+240SYS "ESocket_SendLine",handle,"NICK "+nick$
+250SYS "ESocket_SendLine",handle,"JOIN "+ourchan$
+260REM Install a monitor so that we don't waste time
+270SYS "ESocket_Monitor",0,handle TO monitor
+280SYS "ESocket_ResetMonitor",monitor,0 TO polladdr%
+290:
+300REM If we crash, we should tidy up after ourselves
+310ON ERROR SYS "XESocket_Forget",handle:SYS "XESocket_Forget",monitor:ERROR EXT ERR,REPORT$+" at line "+STR$ERL
+320:
+330REM Memory buffer for our data
+340bufsize%=1024
+350DIM buf% bufsize%
+360:
+370input$="":REM The input line
+380REPEAT
+390 REM In a taskwindow we should yield until there is data
+400 SYS "OS_UpCall",6,polladdr%
+410 IF !polladdr%<>0 THEN
+420 REM Reset the monitor for the time being
+430 SYS "ESocket_ResetMonitor",monitor,0 TO polladdr%
+440 REPEAT
+450 REM Read lines from the connection until this buffer is empty
+460 SYS "ESocket_ReadLine",handle,buf%,bufsize%,%100 TO ,str,len
+470 IF str<>0 AND $str<>"" THEN
+480 line$=$str
+490 IF LEFT$(line$,4)="PING" THEN
+500        REM Pings must be replied to immediately
+510 SYS "ESocket_SendLine",handle,"PONG "+MID$(line$,6)
+520 ELSE
+530 REM Extract source info
+540 from$=MID$(LEFT$(line$,INSTR(line$+" "," ")-1),2)
+550 line$=MID$(line$,INSTR(line$+" "," ")+1)
+560 uid$=LEFT$(from$,INSTR(from$+"!","!")-1)
+570 com$=LEFT$(line$,INSTR(line$+" "," ")-1)
+580 line$=MID$(line$,INSTR(line$+" "," ")+1)
+590 REM remove the input line
+600 IF input$<>"" THENFORI=1TOLEN(input$):VDU127:NEXT
+610 CASE FNupper(com$) OF
+620 WHEN "PRIVMSG"
+630 REM Extract the destination
+640 chan$=LEFT$(line$,INSTR(line$+" "," ")-1)
+650 line$=MID$(line$,INSTR(line$+" "," ")+2):REM Skip :
+660 IF LEFT$(line$,1)=CHR$1 THEN
+670 REM CTCP, so respond to it
+680 line$=MID$(line$,2,LEN(line$)-2)
+690 com$=LEFT$(line$,INSTR(line$+" "," ")-1)
+700 line$=MID$(line$,INSTR(line$+" "," ")+1)
+710 CASE FNupper(com$) OF
+720 WHEN "PING"
+730 REM Ping lag timing
+740 line$="PONG "+line$
+750 PRINTuid$;" pinged us"
+760 WHEN "VERSION"
+770 REM Version checking
+780 line$="VERSION EIRC 1.00 (c) Justin Fletcher"
+790 PRINTuid$;" wanted our version"
+800 WHEN "ACTION"
+810 PRINT"* ";uid$;" ";line$
+820 line$=""
+830 OTHERWISE
+840 REM everything else is an error
+850 line$="ERRMSG "+com$+" not understood"
+860 PRINT"CTCP '";com$;"' from ";uid$;" (";line$;")"
+870 ENDCASE
+880 IF line$<>"" THEN
+890 SYS "ESocket_SendLine",handle,"NOTICE "+uid$+" :"+CHR$1+line$+CHR$1
+900 ENDIF
+910 ELSE
+920 REM Somebody said something...
+930 PRINT"<";uid$;"> ";FNsafe(line$)
+940 ENDIF
+950 WHEN "JOIN"
+960 REM We (or someone else) has joined the channel
+970 chan$=LEFT$(line$,INSTR(line$+" "," ")):REM Skip :
+980 IF LEFT$(chan$,1)=":" THENchan$=MID$(chan$,2)
+990 PRINTuid$;" has joined ";chan$
+1000 WHEN "PART"
+1010 REM Someone else has left the channel
+1020 chan$=LEFT$(line$,INSTR(line$+" "," ")-1)
+1030 IF LEFT$(chan$,1)=":" THENchan$=MID$(chan$,2)
+1040 PRINTuid$;" has left ";chan$
+1050 WHEN "QUIT"
+1060 REM Someone else has quit IRC
+1070 PRINTuid$;" quit IRC"
+1080 OTHERWISE
+1090 REM Some unknown command
+1100 PRINTuid$;":";com$;":";FNsafe(line$)
+1110 ENDCASE
+1120 REM Re-display our input line
+1130 PRINTinput$;
+1140 ENDIF
+1150 ENDIF
+1160 UNTIL str=0
+1170 ENDIF
+1180 b$=INKEY$(0)
+1190 IF b$<>"" THEN
+1200 CASE b$ OF
+1210 WHEN CHR$13
+1220 SYS "ESocket_SendLine",handle,"PRIVMSG "+ourchan$+" :"+input$
+1230 REM Remove the line
+1240 IF input$<>"" THENFORI=1TOLEN(input$):VDU127:NEXT
+1250 REM We said it...
+1260 PRINT"<"+nick$+"> ";input$
+1270 input$=""
+1280 WHEN CHR$127,CHR$8
+1290 REM Backspace
+1300 IF input$<>"" THENVDU127
+1310 input$=LEFT$(input$)
+1320 OTHERWISE
+1330      REM Add to current input
+1340 input$+=b$
+1350 PRINTb$;
+1360 ENDCASE
+1370 ENDIF
+1380 REM Has the socket closed
+1390 SYS "ESocket_Closed",handle,%0 TO closed
+1400UNTIL closed
+1410SYS "ESocket_Forget",handle
+1420SYS "ESocket_Forget",monitor
+1430END
+1440:
+1450DEFFNupper(a$):LOCAL c$,b$,I
+1460FORI=1TOLEN(a$)
+1470c$=MID$(a$,I,1):IF c$>="a"ANDc$<="z"THENc$=CHR$(ASC(c$)-32)
+1480b$+=c$:NEXT:=b$
+1490
+1500REM Remove control codes
+1510DEFFNsafe(line$)
+1520LOCAL I
+1530FORI=1TOLEN(line$)
+1540 IF MID$(line$,I,1)<" " THENMID$(line$,I,1)="*"
+1550NEXT
+1560=line$
diff --git a/tests/examplefiles/example.boa b/tests/examplefiles/example.boa
new file mode 100644
index 00000000..a18f1626
--- /dev/null
+++ b/tests/examplefiles/example.boa
@@ -0,0 +1,18 @@
+# Computes Number of Public Methods (NPM) for each project, per-type
+# Output is: NPM[ProjectID][TypeName] = NPM value
+p: Project = input;
+NPM: output sum[string][string] of int;
+
+visit(p, visitor {
+ # only look at the latest snapshot
+ before n: CodeRepository -> {
+ snapshot := getsnapshot(n);
+ foreach (i: int; def(snapshot[i]))
+ visit(snapshot[i]);
+ stop;
+ }
+ before node: Declaration ->
+ if (node.kind == TypeKind.CLASS)
+ foreach (i: int; has_modifier_public(node.methods[i]))
+ NPM[p.id][node.name] << 1;
+});
diff --git a/tests/examplefiles/example.pony b/tests/examplefiles/example.pony
new file mode 100644
index 00000000..654f2376
--- /dev/null
+++ b/tests/examplefiles/example.pony
@@ -0,0 +1,18 @@
+use "somepkg"
+
+/*
+ /* Nested */
+*/
+
+class trn Foo[A: Stringable ref] is Stringable
+ let _x = "\""
+
+ fun val dofoo() =>
+ """
+ DocString
+ """
+ (U64(2), "foo")._2
+
+actor Main
+ new create(env: Env) =>
+ env.out.print("Hello world")
diff --git a/tests/examplefiles/example.toml b/tests/examplefiles/example.toml
new file mode 100644
index 00000000..9c60c79f
--- /dev/null
+++ b/tests/examplefiles/example.toml
@@ -0,0 +1,181 @@
+# This is a TOML document comment
+
+title = "TOML example file" # This is an inline comment
+
+[examples]
+# Examples taken from https://github.com/toml-lang/toml/blob/master/README.md
+key = "value"
+bare_key = "value"
+bare-key = "value"
+1234 = "value"
+"127.0.0.1" = "value"
+"character encoding" = "value"
+"ʎǝʞ" = "value"
+'key2' = "value"
+'quoted "value"' = "value"
+name = "Orange"
+physical.color = "orange"
+physical.shape = "round"
+site."google.com" = true
+a.b.c = 1
+a.d = 2
+
+[strings]
+str = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+str1 = """
+Roses are red
+Violets are blue"""
+str2 = "Roses are red\nViolets are blue"
+str3 = "Roses are red\r\nViolets are blue"
+
+ [strings.equivalents]
+ str1 = "The quick brown fox jumps over the lazy dog."
+ str2 = """
+The quick brown \
+
+
+ fox jumps over \
+ the lazy dog."""
+ str3 = """\
+ The quick brown \
+ fox jumps over \
+ the lazy dog.\
+ """
+
+ [strings.literal]
+ winpath = 'C:\Users\nodejs\templates'
+ winpath2 = '\\ServerX\admin$\system32\'
+ quoted = 'Tom "Dubs" Preston-Werner'
+ regex = '<\i\c*\s*>'
+
+ [strings.multiline]
+ regex2 = '''I [dw]on't need \d{2} apples'''
+ lines = '''
+The first newline is
+trimmed in raw strings.
+ All other whitespace
+ is preserved.
+'''
+
+[integers]
+int1 = +99
+int2 = 42
+int3 = 0
+int4 = -17
+int5 = 1_000
+int6 = 5_349_221
+int7 = 1_2_3_4_5 # discouraged format
+# hexadecimal with prefix `0x`
+hex1 = 0xDEADBEEF
+hex2 = 0xdeadbeef
+hex3 = 0xdead_beef
+# octal with prefix `0o`
+oct1 = 0o01234567
+oct2 = 0o755 # useful for Unix file permissions
+# binary with prefix `0b`
+bin1 = 0b11010110
+
+[floats]
+# fractional
+flt1 = +1.0
+flt2 = 3.1415
+flt3 = -0.01
+# exponent
+flt4 = 5e+22
+flt5 = 1e6
+flt6 = -2E-2
+# both
+flt7 = 6.626e-34
+# with underscores, for readability
+flt8 = 224_617.445_991_228
+# infinity
+sf1 = inf # positive infinity
+sf2 = +inf # positive infinity
+sf3 = -inf # negative infinity
+# not a number
+sf4 = nan # actual sNaN/qNaN encoding is implementation specific
+sf5 = +nan # same as `nan`
+sf6 = -nan # valid, actual encoding is implementation specific
+# plus/minus zero
+sf0_1 = +0.0
+sf0_2 = -0.0
+
+[booleans]
+bool1 = true
+bool2 = false
+
+[datetime.offset]
+odt1 = 1979-05-27T07:32:00Z
+odt2 = 1979-05-27T00:32:00-07:00
+odt3 = 1979-05-27T00:32:00.999999-07:00
+odt4 = 1979-05-27 07:32:00Z
+
+[datetime.local]
+ldt1 = 1979-05-27T07:32:00
+ldt2 = 1979-05-27T00:32:00.999999
+
+[date.local]
+ld1 = 1979-05-27
+
+[time.local]
+lt1 = 07:32:00
+lt2 = 00:32:00.999999
+
+[arrays]
+arr1 = [ 1, 2, 3 ]
+arr2 = [ "red", "yellow", "green" ]
+arr3 = [ [ 1, 2 ], [3, 4, 5] ]
+arr4 = [ "all", 'strings', """are the same""", '''type''']
+arr5 = [ [ 1, 2 ], ["a", "b", "c"] ]
+arr6 = [ 1, 2.0 ] # INVALID
+arr7 = [
+ 1, 2, 3
+]
+arr8 = [
+ 1,
+ 2, # this is ok
+]
+
+["inline tables"]
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+animal = { type.name = "pug" }
+
+["arrays of tables"]
+points = [ { x = 1, y = 2, z = 3 },
+ { x = 7, y = 8, z = 9 },
+ { x = 2, y = 4, z = 8 } ]
+
+ [products]
+
+ [[products]]
+ name = "Hammer"
+ sku = 738594937
+
+ [[products]]
+
+ [[products]]
+ name = "Nail"
+ sku = 284758393
+ color = "gray"
+
+ [fruits]
+
+ [[fruit]]
+ name = "apple"
+
+ [fruit.physical]
+ color = "red"
+ shape = "round"
+
+ [[fruit.variety]]
+ name = "red delicious"
+
+ [[fruit.variety]]
+ name = "granny smith"
+
+ [[fruit]]
+ name = "banana"
+
+ [[fruit.variety]]
+ name = "plantain"
diff --git a/tests/examplefiles/freefem.edp b/tests/examplefiles/freefem.edp
new file mode 100644
index 00000000..d4313338
--- /dev/null
+++ b/tests/examplefiles/freefem.edp
@@ -0,0 +1,94 @@
+// Example of problem solving in parallel
+
+// Usage:
+// ff-mpirun -np 12 LaplacianParallel.edp (here 12 is the number of threads; use the command nproc to find it)
+// Need FreeFem++ with PETSc
+
+// Parallel stuff
+load "PETSc"
+macro partitioner()metis//
+macro dimension()2//
+include "./macro_ddm.idp"
+
+macro def(i)[i]//
+macro init(i)[i]//
+//macro meshN()mesh// //these macro are defined in macro_ddm.idp
+//macro intN()int2d//
+
+// Parameters
+int nn = 500;
+real L = 1.;
+real H = 1.;
+
+func f = 1.;
+
+func Pk = P1;
+
+// Mesh
+border b1(t=0, L){x=t; y=0; label=1;}
+border b2(t=0, H){x=L; y=t; label=2;}
+border b3(t=L, 0){x=t; y=H; label=3;}
+border b4(t=H, 0){x=0; y=t; label=4;}
+
+meshN Th = buildmesh(b1(1) + b2(1) + b3(1) + b4(1)); //build a really coarse mesh (just to build the fespace later)
+//meshN Th = square(1, 1, [L*x, H*y]);
+
+int[int] Wall = [1, 2, 3, 4];
+
+// Fespace
+fespace Uh(Th, Pk);
+
+// Mesh partition
+int[int] ArrayIntersection;
+int[int][int] RestrictionIntersection(0);
+real[int] D;
+
+meshN ThBorder;
+meshN ThGlobal = buildmesh(b1(nn*L) + b2(nn*H) + b3(nn*L) + b4(nn*H)); //build the mesh to partition
+//meshN ThGlobal = square(nn*L, nn*H, [L*x, H*y]);
+int InterfaceLabel = 10;
+int Split = 1;
+int Overlap = 1;
+build(Th, ThBorder, ThGlobal, InterfaceLabel, Split, Overlap, D, ArrayIntersection, RestrictionIntersection, Uh, Pk, mpiCommWorld, false); //see macro_ddm.idp for detailed parameters
+
+// Macro
+macro grad(u) [dx(u), dy(u)] //
+
+// Problem
+varf vLaplacian (u, uh) //Problem in varf formulation mandatory
+ = intN(Th)(
+ grad(u)' * grad(uh)
+ )
+ - intN(Th)(
+ f * uh
+ )
+ + on(Wall, u=0)
+ ;
+
+matrix<real> Laplacian = vLaplacian(Uh, Uh); //build the sequential matrix
+real[int] LaplacianBoundary = vLaplacian(0, Uh);// and right hand side
+
+//// In sequential, you normally do that:
+//// Solve
+//Uh def(u)=init(0);
+//u[] = Laplacian^-1 * LaplacianBoundary;
+
+//// Plot
+//plot(u);
+
+// In parallel:
+// Matrix construction
+dmatrix PLaplacian(Laplacian, ArrayIntersection, RestrictionIntersection, D, bs=1); //build the parallel matrix
+set(PLaplacian, sparams="-pc_type lu -pc_factor_mat_solver_package mumps"); //preconditioner LU and MUMPS solver (see PETSc doc for detailed parameters)
+
+// Solve
+Uh def(u)=init(0); //define the unknown (must be defined after mesh partitioning)
+u[] = PLaplacian^-1 * LaplacianBoundary;
+
+// Export results to vtk (there is not plot in parallel)
+{
+ fespace PV(Th, P1);
+ PV uu=u;
+ int[int] Order = [1];
+ export("Result", Th, uu, Order, mpiCommWorld);
+}
diff --git a/tests/examplefiles/teraterm.ttl b/tests/examplefiles/teraterm.ttl
new file mode 100644
index 00000000..f6a3648a
--- /dev/null
+++ b/tests/examplefiles/teraterm.ttl
@@ -0,0 +1,34 @@
+messagebox "text \\not escaped \nescaped n" "other\n\rthing"
+messagebox "goto label /* a string */ ; same string"
+a=10
+b= 'abc'#$41'def'
+c =#65 /* multiline comment * / * / *//*
+comment */ d = 10 ; inline comment /* still inline */
+e = d + 20 - (($a * 2) / 4) << 3 % (2 >> 1) + result
+
+
+:thing
+
+strcompare c "thing"
+if result = 1 then
+ goto label_
+elseif result > -1 then
+ goto 10
+elseif d > (1+2*3)/7 then
+ messagebox "thing"
+else
+ messagebox "done"
+endif
+
+if abc messagebox "thing1" "title"
+
+
+; Invalid syntax
+bad = "no closing double quote
+bad = 'no closing single quote
+garbage
+...
+...
+...
+
+endgarbage
diff --git a/tests/test_asm.py b/tests/test_asm.py
new file mode 100644
index 00000000..8eaed248
--- /dev/null
+++ b/tests/test_asm.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+"""
+    Basic NasmLexer Test
+    ~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+import os
+
+from pygments.token import Token
+from pygments.lexers import NasmLexer
+
+
+class NasmLexerTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = NasmLexer()
+
+ def testCPUID(self):
+ # CPU is a valid directive, and we don't want to parse this as
+ # cpu id, but as a single token. See bug #1517
+ fragment = 'cpuid'
+ expected = [
+ (Token.Name.Function, u'cpuid'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index d919a950..778f3d03 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -11,7 +11,6 @@ import unittest
from pygments.token import Text
from pygments.lexer import RegexLexer
-from pygments.lexer import bygroups
from pygments.lexer import default
@@ -21,6 +20,8 @@ class TestLexer(RegexLexer):
'root': [
('a', Text.Root, 'rag'),
('e', Text.Root),
+ ('#', Text.Root, '#pop'),
+ ('@', Text.Root, ('#pop', '#pop')),
default(('beer', 'beer'))
],
'beer': [
@@ -37,18 +38,29 @@ class TupleTransTest(unittest.TestCase):
def test(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('abcde'))
- self.assertEqual(toks,
- [(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
+ self.assertEqual(toks, [
+ (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
(3, Text.Beer, 'd'), (4, Text.Root, 'e')])
def test_multiline(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('a\ne'))
- self.assertEqual(toks,
- [(0, Text.Root, 'a'), (1, Text, u'\n'),
- (2, Text.Root, 'e')])
+ self.assertEqual(toks, [
+ (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')])
def test_default(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('d'))
self.assertEqual(toks, [(0, Text.Beer, 'd')])
+
+
+class PopEmptyTest(unittest.TestCase):
+ def test_regular(self):
+ lx = TestLexer()
+ toks = list(lx.get_tokens_unprocessed('#e'))
+ self.assertEqual(toks, [(0, Text.Root, '#'), (1, Text.Root, 'e')])
+
+ def test_tuple(self):
+ lx = TestLexer()
+ toks = list(lx.get_tokens_unprocessed('@e'))
+ self.assertEqual(toks, [(0, Text.Root, '@'), (1, Text.Root, 'e')])
diff --git a/tests/test_shell.py b/tests/test_shell.py
index e283793e..1121240a 100644
--- a/tests/test_shell.py
+++ b/tests/test_shell.py
@@ -10,7 +10,7 @@
import unittest
from pygments.token import Token
-from pygments.lexers import BashLexer, BashSessionLexer
+from pygments.lexers import BashLexer, BashSessionLexer, MSDOSSessionLexer
class BashTest(unittest.TestCase):
@@ -140,3 +140,20 @@ class BashSessionTest(unittest.TestCase):
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+class MSDOSSessionTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = MSDOSSessionLexer()
+
+ def testGtOnlyPrompt(self):
+ fragment = u'> py\nhi\n'
+ tokens = [
+ (Token.Text, u''),
+ (Token.Generic.Prompt, u'>'),
+ (Token.Text, u' '),
+ (Token.Text, u'py'),
+ (Token.Text, u''),
+ (Token.Text, u'\n'),
+ (Token.Generic.Output, u'hi\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 1f44807d..acc15167 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -90,13 +90,13 @@ async def function(a,b,c, *d, **kwarg:Bool)->Bool:
def test_256esc_seq(self):
"""
- test that a few escape sequences are actualy used when using ansi<> color codes
+ test that a few escape sequences are actually used when using ansi<> color codes
"""
def termtest(x):
return highlight(x, Python3Lexer(),
Terminal256Formatter(style=MyStyle))
- self.assertTrue('32;41' in termtest('0x123'))
- self.assertTrue('32;42' in termtest('123'))
- self.assertTrue('30;01' in termtest('#comment'))
- self.assertTrue('34;41' in termtest('"String"'))
+ self.assertTrue('32;101' in termtest('0x123'))
+ self.assertTrue('92;42' in termtest('123'))
+ self.assertTrue('90' in termtest('#comment'))
+ self.assertTrue('94;41' in termtest('"String"'))