summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnteru <bitbucket@ca.sh13.net>2019-02-12 14:52:24 +0000
committerAnteru <bitbucket@ca.sh13.net>2019-02-12 14:52:24 +0000
commitdc725fcaa116e5c1437c64319abca2a55a3bc11a (patch)
treee078a303fdd62fbe0fa7ad24b8e3e9ed1cc458b3
parentef16fca27bd00702983687edbfd9c767a29fc0a2 (diff)
parenta1467b3b8f2b889ae8dc7cea89ba3efceb17683e (diff)
downloadpygments-dc725fcaa116e5c1437c64319abca2a55a3bc11a.tar.gz
Merged in mdiener/pygments-main (pull request #788)
add lexer for Charm++ ci files
-rw-r--r--AUTHORS3
-rw-r--r--CHANGES10
-rw-r--r--doc/docs/styles.rst50
-rw-r--r--pygments/formatters/__init__.py3
-rw-r--r--pygments/formatters/html.py7
-rw-r--r--pygments/formatters/irc.py76
-rw-r--r--pygments/formatters/terminal.py48
-rw-r--r--pygments/formatters/terminal256.py4
-rw-r--r--pygments/lexers/__init__.py3
-rw-r--r--pygments/lexers/_cocoa_builtins.py3
-rw-r--r--pygments/lexers/_mapping.py3
-rw-r--r--pygments/lexers/_php_builtins.py28
-rw-r--r--pygments/lexers/_stan_builtins.py276
-rw-r--r--pygments/lexers/asm.py105
-rw-r--r--pygments/lexers/data.py4
-rw-r--r--pygments/lexers/esoteric.py2
-rw-r--r--pygments/lexers/floscript.py2
-rw-r--r--pygments/lexers/forth.py2
-rw-r--r--pygments/lexers/graph.py21
-rw-r--r--pygments/lexers/haskell.py26
-rw-r--r--pygments/lexers/modeling.py34
-rw-r--r--pygments/lexers/qvt.py2
-rw-r--r--pygments/lexers/sgf.py54
-rw-r--r--pygments/lexers/shell.py24
-rw-r--r--pygments/lexers/sql.py58
-rw-r--r--pygments/lexers/templates.py2
-rw-r--r--pygments/lexers/text.py1
-rw-r--r--pygments/styles/__init__.py2
-rw-r--r--pygments/styles/arduino.py4
-rw-r--r--pygments/styles/paraiso_dark.py2
-rw-r--r--pygments/styles/paraiso_light.py2
-rw-r--r--pygments/styles/solarized.py129
-rwxr-xr-xscripts/check_sources.py3
-rwxr-xr-xsetup.py5
-rw-r--r--tests/examplefiles/example.sgf35
-rw-r--r--tests/examplefiles/example.sl6
-rw-r--r--tests/examplefiles/example.stan4
-rw-r--r--tests/test_cmdline.py13
-rw-r--r--tests/test_html_formatter.py10
-rw-r--r--tests/test_sql.py46
40 files changed, 777 insertions, 335 deletions
diff --git a/AUTHORS b/AUTHORS
index b542fe54..758e042b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -31,6 +31,7 @@ Other contributors, listed alphabetically, are:
* Sébastien Bigaret -- QVT Operational lexer
* Jarrett Billingsley -- MiniD lexer
* Adam Blinkinsop -- Haskell, Redcode lexers
+* Stéphane Blondon -- SGF lexer
* Frits van Bommel -- assembler lexers
* Pierre Bourdon -- bugfixes
* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter
@@ -187,7 +188,7 @@ Other contributors, listed alphabetically, are:
* Alexander Smishlajev -- Visual FoxPro lexer
* Steve Spigarelli -- XQuery lexer
* Jerome St-Louis -- eC lexer
-* Camil Staps -- Clean and NuSMV lexers
+* Camil Staps -- Clean and NuSMV lexers; Solarized style
* James Strachan -- Kotlin lexer
* Tom Stuart -- Treetop lexer
* Colin Sullivan -- SuperCollider lexer
diff --git a/CHANGES b/CHANGES
index b0257335..a8b63c02 100644
--- a/CHANGES
+++ b/CHANGES
@@ -13,12 +13,22 @@ Version 2.4.0
- Added lexers:
* FloScript (PR#750)
+ * Hspec (PR#790)
+ * SGF (PR#780)
+ * Slurm (PR#760)
- Updated lexers:
+ * Cypher (PR#746)
+ * LLVM (PR#792)
+ * SQL (PR#672)
+ * Stan (PR#774)
* Terraform (PR#787)
+- Add solarized style (PR#708)
- Change ANSI color names (PR#777)
+- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
+- Updated Trove classifiers and ``pip`` requirements (PR#799)
Version 2.3.1
-------------
diff --git a/doc/docs/styles.rst b/doc/docs/styles.rst
index 65a2a863..570293a5 100644
--- a/doc/docs/styles.rst
+++ b/doc/docs/styles.rst
@@ -201,34 +201,32 @@ The following are considered "dark" colors and will be rendered as non-bold:
Exact behavior might depends on the terminal emulator you are using, and its
settings.
-.. _NewAnsiColorNames:
+.. _new-ansi-color-names:
.. versionchanged:: 2.4
-The definition of the ansi color names has changed.
+The definition of the ANSI color names has changed.
New names are easier to understand and align to the colors used in other projects.
-
-+-------------------------+--------------------------+
-| New names | Pygments 2.2 |
-+=======================+============================+
-| ``ansiblack`` | ``#ansiblack`` |
-| ``ansired`` | ``#ansidarkred`` |
-| ``ansigreen`` | ``#ansidarkgreen`` |
-| ``ansiyellow`` | ``#ansibrown`` |
-| ``ansiblue`` | ``#ansidarkblue`` |
-| ``ansimagenta`` | ``#ansipurple`` |
-| ``ansicyan`` | ``#ansiteal`` |
-| ``ansigray`` | ``#ansilightgray`` |
-| ``ansibrightblack`` | ``#ansidarkgray`` |
-| ``ansibrightred`` | ``#ansired`` |
-| ``ansibrightgreen`` | ``#ansigreen`` |
-| ``ansibrightyellow`` | ``#ansiyellow`` |
-| ``ansibrightblue`` | ``#ansiblue`` |
-| ``ansibrightmagenta`` | ``#ansifuchsia`` |
-| ``ansibrightcyan`` | ``#ansiturquoise`` |
-| ``ansiwhite`` | ``#ansiwhite`` |
-+=========================+==========================+
-
-Old ansi color names are deprecated but will still work.
-
+===================== ====================
+New names Pygments up to 2.3
+===================== ====================
+``ansiblack`` ``#ansiblack``
+``ansired`` ``#ansidarkred``
+``ansigreen`` ``#ansidarkgreen``
+``ansiyellow`` ``#ansibrown``
+``ansiblue`` ``#ansidarkblue``
+``ansimagenta`` ``#ansipurple``
+``ansicyan`` ``#ansiteal``
+``ansigray`` ``#ansilightgray``
+``ansibrightblack`` ``#ansidarkgray``
+``ansibrightred`` ``#ansired``
+``ansibrightgreen`` ``#ansigreen``
+``ansibrightyellow`` ``#ansiyellow``
+``ansibrightblue`` ``#ansiblue``
+``ansibrightmagenta`` ``#ansifuchsia``
+``ansibrightcyan`` ``#ansiturquoise``
+``ansiwhite`` ``#ansiwhite``
+===================== ====================
+
+Old ANSI color names are deprecated but will still work.
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py
index 965c5f1a..457d76ec 100644
--- a/pygments/formatters/__init__.py
+++ b/pygments/formatters/__init__.py
@@ -98,7 +98,8 @@ def load_formatter_from_file(filename, formattername="CustomFormatter",
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
- exec(open(filename, 'rb').read(), custom_namespace)
+ with open(filename, 'rb') as f:
+ exec(f.read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 2969d502..7d7605eb 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -535,10 +535,9 @@ class HtmlFormatter(Formatter):
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
- cf = open(cssfilename, "w")
- cf.write(CSSFILE_TEMPLATE %
- {'styledefs': self.get_style_defs('body')})
- cf.close()
+ with open(cssfilename, "w") as cf:
+ cf.write(CSSFILE_TEMPLATE %
+ {'styledefs': self.get_style_defs('body')})
except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
diff --git a/pygments/formatters/irc.py b/pygments/formatters/irc.py
index eb744d74..d55584f8 100644
--- a/pygments/formatters/irc.py
+++ b/pygments/formatters/irc.py
@@ -25,55 +25,55 @@ __all__ = ['IRCFormatter']
IRC_COLORS = {
Token: ('', ''),
- Whitespace: ('lightgray', 'darkgray'),
- Comment: ('lightgray', 'darkgray'),
- Comment.Preproc: ('teal', 'turquoise'),
- Keyword: ('darkblue', 'blue'),
- Keyword.Type: ('teal', 'turquoise'),
- Operator.Word: ('purple', 'fuchsia'),
- Name.Builtin: ('teal', 'turquoise'),
- Name.Function: ('darkgreen', 'green'),
- Name.Namespace: ('_teal_', '_turquoise_'),
- Name.Class: ('_darkgreen_', '_green_'),
- Name.Exception: ('teal', 'turquoise'),
- Name.Decorator: ('darkgray', 'lightgray'),
- Name.Variable: ('darkred', 'red'),
- Name.Constant: ('darkred', 'red'),
- Name.Attribute: ('teal', 'turquoise'),
- Name.Tag: ('blue', 'blue'),
- String: ('brown', 'brown'),
- Number: ('darkblue', 'blue'),
-
- Generic.Deleted: ('red', 'red'),
- Generic.Inserted: ('darkgreen', 'green'),
+ Whitespace: ('gray', 'brightblack'),
+ Comment: ('gray', 'brightblack'),
+ Comment.Preproc: ('cyan', 'brightcyan'),
+ Keyword: ('blue', 'brightblue'),
+ Keyword.Type: ('cyan', 'brightcyan'),
+ Operator.Word: ('magenta', 'brightcyan'),
+ Name.Builtin: ('cyan', 'brightcyan'),
+ Name.Function: ('green', 'brightgreen'),
+ Name.Namespace: ('_cyan_', '_brightcyan_'),
+ Name.Class: ('_green_', '_brightgreen_'),
+ Name.Exception: ('cyan', 'brightcyan'),
+ Name.Decorator: ('brightblack', 'gray'),
+ Name.Variable: ('red', 'brightred'),
+ Name.Constant: ('red', 'brightred'),
+ Name.Attribute: ('cyan', 'brightcyan'),
+ Name.Tag: ('brightblue', 'brightblue'),
+ String: ('yellow', 'yellow'),
+ Number: ('blue', 'brightblue'),
+
+ Generic.Deleted: ('brightred', 'brightred'),
+ Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
- Generic.Subheading: ('*purple*', '*fuchsia*'),
- Generic.Error: ('red', 'red'),
+ Generic.Subheading: ('*magenta*', '*brightmagenta*'),
+ Generic.Error: ('brightred', 'brightred'),
- Error: ('_red_', '_red_'),
+ Error: ('_brightred_', '_brightred_'),
}
IRC_COLOR_MAP = {
'white': 0,
'black': 1,
- 'darkblue': 2,
- 'green': 3,
- 'red': 4,
- 'brown': 5,
- 'purple': 6,
+ 'blue': 2,
+ 'brightgreen': 3,
+ 'brightred': 4,
+ 'yellow': 5,
+ 'magenta': 6,
'orange': 7,
- 'darkgreen': 7, #compat w/ ansi
- 'yellow': 8,
+ 'green': 7, #compat w/ ansi
+ 'brightyellow': 8,
'lightgreen': 9,
- 'turquoise': 9, # compat w/ ansi
- 'teal': 10,
+ 'brightcyan': 9, # compat w/ ansi
+ 'cyan': 10,
'lightblue': 11,
- 'darkred': 11, # compat w/ ansi
- 'blue': 12,
- 'fuchsia': 13,
- 'darkgray': 14,
- 'lightgray': 15,
+ 'red': 11, # compat w/ ansi
+ 'brightblue': 12,
+ 'brightmagenta': 13,
+ 'brightblack': 14,
+ 'gray': 15,
}
def ircformat(color, text):
diff --git a/pygments/formatters/terminal.py b/pygments/formatters/terminal.py
index b8fec52e..fcb52d94 100644
--- a/pygments/formatters/terminal.py
+++ b/pygments/formatters/terminal.py
@@ -26,33 +26,33 @@ __all__ = ['TerminalFormatter']
TERMINAL_COLORS = {
Token: ('', ''),
- Whitespace: ('lightgray', 'darkgray'),
- Comment: ('lightgray', 'darkgray'),
- Comment.Preproc: ('teal', 'turquoise'),
- Keyword: ('darkblue', 'blue'),
- Keyword.Type: ('teal', 'turquoise'),
- Operator.Word: ('purple', 'fuchsia'),
- Name.Builtin: ('teal', 'turquoise'),
- Name.Function: ('darkgreen', 'green'),
- Name.Namespace: ('_teal_', '_turquoise_'),
- Name.Class: ('_darkgreen_', '_green_'),
- Name.Exception: ('teal', 'turquoise'),
- Name.Decorator: ('darkgray', 'lightgray'),
- Name.Variable: ('darkred', 'red'),
- Name.Constant: ('darkred', 'red'),
- Name.Attribute: ('teal', 'turquoise'),
- Name.Tag: ('blue', 'blue'),
- String: ('brown', 'brown'),
- Number: ('darkblue', 'blue'),
-
- Generic.Deleted: ('red', 'red'),
- Generic.Inserted: ('darkgreen', 'green'),
+ Whitespace: ('gray', 'brightblack'),
+ Comment: ('gray', 'brightblack'),
+ Comment.Preproc: ('cyan', 'brightcyan'),
+ Keyword: ('blue', 'brightblue'),
+ Keyword.Type: ('cyan', 'brightcyan'),
+ Operator.Word: ('magenta', 'brightmagenta'),
+ Name.Builtin: ('cyan', 'brightcyan'),
+ Name.Function: ('green', 'brightgreen'),
+ Name.Namespace: ('_cyan_', '_brightcyan_'),
+ Name.Class: ('_green_', '_brightgreen_'),
+ Name.Exception: ('cyan', 'brightcyan'),
+ Name.Decorator: ('brightblack', 'gray'),
+ Name.Variable: ('red', 'brightred'),
+ Name.Constant: ('red', 'brightred'),
+ Name.Attribute: ('cyan', 'brightcyan'),
+ Name.Tag: ('brightblue', 'brightblue'),
+ String: ('yellow', 'yellow'),
+ Number: ('blue', 'brightblue'),
+
+ Generic.Deleted: ('brightred', 'brightred'),
+ Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
- Generic.Subheading: ('*purple*', '*fuchsia*'),
+ Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Prompt: ('**', '**'),
- Generic.Error: ('red', 'red'),
+ Generic.Error: ('brightred', 'brightred'),
- Error: ('_red_', '_red_'),
+ Error: ('_brightred_', '_brightred_'),
}
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
index b18aca65..1235e9ed 100644
--- a/pygments/formatters/terminal256.py
+++ b/pygments/formatters/terminal256.py
@@ -111,9 +111,9 @@ class Terminal256Formatter(Formatter):
See :ref:`AnsiTerminalStyle` for more information.
.. versionchanged:: 2.4
- The ansi color names have been updated with names that are easier to
+ The ANSI color names have been updated with names that are easier to
understand and align with colornames of other projects and terminals.
- See :ref:`NewAnsiColorNames` for more information.
+ See :ref:`this table <new-ansi-color-names>` for more information.
Options accepted:
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index 328e072c..50f39d4e 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -133,7 +133,8 @@ def load_lexer_from_file(filename, lexername="CustomLexer", **options):
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
- exec(open(filename, 'rb').read(), custom_namespace)
+ with open(filename, 'rb') as f:
+ exec(f.read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
diff --git a/pygments/lexers/_cocoa_builtins.py b/pygments/lexers/_cocoa_builtins.py
index f55e9dd7..f17ea876 100644
--- a/pygments/lexers/_cocoa_builtins.py
+++ b/pygments/lexers/_cocoa_builtins.py
@@ -40,7 +40,8 @@ if __name__ == '__main__': # pragma: no cover
continue
headerFilePath = frameworkHeadersDir + f
- content = open(headerFilePath).read()
+ with open(headerFilePath) as f:
+ content = f.read()
res = re.findall(r'(?<=@interface )\w+', content)
for r in res:
all_interfaces.add(r)
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index c7032c96..8085bb8a 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -185,6 +185,7 @@ LEXERS = {
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
+ 'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
@@ -385,8 +386,10 @@ LEXERS = {
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
+ 'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
+ 'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
diff --git a/pygments/lexers/_php_builtins.py b/pygments/lexers/_php_builtins.py
index bd4b7d99..c6084003 100644
--- a/pygments/lexers/_php_builtins.py
+++ b/pygments/lexers/_php_builtins.py
@@ -4698,18 +4698,19 @@ if __name__ == '__main__': # pragma: no cover
for file in get_php_references():
module = ''
- for line in open(file):
- if not module:
- search = module_re.search(line)
- if search:
- module = search.group(1)
- modules[module] = []
+ with open(file) as f:
+ for line in f:
+ if not module:
+ search = module_re.search(line)
+ if search:
+ module = search.group(1)
+ modules[module] = []
- elif 'href="function.' in line:
- for match in function_re.finditer(line):
- fn = match.group(1)
- if '-&gt;' not in fn and '::' not in fn and fn not in modules[module]:
- modules[module].append(fn)
+ elif 'href="function.' in line:
+ for match in function_re.finditer(line):
+ fn = match.group(1)
+ if '-&gt;' not in fn and '::' not in fn and fn not in modules[module]:
+ modules[module].append(fn)
if module:
# These are dummy manual pages, not actual functions
@@ -4726,9 +4727,8 @@ if __name__ == '__main__': # pragma: no cover
def get_php_references():
download = urlretrieve(PHP_MANUAL_URL)
- tar = tarfile.open(download[0])
- tar.extractall()
- tar.close()
+ with tarfile.open(download[0]) as tar:
+ tar.extractall()
for file in glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)):
yield file
os.remove(download[0])
diff --git a/pygments/lexers/_stan_builtins.py b/pygments/lexers/_stan_builtins.py
index a189647a..7f1e0ce3 100644
--- a/pygments/lexers/_stan_builtins.py
+++ b/pygments/lexers/_stan_builtins.py
@@ -4,24 +4,23 @@
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
- ``pygments.lexers.math.StanLexer. This is for Stan language version 2.8.0.
+ ``pygments.lexers.math.StanLexer. This is for Stan language version 2.17.0.
- :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2018 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = (
+ 'break',
+ 'continue',
'else',
'for',
'if',
'in',
- 'increment_log_prob',
- 'integrate_ode',
- 'lp__',
'print',
'reject',
'return',
- 'while'
+ 'while',
)
TYPES = (
@@ -35,18 +34,18 @@ TYPES = (
'positive_ordered',
'real',
'row_vector',
- 'row_vectormatrix',
'simplex',
'unit_vector',
'vector',
- 'void')
+ 'void',
+)
FUNCTIONS = (
- 'Phi',
- 'Phi_approx',
'abs',
'acos',
'acosh',
+ 'algebra_solver',
+ 'append_array',
'append_col',
'append_row',
'asin',
@@ -54,55 +53,59 @@ FUNCTIONS = (
'atan',
'atan2',
'atanh',
- 'bernoulli_ccdf_log',
'bernoulli_cdf',
- 'bernoulli_cdf_log',
- 'bernoulli_log',
- 'bernoulli_logit_log',
+ 'bernoulli_lccdf',
+ 'bernoulli_lcdf',
+ 'bernoulli_logit_lpmf',
+ 'bernoulli_logit_rng',
+ 'bernoulli_lpmf',
'bernoulli_rng',
'bessel_first_kind',
'bessel_second_kind',
- 'beta_binomial_ccdf_log',
'beta_binomial_cdf',
- 'beta_binomial_cdf_log',
- 'beta_binomial_log',
+ 'beta_binomial_lccdf',
+ 'beta_binomial_lcdf',
+ 'beta_binomial_lpmf',
'beta_binomial_rng',
- 'beta_ccdf_log',
'beta_cdf',
- 'beta_cdf_log',
- 'beta_log',
+ 'beta_lccdf',
+ 'beta_lcdf',
+ 'beta_lpdf',
'beta_rng',
'binary_log_loss',
- 'binomial_ccdf_log',
'binomial_cdf',
- 'binomial_cdf_log',
'binomial_coefficient_log',
- 'binomial_log',
- 'binomial_logit_log',
+ 'binomial_lccdf',
+ 'binomial_lcdf',
+ 'binomial_logit_lpmf',
+ 'binomial_lpmf',
'binomial_rng',
'block',
- 'categorical_log',
- 'categorical_logit_log',
+ 'categorical_logit_lpmf',
+ 'categorical_logit_rng',
+ 'categorical_lpmf',
'categorical_rng',
- 'cauchy_ccdf_log',
'cauchy_cdf',
- 'cauchy_cdf_log',
- 'cauchy_log',
+ 'cauchy_lccdf',
+ 'cauchy_lcdf',
+ 'cauchy_lpdf',
'cauchy_rng',
'cbrt',
'ceil',
- 'chi_square_ccdf_log',
'chi_square_cdf',
- 'chi_square_cdf_log',
- 'chi_square_log',
+ 'chi_square_lccdf',
+ 'chi_square_lcdf',
+ 'chi_square_lpdf',
'chi_square_rng',
'cholesky_decompose',
+ 'choose',
'col',
'cols',
'columns_dot_product',
'columns_dot_self',
'cos',
'cosh',
+ 'cov_exp_quad',
'crossprod',
'csr_extract_u',
'csr_extract_v',
@@ -117,15 +120,15 @@ FUNCTIONS = (
'diagonal',
'digamma',
'dims',
- 'dirichlet_log',
+ 'dirichlet_lpdf',
'dirichlet_rng',
'distance',
'dot_product',
'dot_self',
- 'double_exponential_ccdf_log',
'double_exponential_cdf',
- 'double_exponential_cdf_log',
- 'double_exponential_log',
+ 'double_exponential_lccdf',
+ 'double_exponential_lcdf',
+ 'double_exponential_lpdf',
'double_exponential_rng',
'e',
'eigenvalues_sym',
@@ -134,16 +137,16 @@ FUNCTIONS = (
'erfc',
'exp',
'exp2',
- 'exp_mod_normal_ccdf_log',
'exp_mod_normal_cdf',
- 'exp_mod_normal_cdf_log',
- 'exp_mod_normal_log',
+ 'exp_mod_normal_lccdf',
+ 'exp_mod_normal_lcdf',
+ 'exp_mod_normal_lpdf',
'exp_mod_normal_rng',
'expm1',
- 'exponential_ccdf_log',
'exponential_cdf',
- 'exponential_cdf_log',
- 'exponential_log',
+ 'exponential_lccdf',
+ 'exponential_lcdf',
+ 'exponential_lpdf',
'exponential_rng',
'fabs',
'falling_factorial',
@@ -153,60 +156,65 @@ FUNCTIONS = (
'fmax',
'fmin',
'fmod',
- 'frechet_ccdf_log',
'frechet_cdf',
- 'frechet_cdf_log',
- 'frechet_log',
+ 'frechet_lccdf',
+ 'frechet_lcdf',
+ 'frechet_lpdf',
'frechet_rng',
- 'gamma_ccdf_log',
'gamma_cdf',
- 'gamma_cdf_log',
- 'gamma_log',
+ 'gamma_lccdf',
+ 'gamma_lcdf',
+ 'gamma_lpdf',
'gamma_p',
'gamma_q',
'gamma_rng',
- 'gaussian_dlm_obs_log',
+ 'gaussian_dlm_obs_lpdf',
'get_lp',
- 'gumbel_ccdf_log',
'gumbel_cdf',
- 'gumbel_cdf_log',
- 'gumbel_log',
+ 'gumbel_lccdf',
+ 'gumbel_lcdf',
+ 'gumbel_lpdf',
'gumbel_rng',
'head',
- 'hypergeometric_log',
+ 'hypergeometric_lpmf',
'hypergeometric_rng',
'hypot',
- 'if_else',
+ 'inc_beta',
'int_step',
+ 'integrate_ode',
+ 'integrate_ode_bdf',
+ 'integrate_ode_rk45',
'inv',
- 'inv_chi_square_ccdf_log',
'inv_chi_square_cdf',
- 'inv_chi_square_cdf_log',
- 'inv_chi_square_log',
+ 'inv_chi_square_lccdf',
+ 'inv_chi_square_lcdf',
+ 'inv_chi_square_lpdf',
'inv_chi_square_rng',
'inv_cloglog',
- 'inv_gamma_ccdf_log',
'inv_gamma_cdf',
- 'inv_gamma_cdf_log',
- 'inv_gamma_log',
+ 'inv_gamma_lccdf',
+ 'inv_gamma_lcdf',
+ 'inv_gamma_lpdf',
'inv_gamma_rng',
'inv_logit',
- 'inv_phi',
+ 'inv_Phi',
'inv_sqrt',
'inv_square',
- 'inv_wishart_log',
+ 'inv_wishart_lpdf',
'inv_wishart_rng',
'inverse',
'inverse_spd',
'is_inf',
'is_nan',
'lbeta',
+ 'lchoose',
'lgamma',
- 'lkj_corr_cholesky_log',
+ 'lkj_corr_cholesky_lpdf',
'lkj_corr_cholesky_rng',
- 'lkj_corr_log',
+ 'lkj_corr_lpdf',
'lkj_corr_rng',
'lmgamma',
+ 'lmultiply',
'log',
'log10',
'log1m',
@@ -223,81 +231,87 @@ FUNCTIONS = (
'log_rising_factorial',
'log_softmax',
'log_sum_exp',
- 'logistic_ccdf_log',
'logistic_cdf',
- 'logistic_cdf_log',
- 'logistic_log',
+ 'logistic_lccdf',
+ 'logistic_lcdf',
+ 'logistic_lpdf',
'logistic_rng',
'logit',
- 'lognormal_ccdf_log',
'lognormal_cdf',
- 'lognormal_cdf_log',
- 'lognormal_log',
+ 'lognormal_lccdf',
+ 'lognormal_lcdf',
+ 'lognormal_lpdf',
'lognormal_rng',
'machine_precision',
+ 'matrix_exp',
'max',
+ 'mdivide_left_spd',
'mdivide_left_tri_low',
+ 'mdivide_right_spd',
'mdivide_right_tri_low',
'mean',
'min',
'modified_bessel_first_kind',
'modified_bessel_second_kind',
- 'multi_gp_cholesky_log',
- 'multi_gp_log',
- 'multi_normal_cholesky_log',
+ 'multi_gp_cholesky_lpdf',
+ 'multi_gp_lpdf',
+ 'multi_normal_cholesky_lpdf',
'multi_normal_cholesky_rng',
- 'multi_normal_log',
- 'multi_normal_prec_log',
+ 'multi_normal_lpdf',
+ 'multi_normal_prec_lpdf',
'multi_normal_rng',
- 'multi_student_t_log',
+ 'multi_student_t_lpdf',
'multi_student_t_rng',
- 'multinomial_log',
+ 'multinomial_lpmf',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
- 'neg_binomial_2_ccdf_log',
'neg_binomial_2_cdf',
- 'neg_binomial_2_cdf_log',
- 'neg_binomial_2_log',
- 'neg_binomial_2_log_log',
+ 'neg_binomial_2_lccdf',
+ 'neg_binomial_2_lcdf',
+ 'neg_binomial_2_log_lpmf',
'neg_binomial_2_log_rng',
+ 'neg_binomial_2_lpmf',
'neg_binomial_2_rng',
- 'neg_binomial_ccdf_log',
'neg_binomial_cdf',
- 'neg_binomial_cdf_log',
- 'neg_binomial_log',
+ 'neg_binomial_lccdf',
+ 'neg_binomial_lcdf',
+ 'neg_binomial_lpmf',
'neg_binomial_rng',
'negative_infinity',
- 'normal_ccdf_log',
'normal_cdf',
- 'normal_cdf_log',
- 'normal_log',
+ 'normal_lccdf',
+ 'normal_lcdf',
+ 'normal_lpdf',
'normal_rng',
'not_a_number',
'num_elements',
- 'ordered_logistic_log',
+ 'ordered_logistic_lpmf',
'ordered_logistic_rng',
'owens_t',
- 'pareto_ccdf_log',
'pareto_cdf',
- 'pareto_cdf_log',
- 'pareto_log',
+ 'pareto_lccdf',
+ 'pareto_lcdf',
+ 'pareto_lpdf',
'pareto_rng',
- 'pareto_type_2_ccdf_log',
'pareto_type_2_cdf',
- 'pareto_type_2_cdf_log',
- 'pareto_type_2_log',
+ 'pareto_type_2_lccdf',
+ 'pareto_type_2_lcdf',
+ 'pareto_type_2_lpdf',
'pareto_type_2_rng',
+ 'Phi',
+ 'Phi_approx',
'pi',
- 'poisson_ccdf_log',
'poisson_cdf',
- 'poisson_cdf_log',
- 'poisson_log',
- 'poisson_log_log',
+ 'poisson_lccdf',
+ 'poisson_lcdf',
+ 'poisson_log_lpmf',
'poisson_log_rng',
+ 'poisson_lpmf',
'poisson_rng',
'positive_infinity',
'pow',
+ 'print',
'prod',
'qr_Q',
'qr_R',
@@ -305,11 +319,12 @@ FUNCTIONS = (
'quad_form_diag',
'quad_form_sym',
'rank',
- 'rayleigh_ccdf_log',
'rayleigh_cdf',
- 'rayleigh_cdf_log',
- 'rayleigh_log',
+ 'rayleigh_lccdf',
+ 'rayleigh_lcdf',
+ 'rayleigh_lpdf',
'rayleigh_rng',
+ 'reject',
'rep_array',
'rep_matrix',
'rep_row_vector',
@@ -320,10 +335,10 @@ FUNCTIONS = (
'rows',
'rows_dot_product',
'rows_dot_self',
- 'scaled_inv_chi_square_ccdf_log',
'scaled_inv_chi_square_cdf',
- 'scaled_inv_chi_square_cdf_log',
- 'scaled_inv_chi_square_log',
+ 'scaled_inv_chi_square_lccdf',
+ 'scaled_inv_chi_square_lcdf',
+ 'scaled_inv_chi_square_lpdf',
'scaled_inv_chi_square_rng',
'sd',
'segment',
@@ -331,10 +346,10 @@ FUNCTIONS = (
'singular_values',
'sinh',
'size',
- 'skew_normal_ccdf_log',
'skew_normal_cdf',
- 'skew_normal_cdf_log',
- 'skew_normal_log',
+ 'skew_normal_lccdf',
+ 'skew_normal_lcdf',
+ 'skew_normal_lpdf',
'skew_normal_rng',
'softmax',
'sort_asc',
@@ -346,10 +361,10 @@ FUNCTIONS = (
'square',
'squared_distance',
'step',
- 'student_t_ccdf_log',
'student_t_cdf',
- 'student_t_cdf_log',
- 'student_t_log',
+ 'student_t_lccdf',
+ 'student_t_lcdf',
+ 'student_t_lpdf',
'student_t_rng',
'sub_col',
'sub_row',
@@ -357,6 +372,7 @@ FUNCTIONS = (
'tail',
'tan',
'tanh',
+ 'target',
'tcrossprod',
'tgamma',
'to_array_1d',
@@ -369,22 +385,22 @@ FUNCTIONS = (
'trace_quad_form',
'trigamma',
'trunc',
- 'uniform_ccdf_log',
'uniform_cdf',
- 'uniform_cdf_log',
- 'uniform_log',
+ 'uniform_lccdf',
+ 'uniform_lcdf',
+ 'uniform_lpdf',
'uniform_rng',
'variance',
- 'von_mises_log',
+ 'von_mises_lpdf',
'von_mises_rng',
- 'weibull_ccdf_log',
'weibull_cdf',
- 'weibull_cdf_log',
- 'weibull_log',
+ 'weibull_lccdf',
+ 'weibull_lcdf',
+ 'weibull_lpdf',
'weibull_rng',
- 'wiener_log',
- 'wishart_log',
- 'wishart_rng'
+ 'wiener_lpdf',
+ 'wishart_lpdf',
+ 'wishart_rng',
)
DISTRIBUTIONS = (
@@ -438,7 +454,7 @@ DISTRIBUTIONS = (
'von_mises',
'weibull',
'wiener',
- 'wishart'
+ 'wishart',
)
RESERVED = (
@@ -469,19 +485,23 @@ RESERVED = (
'do',
'double',
'dynamic_cast',
+ 'else',
'enum',
'explicit',
'export',
'extern',
'false',
- 'false',
'float',
+ 'for',
'friend',
'fvar',
'goto',
+ 'if',
+ 'in',
'inline',
'int',
'long',
+ 'lp__',
'mutable',
'namespace',
'new',
@@ -498,9 +518,16 @@ RESERVED = (
'register',
'reinterpret_cast',
'repeat',
+ 'return',
'short',
'signed',
'sizeof',
+ 'STAN_MAJOR',
+ 'STAN_MATH_MAJOR',
+ 'STAN_MATH_MINOR',
+ 'STAN_MATH_PATCH',
+ 'STAN_MINOR',
+ 'STAN_PATCH',
'static',
'static_assert',
'static_cast',
@@ -512,7 +539,6 @@ RESERVED = (
'thread_local',
'throw',
'true',
- 'true',
'try',
'typedef',
'typeid',
@@ -526,7 +552,7 @@ RESERVED = (
'void',
'volatile',
'wchar_t',
+ 'while',
'xor',
- 'xor_eq'
+ 'xor_eq',
)
-
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 0a40e641..2f08d510 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -377,54 +377,63 @@ class LlvmLexer(RegexLexer):
'keyword': [
# Regular keywords
(words((
- 'begin', 'end', 'true', 'false', 'declare', 'define', 'global',
- 'constant', 'private', 'linker_private', 'internal',
- 'available_externally', 'linkonce', 'linkonce_odr', 'weak',
- 'weak_odr', 'appending', 'dllimport', 'dllexport', 'common',
- 'default', 'hidden', 'protected', 'extern_weak', 'external',
- 'thread_local', 'zeroinitializer', 'undef', 'null', 'to', 'tail',
- 'target', 'triple', 'datalayout', 'volatile', 'nuw', 'nsw', 'nnan',
- 'ninf', 'nsz', 'arcp', 'fast', 'exact', 'inbounds', 'align',
- 'addrspace', 'section', 'alias', 'module', 'asm', 'sideeffect',
- 'gc', 'dbg', 'linker_private_weak', 'attributes', 'blockaddress',
- 'initialexec', 'localdynamic', 'localexec', 'prefix', 'unnamed_addr',
- 'ccc', 'fastcc', 'coldcc', 'x86_stdcallcc', 'x86_fastcallcc',
- 'arm_apcscc', 'arm_aapcscc', 'arm_aapcs_vfpcc', 'ptx_device',
- 'ptx_kernel', 'intel_ocl_bicc', 'msp430_intrcc', 'spir_func',
- 'spir_kernel', 'x86_64_sysvcc', 'x86_64_win64cc', 'x86_thiscallcc',
- 'cc', 'c', 'signext', 'zeroext', 'inreg', 'sret', 'nounwind',
- 'noreturn', 'noalias', 'nocapture', 'byval', 'nest', 'readnone',
- 'readonly', 'inlinehint', 'noinline', 'alwaysinline', 'optsize', 'ssp',
- 'sspreq', 'noredzone', 'noimplicitfloat', 'naked', 'builtin', 'cold',
- 'nobuiltin', 'noduplicate', 'nonlazybind', 'optnone', 'returns_twice',
- 'sanitize_address', 'sanitize_memory', 'sanitize_thread', 'sspstrong',
- 'uwtable', 'returned', 'type', 'opaque', 'eq', 'ne', 'slt', 'sgt',
- 'sle', 'sge', 'ult', 'ugt', 'ule', 'uge', 'oeq', 'one', 'olt', 'ogt',
- 'ole', 'oge', 'ord', 'uno', 'ueq', 'une', 'x', 'acq_rel', 'acquire',
- 'alignstack', 'atomic', 'catch', 'cleanup', 'filter', 'inteldialect',
- 'max', 'min', 'monotonic', 'nand', 'personality', 'release', 'seq_cst',
- 'singlethread', 'umax', 'umin', 'unordered', 'xchg', 'add', 'fadd',
- 'sub', 'fsub', 'mul', 'fmul', 'udiv', 'sdiv', 'fdiv', 'urem', 'srem',
- 'frem', 'shl', 'lshr', 'ashr', 'and', 'or', 'xor', 'icmp', 'fcmp',
- 'phi', 'call', 'trunc', 'zext', 'sext', 'fptrunc', 'fpext', 'uitofp',
- 'sitofp', 'fptoui', 'fptosi', 'inttoptr', 'ptrtoint', 'bitcast',
- 'addrspacecast', 'select', 'va_arg', 'ret', 'br', 'switch', 'invoke',
- 'unwind', 'unreachable', 'indirectbr', 'landingpad', 'resume',
- 'malloc', 'alloca', 'free', 'load', 'store', 'getelementptr',
- 'extractelement', 'insertelement', 'shufflevector', 'getresult',
- 'extractvalue', 'insertvalue', 'atomicrmw', 'cmpxchg', 'fence',
- 'allocsize', 'amdgpu_cs', 'amdgpu_gs', 'amdgpu_kernel', 'amdgpu_ps',
- 'amdgpu_vs', 'any', 'anyregcc', 'argmemonly', 'avr_intrcc',
- 'avr_signalcc', 'caller', 'catchpad', 'catchret', 'catchswitch',
- 'cleanuppad', 'cleanupret', 'comdat', 'convergent', 'cxx_fast_tlscc',
- 'deplibs', 'dereferenceable', 'dereferenceable_or_null', 'distinct',
- 'exactmatch', 'externally_initialized', 'from', 'ghccc', 'hhvm_ccc',
- 'hhvmcc', 'ifunc', 'inaccessiblemem_or_argmemonly', 'inaccessiblememonly',
- 'inalloca', 'jumptable', 'largest', 'local_unnamed_addr', 'minsize',
- 'musttail', 'noduplicates', 'none', 'nonnull', 'norecurse', 'notail',
- 'preserve_allcc', 'preserve_mostcc', 'prologue', 'safestack', 'samesize',
- 'source_filename', 'swiftcc', 'swifterror', 'swiftself', 'webkit_jscc',
- 'within', 'writeonly', 'x86_intrcc', 'x86_vectorcallcc'),
+ 'acq_rel', 'acquire', 'add', 'addrspace', 'addrspacecast', 'afn', 'alias',
+ 'aliasee', 'align', 'alignLog2', 'alignstack', 'alloca', 'allocsize', 'allOnes',
+ 'alwaysinline', 'amdgpu_cs', 'amdgpu_es', 'amdgpu_gs', 'amdgpu_hs',
+ 'amdgpu_kernel', 'amdgpu_ls', 'amdgpu_ps', 'amdgpu_vs', 'and', 'any',
+ 'anyregcc', 'appending', 'arcp', 'argmemonly', 'args', 'arm_aapcs_vfpcc',
+ 'arm_aapcscc', 'arm_apcscc', 'ashr', 'asm', 'atomic', 'atomicrmw', 'attributes',
+ 'available_externally', 'avr_intrcc', 'avr_signalcc', 'bit', 'bitcast',
+ 'bitMask', 'blockaddress', 'br', 'branchFunnel', 'builtin', 'byArg', 'byte',
+ 'byteArray', 'byval', 'c', 'call', 'callee', 'caller', 'calls', 'catch',
+ 'catchpad', 'catchret', 'catchswitch', 'cc', 'ccc', 'cleanup', 'cleanuppad',
+ 'cleanupret', 'cmpxchg', 'cold', 'coldcc', 'comdat', 'common', 'constant',
+ 'contract', 'convergent', 'critical', 'cxx_fast_tlscc', 'datalayout', 'declare',
+ 'default', 'define', 'deplibs', 'dereferenceable', 'dereferenceable_or_null',
+ 'distinct', 'dllexport', 'dllimport', 'double', 'dso_local', 'dso_preemptable',
+ 'dsoLocal', 'eq', 'exact', 'exactmatch', 'extern_weak', 'external',
+ 'externally_initialized', 'extractelement', 'extractvalue', 'fadd', 'false',
+ 'fast', 'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'float', 'fmul',
+ 'fp128', 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'frem', 'from', 'fsub',
+ 'funcFlags', 'function', 'gc', 'getelementptr', 'ghccc', 'global', 'guid', 'gv',
+ 'half', 'hash', 'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
+ 'ifunc', 'inaccessiblemem_or_argmemonly', 'inaccessiblememonly', 'inalloca',
+ 'inbounds', 'indir', 'indirectbr', 'info', 'initialexec', 'inline',
+ 'inlineBits', 'inlinehint', 'inrange', 'inreg', 'insertelement', 'insertvalue',
+ 'insts', 'intel_ocl_bicc', 'inteldialect', 'internal', 'inttoptr', 'invoke',
+ 'jumptable', 'kind', 'label', 'landingpad', 'largest', 'linkage', 'linkonce',
+ 'linkonce_odr', 'live', 'load', 'local_unnamed_addr', 'localdynamic',
+ 'localexec', 'lshr', 'max', 'metadata', 'min', 'minsize', 'module', 'monotonic',
+ 'msp430_intrcc', 'mul', 'musttail', 'naked', 'name', 'nand', 'ne', 'nest',
+ 'ninf', 'nnan', 'noalias', 'nobuiltin', 'nocapture', 'nocf_check',
+ 'noduplicate', 'noduplicates', 'noimplicitfloat', 'noinline', 'none',
+ 'nonlazybind', 'nonnull', 'norecurse', 'noRecurse', 'noredzone', 'noreturn',
+ 'notail', 'notEligibleToImport', 'nounwind', 'nsw', 'nsz', 'null', 'nuw', 'oeq',
+ 'offset', 'oge', 'ogt', 'ole', 'olt', 'one', 'opaque', 'optforfuzzing',
+ 'optnone', 'optsize', 'or', 'ord', 'path', 'personality', 'phi', 'ppc_fp128',
+ 'prefix', 'preserve_allcc', 'preserve_mostcc', 'private', 'prologue',
+ 'protected', 'ptrtoint', 'ptx_device', 'ptx_kernel', 'readnone', 'readNone',
+ 'readonly', 'readOnly', 'reassoc', 'refs', 'relbf', 'release', 'resByArg',
+ 'resume', 'ret', 'returnDoesNotAlias', 'returned', 'returns_twice', 'safestack',
+ 'samesize', 'sanitize_address', 'sanitize_hwaddress', 'sanitize_memory',
+ 'sanitize_thread', 'sdiv', 'section', 'select', 'seq_cst', 'sext', 'sge', 'sgt',
+ 'shadowcallstack', 'shl', 'shufflevector', 'sideeffect', 'signext', 'single',
+ 'singleImpl', 'singleImplName', 'sitofp', 'sizeM1', 'sizeM1BitWidth', 'sle',
+ 'slt', 'source_filename', 'speculatable', 'spir_func', 'spir_kernel', 'srem',
+ 'sret', 'ssp', 'sspreq', 'sspstrong', 'store', 'strictfp', 'sub', 'summaries',
+ 'summary', 'swiftcc', 'swifterror', 'swiftself', 'switch', 'syncscope', 'tail',
+ 'target', 'thread_local', 'to', 'token', 'triple', 'true', 'trunc', 'type',
+ 'typeCheckedLoadConstVCalls', 'typeCheckedLoadVCalls', 'typeid', 'typeIdInfo',
+ 'typeTestAssumeConstVCalls', 'typeTestAssumeVCalls', 'typeTestRes', 'typeTests',
+ 'udiv', 'ueq', 'uge', 'ugt', 'uitofp', 'ule', 'ult', 'umax', 'umin', 'undef',
+ 'une', 'uniformRetVal', 'uniqueRetVal', 'unknown', 'unnamed_addr', 'uno',
+ 'unordered', 'unreachable', 'unsat', 'unwind', 'urem', 'uselistorder',
+ 'uselistorder_bb', 'uwtable', 'va_arg', 'variable', 'vFuncId',
+ 'virtualConstProp', 'void', 'volatile', 'weak', 'weak_odr', 'webkit_jscc',
+ 'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly', 'x',
+ 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_fp80', 'x86_intrcc', 'x86_mmx',
+ 'x86_regcallcc', 'x86_stdcallcc', 'x86_thiscallcc', 'x86_vectorcallcc', 'xchg',
+ 'xor', 'zeroext', 'zeroinitializer', 'zext'),
suffix=r'\b'), Keyword),
# Types
diff --git a/pygments/lexers/data.py b/pygments/lexers/data.py
index a67d084e..7593b487 100644
--- a/pygments/lexers/data.py
+++ b/pygments/lexers/data.py
@@ -233,7 +233,7 @@ class YamlLexer(ExtendedRegexLexer):
# whitespaces separating tokens
(r'[ ]+', Text),
# key with colon
- (r'([^,:?\[\]{}\n]+)(:)(?=[ ]|$)',
+ (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
bygroups(Name.Tag, set_indent(Punctuation, implicit=True))),
# tags, anchors and aliases,
include('descriptors'),
@@ -312,7 +312,7 @@ class YamlLexer(ExtendedRegexLexer):
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# key with colon
- (r'([^,:?\[\]{}\n]+)(:)(?=[ ]|$)',
+ (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
bygroups(Name.Tag, Punctuation)),
# include flow collection rules
include('flow-collection'),
diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py
index 793c28be..26222c9f 100644
--- a/pygments/lexers/esoteric.py
+++ b/pygments/lexers/esoteric.py
@@ -245,7 +245,7 @@ class AheuiLexer(RegexLexer):
Aheui_ is esoteric language based on Korean alphabets.
- .. _Aheui:: http://aheui.github.io/
+ .. _Aheui: http://aheui.github.io/
"""
diff --git a/pygments/lexers/floscript.py b/pygments/lexers/floscript.py
index b393c1e9..4f200809 100644
--- a/pygments/lexers/floscript.py
+++ b/pygments/lexers/floscript.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.floscript
- ~~~~~~~~~~~~~~~~~~~~~~
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for FloScript
diff --git a/pygments/lexers/forth.py b/pygments/lexers/forth.py
index a51f1b57..7fecdd52 100644
--- a/pygments/lexers/forth.py
+++ b/pygments/lexers/forth.py
@@ -3,6 +3,8 @@
pygments.lexers.forth
~~~~~~~~~~~~~~~~~~~~~
+ Lexer for the Forth language.
+
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
diff --git a/pygments/lexers/graph.py b/pygments/lexers/graph.py
index 1a338246..6e836bdd 100644
--- a/pygments/lexers/graph.py
+++ b/pygments/lexers/graph.py
@@ -22,9 +22,9 @@ __all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
"""
For `Cypher Query Language
- <http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
+ <https://neo4j.com/docs/developer-manual/3.3/cypher/>`_
- For the Cypher version in Neo4J 2.0
+ For the Cypher version in Neo4j 3.3
.. versionadded:: 2.0
"""
@@ -49,14 +49,19 @@ class CypherLexer(RegexLexer):
],
'keywords': [
(r'(create|order|match|limit|set|skip|start|return|with|where|'
- r'delete|foreach|not|by)\b', Keyword),
+ r'delete|foreach|not|by|true|false)\b', Keyword),
],
'clauses': [
- # TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
- (r'(all|any|as|asc|create|create\s+unique|delete|'
- r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
- r'order\s+by|return|set|skip|single|start|union|where|with)\b',
- Keyword),
+ # based on https://neo4j.com/docs/cypher-refcard/3.3/
+ (r'(all|any|as|asc|ascending|assert|call|case|create|'
+ r'create\s+index|create\s+unique|delete|desc|descending|'
+ r'distinct|drop\s+constraint\s+on|drop\s+index\s+on|end|'
+ r'ends\s+with|fieldterminator|foreach|in|is\s+node\s+key|'
+ r'is\s+null|is\s+unique|limit|load\s+csv\s+from|match|merge|none|'
+ r'not|null|on\s+match|on\s+create|optional\s+match|order\s+by|'
+ r'remove|return|set|skip|single|start|starts\s+with|then|union|'
+ r'union\s+all|unwind|using\s+periodic\s+commit|yield|where|when|'
+ r'with)\b', Keyword),
],
'relations': [
(r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
diff --git a/pygments/lexers/haskell.py b/pygments/lexers/haskell.py
index 88d4a4df..b3884f5c 100644
--- a/pygments/lexers/haskell.py
+++ b/pygments/lexers/haskell.py
@@ -12,12 +12,12 @@
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \
- default, include
+ default, include, inherit
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
from pygments import unistring as uni
-__all__ = ['HaskellLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',
+__all__ = ['HaskellLexer', 'HspecLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',
'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer',
'LiterateCryptolLexer', 'KokaLexer']
@@ -157,6 +157,28 @@ class HaskellLexer(RegexLexer):
}
+class HspecLexer(HaskellLexer):
+ """
+ A Haskell lexer with support for Hspec constructs.
+
+    .. versionadded:: 2.4
+ """
+
+ name = 'Hspec'
+ aliases = ['hspec']
+ filenames = []
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ (r'(it\s*)("[^"]*")', bygroups(Text, String.Doc)),
+ (r'(describe\s*)("[^"]*")', bygroups(Text, String.Doc)),
+ (r'(context\s*)("[^"]*")', bygroups(Text, String.Doc)),
+ inherit,
+ ],
+ }
+
+
class IdrisLexer(RegexLexer):
"""
A lexer for the dependently typed programming language Idris.
diff --git a/pygments/lexers/modeling.py b/pygments/lexers/modeling.py
index b354f1cf..481cce38 100644
--- a/pygments/lexers/modeling.py
+++ b/pygments/lexers/modeling.py
@@ -13,7 +13,7 @@ import re
from pygments.lexer import RegexLexer, include, bygroups, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation
+ Number, Punctuation, Whitespace
from pygments.lexers.html import HtmlLexer
from pygments.lexers import _stan_builtins
@@ -284,8 +284,8 @@ class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
The Stan modeling language is specified in the *Stan Modeling Language
- User's Guide and Reference Manual, v2.8.0*,
- `pdf <https://github.com/stan-dev/stan/releases/download/v2.8.8/stan-reference-2.8.0.pdf>`__.
+ User's Guide and Reference Manual, v2.17.0*,
+ `pdf <https://github.com/stan-dev/stan/releases/download/v2.17.0/stan-reference-2.17.0.pdf>`__.
.. versionadded:: 1.6
"""
@@ -316,19 +316,24 @@ class StanLexer(RegexLexer):
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
+ # target keyword
+ (r'target\s*\+=', Keyword),
# Reserved Words
(r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
# Truncation
(r'T(?=\s*\[)', Keyword),
# Data types
(r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
+ # < should be punctuation, but elsewhere I can't tell if it is in
+ # a range constraint
+ (r'(<)(\s*)(upper|lower)(\s*)(=)', bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)),
+ (r'(,)(\s*)(upper)(\s*)(=)', bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)),
# Punctuation
- (r"[;:,\[\]()]", Punctuation),
+ (r"[;,\[\]()]", Punctuation),
# Builtin
- (r'(%s)(?=\s*\()'
- % r'|'.join(_stan_builtins.FUNCTIONS
- + _stan_builtins.DISTRIBUTIONS),
- Name.Builtin),
+ (r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin),
+ (r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS),
+ bygroups(Operator, Whitespace, Name.Builtin)),
# Special names ending in __, like lp__
(r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
(r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
@@ -337,17 +342,18 @@ class StanLexer(RegexLexer):
# Regular variable names
(r'[A-Za-z]\w*\b', Name),
# Real Literals
- (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
- (r'-?[0-9]*\.[0-9]*', Number.Float),
+ (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float),
+ (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float),
# Integer Literals
- (r'-?[0-9]+', Number.Integer),
+ (r'[0-9]+', Number.Integer),
# Assignment operators
- # SLexer makes these tokens Operators.
- (r'<-|~', Operator),
+ (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator),
# Infix, prefix and postfix operators (and = )
- (r"\+|-|\.?\*|\.?/|\\|'|\^|==?|!=?|<=?|>=?|\|\||&&", Operator),
+ (r"\+|-|\.?\*|\.?/|\\|'|\^|!=?|<=?|>=?|\|\||&&|%|\?|:", Operator),
# Block delimiters
(r'[{}]', Punctuation),
+ # Distribution |
+ (r'\|', Punctuation)
]
}
diff --git a/pygments/lexers/qvt.py b/pygments/lexers/qvt.py
index af091a65..9b2559b1 100644
--- a/pygments/lexers/qvt.py
+++ b/pygments/lexers/qvt.py
@@ -18,7 +18,7 @@ __all__ = ['QVToLexer']
class QVToLexer(RegexLexer):
- """
+ u"""
For the `QVT Operational Mapping language <http://www.omg.org/spec/QVT/1.1/>`_.
Reference for implementing this: «Meta Object Facility (MOF) 2.0
diff --git a/pygments/lexers/sgf.py b/pygments/lexers/sgf.py
new file mode 100644
index 00000000..aa934b49
--- /dev/null
+++ b/pygments/lexers/sgf.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.sgf
+    ~~~~~~~~~~~~~~~~~~~
+
+ Lexer for Smart Game Format (sgf) file format.
+
+    The format is used to store game records of board games for two players
+    (mainly the game of Go).
+ For more information about the definition of the format, see:
+ https://www.red-bean.com/sgf/
+
+ :copyright: Copyright 2006-2018 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+
+ .. versionadded:: 2.4
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import *
+
+__all__ = ["SmartGameFormatLexer"]
+
+
+class SmartGameFormatLexer(RegexLexer):
+ name = 'SmartGameFormat'
+ aliases = ['sgf']
+ filenames = ['*.sgf']
+
+ tokens = {
+ 'root': [
+ (r'[\s():;]', Punctuation),
+ # tokens:
+        (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|KO|L|LB|LN|LT|M|MA|MN|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|RO|RU|SC|SE|SI|SL|SO|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|V|VW|[BW]|C)',
+ Name.Builtin),
+ # number:
+ (r'(\[)([0-9.]+)(\])',
+ bygroups(Punctuation, Literal.Number, Punctuation)),
+ # date:
+ (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])',
+ bygroups(Punctuation, Literal.Date, Punctuation)),
+ # point:
+ (r'(\[)([a-z]{2})(\])',
+ bygroups(Punctuation, String, Punctuation)),
+ # double points:
+ (r'(\[)([a-z]{2})(:)([a-z]{2})(\])',
+ bygroups(Punctuation, String, Punctuation, String, Punctuation)),
+
+ (r'(\[)([\w\s#()+,\-.:?]+)(\])',
+ bygroups(Punctuation, String, Punctuation)),
+ (r'(\[)(\s.*)(\])',
+ bygroups(Punctuation, Text, Punctuation)),
+ ],
+ }
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index b3702e92..86d8c37a 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -19,7 +19,7 @@ from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
- 'MSDOSSessionLexer', 'PowerShellLexer',
+ 'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer',
'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer']
line_re = re.compile('.*?\n')
@@ -126,6 +126,28 @@ class BashLexer(RegexLexer):
return 0.2
+class SlurmBashLexer(BashLexer):
+ """
+ Lexer for (ba|k|z|)sh Slurm scripts.
+
+ .. versionadded:: 2.4
+ """
+
+ name = 'Slurm'
+ aliases = ['slurm', 'sbatch']
+ filenames = ['*.sl']
+ mimetypes = []
+ EXTRA_KEYWORDS = {'srun'}
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in BashLexer.get_tokens_unprocessed(self, text):
+ if token is Text and value in self.EXTRA_KEYWORDS:
+ yield index, Name.Builtin, value
+ elif token is Comment.Single and 'SBATCH' in value:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
class ShellSessionBaseLexer(Lexer):
"""
Base lexer for simplistic shell sessions.
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
index 7dd856b2..8884db22 100644
--- a/pygments/lexers/sql.py
+++ b/pygments/lexers/sql.py
@@ -59,7 +59,14 @@ line_re = re.compile('.*?\n')
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
-do_re = re.compile(r'\bDO\b', re.IGNORECASE)
+do_re = re.compile(r'\bDO\b', re.IGNORECASE)
+
+# Regular expressions for analyse_text()
+name_between_bracket_re = re.compile(r'\[[a-zA-Z_]\w*\]')
+name_between_backtick_re = re.compile(r'`[a-zA-Z_]\w*`')
+tsql_go_re = re.compile(r'\bgo\b', re.IGNORECASE)
+tsql_declare_re = re.compile(r'\bdeclare\s+@', re.IGNORECASE)
+tsql_variable_re = re.compile(r'@[a-zA-Z_]\w*\b')
def language_callback(lexer, match):
@@ -82,7 +89,7 @@ def language_callback(lexer, match):
lexer.text[max(0, match.start()-25):match.start()]))
if m:
l = lexer._get_lexer('plpgsql')
-
+
# 1 = $, 2 = delimiter, 3 = $
yield (match.start(1), String, match.group(1))
yield (match.start(2), String.Delimiter, match.group(2))
@@ -474,6 +481,9 @@ class SqlLexer(RegexLexer):
]
}
+ def analyse_text(text):
+ return 0.01
+
class TransactSqlLexer(RegexLexer):
"""
@@ -530,6 +540,33 @@ class TransactSqlLexer(RegexLexer):
]
}
+ def analyse_text(text):
+ rating = 0
+ if tsql_declare_re.search(text):
+ # Found T-SQL variable declaration.
+ rating = 1.0
+ else:
+ name_between_backtick_count = len(
+ name_between_backtick_re.findall((text)))
+ name_between_bracket_count = len(
+ name_between_bracket_re.findall(text))
+ # We need to check if there are any names using
+ # backticks or brackets, as otherwise both are 0
+ # and 0 >= 2 * 0, so we would always assume it's true
+ dialect_name_count = name_between_backtick_count + name_between_bracket_count
+ if dialect_name_count >= 1 and name_between_bracket_count >= 2 * name_between_backtick_count:
+ # Found at least twice as many [name] as `name`.
+ rating += 0.5
+ elif name_between_bracket_count > name_between_backtick_count:
+ rating += 0.2
+ elif name_between_bracket_count > 0:
+ rating += 0.1
+ if tsql_variable_re.search(text) is not None:
+ rating += 0.1
+ if tsql_go_re.search(text) is not None:
+ rating += 0.1
+ return rating
+
class MySqlLexer(RegexLexer):
"""
@@ -603,6 +640,23 @@ class MySqlLexer(RegexLexer):
]
}
+ def analyse_text(text):
+ rating = 0
+ name_between_backtick_count = len(
+ name_between_backtick_re.findall((text)))
+ name_between_bracket_count = len(
+ name_between_bracket_re.findall(text))
+ # Same logic as above in the TSQL analysis
+ dialect_name_count = name_between_backtick_count + name_between_bracket_count
+ if dialect_name_count >= 1 and name_between_backtick_count >= 2 * name_between_bracket_count:
+ # Found at least twice as many `name` as [name].
+ rating += 0.5
+ elif name_between_backtick_count > name_between_bracket_count:
+ rating += 0.2
+ elif name_between_backtick_count > 0:
+ rating += 0.1
+ return rating
+
class SqliteConsoleLexer(Lexer):
"""
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index c184b2dd..8000deba 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -375,7 +375,7 @@ class DjangoLexer(RegexLexer):
(r'\.\w+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
- (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
+ (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
diff --git a/pygments/lexers/text.py b/pygments/lexers/text.py
index 9b3b5fea..bb1dccf2 100644
--- a/pygments/lexers/text.py
+++ b/pygments/lexers/text.py
@@ -18,6 +18,7 @@ from pygments.lexers.markup import BBCodeLexer, MoinWikiLexer, RstLexer, \
from pygments.lexers.installers import DebianControlLexer, SourcesListLexer
from pygments.lexers.make import MakefileLexer, BaseMakefileLexer, CMakeLexer
from pygments.lexers.haxe import HxmlLexer
+from pygments.lexers.sgf import SmartGameFormatLexer
from pygments.lexers.diff import DiffLexer, DarcsPatchLexer
from pygments.lexers.data import YamlLexer
from pygments.lexers.textfmts import IrcLogsLexer, GettextLexer, HttpLexer
diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py
index 839a9b78..1f39c692 100644
--- a/pygments/styles/__init__.py
+++ b/pygments/styles/__init__.py
@@ -44,6 +44,8 @@ STYLE_MAP = {
'arduino': 'arduino::ArduinoStyle',
'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
'abap': 'abap::AbapStyle',
+ 'solarized-dark': 'solarized::SolarizedDarkStyle',
+ 'solarized-light': 'solarized::SolarizedLightStyle',
}
diff --git a/pygments/styles/arduino.py b/pygments/styles/arduino.py
index 57e3809e..b500b6d9 100644
--- a/pygments/styles/arduino.py
+++ b/pygments/styles/arduino.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-"""
+u"""
pygments.styles.arduino
~~~~~~~~~~~~~~~~~~~~~~~
@@ -15,7 +15,7 @@ from pygments.token import Keyword, Name, Comment, String, Error, \
class ArduinoStyle(Style):
- """
+ u"""
The Arduino® language style. This style is designed to highlight the
Arduino source code, so exepect the best results with it.
"""
diff --git a/pygments/styles/paraiso_dark.py b/pygments/styles/paraiso_dark.py
index 5f334bb9..68abb9f6 100644
--- a/pygments/styles/paraiso_dark.py
+++ b/pygments/styles/paraiso_dark.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-"""
+u"""
pygments.styles.paraiso_dark
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pygments/styles/paraiso_light.py b/pygments/styles/paraiso_light.py
index a8112819..186e4775 100644
--- a/pygments/styles/paraiso_light.py
+++ b/pygments/styles/paraiso_light.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-"""
+u"""
pygments.styles.paraiso_light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pygments/styles/solarized.py b/pygments/styles/solarized.py
new file mode 100644
index 00000000..a1790b08
--- /dev/null
+++ b/pygments/styles/solarized.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.styles.solarized
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Solarized by Camil Staps
+
+ A Pygments style for the Solarized themes (licensed under MIT).
+ See: https://github.com/altercation/solarized
+
+ :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Comment, Error, Generic, Keyword, Name, Number, \
+ Operator, String, Token
+
+
+def make_style(colors):
+ return {
+ Token: colors['base0'],
+
+ Comment: 'italic ' + colors['base01'],
+ Comment.Hashbang: colors['base01'],
+ Comment.Multiline: colors['base01'],
+ Comment.Preproc: 'noitalic ' + colors['magenta'],
+ Comment.PreprocFile: 'noitalic ' + colors['base01'],
+
+ Keyword: colors['green'],
+ Keyword.Constant: colors['cyan'],
+ Keyword.Declaration: colors['cyan'],
+ Keyword.Namespace: colors['orange'],
+ Keyword.Type: colors['yellow'],
+
+ Operator: colors['base01'],
+ Operator.Word: colors['green'],
+
+ Name.Builtin: colors['blue'],
+ Name.Builtin.Pseudo: colors['blue'],
+ Name.Class: colors['blue'],
+ Name.Constant: colors['blue'],
+ Name.Decorator: colors['blue'],
+ Name.Entity: colors['blue'],
+ Name.Exception: colors['blue'],
+ Name.Function: colors['blue'],
+ Name.Function.Magic: colors['blue'],
+ Name.Label: colors['blue'],
+ Name.Namespace: colors['blue'],
+ Name.Tag: colors['blue'],
+ Name.Variable: colors['blue'],
+ Name.Variable.Global:colors['blue'],
+ Name.Variable.Magic: colors['blue'],
+
+ String: colors['cyan'],
+ String.Doc: colors['base01'],
+ String.Regex: colors['orange'],
+
+ Number: colors['cyan'],
+
+ Generic.Deleted: colors['red'],
+ Generic.Emph: 'italic',
+ Generic.Error: colors['red'],
+ Generic.Heading: 'bold',
+ Generic.Subheading: 'underline',
+ Generic.Inserted: colors['green'],
+ Generic.Strong: 'bold',
+ Generic.Traceback: colors['blue'],
+
+ Error: 'bg:' + colors['red'],
+ }
+
+
+DARK_COLORS = {
+ 'base03': '#002b36',
+ 'base02': '#073642',
+ 'base01': '#586e75',
+ 'base00': '#657b83',
+ 'base0': '#839496',
+ 'base1': '#93a1a1',
+ 'base2': '#eee8d5',
+ 'base3': '#fdf6e3',
+ 'yellow': '#b58900',
+ 'orange': '#cb4b16',
+ 'red': '#dc322f',
+ 'magenta': '#d33682',
+ 'violet': '#6c71c4',
+ 'blue': '#268bd2',
+ 'cyan': '#2aa198',
+ 'green': '#859900',
+}
+
+LIGHT_COLORS = {
+ 'base3': '#002b36',
+ 'base2': '#073642',
+ 'base1': '#586e75',
+ 'base0': '#657b83',
+ 'base00': '#839496',
+ 'base01': '#93a1a1',
+ 'base02': '#eee8d5',
+ 'base03': '#fdf6e3',
+ 'yellow': '#b58900',
+ 'orange': '#cb4b16',
+ 'red': '#dc322f',
+ 'magenta': '#d33682',
+ 'violet': '#6c71c4',
+ 'blue': '#268bd2',
+ 'cyan': '#2aa198',
+ 'green': '#859900',
+}
+
+
+class SolarizedDarkStyle(Style):
+ """
+ The solarized style, dark.
+ """
+
+ styles = make_style(DARK_COLORS)
+ background_color = DARK_COLORS['base03']
+ highlight_color = DARK_COLORS['base02']
+
+class SolarizedLightStyle(SolarizedDarkStyle):
+ """
+ The solarized style, light.
+ """
+
+ styles = make_style(LIGHT_COLORS)
+ background_color = LIGHT_COLORS['base03']
+ highlight_color = LIGHT_COLORS['base02']
diff --git a/scripts/check_sources.py b/scripts/check_sources.py
index db09de42..c0524b6c 100755
--- a/scripts/check_sources.py
+++ b/scripts/check_sources.py
@@ -185,7 +185,8 @@ def main(argv):
print("Checking %s..." % fn)
try:
- lines = open(fn, 'rb').read().decode('utf-8').splitlines()
+ with open(fn, 'rb') as f:
+ lines = f.read().decode('utf-8').splitlines()
except (IOError, OSError) as err:
print("%s: cannot open: %s" % (fn, err))
num += 1
diff --git a/setup.py b/setup.py
index 3f75a20e..52889227 100755
--- a/setup.py
+++ b/setup.py
@@ -60,6 +60,7 @@ setup(
platforms = 'any',
zip_safe = False,
include_package_data = True,
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers = [
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
@@ -68,7 +69,11 @@ setup(
'Development Status :: 6 - Mature',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
diff --git a/tests/examplefiles/example.sgf b/tests/examplefiles/example.sgf
new file mode 100644
index 00000000..024a461e
--- /dev/null
+++ b/tests/examplefiles/example.sgf
@@ -0,0 +1,35 @@
+(;FF[4]GM[1]SZ[19]FG[257:Figure 1]PM[1]
+PB[Takemiya Masaki]BR[9 dan]PW[Cho Chikun]
+WR[9 dan]RE[W+Resign]KM[5.5]TM[28800]DT[1996-10-18,19]
+EV[21st Meijin]RO[2 (final)]SO[Go World #78]US[Arno Hollosi]
+;B[pd];W[dp];B[pp];W[dd];B[pj];W[nc];B[oe];W[qc];B[pc];W[qd]
+(;B[qf];W[rf];B[rg];W[re];B[qg];W[pb];B[ob];W[qb]
+(;B[mp];W[fq];B[ci];W[cg];B[dl];W[cn];B[qo];W[ec];B[jp];W[jd]
+;B[ei];W[eg];B[kk]LB[qq:a][dj:b][ck:c][qp:d]N[Figure 1]
+
+;W[me]FG[257:Figure 2];B[kf];W[ke];B[lf];W[jf];B[jg]
+(;W[mf];B[if];W[je];B[ig];W[mg];B[mj];W[mq];B[lq];W[nq]
+(;B[lr];W[qq];B[pq];W[pr];B[rq];W[rr];B[rp];W[oq];B[mr];W[oo];B[mn]
+(;W[nr];B[qp]LB[kd:a][kh:b]N[Figure 2]
+
+;W[pk]FG[257:Figure 3];B[pm];W[oj];B[ok];W[qr];B[os];W[ol];B[nk];W[qj]
+;B[pi];W[pl];B[qm];W[ns];B[sr];W[om];B[op];W[qi];B[oi]
+(;W[rl];B[qh];W[rm];B[rn];W[ri];B[ql];W[qk];B[sm];W[sk];B[sh];W[og]
+;B[oh];W[np];B[no];W[mm];B[nn];W[lp];B[kp];W[lo];B[ln];W[ko];B[mo]
+;W[jo];B[km]N[Figure 3])
+
+(;W[ql]VW[ja:ss]FG[257:Dia. 6]MN[1];B[rm];W[ph];B[oh];W[pg];B[og];W[pf]
+;B[qh];W[qe];B[sh];W[of];B[sj]TR[oe][pd][pc][ob]LB[pe:a][sg:b][si:c]
+N[Diagram 6]))
+
+(;W[no]VW[jj:ss]FG[257:Dia. 5]MN[1];B[pn]N[Diagram 5]))
+
+(;B[pr]FG[257:Dia. 4]MN[1];W[kq];B[lp];W[lr];B[jq];W[jr];B[kp];W[kr];B[ir]
+;W[hr]LB[is:a][js:b][or:c]N[Diagram 4]))
+
+(;W[if]FG[257:Dia. 3]MN[1];B[mf];W[ig];B[jh]LB[ki:a]N[Diagram 3]))
+
+(;W[oc]VW[aa:sk]FG[257:Dia. 2]MN[1];B[md];W[mc];B[ld]N[Diagram 2]))
+
+(;B[qe]VW[aa:sj]FG[257:Dia. 1]MN[1];W[re];B[qf];W[rf];B[qg];W[pb];B[ob]
+;W[qb]LB[rg:a]N[Diagram 1]))
diff --git a/tests/examplefiles/example.sl b/tests/examplefiles/example.sl
new file mode 100644
index 00000000..5fb430de
--- /dev/null
+++ b/tests/examplefiles/example.sl
@@ -0,0 +1,6 @@
+#!/bin/bash
+#SBATCH --partition=part
+#SBATCH --job-name=job
+#SBATCH --mem=1G
+#SBATCH --cpus-per-task=8
+srun /usr/bin/sleep \ No newline at end of file
diff --git a/tests/examplefiles/example.stan b/tests/examplefiles/example.stan
index 69c9ac70..03b7b1b5 100644
--- a/tests/examplefiles/example.stan
+++ b/tests/examplefiles/example.stan
@@ -16,7 +16,7 @@ functions {
data {
// valid name
int abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_abc;
- // all types should be highlighed
+ // all types should be highlighted
int a3;
real foo[2];
vector[3] bar;
@@ -48,7 +48,7 @@ transformed data {
thud <- -12309865;
// ./ and .* should be recognized as operators
grault2 <- grault .* garply ./ garply;
- // ' and \ should be regognized as operators
+ // ' and \ should be recognized as operators
qux2 <- qux' \ bar;
}
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 1500c875..a55e30ec 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -28,6 +28,13 @@ def func(args):
'''
+def _decode_output(text):
+ try:
+ return text.decode('utf-8')
+ except UnicodeEncodeError: # implicit encode on Python 2 with data loss
+ return text
+
+
def run_cmdline(*args, **kwds):
saved_stdin = sys.stdin
saved_stdout = sys.stdout
@@ -53,9 +60,9 @@ def run_cmdline(*args, **kwds):
sys.stderr = saved_stderr
new_stdout.flush()
new_stderr.flush()
- out, err = stdout_buffer.getvalue().decode('utf-8'), \
- stderr_buffer.getvalue().decode('utf-8')
- return (ret, out, err)
+ out, err = stdout_buffer.getvalue(), \
+ stderr_buffer.getvalue()
+ return (ret, _decode_output(out), _decode_output(err))
class CmdLineTest(unittest.TestCase):
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 10450c56..670a5be9 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -132,9 +132,8 @@ class HtmlFormatterTest(unittest.TestCase):
outencoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
- tfile = os.fdopen(handle, 'w+b')
- fmt.format(tokensource, tfile)
- tfile.close()
+ with os.fdopen(handle, 'w+b') as tfile:
+ fmt.format(tokensource, tfile)
catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
try:
import subprocess
@@ -173,9 +172,8 @@ class HtmlFormatterTest(unittest.TestCase):
cssstyles=u'div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
- tfile = os.fdopen(handle, 'w+b')
- fmt.format(tokensource, tfile)
- tfile.close()
+ with os.fdopen(handle, 'w+b') as tfile:
+ fmt.format(tokensource, tfile)
def test_ctags(self):
try:
diff --git a/tests/test_sql.py b/tests/test_sql.py
index c5f5c758..6be34006 100644
--- a/tests/test_sql.py
+++ b/tests/test_sql.py
@@ -8,7 +8,10 @@
"""
import unittest
-from pygments.lexers.sql import TransactSqlLexer
+from pygments.lexers.sql import name_between_bracket_re, \
+ name_between_backtick_re, tsql_go_re, tsql_declare_re, \
+ tsql_variable_re, MySqlLexer, SqlLexer, TransactSqlLexer
+
from pygments.token import Comment, Name, Number, Punctuation, Whitespace
@@ -72,3 +75,44 @@ class TransactSqlLexerTest(unittest.TestCase):
(Comment.Multiline, '*/'),
(Comment.Multiline, '*/'),
))
+
+
+class SqlAnalyzeTextTest(unittest.TestCase):
+ def test_can_match_analyze_text_res(self):
+ self.assertEqual(['`a`', '`bc`'],
+ name_between_backtick_re.findall('select `a`, `bc` from some'))
+ self.assertEqual(['[a]', '[bc]'],
+ name_between_bracket_re.findall('select [a], [bc] from some'))
+ self.assertTrue(tsql_declare_re.search('--\nDeClaRe @some int;'))
+ self.assertTrue(tsql_go_re.search('select 1\ngo\n--'))
+ self.assertTrue(tsql_variable_re.search(
+ 'create procedure dbo.usp_x @a int, @b int'))
+
+ def test_can_analyze_text(self):
+ mysql_lexer = MySqlLexer()
+ sql_lexer = SqlLexer()
+ tsql_lexer = TransactSqlLexer()
+ code_to_expected_lexer_map = {
+ 'select `a`, `bc` from some': mysql_lexer,
+ 'select a, bc from some': sql_lexer,
+ 'select [a], [bc] from some': tsql_lexer,
+ '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
+ '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
+ }
+ sql_lexers = set(code_to_expected_lexer_map.values())
+ for code, expected_lexer in code_to_expected_lexer_map.items():
+ ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
+ best_rating, best_lexer_name = sorted(ratings_and_lexers, reverse=True)[0]
+ expected_rating = expected_lexer.analyse_text(code)
+ message = (
+ 'lexer must be %s (rating %.2f) instead of '
+ '%s (rating %.2f) for analyse_text() on code:\n%s') % (
+ expected_lexer.name,
+ expected_rating,
+ best_lexer_name,
+ best_rating,
+ code
+ )
+ self.assertEqual(
+ expected_lexer.name, best_lexer_name, message
+ )