 CHANGES                                                                 |  18
 doc/conf.py                                                             |   4
 doc/languages.rst                                                       |   4
 pygments/lexers/_mapping.py                                             |   8
 pygments/lexers/_stata_builtins.py                                      |   8
 pygments/lexers/asm.py                                                  |   4
 pygments/lexers/configs.py                                              |  13
 pygments/lexers/jvm.py                                                  |  33
 pygments/lexers/slash.py                                                |   3
 pygments/lexers/stata.py                                                | 143
 pygments/styles/__init__.py                                             |   4
 pygments/styles/stata_dark.py (new)                                     |  41
 pygments/styles/stata_light.py (renamed from pygments/styles/stata.py)  |  27
 tests/test_kotlin.py (new)                                              | 131
 14 files changed, 361 insertions(+), 80 deletions(-)
diff --git a/CHANGES b/CHANGES
index 3464ef22..2b1ebb0b 100644
--- a/CHANGES
+++ b/CHANGES
@@ -12,17 +12,17 @@ Version 2.4.0
- Added lexers:
+ * Augeas (PR#807)
* Charm++ CI (PR#788)
+ * DASM16 (PR#807)
* FloScript (PR#750)
* Hspec (PR#790)
* SGF (PR#780)
+ * Slash (PR#807)
* Slurm (PR#760)
+ * TOML (PR#807)
* Unicon (PR#731)
* VBScript (PR#673)
- * DASM16 (PR#807)
- * Augeas (PR#807)
- * TOML (PR#807)
- * Slash (PR#807)
- Updated lexers:
@@ -31,16 +31,18 @@ Version 2.4.0
* PHP (#1482)
* SQL (PR#672)
* Stan (PR#774)
+ * Stata (PR#800)
* Terraform (PR#787)
- Add solarized style (PR#708)
+- Add support for Markdown reference-style links (PR#753)
- Change ANSI color names (PR#777)
-- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
-- Updated Trove classifiers and ``pip`` requirements (PR#799)
-- TypoScript uses ``.typoscript`` now (#1498)
- Fix catastrophic backtracking in the bash lexer (#1494)
+- Fix documentation failing to build using Sphinx 2.0 (#1501)
- Fix incorrect links in the Lisp and R lexer documentation (PR#775)
-- Add support for Markdown reference-style links (PR#753)
+- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
+- TypoScript uses ``.typoscript`` now (#1498)
+- Updated Trove classifiers and ``pip`` requirements (PR#799)
Version 2.3.1
-------------
diff --git a/doc/conf.py b/doc/conf.py
index 51a91617..00db7d9b 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -125,8 +125,8 @@ html_static_path = ['_static']
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': 'indexsidebar.html',
- 'docs/*': 'docssidebar.html'}
+html_sidebars = {'index': ['indexsidebar.html'],
+ 'docs/*': ['docssidebar.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
diff --git a/doc/languages.rst b/doc/languages.rst
index 47e3363f..d2508b07 100644
--- a/doc/languages.rst
+++ b/doc/languages.rst
@@ -14,6 +14,7 @@ Programming languages
* AppleScript
* Assembly (various)
* Asymptote
+* `Augeas <http://augeas.net>`_
* Awk
* Befunge
* Boo
@@ -31,6 +32,7 @@ Programming languages
* `Cython <http://cython.org>`_
* `D <http://dlang.org>`_
* Dart
+* DCPU-16
* Delphi
* Dylan
* `Elm <http://elm-lang.org/>`_
@@ -85,10 +87,12 @@ Programming languages
* Scheme
* Scilab
* `SGF <https://www.red-bean.com/sgf/>`_
+* `Slash <https://github.com/arturadib/Slash-A>`_
* `Slurm <https://slurm.schedmd.com/overview.html>`_
* Smalltalk
* SNOBOL
* Tcl
+* `TOML <https://github.com/toml-lang/toml>`_
* Vala
* Verilog
* VHDL
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 3c05d147..e8838008 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -44,7 +44,7 @@ LEXERS = {
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
- 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug'), ()),
+ 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
@@ -122,7 +122,7 @@ LEXERS = {
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
- 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16')),
+ 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
@@ -377,7 +377,6 @@ LEXERS = {
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
- 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash'), ('*.sl'), ()),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
@@ -389,6 +388,7 @@ LEXERS = {
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
+ 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
@@ -411,6 +411,7 @@ LEXERS = {
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
+ 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml',), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
@@ -424,7 +425,6 @@ LEXERS = {
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
- 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml'), ()),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
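The four _mapping.py fixes above are all the same Python pitfall: parentheses alone do not make a 1-tuple, so ('*.aug') is just the string '*.aug', and anything iterating over the filename patterns walks it character by character. A minimal sketch of the difference:

    not_a_tuple = ('*.aug')    # parentheses do nothing here: this is the str '*.aug'
    a_tuple = ('*.aug',)       # the trailing comma is what makes a 1-tuple

    print(type(not_a_tuple).__name__)   # str
    print(type(a_tuple).__name__)       # tuple
    print(list(not_a_tuple))            # ['*', '.', 'a', 'u', 'g'] -- per character
    print(list(a_tuple))                # ['*.aug']
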
diff --git a/pygments/lexers/_stata_builtins.py b/pygments/lexers/_stata_builtins.py
index 5f5f72a9..3e5e75b2 100644
--- a/pygments/lexers/_stata_builtins.py
+++ b/pygments/lexers/_stata_builtins.py
@@ -10,6 +10,10 @@
"""
+builtins_special = (
+ "if", "in", "using", "replace", "by", "gen", "generate"
+)
+
builtins_base = (
"if", "else", "in", "foreach", "for", "forv", "forva",
"forval", "forvalu", "forvalue", "forvalues", "by", "bys",
@@ -66,7 +70,7 @@ builtins_base = (
"doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
"drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
"dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
- "eivreg", "emdef", "en", "enc", "enco", "encod", "encode",
+ "eivreg", "emdef", "end", "en", "enc", "enco", "encod", "encode",
"eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
"ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
"ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
@@ -415,5 +419,3 @@ builtins_functions = (
"weekly", "wofd", "word", "wordcount", "year", "yearly",
"yh", "ym", "yofd", "yq", "yw"
)
-
-
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 9ac3fdd9..7100868c 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -660,8 +660,8 @@ class Dasm16Lexer(RegexLexer):
.. versionadded:: 2.4
"""
- name = 'dasm16'
- aliases = ['DASM16']
+ name = 'DASM16'
+ aliases = ['dasm16']
filenames = ['*.dasm16', '*.dasm']
mimetypes = ['text/x-dasm16']
diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py
index 350dcd6f..765c67b7 100644
--- a/pygments/lexers/configs.py
+++ b/pygments/lexers/configs.py
@@ -842,7 +842,7 @@ class PacmanConfLexer(RegexLexer):
class AugeasLexer(RegexLexer):
"""
- Lexer for Augeas
+ Lexer for `Augeas <http://augeas.net>`_.
.. versionadded:: 2.4
"""
@@ -886,7 +886,8 @@ class AugeasLexer(RegexLexer):
class TOMLLexer(RegexLexer):
"""
- Lexer for TOML, a simple language for config files
+ Lexer for `TOML <https://github.com/toml-lang/toml>`_, a simple language
+ for config files.
.. versionadded:: 2.4
"""
@@ -901,12 +902,18 @@ class TOMLLexer(RegexLexer):
# Basics, comments, strings
(r'\s+', Text),
(r'#.*?$', Comment.Single),
+ # Basic string
(r'"(\\\\|\\"|[^"])*"', String),
+ # Literal string
+ (r'\'[^\']*\'', String),
(r'(true|false)$', Keyword.Constant),
('[a-zA-Z_][a-zA-Z0-9_\-]*', Name),
+ (r'\[.*?\]$', Keyword),
# Datetime
- (r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', Number.Integer),
+ # TODO this needs to be expanded, as TOML is rather flexible:
+ # https://github.com/toml-lang/toml#offset-date-time
+ (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer),
# Numbers
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
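A quick way to sanity-check the two new TOML rules (literal single-quoted strings and the widened datetime pattern); a sketch, assuming a Pygments checkout with this patch applied:

    from pygments.lexers.configs import TOMLLexer
    from pygments.token import Number, String

    code = """
    title = 'a literal string'
    updated = 1979-05-27 07:32:00Z
    """

    # Print only the tokens the new rules produce:
    for ttype, value in TOMLLexer().get_tokens(code):
        if ttype in (String, Number.Integer):
            print(ttype, repr(value))
    # Token.Literal.String "'a literal string'"
    # Token.Literal.Number.Integer '1979-05-27 07:32:00Z'
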
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 5a9a74a9..8de6e9f2 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -1006,7 +1006,7 @@ class KotlinLexer(RegexLexer):
.. versionadded:: 1.5
"""
-
+
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
@@ -1017,15 +1017,22 @@ class KotlinLexer(RegexLexer):
kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc') + ']*')
- kt_id = '(' + kt_name + '|`' + kt_name + '`)'
+
+ kt_space_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
+ '[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
+ 'Mn', 'Mc', 'Zs') + ',-]*')
+
+ kt_id = '(' + kt_name + '|`' + kt_space_name + '`)'
tokens = {
'root': [
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
+ (r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
+ (r'""".*?"""', String),
(r'\n', Text),
(r'::|!!|\?[:.]', Operator),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
@@ -1035,11 +1042,14 @@ class KotlinLexer(RegexLexer):
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
- (r'(class)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
+ (r'(object)(\s+)(:)(\s+)', bygroups(Keyword, Text, Punctuation, Text), 'class'),
+ (r'(companion)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
(r'(class|interface|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(package|import)(\s+)', bygroups(Keyword, Text), 'package'),
+ (r'(val|var)(\s+)([(])', bygroups(Keyword, Text, Punctuation), 'property_dec'),
(r'(val|var)(\s+)', bygroups(Keyword, Text), 'property'),
(r'(fun)(\s+)', bygroups(Keyword, Text), 'function'),
+ (r'(inline fun)(\s+)', bygroups(Keyword, Text), 'function'),
(r'(abstract|annotation|as|break|by|catch|class|companion|const|'
r'constructor|continue|crossinline|data|do|dynamic|else|enum|'
r'external|false|final|finally|for|fun|get|if|import|in|infix|'
@@ -1058,9 +1068,26 @@ class KotlinLexer(RegexLexer):
'property': [
(kt_id, Name.Property, '#pop')
],
+ 'property_dec': [
+ (r'(,)(\s*)', bygroups(Punctuation, Text)),
+ (r'(:)(\s*)', bygroups(Punctuation, Text)),
+ (r'<', Punctuation, 'generic'),
+ (r'([)])', Punctuation, '#pop'),
+ (kt_id, Name.Property)
+ ],
'function': [
+ (r'<', Punctuation, 'generic'),
+ (r''+kt_id+'([.])'+kt_id, bygroups(Name.Class, Punctuation, Name.Function), '#pop'),
(kt_id, Name.Function, '#pop')
],
+ 'generic': [
+ (r'(>)(\s*)', bygroups(Punctuation, Text), '#pop'),
+ (r':', Punctuation),
+ (r'(reified|out|in)\b', Keyword),
+ (r',', Text),
+ (r'\s+', Text),
+ (kt_id, Name)
+ ]
}
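The new 'generic', extension-receiver, and 'inline fun' rules can be exercised directly; a sketch against this patched lexer (the Kotlin fragment is only illustrative), with fuller coverage in tests/test_kotlin.py below:

    from pygments.lexers.jvm import KotlinLexer
    from pygments.token import Keyword, Name

    fragment = 'inline fun <reified T : Any> StringBuilder.appendTyped(x: T) { }\n'

    for ttype, value in KotlinLexer().get_tokens(fragment):
        if ttype in (Keyword, Name.Class, Name.Function):
            print(ttype, repr(value))
    # Token.Keyword 'inline fun'
    # Token.Keyword 'reified'
    # Token.Name.Class 'StringBuilder'
    # Token.Name.Function 'appendTyped'
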
diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py
index 3ba5e1e6..bd73d463 100644
--- a/pygments/lexers/slash.py
+++ b/pygments/lexers/slash.py
@@ -3,7 +3,8 @@
pygments.lexers.slash
~~~~~~~~~~~~~~~~~~~~~
- Lexer for the Slash programming language.
+ Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming
+ language.
:copyright: Copyright 2012 by GitHub, Inc
:license: BSD, see LICENSE for details.
diff --git a/pygments/lexers/stata.py b/pygments/lexers/stata.py
index a015a23e..9566d12a 100644
--- a/pygments/lexers/stata.py
+++ b/pygments/lexers/stata.py
@@ -9,6 +9,7 @@
:license: BSD, see LICENSE for details.
"""
+import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
@@ -33,56 +34,118 @@ class StataLexer(RegexLexer):
aliases = ['stata', 'do']
filenames = ['*.do', '*.ado']
mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
+ flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('comments'),
- include('vars-strings'),
+ include('strings'),
+ include('macros'),
include('numbers'),
include('keywords'),
+ include('operators'),
+ include('format'),
(r'.', Text),
],
- # Global and local macros; regular and special strings
- 'vars-strings': [
- (r'\$[\w{]', Name.Variable.Global, 'var_validglobal'),
- (r'`\w{0,31}\'', Name.Variable),
- (r'"', String, 'string_dquote'),
- (r'`"', String, 'string_mquote'),
- ],
- # For either string type, highlight macros as macros
- 'string_dquote': [
- (r'"', String, '#pop'),
- (r'\\\\|\\"|\\\n', String.Escape),
- (r'\$', Name.Variable.Global, 'var_validglobal'),
- (r'`', Name.Variable, 'var_validlocal'),
- (r'[^$`"\\]+', String),
- (r'[$"\\]', String),
- ],
- 'string_mquote': [
+ # Comments are a complicated beast in Stata because they can be
+ # nested and there are a few corner cases with that. See:
+ # - github.com/kylebarron/language-stata/issues/90
+ # - statalist.org/forums/forum/general-stata-discussion/general/1448244
+ 'comments': [
+ (r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
+ (r'^\s*\*', Comment.Single, 'comments-star'),
+ (r'/\*', Comment.Multiline, 'comments-block'),
+ (r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
+ ],
+ 'comments-block': [
+ (r'/\*', Comment.Multiline, '#push'),
+ # this ends and restarts a comment block. but need to catch this so
+ # that it doesn't start _another_ level of comment blocks
+ (r'\*/\*', Comment.Multiline),
+ (r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
+ # Match anything else as a character inside the comment
+ (r'.', Comment.Multiline),
+ ],
+ 'comments-star': [
+ (r'///.*?\n', Comment.Single,
+ ('#pop', 'comments-triple-slash')),
+ (r'(^//|(?<=\s)//)(?!/)', Comment.Single,
+ ('#pop', 'comments-double-slash')),
+ (r'/\*', Comment.Multiline, 'comments-block'),
+ (r'.(?=\n)', Comment.Single, '#pop'),
+ (r'.', Comment.Single),
+ ],
+ 'comments-triple-slash': [
+ (r'\n', Comment.Special, '#pop'),
+ # A // breaks out of a comment for the rest of the line
+ (r'//.*?(?=\n)', Comment.Single, '#pop'),
+ (r'.', Comment.Special),
+ ],
+ 'comments-double-slash': [
+ (r'\n', Text, '#pop'),
+ (r'.', Comment.Single),
+ ],
+ # `"compound string"' and regular "string"; note the former are
+ # nested.
+ 'strings': [
+ (r'`"', String, 'string-compound'),
+ (r'(?<!`)"', String, 'string-regular'),
+ ],
+ 'string-compound': [
+ (r'`"', String, '#push'),
(r'"\'', String, '#pop'),
- (r'\\\\|\\"|\\\n', String.Escape),
- (r'\$', Name.Variable.Global, 'var_validglobal'),
- (r'`', Name.Variable, 'var_validlocal'),
- (r'[^$`"\\]+', String),
- (r'[$"\\]', String),
- ],
- 'var_validglobal': [
- (r'\{\w{0,32}\}', Name.Variable.Global, '#pop'),
- (r'\w{1,32}', Name.Variable.Global, '#pop'),
+ (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
+ include('macros'),
+ (r'.', String)
],
- 'var_validlocal': [
- (r'\w{0,31}\'', Name.Variable, '#pop'),
+ 'string-regular': [
+ (r'(")(?!\')|(?=\n)', String, '#pop'),
+ (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
+ include('macros'),
+ (r'.', String)
],
- # * only OK at line start, // OK anywhere
- 'comments': [
- (r'^\s*\*.*$', Comment),
- (r'//.*', Comment.Single),
- (r'/\*.*?\*/', Comment.Multiline),
- (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ # A local is usually
+ # `\w{0,31}'
+ # `:extended macro'
+ # `=expression'
+ # `[rsen](results)'
+ # `(++--)scalar(++--)'
+ #
+ # However, there are all sorts of weird rules wrt edge
+ # cases. Instead of writing 27 exceptions, anything inside
+ # `' is a local.
+ #
+ # A global is more restricted, so we do follow rules. Note only
+ # locals explicitly enclosed ${} can be nested.
+ 'macros': [
+ (r'\$(\{|(?=[\$`]))', Name.Variable.Global, 'macro-global-nested'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'`', Name.Variable, 'macro-local'),
+ ],
+ 'macro-local': [
+ (r'`', Name.Variable, '#push'),
+ (r"'", Name.Variable, '#pop'),
+ (r'\$(\{|(?=[\$`]))', Name.Variable.Global, 'macro-global-nested'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'.', Name.Variable), # fallback
+ ],
+ 'macro-global-nested': [
+ (r'\$(\{|(?=[\$`]))', Name.Variable.Global, '#push'),
+ (r'\}', Name.Variable.Global, '#pop'),
+ (r'\$', Name.Variable.Global, 'macro-global-name'),
+ (r'`', Name.Variable, 'macro-local'),
+ (r'\w', Name.Variable.Global), # fallback
+ (r'(?!\w)', Name.Variable.Global, '#pop'),
+ ],
+ 'macro-global-name': [
+ (r'\$(\{|(?=[\$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
+ (r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
+ (r'`', Name.Variable, 'macro-local', '#pop'),
+ (r'\w{1,32}', Name.Variable.Global, '#pop'),
],
# Built in functions and statements
'keywords': [
- (words(builtins_functions, prefix = r'\b', suffix = r'\('),
+ (words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
Name.Function),
(words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
Keyword),
@@ -100,9 +163,9 @@ class StataLexer(RegexLexer):
],
# Stata formats
'format': [
- (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Variable),
- (r'%(21x|16H|16L|8H|8L)', Name.Variable),
- (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg).{0,32}', Name.Variable),
- (r'%[-~]?\d{1,4}s', Name.Variable),
+ (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
+ (r'%(21x|16H|16L|8H|8L)', Name.Other),
+ (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
+ (r'%[-~]?\d{1,4}s', Name.Other),
]
}
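The comment and macro machinery above is easiest to verify end to end; a sketch, assuming this patched StataLexer, that feeds it a nested block comment plus nested local and global macros:

    from pygments.lexers.stata import StataLexer
    from pygments.token import Comment, Name

    code = "/* outer /* inner */ still a comment */ display `x' ${y}\n"
    tokens = list(StataLexer().get_tokens(code))

    # The whole nested block comes back as one run of Comment tokens ...
    print(''.join(v for t, v in tokens if t in Comment))
    # /* outer /* inner */ still a comment */

    # ... and both macro flavors are tagged as variables:
    print([(str(t), v) for t, v in tokens if t in Name.Variable])
    # [('Token.Name.Variable', '`'), ('Token.Name.Variable', 'x'),
    #  ('Token.Name.Variable', "'"), ('Token.Name.Variable.Global', '${'),
    #  ('Token.Name.Variable.Global', 'y'), ('Token.Name.Variable.Global', '}')]
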
diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py
index 1f39c692..c7050a18 100644
--- a/pygments/styles/__init__.py
+++ b/pygments/styles/__init__.py
@@ -46,6 +46,10 @@ STYLE_MAP = {
'abap': 'abap::AbapStyle',
'solarized-dark': 'solarized::SolarizedDarkStyle',
'solarized-light': 'solarized::SolarizedLightStyle',
+ 'sas': 'sas::SasStyle',
+ 'stata': 'stata_light::StataLightStyle',
+ 'stata-light': 'stata_light::StataLightStyle',
+ 'stata-dark': 'stata_dark::StataDarkStyle',
}
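With the registrations above, the new styles resolve through the normal style lookup; a minimal sketch:

    from pygments import highlight
    from pygments.formatters import Terminal256Formatter
    from pygments.lexers import get_lexer_by_name
    from pygments.styles import get_style_by_name

    style = get_style_by_name('stata-dark')   # 'stata-light' and plain 'stata' also work
    lexer = get_lexer_by_name('stata')        # aliases: 'stata', 'do'
    print(highlight('summarize price if foreign', lexer,
                    Terminal256Formatter(style=style)))
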
diff --git a/pygments/styles/stata_dark.py b/pygments/styles/stata_dark.py
new file mode 100644
index 00000000..851a9a8d
--- /dev/null
+++ b/pygments/styles/stata_dark.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.styles.stata_dark
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Dark style inspired by Stata's do-file editor. Note this is not
+ meant to be a complete style, just for Stata's file formats.
+
+
+ :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pygments.style import Style
+from pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Whitespace, Generic, Text
+
+
+class StataDarkStyle(Style):
+
+ default_style = ''
+
+ background_color = "#232629"
+ highlight_color = "#49483e"
+
+ styles = {
+ Whitespace: '#bbbbbb',
+ Error: 'bg:#e3d2d2 #a61717',
+ Text: '#cccccc',
+ String: '#51cc99',
+ Number: '#4FB8CC',
+ Operator: '',
+ Name.Function: '#6a6aff',
+ Name.Other: '#e2828e',
+ Keyword: 'bold #7686bb',
+ Keyword.Constant: '',
+ Comment: 'italic #777777',
+ Name.Variable: 'bold #7AB4DB',
+ Name.Variable.Global: 'bold #BE646C',
+ Generic.Prompt: '#ffffff',
+ }
diff --git a/pygments/styles/stata.py b/pygments/styles/stata_light.py
index 2b5f5edd..fcca85e5 100644
--- a/pygments/styles/stata.py
+++ b/pygments/styles/stata_light.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
"""
- pygments.styles.stata
- ~~~~~~~~~~~~~~~~~~~~~
+ pygments.styles.stata_light
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Style inspired by Stata's do-file editor. Note this is not meant
- to be a complete style. It's merely meant to mimic Stata's do file
- editor syntax highlighting.
+ Light style inspired by Stata's do-file editor. Note this is not
+ meant to be a complete style, just for Stata's file formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
@@ -13,28 +12,28 @@
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
- Number, Operator, Whitespace
+ Number, Operator, Whitespace, Text
-class StataStyle(Style):
+class StataLightStyle(Style):
"""
- Style inspired by Stata's do-file editor. Note this is not meant
- to be a complete style. It's merely meant to mimic Stata's do file
- editor syntax highlighting.
+ Light mode style inspired by Stata's do-file editor. This is not
+ meant to be a complete style, just for use with Stata.
"""
default_style = ''
-
styles = {
+ Text: '#111111',
Whitespace: '#bbbbbb',
- Comment: 'italic #008800',
+ Error: 'bg:#e3d2d2 #a61717',
String: '#7a2424',
Number: '#2c2cff',
Operator: '',
+ Name.Function: '#2c2cff',
+ Name.Other: '#be646c',
Keyword: 'bold #353580',
Keyword.Constant: '',
- Name.Function: '#2c2cff',
+ Comment: 'italic #008800',
Name.Variable: 'bold #35baba',
Name.Variable.Global: 'bold #b5565e',
- Error: 'bg:#e3d2d2 #a61717'
}
diff --git a/tests/test_kotlin.py b/tests/test_kotlin.py
new file mode 100644
index 00000000..7c733ad9
--- /dev/null
+++ b/tests/test_kotlin.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+"""
+ Basic KotlinLexer Test
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.token import Text, Name, Operator, Keyword, Number, Punctuation, String
+from pygments.lexers import KotlinLexer
+
+class KotlinTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = KotlinLexer()
+ self.maxDiff = None
+
+ def testCanCopeWithBackTickNamesInFunctions(self):
+ fragment = u'fun `wo bble`'
+ tokens = [
+ (Keyword, u'fun'),
+ (Text, u' '),
+ (Name.Function, u'`wo bble`'),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testCanCopeWithCommasAndDashesInBackTickNames(self):
+ fragment = u'fun `wo,-bble`'
+ tokens = [
+ (Keyword, u'fun'),
+ (Text, u' '),
+ (Name.Function, u'`wo,-bble`'),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testCanCopeWithDestructuring(self):
+ fragment = u'val (a, b) = '
+ tokens = [
+ (Keyword, u'val'),
+ (Text, u' '),
+ (Punctuation, u'('),
+ (Name.Property, u'a'),
+ (Punctuation, u','),
+ (Text, u' '),
+ (Name.Property, u'b'),
+ (Punctuation, u')'),
+ (Text, u' '),
+ (Punctuation, u'='),
+ (Text, u' '),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testCanCopeGenericsInDestructuring(self):
+ fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
+ tokens = [
+ (Keyword, u'val'),
+ (Text, u' '),
+ (Punctuation, u'('),
+ (Name.Property, u'a'),
+ (Punctuation, u':'),
+ (Text, u' '),
+ (Name.Property, u'List'),
+ (Punctuation, u'<'),
+ (Name, u'Something'),
+ (Punctuation, u'>'),
+ (Punctuation, u','),
+ (Text, u' '),
+ (Name.Property, u'b'),
+ (Punctuation, u':'),
+ (Text, u' '),
+ (Name.Property, u'Set'),
+ (Punctuation, u'<'),
+ (Name, u'Wobble'),
+ (Punctuation, u'>'),
+ (Punctuation, u')'),
+ (Text, u' '),
+ (Punctuation, u'='),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testCanCopeWithGenerics(self):
+ fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
+ tokens = [
+ (Keyword, u'inline fun'),
+ (Text, u' '),
+ (Punctuation, u'<'),
+ (Keyword, u'reified'),
+ (Text, u' '),
+ (Name, u'T'),
+ (Text, u' '),
+ (Punctuation, u':'),
+ (Text, u' '),
+ (Name, u'ContractState'),
+ (Punctuation, u'>'),
+ (Text, u' '),
+ (Name.Class, u'VaultService'),
+ (Punctuation, u'.'),
+ (Name.Function, u'queryBy'),
+ (Punctuation, u'('),
+ (Punctuation, u')'),
+ (Punctuation, u':'),
+ (Text, u' '),
+ (Name, u'Vault'),
+ (Punctuation, u'.'),
+ (Name, u'Page'),
+ (Punctuation, u'<'),
+ (Name, u'T'),
+ (Punctuation, u'>'),
+ (Text, u' '),
+ (Punctuation, u'{'),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testShouldCopeWithMultilineComments(self):
+ fragment = u'"""\nthis\nis\na\ncomment"""'
+ tokens = [
+ (String, u'"""\nthis\nis\na\ncomment"""'),
+ (Text, u'\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+if __name__ == '__main__':
+ unittest.main()