From 836d98cc163ea0dcb1b60ef1e536fdacb351d78a Mon Sep 17 00:00:00 2001 From: Benoit Vey Date: Mon, 25 Jul 2016 07:10:17 +0200 Subject: Add lexer for the Pony language --- pygments/lexers/_mapping.py | 1 + pygments/lexers/pony.py | 92 +++++++++++++++++++++++++++++++++++++++++ tests/examplefiles/example.pony | 18 ++++++++ 3 files changed, 111 insertions(+) create mode 100644 pygments/lexers/pony.py create mode 100644 tests/examplefiles/example.pony diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index a6097b1c..847cc0bf 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -316,6 +316,7 @@ LEXERS = { 'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)), 'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()), 'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), + 'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()), 'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), 'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), 'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), diff --git a/pygments/lexers/pony.py b/pygments/lexers/pony.py new file mode 100644 index 00000000..486e404b --- /dev/null +++ b/pygments/lexers/pony.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pony + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pony and related languages. + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['PonyLexer'] + + +class PonyLexer(RegexLexer): + """ + For Pony source code. 
+ """ + + name = 'Pony' + aliases = ['pony'] + filenames = ['*.pony'] + + _caps = r'(iso|trn|ref|val|box|tag)' + + tokens = { + 'root': [ + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'//.*\n', Comment.Single), + (r'/\*', Comment.Multiline, 'nested_comment'), + (r'"""(?:.|\n)*?"""', String.Doc), + (r'"', String, 'string'), + (r'\'.*\'', String.Char), + (r'=>|[]{}:().~;,|&!^?[]', Punctuation), + (words(( + 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt', + 'not', 'or'), + suffix=r'\b'), + Operator.Word), + (r'!=|==|<<|>>|[-+/*%=<>]', Operator), + (words(( + 'box', 'break', 'compile_error', 'compile_intrinsic', + 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error', + 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match', + 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then', + 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where', + 'while', 'with', '#any', '#read', '#send', '#share'), + suffix=r'\b'), + Keyword), + (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)', + bygroups(Keyword, Text), 'typename'), + (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'), + (words(( + 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128', + 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64', + 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String', + 'Iterator'), + suffix=r'\b'), + Name.Builtin.Type), + (r'_?[A-Z]\w*', Name.Type), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + (r'(true|false)\b', Name.Builtin), + (r'_\d*', Name), + (r'_?[a-z][\w\'_]*', Name) + ], + 'typename': [ + (_caps + r'?((?:\s)*)(_?[A-Z]\w*)', + bygroups(Keyword, Text, Name.Class), '#pop') + ], + 'methodname': [ + (_caps + r'?((?:\s)*)(_?[a-z]\w*)', + bygroups(Keyword, Text, Name.Function), '#pop') + ], + 'nested_comment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\"', String), + (r'[^\\"]+', String) + ] + } diff --git a/tests/examplefiles/example.pony b/tests/examplefiles/example.pony new file mode 100644 index 00000000..654f2376 --- /dev/null +++ b/tests/examplefiles/example.pony @@ -0,0 +1,18 @@ +use "somepkg" + +/* + /* Nested */ +*/ + +class trn Foo[A: Stringable ref] is Stringable + let _x = "\"" + + fun val dofoo() => + """ + DocString + """ + (U64(2), "foo")._2 + +actor Main + new create(env: Env) => + env.out.print("Hello world") -- cgit v1.2.1 From 541c30ceb87355a7414e4dbfbcaf849371839751 Mon Sep 17 00:00:00 2001 From: Miro Hron?ok Date: Fri, 22 Sep 2017 13:07:39 +0200 Subject: Recognize single > as a prompt in doscon Fixes https://bitbucket.org/birkenfeld/pygments-main/issues/1380/lexer-for-cmdexe-interactive-session-with --- pygments/lexers/shell.py | 2 +- tests/test_shell.py | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py index ceb6f14d..3b1c37ba 100644 --- a/pygments/lexers/shell.py +++ b/pygments/lexers/shell.py @@ -518,7 +518,7 @@ class MSDOSSessionLexer(ShellSessionBaseLexer): mimetypes = [] _innerLexerCls = BatchLexer - _ps1rgx = r'^([^>]+>)(.*\n?)' + _ps1rgx = r'^([^>]*>)(.*\n?)' _ps2 = 'More? 
' diff --git a/tests/test_shell.py b/tests/test_shell.py index e283793e..1121240a 100644 --- a/tests/test_shell.py +++ b/tests/test_shell.py @@ -10,7 +10,7 @@ import unittest from pygments.token import Token -from pygments.lexers import BashLexer, BashSessionLexer +from pygments.lexers import BashLexer, BashSessionLexer, MSDOSSessionLexer class BashTest(unittest.TestCase): @@ -140,3 +140,20 @@ class BashSessionTest(unittest.TestCase): ] self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +class MSDOSSessionTest(unittest.TestCase): + + def setUp(self): + self.lexer = MSDOSSessionLexer() + + def testGtOnlyPrompt(self): + fragment = u'> py\nhi\n' + tokens = [ + (Token.Text, u''), + (Token.Generic.Prompt, u'>'), + (Token.Text, u' '), + (Token.Text, u'py'), + (Token.Text, u''), + (Token.Text, u'\n'), + (Token.Generic.Output, u'hi\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) -- cgit v1.2.1 From 40a9f83df9f406ec2c89d6687b59df5df104842a Mon Sep 17 00:00:00 2001 From: Kurt McKee Date: Sun, 28 Jan 2018 13:25:10 -0600 Subject: Support the Tera Term macro language The patch modifies the Turtle parser in rdf.py, which uses the same file extension. A unit test file is included. --- AUTHORS | 1 + pygments/lexers/_mapping.py | 1 + pygments/lexers/rdf.py | 7 ++ pygments/lexers/teraterm.py | 158 ++++++++++++++++++++++++++++++++++++++++ tests/examplefiles/teraterm.ttl | 34 +++++++++ 5 files changed, 201 insertions(+) create mode 100644 pygments/lexers/teraterm.py create mode 100644 tests/examplefiles/teraterm.ttl diff --git a/AUTHORS b/AUTHORS index f9ba2675..faf18c55 100644 --- a/AUTHORS +++ b/AUTHORS @@ -129,6 +129,7 @@ Other contributors, listed alphabetically, are: * Stephen McKamey -- Duel/JBST lexer * Brian McKenna -- F# lexer * Charles McLaughlin -- Puppet lexer +* Kurt McKee -- Tera Term macro lexer * Lukas Meuser -- BBCode formatter, Lua lexer * Cat Miller -- Pig lexer * Paul Miller -- LiveScript lexer diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index b48ee1d1..cdf490f7 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -405,6 +405,7 @@ LEXERS = { 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), 'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), 'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), + 'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)), 'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()), 'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()), 'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')), diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py index d0f8778a..86a4edfd 100644 --- a/pygments/lexers/rdf.py +++ b/pygments/lexers/rdf.py @@ -268,3 +268,10 @@ class TurtleLexer(RegexLexer): ], } + + # Turtle and Tera Term macro files share the same file extension + # but each has a recognizable and distinct syntax. 
+ def analyse_text(text): + for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '): + if re.search(r'^\s*%s' % t, text): + return 0.80 diff --git a/pygments/lexers/teraterm.py b/pygments/lexers/teraterm.py new file mode 100644 index 00000000..100a89e0 --- /dev/null +++ b/pygments/lexers/teraterm.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.teraterm + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for Tera Term macro files. + + :copyright: Copyright 2006-2018 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Text, Comment, Operator, Name, String, \ + Number, Keyword + +__all__ = ['TeraTermLexer'] + + +class TeraTermLexer(RegexLexer): + """ + For `Tera Term `_ macro source code. + + .. versionadded:: 2.4 + """ + name = 'Tera Term macro' + aliases = ['ttl', 'teraterm', 'teratermmacro'] + filenames = ['*.ttl'] + mimetypes = ['text/x-teratermmacro'] + + tokens = { + 'root': [ + include('comments'), + include('labels'), + include('commands'), + include('builtin-variables'), + include('user-variables'), + include('operators'), + include('numeric-literals'), + include('string-literals'), + include('all-whitespace'), + (r'[^\s]', Text), + ], + 'comments': [ + (r';[^\r\n]*', Comment.Single), + (r'/\*', Comment.Multiline, 'in-comment'), + ], + 'in-comment': [ + (r'\*/', Comment.Multiline, '#pop'), + (r'[^*/]+', Comment.Multiline), + (r'[*/]', Comment.Multiline) + ], + 'labels': [ + (r'(?i)^(\s*)(:[0-9a-z_]+)', bygroups(Text, Name.Label)), + ], + 'commands': [ + ( + r'(?i)\b(' + r'basename|beep|bplusrecv|bplussend|break|bringupbox|' + r'callmenu|changedir|checksum16|checksum16file|' + r'checksum32|checksum32file|checksum8|checksum8file|' + r'clearscreen|clipb2var|closesbox|closett|code2str|' + r'connect|continue|crc16|crc16file|crc32|crc32file|' + r'cygconnect|delpassword|dirname|dirnamebox|disconnect|' + r'dispstr|do|else|elseif|enablekeyb|end|endif|enduntil|' + r'endwhile|exec|execcmnd|exit|expandenv|fileclose|' + r'fileconcat|filecopy|filecreate|filedelete|filelock|' + r'filemarkptr|filenamebox|fileopen|fileread|filereadln|' + r'filerename|filesearch|fileseek|fileseekback|filestat|' + r'filestrseek|filestrseek2|filetruncate|fileunlock|' + r'filewrite|filewriteln|findclose|findfirst|findnext|' + r'flushrecv|foldercreate|folderdelete|foldersearch|for|' + r'getdate|getdir|getenv|getfileattr|gethostname|' + r'getipv4addr|getipv6addr|getmodemstatus|getpassword|' + r'getspecialfolder|gettime|gettitle|getttdir|getver|' + r'if|ifdefined|include|inputbox|int2str|intdim|' + r'ispassword|kmtfinish|kmtget|kmtrecv|kmtsend|listbox|' + r'loadkeymap|logautoclosemode|logclose|loginfo|logopen|' + r'logpause|logrotate|logstart|logwrite|loop|makepath|' + r'messagebox|mpause|next|passwordbox|pause|quickvanrecv|' + r'quickvansend|random|recvln|regexoption|restoresetup|' + r'return|rotateleft|rotateright|scprecv|scpsend|send|' + r'sendbreak|sendbroadcast|sendfile|sendkcode|sendln|' + r'sendlnbroadcast|sendlnmulticast|sendmulticast|setbaud|' + r'setdate|setdebug|setdir|setdlgpos|setdtr|setecho|' + r'setenv|setexitcode|setfileattr|setflowctrl|' + r'setmulticastname|setpassword|setrts|setsync|settime|' + r'settitle|show|showtt|sprintf|sprintf2|statusbox|' + r'str2code|str2int|strcompare|strconcat|strcopy|strdim|' + r'strinsert|strjoin|strlen|strmatch|strremove|' + r'strreplace|strscan|strspecial|strsplit|strtrim|' + r'testlink|then|tolower|toupper|unlink|until|uptime|' + 
r'var2clipb|wait|wait4all|waitevent|waitln|waitn|'
+                r'waitrecv|waitregex|while|xmodemrecv|xmodemsend|'
+                r'yesnobox|ymodemrecv|ymodemsend|zmodemrecv|zmodemsend'
+                r')\b',
+                Keyword,
+            ),
+            (
+                r'(?i)(call|goto)([ \t]+)([0-9a-z_]+)',
+                bygroups(Keyword, Text, Name.Label),
+            )
+        ],
+        'builtin-variables': [
+            (
+                r'(?i)('
+                r'groupmatchstr1|groupmatchstr2|groupmatchstr3|'
+                r'groupmatchstr4|groupmatchstr5|groupmatchstr6|'
+                r'groupmatchstr7|groupmatchstr8|groupmatchstr9|'
+                r'param1|param2|param3|param4|param5|param6|'
+                r'param7|param8|param9|paramcnt|params|'
+                r'inputstr|matchstr|mtimeout|result|timeout'
+                r')\b',
+                Name.Builtin
+            ),
+        ],
+        'user-variables': [
+            (r'(?i)[A-Z_][A-Z0-9_]*', Name.Variable),
+        ],
+        'numeric-literals': [
+            (r'(-?)([0-9]+)', bygroups(Operator, Number.Integer)),
+            (r'(?i)\$[0-9a-f]+', Number.Hex),
+        ],
+        'string-literals': [
+            (r'(?i)#(?:[0-9]+|\$[0-9a-f]+)', String.Char),
+            (r"'", String.Single, 'in-single-string'),
+            (r'"', String.Double, 'in-double-string'),
+        ],
+        'in-general-string': [
+            (r'[\\][\\nt]', String.Escape),  # Only three escapes are supported.
+            (r'.', String),
+        ],
+        'in-single-string': [
+            (r"'", String.Single, '#pop'),
+            include('in-general-string'),
+        ],
+        'in-double-string': [
+            (r'"', String.Double, '#pop'),
+            include('in-general-string'),
+        ],
+        'operators': [
+            (r'and|not|or|xor', Operator.Word),
+            (r'[!%&*+<=>^~\|\/-]+', Operator),
+            (r'[()]', String.Symbol),
+        ],
+        'all-whitespace': [
+            (r'[\s]+', Text),
+        ],
+    }
+
+    # Turtle and Tera Term macro files share the same file extension
+    # but each has a recognizable and distinct syntax.
+    def analyse_text(text):
+        result = 0.0
+        if re.search(TeraTermLexer.tokens['commands'][0][0], text):
+            result += 0.60
+        return result
diff --git a/tests/examplefiles/teraterm.ttl b/tests/examplefiles/teraterm.ttl
new file mode 100644
index 00000000..f6a3648a
--- /dev/null
+++ b/tests/examplefiles/teraterm.ttl
@@ -0,0 +1,34 @@
+messagebox "text \\not escaped \nescaped n" "other\n\rthing"
+messagebox "goto label /* a string */ ; same string"
+a=10
+b= 'abc'#$41'def'
+c =#65 /* multiline comment * / * / *//*
+comment */ d = 10 ; inline comment /* still inline */
+e = d + 20 - (($a * 2) / 4) << 3 % (2 >> 1) + result
+
+
+:thing
+
+strcompare c "thing"
+if result = 1 then
+ goto label_
+elseif result > -1 then
+ goto 10
+elseif d > (1+2*3)/7 then
+ messagebox "thing"
+else
+ messagebox "done"
+endif
+
+if abc messagebox "thing1" "title"
+
+
+; Invalid syntax
+bad = "no closing double quote
+bad = 'no closing single quote
+garbage
+...
+...
+...
+
+endgarbage
-- cgit v1.2.1


From e3872ea8f65ac2c3c40046a7a3acf4f0541f0e56 Mon Sep 17 00:00:00 2001
From: Andrés Carrasco
Date: Tue, 17 Apr 2018 19:11:12 +0200
Subject: Add a lexer for the Boa Domain-Specific Language.
--- pygments/lexers/_mapping.py | 1 + pygments/lexers/boa.py | 80 ++++++++++++++++++++++++++++++++++++++++++ tests/examplefiles/example.boa | 18 ++++++++++ 3 files changed, 99 insertions(+) create mode 100644 pygments/lexers/boa.py create mode 100644 tests/examplefiles/example.boa diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index b48ee1d1..a176238e 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -59,6 +59,7 @@ LEXERS = { 'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), 'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), 'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), + 'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()), 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), 'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), 'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), diff --git a/pygments/lexers/boa.py b/pygments/lexers/boa.py new file mode 100644 index 00000000..41398cd4 --- /dev/null +++ b/pygments/lexers/boa.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +import re + +from pygments.lexer import RegexLexer, words +from pygments.token import * + +__all__ = ['BoaLexer'] + +line_re = re.compile('.*?\n') + + +class BoaLexer(RegexLexer): + """ + http://boa.cs.iastate.edu/docs/ + """ + name = 'Boa' + aliases = ['boa'] + filenames = ['*.boa'] + + reserved = words( + ('input', 'output', 'of', 'weight', 'before', 'after', 'stop', 'ifall', 'foreach', 'exists', 'function', + 'break', 'switch', 'case', 'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'), + suffix=r'\b', prefix=r'\b') + keywords = words( + ('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum', 'top', 'string', 'int', 'bool', 'float', + 'time', 'false', 'true', 'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b') + classes = words( + ('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind', 'ChangedFile', 'FileKind', 'ASTRoot', + 'Namespace', 'Declaration', 'Type', 'Method', 'Variable', 'Statement', 'Expression', 'Modifier', + 'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility', 'TypeKind', 'Person', 'ChangeKind'), + suffix=r'\b', prefix=r'\b') + operators = ('->', ':=', ':', '=', '<<', '!', '++', '||', '&&', '+', '-', '*', ">", "<") + string_sep = ('`', '\"') + built_in_functions = words( + ( + # Array functions + 'new', 'sort', + # Date & Time functions + 'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now', 'addday', 'addmonth', 'addweek', 'addyear', + 'dayofmonth', 'dayofweek', 'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute', + 'trunctomonth', 'trunctosecond', 'trunctoyear', + # Map functions + 'clear', 'haskey', 'keys', 'lookup', 'remove', 'values', + # Math functions + 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'cos', 'cosh', 'exp', 'floor', + 'highbit', 'isfinite', 'isinf', 'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow', 'rand', + 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc', + # Other functions + 'def', 'hash', 'len', + # Set functions + 'add', 'contains', 'remove', + # String functions + 'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex', 'split', 
'splitall', 'splitn', + 'strfind', 'strreplace', 'strrfind', 'substring', 'trim', 'uppercase', + # Type Conversion functions + 'bool', 'float', 'int', 'string', 'time', + # Domain-Specific functions + 'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind', 'isliteral', + ), + prefix=r'\b', + suffix=r'\(') + + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (reserved, Keyword.Reserved), + (built_in_functions, Name.Function), + (keywords, Keyword.Type), + (classes, Name.Classes), + (words(operators), Operator), + (r'[][(),;{}\\.]', Punctuation), + (r'"(\\\\|\\"|[^"])*"', String), + (r'`(\\\\|\\`|[^`])*`', String), + (words(string_sep), String.Delimeter), + (r'[a-zA-Z_]+', Name.Variable), + (r'[0-9]+', Number.Integer), + (r'\s+?', Text), # Whitespace + ] + } diff --git a/tests/examplefiles/example.boa b/tests/examplefiles/example.boa new file mode 100644 index 00000000..a18f1626 --- /dev/null +++ b/tests/examplefiles/example.boa @@ -0,0 +1,18 @@ +# Computes Number of Public Methods (NPM) for each project, per-type +# Output is: NPM[ProjectID][TypeName] = NPM value +p: Project = input; +NPM: output sum[string][string] of int; + +visit(p, visitor { + # only look at the latest snapshot + before n: CodeRepository -> { + snapshot := getsnapshot(n); + foreach (i: int; def(snapshot[i])) + visit(snapshot[i]); + stop; + } + before node: Declaration -> + if (node.kind == TypeKind.CLASS) + foreach (i: int; has_modifier_public(node.methods[i])) + NPM[p.id][node.name] << 1; +}); -- cgit v1.2.1 From f6d828a71611401d480b131e618ece4c8b3efd1d Mon Sep 17 00:00:00 2001 From: Robin Jarry Date: Tue, 19 Jun 2018 12:17:42 +0200 Subject: lexers/make: allow more GNU make constructs * Allow nested macro expansions: $(eval $(var)) * Allow digits, dots, slashes and dashes in variable names in expansions: $(var-with.1-number) --- pygments/lexers/make.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pygments/lexers/make.py b/pygments/lexers/make.py index b222b672..c1e2f3a9 100644 --- a/pygments/lexers/make.py +++ b/pygments/lexers/make.py @@ -102,8 +102,8 @@ class BaseMakefileLexer(RegexLexer): (r'\$\(', Keyword, 'expansion'), ], 'expansion': [ - (r'[^$a-zA-Z_()]+', Text), - (r'[a-zA-Z_]+', Name.Variable), + (r'[^\w$().-]+', Text), + (r'[\w.-]+', Name.Variable), (r'\$', Keyword), (r'\(', Keyword, '#push'), (r'\)', Keyword, '#pop'), -- cgit v1.2.1 From a6fbcf91b92ae3c9b69c0dc26ddbccc1d9378f8a Mon Sep 17 00:00:00 2001 From: Robin Jarry Date: Tue, 19 Jun 2018 12:25:57 +0200 Subject: lexers/apache: allow '>' characters in tags Fix lexer to support this: = 2.4> ErrorLogFormat "%{cu}t %M" --- pygments/lexers/configs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index c39b1a52..9282fcbf 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -300,7 +300,7 @@ class ApacheConfLexer(RegexLexer): 'root': [ (r'\s+', Text), (r'(#.*?)$', Comment), - (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)', + (r'(<[^\s>]+)(?:(\s+)(.*))?(>)', bygroups(Name.Tag, Text, String, Name.Tag)), (r'([a-z]\w*)(\s+)', bygroups(Name.Builtin, Text), 'value'), -- cgit v1.2.1 From 731f5d2562fd1ff3a2497ecc8033e50f9e4cd6bc Mon Sep 17 00:00:00 2001 From: sgarnotel Date: Tue, 27 Nov 2018 16:53:38 +0100 Subject: Add FreeFem++ lexer --- AUTHORS | 1 + pygments/lexers/_mapping.py | 1 + pygments/lexers/freefem.py | 967 +++++++++++++++++++++++++++++++++++++++++ tests/examplefiles/freefem.edp | 94 ++++ 4 
files changed, 1063 insertions(+) create mode 100644 pygments/lexers/freefem.py create mode 100644 tests/examplefiles/freefem.edp diff --git a/AUTHORS b/AUTHORS index 18e642f1..da76dcfd 100644 --- a/AUTHORS +++ b/AUTHORS @@ -218,5 +218,6 @@ Other contributors, listed alphabetically, are: * Alex Zimin -- Nemerle lexer * Rob Zimmerman -- Kal lexer * Vincent Zurczak -- Roboconf lexer +* Simon Garnotel -- FreeFem++ lexer Many thanks for all contributions! diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index 9ccc7615..5e936412 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -160,6 +160,7 @@ LEXERS = { 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), + 'FreeFemLexer': ('pygments.lexers.freefem', 'FreeFem', ('freefem',), ('*.edp', '*.idp',), ('text/x-edpsrc',)), 'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), 'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), diff --git a/pygments/lexers/freefem.py b/pygments/lexers/freefem.py new file mode 100644 index 00000000..fd534915 --- /dev/null +++ b/pygments/lexers/freefem.py @@ -0,0 +1,967 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.freefem + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for FreeFem++ language. + + :copyright: see README.md. + :license: GPLv3, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \ + default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers import _mql_builtins + +__all__ = ['FreeFemLexer'] + + +class FreeFemLexer(CppLexer): + """ + For `FreeFem++ `_ source. 
+ + This is an extension of the CppLexer, as the FreeFem Language is a superset + of C++ + """ + + name = 'Freefem' + aliases = ['freefem'] + filenames = ['*.edp'] + mimetypes = ['text/x-freefem'] + + # Language operators + operators = set(('+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\'')) + + # types + types = set(( + 'bool', + 'border', + 'complex', + 'dmatrix', + 'fespace', + 'func', + 'gslspline', + 'ifstream', + 'int', + 'macro', + 'matrix', + 'mesh', + 'mesh3', + 'mpiComm', + 'mpiGroup', + 'mpiRequest', + 'NewMacro', + 'EndMacro', + 'ofstream', + 'Pmmap', + 'problem', + 'Psemaphore', + 'real', + 'solve', + 'string', + 'varf' + )) + + # finite element spaces + fespaces = set(( + 'BDM1', + 'BDM1Ortho', + 'Edge03d', + 'Edge13d', + 'Edge23d', + 'FEQF', + 'HCT', + 'P0', + 'P03d', + 'P0Edge', + 'P1', + 'P13d', + 'P1b', + 'P1b3d', + 'P1bl', + 'P1bl3d', + 'P1dc', + 'P1Edge', + 'P1nc', + 'P2', + 'P23d', + 'P2b', + 'P2BR', + 'P2dc', + 'P2Edge', + 'P2h', + 'P2Morley', + 'P2pnc', + 'P3', + 'P3dc', + 'P3Edge', + 'P4', + 'P4dc', + 'P4Edge', + 'P5Edge', + 'RT0', + 'RT03d', + 'RT0Ortho', + 'RT1', + 'RT1Ortho', + 'RT2', + 'RT2Ortho' + )) + + # preprocessor + preprocessor = set(( + 'ENDIFMACRO', + 'include', + 'IFMACRO', + 'load' + )) + + # Language keywords + keywords = set(( + 'adj', + 'append', + 'area', + 'ARGV', + 'be', + 'binary', + 'BoundaryEdge', + 'bordermeasure', + 'CG', + 'Cholesky', + 'cin', + 'cout', + 'Crout', + 'default', + 'diag', + 'edgeOrientation', + 'endl', + 'false', + 'ffind', + 'FILE', + 'find', + 'fixed', + 'flush', + 'GMRES', + 'good', + 'hTriangle', + 'im', + 'imax', + 'imin', + 'InternalEdge', + 'l1', + 'l2', + 'label', + 'lenEdge', + 'length', + 'LINE', + 'linfty', + 'LU', + 'm', + 'max', + 'measure', + 'min', + 'mpiAnySource', + 'mpiBAND', + 'mpiBXOR', + 'mpiCommWorld', + 'mpiLAND', + 'mpiLOR', + 'mpiLXOR', + 'mpiMAX', + 'mpiMIN', + 'mpiPROD', + 'mpirank', + 'mpisize', + 'mpiSUM', + 'mpiUndefined', + 'n', + 'N', + 'nbe', + 'ndof', + 'ndofK', + 'noshowbase', + 'noshowpos', + 'notaregion', + 'nt', + 'nTonEdge', + 'nuEdge', + 'nuTriangle', + 'nv', + 'P', + 'pi', + 'precision', + 'qf1pE', + 'qf1pElump', + 'qf1pT', + 'qf1pTlump', + 'qfV1', + 'qfV1lump', + 'qf2pE', + 'qf2pT', + 'qf2pT4P1', + 'qfV2', + 'qf3pE', + 'qf4pE', + 'qf5pE', + 'qf5pT', + 'qfV5', + 'qf7pT', + 'qf9pT', + 'qfnbpE', + 'quantile', + 're', + 'region', + 'rfind', + 'scientific', + 'searchMethod', + 'setw', + 'showbase', + 'showpos', + 'sparsesolver', + 'sum', + 'tellp', + 'true', + 'UMFPACK', + 'unused', + 'whoinElement', + 'verbosity', + 'version', + 'volume', + 'x', + 'y', + 'z' + )) + + # Language shipped functions and class ( ) + functions = set(( + 'abs', + 'acos', + 'acosh', + 'adaptmesh', + 'adj', + 'AffineCG', + 'AffineGMRES', + 'arg', + 'asin', + 'asinh', + 'assert', + 'atan', + 'atan2', + 'atanh', + 'atof', + 'atoi', + 'BFGS', + 'broadcast', + 'buildlayers', + 'buildmesh', + 'ceil', + 'chi', + 'complexEigenValue', + 'copysign', + 'change', + 'checkmovemesh', + 'clock', + 'cmaes', + 'conj', + 'convect', + 'cos', + 'cosh', + 'cube', + 'd', + 'dd', + 'dfft', + 'diffnp', + 'diffpos', + 'dimKrylov', + 'dist', + 'dumptable', + 'dx', + 'dxx', + 'dxy', + 'dxz', + 'dy', + 'dyx', + 'dyy', + 'dyz', + 'dz', + 'dzx', + 'dzy', + 'dzz', + 'EigenValue', + 'emptymesh', + 'erf', + 'erfc', + 'exec', + 'exit', + 'exp', + 'fdim', + 'floor', + 'fmax', + 'fmin', + 'fmod', + 'freeyams', + 'getARGV', + 'getline', + 'gmshload', + 'gmshload3', + 'gslcdfugaussianP', + 'gslcdfugaussianQ', + 'gslcdfugaussianPinv', + 
'gslcdfugaussianQinv', + 'gslcdfgaussianP', + 'gslcdfgaussianQ', + 'gslcdfgaussianPinv', + 'gslcdfgaussianQinv', + 'gslcdfgammaP', + 'gslcdfgammaQ', + 'gslcdfgammaPinv', + 'gslcdfgammaQinv', + 'gslcdfcauchyP', + 'gslcdfcauchyQ', + 'gslcdfcauchyPinv', + 'gslcdfcauchyQinv', + 'gslcdflaplaceP', + 'gslcdflaplaceQ', + 'gslcdflaplacePinv', + 'gslcdflaplaceQinv', + 'gslcdfrayleighP', + 'gslcdfrayleighQ', + 'gslcdfrayleighPinv', + 'gslcdfrayleighQinv', + 'gslcdfchisqP', + 'gslcdfchisqQ', + 'gslcdfchisqPinv', + 'gslcdfchisqQinv', + 'gslcdfexponentialP', + 'gslcdfexponentialQ', + 'gslcdfexponentialPinv', + 'gslcdfexponentialQinv', + 'gslcdfexppowP', + 'gslcdfexppowQ', + 'gslcdftdistP', + 'gslcdftdistQ', + 'gslcdftdistPinv', + 'gslcdftdistQinv', + 'gslcdffdistP', + 'gslcdffdistQ', + 'gslcdffdistPinv', + 'gslcdffdistQinv', + 'gslcdfbetaP', + 'gslcdfbetaQ', + 'gslcdfbetaPinv', + 'gslcdfbetaQinv', + 'gslcdfflatP', + 'gslcdfflatQ', + 'gslcdfflatPinv', + 'gslcdfflatQinv', + 'gslcdflognormalP', + 'gslcdflognormalQ', + 'gslcdflognormalPinv', + 'gslcdflognormalQinv', + 'gslcdfgumbel1P', + 'gslcdfgumbel1Q', + 'gslcdfgumbel1Pinv', + 'gslcdfgumbel1Qinv', + 'gslcdfgumbel2P', + 'gslcdfgumbel2Q', + 'gslcdfgumbel2Pinv', + 'gslcdfgumbel2Qinv', + 'gslcdfweibullP', + 'gslcdfweibullQ', + 'gslcdfweibullPinv', + 'gslcdfweibullQinv', + 'gslcdfparetoP', + 'gslcdfparetoQ', + 'gslcdfparetoPinv', + 'gslcdfparetoQinv', + 'gslcdflogisticP', + 'gslcdflogisticQ', + 'gslcdflogisticPinv', + 'gslcdflogisticQinv', + 'gslcdfbinomialP', + 'gslcdfbinomialQ', + 'gslcdfpoissonP', + 'gslcdfpoissonQ', + 'gslcdfgeometricP', + 'gslcdfgeometricQ', + 'gslcdfnegativebinomialP', + 'gslcdfnegativebinomialQ', + 'gslcdfpascalP', + 'gslcdfpascalQ', + 'gslinterpakima', + 'gslinterpakimaperiodic', + 'gslinterpcsplineperiodic', + 'gslinterpcspline', + 'gslinterpsteffen', + 'gslinterplinear', + 'gslinterppolynomial', + 'gslranbernoullipdf', + 'gslranbeta', + 'gslranbetapdf', + 'gslranbinomialpdf', + 'gslranexponential', + 'gslranexponentialpdf', + 'gslranexppow', + 'gslranexppowpdf', + 'gslrancauchy', + 'gslrancauchypdf', + 'gslranchisq', + 'gslranchisqpdf', + 'gslranerlang', + 'gslranerlangpdf', + 'gslranfdist', + 'gslranfdistpdf', + 'gslranflat', + 'gslranflatpdf', + 'gslrangamma', + 'gslrangammaint', + 'gslrangammapdf', + 'gslrangammamt', + 'gslrangammaknuth', + 'gslrangaussian', + 'gslrangaussianratiomethod', + 'gslrangaussianziggurat', + 'gslrangaussianpdf', + 'gslranugaussian', + 'gslranugaussianratiomethod', + 'gslranugaussianpdf', + 'gslrangaussiantail', + 'gslrangaussiantailpdf', + 'gslranugaussiantail', + 'gslranugaussiantailpdf', + 'gslranlandau', + 'gslranlandaupdf', + 'gslrangeometricpdf', + 'gslrangumbel1', + 'gslrangumbel1pdf', + 'gslrangumbel2', + 'gslrangumbel2pdf', + 'gslranlogistic', + 'gslranlogisticpdf', + 'gslranlognormal', + 'gslranlognormalpdf', + 'gslranlogarithmicpdf', + 'gslrannegativebinomialpdf', + 'gslranpascalpdf', + 'gslranpareto', + 'gslranparetopdf', + 'gslranpoissonpdf', + 'gslranrayleigh', + 'gslranrayleighpdf', + 'gslranrayleightail', + 'gslranrayleightailpdf', + 'gslrantdist', + 'gslrantdistpdf', + 'gslranlaplace', + 'gslranlaplacepdf', + 'gslranlevy', + 'gslranweibull', + 'gslranweibullpdf', + 'gslsfairyAi', + 'gslsfairyBi', + 'gslsfairyAiscaled', + 'gslsfairyBiscaled', + 'gslsfairyAideriv', + 'gslsfairyBideriv', + 'gslsfairyAiderivscaled', + 'gslsfairyBiderivscaled', + 'gslsfairyzeroAi', + 'gslsfairyzeroBi', + 'gslsfairyzeroAideriv', + 'gslsfairyzeroBideriv', + 'gslsfbesselJ0', + 'gslsfbesselJ1', + 'gslsfbesselJn', 
+ 'gslsfbesselY0', + 'gslsfbesselY1', + 'gslsfbesselYn', + 'gslsfbesselI0', + 'gslsfbesselI1', + 'gslsfbesselIn', + 'gslsfbesselI0scaled', + 'gslsfbesselI1scaled', + 'gslsfbesselInscaled', + 'gslsfbesselK0', + 'gslsfbesselK1', + 'gslsfbesselKn', + 'gslsfbesselK0scaled', + 'gslsfbesselK1scaled', + 'gslsfbesselKnscaled', + 'gslsfbesselj0', + 'gslsfbesselj1', + 'gslsfbesselj2', + 'gslsfbesseljl', + 'gslsfbessely0', + 'gslsfbessely1', + 'gslsfbessely2', + 'gslsfbesselyl', + 'gslsfbesseli0scaled', + 'gslsfbesseli1scaled', + 'gslsfbesseli2scaled', + 'gslsfbesselilscaled', + 'gslsfbesselk0scaled', + 'gslsfbesselk1scaled', + 'gslsfbesselk2scaled', + 'gslsfbesselklscaled', + 'gslsfbesselJnu', + 'gslsfbesselYnu', + 'gslsfbesselInuscaled', + 'gslsfbesselInu', + 'gslsfbesselKnuscaled', + 'gslsfbesselKnu', + 'gslsfbessellnKnu', + 'gslsfbesselzeroJ0', + 'gslsfbesselzeroJ1', + 'gslsfbesselzeroJnu', + 'gslsfclausen', + 'gslsfhydrogenicR1', + 'gslsfdawson', + 'gslsfdebye1', + 'gslsfdebye2', + 'gslsfdebye3', + 'gslsfdebye4', + 'gslsfdebye5', + 'gslsfdebye6', + 'gslsfdilog', + 'gslsfmultiply', + 'gslsfellintKcomp', + 'gslsfellintEcomp', + 'gslsfellintPcomp', + 'gslsfellintDcomp', + 'gslsfellintF', + 'gslsfellintE', + 'gslsfellintRC', + 'gslsferfc', + 'gslsflogerfc', + 'gslsferf', + 'gslsferfZ', + 'gslsferfQ', + 'gslsfhazard', + 'gslsfexp', + 'gslsfexpmult', + 'gslsfexpm1', + 'gslsfexprel', + 'gslsfexprel2', + 'gslsfexpreln', + 'gslsfexpintE1', + 'gslsfexpintE2', + 'gslsfexpintEn', + 'gslsfexpintE1scaled', + 'gslsfexpintE2scaled', + 'gslsfexpintEnscaled', + 'gslsfexpintEi', + 'gslsfexpintEiscaled', + 'gslsfShi', + 'gslsfChi', + 'gslsfexpint3', + 'gslsfSi', + 'gslsfCi', + 'gslsfatanint', + 'gslsffermidiracm1', + 'gslsffermidirac0', + 'gslsffermidirac1', + 'gslsffermidirac2', + 'gslsffermidiracint', + 'gslsffermidiracmhalf', + 'gslsffermidirachalf', + 'gslsffermidirac3half', + 'gslsffermidiracinc0', + 'gslsflngamma', + 'gslsfgamma', + 'gslsfgammastar', + 'gslsfgammainv', + 'gslsftaylorcoeff', + 'gslsffact', + 'gslsfdoublefact', + 'gslsflnfact', + 'gslsflndoublefact', + 'gslsflnchoose', + 'gslsfchoose', + 'gslsflnpoch', + 'gslsfpoch', + 'gslsfpochrel', + 'gslsfgammaincQ', + 'gslsfgammaincP', + 'gslsfgammainc', + 'gslsflnbeta', + 'gslsfbeta', + 'gslsfbetainc', + 'gslsfgegenpoly1', + 'gslsfgegenpoly2', + 'gslsfgegenpoly3', + 'gslsfgegenpolyn', + 'gslsfhyperg0F1', + 'gslsfhyperg1F1int', + 'gslsfhyperg1F1', + 'gslsfhypergUint', + 'gslsfhypergU', + 'gslsfhyperg2F0', + 'gslsflaguerre1', + 'gslsflaguerre2', + 'gslsflaguerre3', + 'gslsflaguerren', + 'gslsflambertW0', + 'gslsflambertWm1', + 'gslsflegendrePl', + 'gslsflegendreP1', + 'gslsflegendreP2', + 'gslsflegendreP3', + 'gslsflegendreQ0', + 'gslsflegendreQ1', + 'gslsflegendreQl', + 'gslsflegendrePlm', + 'gslsflegendresphPlm', + 'gslsflegendrearraysize', + 'gslsfconicalPhalf', + 'gslsfconicalPmhalf', + 'gslsfconicalP0', + 'gslsfconicalP1', + 'gslsfconicalPsphreg', + 'gslsfconicalPcylreg', + 'gslsflegendreH3d0', + 'gslsflegendreH3d1', + 'gslsflegendreH3d', + 'gslsflog', + 'gslsflogabs', + 'gslsflog1plusx', + 'gslsflog1plusxmx', + 'gslsfpowint', + 'gslsfpsiint', + 'gslsfpsi', + 'gslsfpsi1piy', + 'gslsfpsi1int', + 'gslsfpsi1', + 'gslsfpsin', + 'gslsfsynchrotron1', + 'gslsfsynchrotron2', + 'gslsftransport2', + 'gslsftransport3', + 'gslsftransport4', + 'gslsftransport5', + 'gslsfsin', + 'gslsfcos', + 'gslsfhypot', + 'gslsfsinc', + 'gslsflnsinh', + 'gslsflncosh', + 'gslsfanglerestrictsymm', + 'gslsfanglerestrictpos', + 'gslsfzetaint', + 'gslsfzeta', + 'gslsfzetam1', + 
'gslsfzetam1int', + 'gslsfhzeta', + 'gslsfetaint', + 'gslsfeta', + 'imag', + 'int1d', + 'int2d', + 'int3d', + 'intalledges', + 'intallfaces', + 'interpolate', + 'invdiff', + 'invdiffnp', + 'invdiffpos', + 'Isend', + 'isInf', + 'isNaN', + 'isoline', + 'Irecv', + 'j0', + 'j1', + 'jn', + 'jump', + 'lgamma', + 'LinearCG', + 'LinearGMRES', + 'log', + 'log10', + 'lrint', + 'lround', + 'max', + 'mean', + 'medit', + 'min', + 'mmg3d', + 'movemesh', + 'movemesh23', + 'mpiAlltoall', + 'mpiAlltoallv', + 'mpiAllgather', + 'mpiAllgatherv', + 'mpiAllReduce', + 'mpiBarrier', + 'mpiGather', + 'mpiGatherv', + 'mpiRank', + 'mpiReduce', + 'mpiScatter', + 'mpiScatterv', + 'mpiSize', + 'mpiWait', + 'mpiWaitAny', + 'mpiWtick', + 'mpiWtime', + 'mshmet', + 'NaN', + 'NLCG', + 'on', + 'plot', + 'polar', + 'Post', + 'pow', + 'processor', + 'processorblock', + 'projection', + 'randinit', + 'randint31', + 'randint32', + 'random', + 'randreal1', + 'randreal2', + 'randreal3', + 'randres53', + 'Read', + 'readmesh', + 'readmesh3', + 'Recv', + 'rint', + 'round', + 'savemesh', + 'savesol', + 'savevtk', + 'seekg', + 'Sent', + 'set', + 'sign', + 'signbit', + 'sin', + 'sinh', + 'sort', + 'splitComm', + 'splitmesh', + 'sqrt', + 'square', + 'srandom', + 'srandomdev', + 'Stringification', + 'swap', + 'system', + 'tan', + 'tanh', + 'tellg', + 'tetg', + 'tetgconvexhull', + 'tetgreconstruction', + 'tetgtransfo', + 'tgamma', + 'triangulate', + 'trunc', + 'Wait', + 'Write', + 'y0', + 'y1', + 'yn' + )) + + # function parameters + parameters = set(( + 'A', + 'A1', + 'abserror', + 'absolute', + 'aniso', + 'aspectratio', + 'B', + 'B1', + 'bb', + 'beginend', + 'bin', + 'boundary', + 'bw', + 'close', + 'cmm', + 'coef', + 'composante', + 'cutoff', + 'datafilename', + 'dataname', + 'dim', + 'distmax', + 'displacement', + 'doptions', + 'dparams', + 'eps', + 'err', + 'errg', + 'facemerge', + 'facetcl', + 'factorize', + 'file', + 'fill', + 'fixedborder', + 'flabel', + 'flags', + 'floatmesh', + 'floatsol', + 'fregion', + 'gradation', + 'grey', + 'hmax', + 'hmin', + 'holelist', + 'hsv', + 'init', + 'inquire', + 'inside', + 'IsMetric', + 'iso', + 'ivalue', + 'keepbackvertices', + 'label', + 'labeldown', + 'labelmid', + 'labelup', + 'levelset', + 'loptions', + 'lparams', + 'maxit', + 'maxsubdiv', + 'meditff', + 'mem', + 'memory', + 'metric', + 'mode', + 'nbarrow', + 'nbiso', + 'nbiter', + 'nbjacoby', + 'nboffacetcl', + 'nbofholes', + 'nbofregions', + 'nbregul', + 'nbsmooth', + 'nbvx', + 'ncv', + 'nev', + 'nomeshgeneration', + 'normalization', + 'omega', + 'op', + 'optimize', + 'option', + 'options', + 'order', + 'orientation', + 'periodic', + 'power', + 'precon', + 'prev', + 'ps', + 'ptmerge', + 'qfe', + 'qforder', + 'qft', + 'qfV', + 'ratio', + 'rawvector', + 'reffacelow', + 'reffacemid', + 'reffaceup', + 'refnum', + 'reftet', + 'reftri', + 'region', + 'regionlist', + 'renumv', + 'rescaling', + 'ridgeangle', + 'save', + 'sigma', + 'sizeofvolume', + 'smoothing', + 'solver', + 'sparams', + 'split', + 'splitin2', + 'splitpbedge', + 'stop', + 'strategy', + 'swap', + 'switch', + 'sym', + 't', + 'tgv', + 'thetamax', + 'tol', + 'tolpivot', + 'tolpivotsym', + 'transfo', + 'U2Vc', + 'value', + 'varrow', + 'vector', + 'veps', + 'viso', + 'wait', + 'width', + 'withsurfacemesh', + 'WindowIndex', + 'which', + 'zbound' + )) + + # deprecated + deprecated = set(( + 'fixeborder' + )) + + # do not highlight + suppress_highlight = set(( + 'alignof', + 'asm', + 'constexpr', + 'decltype', + 'div', + 'double', + 'grad', + 'mutable', + 'namespace', + 'noexcept', + 
'restrict', + 'static_assert', + 'template', + 'this', + 'thread_local', + 'typeid', + 'typename', + 'using' + )) + + def get_tokens_unprocessed(self, text): + for index, token, value in CppLexer.get_tokens_unprocessed(self, text): + if value in self.operators: + yield index, Operator, value + elif value in self.types: + yield index, Keyword.Type, value + elif value in self.fespaces: + yield index, Name.Class, value + elif value in self.preprocessor: + yield index, Comment.Preproc, value + elif value in self.keywords: + yield index, Keyword.Reserved, value + elif value in self.functions: + yield index, Name.Function, value + elif value in self.parameters: + yield index, Keyword.Pseudo, value + elif value in self.suppress_highlight: + yield index, Name, value + else: + yield index, token, value diff --git a/tests/examplefiles/freefem.edp b/tests/examplefiles/freefem.edp new file mode 100644 index 00000000..d4313338 --- /dev/null +++ b/tests/examplefiles/freefem.edp @@ -0,0 +1,94 @@ +// Example of problem solving in parallel + +// Usage: +// ff-mpirun -np 12 LaplacianParallel.edp (here 12 is the number of threads (command nproc to know that) +// Need FreeFem++ with PETSc + +// Parallel stuff +load "PETSc" +macro partitioner()metis// +macro dimension()2// +include "./macro_ddm.idp" + +macro def(i)[i]// +macro init(i)[i]// +//macro meshN()mesh// //these macro are defined in macro_ddm.idp +//macro intN()int2d// + +// Parameters +int nn = 500; +real L = 1.; +real H = 1.; + +func f = 1.; + +func Pk = P1; + +// Mesh +border b1(t=0, L){x=t; y=0; label=1;} +border b2(t=0, H){x=L; y=t; label=2;} +border b3(t=L, 0){x=t; y=H; label=3;} +border b4(t=H, 0){x=0; y=t; label=4;} + +meshN Th = buildmesh(b1(1) + b2(1) + b3(1) + b4(1)); //build a really coarse mesh (just to build the fespace later) +//meshN Th = square(1, 1, [L*x, H*y]); + +int[int] Wall = [1, 2, 3, 4]; + +// Fespace +fespace Uh(Th, Pk); + +// Mesh partition +int[int] ArrayIntersection; +int[int][int] RestrictionIntersection(0); +real[int] D; + +meshN ThBorder; +meshN ThGlobal = buildmesh(b1(nn*L) + b2(nn*H) + b3(nn*L) + b4(nn*H)); //build the mesh to partition +//meshN ThGlobal = square(nn*L, nn*H, [L*x, H*y]); +int InterfaceLabel = 10; +int Split = 1; +int Overlap = 1; +build(Th, ThBorder, ThGlobal, InterfaceLabel, Split, Overlap, D, ArrayIntersection, RestrictionIntersection, Uh, Pk, mpiCommWorld, false); //see macro_ddm.idp for detailed parameters + +// Macro +macro grad(u) [dx(u), dy(u)] // + +// Problem +varf vLaplacian (u, uh) //Problem in varf formulation mandatory + = intN(Th)( + grad(u)' * grad(uh) + ) + - intN(Th)( + f * uh + ) + + on(Wall, u=0) + ; + +matrix Laplacian = vLaplacian(Uh, Uh); //build the sequential matrix +real[int] LaplacianBoundary = vLaplacian(0, Uh);// and right hand side + +//// In sequential, you normally do that: +//// Solve +//Uh def(u)=init(0); +//u[] = Laplacian^-1 * LaplacianBoundary; + +//// Plot +//plot(u); + +// In parallel: +// Matrix construction +dmatrix PLaplacian(Laplacian, ArrayIntersection, RestrictionIntersection, D, bs=1); //build the parallel matrix +set(PLaplacian, sparams="-pc_type lu -pc_factor_mat_solver_package mumps"); //preconditioner LU and MUMPS solver (see PETSc doc for detailed parameters) + +// Solve +Uh def(u)=init(0); //define the unknown (must be defined after mesh partitioning) +u[] = PLaplacian^-1 * LaplacianBoundary; + +// Export results to vtk (there is not plot in parallel) +{ + fespace PV(Th, P1); + PV uu=u; + int[int] Order = [1]; + export("Result", Th, uu, Order, 
mpiCommWorld);
+}
-- cgit v1.2.1


From 665f287849402bdf05ef636e77989104e6cf8e9e Mon Sep 17 00:00:00 2001
From: Charles Ferguson
Date: Thu, 14 Mar 2019 22:24:08 +0000
Subject: Create a Lexer class for BBC Basic files.

The Lexer class for BBC Basic handles both the numbered and unnumbered
lines of the detokenised (text) format of BBC BASIC. The tokeniser
copes, in a naive manner, with the original versions and BASIC V. It
does not handle other extensions at this time, nor does it handle
inline assembler. This should be sufficient for most cases where code
needs to be presented in a colourful manner.
---
 pygments/lexers/_mapping.py | 1 +
 pygments/lexers/basic.py | 99 +++++++++++++++++++++++++-
 tests/examplefiles/example.bbc | 156 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 254 insertions(+), 2 deletions(-)
 create mode 100644 tests/examplefiles/example.bbc

diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index f8207e70..e57124db 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -47,6 +47,7 @@ LEXERS = {
     'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
     'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
     'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
+    'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
     'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
     'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
     'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py
index b0409386..4d957c2b 100644
--- a/pygments/lexers/basic.py
+++ b/pygments/lexers/basic.py
@@ -18,7 +18,8 @@ from pygments.lexers import _vbscript_builtins
 __all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
-           'QBasicLexer', 'VBScriptLexer']
+           'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
+

 class BlitzMaxLexer(RegexLexer):
@@ -561,4 +562,98 @@ class VBScriptLexer(RegexLexer):
         (r'"', String.Double, '#pop'),
         (r'\n', Error, '#pop'),  # Unterminated string
     ],
-    }
\ No newline at end of file
+    }
+
+
+class BBCBasicLexer(RegexLexer):
+    """
+    BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
+    It is also used by BBC Basic For Windows.
+
+    .. versionadded:: 2.4
+ """ + base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR', + 'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN', + 'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS', + 'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT', + 'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN', + 'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT', + 'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND', + 'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL', + 'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$', + 'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME', + 'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR', + 'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END', + 'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF', + 'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON', + 'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT', + 'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR', + 'TRACE', 'UNTIL', 'WIDTH', 'OSCLI'] + + basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE', + 'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP', + 'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL', + 'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES', + 'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH', + 'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW', + 'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE', + 'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT'] + + + name = 'BBC Basic' + aliases = ['bbcbasic'] + filenames = ['*.bbc'] + + tokens = { + 'root': [ + (r"[0-9]+", Name.Label), + (r"(\*)([^\n]*)", + bygroups(Keyword.Pseudo, Comment.Special)), + (r"", Whitespace, 'code'), + ], + + 'code': [ + (r"(REM)([^\n]*)", + bygroups(Keyword.Declaration, Comment.Single)), + (r'\n', Whitespace, 'root'), + (r'\s+', Whitespace), + (r':', Comment.Preproc), + + # Some special cases to make functions come out nicer + (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][A-Za-z0-9_@]*)', + bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)), + (r'(FN|PROC)([A-Za-z_@][A-Za-z0-9_@]*)', + bygroups(Keyword, Name.Function)), + + (r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)', + bygroups(Keyword, Whitespace, Name.Label)), + + (r'(TRUE|FALSE)', Keyword.Constant), + (r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo), + + (words(base_keywords), Keyword), + (words(basic5_keywords), Keyword), + + ('"', String.Double, 'string'), + + ('%[01]{1,32}', Number.Bin), + ('&[0-9a-f]{1,8}', Number.Hex), + + (r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float), + (r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float), + (r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float), + (r'[+-]?\d+', Number.Integer), + + (r'([A-Za-z_@][A-Za-z0-9_@]*[%$]?)', Name.Variable), + (r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator), + ], + 'string': [ + (r'[^"\n]+', String.Double), + (r'"', String.Double, '#pop'), + (r'\n', Error, 'root'), # Unterminated string + ], + } + + def analyse_text(text): + if text.startswith('10REM >') or text.startswith('REM >'): + return 0.9 diff --git a/tests/examplefiles/example.bbc b/tests/examplefiles/example.bbc new file mode 100644 index 00000000..ebdb8537 --- /dev/null +++ b/tests/examplefiles/example.bbc @@ -0,0 +1,156 @@ +10REM >EIRC +20REM The simplest IRC client you can write. Maybe. 
+30REM (C) Justin Fletcher, 1998 +40: +50END=PAGE+1024*16 +60REM Change these if you wish +70host$="irc.stealth.net" +80port=6667 +90nick$="eirc" +100ourchan$="#acorn" +110: +120REM Start connecting to a host +130SYS "ESocket_ConnectToHost",host$,port TO handle +140REPEAT +150 SYS "ESocket_CheckState",handle TO state +160 IF state<-1 THENSYS "ESocket_Forget",handle:SYS "ESocket_DecodeState",state TO a$:ERROR 1,"Failed ("+a$+")" +170UNTIL state=4 +180: +190REM We are now connected +200PRINT"Connected" +210: +220REM Log on to the server +230SYS "ESocket_SendLine",handle,"USER "+nick$+" x x :"+nick$ +240SYS "ESocket_SendLine",handle,"NICK "+nick$ +250SYS "ESocket_SendLine",handle,"JOIN "+ourchan$ +260REM Install a monitor so that we don't waste time +270SYS "ESocket_Monitor",0,handle TO monitor +280SYS "ESocket_ResetMonitor",monitor,0 TO polladdr% +290: +300REM If we crash, we should tidy up after ourselves +310ON ERROR SYS "XESocket_Forget",handle:SYS "XESocket_Forget",monitor:ERROR EXT ERR,REPORT$+" at line "+STR$ERL +320: +330REM Memory buffer for our data +340bufsize%=1024 +350DIM buf% bufsize% +360: +370input$="":REM The input line +380REPEAT +390 REM In a taskwindow we should yield until there is data +400 SYS "OS_UpCall",6,polladdr% +410 IF !polladdr%<>0 THEN +420 REM Reset the monitor for the time being +430 SYS "ESocket_ResetMonitor",monitor,0 TO polladdr% +440 REPEAT +450 REM Read lines from the connection until this buffer is empty +460 SYS "ESocket_ReadLine",handle,buf%,bufsize%,%100 TO ,str,len +470 IF str<>0 AND $str<>"" THEN +480 line$=$str +490 IF LEFT$(line$,4)="PING" THEN +500 REM Ping's must be replied to immediately +510 SYS "ESocket_SendLine",handle,"PONG "+MID$(line$,6) +520 ELSE +530 REM Extract source info +540 from$=MID$(LEFT$(line$,INSTR(line$+" "," ")-1),2) +550 line$=MID$(line$,INSTR(line$+" "," ")+1) +560 uid$=LEFT$(from$,INSTR(from$+"!","!")-1) +570 com$=LEFT$(line$,INSTR(line$+" "," ")-1) +580 line$=MID$(line$,INSTR(line$+" "," ")+1) +590 REM remove the input line +600 IF input$<>"" THENFORI=1TOLEN(input$):VDU127:NEXT +610 CASE FNupper(com$) OF +620 WHEN "PRIVMSG" +630 REM Extract the destination +640 chan$=LEFT$(line$,INSTR(line$+" "," ")-1) +650 line$=MID$(line$,INSTR(line$+" "," ")+2):REM Skip : +660 IF LEFT$(line$,1)=CHR$1 THEN +670 REM CTCP, so respond to it +680 line$=MID$(line$,2,LEN(line$)-2) +690 com$=LEFT$(line$,INSTR(line$+" "," ")-1) +700 line$=MID$(line$,INSTR(line$+" "," ")+1) +710 CASE FNupper(com$) OF +720 WHEN "PING" +730 REM Ping lag timing +740 line$="PONG "+line$ +750 PRINTuid$;" pinged us" +760 WHEN "VERSION" +770 REM Version checking +780 line$="VERSION EIRC 1.00 (c) Justin Fletcher" +790 PRINTuid$;" wanted our version" +800 WHEN "ACTION" +810 PRINT"* ";uid$;" ";line$ +820 line$="" +830 OTHERWISE +840 REM everything else is an error +850 line$="ERRMSG "+com$+" not understood" +860 PRINT"CTCP '";com$;"' from ";uid$;" (";line$;")" +870 ENDCASE +880 IF line$<>"" THEN +890 SYS "ESocket_SendLine",handle,"NOTICE "+uid$+" :"+CHR$1+line$+CHR$1 +900 ENDIF +910 ELSE +920 REM Somebody said something... 
+930 PRINT"<";uid$;"> ";FNsafe(line$) +940 ENDIF +950 WHEN "JOIN" +960 REM We (or someone else) has joined the channel +970 chan$=LEFT$(line$,INSTR(line$+" "," ")):REM Skip : +980 IF LEFT$(chan$,1)=":" THENchan$=MID$(chan$,2) +990 PRINTuid$;" has joined ";chan$ +1000 WHEN "PART" +1010 REM Someone else has left the channel +1020 chan$=LEFT$(line$,INSTR(line$+" "," ")-1) +1030 IF LEFT$(chan$,1)=":" THENchan$=MID$(chan$,2) +1040 PRINTuid$;" has left ";chan$ +1050 WHEN "QUIT" +1060 REM Someone else has quit IRC +1070 PRINTuid$;" quit IRC" +1080 OTHERWISE +1090 REM Some unknown command +1100 PRINTuid$;":";com$;":";FNsafe(line$) +1110 ENDCASE +1120 REM Re-display our input line +1130 PRINTinput$; +1140 ENDIF +1150 ENDIF +1160 UNTIL str=0 +1170 ENDIF +1180 b$=INKEY$(0) +1190 IF b$<>"" THEN +1200 CASE b$ OF +1210 WHEN CHR$13 +1220 SYS "ESocket_SendLine",handle,"PRIVMSG "+ourchan$+" :"+input$ +1230 REM Remove the line +1240 IF input$<>"" THENFORI=1TOLEN(input$):VDU127:NEXT +1250 REM We said it... +1260 PRINT"<"+nick$+"> ";input$ +1270 input$="" +1280 WHEN CHR$127,CHR$8 +1290 REM Backspace +1300 IF input$<>"" THENVDU127 +1310 input$=LEFT$(input$) +1320 OTHERWISE +1330 REM Ad to current input +1340 input$+=b$ +1350 PRINTb$; +1360 ENDCASE +1370 ENDIF +1380 REM Has the socket closed +1390 SYS "ESocket_Closed",handle,%0 TO closed +1400UNTIL closed +1410SYS "ESocket_Forget",handle +1420SYS "ESocket_Forget",monitor +1430END +1440: +1450DEFFNupper(a$):LOCAL c$,b$,I +1460FORI=1TOLEN(a$) +1470c$=MID$(a$,I,1):IF c$>="a"ANDc$<="z"THENc$=CHR$(ASC(c$)-32) +1480b$+=c$:NEXT:=b$ +1490 +1500REM Remove control codes +1510DEFFNsafe(line$) +1520LOCAL I +1530FORI=1TOLEN(line$) +1540 IF MID$(line$,I,1)<" " THENMID$(line$,I,1)="*" +1550NEXT +1560=line$ -- cgit v1.2.1 From 1bcd973510624a5449b2b1155229b9eb4ca85f70 Mon Sep 17 00:00:00 2001 From: "Frederik ?Freso? S. Olesen" Date: Sun, 31 Mar 2019 17:32:01 +0200 Subject: Add lexers for DASM16, Augeas, TOML, and Slash Lexers copied unmodified from https://github.com/liluo/pygments-github-lexers which is available under a 2-clause BSD license (same as pygments), copyright 2012 to GitHub, Inc. Fixes #1391 and #1150. --- AUTHORS | 1 + CHANGES | 4 + pygments/lexers/_mapping.py | 4 + pygments/lexers/asm.py | 106 ++++++++++++++++++++++++- pygments/lexers/configs.py | 79 ++++++++++++++++++- pygments/lexers/slash.py | 184 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 376 insertions(+), 2 deletions(-) create mode 100644 pygments/lexers/slash.py diff --git a/AUTHORS b/AUTHORS index ed9c547c..e6f90fea 100644 --- a/AUTHORS +++ b/AUTHORS @@ -221,5 +221,6 @@ Other contributors, listed alphabetically, are: * Rob Zimmerman -- Kal lexer * Vincent Zurczak -- Roboconf lexer * Rostyslav Golda -- FloScript lexer +* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers Many thanks for all contributions! 
diff --git a/CHANGES b/CHANGES
index 15863b27..3464ef22 100644
--- a/CHANGES
+++ b/CHANGES
@@ -19,6 +19,10 @@ Version 2.4.0
 * Slurm (PR#760)
 * Unicon (PR#731)
 * VBScript (PR#673)
+ * DASM16 (PR#807)
+ * Augeas (PR#807)
+ * TOML (PR#807)
+ * Slash (PR#807)

 - Updated lexers:

diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 6e0f728c..3c05d147 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -44,6 +44,7 @@ LEXERS = {
     'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
     'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
     'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
+    'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
     'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
     'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
     'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
@@ -121,6 +122,7 @@ LEXERS = {
     'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
     'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
     'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
+    'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
     'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
     'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
     'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
@@ -375,6 +377,7 @@ LEXERS = {
     'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
     'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)),
     'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
+    'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
     'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
     'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
     'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
@@ -421,6 +424,7 @@ LEXERS = {
     'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
     'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
     'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
+    'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml',), ()),
     'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
     'TreetopLexer': 
('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 2f08d510..761b3315 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -20,7 +20,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ __all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer', 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'NasmLexer', - 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer'] + 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer', 'Dasm16Lexer'] class GasLexer(RegexLexer): @@ -650,3 +650,107 @@ class Ca65Lexer(RegexLexer): # comments in GAS start with "#" if re.match(r'^\s*;', text, re.MULTILINE): return 0.9 + + +class Dasm16Lexer(RegexLexer): + """ + Simple lexer for DCPU-16 Assembly + + Check http://0x10c.com/doc/dcpu-16.txt + """ + name = 'dasm16' + aliases = ['DASM16'] + filenames = ['*.dasm16', '*.dasm'] + mimetypes = ['text/x-dasm16'] + + INSTRUCTIONS = [ + 'SET', + 'ADD', 'SUB', + 'MUL', 'MLI', + 'DIV', 'DVI', + 'MOD', 'MDI', + 'AND', 'BOR', 'XOR', + 'SHR', 'ASR', 'SHL', + 'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU', + 'ADX', 'SBX', + 'STI', 'STD', + 'JSR', + 'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI', + ] + + REGISTERS = [ + 'A', 'B', 'C', + 'X', 'Y', 'Z', + 'I', 'J', + 'SP', 'PC', 'EX', + 'POP', 'PEEK', 'PUSH' + ] + + # Regexes yo + char = r'[a-zA-Z$._0-9@]' + identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)' + number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)' + binary_number = r'0b[01_]+' + instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')' + single_char = r"'\\?" + char + "'" + string = r'"(\\"|[^"])*"' + + def guess_identifier(lexer, match): + ident = match.group(0) + klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label + yield match.start(), klass, ident + + tokens = { + 'root': [ + include('whitespace'), + (':' + identifier, Name.Label), + (identifier + ':', Name.Label), + (instruction, Name.Function, 'instruction-args'), + (r'\.' 
+ identifier, Name.Function, 'data-args'), + (r'[\r\n]+', Text) + ], + + 'numeric' : [ + (binary_number, Number.Integer), + (number, Number.Integer), + (single_char, String), + ], + + 'arg' : [ + (identifier, guess_identifier), + include('numeric') + ], + + 'deref' : [ + (r'\+', Punctuation), + (r'\]', Punctuation, '#pop'), + include('arg'), + include('whitespace') + ], + + 'instruction-line' : [ + (r'[\r\n]+', Text, '#pop'), + (r';.*?$', Comment, '#pop'), + include('whitespace') + ], + + 'instruction-args': [ + (r',', Punctuation), + (r'\[', Punctuation, 'deref'), + include('arg'), + include('instruction-line') + ], + + 'data-args' : [ + (r',', Punctuation), + include('numeric'), + (string, String), + include('instruction-line') + ], + + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + (r';.*?\n', Comment) + ], + } diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 206ec360..7c9bd655 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -21,7 +21,7 @@ __all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer', 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer', 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer', 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer', - 'PkgConfigLexer', 'PacmanConfLexer'] + 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer'] class IniLexer(RegexLexer): @@ -838,3 +838,80 @@ class PacmanConfLexer(RegexLexer): (r'.', Text), ], } + + +class AugeasLexer(RegexLexer): + name = 'Augeas' + aliases = ['augeas'] + filenames = ['*.aug'] + + tokens = { + 'root': [ + (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)), + (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)), + (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)), + (r'(\()([^\:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)), + (r'\(\*', Comment.Multiline, 'comment'), + (r'[\+=\|\.\*\;\?-]', Operator), + (r'[\[\]\(\)\{\}]', Operator), + (r'"', String.Double, 'string'), + (r'\/', String.Regex, 'regex'), + (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)), + (r'.', Name.Variable), + (r'\s', Text), + ], + 'string': [ + (r'\\.', String.Escape), + (r'[^"]', String.Double), + (r'"', String.Double, '#pop'), + ], + 'regex': [ + (r'\\.', String.Escape), + (r'[^\/]', String.Regex), + (r'\/', String.Regex, '#pop'), + ], + 'comment': [ + (r'[^*\)]', Comment.Multiline), + (r'\(\*', Comment.Multiline, '#push'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[\*\)]', Comment.Multiline) + ], + } + + +class TOMLLexer(RegexLexer): + """ + Lexer for TOML, a simple language for config files + """ + + name = 'TOML' + aliases = ['toml'] + filenames = ['*.toml'] + + tokens = { + 'root': [ + + # Basics, comments, strings + (r'\s+', Text), + (r'#.*?$', Comment.Single), + (r'"(\\\\|\\"|[^"])*"', String), + (r'(true|false)$', Keyword.Constant), + ('[a-zA-Z_][a-zA-Z0-9_\-]*', Name), + + # Datetime + (r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', Number.Integer), + + # Numbers + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), + (r'\d+[eE][+-]?[0-9]+j?', Number.Float), + (r'\-?\d+', Number.Integer), + + # Punctuation + (r'[]{}:(),;[]', Punctuation), + (r'\.', Punctuation), + + # Operators + (r'=', Operator) + + ] + } diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py new file mode 100644 index 00000000..f18059f7 
--- /dev/null +++ b/pygments/lexers/slash.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.slash + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Slash programming language. + + :copyright: Copyright 2012 by GitHub, Inc + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer +from pygments.token import Name, Number, String, Comment, Punctuation, \ + Other, Keyword, Operator, Whitespace + +__all__ = ['SlashLexer'] + + +class SlashLanguageLexer(ExtendedRegexLexer): + _nkw = r'(?=[^a-zA-Z_0-9])' + + def move_state(new_state): + return ("#pop", new_state) + + def right_angle_bracket(lexer, match, ctx): + if len(ctx.stack) > 1 and ctx.stack[-2] == "string": + ctx.stack.pop() + yield match.start(), String.Interpol, "}" + ctx.pos = match.end() + pass + + tokens = { + "root": [ + (r"<%=", Comment.Preproc, move_state("slash")), + (r"<%!!", Comment.Preproc, move_state("slash")), + (r"<%#.*?%>", Comment.Multiline), + (r"<%", Comment.Preproc, move_state("slash")), + (r".|\n", Other), + ], + "string": [ + (r"\\", String.Escape, move_state("string_e")), + (r"\"", String, move_state("slash")), + (r"#\{", String.Interpol, "slash"), + (r'.|\n', String), + ], + "string_e": [ + (r'n', String.Escape, move_state("string")), + (r't', String.Escape, move_state("string")), + (r'r', String.Escape, move_state("string")), + (r'e', String.Escape, move_state("string")), + (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")), + (r'.', String.Escape, move_state("string")), + ], + "regexp": [ + (r'}[a-z]*', String.Regex, move_state("slash")), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + (r'.|\n', String.Regex), + ], + "regexp_r": [ + (r'}[a-z]*', String.Regex, "#pop"), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + ], + "slash": [ + (r"%>", Comment.Preproc, move_state("root")), + (r"\"", String, move_state("string")), + (r"'[a-zA-Z0-9_]+", String), + (r'%r{', String.Regex, move_state("regexp")), + (r'/\*.*?\*/', Comment.Multiline), + (r"(#|//).*?\n", Comment.Single), + (r'-?[0-9]+e[+-]?[0-9]+', Number.Float), + (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), + (r'-?[0-9]+', Number.Integer), + (r'nil'+_nkw, Name.Builtin), + (r'true'+_nkw, Name.Builtin), + (r'false'+_nkw, Name.Builtin), + (r'self'+_nkw, Name.Builtin), + (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)', + bygroups(Keyword, Whitespace, Name.Class)), + (r'class'+_nkw, Keyword), + (r'extends'+_nkw, Keyword), + (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)), + (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Function)), + (r'def'+_nkw, Keyword), + (r'if'+_nkw, Keyword), + (r'elsif'+_nkw, Keyword), + (r'else'+_nkw, Keyword), + (r'unless'+_nkw, Keyword), + (r'for'+_nkw, Keyword), + (r'in'+_nkw, Keyword), + (r'while'+_nkw, Keyword), + (r'until'+_nkw, Keyword), + (r'and'+_nkw, Keyword), + (r'or'+_nkw, Keyword), + (r'not'+_nkw, Keyword), + (r'lambda'+_nkw, Keyword), + (r'try'+_nkw, Keyword), + (r'catch'+_nkw, Keyword), + (r'return'+_nkw, Keyword), + (r'next'+_nkw, Keyword), + (r'last'+_nkw, Keyword), + (r'throw'+_nkw, Keyword), + (r'use'+_nkw, Keyword), + (r'switch'+_nkw, Keyword), + (r'\\', Keyword), + (r'λ', Keyword), + (r'__FILE__'+_nkw, 
Name.Builtin.Pseudo), + (r'__LINE__'+_nkw, Name.Builtin.Pseudo), + (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant), + (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name), + (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance), + (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class), + (r'\(', Punctuation), + (r'\)', Punctuation), + (r'\[', Punctuation), + (r'\]', Punctuation), + (r'\{', Punctuation), + (r'\}', right_angle_bracket), + (r';', Punctuation), + (r',', Punctuation), + (r'<<=', Operator), + (r'>>=', Operator), + (r'<<', Operator), + (r'>>', Operator), + (r'==', Operator), + (r'!=', Operator), + (r'=>', Operator), + (r'=', Operator), + (r'<=>', Operator), + (r'<=', Operator), + (r'>=', Operator), + (r'<', Operator), + (r'>', Operator), + (r'\+\+', Operator), + (r'\+=', Operator), + (r'-=', Operator), + (r'\*\*=', Operator), + (r'\*=', Operator), + (r'\*\*', Operator), + (r'\*', Operator), + (r'/=', Operator), + (r'\+', Operator), + (r'-', Operator), + (r'/', Operator), + (r'%=', Operator), + (r'%', Operator), + (r'^=', Operator), + (r'&&=', Operator), + (r'&=', Operator), + (r'&&', Operator), + (r'&', Operator), + (r'\|\|=', Operator), + (r'\|=', Operator), + (r'\|\|', Operator), + (r'\|', Operator), + (r'!', Operator), + (r'\.\.\.', Operator), + (r'\.\.', Operator), + (r'\.', Operator), + (r'::', Operator), + (r':', Operator), + (r'(\s|\n)+', Whitespace), + (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable), + ], + } + + +class SlashLexer(DelegatingLexer): + """ + Lexer for the Slash programming language. + """ + + name = 'Slash' + aliases = ['slash'] + filenames = ['*.sl'] + + def __init__(self, **options): + from pygments.lexers.web import HtmlLexer + super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options) -- cgit v1.2.1 From d050666629b06261aed0e0e82648edeefe59e0c6 Mon Sep 17 00:00:00 2001 From: "Frederik ?Freso? S. Olesen" Date: Sun, 31 Mar 2019 17:32:01 +0200 Subject: Add lexers for DASM16, Augeas, TOML, and Slash Lexers copied unmodified from https://github.com/liluo/pygments-github-lexers which is available under a 2-clause BSD license (same as pygments), copyright 2012 to GitHub, Inc. Fixes #1391 and #1150. --- AUTHORS | 1 + CHANGES | 4 + pygments/lexers/_mapping.py | 4 + pygments/lexers/asm.py | 106 ++++++++++++++++++++++++- pygments/lexers/configs.py | 79 ++++++++++++++++++- pygments/lexers/slash.py | 184 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 376 insertions(+), 2 deletions(-) create mode 100644 pygments/lexers/slash.py diff --git a/AUTHORS b/AUTHORS index ed9c547c..e6f90fea 100644 --- a/AUTHORS +++ b/AUTHORS @@ -221,5 +221,6 @@ Other contributors, listed alphabetically, are: * Rob Zimmerman -- Kal lexer * Vincent Zurczak -- Roboconf lexer * Rostyslav Golda -- FloScript lexer +* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers Many thanks for all contributions! 
diff --git a/CHANGES b/CHANGES index 9a7426a3..84e68f1e 100644 --- a/CHANGES +++ b/CHANGES @@ -18,6 +18,10 @@ Version 2.4.0 * SGF (PR#780) * Slurm (PR#760) * VBScript (PR#673) + * DASM16 + * Augeas + * TOML + * Slash - Updated lexers: diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index f8207e70..2a068d0b 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -44,6 +44,7 @@ LEXERS = { 'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), 'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)), + 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug'), ()), 'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), 'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), @@ -121,6 +122,7 @@ LEXERS = { 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), 'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), 'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), + 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16')), 'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()), 'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), 'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), @@ -374,6 +376,7 @@ LEXERS = { 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')), 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)), 'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), + 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash'), ('*.sl'), ()), 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), 'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), @@ -420,6 +423,7 @@ LEXERS = { 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), + 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml'), ()), 'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', 
('treetop',), ('*.treetop', '*.tt'), ()), 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 2f08d510..761b3315 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -20,7 +20,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ __all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer', 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'NasmLexer', - 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer'] + 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer', 'Dasm16Lexer'] class GasLexer(RegexLexer): @@ -650,3 +650,107 @@ class Ca65Lexer(RegexLexer): # comments in GAS start with "#" if re.match(r'^\s*;', text, re.MULTILINE): return 0.9 + + +class Dasm16Lexer(RegexLexer): + """ + Simple lexer for DCPU-16 Assembly + + Check http://0x10c.com/doc/dcpu-16.txt + """ + name = 'dasm16' + aliases = ['DASM16'] + filenames = ['*.dasm16', '*.dasm'] + mimetypes = ['text/x-dasm16'] + + INSTRUCTIONS = [ + 'SET', + 'ADD', 'SUB', + 'MUL', 'MLI', + 'DIV', 'DVI', + 'MOD', 'MDI', + 'AND', 'BOR', 'XOR', + 'SHR', 'ASR', 'SHL', + 'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU', + 'ADX', 'SBX', + 'STI', 'STD', + 'JSR', + 'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI', + ] + + REGISTERS = [ + 'A', 'B', 'C', + 'X', 'Y', 'Z', + 'I', 'J', + 'SP', 'PC', 'EX', + 'POP', 'PEEK', 'PUSH' + ] + + # Regexes yo + char = r'[a-zA-Z$._0-9@]' + identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)' + number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)' + binary_number = r'0b[01_]+' + instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')' + single_char = r"'\\?" + char + "'" + string = r'"(\\"|[^"])*"' + + def guess_identifier(lexer, match): + ident = match.group(0) + klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label + yield match.start(), klass, ident + + tokens = { + 'root': [ + include('whitespace'), + (':' + identifier, Name.Label), + (identifier + ':', Name.Label), + (instruction, Name.Function, 'instruction-args'), + (r'\.' 
+ identifier, Name.Function, 'data-args'), + (r'[\r\n]+', Text) + ], + + 'numeric' : [ + (binary_number, Number.Integer), + (number, Number.Integer), + (single_char, String), + ], + + 'arg' : [ + (identifier, guess_identifier), + include('numeric') + ], + + 'deref' : [ + (r'\+', Punctuation), + (r'\]', Punctuation, '#pop'), + include('arg'), + include('whitespace') + ], + + 'instruction-line' : [ + (r'[\r\n]+', Text, '#pop'), + (r';.*?$', Comment, '#pop'), + include('whitespace') + ], + + 'instruction-args': [ + (r',', Punctuation), + (r'\[', Punctuation, 'deref'), + include('arg'), + include('instruction-line') + ], + + 'data-args' : [ + (r',', Punctuation), + include('numeric'), + (string, String), + include('instruction-line') + ], + + 'whitespace': [ + (r'\n', Text), + (r'\s+', Text), + (r';.*?\n', Comment) + ], + } diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 206ec360..7c9bd655 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -21,7 +21,7 @@ __all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer', 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer', 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer', 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer', - 'PkgConfigLexer', 'PacmanConfLexer'] + 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer'] class IniLexer(RegexLexer): @@ -838,3 +838,80 @@ class PacmanConfLexer(RegexLexer): (r'.', Text), ], } + + +class AugeasLexer(RegexLexer): + name = 'Augeas' + aliases = ['augeas'] + filenames = ['*.aug'] + + tokens = { + 'root': [ + (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)), + (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)), + (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)), + (r'(\()([^\:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)), + (r'\(\*', Comment.Multiline, 'comment'), + (r'[\+=\|\.\*\;\?-]', Operator), + (r'[\[\]\(\)\{\}]', Operator), + (r'"', String.Double, 'string'), + (r'\/', String.Regex, 'regex'), + (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)), + (r'.', Name.Variable), + (r'\s', Text), + ], + 'string': [ + (r'\\.', String.Escape), + (r'[^"]', String.Double), + (r'"', String.Double, '#pop'), + ], + 'regex': [ + (r'\\.', String.Escape), + (r'[^\/]', String.Regex), + (r'\/', String.Regex, '#pop'), + ], + 'comment': [ + (r'[^*\)]', Comment.Multiline), + (r'\(\*', Comment.Multiline, '#push'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[\*\)]', Comment.Multiline) + ], + } + + +class TOMLLexer(RegexLexer): + """ + Lexer for TOML, a simple language for config files + """ + + name = 'TOML' + aliases = ['toml'] + filenames = ['*.toml'] + + tokens = { + 'root': [ + + # Basics, comments, strings + (r'\s+', Text), + (r'#.*?$', Comment.Single), + (r'"(\\\\|\\"|[^"])*"', String), + (r'(true|false)$', Keyword.Constant), + ('[a-zA-Z_][a-zA-Z0-9_\-]*', Name), + + # Datetime + (r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', Number.Integer), + + # Numbers + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), + (r'\d+[eE][+-]?[0-9]+j?', Number.Float), + (r'\-?\d+', Number.Integer), + + # Punctuation + (r'[]{}:(),;[]', Punctuation), + (r'\.', Punctuation), + + # Operators + (r'=', Operator) + + ] + } diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py new file mode 100644 index 00000000..f18059f7 
--- /dev/null +++ b/pygments/lexers/slash.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.slash + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Slash programming language. + + :copyright: Copyright 2012 by GitHub, Inc + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer +from pygments.token import Name, Number, String, Comment, Punctuation, \ + Other, Keyword, Operator, Whitespace + +__all__ = ['SlashLexer'] + + +class SlashLanguageLexer(ExtendedRegexLexer): + _nkw = r'(?=[^a-zA-Z_0-9])' + + def move_state(new_state): + return ("#pop", new_state) + + def right_angle_bracket(lexer, match, ctx): + if len(ctx.stack) > 1 and ctx.stack[-2] == "string": + ctx.stack.pop() + yield match.start(), String.Interpol, "}" + ctx.pos = match.end() + pass + + tokens = { + "root": [ + (r"<%=", Comment.Preproc, move_state("slash")), + (r"<%!!", Comment.Preproc, move_state("slash")), + (r"<%#.*?%>", Comment.Multiline), + (r"<%", Comment.Preproc, move_state("slash")), + (r".|\n", Other), + ], + "string": [ + (r"\\", String.Escape, move_state("string_e")), + (r"\"", String, move_state("slash")), + (r"#\{", String.Interpol, "slash"), + (r'.|\n', String), + ], + "string_e": [ + (r'n', String.Escape, move_state("string")), + (r't', String.Escape, move_state("string")), + (r'r', String.Escape, move_state("string")), + (r'e', String.Escape, move_state("string")), + (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")), + (r'.', String.Escape, move_state("string")), + ], + "regexp": [ + (r'}[a-z]*', String.Regex, move_state("slash")), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + (r'.|\n', String.Regex), + ], + "regexp_r": [ + (r'}[a-z]*', String.Regex, "#pop"), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + ], + "slash": [ + (r"%>", Comment.Preproc, move_state("root")), + (r"\"", String, move_state("string")), + (r"'[a-zA-Z0-9_]+", String), + (r'%r{', String.Regex, move_state("regexp")), + (r'/\*.*?\*/', Comment.Multiline), + (r"(#|//).*?\n", Comment.Single), + (r'-?[0-9]+e[+-]?[0-9]+', Number.Float), + (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), + (r'-?[0-9]+', Number.Integer), + (r'nil'+_nkw, Name.Builtin), + (r'true'+_nkw, Name.Builtin), + (r'false'+_nkw, Name.Builtin), + (r'self'+_nkw, Name.Builtin), + (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)', + bygroups(Keyword, Whitespace, Name.Class)), + (r'class'+_nkw, Keyword), + (r'extends'+_nkw, Keyword), + (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)), + (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Function)), + (r'def'+_nkw, Keyword), + (r'if'+_nkw, Keyword), + (r'elsif'+_nkw, Keyword), + (r'else'+_nkw, Keyword), + (r'unless'+_nkw, Keyword), + (r'for'+_nkw, Keyword), + (r'in'+_nkw, Keyword), + (r'while'+_nkw, Keyword), + (r'until'+_nkw, Keyword), + (r'and'+_nkw, Keyword), + (r'or'+_nkw, Keyword), + (r'not'+_nkw, Keyword), + (r'lambda'+_nkw, Keyword), + (r'try'+_nkw, Keyword), + (r'catch'+_nkw, Keyword), + (r'return'+_nkw, Keyword), + (r'next'+_nkw, Keyword), + (r'last'+_nkw, Keyword), + (r'throw'+_nkw, Keyword), + (r'use'+_nkw, Keyword), + (r'switch'+_nkw, Keyword), + (r'\\', Keyword), + (r'λ', Keyword), + (r'__FILE__'+_nkw, 
Name.Builtin.Pseudo), + (r'__LINE__'+_nkw, Name.Builtin.Pseudo), + (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant), + (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name), + (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance), + (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class), + (r'\(', Punctuation), + (r'\)', Punctuation), + (r'\[', Punctuation), + (r'\]', Punctuation), + (r'\{', Punctuation), + (r'\}', right_angle_bracket), + (r';', Punctuation), + (r',', Punctuation), + (r'<<=', Operator), + (r'>>=', Operator), + (r'<<', Operator), + (r'>>', Operator), + (r'==', Operator), + (r'!=', Operator), + (r'=>', Operator), + (r'=', Operator), + (r'<=>', Operator), + (r'<=', Operator), + (r'>=', Operator), + (r'<', Operator), + (r'>', Operator), + (r'\+\+', Operator), + (r'\+=', Operator), + (r'-=', Operator), + (r'\*\*=', Operator), + (r'\*=', Operator), + (r'\*\*', Operator), + (r'\*', Operator), + (r'/=', Operator), + (r'\+', Operator), + (r'-', Operator), + (r'/', Operator), + (r'%=', Operator), + (r'%', Operator), + (r'^=', Operator), + (r'&&=', Operator), + (r'&=', Operator), + (r'&&', Operator), + (r'&', Operator), + (r'\|\|=', Operator), + (r'\|=', Operator), + (r'\|\|', Operator), + (r'\|', Operator), + (r'!', Operator), + (r'\.\.\.', Operator), + (r'\.\.', Operator), + (r'\.', Operator), + (r'::', Operator), + (r':', Operator), + (r'(\s|\n)+', Whitespace), + (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable), + ], + } + + +class SlashLexer(DelegatingLexer): + """ + Lexer for the Slash programming language. + """ + + name = 'Slash' + aliases = ['slash'] + filenames = ['*.sl'] + + def __init__(self, **options): + from pygments.lexers.web import HtmlLexer + super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options) -- cgit v1.2.1 From e566797eb7b56db2dce22c2f116d9a2f0a750827 Mon Sep 17 00:00:00 2001 From: "Frederik ?Freso? S. Olesen" Date: Mon, 1 Apr 2019 17:50:02 +0200 Subject: Add `versionadded` to new lexer classes --- pygments/lexers/asm.py | 2 ++ pygments/lexers/configs.py | 7 +++++++ pygments/lexers/slash.py | 2 ++ 3 files changed, 11 insertions(+) diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 761b3315..9ac3fdd9 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -657,6 +657,8 @@ class Dasm16Lexer(RegexLexer): Simple lexer for DCPU-16 Assembly Check http://0x10c.com/doc/dcpu-16.txt + + .. versionadded:: 2.4 """ name = 'dasm16' aliases = ['DASM16'] diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 7c9bd655..350dcd6f 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -841,6 +841,11 @@ class PacmanConfLexer(RegexLexer): class AugeasLexer(RegexLexer): + """ + Lexer for Augeas + + .. versionadded:: 2.4 + """ name = 'Augeas' aliases = ['augeas'] filenames = ['*.aug'] @@ -882,6 +887,8 @@ class AugeasLexer(RegexLexer): class TOMLLexer(RegexLexer): """ Lexer for TOML, a simple language for config files + + .. versionadded:: 2.4 """ name = 'TOML' diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py index f18059f7..3ba5e1e6 100644 --- a/pygments/lexers/slash.py +++ b/pygments/lexers/slash.py @@ -173,6 +173,8 @@ class SlashLanguageLexer(ExtendedRegexLexer): class SlashLexer(DelegatingLexer): """ Lexer for the Slash programming language. + + .. versionadded:: 2.4 """ name = 'Slash' -- cgit v1.2.1 From 7cd2a4447b61034aa5fcc4fd01c4271968cf6713 Mon Sep 17 00:00:00 2001 From: "Frederik ?Freso? S. 
Olesen" Date: Mon, 1 Apr 2019 18:40:12 +0200 Subject: Add example TOML file Based on https://github.com/toml-lang/toml/blob/master/README.md --- tests/examplefiles/example.toml | 181 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 tests/examplefiles/example.toml diff --git a/tests/examplefiles/example.toml b/tests/examplefiles/example.toml new file mode 100644 index 00000000..9c60c79f --- /dev/null +++ b/tests/examplefiles/example.toml @@ -0,0 +1,181 @@ +# This is a TOML document comment + +title = "TOML example file" # This is an inline comment + +[examples] +# Examples taken from https://github.com/toml-lang/toml/blob/master/README.md +key = "value" +bare_key = "value" +bare-key = "value" +1234 = "value" +"127.0.0.1" = "value" +"character encoding" = "value" +"ʎǝʞ" = "value" +'key2' = "value" +'quoted "value"' = "value" +name = "Orange" +physical.color = "orange" +physical.shape = "round" +site."google.com" = true +a.b.c = 1 +a.d = 2 + +[strings] +str = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." +str1 = """ +Roses are red +Violets are blue""" +str2 = "Roses are red\nViolets are blue" +str3 = "Roses are red\r\nViolets are blue" + + [strings.equivalents] + str1 = "The quick brown fox jumps over the lazy dog." + str2 = """ +The quick brown \ + + + fox jumps over \ + the lazy dog.""" + str3 = """\ + The quick brown \ + fox jumps over \ + the lazy dog.\ + """ + + [strings.literal] + winpath = 'C:\Users\nodejs\templates' + winpath2 = '\\ServerX\admin$\system32\' + quoted = 'Tom "Dubs" Preston-Werner' + regex = '<\i\c*\s*>' + + [strings.multiline] + regex2 = '''I [dw]on't need \d{2} apples''' + lines = ''' +The first newline is +trimmed in raw strings. + All other whitespace + is preserved. 
+''' + +[integers] +int1 = +99 +int2 = 42 +int3 = 0 +int4 = -17 +int5 = 1_000 +int6 = 5_349_221 +int7 = 1_2_3_4_5 # discouraged format +# hexadecimal with prefix `0x` +hex1 = 0xDEADBEEF +hex2 = 0xdeadbeef +hex3 = 0xdead_beef +# octal with prefix `0o` +oct1 = 0o01234567 +oct2 = 0o755 # useful for Unix file permissions +# binary with prefix `0b` +bin1 = 0b11010110 + +[floats] +# fractional +flt1 = +1.0 +flt2 = 3.1415 +flt3 = -0.01 +# exponent +flt4 = 5e+22 +flt5 = 1e6 +flt6 = -2E-2 +# both +flt7 = 6.626e-34 +# with underscores, for readability +flt8 = 224_617.445_991_228 +# infinity +sf1 = inf # positive infinity +sf2 = +inf # positive infinity +sf3 = -inf # negative infinity +# not a number +sf4 = nan # actual sNaN/qNaN encoding is implementation specific +sf5 = +nan # same as `nan` +sf6 = -nan # valid, actual encoding is implementation specific +# plus/minus zero +sf0_1 = +0.0 +sf0_2 = -0.0 + +[booleans] +bool1 = true +bool2 = false + +[datetime.offset] +odt1 = 1979-05-27T07:32:00Z +odt2 = 1979-05-27T00:32:00-07:00 +odt3 = 1979-05-27T00:32:00.999999-07:00 +odt4 = 1979-05-27 07:32:00Z + +[datetime.local] +ldt1 = 1979-05-27T07:32:00 +ldt2 = 1979-05-27T00:32:00.999999 + +[date.local] +ld1 = 1979-05-27 + +[time.local] +lt1 = 07:32:00 +lt2 = 00:32:00.999999 + +[arrays] +arr1 = [ 1, 2, 3 ] +arr2 = [ "red", "yellow", "green" ] +arr3 = [ [ 1, 2 ], [3, 4, 5] ] +arr4 = [ "all", 'strings', """are the same""", '''type'''] +arr5 = [ [ 1, 2 ], ["a", "b", "c"] ] +arr6 = [ 1, 2.0 ] # INVALID +arr7 = [ + 1, 2, 3 +] +arr8 = [ + 1, + 2, # this is ok +] + +["inline tables"] +name = { first = "Tom", last = "Preston-Werner" } +point = { x = 1, y = 2 } +animal = { type.name = "pug" } + +["arrays of tables"] +points = [ { x = 1, y = 2, z = 3 }, + { x = 7, y = 8, z = 9 }, + { x = 2, y = 4, z = 8 } ] + + [products] + + [[products]] + name = "Hammer" + sku = 738594937 + + [[products]] + + [[products]] + name = "Nail" + sku = 284758393 + color = "gray" + + [fruits] + + [[fruit]] + name = "apple" + + [fruit.physical] + color = "red" + shape = "round" + + [[fruit.variety]] + name = "red delicious" + + [[fruit.variety]] + name = "granny smith" + + [[fruit]] + name = "banana" + + [[fruit.variety]] + name = "plantain" -- cgit v1.2.1 From 075e65ed4e4424c121f7c70e0db9d2ff9e7895f8 Mon Sep 17 00:00:00 2001 From: "Matth?us G. Chajdas" Date: Sun, 28 Apr 2019 17:14:55 +0200 Subject: Add TOML example file and improve the lexer a bit. 
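
For example, the widened datetime rule below accepts both the "T" and the
space separator as well as numeric UTC offsets. As a quick check of the
intent, reusing the regex exactly as it appears in the hunk that follows
(an illustrative snippet, not itself part of the patch):

    import re

    dt = re.compile(r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})')
    assert dt.match('1979-05-27T07:32:00Z')       # "T" separator, Zulu time
    assert dt.match('1979-05-27 00:32:00-07:00')  # space separator, numeric offset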
--- pygments/lexers/configs.py | 9 ++++++++- tests/examplefiles/example.toml | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 tests/examplefiles/example.toml diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 7c9bd655..bf9cb0bb 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -894,12 +894,19 @@ class TOMLLexer(RegexLexer): # Basics, comments, strings (r'\s+', Text), (r'#.*?$', Comment.Single), + # Basic string (r'"(\\\\|\\"|[^"])*"', String), + # Literal string + (r'\'[^\']*\'', String), (r'(true|false)$', Keyword.Constant), ('[a-zA-Z_][a-zA-Z0-9_\-]*', Name), + (r'\[.*?\]$', Keyword), + # Datetime - (r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', Number.Integer), + # TODO this needs to be expanded, as TOML is rather flexible: + # https://github.com/toml-lang/toml#offset-date-time + (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})', Number.Integer), # Numbers (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), diff --git a/tests/examplefiles/example.toml b/tests/examplefiles/example.toml new file mode 100644 index 00000000..40832c22 --- /dev/null +++ b/tests/examplefiles/example.toml @@ -0,0 +1,15 @@ +name = "TOML sample file" + +[section] +key = "value" +literal_string = 'C:\test' +other_string = '''value''' +list = [1, 2, 3] +nested_list = [ [1, 2], [3, 4] ] +float_variable = 13.37 + + [section.nested] + boolean_variable = false + date_variable = 1969-97-24T16:50:35-00:00 + +# Comment \ No newline at end of file -- cgit v1.2.1 From 5256ca6b4b12794a23b848a2dffa28025b8d9d72 Mon Sep 17 00:00:00 2001 From: "Matth?us G. Chajdas" Date: Sun, 28 Apr 2019 17:15:44 +0200 Subject: Various fixes and cleanups to the last batch of new languages. Add to CHANGES, languages, add versionadded, recreate mappings, fix DASM16 alias. 
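
The DASM16 name/alias swap below matters because lexer lookup goes through
the lowercase alias tuple, not the display name; roughly (an illustrative
sketch, assuming the regenerated mapping shown in this diff):

    from pygments.lexers import get_lexer_by_name

    lexer = get_lexer_by_name('dasm16')  # resolves through aliases=['dasm16']
    print(lexer.name)                    # the display name stays 'DASM16'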
--- CHANGES | 8 ++++---- doc/languages.rst | 4 ++++ pygments/lexers/_mapping.py | 8 ++++---- pygments/lexers/asm.py | 6 ++++-- pygments/lexers/configs.py | 10 +++++++++- pygments/lexers/slash.py | 5 ++++- 6 files changed, 29 insertions(+), 12 deletions(-) diff --git a/CHANGES b/CHANGES index 6a6a6a17..2b1ebb0b 100644 --- a/CHANGES +++ b/CHANGES @@ -12,17 +12,17 @@ Version 2.4.0 - Added lexers: + * Augeas (PR#807) * Charm++ CI (PR#788) + * DASM16 (PR#807) * FloScript (PR#750) * Hspec (PR#790) * SGF (PR#780) + * Slash (PR#807) * Slurm (PR#760) + * TOML (PR#807) * Unicon (PR#731) * VBScript (PR#673) - * DASM16 - * Augeas - * TOML - * Slash - Updated lexers: diff --git a/doc/languages.rst b/doc/languages.rst index 47e3363f..d2508b07 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -14,6 +14,7 @@ Programming languages * AppleScript * Assembly (various) * Asymptote +* `Augeas `_ * Awk * Befunge * Boo @@ -31,6 +32,7 @@ Programming languages * `Cython `_ * `D `_ * Dart +* DCPU-16 * Delphi * Dylan * `Elm `_ @@ -85,10 +87,12 @@ Programming languages * Scheme * Scilab * `SGF `_ +* `Slash `_ * `Slurm `_ * Smalltalk * SNOBOL * Tcl +* `TOML `_ * Vala * Verilog * VHDL diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index 3c05d147..e8838008 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -44,7 +44,7 @@ LEXERS = { 'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), 'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), 'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)), - 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug'), ()), + 'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()), 'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), 'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), @@ -122,7 +122,7 @@ LEXERS = { 'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), 'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), 'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), - 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16')), + 'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)), 'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()), 'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), 'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), @@ -377,7 +377,6 @@ LEXERS = { 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')), 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)), 'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 
'application/x-sas')), - 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash'), ('*.sl'), ()), 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), 'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), @@ -389,6 +388,7 @@ LEXERS = { 'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), 'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), 'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), + 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()), 'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)), 'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()), 'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), @@ -411,6 +411,7 @@ LEXERS = { 'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), 'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), + 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml',), ()), 'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), 'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), 'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), @@ -424,7 +425,6 @@ LEXERS = { 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), - 'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml'), ()), 'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 761b3315..7100868c 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -657,9 +657,11 @@ class Dasm16Lexer(RegexLexer): Simple lexer for DCPU-16 Assembly Check http://0x10c.com/doc/dcpu-16.txt + + .. versionadded:: 2.4 """ - name = 'dasm16' - aliases = ['DASM16'] + name = 'DASM16' + aliases = ['dasm16'] filenames = ['*.dasm16', '*.dasm'] mimetypes = ['text/x-dasm16'] diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index bf9cb0bb..a3e28dd8 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -841,6 +841,11 @@ class PacmanConfLexer(RegexLexer): class AugeasLexer(RegexLexer): + """ + Lexer for `Augeas `_. + + .. 
versionadded:: 2.4 + """ name = 'Augeas' aliases = ['augeas'] filenames = ['*.aug'] @@ -881,7 +886,10 @@ class AugeasLexer(RegexLexer): class TOMLLexer(RegexLexer): """ - Lexer for TOML, a simple language for config files + Lexer for `TOML `_, a simple language + for config files. + + .. versionadded:: 2.4 """ name = 'TOML' diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py index f18059f7..bd73d463 100644 --- a/pygments/lexers/slash.py +++ b/pygments/lexers/slash.py @@ -3,7 +3,8 @@ pygments.lexers.slash ~~~~~~~~~~~~~~~~~~~~~ - Lexer for the Slash programming language. + Lexer for the `Slash `_ programming + language. :copyright: Copyright 2012 by GitHub, Inc :license: BSD, see LICENSE for details. @@ -173,6 +174,8 @@ class SlashLanguageLexer(ExtendedRegexLexer): class SlashLexer(DelegatingLexer): """ Lexer for the Slash programming language. + + .. versionadded:: 2.4 """ name = 'Slash' -- cgit v1.2.1 From 323d10920c579054c861ee0cfdd9a30d012a4d0a Mon Sep 17 00:00:00 2001 From: "Matth?us G. Chajdas" Date: Tue, 30 Apr 2019 09:23:54 +0200 Subject: Improve TOML lexer. The last found a few shortcomings in the TOML around float parsing. These get fixed in this commit; additionally, I simplified a bunch of regex. --- pygments/lexers/configs.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 765c67b7..6f2c7c76 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -855,10 +855,10 @@ class AugeasLexer(RegexLexer): (r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace)), (r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable)), (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text)), - (r'(\()([^\:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)), + (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation)), (r'\(\*', Comment.Multiline, 'comment'), - (r'[\+=\|\.\*\;\?-]', Operator), - (r'[\[\]\(\)\{\}]', Operator), + (r'[*+\-.;=?|]', Operator), + (r'[()\[\]{}]', Operator), (r'"', String.Double, 'string'), (r'\/', String.Regex, 'regex'), (r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable)), @@ -872,14 +872,14 @@ class AugeasLexer(RegexLexer): ], 'regex': [ (r'\\.', String.Escape), - (r'[^\/]', String.Regex), + (r'[^/]', String.Regex), (r'\/', String.Regex, '#pop'), ], 'comment': [ - (r'[^*\)]', Comment.Multiline), + (r'[^*)]', Comment.Multiline), (r'\(\*', Comment.Multiline, '#push'), (r'\*\)', Comment.Multiline, '#pop'), - (r'[\*\)]', Comment.Multiline) + (r'[)*]', Comment.Multiline) ], } @@ -905,9 +905,10 @@ class TOMLLexer(RegexLexer): # Basic string (r'"(\\\\|\\"|[^"])*"', String), # Literal string + (r'\'\'\'(.*)\'\'\'', String), (r'\'[^\']*\'', String), (r'(true|false)$', Keyword.Constant), - ('[a-zA-Z_][a-zA-Z0-9_\-]*', Name), + (r'[a-zA-Z_][\w\-]*', Name), (r'\[.*?\]$', Keyword), # Datetime @@ -918,7 +919,9 @@ class TOMLLexer(RegexLexer): # Numbers (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), (r'\d+[eE][+-]?[0-9]+j?', Number.Float), - (r'\-?\d+', Number.Integer), + # Handle +-inf, +-infinity, +-nan + (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float), + (r'[+-]?\d+', Number.Integer), # Punctuation (r'[]{}:(),;[]', Punctuation), -- cgit v1.2.1 From 
0b839cf4a0acf37677e7b1a85181a628d9959735 Mon Sep 17 00:00:00 2001 From: "Matth?us G. Chajdas" Date: Tue, 30 Apr 2019 17:48:52 +0200 Subject: Simplify a few basic regex. --- pygments/lexers/basic.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py index 4d957c2b..1aa3274c 100644 --- a/pygments/lexers/basic.py +++ b/pygments/lexers/basic.py @@ -550,7 +550,7 @@ class VBScriptLexer(RegexLexer): (r'[a-z_][a-z0-9_]*', Name), (r'\b_\n', Operator), (words(r'(),.:'), Punctuation), - ('.+(\n)?', Error) + (r'.+(\n)?', Error) ], 'dim_more': [ (r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)', bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)), @@ -570,7 +570,7 @@ class BBCBasicLexer(RegexLexer): BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS. It is also used by BBC Basic For Windows. - .. versionadded:: 2.4???? + .. versionadded:: 2.4 """ base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR', 'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN', @@ -620,9 +620,9 @@ class BBCBasicLexer(RegexLexer): (r':', Comment.Preproc), # Some special cases to make functions come out nicer - (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][A-Za-z0-9_@]*)', + (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)', bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)), - (r'(FN|PROC)([A-Za-z_@][A-Za-z0-9_@]*)', + (r'(FN|PROC)([A-Za-z_@][\w@]*)', bygroups(Keyword, Name.Function)), (r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)', @@ -644,7 +644,7 @@ class BBCBasicLexer(RegexLexer): (r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float), (r'[+-]?\d+', Number.Integer), - (r'([A-Za-z_@][A-Za-z0-9_@]*[%$]?)', Name.Variable), + (r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable), (r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator), ], 'string': [ -- cgit v1.2.1 From 601e6e86e45f31e23a7b29e1265d68aeba5e1366 Mon Sep 17 00:00:00 2001 From: "Matth?us G. Chajdas" Date: Tue, 30 Apr 2019 17:50:49 +0200 Subject: Update CHANGES, language list. --- CHANGES | 2 ++ doc/languages.rst | 2 ++ pygments/lexers/pony.py | 10 ++++++---- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/CHANGES b/CHANGES index 2b1ebb0b..62d2f113 100644 --- a/CHANGES +++ b/CHANGES @@ -13,10 +13,12 @@ Version 2.4.0 - Added lexers: * Augeas (PR#807) + * BBC Basic (PR#806) * Charm++ CI (PR#788) * DASM16 (PR#807) * FloScript (PR#750) * Hspec (PR#790) + * Pony (PR#627) * SGF (PR#780) * Slash (PR#807) * Slurm (PR#760) diff --git a/doc/languages.rst b/doc/languages.rst index d2508b07..d96b1ba8 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -16,6 +16,7 @@ Programming languages * Asymptote * `Augeas `_ * Awk +* BBC Basic * Befunge * Boo * BrainFuck @@ -72,6 +73,7 @@ Programming languages * OCaml * PHP * `Perl 5 `_ and `Perl 6 `_ +* `Pony `_ * PovRay * PostScript * PowerShell diff --git a/pygments/lexers/pony.py b/pygments/lexers/pony.py index 486e404b..13239047 100644 --- a/pygments/lexers/pony.py +++ b/pygments/lexers/pony.py @@ -2,7 +2,7 @@ """ pygments.lexers.pony ~~~~~~~~~~~~~~~~~~~~ - + Lexers for Pony and related languages. :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. @@ -19,14 +19,16 @@ __all__ = ['PonyLexer'] class PonyLexer(RegexLexer): """ For Pony source code. + + .. 
versionadded:: 2.4 """ - + name = 'Pony' aliases = ['pony'] filenames = ['*.pony'] - + _caps = r'(iso|trn|ref|val|box|tag)' - + tokens = { 'root': [ (r'\n', Text), -- cgit v1.2.1 From 8dfc5f5e73a1ab1e4890a6370064730006f56052 Mon Sep 17 00:00:00 2001 From: "Matthäus G. Chajdas" Date: Tue, 30 Apr 2019 17:53:35 +0200 Subject: Add Boa to CHANGES, language list. --- CHANGES | 1 + doc/languages.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 62d2f113..9d4196d4 100644 --- a/CHANGES +++ b/CHANGES @@ -14,6 +14,7 @@ Version 2.4.0 * Augeas (PR#807) * BBC Basic (PR#806) + * Boa (PR#756) * Charm++ CI (PR#788) * DASM16 (PR#807) * FloScript (PR#750) diff --git a/doc/languages.rst b/doc/languages.rst index d96b1ba8..4e2c99f5 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -18,6 +18,7 @@ Programming languages * Awk * BBC Basic * Befunge +* `Boa `_ * Boo * BrainFuck * C, C++ -- cgit v1.2.1 From 024ce8710ff32c3a2946a0c4542906098d2e7891 Mon Sep 17 00:00:00 2001 From: "Matthäus G. Chajdas" Date: Tue, 30 Apr 2019 18:00:43 +0200 Subject: Add Tera Term to CHANGES, languages. --- CHANGES | 1 + doc/languages.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 9d4196d4..1a5a359e 100644 --- a/CHANGES +++ b/CHANGES @@ -23,6 +23,7 @@ Version 2.4.0 * SGF (PR#780) * Slash (PR#807) * Slurm (PR#760) + * Tera Term Language (PR#749) * TOML (PR#807) * Unicon (PR#731) * VBScript (PR#673) diff --git a/doc/languages.rst b/doc/languages.rst index 4e2c99f5..4f3166b5 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -95,6 +95,7 @@ Programming languages * Smalltalk * SNOBOL * Tcl +* `Tera Term language `_ * `TOML `_ * Vala * Verilog -- cgit v1.2.1 From 990cce13e011e8404ad8143ac2e36acbdf31f759 Mon Sep 17 00:00:00 2001 From: "Matthäus G. Chajdas" Date: Tue, 30 Apr 2019 18:06:37 +0200 Subject: Update apache2.conf sample, CHANGES. --- CHANGES | 2 ++ tests/examplefiles/apache2.conf | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/CHANGES b/CHANGES index 1a5a359e..833c2250 100644 --- a/CHANGES +++ b/CHANGES @@ -30,8 +30,10 @@ Version 2.4.0 - Updated lexers: + * Apache2 (PR#766) * Cypher (PR#746) * LLVM (PR#792) + * Makefiles (PR#766) * PHP (#1482) * SQL (PR#672) * Stan (PR#774) diff --git a/tests/examplefiles/apache2.conf b/tests/examplefiles/apache2.conf index d0e838e0..5db66b10 100644 --- a/tests/examplefiles/apache2.conf +++ b/tests/examplefiles/apache2.conf @@ -391,3 +391,8 @@ BrowserMatch "^WebDAVFS/1.[012]" redirect-carefully # Include the virtual host configurations: Include /etc/apache2/sites-enabled/[^.#]* + +# From PR#766 +<IfVersion >= 2.4> +ErrorLogFormat "%{cu}t %M" +</IfVersion> \ No newline at end of file -- cgit v1.2.1 From 249e5feec73189cfff3a4c81d6a6c9fc821286cd Mon Sep 17 00:00:00 2001 From: "Matthäus G. Chajdas" Date: Tue, 30 Apr 2019 21:17:36 +0200 Subject: Update mappings, CHANGES and languages.
--- CHANGES | 1 + doc/languages.rst | 1 + pygments/lexers/_mapping.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 833c2250..db1fb485 100644 --- a/CHANGES +++ b/CHANGES @@ -18,6 +18,7 @@ Version 2.4.0 * Charm++ CI (PR#788) * DASM16 (PR#807) * FloScript (PR#750) + * FreeFem++ (PR#785) * Hspec (PR#790) * Pony (PR#627) * SGF (PR#780) diff --git a/doc/languages.rst b/doc/languages.rst index 4f3166b5..b06ccc55 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -45,6 +45,7 @@ Programming languages * `Fennel `_ * `FloScript `_ * Fortran +* `FreeFEM++ `_ * F# * GAP * Gherkin (Cucumber) diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index f974a160..e4e8cf43 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -166,7 +166,7 @@ LEXERS = { 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), - 'FreeFemLexer': ('pygments.lexers.freefem', 'FreeFem', ('freefem',), ('*.edp', '*.idp',), ('text/x-edpsrc',)), + 'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)), 'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), 'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), 'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), -- cgit v1.2.1 From 8f733ca887f216cdcf1b1e5f9b70954811bcfc17 Mon Sep 17 00:00:00 2001 From: Sylvain Corlay Date: Thu, 2 May 2019 00:05:47 +0200 Subject: Allow for CSS variable in pygments stylesheets --- pygments/formatters/html.py | 11 ++++++++--- pygments/style.py | 4 +++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py index 7d7605eb..bbddf507 100644 --- a/pygments/formatters/html.py +++ b/pygments/formatters/html.py @@ -41,6 +41,11 @@ def escape_html(text, table=_escape_html_table): """Escape &, <, > as well as single and double quotes for HTML.""" return text.translate(table) +def webify(color): + if color.startswith('calc') or color.startswith('var'): + return color + else: + return '#' + color def _get_ttype_class(ttype): fname = STANDARD_TYPES.get(ttype) @@ -451,7 +456,7 @@ class HtmlFormatter(Formatter): name = self._get_css_class(ttype) style = '' if ndef['color']: - style += 'color: #%s; ' % ndef['color'] + style += 'color: %s; ' % webify(ndef['color']) if ndef['bold']: style += 'font-weight: bold; ' if ndef['italic']: @@ -459,9 +464,9 @@ class HtmlFormatter(Formatter): if ndef['underline']: style += 'text-decoration: underline; ' if ndef['bgcolor']: - style += 'background-color: #%s; ' % ndef['bgcolor'] + style += 'background-color: %s; ' % webify(ndef['bgcolor']) if ndef['border']: - style += 'border: 1px solid #%s; ' % ndef['border'] + style += 'border: 1px solid %s; ' % webify(ndef['border']) if style: t2c[ttype] = name # save len(ttype) to enable ordering the styles by diff --git a/pygments/style.py b/pygments/style.py index 89766d8c..aee23f96 100644 --- a/pygments/style.py +++ b/pygments/style.py @@ -73,9 +73,11 @@ class StyleMeta(type): if len(col) == 6: return col elif len(col) == 3: - return col[0]*2 + col[1]*2 + 
col[2]*2 + return col[0] * 2 + col[1] * 2 + col[2] * 2 elif text == '': return '' + elif text.startswith('var') or text.startswith('calc'): + return text assert False, "wrong color format %r" % text _styles = obj._styles = {} -- cgit v1.2.1 From deea374f51f82b1ea16b27df3dce1749be93f614 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 6 May 2019 07:42:02 +0200 Subject: Never pop all states from the stack, even if a lexer wants to. fixes #1506 --- pygments/lexer.py | 22 ++++++++++++++++------ tests/test_regexlexer.py | 24 ++++++++++++++++++------ 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/pygments/lexer.py b/pygments/lexer.py index 90905ba5..62d66318 100644 --- a/pygments/lexer.py +++ b/pygments/lexer.py @@ -639,14 +639,20 @@ class RegexLexer(Lexer): if isinstance(new_state, tuple): for state in new_state: if state == '#pop': - statestack.pop() + if len(statestack) > 1: + statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): - # pop - del statestack[new_state:] + # pop, but keep at least one state on the stack + # (random code leading to unexpected pops should + # not allow exceptions) + if abs(new_state) >= len(statestack): + del statestack[1:] + else: + del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: @@ -724,14 +730,18 @@ class ExtendedRegexLexer(RegexLexer): if isinstance(new_state, tuple): for state in new_state: if state == '#pop': - ctx.stack.pop() + if len(ctx.stack) > 1: + ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): - # pop - del ctx.stack[new_state:] + # see RegexLexer for why this check is made + if abs(new_state) >= len(ctx.stack): + del ctx.state[1:] + else: + del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py index d919a950..778f3d03 100644 --- a/tests/test_regexlexer.py +++ b/tests/test_regexlexer.py @@ -11,7 +11,6 @@ import unittest from pygments.token import Text from pygments.lexer import RegexLexer -from pygments.lexer import bygroups from pygments.lexer import default @@ -21,6 +20,8 @@ class TestLexer(RegexLexer): 'root': [ ('a', Text.Root, 'rag'), ('e', Text.Root), + ('#', Text.Root, '#pop'), + ('@', Text.Root, ('#pop', '#pop')), default(('beer', 'beer')) ], 'beer': [ @@ -37,18 +38,29 @@ class TupleTransTest(unittest.TestCase): def test(self): lx = TestLexer() toks = list(lx.get_tokens_unprocessed('abcde')) - self.assertEqual(toks, - [(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'), + self.assertEqual(toks, [ + (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'), (3, Text.Beer, 'd'), (4, Text.Root, 'e')]) def test_multiline(self): lx = TestLexer() toks = list(lx.get_tokens_unprocessed('a\ne')) - self.assertEqual(toks, - [(0, Text.Root, 'a'), (1, Text, u'\n'), - (2, Text.Root, 'e')]) + self.assertEqual(toks, [ + (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')]) def test_default(self): lx = TestLexer() toks = list(lx.get_tokens_unprocessed('d')) self.assertEqual(toks, [(0, Text.Beer, 'd')]) + + +class PopEmptyTest(unittest.TestCase): + def test_regular(self): + lx = TestLexer() + toks = list(lx.get_tokens_unprocessed('#e')) + self.assertEqual(toks, [(0, Text.Root, '#'), (1, Text.Root, 'e')]) + + def test_tuple(self): + lx = TestLexer() + toks = list(lx.get_tokens_unprocessed('@e')) + 
+        self.assertEqual(toks, [(0, Text.Root, '@'), (1, Text.Root, 'e')])
-- cgit v1.2.1


From c3a5a7023c0a266845c485058241ffed2eb37176 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Mon, 6 May 2019 19:37:28 +0200
Subject: Add option to wrap the code in both <pre> and <code> (fixes #438.)

Adds a new option "wrapcode" which wraps the code using <code>.
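
As an illustration (not part of the patch), a minimal usage sketch; with the
new option the highlighted markup nests <code> inside <pre>:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    # wrapcode=True emits <div class="highlight"><pre><code>...</code></pre></div>
    # instead of the bare <pre> wrapper used by default.
    html = highlight('print("hi")', PythonLexer(), HtmlFormatter(wrapcode=True))
    assert '<code>' in html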
---
 pygments/formatters/html.py | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 7d7605eb..1ebfafcb 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -322,11 +322,17 @@ class HtmlFormatter(Formatter):
         .. versionadded:: 1.6
 
     `filename`
-        A string used to generate a filename when rendering <pre> blocks,
+        A string used to generate a filename when rendering ``<pre>`` blocks,
         for example if displaying source code.
 
         .. versionadded:: 2.1
 
+    `wrapcode`
+        Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
+        by the HTML5 specification.
+
+        .. versionadded:: 2.4
+
 
     **Subclassing the HTML formatter**
 
@@ -395,6 +401,7 @@ class HtmlFormatter(Formatter):
         self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
         self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
         self.filename = self._decodeifneeded(options.get('filename', ''))
+        self.wrapcode = get_bool_opt(options, 'wrapcode', False)
 
         if self.tagsfile:
             if not ctags:
@@ -708,6 +715,12 @@ class HtmlFormatter(Formatter):
             yield tup
         yield 0, '</pre>'

+    def _wrap_code(self, inner):
+        yield 0, '<code>'
+        for tup in inner:
+            yield tup
+        yield 0, '</code>'
+
     def _format_lines(self, tokensource):
         """
         Just format the tokens, without any wrapping tags.
@@ -814,7 +827,10 @@ class HtmlFormatter(Formatter):
         individual lines, in custom generators. See docstring for
         `format`. Can be overridden.
         """
-        return self._wrap_div(self._wrap_pre(source))
+        if self.wrapcode:
+            return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+        else:
+            return self._wrap_div(self._wrap_pre(source))

     def format_unencoded(self, tokensource, outfile):
         """
-- cgit v1.2.1


From a5e3278c84828eb3f18f41c1af25324235b97d21 Mon Sep 17 00:00:00 2001
From: Georg Brandl
Date: Tue, 7 May 2019 17:46:01 +0200
Subject: Fix invalid string escape.

---
 pygments/lexers/asm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 7100868c..b522450c 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -690,7 +690,7 @@ class Dasm16Lexer(RegexLexer):
     # Regexes yo
     char = r'[a-zA-Z$._0-9@]'
-    identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'
+    identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
     number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)'
     binary_number = r'0b[01_]+'
     instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')'
-- cgit v1.2.1


From 7d88bffb38b6e92cd891451469030a0b310869ce Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Tue, 7 May 2019 21:05:53 +0200
Subject: Add license information to generated files (fixes #1496).

Also update the license year to 2019.
---
 CHANGES                     | 1 +
 LICENSE                     | 2 +-
 pygments/formatters/html.py | 13 +++++++++++--
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/CHANGES b/CHANGES
index db1fb485..0503d14e 100644
--- a/CHANGES
+++ b/CHANGES
@@ -43,6 +43,7 @@ Version 2.4.0

 - Add solarized style (PR#708)
 - Add support for Markdown reference-style links (PR#753)
+- Add license information to generated HTML/CSS files (#1496)
 - Change ANSI color names (PR#777)
 - Fix catastrophic backtracking in the bash lexer (#1494)
 - Fix documentation failing to build using Sphinx 2.0 (#1501)
diff --git a/LICENSE b/LICENSE
index 21815527..13d1c74b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2006-2017 by the respective authors (see AUTHORS file).
+Copyright (c) 2006-2019 by the respective authors (see AUTHORS file).
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 1ebfafcb..99ab837c 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -5,7 +5,7 @@

     Formatter for HTML output.

-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """

@@ -55,6 +55,11 @@ def _get_ttype_class(ttype):

 CSSFILE_TEMPLATE = '''\
+/*
+generated by Pygments
+Copyright 2006-2019 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+*/
 td.linenos { background-color: #f0f0f0; padding-right: 10px; }
 span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
 pre { line-height: 125%%; }
@@ -64,7 +69,11 @@ pre { line-height: 125%%; }

 DOC_HEADER = '''\
-
+
   %(title)s
-- cgit v1.2.1


From 682aeec48f57f11fb7f19cfff61bb52d9e835f57 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Wed, 8 May 2019 19:24:58 +0200
Subject: Add i128/u128 types to the Rust parser.
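
As a quick illustration (not part of the patch), the widened integer types
should now lex as builtin types:

    from pygments.lexers import RustLexer
    from pygments.token import Keyword

    # 'i128' and 'u128' should now be reported as Keyword.Type,
    # just like the other fixed-size integer types.
    tokens = list(RustLexer().get_tokens('let x: i128 = 0;\n'))
    assert (Keyword.Type, 'i128') in tokens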
---
 CHANGES                 | 1 +
 pygments/lexers/rust.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGES b/CHANGES
index 0503d14e..94b85262 100644
--- a/CHANGES
+++ b/CHANGES
@@ -36,6 +36,7 @@ Version 2.4.0
   * LLVM (PR#792)
   * Makefiles (PR#766)
   * PHP (#1482)
+  * Rust
   * SQL (PR#672)
   * Stan (PR#774)
   * Stata (PR#800)
diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py
index 10097fba..b7b8cb7e 100644
--- a/pygments/lexers/rust.py
+++ b/pygments/lexers/rust.py
@@ -29,7 +29,7 @@ class RustLexer(RegexLexer):
     keyword_types = (
         words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64',
-               'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
+               'i128', 'u128', 'usize', 'isize', 'f32', 'f64', 'str', 'bool'),
               suffix=r'\b'),
         Keyword.Type)
-- cgit v1.2.1


From 6047bc5d680a8916a064cdab5e1bba552305fb0e Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Wed, 8 May 2019 19:25:11 +0200
Subject: Update CHANGES for release.

---
 CHANGES | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGES b/CHANGES
index 94b85262..9e67f879 100644
--- a/CHANGES
+++ b/CHANGES
@@ -8,7 +8,7 @@ pull request numbers to the requests at

 Version 2.4.0
 -------------
-(not released yet)
+(released May 8, 2019)

 - Added lexers:
-- cgit v1.2.1


From d760bd375dfd775ab067cead3c4a83bb865a7bb1 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Wed, 8 May 2019 19:27:47 +0200
Subject: Bump version to 2.4.0.

---
 pygments/__init__.py | 2 +-
 setup.py             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pygments/__init__.py b/pygments/__init__.py
index 4dd38fee..0da0649d 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -29,7 +29,7 @@ import sys

 from pygments.util import StringIO, BytesIO

-__version__ = '2.3.1'
+__version__ = '2.4.0'
 __docformat__ = 'restructuredtext'

 __all__ = ['lex', 'format', 'highlight']
diff --git a/setup.py b/setup.py
index 52889227..bd54b0f3 100755
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@ else:

 setup(
     name = 'Pygments',
-    version = '2.3.1',
+    version = '2.4.0',
     url = 'http://pygments.org/',
     license = 'BSD License',
     author = 'Georg Brandl',
-- cgit v1.2.1


From 8f43e7edbffc28f8372f3e6448e39b3427a5d9c7 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Wed, 8 May 2019 19:38:04 +0200
Subject: Add docstring to the Boa lexer.

---
 pygments/lexers/boa.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/pygments/lexers/boa.py b/pygments/lexers/boa.py
index 41398cd4..dda31eb4 100644
--- a/pygments/lexers/boa.py
+++ b/pygments/lexers/boa.py
@@ -1,4 +1,14 @@
 # -*- coding: utf-8 -*-
+"""
+    pygments.lexers.boa
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Boa language.
+
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
 import re

 from pygments.lexer import RegexLexer, words
@@ -11,7 +21,9 @@ line_re = re.compile('.*?\n')

 class BoaLexer(RegexLexer):
     """
-    http://boa.cs.iastate.edu/docs/
+    Lexer for the `Boa `_ language.
+
+    .. versionadded:: 2.4
     """
     name = 'Boa'
     aliases = ['boa']
-- cgit v1.2.1


From d1b13a46e532f1eef99e4918334ce06b49484064 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Wed, 8 May 2019 19:45:11 +0200
Subject: Update CHANGES.

The YAML lexer got updated since 2.3.1 as well.
---
 CHANGES | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGES b/CHANGES
index 9e67f879..9ebde077 100644
--- a/CHANGES
+++ b/CHANGES
@@ -41,6 +41,7 @@ Version 2.4.0
   * Stan (PR#774)
   * Stata (PR#800)
   * Terraform (PR#787)
+  * YAML

 - Add solarized style (PR#708)
 - Add support for Markdown reference-style links (PR#753)
-- cgit v1.2.1


From 2461ec34c6aac22c0600b3e0e32a9f802765e9de Mon Sep 17 00:00:00 2001
From: Georg Brandl
Date: Wed, 8 May 2019 22:20:10 +0200
Subject: All lexers should have an alias.

---
 pygments/lexers/_mapping.py | 2 +-
 pygments/lexers/basic.py    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index e4e8cf43..aea19b4e 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -442,7 +442,7 @@ LEXERS = {
     'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
     'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
     'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
-    'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', (), ('*.vbs', '*.VBS'), ()),
+    'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
     'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
     'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
     'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py
index 1aa3274c..f93d6d52 100644
--- a/pygments/lexers/basic.py
+++ b/pygments/lexers/basic.py
@@ -510,7 +510,7 @@ class VBScriptLexer(RegexLexer):
     .. versionadded:: 2.4
     """
     name = 'VBScript'
-    aliases = []
+    aliases = ['vbscript']
     filenames = ['*.vbs', '*.VBS']

     flags = re.IGNORECASE
-- cgit v1.2.1


From 8f1a1638284cf3e51386c9431aae13452f5e6a90 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Thu, 9 May 2019 07:07:31 +0200
Subject: Update CHANGES.

---
 CHANGES | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGES b/CHANGES
index 9ebde077..bb6c3cca 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,11 @@ Issue numbers refer to the tracker at
 pull request numbers to the requests at .

+Version 2.4.1
+-------------
+
+- Support CSS variables in stylesheets (PR#814)
+
 Version 2.4.0
 -------------
 (released May 8, 2019)
-- cgit v1.2.1


From 41f5b07a3b9e099b3895eb6f239f8007156b2a84 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Thu, 9 May 2019 07:28:11 +0200
Subject: Rename the FSharp parser to F# (same as C#.)

fsharp continues to be a valid alias, so existing code which asks for the
FSharp lexer will continue to work.
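
As an illustrative sketch (not part of the patch), both spellings keep
resolving to the same lexer:

    from pygments.lexers import get_lexer_by_name

    # the old alias and the new one pick the same lexer class
    assert type(get_lexer_by_name('fsharp')) is type(get_lexer_by_name('f#'))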
---
 CHANGES                     | 1 +
 pygments/lexers/_mapping.py | 2 +-
 pygments/lexers/dotnet.py   | 9 +++------
 3 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/CHANGES b/CHANGES
index bb6c3cca..3de7f918 100644
--- a/CHANGES
+++ b/CHANGES
@@ -10,6 +10,7 @@ Version 2.4.1
 -------------

 - Support CSS variables in stylesheets (PR#814)
+- Fix F# lexer name (PR#709)

 Version 2.4.0
 -------------
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index aea19b4e..ce1b6dfc 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -153,7 +153,7 @@ LEXERS = {
     'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
     'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
     'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
-    'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+    'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
     'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
     'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
     'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 27ae77c5..1d3b1f38 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -541,16 +541,13 @@ class VbNetAspxLexer(DelegatingLexer):
 # Very close to functional.OcamlLexer
 class FSharpLexer(RegexLexer):
     """
-    For the F# language (version 3.0).
-
-    AAAAACK Strings
-    http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
+    For the `F# language `_ (version 3.0).

     .. versionadded:: 1.5
     """

-    name = 'FSharp'
-    aliases = ['fsharp']
+    name = 'F#'
+    aliases = ['fsharp', 'f#']
     filenames = ['*.fs', '*.fsi']
     mimetypes = ['text/x-fsharp']
-- cgit v1.2.1


From 893987b1d59e7d57c6fc856d8d413716ac91a538 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Thu, 9 May 2019 07:36:54 +0200
Subject: Update CHANGES.

---
 CHANGES | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGES b/CHANGES
index 3de7f918..3e134db4 100644
--- a/CHANGES
+++ b/CHANGES
@@ -9,6 +9,10 @@ pull request numbers to the requests at
 Version 2.4.1
 -------------

+- Updated lexers:
+
+  * MSDOS Session (PR#734)
+
 - Support CSS variables in stylesheets (PR#814)
 - Fix F# lexer name (PR#709)
-- cgit v1.2.1


From 3bdc13e48a8eaa88f48325098293b5fed8f676af Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 13:56:28 +0200
Subject: Add 'of' to TypeScript grammar (fixes #1515.)
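
An illustrative check (not part of the patch), assuming the default
TypeScriptLexer rules:

    from pygments.lexers import TypeScriptLexer
    from pygments.token import Keyword

    # 'of' in a for..of loop should now come out as a keyword
    # instead of a plain name.
    tokens = list(TypeScriptLexer().get_tokens('for (let x of xs) {}\n'))
    assert (Keyword, 'of') in tokens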
---
 pygments/lexers/javascript.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
index 0507375f..57e1161e 100644
--- a/pygments/lexers/javascript.py
+++ b/pygments/lexers/javascript.py
@@ -482,7 +482,7 @@ class TypeScriptLexer(RegexLexer):
             (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
             (r'[})\].]', Punctuation),
             (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
-             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|of|'
             r'this)\b', Keyword, 'slashstartsregex'),
             (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
             (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
-- cgit v1.2.1


From ef8476c4cb27220bb286f4410fe04ad22da0d2b4 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 14:03:24 +0200
Subject: Don't use bold for bright text in the console formatter (fixes #1480.)

---
 pygments/console.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pygments/console.py b/pygments/console.py
index e61744ce..e9f30ad8 100644
--- a/pygments/console.py
+++ b/pygments/console.py
@@ -30,7 +30,7 @@ light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brig
 x = 30
 for d, l in zip(dark_colors, light_colors):
     codes[d] = esc + "%im" % x
-    codes[l] = esc + "%i;01m" % x
+    codes[l] = esc + "%im" % (60 + x)
     x += 1

 del d, l, x
-- cgit v1.2.1


From 749842e13e5d44236716529f18c3471229fe12de Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 14:08:51 +0200
Subject: Stop highlighting primitive types in Coq (fixes #1430.)

---
 CHANGES                    | 3 +++
 pygments/lexers/theorem.py | 2 --
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/CHANGES b/CHANGES
index 3e134db4..a89168a0 100644
--- a/CHANGES
+++ b/CHANGES
@@ -11,10 +11,13 @@ Version 2.4.1

 - Updated lexers:

+  * Coq (#1430)
   * MSDOS Session (PR#734)
+  * TypeScript (#1515)

 - Support CSS variables in stylesheets (PR#814)
 - Fix F# lexer name (PR#709)
+- Fix ``TerminalFormatter`` using bold for bright text (#1480)

 Version 2.4.0
 -------------
diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py
index e84a398b..e7619c33 100644
--- a/pygments/lexers/theorem.py
+++ b/pygments/lexers/theorem.py
@@ -98,7 +98,6 @@ class CoqLexer(RegexLexer):
     operators = r'[!$%&*+\./:<=>?@^|~-]'
     prefix_syms = r'[!?~]'
     infix_syms = r'[=<>@^|&+\*/$%-]'
-    primitives = ('unit', 'nat', 'bool', 'string', 'ascii', 'list')

     tokens = {
         'root': [
@@ -115,7 +114,6 @@ class CoqLexer(RegexLexer):
             (r'\b([A-Z][\w\']*)', Name),
             (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
             (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
-            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),

             (r"[^\W\d][\w']*", Name),

-- cgit v1.2.1


From 3735e9c24e269822c84d865ed5d7e05265727944 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 14:18:34 +0200
Subject: Update terminal formatter tests.

In eea5abc9161a, we changed the TerminalFormatter to use bright instead of
bold, and this caused the tests here to fail.
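
For context (not part of the patch), a sketch of the escape codes the updated
tests expect, assuming the module-level codes table in pygments.console:

    from pygments.console import codes

    # bright colors now map to the dedicated 90-97 SGR codes
    # rather than bold ("01") variants of the dark colors
    assert codes['red'] == '\x1b[31m'
    assert codes['brightred'] == '\x1b[91m'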
---
 tests/test_terminal_formatter.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 1f44807d..acc15167 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -90,13 +90,13 @@ async def function(a,b,c, *d, **kwarg:Bool)->Bool:

     def test_256esc_seq(self):
         """
-        test that a few escape sequences are actualy used when using ansi<> color codes
+        test that a few escape sequences are actually used when using ansi<> color codes
         """
         def termtest(x):
             return highlight(x, Python3Lexer(), Terminal256Formatter(style=MyStyle))

-        self.assertTrue('32;41' in termtest('0x123'))
-        self.assertTrue('32;42' in termtest('123'))
-        self.assertTrue('30;01' in termtest('#comment'))
-        self.assertTrue('34;41' in termtest('"String"'))
+        self.assertTrue('32;101' in termtest('0x123'))
+        self.assertTrue('92;42' in termtest('123'))
+        self.assertTrue('90' in termtest('#comment'))
+        self.assertTrue('94;41' in termtest('"String"'))
-- cgit v1.2.1


From cbb7c47271c2a93f1a7e9c94d769ad060d4fcb73 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 14:22:26 +0200
Subject: Fix comment handling in Prolog lexer (#1511.)

# does not start a comment in Prolog.
---
 CHANGES                   | 1 +
 pygments/lexers/prolog.py | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES b/CHANGES
index a89168a0..d0da916d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -13,6 +13,7 @@ Version 2.4.1

   * Coq (#1430)
   * MSDOS Session (PR#734)
+  * Prolog (#1511)
   * TypeScript (#1515)

 - Support CSS variables in stylesheets (PR#814)
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
index 58e762b0..daf8db96 100644
--- a/pygments/lexers/prolog.py
+++ b/pygments/lexers/prolog.py
@@ -31,7 +31,6 @@ class PrologLexer(RegexLexer):

     tokens = {
         'root': [
-            (r'^#.*', Comment.Single),
             (r'/\*', Comment.Multiline, 'nested-comment'),
             (r'%.*', Comment.Single),
             # character literal
-- cgit v1.2.1


From 52dd1cba99c9f4f3495ee1e9b93ee615b80ba880 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Sat, 18 May 2019 14:48:13 +0200
Subject: Don't emit a trailing newline in nowrap mode (fixes #1514.)

---
 CHANGES                     | 2 ++
 pygments/formatters/html.py | 8 +++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/CHANGES b/CHANGES
index d0da916d..eaf47510 100644
--- a/CHANGES
+++ b/CHANGES
@@ -19,6 +19,8 @@ Version 2.4.1
 - Support CSS variables in stylesheets (PR#814)
 - Fix F# lexer name (PR#709)
 - Fix ``TerminalFormatter`` using bold for bright text (#1480)
+- Don't emit a trailing newline in the HtmlFormatter when ``nowrap`` is set
+  (#1514)

 Version 2.4.0
 -------------
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index d65c09ce..54e7df0e 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -803,7 +803,13 @@ class HtmlFormatter(Formatter):
             # else we neither have to open a new span nor set lspan

         if line:
-            line.extend(((lspan and '</span>'), lsep))
+            # If we're in nowrap mode, we try to make the output compact and
+            # omit the trailing newspace, this makes it easier to consume the
+            # HTML elsewhere
+            if self.nowrap:
+                line.append(lspan and '</span>')
+            else:
+                line.extend((lspan and '</span>', lsep))
             yield 1, ''.join(line)

     def _lookup_ctag(self, token):
-- cgit v1.2.1


From dcbf0c89d9ff12723d21410104b40739689e7afd Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Tue, 21 May 2019 18:10:56 +0200
Subject: Backed out changeset: df3df829861d

---
 CHANGES                     | 2 --
 pygments/formatters/html.py | 8 +-------
 2 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/CHANGES b/CHANGES
index eaf47510..d0da916d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -19,8 +19,6 @@ Version 2.4.1
 - Support CSS variables in stylesheets (PR#814)
 - Fix F# lexer name (PR#709)
 - Fix ``TerminalFormatter`` using bold for bright text (#1480)
-- Don't emit a trailing newline in the HtmlFormatter when ``nowrap`` is set
-  (#1514)

 Version 2.4.0
 -------------
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 54e7df0e..d65c09ce 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -803,13 +803,7 @@ class HtmlFormatter(Formatter):
             # else we neither have to open a new span nor set lspan

         if line:
-            # If we're in nowrap mode, we try to make the output compact and
-            # omit the trailing newspace, this makes it easier to consume the
-            # HTML elsewhere
-            if self.nowrap:
-                line.append(lspan and '</span>')
-            else:
-                line.extend((lspan and '</span>', lsep))
+            line.extend(((lspan and '</span>'), lsep))
             yield 1, ''.join(line)

     def _lookup_ctag(self, token):
-- cgit v1.2.1


From 8202e648945351366bd0c465d72953fae40f4783 Mon Sep 17 00:00:00 2001
From: "Matthäus G. Chajdas"
Date: Tue, 21 May 2019 18:30:17 +0200
Subject: Fix directive parsing in NasmLexer (fixes #1517.)

Directives were parsed independent of whitespace after them, which caused
the cpuid instruction to be parsed as CPU & id, instead of cpuid. We now
expect a whitespace character after a directive, which seems to match the
Nasm documentation.
---
 pygments/lexers/asm.py |  6 ++++--
 tests/test_asm.py      | 30 ++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 2 deletions(-)
 create mode 100644 tests/test_asm.py

diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index b522450c..3d2933d6 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -468,9 +468,11 @@ class NasmLexer(RegexLexer):
                 r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
     wordop = r'seg|wrt|strict'
     type = r'byte|[dq]?word'
-    directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
+    # Directives must be followed by whitespace, otherwise CPU will match
+    # cpuid for instance.
+    directives = (r'(?:BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
                   r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
-                  r'EXPORT|LIBRARY|MODULE')
+                  r'EXPORT|LIBRARY|MODULE)\s+')

     flags = re.IGNORECASE | re.MULTILINE
     tokens = {
diff --git a/tests/test_asm.py b/tests/test_asm.py
new file mode 100644
index 00000000..8eaed248
--- /dev/null
+++ b/tests/test_asm.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+"""
+    Basic ColdfusionHtmlLexer Test
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+import os
+
+from pygments.token import Token
+from pygments.lexers import NasmLexer
+
+
+class NasmLexerTest(unittest.TestCase):
+
+    def setUp(self):
+        self.lexer = NasmLexer()
+
+    def testCPUID(self):
+        # CPU is a valid directive, and we don't want to parse this as
+        # cpu id, but as a single token. See bug #1517
+        fragment = 'cpuid'
+        expected = [
+            (Token.Name.Function, u'cpuid'),
+            (Token.Text, u'\n'),
+        ]
+        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-- cgit v1.2.1