author     Georg Brandl <georg@python.org>    2022-10-27 07:19:07 +0200
committer  Georg Brandl <georg@python.org>    2022-10-27 11:58:29 +0200
commit     11dac32db5db1166b8d55dc85bc16d2085936c9a (patch)
tree       c5e59cbabf10790754b4f2bc915ed87b2dba8c09
parent     2b45339f7d6966b1780b28f3b792266f0f48214c (diff)
download   pygments-git-11dac32db5db1166b8d55dc85bc16d2085936c9a.tar.gz
all: style fixes
107 files changed, 707 insertions, 931 deletions
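The hunks below repeat two mechanical patterns: unused token and module imports are dropped, and long import lists, bygroups() rules, and words() lists are wrapped with continuation lines so they stay within the 79-column limit. The sketch below is illustrative only and is not copied from any hunk in this commit; the ExampleLexer class and its rules are hypothetical, but the import and wrapping style follows what the diff applies across the real lexers.

    from pygments.lexer import RegexLexer, bygroups, words
    from pygments.token import Comment, Keyword, Name, Number, Operator, \
        Punctuation, String, Whitespace


    class ExampleLexer(RegexLexer):
        """Hypothetical lexer, used only to illustrate the wrapped layout."""
        name = 'Example'
        aliases = ['example']

        tokens = {
            'root': [
                # Long bygroups() rules are broken after a comma and the
                # continuation is indented, instead of running past 79 cols.
                (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)',
                 bygroups(Name.Attribute, Operator, Number.Integer)),
                # words() lists are likewise wrapped rather than kept on
                # one long line.
                (words(('if', 'else', 'for', 'while', 'return'),
                       suffix=r'\b'), Keyword),
                (r'"[^"]*"', String.Double),
                (r'[(),;]', Punctuation),
                (r'#.*$', Comment.Single),
                (r'\s+', Whitespace),
                (r'\w+', Name),
            ],
        }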
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py index 809f0168..58de5fe3 100644 --- a/pygments/formatters/__init__.py +++ b/pygments/formatters/__init__.py @@ -8,7 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re import sys import types from fnmatch import fnmatch diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py index 8b163a03..83be0e48 100644 --- a/pygments/lexers/__init__.py +++ b/pygments/lexers/__init__.py @@ -8,7 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re import sys import types from fnmatch import fnmatch diff --git a/pygments/lexers/_lua_builtins.py b/pygments/lexers/_lua_builtins.py index 08bcfb17..a31f6b3b 100644 --- a/pygments/lexers/_lua_builtins.py +++ b/pygments/lexers/_lua_builtins.py @@ -176,7 +176,6 @@ MODULES = {'basic': ('_G', if __name__ == '__main__': # pragma: no cover import re - import sys from urllib.request import urlopen import pprint diff --git a/pygments/lexers/_sourcemod_builtins.py b/pygments/lexers/_sourcemod_builtins.py index 3884b4b6..6f1392dc 100644 --- a/pygments/lexers/_sourcemod_builtins.py +++ b/pygments/lexers/_sourcemod_builtins.py @@ -1093,7 +1093,6 @@ FUNCTIONS = ( if __name__ == '__main__': # pragma: no cover import re - import sys from urllib.request import FancyURLopener from pygments.util import format_lines diff --git a/pygments/lexers/ada.py b/pygments/lexers/ada.py index b241afe2..34fe9895 100644 --- a/pygments/lexers/ada.py +++ b/pygments/lexers/ada.py @@ -10,17 +10,12 @@ import re -from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ - using, this, default -from pygments.util import get_bool_opt, get_list_opt +from pygments.lexer import RegexLexer, include, bygroups, words, using, this, \ + default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error -from pygments.scanner import Scanner + Number, Punctuation from pygments.lexers._ada_builtins import KEYWORD_LIST, BUILTIN_LIST -# compatibility import -from pygments.lexers.modula2 import Modula2Lexer - __all__ = ['AdaLexer'] diff --git a/pygments/lexers/algebra.py b/pygments/lexers/algebra.py index 058906d6..8c97eccc 100644 --- a/pygments/lexers/algebra.py +++ b/pygments/lexers/algebra.py @@ -14,7 +14,8 @@ from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Whitespace -__all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer', 'BCLexer'] +__all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer', + 'BCLexer'] class GAPLexer(RegexLexer): diff --git a/pygments/lexers/ambient.py b/pygments/lexers/ambient.py index a5a7c576..abe61268 100644 --- a/pygments/lexers/ambient.py +++ b/pygments/lexers/ambient.py @@ -11,7 +11,7 @@ import re from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['AmbientTalkLexer'] diff --git a/pygments/lexers/ampl.py b/pygments/lexers/ampl.py index fc26f3cf..a3cf6057 100644 --- a/pygments/lexers/ampl.py +++ b/pygments/lexers/ampl.py @@ -59,7 +59,8 @@ class AmplLexer(RegexLexer): bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Variable, Whitespace, Punctuation, Whitespace, Name.Variable)), 
(r'(let|fix|unfix)(\s*)((?:\{.*\})?)(\s*)(\w+)', - bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace, Name.Variable)), + bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace, + Name.Variable)), (words(( 'abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor', 'log', 'log10', diff --git a/pygments/lexers/apdlexer.py b/pygments/lexers/apdlexer.py index 6bc16414..74ef7511 100644 --- a/pygments/lexers/apdlexer.py +++ b/pygments/lexers/apdlexer.py @@ -11,7 +11,7 @@ import re from pygments.lexer import RegexLexer, include, words -from pygments.token import Comment, Keyword, Name, Text, Number, Operator, \ +from pygments.token import Comment, Keyword, Name, Number, Operator, \ String, Generic, Punctuation, Whitespace __all__ = ['apdlexer'] diff --git a/pygments/lexers/apl.py b/pygments/lexers/apl.py index 944551f1..57905b53 100644 --- a/pygments/lexers/apl.py +++ b/pygments/lexers/apl.py @@ -9,7 +9,7 @@ """ from pygments.lexer import RegexLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['APLLexer'] diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py index 0afcce8a..1496d226 100644 --- a/pygments/lexers/archetype.py +++ b/pygments/lexers/archetype.py @@ -40,7 +40,8 @@ class AtomsLexer(RegexLexer): ], 'archetype_id': [ (r'([ \t]*)(([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}' - r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)', bygroups(Whitespace, Name.Decorator)), + r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)', + bygroups(Whitespace, Name.Decorator)), ], 'date_constraints': [ # ISO 8601-based date/time constraints @@ -271,7 +272,8 @@ class AdlLexer(AtomsLexer): # repeating the following two rules from the root state enable multi-line # strings that start in the first column to be dealt with (r'^(language|description|ontology|terminology|annotations|' - r'component_terminologies|revision_history)([ \t]*\n)', bygroups(Generic.Heading, Whitespace)), + r'component_terminologies|revision_history)([ \t]*\n)', + bygroups(Generic.Heading, Whitespace)), (r'^(definition)([ \t]*\n)', bygroups(Generic.Heading, Whitespace), 'cadl_section'), (r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)), (r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)), diff --git a/pygments/lexers/asc.py b/pygments/lexers/asc.py index 18d779f6..41544673 100644 --- a/pygments/lexers/asc.py +++ b/pygments/lexers/asc.py @@ -17,7 +17,8 @@ __all__ = ['AscLexer'] class AscLexer(RegexLexer): """ - Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped base64 data. + Lexer for ASCII armored files, containing `-----BEGIN/END ...-----` wrapped + base64 data. .. 
versionadded:: 2.10 """ @@ -26,9 +27,11 @@ class AscLexer(RegexLexer): filenames = [ '*.asc', # PGP; *.gpg, *.pgp, and *.sig too, but those can be binary '*.pem', # X.509; *.cer, *.crt, *.csr, and key etc too, but those can be binary - 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa', # SSH private keys + 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', + 'id_rsa', # SSH private keys ] - mimetypes = ['application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature'] + mimetypes = ['application/pgp-keys', 'application/pgp-encrypted', + 'application/pgp-signature'] flags = re.MULTILINE @@ -40,7 +43,8 @@ class AscLexer(RegexLexer): ], 'data': [ (r'\s+', Whitespace), - (r'^([^:]+)(:)([ \t]+)(.*)', bygroups(Name.Attribute, Operator, Whitespace, String)), + (r'^([^:]+)(:)([ \t]+)(.*)', + bygroups(Name.Attribute, Operator, Whitespace, String)), (r'^-----END [^\n]+-----$', Generic.Heading, 'root'), (r'\S+', String), ], diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index aa24f00b..024b9ead 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -862,7 +862,8 @@ class TasmLexer(RegexLexer): (register, Name.Builtin), (identifier, Name.Variable), # Do not match newline when it's preceded by a backslash - (r'(\\)(\s*)(;.*)([\r\n])', bygroups(Text, Whitespace, Comment.Single, Whitespace)), + (r'(\\)(\s*)(;.*)([\r\n])', + bygroups(Text, Whitespace, Comment.Single, Whitespace)), (r'[\r\n]+', Whitespace, '#pop'), include('whitespace') ], diff --git a/pygments/lexers/bare.py b/pygments/lexers/bare.py index c1d18337..a9de9bad 100644 --- a/pygments/lexers/bare.py +++ b/pygments/lexers/bare.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, words, bygroups from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace @@ -66,7 +64,8 @@ class BareLexer(RegexLexer): 'struct': [ (r'\{', Text, '#push'), (r'\}', Text, '#pop'), - (r'([a-zA-Z0-9]+)(:)(\s*)', bygroups(Name.Attribute, Text, Whitespace), 'typedef'), + (r'([a-zA-Z0-9]+)(:)(\s*)', + bygroups(Name.Attribute, Text, Whitespace), 'typedef'), (r'\s+', Whitespace), ], 'union': [ @@ -94,7 +93,8 @@ class BareLexer(RegexLexer): 'enum': [ (r'\{', Text, '#push'), (r'\}', Text, '#pop'), - (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)', bygroups(Name.Attribute, Text, Literal)), + (r'([A-Z][A-Z0-9_]*)(\s*=\s*)(\d+)', + bygroups(Name.Attribute, Text, Literal)), (r'([A-Z][A-Z0-9_]*)', bygroups(Name.Attribute)), (r'#.*?$', Comment), (r'\s+', Whitespace), diff --git a/pygments/lexers/basic.py b/pygments/lexers/basic.py index f0a33ff9..e726e05e 100644 --- a/pygments/lexers/basic.py +++ b/pygments/lexers/basic.py @@ -625,7 +625,8 @@ class BBCBasicLexer(RegexLexer): # Some special cases to make functions come out nicer (r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)', - bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)), + bygroups(Keyword.Declaration, Whitespace, + Keyword.Declaration, Name.Function)), (r'(FN|PROC)([A-Za-z_@][\w@]*)', bygroups(Keyword, Name.Function)), @@ -633,7 +634,8 @@ class BBCBasicLexer(RegexLexer): bygroups(Keyword, Whitespace, Name.Label)), (r'(TRUE|FALSE)', Keyword.Constant), - (r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo), + (r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', + Keyword.Pseudo), (words(base_keywords), Keyword), (words(basic5_keywords), Keyword), diff --git a/pygments/lexers/bdd.py b/pygments/lexers/bdd.py 
index f9b582ce..4dbe33ef 100644 --- a/pygments/lexers/bdd.py +++ b/pygments/lexers/bdd.py @@ -2,7 +2,7 @@ pygments.lexers.bdd ~~~~~~~~~~~~~~~~~~~ - Lexer for BDD(Behavior-driven development). + Lexer for BDD(Behavior-driven development). More information: https://en.wikipedia.org/wiki/Behavior-driven_development :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. @@ -10,14 +10,15 @@ """ from pygments.lexer import RegexLexer, include -from pygments.token import Comment, Keyword, Name, String, Number, Text, Punctuation, Whitespace +from pygments.token import Comment, Keyword, Name, String, Number, Text, \ + Punctuation, Whitespace __all__ = ['BddLexer'] class BddLexer(RegexLexer): """ - Lexer for BDD(Behavior-driven development), which highlights not only keywords, - but also comments, punctuations, strings, numbers, and variables. + Lexer for BDD(Behavior-driven development), which highlights not only + keywords, but also comments, punctuations, strings, numbers, and variables. .. versionadded:: 2.11 """ @@ -27,7 +28,8 @@ class BddLexer(RegexLexer): filenames = ['*.feature'] mimetypes = ['text/x-bdd'] - step_keywords = r'Given|When|Then|Add|And|Feature|Scenario Outline|Scenario|Background|Examples|But' + step_keywords = (r'Given|When|Then|Add|And|Feature|Scenario Outline|' + r'Scenario|Background|Examples|But') tokens = { 'comments': [ @@ -47,7 +49,7 @@ class BddLexer(RegexLexer): (step_keywords, Keyword), include('comments'), include('miscellaneous'), - include('numbers'), + include('numbers'), (r'\S+', Text), ] } diff --git a/pygments/lexers/berry.py b/pygments/lexers/berry.py index d7525e98..ec7ce2ed 100644 --- a/pygments/lexers/berry.py +++ b/pygments/lexers/berry.py @@ -9,19 +9,22 @@ """ import re -from pygments.lexer import RegexLexer, words, default, include, bygroups -from pygments.token import Text, Comment, Whitespace, Operator, Keyword, Name, String, Number, Punctuation + +from pygments.lexer import RegexLexer, words, include, bygroups +from pygments.token import Comment, Whitespace, Operator, Keyword, Name, \ + String, Number, Punctuation __all__ = ['BerryLexer'] line_re = re.compile('.*?\n') + class BerryLexer(RegexLexer): """ For `berry <http://github.com/berry-lang/berry>`_ source code. .. 
versionadded:: 2.12.0 - """ + """ name = 'Berry' aliases = ['berry', 'be'] filenames = ['*.be'] @@ -34,9 +37,12 @@ class BerryLexer(RegexLexer): include('whitespace'), include('numbers'), include('keywords'), - (rf'(def)(\s+)({_name})', bygroups(Keyword.Declaration, Whitespace, Name.Function)), - (rf'\b(class)(\s+)({_name})', bygroups(Keyword.Declaration, Whitespace, Name.Class)), - (rf'\b(import)(\s+)({_name})', bygroups(Keyword.Namespace, Whitespace, Name.Namespace)), + (rf'(def)(\s+)({_name})', + bygroups(Keyword.Declaration, Whitespace, Name.Function)), + (rf'\b(class)(\s+)({_name})', + bygroups(Keyword.Declaration, Whitespace, Name.Class)), + (rf'\b(import)(\s+)({_name})', + bygroups(Keyword.Namespace, Whitespace, Name.Namespace)), include('expr') ], 'expr': [ @@ -57,14 +63,14 @@ class BerryLexer(RegexLexer): ], 'keywords': [ (words(( - 'as', 'break', 'continue', 'import', 'static', 'self', 'super'), + 'as', 'break', 'continue', 'import', 'static', 'self', 'super'), suffix=r'\b'), Keyword.Reserved), (r'(true|false|nil)\b', Keyword.Constant), (r'(var|def)\b', Keyword.Declaration) ], 'controls': [ (words(( - 'if', 'elif', 'else', 'for', 'while', 'do', 'end', 'break', + 'if', 'elif', 'else', 'for', 'while', 'do', 'end', 'break', 'continue', 'return', 'try', 'except', 'raise'), suffix=r'\b'), Keyword) ], @@ -73,7 +79,7 @@ class BerryLexer(RegexLexer): 'assert', 'bool', 'input', 'classname', 'classof', 'number', 'real', 'bytes', 'compile', 'map', 'list', 'int', 'isinstance', 'print', 'range', 'str', 'super', 'module', 'size', 'issubclass', 'open', - 'file', 'type', 'call'), + 'file', 'type', 'call'), suffix=r'\b'), Name.Builtin) ], 'numbers': [ diff --git a/pygments/lexers/bibtex.py b/pygments/lexers/bibtex.py index 4a7c1e51..cbc5a8e9 100644 --- a/pygments/lexers/bibtex.py +++ b/pygments/lexers/bibtex.py @@ -12,8 +12,8 @@ import re from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, \ words -from pygments.token import Name, Comment, String, Error, Number, Text, \ - Keyword, Punctuation, Whitespace +from pygments.token import Name, Comment, String, Error, Number, Keyword, \ + Punctuation, Whitespace __all__ = ['BibTeXLexer', 'BSTLexer'] diff --git a/pygments/lexers/boa.py b/pygments/lexers/boa.py index ce5e09be..2cbc018d 100644 --- a/pygments/lexers/boa.py +++ b/pygments/lexers/boa.py @@ -11,8 +11,8 @@ import re from pygments.lexer import RegexLexer, words -from pygments.token import String, Comment, Keyword, Name, Number, Text, \ - Operator, Punctuation, Whitespace +from pygments.token import String, Comment, Keyword, Name, Number, Operator, \ + Punctuation, Whitespace __all__ = ['BoaLexer'] diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py index 4b5aa2a0..65f3c4bc 100644 --- a/pygments/lexers/c_cpp.py +++ b/pygments/lexers/c_cpp.py @@ -64,9 +64,12 @@ class CFamilyLexer(RegexLexer): ('^(' + _ws1 + ')(#)', bygroups(using(this), Comment.Preproc), 'macro'), # Labels: - (r'(^[ \t]*)' # Line start and possible indentation. - r'(?!(?:public|private|protected|default)\b)' # Not followed by keywords which can be mistaken as labels. - r'(' + _ident + r')(\s*)(:)(?!:)', # Actual label, followed by a single colon. + # Line start and possible indentation. + (r'(^[ \t]*)' + # Not followed by keywords which can be mistaken as labels. + r'(?!(?:public|private|protected|default)\b)' + # Actual label, followed by a single colon. 
+ r'(' + _ident + r')(\s*)(:)(?!:)', bygroups(Whitespace, Name.Label, Whitespace, Punctuation)), (r'\n', Whitespace), (r'[^\S\n]+', Whitespace), @@ -84,10 +87,13 @@ class CFamilyLexer(RegexLexer): bygroups(String.Affix, String.Char, String.Char, String.Char)), # Hexadecimal floating-point literals (C11, C++17) - (r'0[xX](' + _hexpart + r'\.' + _hexpart + r'|\.' + _hexpart + r'|' + _hexpart + r')[pP][+-]?' + _hexpart + r'[lL]?', Number.Float), + (r'0[xX](' + _hexpart + r'\.' + _hexpart + r'|\.' + _hexpart + + r'|' + _hexpart + r')[pP][+-]?' + _hexpart + r'[lL]?', Number.Float), - (r'(-)?(' + _decpart + r'\.' + _decpart + r'|\.' + _decpart + r'|' + _decpart + r')[eE][+-]?' + _decpart + r'[fFlL]?', Number.Float), - (r'(-)?((' + _decpart + r'\.(' + _decpart + r')?|\.' + _decpart + r')[fFlL]?)|(' + _decpart + r'[fFlL])', Number.Float), + (r'(-)?(' + _decpart + r'\.' + _decpart + r'|\.' + _decpart + r'|' + + _decpart + r')[eE][+-]?' + _decpart + r'[fFlL]?', Number.Float), + (r'(-)?((' + _decpart + r'\.(' + _decpart + r')?|\.' + + _decpart + r')[fFlL]?)|(' + _decpart + r'[fFlL])', Number.Float), (r'(-)?0[xX]' + _hexpart + _intsuffix, Number.Hex), (r'(-)?0[bB][01](\'?[01])*' + _intsuffix, Number.Bin), (r'(-)?0(\'?[0-7])+' + _intsuffix, Number.Oct), @@ -106,7 +112,7 @@ class CFamilyLexer(RegexLexer): 'keywords': [ (r'(struct|union)(\s+)', bygroups(Keyword, Whitespace), 'classname'), (r'case\b', Keyword, 'case-value'), - (words(('asm', 'auto', 'break', 'const', 'continue', 'default', + (words(('asm', 'auto', 'break', 'const', 'continue', 'default', 'do', 'else', 'enum', 'extern', 'for', 'goto', 'if', 'register', 'restricted', 'return', 'sizeof', 'struct', 'static', 'switch', 'typedef', 'volatile', 'while', 'union', @@ -135,8 +141,10 @@ class CFamilyLexer(RegexLexer): r'(\([^;"\')]*?\))' # signature r'(' + _possible_comments + r')' r'([^;{/"\']*)(\{)', - bygroups(using(this), using(this, state='whitespace'), Name.Function, using(this, state='whitespace'), - using(this), using(this, state='whitespace'), using(this), Punctuation), + bygroups(using(this), using(this, state='whitespace'), + Name.Function, using(this, state='whitespace'), + using(this), using(this, state='whitespace'), + using(this), Punctuation), 'function'), # function declarations (r'(' + _namespaced_ident + r'(?:[&*\s])+)' # return arguments @@ -146,8 +154,10 @@ class CFamilyLexer(RegexLexer): r'(\([^;"\')]*?\))' # signature r'(' + _possible_comments + r')' r'([^;/"\']*)(;)', - bygroups(using(this), using(this, state='whitespace'), Name.Function, using(this, state='whitespace'), - using(this), using(this, state='whitespace'), using(this), Punctuation)), + bygroups(using(this), using(this, state='whitespace'), + Name.Function, using(this, state='whitespace'), + using(this), using(this, state='whitespace'), + using(this), Punctuation)), include('types'), default('statement'), ], @@ -174,9 +184,11 @@ class CFamilyLexer(RegexLexer): ], 'macro': [ (r'('+_ws1+r')(include)('+_ws1+r')("[^"]+")([^\n]*)', - bygroups(using(this), Comment.Preproc, using(this), Comment.PreprocFile, Comment.Single)), + bygroups(using(this), Comment.Preproc, using(this), + Comment.PreprocFile, Comment.Single)), (r'('+_ws1+r')(include)('+_ws1+r')(<[^>]+>)([^\n]*)', - bygroups(using(this), Comment.Preproc, using(this), Comment.PreprocFile, Comment.Single)), + bygroups(using(this), Comment.Preproc, using(this), + Comment.PreprocFile, Comment.Single)), (r'[^/\n]+', Comment.Preproc), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'//.*?\n', Comment.Single, '#pop'), @@ 
-221,12 +233,12 @@ class CFamilyLexer(RegexLexer): 'mode_t', 'nfds_t', 'pid_t', 'rlim_t', 'sig_t', 'sighandler_t', 'siginfo_t', 'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t'} c11_atomic_types = { - 'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short', + 'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short', 'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong', - 'atomic_llong', 'atomic_ullong', 'atomic_char16_t', 'atomic_char32_t', 'atomic_wchar_t', - 'atomic_int_least8_t', 'atomic_uint_least8_t', 'atomic_int_least16_t', + 'atomic_llong', 'atomic_ullong', 'atomic_char16_t', 'atomic_char32_t', 'atomic_wchar_t', + 'atomic_int_least8_t', 'atomic_uint_least8_t', 'atomic_int_least16_t', 'atomic_uint_least16_t', 'atomic_int_least32_t', 'atomic_uint_least32_t', - 'atomic_int_least64_t', 'atomic_uint_least64_t', 'atomic_int_fast8_t', + 'atomic_int_least64_t', 'atomic_uint_least64_t', 'atomic_int_fast8_t', 'atomic_uint_fast8_t', 'atomic_int_fast16_t', 'atomic_uint_fast16_t', 'atomic_int_fast32_t', 'atomic_uint_fast32_t', 'atomic_int_fast64_t', 'atomic_uint_fast64_t', 'atomic_intptr_t', 'atomic_uintptr_t', 'atomic_size_t', @@ -286,7 +298,7 @@ class CLexer(CFamilyLexer): tokens = { 'keywords': [ (words(( - '_Alignas', '_Alignof', '_Noreturn', '_Generic', '_Thread_local', + '_Alignas', '_Alignof', '_Noreturn', '_Generic', '_Thread_local', '_Static_assert', '_Imaginary', 'noreturn', 'imaginary', 'complex'), suffix=r'\b'), Keyword), inherit @@ -370,7 +382,7 @@ class CppLexer(CFamilyLexer): 'private', 'protected', 'public', 'reinterpret_cast', 'class', 'restrict', 'static_cast', 'template', 'this', 'throw', 'throws', 'try', 'typeid', 'using', 'virtual', 'constexpr', 'nullptr', 'concept', - 'decltype', 'noexcept', 'override', 'final', 'constinit', 'consteval', + 'decltype', 'noexcept', 'override', 'final', 'constinit', 'consteval', 'co_await', 'co_return', 'co_yield', 'requires', 'import', 'module', 'typename'), suffix=r'\b'), Keyword), diff --git a/pygments/lexers/c_like.py b/pygments/lexers/c_like.py index 7e44270a..96a32260 100644 --- a/pygments/lexers/c_like.py +++ b/pygments/lexers/c_like.py @@ -38,10 +38,11 @@ class PikeLexer(CppLexer): 'statements': [ (words(( 'catch', 'new', 'private', 'protected', 'public', 'gauge', - 'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from', - 'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern', - 'inline', 'proto', 'break', 'continue', 'if', 'else', 'for', - 'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null', + 'throw', 'throws', 'class', 'interface', 'implement', 'abstract', + 'extends', 'from', 'this', 'super', 'constant', 'final', 'static', + 'import', 'use', 'extern', 'inline', 'proto', 'break', 'continue', + 'if', 'else', 'for', 'while', 'do', 'switch', 'case', 'as', 'in', + 'version', 'return', 'true', 'false', 'null', '__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__', '__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__', '__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__', diff --git a/pygments/lexers/capnproto.py b/pygments/lexers/capnproto.py index 202b0727..18107964 100644 --- a/pygments/lexers/capnproto.py +++ b/pygments/lexers/capnproto.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. 
""" -import re - from pygments.lexer import RegexLexer, default from pygments.token import Text, Comment, Keyword, Name, Literal, Whitespace diff --git a/pygments/lexers/cddl.py b/pygments/lexers/cddl.py index 1d29787c..e55b655a 100644 --- a/pygments/lexers/cddl.py +++ b/pygments/lexers/cddl.py @@ -12,24 +12,12 @@ :license: BSD, see LICENSE for details. """ -import re +from pygments.lexer import RegexLexer, bygroups, include, words +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Whitespace __all__ = ['CddlLexer'] -from pygments.lexer import RegexLexer, bygroups, include, words -from pygments.token import ( - Comment, - Error, - Keyword, - Name, - Number, - Operator, - Punctuation, - String, - Text, - Whitespace, -) - class CddlLexer(RegexLexer): """ @@ -143,15 +131,11 @@ class CddlLexer(RegexLexer): # Barewords as member keys (must be matched before values, types, typenames, # groupnames). # Token type is String as barewords are always interpreted as such. - ( - r"({bareword})(\s*)(:)".format(bareword=_re_id), - bygroups(String, Whitespace, Punctuation), - ), + (r"({bareword})(\s*)(:)".format(bareword=_re_id), + bygroups(String, Whitespace, Punctuation)), # predefined types - ( - words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"), - Name.Builtin, - ), + (words(_prelude_types, prefix=r"(?![\-_$@])\b", suffix=r"\b(?![\-_$@])"), + Name.Builtin), # user-defined groupnames, typenames (_re_id, Name.Class), # values @@ -160,10 +144,8 @@ class CddlLexer(RegexLexer): (r"0x[0-9a-fA-F]+(\.[0-9a-fA-F]+)?p[+-]?\d+", Number.Hex), # hexfloat (r"0x[0-9a-fA-F]+", Number.Hex), # hex # Float - ( - r"{int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?".format(int=_re_int), - Number.Float, - ), + (r"{int}(?=(\.\d|e[+-]?\d))(?:\.\d+)?(?:e[+-]?\d+)?".format(int=_re_int), + Number.Float), # Int (_re_int, Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), diff --git a/pygments/lexers/comal.py b/pygments/lexers/comal.py index 0235cab6..258d32e9 100644 --- a/pygments/lexers/comal.py +++ b/pygments/lexers/comal.py @@ -34,7 +34,7 @@ class Comal80Lexer(RegexLexer): # _suffix = r"\b(?!['\[\]←£\\])" _identifier = r"[a-z]['\[\]←£\\\w]*" - + tokens = { 'root': [ (r'//.*\n', Comment.Single), diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 6f61d98e..992296d9 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -47,7 +47,8 @@ class IniLexer(RegexLexer): (r'[;#].*', Comment.Single), (r'(\[.*?\])([ \t]*)$', bygroups(Keyword, Whitespace)), (r'(.*?)([ \t]*)([=:])([ \t]*)([^;#\n]*)(\\)(\s+)', - bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String, Text, Whitespace), + bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String, + Text, Whitespace), "value"), (r'(.*?)([ \t]*)([=:])([ \t]*)([^ ;#\n]*(?: +[^ ;#\n]+)*)', bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String)), @@ -744,14 +745,12 @@ class TerraformLexer(ExtendedRegexLexer): bygroups(Keyword.Reserved, Whitespace, Name.Class, Whitespace, Name.Variable, Whitespace, Punctuation)), # here-doc style delimited strings - ( - r'(<<-?)\s*([a-zA-Z_]\w*)(.*?\n)', - heredoc_callback, - ) + (r'(<<-?)\s*([a-zA-Z_]\w*)(.*?\n)', heredoc_callback), ], 'identifier': [ (r'\b(var\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)), - (r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)), + (r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b', + bygroups(Name.Variable)), ], 'punctuation': [ (r'[\[\]()\{\},.?:!=]', 
Punctuation), diff --git a/pygments/lexers/cplint.py b/pygments/lexers/cplint.py index 5c10885f..9ef3c9be 100644 --- a/pygments/lexers/cplint.py +++ b/pygments/lexers/cplint.py @@ -3,25 +3,22 @@ ~~~~~~~~~~~~~~~~~~~~~~ Lexer for the cplint language - + :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import bygroups, inherit, words from pygments.lexers import PrologLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation +from pygments.token import Operator, Keyword, Name, String, Punctuation __all__ = ['CplintLexer'] class CplintLexer(PrologLexer): """ - Lexer for cplint files, including CP-logic, Logic Programs with Annotated Disjunctions, - Distributional Clauses syntax, ProbLog, DTProbLog + Lexer for cplint files, including CP-logic, Logic Programs with Annotated + Disjunctions, Distributional Clauses syntax, ProbLog, DTProbLog. .. versionadded:: 2.12 """ @@ -33,15 +30,15 @@ class CplintLexer(PrologLexer): tokens = { 'root': [ - (r'map_query',Keyword), - (words(('gaussian','uniform_dens','dirichlet','gamma','beta','poisson','binomial','geometric', - 'exponential','pascal','multinomial','user','val', - 'uniform','discrete','finite')),Name.Builtin), - (r'([a-z]+)(:)', bygroups(String.Atom, Punctuation)), # annotations of atoms + (r'map_query', Keyword), + (words(('gaussian', 'uniform_dens', 'dirichlet', 'gamma', 'beta', + 'poisson', 'binomial', 'geometric', 'exponential', 'pascal', + 'multinomial', 'user', 'val', 'uniform', 'discrete', + 'finite')), Name.Builtin), + # annotations of atoms + (r'([a-z]+)(:)', bygroups(String.Atom, Punctuation)), (r':(-|=)|::?|~=?|=>', Operator), (r'\?', Name.Builtin), inherit, ], } - - diff --git a/pygments/lexers/crystal.py b/pygments/lexers/crystal.py index 7154a9b6..c7f0c54d 100644 --- a/pygments/lexers/crystal.py +++ b/pygments/lexers/crystal.py @@ -10,10 +10,10 @@ import re -from pygments.lexer import ExtendedRegexLexer, include, \ - bygroups, default, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error, Whitespace +from pygments.lexer import ExtendedRegexLexer, include, bygroups, default, \ + words +from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ + Punctuation, Error, Whitespace __all__ = ['CrystalLexer'] diff --git a/pygments/lexers/csound.py b/pygments/lexers/csound.py index eddc834e..014a6f57 100644 --- a/pygments/lexers/csound.py +++ b/pygments/lexers/csound.py @@ -25,7 +25,7 @@ newline = (r'((?:(?:;|//).*)*)(\n)', bygroups(Comment.Single, Text)) class CsoundLexer(RegexLexer): url = 'https://csound.com/' - + tokens = { 'whitespace': [ (r'[ \t]+', Whitespace), diff --git a/pygments/lexers/css.py b/pygments/lexers/css.py index 9cbd0d33..4d7cb46f 100644 --- a/pygments/lexers/css.py +++ b/pygments/lexers/css.py @@ -13,8 +13,8 @@ import copy from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \ default, words, inherit -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace +from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ + Punctuation, Whitespace from pygments.lexers._css_builtins import _css_properties __all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer'] diff --git a/pygments/lexers/d.py b/pygments/lexers/d.py index 672ffd3e..10fa3b55 100644 --- a/pygments/lexers/d.py +++ b/pygments/lexers/d.py @@ 
-9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Text, Comment, Keyword, Name, String, \ - Number, Punctuation, Whitespace +from pygments.token import Comment, Keyword, Name, String, Number, \ + Punctuation, Whitespace __all__ = ['DLexer', 'CrocLexer', 'MiniDLexer'] diff --git a/pygments/lexers/dalvik.py b/pygments/lexers/dalvik.py index 11cc230d..4380f0e0 100644 --- a/pygments/lexers/dalvik.py +++ b/pygments/lexers/dalvik.py @@ -51,8 +51,10 @@ class SmaliLexer(RegexLexer): r'sparse-switch|catchall|catch|line|parameter|local|prologue|' r'epilogue|source))', bygroups(Whitespace, Keyword)), (r'^([ \t]*)(\.end)( )(field|subannotation|annotation|method|array-data|' - 'packed-switch|sparse-switch|parameter|local)', bygroups(Whitespace, Keyword, Whitespace, Keyword)), - (r'^([ \t]*)(\.restart)( )(local)', bygroups(Whitespace, Keyword, Whitespace, Keyword)), + 'packed-switch|sparse-switch|parameter|local)', + bygroups(Whitespace, Keyword, Whitespace, Keyword)), + (r'^([ \t]*)(\.restart)( )(local)', + bygroups(Whitespace, Keyword, Whitespace, Keyword)), ], 'access-modifier': [ (r'(public|private|protected|static|final|synchronized|bridge|' diff --git a/pygments/lexers/dsls.py b/pygments/lexers/dsls.py index 0b51341a..34e9b55f 100644 --- a/pygments/lexers/dsls.py +++ b/pygments/lexers/dsls.py @@ -232,7 +232,7 @@ class ZeekLexer(RegexLexer): 'directives': [ (r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc), (r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc), - (r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc, + (r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc, Whitespace, Comment.Preproc)), ], diff --git a/pygments/lexers/dylan.py b/pygments/lexers/dylan.py index 38ddfb4a..e3a8e304 100644 --- a/pygments/lexers/dylan.py +++ b/pygments/lexers/dylan.py @@ -11,7 +11,7 @@ import re from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Literal, Whitespace __all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer'] diff --git a/pygments/lexers/ecl.py b/pygments/lexers/ecl.py index 7fa243a0..e092997a 100644 --- a/pygments/lexers/ecl.py +++ b/pygments/lexers/ecl.py @@ -11,7 +11,7 @@ import re from pygments.lexer import RegexLexer, include, bygroups, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['ECLLexer'] @@ -79,39 +79,48 @@ class ECLLexer(RegexLexer): Keyword.Reserved), # These are classed differently, check later (words(( - 'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE', - 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT', - 'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', - 'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 'JOINED', - 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO', - 'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', - 'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE', - 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP', - 'REPEAT', 
'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', - 'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM', - 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', - 'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'), + 'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', + 'BETWEEN', 'CASE', 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', + 'ENDC++', 'ENDMACRO', 'EXCEPT', 'EXCLUSIVE', 'EXPIRE', 'EXPORT', + 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', 'FUNCTION', + 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', + 'JOINED', 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', + 'LOCALE', 'LOOKUP', 'MACRO', 'MANY', 'MAXCOUNT', 'MAXLENGTH', + 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', 'NOROOT', + 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', + 'OVERWRITE', 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', + 'PIPE', 'QUOTE', 'RELATIONSHIP', 'REPEAT', 'RETURN', 'RIGHT', + 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', 'SKIP', + 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', + 'TRANSFORM', 'TRIM', 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', + 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', 'WITHIN', 'XML', 'XPATH', + '__COMPRESSED__'), suffix=r'\b'), Keyword.Reserved), ], 'functions': [ (words(( - 'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE', - 'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', - 'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE', - 'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE', - 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE', - 'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32', - 'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID', - 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', - 'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', - 'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE', - 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED', - 'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', - 'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF', - 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', - 'THISNODE', 'TOPN', 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP', - 'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE', - 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'), + 'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', + 'ATAN2', 'AVE', 'CASE', 'CHOOSE', 'CHOOSEN', 'CHOOSESETS', + 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', 'COSH', 'COUNT', + 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE', + 'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', + 'ERROR', 'EVALUATE', 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', + 'EXP', 'FAILCODE', 'FAILMESSAGE', 'FETCH', 'FROMUNICODE', + 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32', + 'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', + 'INTFORMAT', 'ISVALID', 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', + 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', 'MAP', 'MATCHED', + 
'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', 'MAX', + 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', + 'PARSE', 'PIPE', 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', + 'RANDOM', 'RANGE', 'RANK', 'RANKED', 'REALFORMAT', 'RECORDOF', + 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', 'ROLLUP', + 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', + 'SINH', 'SIZEOF', 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', + 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', 'THISNODE', 'TOPN', + 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP', + 'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', + 'XMLENCODE', 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'), Name.Function), ], 'string': [ diff --git a/pygments/lexers/eiffel.py b/pygments/lexers/eiffel.py index 793fad88..db7858c6 100644 --- a/pygments/lexers/eiffel.py +++ b/pygments/lexers/eiffel.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace +from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ + Punctuation, Whitespace __all__ = ['EiffelLexer'] @@ -35,8 +35,10 @@ class EiffelLexer(RegexLexer): # Please note that keyword and operator are case insensitive. (r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant), (r'(?i)(not|xor|implies|or)\b', Operator.Word), - (r'(?i)(and)(?:(\s+)(then))?\b', bygroups(Operator.Word, Whitespace, Operator.Word)), - (r'(?i)(or)(?:(\s+)(else))?\b', bygroups(Operator.Word, Whitespace, Operator.Word)), + (r'(?i)(and)(?:(\s+)(then))?\b', + bygroups(Operator.Word, Whitespace, Operator.Word)), + (r'(?i)(or)(?:(\s+)(else))?\b', + bygroups(Operator.Word, Whitespace, Operator.Word)), (words(( 'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached', 'attribute', 'check', 'class', 'convert', 'create', 'debug', diff --git a/pygments/lexers/elm.py b/pygments/lexers/elm.py index c9fce501..4174f2b2 100644 --- a/pygments/lexers/elm.py +++ b/pygments/lexers/elm.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, words, include, bygroups -from pygments.token import Comment, Keyword, Name, Number, Punctuation, String, \ - Text, Whitespace +from pygments.token import Comment, Keyword, Name, Number, Punctuation, \ + String, Whitespace __all__ = ['ElmLexer'] diff --git a/pygments/lexers/elpi.py b/pygments/lexers/elpi.py index e1fc19d4..4fb3aced 100644 --- a/pygments/lexers/elpi.py +++ b/pygments/lexers/elpi.py @@ -9,16 +9,17 @@ """ from pygments.lexer import RegexLexer, bygroups, include -from pygments.token import \ - Text, Comment, Operator, Keyword, Name, String, Number +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number __all__ = ['ElpiLexer'] + class ElpiLexer(RegexLexer): """ Lexer for the Elpi programming language. - .. versionadded::2.11 + .. 
versionadded:: 2.11 """ name = 'Elpi' @@ -26,7 +27,7 @@ class ElpiLexer(RegexLexer): aliases = ['elpi'] filenames = ['*.elpi'] mimetypes = ['text/x-elpi'] - + lcase_re = r"[a-z]" ucase_re = r"[A-Z]" digit_re = r"[0-9]" @@ -35,88 +36,107 @@ class ElpiLexer(RegexLexer): idchar_re = r"({}|{}|{}|{})".format(lcase_re,ucase_re,digit_re,schar_re) idcharstarns_re = r"({}*(\.({}|{}){}*)*)".format(idchar_re, lcase_re, ucase_re, idchar_re) symbchar_re = r"({}|{}|{}|{}|:)".format(lcase_re, ucase_re, digit_re, schar_re) - constant_re = r"({}{}*|{}{}|{}{}*|_{}+)".format(ucase_re, idchar_re, lcase_re, idcharstarns_re,schar2_re, symbchar_re,idchar_re) - symbol_re=r"(,|<=>|->|:-|;|\?-|->|&|=>|\bas\b|\buvar\b|<|=<|=|==|>=|>|\bi<|\bi=<|\bi>=|\bi>|\bis\b|\br<|\br=<|\br>=|\br>|\bs<|\bs=<|\bs>=|\bs>|@|::|\[\]|`->|`:|`:=|\^|-|\+|\bi-|\bi\+|r-|r\+|/|\*|\bdiv\b|\bi\*|\bmod\b|\br\*|~|\bi~|\br~)" - escape_re=r"\(({}|{})\)".format(constant_re,symbol_re) + constant_re = r"({}{}*|{}{}|{}{}*|_{}+)".format(ucase_re, idchar_re, lcase_re, idcharstarns_re, schar2_re, symbchar_re, idchar_re) + symbol_re = r"(,|<=>|->|:-|;|\?-|->|&|=>|\bas\b|\buvar\b|<|=<|=|==|>=|>|\bi<|\bi=<|\bi>=|\bi>|\bis\b|\br<|\br=<|\br>=|\br>|\bs<|\bs=<|\bs>=|\bs>|@|::|\[\]|`->|`:|`:=|\^|-|\+|\bi-|\bi\+|r-|r\+|/|\*|\bdiv\b|\bi\*|\bmod\b|\br\*|~|\bi~|\br~)" + escape_re = r"\(({}|{})\)".format(constant_re,symbol_re) const_sym_re = r"({}|{}|{})".format(constant_re,symbol_re,escape_re) tokens = { - 'root': [ include('elpi') ], + 'root': [ + include('elpi') + ], 'elpi': [ - include('_elpi-comment'), - (r"(:before|:after|:if|:name)(\s*)(\")",bygroups(Keyword.Mode,Text.Whitespace,String.Double),'elpi-string'), - (r"(:index)(\s*\()",bygroups(Keyword.Mode,Text.Whitespace),'elpi-indexing-expr'), - (r"\b(external pred|pred)(\s+)({})".format(const_sym_re),bygroups(Keyword.Declaration,Text.Whitespace,Name.Function),'elpi-pred-item'), - (r"\b(external type|type)(\s+)(({}(,\s*)?)+)".format(const_sym_re),bygroups(Keyword.Declaration,Text.Whitespace,Name.Function),'elpi-type'), - (r"\b(kind)(\s+)(({}|,)+)".format(const_sym_re),bygroups(Keyword.Declaration,Text.Whitespace,Name.Function),'elpi-type'), - (r"\b(typeabbrev)(\s+)({})".format(const_sym_re),bygroups(Keyword.Declaration,Text.Whitespace,Name.Function),'elpi-type'), - (r"\b(accumulate)(\s+)(\")",bygroups(Keyword.Declaration,Text.Whitespace,String.Double),'elpi-string'), - (r"\b(accumulate|namespace|local)(\s+)({})".format(constant_re),bygroups(Keyword.Declaration,Text.Whitespace,Text)), - (r"\b(shorten)(\s+)({}\.)".format(constant_re),bygroups(Keyword.Declaration,Text.Whitespace,Text)), - (r"\b(pi|sigma)(\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\)",bygroups(Keyword.Declaration,Text.Whitespace,Name.Variable,Text)), - (r"\b(constraint)(\s+)(({}(\s+)?)+)".format(const_sym_re),bygroups(Keyword.Declaration,Text.Whitespace,Name.Function),'elpi-chr-rule-start'), + (r"(:before|:after|:if|:name)(\s*)(\")", + bygroups(Keyword.Mode, Text.Whitespace, String.Double), + 'elpi-string'), + (r"(:index)(\s*\()", bygroups(Keyword.Mode, Text.Whitespace), + 'elpi-indexing-expr'), + (r"\b(external pred|pred)(\s+)({})".format(const_sym_re), + bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), + 'elpi-pred-item'), + (r"\b(external type|type)(\s+)(({}(,\s*)?)+)".format(const_sym_re), + bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), + 'elpi-type'), + (r"\b(kind)(\s+)(({}|,)+)".format(const_sym_re), + bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), + 'elpi-type'), + 
(r"\b(typeabbrev)(\s+)({})".format(const_sym_re), + bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), + 'elpi-type'), + (r"\b(accumulate)(\s+)(\")", + bygroups(Keyword.Declaration, Text.Whitespace, String.Double), + 'elpi-string'), + (r"\b(accumulate|namespace|local)(\s+)({})".format(constant_re), + bygroups(Keyword.Declaration, Text.Whitespace, Text)), + (r"\b(shorten)(\s+)({}\.)".format(constant_re), + bygroups(Keyword.Declaration, Text.Whitespace, Text)), + (r"\b(pi|sigma)(\s+)([a-zA-Z][A-Za-z0-9_ ]*)(\\)", + bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable, Text)), + (r"\b(constraint)(\s+)(({}(\s+)?)+)".format(const_sym_re), + bygroups(Keyword.Declaration, Text.Whitespace, Name.Function), + 'elpi-chr-rule-start'), - (r"(?=[A-Z_]){}".format(constant_re),Name.Variable), - (r"(?=[a-z_]){}\\".format(constant_re),Name.Variable), - (r"_",Name.Variable), - (r"({}|!|=>|;)".format(symbol_re),Keyword.Declaration), - (constant_re,Text), - (r"\[|\]|\||=>",Keyword.Declaration), + (r"(?=[A-Z_]){}".format(constant_re), Name.Variable), + (r"(?=[a-z_]){}\\".format(constant_re), Name.Variable), + (r"_", Name.Variable), + (r"({}|!|=>|;)".format(symbol_re), Keyword.Declaration), + (constant_re, Text), + (r"\[|\]|\||=>", Keyword.Declaration), (r'"', String.Double, 'elpi-string'), (r'`', String.Double, 'elpi-btick'), (r'\'', String.Double, 'elpi-tick'), (r'\{[^\{]', Text, 'elpi-spill'), - (r"\(",Text,'elpi-in-parens'), + (r"\(", Text, 'elpi-in-parens'), (r'\d[\d_]*', Number.Integer), (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), (r"[\+\*\-/\^\.]", Operator), ], '_elpi-comment': [ - (r'%[^\n]*\n',Comment), - (r'/\*',Comment,'elpi-multiline-comment'), - (r"\s+",Text.Whitespace), + (r'%[^\n]*\n', Comment), + (r'/\*', Comment, 'elpi-multiline-comment'), + (r"\s+", Text.Whitespace), ], 'elpi-multiline-comment': [ - (r'\*/',Comment,'#pop'), - (r'.',Comment) + (r'\*/', Comment, '#pop'), + (r'.', Comment) ], 'elpi-indexing-expr':[ - (r'[0-9 _]+',Number.Integer), - (r'\)',Text,'#pop'), + (r'[0-9 _]+', Number.Integer), + (r'\)', Text, '#pop'), ], 'elpi-type': [ - (r"(ctype\s+)(\")",bygroups(Keyword.Type,String.Double),'elpi-string'), - (r'->',Keyword.Type), - (constant_re,Keyword.Type), - (r"\(|\)",Keyword.Type), - (r"\.",Text,'#pop'), + (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'), + (r'->', Keyword.Type), + (constant_re, Keyword.Type), + (r"\(|\)", Keyword.Type), + (r"\.", Text, '#pop'), include('_elpi-comment'), ], 'elpi-chr-rule-start': [ - (r"\{",Text,'elpi-chr-rule'), + (r"\{", Text, 'elpi-chr-rule'), include('_elpi-comment'), ], 'elpi-chr-rule': [ - (r"\brule\b",Keyword.Declaration), - (r"\\",Keyword.Declaration), - (r"\}",Text,'#pop:2'), + (r"\brule\b", Keyword.Declaration), + (r"\\", Keyword.Declaration), + (r"\}", Text, '#pop:2'), include('elpi'), ], 'elpi-pred-item': [ - (r"[io]:",Keyword.Mode,'elpi-ctype'), - (r"\.",Text,'#pop'), + (r"[io]:", Keyword.Mode, 'elpi-ctype'), + (r"\.", Text, '#pop'), include('_elpi-comment'), ], 'elpi-ctype': [ - (r"(ctype\s+)(\")",bygroups(Keyword.Type,String.Double),'elpi-string'), - (r'->',Keyword.Type), - (constant_re,Keyword.Type), - (r"\(|\)",Keyword.Type), - (r",",Text,'#pop'), - (r"\.",Text,'#pop:2'), + (r"(ctype\s+)(\")", bygroups(Keyword.Type, String.Double), 'elpi-string'), + (r'->', Keyword.Type), + (constant_re, Keyword.Type), + (r"\(|\)", Keyword.Type), + (r",", Text, '#pop'), + (r"\.", Text, '#pop:2'), include('_elpi-comment'), ], 'elpi-btick': [ diff --git a/pygments/lexers/email.py 
b/pygments/lexers/email.py index 7d9f17e5..1de01ff1 100644 --- a/pygments/lexers/email.py +++ b/pygments/lexers/email.py @@ -63,11 +63,9 @@ class EmailHeaderLexer(RegexLexer): (r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function), # IPv4 - ( - r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0" - r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)", - Number.Integer, - ), + (r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0" + r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)", + Number.Integer), # IPv6 (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex), @@ -83,43 +81,27 @@ class EmailHeaderLexer(RegexLexer): Number.Hex), (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)", Number.Hex), - ( - r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}" - r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" - r"[0-9])(?=\b)", - Number.Hex, - ), - ( - r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-" - r"9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-" - r"9])(?=\b)", - Number.Hex, - ), + (r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}" + r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r"[0-9])(?=\b)", + Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9])" + r"{0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])(?=\b)", + Number.Hex), # Date time - ( - r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3[" - r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+(" - r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])" - r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-" - r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)", - Name.Decorator, - ), + (r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3[" + r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+(" + r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])" + r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-" + r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)", + Name.Decorator), # RFC-2047 encoded string - ( - r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\" - r"\]^_`{|}~]+)(\?=)", - bygroups( - String.Affix, - Name.Constant, - String.Affix, - Keyword.Constant, - String.Affix, - Number.Hex, - String.Affix - ) - ), + (r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\" + r"\]^_`{|}~]+)(\?=)", + bygroups(String.Affix, Name.Constant, String.Affix, Keyword.Constant, + String.Affix, Number.Hex, String.Affix)), # others (r'[\s]+', Text.Whitespace), diff --git a/pygments/lexers/erlang.py b/pygments/lexers/erlang.py index 6cdca984..d5aa0911 100644 --- a/pygments/lexers/erlang.py +++ b/pygments/lexers/erlang.py @@ -12,13 +12,12 @@ import re from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \ include, default -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic, Whitespace __all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer', 'ElixirLexer'] - line_re = re.compile('.*?\n') diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py index a0d1f1e2..aa09463c 100644 --- a/pygments/lexers/esoteric.py +++ b/pygments/lexers/esoteric.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error, 
Whitespace +from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ + Punctuation, Error, Whitespace __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer', 'CapDLLexer', 'AheuiLexer'] diff --git a/pygments/lexers/ezhil.py b/pygments/lexers/ezhil.py index 73b7c8b4..caa52d54 100644 --- a/pygments/lexers/ezhil.py +++ b/pygments/lexers/ezhil.py @@ -9,9 +9,10 @@ """ import re -from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Keyword, Text, Comment, Name -from pygments.token import String, Number, Punctuation, Operator, Whitespace + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Keyword, Comment, Name, String, Number, \ + Punctuation, Operator, Whitespace __all__ = ['EzhilLexer'] diff --git a/pygments/lexers/factor.py b/pygments/lexers/factor.py index 9697ac39..db10652d 100644 --- a/pygments/lexers/factor.py +++ b/pygments/lexers/factor.py @@ -8,11 +8,9 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, bygroups, default, words from pygments.token import Text, Comment, Keyword, Name, String, Number, \ - Whitespace, Punctuation + Whitespace, Punctuation __all__ = ['FactorLexer'] diff --git a/pygments/lexers/fift.py b/pygments/lexers/fift.py index c20065c4..2cd8c383 100644 --- a/pygments/lexers/fift.py +++ b/pygments/lexers/fift.py @@ -8,9 +8,8 @@ :license: BSD, see LICENSE for details. """ -from pygments.lexer import RegexLexer, include, words -from pygments.token import Literal, Comment, Name, String, \ - Number, Whitespace +from pygments.lexer import RegexLexer, include +from pygments.token import Literal, Comment, Name, String, Number, Whitespace __all__ = ['FiftLexer'] diff --git a/pygments/lexers/forth.py b/pygments/lexers/forth.py index 6655cda5..2207908b 100644 --- a/pygments/lexers/forth.py +++ b/pygments/lexers/forth.py @@ -12,7 +12,7 @@ import re from pygments.lexer import RegexLexer, bygroups from pygments.token import Text, Comment, Keyword, Name, String, Number, \ - Whitespace + Whitespace __all__ = ['ForthLexer'] diff --git a/pygments/lexers/fortran.py b/pygments/lexers/fortran.py index 8d5ea2a2..d45581ac 100644 --- a/pygments/lexers/fortran.py +++ b/pygments/lexers/fortran.py @@ -68,8 +68,8 @@ class FortranLexer(RegexLexer): 'FUNCTION', 'GENERIC', 'IF', 'IMAGES', 'IMPLICIT', 'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE', 'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY', - 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN', - 'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', + 'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN', + 'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT', 'PRIVATE', 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ', 'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE', 'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES', diff --git a/pygments/lexers/foxpro.py b/pygments/lexers/foxpro.py index 62edef8e..578c22a0 100644 --- a/pygments/lexers/foxpro.py +++ b/pygments/lexers/foxpro.py @@ -12,7 +12,7 @@ import re from pygments.lexer import RegexLexer from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ - Name, String + Name, String __all__ = ['FoxProLexer'] diff --git a/pygments/lexers/freefem.py b/pygments/lexers/freefem.py index a7b15a7e..6150c152 100644 --- a/pygments/lexers/freefem.py +++ 
b/pygments/lexers/freefem.py @@ -8,13 +8,9 @@ :license: BSD, see LICENSE for details. """ -from pygments.lexer import RegexLexer, include, bygroups, inherit, words, \ - default -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation +from pygments.token import Comment, Operator, Keyword, Name -from pygments.lexers.c_cpp import CLexer, CppLexer -from pygments.lexers import _mql_builtins +from pygments.lexers.c_cpp import CppLexer __all__ = ['FreeFemLexer'] diff --git a/pygments/lexers/futhark.py b/pygments/lexers/futhark.py index 8e3d54f4..92ea68f8 100644 --- a/pygments/lexers/futhark.py +++ b/pygments/lexers/futhark.py @@ -11,7 +11,7 @@ import re from pygments.lexer import RegexLexer, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ +from pygments.token import Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace from pygments import unistring as uni diff --git a/pygments/lexers/gdscript.py b/pygments/lexers/gdscript.py index f5054f46..25849af2 100644 --- a/pygments/lexers/gdscript.py +++ b/pygments/lexers/gdscript.py @@ -37,11 +37,9 @@ class GDScriptLexer(RegexLexer): def innerstring_rules(ttype): return [ # the old style '%s' % (...) string formatting - ( - r"%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?" - "[hlL]?[E-GXc-giorsux%]", - String.Interpol, - ), + (r"%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?" + "[hlL]?[E-GXc-giorsux%]", + String.Interpol), # backslashes, quotes and formatting signs must be parsed one at a time (r'[^\\\'"%\n]+', ttype), (r'[\'"\\]', ttype), @@ -53,239 +51,84 @@ class GDScriptLexer(RegexLexer): tokens = { "root": [ (r"\n", Whitespace), - ( - r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', - bygroups(Whitespace, String.Affix, String.Doc), - ), - ( - r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", - bygroups(Whitespace, String.Affix, String.Doc), - ), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Whitespace, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Whitespace, String.Affix, String.Doc)), (r"[^\S\n]+", Whitespace), (r"#.*$", Comment.Single), (r"[]{}:(),;[]", Punctuation), (r"(\\)(\n)", bygroups(Text, Whitespace)), (r"\\", Text), (r"(in|and|or|not)\b", Operator.Word), - ( - r"!=|==|<<|>>|&&|\+=|-=|\*=|/=|%=|&=|\|=|\|\||[-~+/*%=<>&^.!|$]", - Operator, - ), + (r"!=|==|<<|>>|&&|\+=|-=|\*=|/=|%=|&=|\|=|\|\||[-~+/*%=<>&^.!|$]", + Operator), include("keywords"), (r"(func)(\s+)", bygroups(Keyword, Whitespace), "funcname"), (r"(class)(\s+)", bygroups(Keyword, Whitespace), "classname"), include("builtins"), - ( - '([rR]|[uUbB][rR]|[rR][uUbB])(""")', - bygroups(String.Affix, String.Double), - "tdqs", - ), - ( - "([rR]|[uUbB][rR]|[rR][uUbB])(''')", - bygroups(String.Affix, String.Single), - "tsqs", - ), - ( - '([rR]|[uUbB][rR]|[rR][uUbB])(")', - bygroups(String.Affix, String.Double), - "dqs", - ), - ( - "([rR]|[uUbB][rR]|[rR][uUbB])(')", - bygroups(String.Affix, String.Single), - "sqs", - ), - ( - '([uUbB]?)(""")', - bygroups(String.Affix, String.Double), - combined("stringescape", "tdqs"), - ), - ( - "([uUbB]?)(''')", - bygroups(String.Affix, String.Single), - combined("stringescape", "tsqs"), - ), - ( - '([uUbB]?)(")', - bygroups(String.Affix, String.Double), - combined("stringescape", "dqs"), - ), - ( - "([uUbB]?)(')", - bygroups(String.Affix, String.Single), - combined("stringescape", "sqs"), - ), + ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', + bygroups(String.Affix, String.Double), + "tdqs"), + 
("([rR]|[uUbB][rR]|[rR][uUbB])(''')", + bygroups(String.Affix, String.Single), + "tsqs"), + ('([rR]|[uUbB][rR]|[rR][uUbB])(")', + bygroups(String.Affix, String.Double), + "dqs"), + ("([rR]|[uUbB][rR]|[rR][uUbB])(')", + bygroups(String.Affix, String.Single), + "sqs"), + ('([uUbB]?)(""")', + bygroups(String.Affix, String.Double), + combined("stringescape", "tdqs")), + ("([uUbB]?)(''')", + bygroups(String.Affix, String.Single), + combined("stringescape", "tsqs")), + ('([uUbB]?)(")', + bygroups(String.Affix, String.Double), + combined("stringescape", "dqs")), + ("([uUbB]?)(')", + bygroups(String.Affix, String.Single), + combined("stringescape", "sqs")), include("name"), include("numbers"), ], "keywords": [ - ( - words( - ( - "and", - "in", - "not", - "or", - "as", - "breakpoint", - "class", - "class_name", - "extends", - "is", - "func", - "setget", - "signal", - "tool", - "const", - "enum", - "export", - "onready", - "static", - "var", - "break", - "continue", - "if", - "elif", - "else", - "for", - "pass", - "return", - "match", - "while", - "remote", - "master", - "puppet", - "remotesync", - "mastersync", - "puppetsync", - ), - suffix=r"\b", - ), - Keyword, - ), + (words(("and", "in", "not", "or", "as", "breakpoint", "class", + "class_name", "extends", "is", "func", "setget", "signal", + "tool", "const", "enum", "export", "onready", "static", + "var", "break", "continue", "if", "elif", "else", "for", + "pass", "return", "match", "while", "remote", "master", + "puppet", "remotesync", "mastersync", "puppetsync"), + suffix=r"\b"), Keyword), ], "builtins": [ - ( - words( - ( - "Color8", - "ColorN", - "abs", - "acos", - "asin", - "assert", - "atan", - "atan2", - "bytes2var", - "ceil", - "char", - "clamp", - "convert", - "cos", - "cosh", - "db2linear", - "decimals", - "dectime", - "deg2rad", - "dict2inst", - "ease", - "exp", - "floor", - "fmod", - "fposmod", - "funcref", - "hash", - "inst2dict", - "instance_from_id", - "is_inf", - "is_nan", - "lerp", - "linear2db", - "load", - "log", - "max", - "min", - "nearest_po2", - "pow", - "preload", - "print", - "print_stack", - "printerr", - "printraw", - "prints", - "printt", - "rad2deg", - "rand_range", - "rand_seed", - "randf", - "randi", - "randomize", - "range", - "round", - "seed", - "sign", - "sin", - "sinh", - "sqrt", - "stepify", - "str", - "str2var", - "tan", - "tan", - "tanh", - "type_exist", - "typeof", - "var2bytes", - "var2str", - "weakref", - "yield", - ), - prefix=r"(?<!\.)", - suffix=r"\b", - ), - Name.Builtin, - ), - (r"((?<!\.)(self|false|true)|(PI|TAU|NAN|INF)" r")\b", Name.Builtin.Pseudo), - ( - words( - ( - "bool", - "int", - "float", - "String", - "NodePath", - "Vector2", - "Rect2", - "Transform2D", - "Vector3", - "Rect3", - "Plane", - "Quat", - "Basis", - "Transform", - "Color", - "RID", - "Object", - "NodePath", - "Dictionary", - "Array", - "PackedByteArray", - "PackedInt32Array", - "PackedInt64Array", - "PackedFloat32Array", - "PackedFloat64Array", - "PackedStringArray", - "PackedVector2Array", - "PackedVector3Array", - "PackedColorArray", - "null", - "void", - ), - prefix=r"(?<!\.)", - suffix=r"\b", - ), - Name.Builtin.Type, - ), + (words(("Color8", "ColorN", "abs", "acos", "asin", "assert", "atan", + "atan2", "bytes2var", "ceil", "char", "clamp", "convert", + "cos", "cosh", "db2linear", "decimals", "dectime", "deg2rad", + "dict2inst", "ease", "exp", "floor", "fmod", "fposmod", + "funcref", "hash", "inst2dict", "instance_from_id", "is_inf", + "is_nan", "lerp", "linear2db", "load", "log", "max", "min", + "nearest_po2", "pow", 
"preload", "print", "print_stack", + "printerr", "printraw", "prints", "printt", "rad2deg", + "rand_range", "rand_seed", "randf", "randi", "randomize", + "range", "round", "seed", "sign", "sin", "sinh", "sqrt", + "stepify", "str", "str2var", "tan", "tan", "tanh", + "type_exist", "typeof", "var2bytes", "var2str", "weakref", + "yield"), prefix=r"(?<!\.)", suffix=r"\b"), + Name.Builtin), + (r"((?<!\.)(self|false|true)|(PI|TAU|NAN|INF)" r")\b", + Name.Builtin.Pseudo), + (words(("bool", "int", "float", "String", "NodePath", "Vector2", + "Rect2", "Transform2D", "Vector3", "Rect3", "Plane", "Quat", + "Basis", "Transform", "Color", "RID", "Object", "NodePath", + "Dictionary", "Array", "PackedByteArray", "PackedInt32Array", + "PackedInt64Array", "PackedFloat32Array", "PackedFloat64Array", + "PackedStringArray", "PackedVector2Array", "PackedVector3Array", + "PackedColorArray", "null", "void"), + prefix=r"(?<!\.)", suffix=r"\b"), + Name.Builtin.Type), ], "numbers": [ (r"(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?", Number.Float), diff --git a/pygments/lexers/go.py b/pygments/lexers/go.py index a5de48af..8b7d8695 100644 --- a/pygments/lexers/go.py +++ b/pygments/lexers/go.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace diff --git a/pygments/lexers/grammar_notation.py b/pygments/lexers/grammar_notation.py index 6acc47e2..4be4144f 100644 --- a/pygments/lexers/grammar_notation.py +++ b/pygments/lexers/grammar_notation.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, bygroups, include, this, using, words from pygments.token import Comment, Keyword, Literal, Name, Number, \ Operator, Punctuation, String, Text, Whitespace @@ -196,7 +194,7 @@ class JsgfLexer(RegexLexer): (r'\*/', Comment.Multiline, '#pop'), (r'^(\s*)(\*?)(\s*)(@(?:example|see))(\s+)' r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))', - bygroups(Whitespace,Comment.Multiline, Whitespace, Comment.Special, + bygroups(Whitespace, Comment.Multiline, Whitespace, Comment.Special, Whitespace, using(this, state='example'))), (r'(^\s*\*?\s*)(@\S*)', bygroups(Comment.Multiline, Comment.Special)), diff --git a/pygments/lexers/graph.py b/pygments/lexers/graph.py index de10e502..69e5bf15 100644 --- a/pygments/lexers/graph.py +++ b/pygments/lexers/graph.py @@ -77,10 +77,10 @@ class CypherLexer(RegexLexer): (r'(using)(\s+)(periodic)(\s+)(commit)\b', bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)), (words(( - 'all', 'any', 'as', 'asc', 'ascending', 'assert', 'call', 'case', 'create', - 'delete', 'desc', 'descending', 'distinct', 'end', 'fieldterminator', - 'foreach', 'in', 'limit', 'match', 'merge', 'none', 'not', 'null', - 'remove', 'return', 'set', 'skip', 'single', 'start', 'then', 'union', + 'all', 'any', 'as', 'asc', 'ascending', 'assert', 'call', 'case', 'create', + 'delete', 'desc', 'descending', 'distinct', 'end', 'fieldterminator', + 'foreach', 'in', 'limit', 'match', 'merge', 'none', 'not', 'null', + 'remove', 'return', 'set', 'skip', 'single', 'start', 'then', 'union', 'unwind', 'yield', 'where', 'when', 'with'), suffix=r'\b'), Keyword), ], 'relations': [ diff --git a/pygments/lexers/gsql.py b/pygments/lexers/gsql.py index 6af99b27..8c7674de 100755 --- a/pygments/lexers/gsql.py +++ b/pygments/lexers/gsql.py @@ -11,16 +11,17 @@ import re from pygments.lexer import RegexLexer, include, bygroups, 
using, this, words -from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\ - String, Number, Whitespace, Token - +from pygments.token import Keyword, Punctuation, Comment, Operator, Name, \ + String, Number, Whitespace __all__ = ["GSQLLexer"] + class GSQLLexer(RegexLexer): """ For GSQL queries (version 3.x). + .. versionadded:: 2.10 """ @@ -49,25 +50,35 @@ class GSQLLexer(RegexLexer): ], 'keywords': [ (words(( - 'ACCUM', 'AND', 'ANY', 'API', 'AS', 'ASC', 'AVG', 'BAG', 'BATCH', 'BETWEEN', 'BOOL', 'BOTH', - 'BREAK', 'BY', 'CASE', 'CATCH', 'COALESCE', 'COMPRESS', 'CONTINUE', 'COUNT', - 'CREATE', 'DATETIME', 'DATETIME_ADD', 'DATETIME_SUB', 'DELETE', 'DESC', 'DISTRIBUTED', 'DO', - 'DOUBLE', 'EDGE', 'ELSE', 'END', 'ESCAPE', 'EXCEPTION', 'FALSE', 'FILE', 'FILTER', 'FLOAT', 'FOREACH', 'FOR', - 'FROM', 'GRAPH', 'GROUP', 'GSQL_INT_MAX', 'GSQL_INT_MIN', 'GSQL_UINT_MAX', 'HAVING', 'IF', - 'IN', 'INSERT', 'INT', 'INTERPRET', 'INTERSECT', 'INTERVAL', 'INTO', 'IS', 'ISEMPTY', 'JSONARRAY', 'JSONOBJECT', 'LASTHOP', - 'LEADING', 'LIKE', 'LIMIT', 'LIST', 'LOAD_ACCUM', 'LOG', 'MAP', 'MATCH', 'MAX', 'MIN', 'MINUS', 'NOT', - 'NOW', 'NULL', 'OFFSET', 'OR', 'ORDER', 'PATH', 'PER', 'PINNED', 'POST_ACCUM', 'POST-ACCUM', 'PRIMARY_ID', 'PRINT', - 'QUERY', 'RAISE', 'RANGE', 'REPLACE', 'RESET_COLLECTION_ACCUM', 'RETURN', 'RETURNS', 'RUN', 'SAMPLE', 'SELECT', 'SELECT_VERTEX', - 'SET', 'SRC', 'STATIC', 'STRING', 'SUM', 'SYNTAX', 'TARGET', 'TAGSTGT', 'THEN', 'TO', 'TO_CSV', 'TO_DATETIME', 'TRAILING', 'TRIM', 'TRUE', - 'TRY', 'TUPLE', 'TYPEDEF', 'UINT', 'UNION', 'UPDATE', 'VALUES', 'VERTEX', 'WHEN', 'WHERE', 'WHILE', 'WITH'), prefix=r'(?<!\.)', suffix=r'\b'), Token.Keyword) + 'ACCUM', 'AND', 'ANY', 'API', 'AS', 'ASC', 'AVG', 'BAG', 'BATCH', + 'BETWEEN', 'BOOL', 'BOTH', 'BREAK', 'BY', 'CASE', 'CATCH', 'COALESCE', + 'COMPRESS', 'CONTINUE', 'COUNT', 'CREATE', 'DATETIME', 'DATETIME_ADD', + 'DATETIME_SUB', 'DELETE', 'DESC', 'DISTRIBUTED', 'DO', 'DOUBLE', + 'EDGE', 'ELSE', 'END', 'ESCAPE', 'EXCEPTION', 'FALSE', 'FILE', + 'FILTER', 'FLOAT', 'FOREACH', 'FOR', 'FROM', 'GRAPH', 'GROUP', + 'GSQL_INT_MAX', 'GSQL_INT_MIN', 'GSQL_UINT_MAX', 'HAVING', 'IF', + 'IN', 'INSERT', 'INT', 'INTERPRET', 'INTERSECT', 'INTERVAL', 'INTO', + 'IS', 'ISEMPTY', 'JSONARRAY', 'JSONOBJECT', 'LASTHOP', 'LEADING', + 'LIKE', 'LIMIT', 'LIST', 'LOAD_ACCUM', 'LOG', 'MAP', 'MATCH', 'MAX', + 'MIN', 'MINUS', 'NOT', 'NOW', 'NULL', 'OFFSET', 'OR', 'ORDER', 'PATH', + 'PER', 'PINNED', 'POST_ACCUM', 'POST-ACCUM', 'PRIMARY_ID', 'PRINT', + 'QUERY', 'RAISE', 'RANGE', 'REPLACE', 'RESET_COLLECTION_ACCUM', + 'RETURN', 'RETURNS', 'RUN', 'SAMPLE', 'SELECT', 'SELECT_VERTEX', + 'SET', 'SRC', 'STATIC', 'STRING', 'SUM', 'SYNTAX', 'TARGET', + 'TAGSTGT', 'THEN', 'TO', 'TO_CSV', 'TO_DATETIME', 'TRAILING', + 'TRIM', 'TRUE', 'TRY', 'TUPLE', 'TYPEDEF', 'UINT', 'UNION', 'UPDATE', + 'VALUES', 'VERTEX', 'WHEN', 'WHERE', 'WHILE', 'WITH'), + prefix=r'(?<!\.)', suffix=r'\b'), Keyword), ], 'clauses': [ - (words(('accum', 'having', 'limit', 'order', 'postAccum', 'sample', 'where')), Name.Builtin) + (words(('accum', 'having', 'limit', 'order', 'postAccum', 'sample', 'where')), + Name.Builtin), ], 'accums': [ (words(('andaccum', 'arrayaccum', 'avgaccum', 'bagaccum', 'bitwiseandaccum', - 'bitwiseoraccum', 'groupbyaccum', 'heapaccum', 'listaccum', 'MapAccum', - 'maxaccum', 'minaccum', 'oraccum', 'setaccum', 'sumaccum')), Name.Builtin), + 'bitwiseoraccum', 'groupbyaccum', 'heapaccum', 'listaccum', + 'MapAccum', 'maxaccum', 'minaccum', 'oraccum', 'setaccum', + 'sumaccum')), 
Name.Builtin), ], 'relations': [ (r'(-\s?)(\(.*\:\w?\))(\s?-)', bygroups(Operator, using(this), Operator)), @@ -86,7 +97,8 @@ class GSQLLexer(RegexLexer): (r'(\d+\.\d+|\d+)', Number), ], 'operators': [ - (r'\$|[^0-9|\/|\-](\-\=|\+\=|\*\=|\\\=|\=|\=\=|\=\=\=|\+|\-|\*|\\|\+\=|\>|\<)[^\>|\/]', Operator), + (r'\$|[^0-9|\/|\-](\-\=|\+\=|\*\=|\\\=|\=|\=\=|\=\=\=|' + r'\+|\-|\*|\\|\+\=|\>|\<)[^\>|\/]', Operator), (r'(\||\(|\)|\,|\;|\=|\-|\+|\*|\/|\>|\<|\:)', Operator), ], } diff --git a/pygments/lexers/hexdump.py b/pygments/lexers/hexdump.py index 58f0e8d6..b766d1de 100644 --- a/pygments/lexers/hexdump.py +++ b/pygments/lexers/hexdump.py @@ -9,7 +9,7 @@ """ from pygments.lexer import RegexLexer, bygroups, include -from pygments.token import Text, Name, Number, String, Punctuation, Whitespace +from pygments.token import Name, Number, String, Punctuation, Whitespace __all__ = ['HexdumpLexer'] diff --git a/pygments/lexers/inferno.py b/pygments/lexers/inferno.py index 93122b3c..a3bee4ff 100644 --- a/pygments/lexers/inferno.py +++ b/pygments/lexers/inferno.py @@ -11,7 +11,7 @@ import re from pygments.lexer import RegexLexer, include, bygroups, default -from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \ +from pygments.token import Punctuation, Comment, Operator, Keyword, \ Name, String, Number, Whitespace __all__ = ['LimboLexer'] diff --git a/pygments/lexers/iolang.py b/pygments/lexers/iolang.py index da247d91..a760d1cf 100644 --- a/pygments/lexers/iolang.py +++ b/pygments/lexers/iolang.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Whitespace +from pygments.token import Comment, Operator, Keyword, Name, String, Number, \ + Whitespace __all__ = ['IoLexer'] diff --git a/pygments/lexers/j.py b/pygments/lexers/j.py index d33207d1..7e5f9a1b 100644 --- a/pygments/lexers/j.py +++ b/pygments/lexers/j.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, words, include, bygroups -from pygments.token import Comment, Keyword, Name, Number, Operator, Punctuation, \ - String, Text, Whitespace +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, String, Whitespace __all__ = ['JLexer'] diff --git a/pygments/lexers/jsonnet.py b/pygments/lexers/jsonnet.py index bd98836c..305058cb 100644 --- a/pygments/lexers/jsonnet.py +++ b/pygments/lexers/jsonnet.py @@ -7,22 +7,10 @@ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ -from pygments.lexer import ( - include, - RegexLexer, - words, -) -from pygments.token import ( - Comment, - Keyword, - Name, - Number, - Operator, - Punctuation, - String, - Text, - Whitespace, -) + +from pygments.lexer import include, RegexLexer, words +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, String, Text, Whitespace __all__ = ['JsonnetLexer'] diff --git a/pygments/lexers/julia.py b/pygments/lexers/julia.py index 5ac94357..86e760cf 100644 --- a/pygments/lexers/julia.py +++ b/pygments/lexers/julia.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. 
""" -import re - from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \ words, include from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ @@ -56,7 +54,8 @@ class JuliaLexer(RegexLexer): (r'(?<![\]):<>\d.])(:' + allowed_variable + ')', String.Symbol), # type assertions - excludes expressions like ::typeof(sin) and ::avec[1] - (r'(?<=::)(\s*)(' + allowed_variable + r')\b(?![(\[])', bygroups(Whitespace, Keyword.Type)), + (r'(?<=::)(\s*)(' + allowed_variable + r')\b(?![(\[])', + bygroups(Whitespace, Keyword.Type)), # type comparisons # - MyType <: A or MyType >: A ('(' + allowed_variable + r')(\s*)([<>]:)(\s*)(' + allowed_variable + r')\b(?![(\[])', @@ -71,8 +70,10 @@ class JuliaLexer(RegexLexer): # operators # Suffixes aren't actually allowed on all operators, but we'll ignore that # since those cases are invalid Julia code. - (words([*OPERATORS_LIST, *DOTTED_OPERATORS_LIST], suffix=operator_suffixes), Operator), - (words(['.' + o for o in DOTTED_OPERATORS_LIST], suffix=operator_suffixes), Operator), + (words([*OPERATORS_LIST, *DOTTED_OPERATORS_LIST], + suffix=operator_suffixes), Operator), + (words(['.' + o for o in DOTTED_OPERATORS_LIST], + suffix=operator_suffixes), Operator), (words(['...', '..']), Operator), # NOTE @@ -96,12 +97,16 @@ class JuliaLexer(RegexLexer): (r'(r)(""")', bygroups(String.Affix, String.Regex), 'tqregex'), (r'(r)(")', bygroups(String.Affix, String.Regex), 'regex'), # other strings - (r'(' + allowed_variable + ')?(""")', bygroups(String.Affix, String), 'tqstring'), - (r'(' + allowed_variable + ')?(")', bygroups(String.Affix, String), 'string'), + (r'(' + allowed_variable + ')?(""")', + bygroups(String.Affix, String), 'tqstring'), + (r'(' + allowed_variable + ')?(")', + bygroups(String.Affix, String), 'string'), # backticks - (r'(' + allowed_variable + ')?(```)', bygroups(String.Affix, String.Backtick), 'tqcommand'), - (r'(' + allowed_variable + ')?(`)', bygroups(String.Affix, String.Backtick), 'command'), + (r'(' + allowed_variable + ')?(```)', + bygroups(String.Affix, String.Backtick), 'tqcommand'), + (r'(' + allowed_variable + ')?(`)', + bygroups(String.Affix, String.Backtick), 'command'), # type names # - names that begin a curly expression @@ -172,9 +177,9 @@ class JuliaLexer(RegexLexer): (r'([^"\\]|\\[^"])+', String), ], - # Interpolation is defined as "$" followed by the shortest full expression, which is - # something we can't parse. - # Include the most common cases here: $word, and $(paren'd expr). + # Interpolation is defined as "$" followed by the shortest full + # expression, which is something we can't parse. Include the most + # common cases here: $word, and $(paren'd expr). 'interp': [ (r'\$' + allowed_variable, String.Interpol), (r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'), @@ -186,7 +191,8 @@ class JuliaLexer(RegexLexer): ], 'string': [ - (r'(")(' + allowed_variable + r'|\d+)?', bygroups(String, String.Affix), '#pop'), + (r'(")(' + allowed_variable + r'|\d+)?', + bygroups(String, String.Affix), '#pop'), # FIXME: This escape pattern is not perfect. 
(r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape), include('interp'), @@ -197,7 +203,8 @@ class JuliaLexer(RegexLexer): (r'.', String), ], 'tqstring': [ - (r'(""")(' + allowed_variable + r'|\d+)?', bygroups(String, String.Affix), '#pop'), + (r'(""")(' + allowed_variable + r'|\d+)?', + bygroups(String, String.Affix), '#pop'), (r'\\([\\"\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape), include('interp'), (r'[^"$%\\]+', String), @@ -216,14 +223,16 @@ class JuliaLexer(RegexLexer): ], 'command': [ - (r'(`)(' + allowed_variable + r'|\d+)?', bygroups(String.Backtick, String.Affix), '#pop'), + (r'(`)(' + allowed_variable + r'|\d+)?', + bygroups(String.Backtick, String.Affix), '#pop'), (r'\\[`$]', String.Escape), include('interp'), (r'[^\\`$]+', String.Backtick), (r'.', String.Backtick), ], 'tqcommand': [ - (r'(```)(' + allowed_variable + r'|\d+)?', bygroups(String.Backtick, String.Affix), '#pop'), + (r'(```)(' + allowed_variable + r'|\d+)?', + bygroups(String.Backtick, String.Affix), '#pop'), (r'\\\$', String.Escape), include('interp'), (r'[^\\`$]+', String.Backtick), diff --git a/pygments/lexers/kuin.py b/pygments/lexers/kuin.py index 1d7ba67c..8dd7e705 100644 --- a/pygments/lexers/kuin.py +++ b/pygments/lexers/kuin.py @@ -14,9 +14,10 @@ from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ __all__ = ['KuinLexer'] + class KuinLexer(RegexLexer): """ - For Kuin source code + For Kuin source code. .. versionadded:: 2.9 """ @@ -34,17 +35,29 @@ class KuinLexer(RegexLexer): include('whitespace'), # Block-statement - (r'(\+?)([ \t]*)(\*?)([ \t]*)(\bfunc)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword,Whitespace, Keyword, Whitespace, Keyword, using(this), Name.Function), 'func_'), - (r'\b(class)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Class), 'class_'), - (r'\b(enum)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Constant), 'enum_'), - (r'\b(block)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'block_'), - (r'\b(ifdef)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'ifdef_'), - (r'\b(if)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'if_'), - (r'\b(switch)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'switch_'), - (r'\b(while)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'while_'), - (r'\b(for)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'for_'), - (r'\b(foreach)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'foreach_'), - (r'\b(try)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', bygroups(Keyword, using(this), Name.Other), 'try_'), + (r'(\+?)([ \t]*)(\*?)([ \t]*)(\bfunc)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', + bygroups(Keyword,Whitespace, Keyword, Whitespace, Keyword, + using(this), Name.Function), 'func_'), + (r'\b(class)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', + bygroups(Keyword, using(this), Name.Class), 'class_'), + (r'\b(enum)([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*)', + bygroups(Keyword, using(this), Name.Constant), 'enum_'), + (r'\b(block)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + 
bygroups(Keyword, using(this), Name.Other), 'block_'), + (r'\b(ifdef)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'ifdef_'), + (r'\b(if)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'if_'), + (r'\b(switch)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'switch_'), + (r'\b(while)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'while_'), + (r'\b(for)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'for_'), + (r'\b(foreach)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'foreach_'), + (r'\b(try)\b(?:([ \t]+(?:\n\s*\|)*[ \t]*)([a-zA-Z_][0-9a-zA-Z_]*))?', + bygroups(Keyword, using(this), Name.Other), 'try_'), # Line-statement (r'\b(do)\b', Keyword, 'do'), @@ -77,7 +90,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'func'), ], 'func': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(func)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(func)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), ], 'class_': [ @@ -85,7 +99,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'class'), ], 'class': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(class)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(class)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), ], 'enum_': [ @@ -93,7 +108,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'enum'), ], 'enum': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(enum)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(enum)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('expr'), (r'\n', Whitespace), ], @@ -102,7 +118,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'block'), ], 'block': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(block)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(block)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), @@ -112,8 +129,10 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'ifdef'), ], 'ifdef': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(ifdef)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), - (words(('rls', 'dbg'), prefix=r'\b', suffix=r'\b'), Keyword.Constant, 'ifdef_sp'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(ifdef)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), + (words(('rls', 'dbg'), prefix=r'\b', suffix=r'\b'), + Keyword.Constant, 'ifdef_sp'), include('statement'), include('break'), include('skip'), @@ -127,7 +146,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'if'), ], 'if': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(if)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(if)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), (words(('elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword, 'if_sp'), include('statement'), include('break'), @@ -142,8 +162,10 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'switch'), ], 'switch': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(switch)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), - (words(('case', 'default', 'to'), prefix=r'\b', 
suffix=r'\b'), Keyword, 'switch_sp'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(switch)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), + (words(('case', 'default', 'to'), prefix=r'\b', suffix=r'\b'), + Keyword, 'switch_sp'), include('statement'), include('break'), include('skip'), @@ -157,7 +179,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'while'), ], 'while': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(while)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(while)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), @@ -167,7 +190,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'for'), ], 'for': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(for)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(for)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), @@ -177,7 +201,8 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'foreach'), ], 'foreach': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(foreach)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(foreach)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), include('statement'), include('break'), include('skip'), @@ -187,8 +212,10 @@ class KuinLexer(RegexLexer): (r'\n', Whitespace, 'try'), ], 'try': [ - (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(try)\b', bygroups(Keyword, using(this), Keyword), '#pop:2'), - (words(('catch', 'finally', 'to'), prefix=r'\b', suffix=r'\b'), Keyword, 'try_sp'), + (r'\b(end)([ \t]+(?:\n\s*\|)*[ \t]*)(try)\b', + bygroups(Keyword, using(this), Keyword), '#pop:2'), + (words(('catch', 'finally', 'to'), prefix=r'\b', suffix=r'\b'), + Keyword, 'try_sp'), include('statement'), include('break'), include('skip'), @@ -200,10 +227,12 @@ class KuinLexer(RegexLexer): # Line-statement 'break': [ - (r'\b(break)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Other)), + (r'\b(break)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', + bygroups(Keyword, using(this), Name.Other)), ], 'skip': [ - (r'\b(skip)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', bygroups(Keyword, using(this), Name.Other)), + (r'\b(skip)\b([ \t]+)([a-zA-Z_][0-9a-zA-Z_]*)', + bygroups(Keyword, using(this), Name.Other)), ], 'alias': [ include('expr'), @@ -287,7 +316,8 @@ class KuinLexer(RegexLexer): # Identifier (r"\b([a-zA-Z_][0-9a-zA-Z_]*)(?=@)\b", Name), - (r"(@)?\b([a-zA-Z_][0-9a-zA-Z_]*)\b", bygroups(Name.Other, Name.Variable)), + (r"(@)?\b([a-zA-Z_][0-9a-zA-Z_]*)\b", + bygroups(Name.Other, Name.Variable)), ], # String diff --git a/pygments/lexers/lilypond.py b/pygments/lexers/lilypond.py index 93947cb3..c39e0b03 100644 --- a/pygments/lexers/lilypond.py +++ b/pygments/lexers/lilypond.py @@ -35,6 +35,7 @@ def builtin_words(names, backslash, suffix=NAME_END_RE): assert backslash == "disallowed" return words(names, prefix, suffix) + class LilyPondLexer(SchemeLexer): """ Lexer for input to LilyPond, a text-based music typesetter. diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py index 6dab1fda..2b91b99d 100644 --- a/pygments/lexers/lisp.py +++ b/pygments/lexers/lisp.py @@ -22,6 +22,7 @@ __all__ = ['SchemeLexer', 'CommonLispLexer', 'HyLexer', 'RacketLexer', 'NewLispLexer', 'EmacsLispLexer', 'ShenLexer', 'CPSALexer', 'XtlangLexer', 'FennelLexer'] + class SchemeLexer(RegexLexer): """ A Scheme lexer. 
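The hunks before and after this point all apply the same wrapping convention: parenthesized multi-line imports and one-item-per-line tuples are collapsed into backslash-continued lines kept under the usual 79-column limit, and long words() tuples are rewrapped across a few lines with aligned continuations. A minimal sketch of the resulting style follows; ExampleLexer and its token rules are hypothetical, invented purely for illustration, and only the pygments imports are real.

from pygments.lexer import RegexLexer, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
    Punctuation, String, Whitespace

__all__ = ['ExampleLexer']


class ExampleLexer(RegexLexer):
    """Hypothetical lexer, present only to illustrate the wrapped style."""
    name = 'Example'
    aliases = ['example']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*$', Comment.Single),
            # long keyword tuples are wrapped across lines rather than being
            # listed one item per line
            (words(('if', 'else', 'for', 'while', 'break', 'continue',
                    'return'), suffix=r'\b'), Keyword),
            (r'"[^"]*"', String),
            (r'\d+', Number),
            (r'[-+*/=<>]', Operator),
            (r'[(),;]', Punctuation),
            (r'\w+', Name),
        ],
    }

The change is purely cosmetic: the token rules and lexer output stay identical, only the source layout of the import and tuple blocks differs.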
diff --git a/pygments/lexers/macaulay2.py b/pygments/lexers/macaulay2.py index db5d9902..3f773e24 100644 --- a/pygments/lexers/macaulay2.py +++ b/pygments/lexers/macaulay2.py @@ -1,6 +1,7 @@ """ pygments.lexers.macaulay2 ~~~~~~~~~~~~~~~~~~~~~~~~~ + Lexer for Macaulay2. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. diff --git a/pygments/lexers/mcfunction.py b/pygments/lexers/mcfunction.py index b05e21d4..d969d876 100644 --- a/pygments/lexers/mcfunction.py +++ b/pygments/lexers/mcfunction.py @@ -9,10 +9,8 @@ """ from pygments.lexer import RegexLexer, default, include, bygroups -from pygments.token import (Comment, Keyword, Literal, Name, Number, - Operator, Punctuation, String, Text, Token, - Whitespace) - +from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \ + Punctuation, String, Text, Whitespace __all__ = ['SNBTLexer', 'MCFunctionLexer'] @@ -20,7 +18,7 @@ __all__ = ['SNBTLexer', 'MCFunctionLexer'] class SNBTLexer(RegexLexer): """Lexer for stringified NBT, a data format used in Minecraft - .. versionadded:: 2.12.0 + .. versionadded:: 2.12 """ name = "SNBT" @@ -125,10 +123,7 @@ class MCFunctionLexer(RegexLexer): (r"(?<=run)\s+[a-z_]+", Name.Builtin), # UUID - ( - r"\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b", - Name.Variable, - ), + (r"\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b", Name.Variable), include("resource-name"), # normal command names and scoreboards # there's no way to know the differences unfortuntely @@ -137,17 +132,11 @@ class MCFunctionLexer(RegexLexer): ], "resource-name": [ - ( - # resource names have to be lowercase - r"#?[a-z_][a-z_.-]*:[a-z0-9_./-]+", - Name.Function, - ), - ( - # similar to above except optional `:`` - # a `/` must be present "somewhere" - r"#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+", - Name.Function, - ) + # resource names have to be lowercase + (r"#?[a-z_][a-z_.-]*:[a-z0-9_./-]+", Name.Function), + # similar to above except optional `:`` + # a `/` must be present "somewhere" + (r"#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+", Name.Function), ], "whitespace": [ @@ -155,18 +144,13 @@ class MCFunctionLexer(RegexLexer): ], "comments": [ - ( - rf"^\s*(#{_block_comment_prefix})", - Comment.Multiline, - ( - "comments.block", - "comments.block.emphasized", - ), - ), + (rf"^\s*(#{_block_comment_prefix})", Comment.Multiline, + ("comments.block", "comments.block.emphasized")), (r"#.*$", Comment.Single), ], "comments.block": [ - (rf"^\s*#{_block_comment_prefix}", Comment.Multiline, "comments.block.emphasized"), + (rf"^\s*#{_block_comment_prefix}", Comment.Multiline, + "comments.block.emphasized"), (r"^\s*#", Comment.Multiline, "comments.block.normal"), default("#pop"), ], diff --git a/pygments/lexers/meson.py b/pygments/lexers/meson.py index 4d366588..e8d1f08c 100644 --- a/pygments/lexers/meson.py +++ b/pygments/lexers/meson.py @@ -8,30 +8,16 @@ :license: BSD, see LICENSE for details. """ -import re - -from pygments.lexer import ( - RegexLexer, - words, - include, -) -from pygments.token import ( - Comment, - Name, - Number, - Punctuation, - Operator, - Keyword, - String, - Whitespace, -) +from pygments.lexer import RegexLexer, words, include +from pygments.token import Comment, Name, Number, Punctuation, Operator, \ + Keyword, String, Whitespace __all__ = ['MesonLexer'] class MesonLexer(RegexLexer): """Meson language lexer. - + The grammar definition use to transcribe the syntax was retrieved from https://mesonbuild.com/Syntax.html#grammar for version 0.58. 
Some of those definitions are improperly transcribed, so the Meson++ diff --git a/pygments/lexers/mips.py b/pygments/lexers/mips.py index 2877d72c..e20038b2 100644 --- a/pygments/lexers/mips.py +++ b/pygments/lexers/mips.py @@ -8,7 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re from pygments.lexer import RegexLexer, words from pygments.token import Whitespace, Comment, String, Keyword, Name, Text @@ -58,7 +57,7 @@ class MIPSLexer(RegexLexer): "tltiu", # Exception / Interrupt "eret", "break", "bop", "syscall", - #--- Floats ----------------------------------------------------- + # --- Floats ----------------------------------------------------- # Arithmetic "add.s", "add.d", "sub.s", "sub.d", "mul.s", "mul.d", "div.s", "div.d", "neg.d", "neg.s", @@ -89,7 +88,7 @@ class MIPSLexer(RegexLexer): "move", # coproc: "mfc1.d", # comparisons "sgt", "sgtu", "sge", "sgeu", "sle", "sleu", "sne", "seq", - #--- Floats ----------------------------------------------------- + # --- Floats ----------------------------------------------------- # load-store "l.d", "l.s", "s.d", "s.s", ] @@ -114,7 +113,8 @@ class MIPSLexer(RegexLexer): (words(pseudoinstructions, suffix=r'\b'), Name.Variable), (words(keywords, suffix=r'\b'), Keyword), (r'[slm][ftwd]c[0-9]([.]d)?', Keyword), - (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)', Keyword.Type), + (r'\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)', + Keyword.Type), (words(directives, suffix=r'\b'), Name.Entity), # Preprocessor? (r':|,|;|\{|\}|=>|@|\$|=', Name.Builtin), (r'\w+', Text), diff --git a/pygments/lexers/nimrod.py b/pygments/lexers/nimrod.py index 4d773288..bf5c948c 100644 --- a/pygments/lexers/nimrod.py +++ b/pygments/lexers/nimrod.py @@ -75,23 +75,24 @@ class NimrodLexer(RegexLexer): (r'##.*$', String.Doc), (r'#\[', Comment.Multiline, 'comment'), (r'#.*$', Comment), - + # Pragmas (r'\{\.', String.Other, 'pragma'), - + # Operators (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', Punctuation), - + # Case statement branch - (r'(\n\s*)(of)(\s)', bygroups(Text.Whitespace, Keyword, Text.Whitespace), 'casebranch'), - + (r'(\n\s*)(of)(\s)', bygroups(Text.Whitespace, Keyword, + Text.Whitespace), 'casebranch'), + # Strings (r'(?:[\w]+)"', String, 'rdqs'), (r'"""', String.Double, 'tdqs'), ('"', String, 'dqs'), - + # Char ("'", String.Char, 'chars'), @@ -105,10 +106,10 @@ class NimrodLexer(RegexLexer): (r'(v_?a_?r)\b', Keyword.Declaration), (r'(%s)\b' % underscorize(types), Name.Builtin), (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), - + # Identifiers (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), - + # Numbers (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', Number.Float, ('float-suffix', 'float-number')), @@ -116,7 +117,7 @@ class NimrodLexer(RegexLexer): (r'0b[01][01_]*', Number.Bin, 'int-suffix'), (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), - + # Whitespace (r'\s+', Text.Whitespace), (r'.+$', Error), diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py index 2253a22e..63b887ad 100644 --- a/pygments/lexers/pascal.py +++ b/pygments/lexers/pascal.py @@ -10,8 +10,7 @@ import re -from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ - using, this, default +from pygments.lexer import Lexer from pygments.util import get_bool_opt, get_list_opt from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Error 
diff --git a/pygments/lexers/phix.py b/pygments/lexers/phix.py index 6027e336..b292aa53 100644 --- a/pygments/lexers/phix.py +++ b/pygments/lexers/phix.py @@ -11,7 +11,8 @@ import re from pygments.lexer import RegexLexer, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, Whitespace +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Whitespace __all__ = ['PhixLexer'] @@ -361,4 +362,3 @@ class PhixLexer(RegexLexer): (r'[*/#]', Comment.Multiline) ] } - diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py index 834f15b7..aad44483 100644 --- a/pygments/lexers/praat.py +++ b/pygments/lexers/praat.py @@ -9,8 +9,8 @@ """ from pygments.lexer import RegexLexer, words, bygroups, include -from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, Number, \ - Operator +from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, \ + Number, Operator __all__ = ['PraatLexer'] diff --git a/pygments/lexers/promql.py b/pygments/lexers/promql.py index 1c239a26..dcb8b33d 100644 --- a/pygments/lexers/promql.py +++ b/pygments/lexers/promql.py @@ -9,16 +9,8 @@ """ from pygments.lexer import RegexLexer, bygroups, default, words -from pygments.token import ( - Comment, - Keyword, - Name, - Number, - Operator, - Punctuation, - String, - Whitespace, -) +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, String, Whitespace __all__ = ["PromQLLexer"] diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py index 711dc836..6bd74a1e 100644 --- a/pygments/lexers/python.py +++ b/pygments/lexers/python.py @@ -171,7 +171,7 @@ class PythonLexer(RegexLexer): combined('bytesescape', 'dqs')), ("([bB])(')", bygroups(String.Affix, String.Single), combined('bytesescape', 'sqs')), - + (r'[^\S\n]+', Text), include('numbers'), (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), diff --git a/pygments/lexers/q.py b/pygments/lexers/q.py index 21a049fa..b445ee89 100644 --- a/pygments/lexers/q.py +++ b/pygments/lexers/q.py @@ -9,19 +9,8 @@ """ from pygments.lexer import RegexLexer, words, include, bygroups, inherit -from pygments.token import ( - Comment, - Keyword, - Name, - Number, - Operator, - Punctuation, - String, - Text, - Whitespace, - Literal, - Generic, -) +from pygments.token import Comment, Name, Number, Operator, Punctuation, \ + String, Whitespace, Literal, Generic __all__ = ["KLexer", "QLexer"] @@ -55,43 +44,32 @@ class KLexer(RegexLexer): include("declarations"), ], "keywords": [ - ( - words( - ("abs", "acos", "asin", "atan", "avg", "bin", - "binr", "by", "cor", "cos", "cov", "dev", - "delete", "div", "do", "enlist", "exec", "exit", - "exp", "from", "getenv", "hopen", "if", "in", - "insert", "last", "like", "log", "max", "min", - "prd", "select", "setenv", "sin", "sqrt", "ss", - "sum", "tan", "update", "var", "wavg", "while", - "within", "wsum", "xexp"), - suffix=r"\b", - ), - Operator.Word, - ), + (words(("abs", "acos", "asin", "atan", "avg", "bin", + "binr", "by", "cor", "cos", "cov", "dev", + "delete", "div", "do", "enlist", "exec", "exit", + "exp", "from", "getenv", "hopen", "if", "in", + "insert", "last", "like", "log", "max", "min", + "prd", "select", "setenv", "sin", "sqrt", "ss", + "sum", "tan", "update", "var", "wavg", "while", + "within", "wsum", "xexp"), + suffix=r"\b"), Operator.Word), ], "declarations": [ # Timing (r"^\\ts?", Comment.Preproc), - ( - r"^(\\\w\s+[^/\n]*?)(/.*)", - bygroups(Comment.Preproc, Comment.Single), - ), + (r"^(\\\w\s+[^/\n]*?)(/.*)", + 
bygroups(Comment.Preproc, Comment.Single)), # Generic System Commands (r"^\\\w.*", Comment.Preproc), # Prompt (r"^[a-zA-Z]\)", Generic.Prompt), # Function Names - ( - r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)(\s*)(\{)", - bygroups(Name.Function, Whitespace, Operator, Whitespace, Punctuation), - "functions", - ), + (r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)(\s*)(\{)", + bygroups(Name.Function, Whitespace, Operator, Whitespace, Punctuation), + "functions"), # Variable Names - ( - r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)", - bygroups(Name.Variable, Whitespace, Operator), - ), + (r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)", + bygroups(Name.Variable, Whitespace, Operator)), # Functions (r"\{", Punctuation, "functions"), # Parentheses @@ -131,40 +109,24 @@ class KLexer(RegexLexer): # Nulls/Infinities (r"0[nNwW][cefghijmndzuvtp]?", Number), # Timestamps - ( - ( - r"(?:[0-9]{4}[.][0-9]{2}[.][0-9]{2}|[0-9]+)" - "D(?:[0-9](?:[0-9](?::[0-9]{2}" - "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)?" - ), - Literal.Date, - ), + ((r"(?:[0-9]{4}[.][0-9]{2}[.][0-9]{2}|[0-9]+)" + "D(?:[0-9](?:[0-9](?::[0-9]{2}" + "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)?"), Literal.Date), # Datetimes - ( - ( - r"[0-9]{4}[.][0-9]{2}" - "(?:m|[.][0-9]{2}(?:T(?:[0-9]{2}:[0-9]{2}" - "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)" - ), - Literal.Date, - ), + ((r"[0-9]{4}[.][0-9]{2}" + "(?:m|[.][0-9]{2}(?:T(?:[0-9]{2}:[0-9]{2}" + "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)"), Literal.Date), # Times - ( - (r"[0-9]{2}:[0-9]{2}(?::[0-9]{2}(?:[.][0-9]{1,3})?)?"), - Literal.Date, - ), + (r"[0-9]{2}:[0-9]{2}(?::[0-9]{2}(?:[.][0-9]{1,3})?)?", + Literal.Date), # GUIDs - ( - r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", - Number.Hex, - ), + (r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", + Number.Hex), # Byte Vectors (r"0x[0-9a-fA-F]+", Number.Hex), # Floats - ( - r"([0-9]*[.]?[0-9]+|[0-9]+[.]?[0-9]*)[eE][+-]?[0-9]+[ef]?", - Number.Float, - ), + (r"([0-9]*[.]?[0-9]+|[0-9]+[.]?[0-9]*)[eE][+-]?[0-9]+[ef]?", + Number.Float), (r"([0-9]*[.][0-9]+|[0-9]+[.][0-9]*)[ef]?", Number.Float), (r"[0-9]+[ef]", Number.Float), # Characters @@ -200,35 +162,26 @@ class QLexer(KLexer): tokens = { "root": [ - ( - words( - ("aj", "aj0", "ajf", "ajf0", "all", "and", "any", - "asc", "asof", "attr", "avgs", "ceiling", "cols", - "count", "cross", "csv", "cut", "deltas", "desc", - "differ", "distinct", "dsave", "each", "ej", - "ema", "eval", "except", "fby", "fills", "first", - "fkeys", "flip", "floor", "get", "group", "gtime", - "hclose", "hcount", "hdel", "hsym", "iasc", - "idesc", "ij", "ijf", "inter", "inv", "key", - "keys", "lj", "ljf", "load", "lower", "lsq", - "ltime", "ltrim", "mavg", "maxs", "mcount", "md5", - "mdev", "med", "meta", "mins", "mmax", "mmin", - "mmu", "mod", "msum", "neg", "next", "not", - "null", "or", "over", "parse", "peach", "pj", - "prds", "prior", "prev", "rand", "rank", "ratios", - "raze", "read0", "read1", "reciprocal", "reval", - "reverse", "rload", "rotate", "rsave", "rtrim", - "save", "scan", "scov", "sdev", "set", "show", - "signum", "ssr", "string", "sublist", "sums", - "sv", "svar", "system", "tables", "til", "trim", - "txf", "type", "uj", "ujf", "ungroup", "union", - "upper", "upsert", "value", "view", "views", "vs", - "where", "wj", "wj1", "ww", "xasc", "xbar", - "xcol", "xcols", "xdesc", "xgroup", "xkey", - "xlog", "xprev", "xrank"), - suffix=r"\b", - ), - Name.Builtin, + (words(("aj", "aj0", "ajf", "ajf0", "all", "and", "any", "asc", + "asof", "attr", "avgs", 
"ceiling", "cols", "count", "cross", + "csv", "cut", "deltas", "desc", "differ", "distinct", "dsave", + "each", "ej", "ema", "eval", "except", "fby", "fills", "first", + "fkeys", "flip", "floor", "get", "group", "gtime", "hclose", + "hcount", "hdel", "hsym", "iasc", "idesc", "ij", "ijf", + "inter", "inv", "key", "keys", "lj", "ljf", "load", "lower", + "lsq", "ltime", "ltrim", "mavg", "maxs", "mcount", "md5", + "mdev", "med", "meta", "mins", "mmax", "mmin", "mmu", "mod", + "msum", "neg", "next", "not", "null", "or", "over", "parse", + "peach", "pj", "prds", "prior", "prev", "rand", "rank", "ratios", + "raze", "read0", "read1", "reciprocal", "reval", "reverse", + "rload", "rotate", "rsave", "rtrim", "save", "scan", "scov", + "sdev", "set", "show", "signum", "ssr", "string", "sublist", + "sums", "sv", "svar", "system", "tables", "til", "trim", "txf", + "type", "uj", "ujf", "ungroup", "union", "upper", "upsert", + "value", "view", "views", "vs", "where", "wj", "wj1", "ww", + "xasc", "xbar", "xcol", "xcols", "xdesc", "xgroup", "xkey", + "xlog", "xprev", "xrank"), + suffix=r"\b"), Name.Builtin, ), inherit, ], diff --git a/pygments/lexers/qlik.py b/pygments/lexers/qlik.py index 96ca53c0..bb4defd6 100644 --- a/pygments/lexers/qlik.py +++ b/pygments/lexers/qlik.py @@ -11,22 +11,10 @@ import re from pygments.lexer import RegexLexer, include, bygroups, words -from pygments.token import ( - Comment, - Keyword, - Name, - Number, - Operator, - Punctuation, - String, - Text, -) -from pygments.lexers._qlik_builtins import ( - OPERATORS_LIST, - STATEMENT_LIST, - SCRIPT_FUNCTIONS, - CONSTANT_LIST, -) +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, String, Text +from pygments.lexers._qlik_builtins import OPERATORS_LIST, STATEMENT_LIST, \ + SCRIPT_FUNCTIONS, CONSTANT_LIST __all__ = ["QlikLexer"] @@ -93,33 +81,20 @@ class QlikLexer(RegexLexer): (r"/\*", Comment.Multiline, "comment"), (r"//.*\n", Comment.Single), # variable assignment - ( - r"(let|set)(\s+)", - bygroups( - Keyword.Declaration, - Text.Whitespace, - ), - "assignment", - ), + (r"(let|set)(\s+)", bygroups(Keyword.Declaration, Text.Whitespace), + "assignment"), # Word operators - ( - words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"), - Operator.Word, - ), + (words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"), + Operator.Word), # Statements - ( - words( - STATEMENT_LIST, - suffix=r"\b", - ), - Keyword, - ), + (words(STATEMENT_LIST, suffix=r"\b"), Keyword), # Table names (r"[a-z]\w*:", Keyword.Declaration), # Constants (words(CONSTANT_LIST, suffix=r"\b"), Keyword.Constant), # Functions - (words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin, "function"), + (words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin, + "function"), # interpolation - e.g. 
$(variableName) include("interp"), # Quotes denote a field/file name diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py index ed542077..3919a9b1 100644 --- a/pygments/lexers/rdf.py +++ b/pygments/lexers/rdf.py @@ -11,8 +11,8 @@ import re from pygments.lexer import RegexLexer, bygroups, default -from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \ - Whitespace, Name, Literal, Comment, Text +from pygments.token import Keyword, Punctuation, String, Number, Operator, \ + Generic, Whitespace, Name, Literal, Comment, Text __all__ = ['SparqlLexer', 'TurtleLexer', 'ShExCLexer'] diff --git a/pygments/lexers/ride.py b/pygments/lexers/ride.py index 9d535d80..e68c3961 100644 --- a/pygments/lexers/ride.py +++ b/pygments/lexers/ride.py @@ -9,7 +9,8 @@ """ from pygments.lexer import RegexLexer, words, include -from pygments.token import Comment, Keyword, Name, Number, Punctuation, String, Text +from pygments.token import Comment, Keyword, Name, Number, Punctuation, \ + String, Text __all__ = ['RideLexer'] diff --git a/pygments/lexers/rita.py b/pygments/lexers/rita.py index a8e4eca9..ff742e9d 100644 --- a/pygments/lexers/rita.py +++ b/pygments/lexers/rita.py @@ -8,11 +8,9 @@ :license: BSD, see LICENSE for details. """ -import re - -from pygments.lexer import RegexLexer, include, bygroups, using, this, \ - inherit, words -from pygments.token import Comment, Operator, Keyword, Name, Literal, Punctuation, Text, Whitespace +from pygments.lexer import RegexLexer +from pygments.token import Comment, Operator, Keyword, Name, Literal, \ + Punctuation, Whitespace __all__ = ['RitaLexer'] @@ -21,7 +19,7 @@ class RitaLexer(RegexLexer): """ Lexer for RITA. - .. versionadded:: 2.11 + .. versionadded:: 2.11 """ name = 'Rita' url = 'https://github.com/zaibacu/rita-dsl' diff --git a/pygments/lexers/savi.py b/pygments/lexers/savi.py index 280a3e47..2397fabe 100644 --- a/pygments/lexers/savi.py +++ b/pygments/lexers/savi.py @@ -9,12 +9,12 @@ """ from pygments.lexer import RegexLexer, bygroups, include -from pygments.token import \ - Whitespace, Keyword, Name, String, Number, \ +from pygments.token import Whitespace, Keyword, Name, String, Number, \ Operator, Punctuation, Comment, Generic, Error __all__ = ['SaviLexer'] + # The canonical version of this file can be found in the following repository, # where it is kept in sync with any language changes, as well as the other # pygments-like lexers that are maintained for use with other tools: diff --git a/pygments/lexers/scdoc.py b/pygments/lexers/scdoc.py index cb602db9..f670d334 100644 --- a/pygments/lexers/scdoc.py +++ b/pygments/lexers/scdoc.py @@ -10,11 +10,8 @@ import re -from pygments.lexer import RegexLexer, include, bygroups, \ - using, this -from pygments.token import Text, Comment, Keyword, String, \ - Generic - +from pygments.lexer import RegexLexer, include, bygroups, using, this +from pygments.token import Text, Comment, Keyword, String, Generic __all__ = ['ScdocLexer'] @@ -22,7 +19,7 @@ __all__ = ['ScdocLexer'] class ScdocLexer(RegexLexer): """ `scdoc` is a simple man page generator for POSIX systems written in C99. - + .. 
versionadded:: 2.5 """ name = 'scdoc' diff --git a/pygments/lexers/sgf.py b/pygments/lexers/sgf.py index 1b066b1c..865d55c2 100644 --- a/pygments/lexers/sgf.py +++ b/pygments/lexers/sgf.py @@ -9,7 +9,7 @@ """ from pygments.lexer import RegexLexer, bygroups -from pygments.token import Name, Literal, String, Text, Punctuation, Whitespace +from pygments.token import Name, Literal, String, Punctuation, Whitespace __all__ = ["SmartGameFormatLexer"] diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py index cb9cb1de..acd78d97 100644 --- a/pygments/lexers/shell.py +++ b/pygments/lexers/shell.py @@ -16,7 +16,6 @@ from pygments.token import Punctuation, \ Text, Comment, Operator, Keyword, Name, String, Number, Generic from pygments.util import shebang_matches - __all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer', 'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer', 'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer', diff --git a/pygments/lexers/sieve.py b/pygments/lexers/sieve.py index 7f59651d..ab43db8b 100644 --- a/pygments/lexers/sieve.py +++ b/pygments/lexers/sieve.py @@ -18,7 +18,8 @@ """ from pygments.lexer import RegexLexer, bygroups -from pygments.token import Comment, Name, Literal, String, Text, Punctuation, Keyword +from pygments.token import Comment, Name, Literal, String, Text, Punctuation, \ + Keyword __all__ = ["SieveLexer"] @@ -26,6 +27,8 @@ __all__ = ["SieveLexer"] class SieveLexer(RegexLexer): """ Lexer for sieve format. + + .. versionadded:: 2.6 """ name = 'Sieve' filenames = ['*.siv', '*.sieve'] @@ -39,10 +42,17 @@ class SieveLexer(RegexLexer): (r'(?i)require', Keyword.Namespace), # tags: - (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|count|days|detail|domain|fcc|flags|from|handle|importance|is|localpart|length|lowerfirst|lower|matches|message|mime|options|over|percent|quotewildcard|raw|regex|specialuse|subject|text|under|upperfirst|upper|value)', + (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|' + r'count|days|detail|domain|fcc|flags|from|handle|importance|is|' + r'localpart|length|lowerfirst|lower|matches|message|mime|options|' + r'over|percent|quotewildcard|raw|regex|specialuse|subject|text|' + r'under|upperfirst|upper|value)', bygroups(Name.Tag, Name.Tag)), # tokens: - (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|ereject|exists|false|fileinto|if|hasflag|header|keep|notify_method_capability|notify|not|redirect|reject|removeflag|setflag|size|spamtest|stop|string|true|vacation|virustest)', + (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|' + r'ereject|exists|false|fileinto|if|hasflag|header|keep|' + r'notify_method_capability|notify|not|redirect|reject|removeflag|' + r'setflag|size|spamtest|stop|string|true|vacation|virustest)', Name.Builtin), (r'(?i)set', Keyword.Declaration), diff --git a/pygments/lexers/smithy.py b/pygments/lexers/smithy.py index 08301d5e..5f2f76cd 100644 --- a/pygments/lexers/smithy.py +++ b/pygments/lexers/smithy.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Keyword, Name, String, \ Number, Whitespace, Punctuation diff --git a/pygments/lexers/solidity.py b/pygments/lexers/solidity.py index 2b406173..adef85c1 100644 --- a/pygments/lexers/solidity.py +++ b/pygments/lexers/solidity.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. 
""" -import re - from pygments.lexer import RegexLexer, bygroups, include, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace diff --git a/pygments/lexers/spice.py b/pygments/lexers/spice.py index e7a37f4d..53f111b0 100644 --- a/pygments/lexers/spice.py +++ b/pygments/lexers/spice.py @@ -8,8 +8,6 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, bygroups, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace @@ -41,12 +39,18 @@ class SpiceLexer(RegexLexer): # keywords (r'(import|as)\b', Keyword.Namespace), (r'(f|p|type|struct|enum)\b', Keyword.Declaration), - (words(('if', 'else', 'for', 'foreach', 'while', 'break', 'continue', 'return', 'assert', 'thread', 'unsafe', 'ext', 'dll'), suffix=r'\b'), Keyword), - (words(('const', 'signed', 'unsigned', 'inline', 'public'), suffix=r'\b'), Keyword.Pseudo), - (words(('new', 'switch', 'case', 'yield', 'stash', 'pick', 'sync', 'class'), suffix=r'\b'), Keyword.Reserved), + (words(('if', 'else', 'for', 'foreach', 'while', 'break', + 'continue', 'return', 'assert', 'thread', 'unsafe', 'ext', + 'dll'), suffix=r'\b'), Keyword), + (words(('const', 'signed', 'unsigned', 'inline', 'public'), + suffix=r'\b'), Keyword.Pseudo), + (words(('new', 'switch', 'case', 'yield', 'stash', 'pick', 'sync', + 'class'), suffix=r'\b'), Keyword.Reserved), (r'(true|false|nil)\b', Keyword.Constant), - (words(('double', 'int', 'short', 'long', 'byte', 'char', 'string', 'bool', 'dyn'), suffix=r'\b'), Keyword.Type), - (words(('printf', 'sizeof', 'len', 'tid', 'join'), suffix=r'\b(\()'), bygroups(Name.Builtin, Punctuation)), + (words(('double', 'int', 'short', 'long', 'byte', 'char', 'string', + 'bool', 'dyn'), suffix=r'\b'), Keyword.Type), + (words(('printf', 'sizeof', 'len', 'tid', 'join'), suffix=r'\b(\()'), + bygroups(Name.Builtin, Punctuation)), # numeric literals (r'[0-9]*[.][0-9]+', Number.Double), (r'0[bB][01]+[sl]?', Number.Bin), @@ -58,7 +62,8 @@ class SpiceLexer(RegexLexer): # char literal (r'\'(\\\\|\\[^\\]|[^\'\\])\'', String.Char), # tokens - (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|[+\-*/&]', Operator), + (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||' + r'\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|[+\-*/&]', Operator), (r'[|<>=!()\[\]{}.,;:\?]', Punctuation), # identifiers (r'[^\W\d]\w*', Name.Other), diff --git a/pygments/lexers/srcinfo.py b/pygments/lexers/srcinfo.py index 644d9b88..870527a7 100644 --- a/pygments/lexers/srcinfo.py +++ b/pygments/lexers/srcinfo.py @@ -4,7 +4,8 @@ Lexers for .SRCINFO files used by Arch Linux Packages. - The description of the format can be found in the wiki: https://wiki.archlinux.org/title/.SRCINFO + The description of the format can be found in the wiki: + https://wiki.archlinux.org/title/.SRCINFO :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
@@ -19,15 +20,18 @@ keywords = ( 'pkgbase', 'pkgname', 'pkgver', 'pkgrel', 'epoch', 'pkgdesc', 'url', 'install', 'changelog', - 'arch', 'groups', 'license', 'noextract', 'options', 'backup', 'validpgpkeys', + 'arch', 'groups', 'license', 'noextract', 'options', 'backup', + 'validpgpkeys', ) architecture_dependent_keywords = ( 'source', 'depends', 'checkdepends', 'makedepends', 'optdepends', 'provides', 'conflicts', 'replaces', - 'md5sums', 'sha1sums', 'sha224sums', 'sha256sums', 'sha384sums', 'sha512sums' + 'md5sums', 'sha1sums', 'sha224sums', 'sha256sums', 'sha384sums', + 'sha512sums', ) + class SrcinfoLexer(RegexLexer): """Lexer for .SRCINFO files used by Arch Linux Packages. @@ -43,7 +47,8 @@ class SrcinfoLexer(RegexLexer): (r'\s+', Whitespace), (r'#.*', Comment.Single), (words(keywords), Keyword, 'assignment'), - (words(architecture_dependent_keywords, suffix=r'_\w+'), Keyword, 'assignment'), + (words(architecture_dependent_keywords, suffix=r'_\w+'), + Keyword, 'assignment'), (r'\w+', Name.Variable, 'assignment'), ], 'assignment': [ diff --git a/pygments/lexers/supercollider.py b/pygments/lexers/supercollider.py index efa44b73..7cf405f5 100644 --- a/pygments/lexers/supercollider.py +++ b/pygments/lexers/supercollider.py @@ -28,7 +28,7 @@ class SuperColliderLexer(RegexLexer): url = 'http://supercollider.github.io/' aliases = ['supercollider', 'sc'] filenames = ['*.sc', '*.scd'] - mimetypes = ['application/supercollider', 'text/supercollider', ] + mimetypes = ['application/supercollider', 'text/supercollider'] flags = re.DOTALL | re.MULTILINE tokens = { diff --git a/pygments/lexers/tal.py b/pygments/lexers/tal.py index 7bc3fcb1..391d1da5 100644 --- a/pygments/lexers/tal.py +++ b/pygments/lexers/tal.py @@ -10,13 +10,13 @@ :license: BSD, see LICENSE for details. """ -import re - from pygments.lexer import RegexLexer, words -from pygments.token import Comment, Keyword, Name, String, Number, Punctuation, Whitespace, Literal +from pygments.token import Comment, Keyword, Name, String, Number, \ + Punctuation, Whitespace, Literal __all__ = ['TalLexer'] + class TalLexer(RegexLexer): """ For `Uxntal <https://wiki.xxiivv.com/site/uxntal.html>`_ source code. 
@@ -49,7 +49,8 @@ class TalLexer(RegexLexer): 'root': [ (r'\s+', Whitespace), # spaces (r'(?<!\S)\((?!\S)', Comment.Multiline, 'comment'), # comments - (words(instructions, prefix=r'(?<!\S)', suffix=r'2?k?r?(?!\S)'), Keyword.Reserved), # instructions + (words(instructions, prefix=r'(?<!\S)', suffix=r'2?k?r?(?!\S)'), + Keyword.Reserved), # instructions (r'[][{}](?!\S)', Punctuation), # delimiters (r'#([0-9a-f]{2}){1,2}(?!\S)', Number.Hex), # integer (r'"\S+', String), # raw string diff --git a/pygments/lexers/tcl.py b/pygments/lexers/tcl.py index bf9ab7cf..f29cc2cc 100644 --- a/pygments/lexers/tcl.py +++ b/pygments/lexers/tcl.py @@ -24,21 +24,24 @@ class TclLexer(RegexLexer): """ keyword_cmds_re = words(( - 'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error', - 'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return', - 'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', - 'vwait', 'while'), prefix=r'\b', suffix=r'\b') + 'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', + 'else', 'error', 'eval', 'expr', 'for', 'foreach', 'global', 'if', + 'namespace', 'proc', 'rename', 'return', 'set', 'switch', 'then', + 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable', 'vwait', + 'while'), prefix=r'\b', suffix=r'\b') builtin_cmds_re = words(( - 'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict', - 'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file', - 'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp', - 'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk', - 'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc', - 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex', - 'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan', - 'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string', - 'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b') + 'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', + 'concat', 'dde', 'dict', 'encoding', 'eof', 'exec', 'exit', 'fblocked', + 'fconfigure', 'fcopy', 'file', 'fileevent', 'flush', 'format', 'gets', + 'glob', 'history', 'http', 'incr', 'info', 'interp', 'join', 'lappend', + 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk', + 'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', + 'mathfunc', 'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', + 'pkg::create', 'pkg_mkIndex', 'platform', 'platform::shell', 'puts', + 'pwd', 're_syntax', 'read', 'refchan', 'regexp', 'registry', 'regsub', + 'scan', 'seek', 'socket', 'source', 'split', 'string', 'subst', 'tell', + 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b') name = 'Tcl' url = 'https://www.tcl.tk/about/language.html' diff --git a/pygments/lexers/teal.py b/pygments/lexers/teal.py index 5676f2f2..b49bb4b8 100644 --- a/pygments/lexers/teal.py +++ b/pygments/lexers/teal.py @@ -13,6 +13,7 @@ from pygments.token import Comment, Name, Number, String, Text, Keyword __all__ = ['TealLexer'] + class TealLexer(RegexLexer): """ For the Transaction Execution Approval Language (TEAL) diff --git a/pygments/lexers/textedit.py b/pygments/lexers/textedit.py index 1eb1b2f6..14884bbd 100644 --- a/pygments/lexers/textedit.py +++ b/pygments/lexers/textedit.py @@ -13,7 +13,7 @@ from 
 
 from pygments.lexer import RegexLexer, bygroups, default, include, this, using
 from pygments.lexers.python import PythonLexer
-from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
     Punctuation, String, Text, Whitespace
 
 __all__ = ['AwkLexer', 'SedLexer', 'VimLexer']
diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py
index 4dcf77cb..2fa69008 100644
--- a/pygments/lexers/theorem.py
+++ b/pygments/lexers/theorem.py
@@ -8,8 +8,6 @@
     :license: BSD, see LICENSE for details.
 """
 
-import re
-
 from pygments.lexer import RegexLexer, default, words
 from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
     Number, Punctuation, Generic, Whitespace
diff --git a/pygments/lexers/tnt.py b/pygments/lexers/tnt.py
index 582b2a2f..e0e205da 100644
--- a/pygments/lexers/tnt.py
+++ b/pygments/lexers/tnt.py
@@ -21,7 +21,7 @@ class TNTLexer(Lexer):
     """
     Lexer for Typographic Number Theory, as described in the book
     Gödel, Escher, Bach, by Douglas R. Hofstadter
-    
+
     .. versionadded:: 2.7
     """
diff --git a/pygments/lexers/ul4.py b/pygments/lexers/ul4.py
index 2511cee6..1530a52a 100644
--- a/pygments/lexers/ul4.py
+++ b/pygments/lexers/ul4.py
@@ -18,15 +18,8 @@ from pygments.token import Comment, Text, Keyword, String, Number, Literal, \
 from pygments.lexers.web import HtmlLexer, XmlLexer, CssLexer, JavascriptLexer
 from pygments.lexers.python import PythonLexer
 
-
-__all__ = [
-    'UL4Lexer',
-    'HTMLUL4Lexer',
-    'XMLUL4Lexer',
-    'CSSUL4Lexer',
-    'JavascriptUL4Lexer',
-    'PythonUL4Lexer',
-]
+__all__ = ['UL4Lexer', 'HTMLUL4Lexer', 'XMLUL4Lexer', 'CSSUL4Lexer',
+           'JavascriptUL4Lexer', 'PythonUL4Lexer']
 
 
 class UL4Lexer(RegexLexer):
@@ -48,13 +41,15 @@ class UL4Lexer(RegexLexer):
                 # Template header without name:
                 # ``<?ul4?>``
                 r"(<\?)(\s*)(ul4)(\s*)(\?>)",
-                bygroups(Comment.Preproc, Text.Whitespace, Keyword, Text.Whitespace, Comment.Preproc),
+                bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+                         Text.Whitespace, Comment.Preproc),
             ),
             (
                 # Template header with name (potentially followed by the signature):
                 # ``<?ul4 foo(bar=42)?>``
                 r"(<\?)(\s*)(ul4)(\s*)([a-zA-Z_][a-zA-Z_0-9]*)?",
-                bygroups(Comment.Preproc, Text.Whitespace, Keyword, Text.Whitespace, Name.Function),
+                bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+                         Text.Whitespace, Name.Function),
                 "ul4", # Switch to "expression" mode
             ),
             (
@@ -80,7 +75,8 @@
                 # ``<?def?>`` tag for defining local templates
                 # ``<?def foo(bar=42)?>...<?end def?>``
                 r"(<\?)(\s*)(def)(\s*)([a-zA-Z_][a-zA-Z_0-9]*)?",
-                bygroups(Comment.Preproc, Text.Whitespace, Keyword, Text.Whitespace, Name.Function),
+                bygroups(Comment.Preproc, Text.Whitespace, Keyword,
+                         Text.Whitespace, Name.Function),
                 "ul4", # Switch to "expression" mode
             ),
             (
diff --git a/pygments/lexers/webmisc.py b/pygments/lexers/webmisc.py
index 187d641e..1042c774 100644
--- a/pygments/lexers/webmisc.py
+++ b/pygments/lexers/webmisc.py
@@ -26,7 +26,7 @@ __all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer']
 class DuelLexer(RegexLexer):
     """
     Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
-    
+
     .. versionadded:: 1.4
     """
diff --git a/pygments/lexers/wowtoc.py b/pygments/lexers/wowtoc.py
index 03a79fe9..320ae011 100644
--- a/pygments/lexers/wowtoc.py
+++ b/pygments/lexers/wowtoc.py
@@ -3,12 +3,13 @@
     ~~~~~~~~~~~~~~~~~~~~~~
 
     Lexer for World of Warcraft TOC files
-    
+
     TOC files describe game addons.
 
     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import re
 
 from pygments.lexer import RegexLexer, bygroups
@@ -58,12 +59,15 @@ class WoWTocLexer(RegexLexer):
             # official localized tags, Notes and Title
             # (normal part is insensitive, locale part is sensitive)
             _create_tag_line_token(
-                r"((?:[nN][oO][tT][eE][sS]|[tT][iI][tT][lL][eE])-(?:ptBR|zhCN|enCN|frFR|deDE|itIT|esMX|ptPT|koKR|ruRU|esES|zhTW|enTW|enGB|enUS))",
+                r"((?:[nN][oO][tT][eE][sS]|[tT][iI][tT][lL][eE])-(?:ptBR|zhCN|"
+                r"enCN|frFR|deDE|itIT|esMX|ptPT|koKR|ruRU|esES|zhTW|enTW|enGB|enUS))",
                 Name.Builtin,
             ),
             # other official tags
             _create_tag_line_token(
-                r"(Interface|Title|Notes|RequiredDeps|Dep[^: ]*|OptionalDeps|LoadOnDemand|LoadWith|LoadManagers|SavedVariablesPerCharacter|SavedVariables|DefaultState|Secure|Author|Version)",
+                r"(Interface|Title|Notes|RequiredDeps|Dep[^: ]*|OptionalDeps|"
+                r"LoadOnDemand|LoadWith|LoadManagers|SavedVariablesPerCharacter|"
+                r"SavedVariables|DefaultState|Secure|Author|Version)",
                 Name.Builtin,
                 ignore_case=True,
             ),
@@ -78,10 +82,10 @@ class WoWTocLexer(RegexLexer):
                 r"([^: ]*)",
                 Name.Other,
             ),
-            
+
             # Comments
             (r"^#.*$", Comment),
-            
+
             # Addon Files
             (r"^.+$", Name),
         ]
@@ -92,7 +96,7 @@ class WoWTocLexer(RegexLexer):
         # markup.py. Tex's anaylse_text() appears to be definitive (binary) and does not
         # share any likeness to WoW TOCs, which means we wont have to compete with it by
         # abitrary increments in score.
-        
+
         result = 0
 
         # while not required, an almost certain marker of WoW TOC's is the interface tag
@@ -103,7 +107,7 @@ class WoWTocLexer(RegexLexer):
         match = re.search(interface_pattern, text)
         if match and re.match(r"(\d+)(\d{2})(\d{2})", match.group(7)):
             result += 0.8
-        
+
         casefolded = text.casefold()
         # Lua file listing is good marker too, but probably conflicts with many other
         # lexers
diff --git a/pygments/lexers/x10.py b/pygments/lexers/x10.py
index 855b8ccb..3e77c431 100644
--- a/pygments/lexers/x10.py
+++ b/pygments/lexers/x10.py
@@ -13,11 +13,12 @@ from pygments.token import Text, Comment, Keyword, String
 
 __all__ = ['X10Lexer']
 
+
 class X10Lexer(RegexLexer):
     """
    For the X10 language.
 
-    .. versionadded:: 0.1
+    .. versionadded:: 2.2
     """
 
     name = 'X10'
diff --git a/pygments/lexers/xorg.py b/pygments/lexers/xorg.py
index 54f131fe..196ebcfa 100644
--- a/pygments/lexers/xorg.py
+++ b/pygments/lexers/xorg.py
@@ -15,7 +15,7 @@ __all__ = ['XorgLexer']
 
 
 class XorgLexer(RegexLexer):
-    """Lexer for xorg.conf file."""
+    """Lexer for xorg.conf files."""
     name = 'Xorg'
     url = 'https://www.x.org/wiki/'
     aliases = ['xorg.conf']
diff --git a/pygments/lexers/yang.py b/pygments/lexers/yang.py
index 0e68cb82..1df675bc 100644
--- a/pygments/lexers/yang.py
+++ b/pygments/lexers/yang.py
@@ -8,12 +8,12 @@
     :license: BSD, see LICENSE for details.
 """
 
-from pygments.lexer import (RegexLexer, bygroups, words)
-from pygments.token import (Text, Token, Name, String, Comment,
-                            Number)
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Token, Name, String, Comment, Number
 
 __all__ = ['YangLexer']
 
+
 class YangLexer(RegexLexer):
     """
     Lexer for YANG, based on RFC7950.
diff --git a/pygments/lexers/zig.py b/pygments/lexers/zig.py
index 3101154a..be897a9d 100644
--- a/pygments/lexers/zig.py
+++ b/pygments/lexers/zig.py
@@ -17,6 +17,8 @@ __all__ = ['ZigLexer']
 
 class ZigLexer(RegexLexer):
     """
+    Lexer for the Zig language.
+
     grammar: https://ziglang.org/documentation/master/#Grammar
     """
 
     name = 'Zig'
diff --git a/pygments/styles/dracula.py b/pygments/styles/dracula.py
index e977bb45..9c2a7d14 100644
--- a/pygments/styles/dracula.py
+++ b/pygments/styles/dracula.py
@@ -12,10 +12,8 @@
 """
 
 from pygments.style import Style
-from pygments.token import (
-    Keyword, Name, Comment, String, Error, Literal, Number, Operator, Other,
-    Punctuation, Text, Generic, Whitespace,
-)
+from pygments.token import Keyword, Name, Comment, String, Error, Literal, \
+    Number, Operator, Other, Punctuation, Text, Generic, Whitespace
 
 
 class DraculaStyle(Style):
diff --git a/pygments/styles/gh_dark.py b/pygments/styles/gh_dark.py
index 445a2ded..4899879d 100644
--- a/pygments/styles/gh_dark.py
+++ b/pygments/styles/gh_dark.py
@@ -10,8 +10,8 @@
 """
 
 from pygments.style import Style
-from pygments.token import (Keyword, Name, Comment, Error, Number,
-                            Operator, Generic, Text, Literal, String, Token)
+from pygments.token import Keyword, Name, Comment, Error, Number, Operator, \
+    Generic, Text, Literal, String, Token
 
 
 # vars are defined to match the defs in
diff --git a/pygments/styles/nord.py b/pygments/styles/nord.py
index 15b6d5cb..10a8a546 100644
--- a/pygments/styles/nord.py
+++ b/pygments/styles/nord.py
@@ -10,25 +10,13 @@
 """
 
 from pygments.style import Style
-from pygments.token import (
-    Keyword,
-    Name,
-    Comment,
-    String,
-    Error,
-    Number,
-    Operator,
-    Generic,
-    Whitespace,
-    Punctuation,
-    Text,
-    Token,
-)
+from pygments.token import Keyword, Name, Comment, String, Error, Number, \
+    Operator, Generic, Whitespace, Punctuation, Text, Token
 
 
 class NordStyle(Style):
     """
-    Pygments version of the "nord" theme by Arctic Ice Studio
+    Pygments version of the "nord" theme by Arctic Ice Studio.
     """
 
     line_number_color = "#D8DEE9"
@@ -93,6 +81,7 @@ class NordStyle(Style):
         Text: '#d8dee9',
     }
 
+
 class NordDarkerStyle(Style):
     """
     Pygments version of a darker "nord" theme by Arctic Ice Studio
diff --git a/pygments/styles/onedark.py b/pygments/styles/onedark.py
index 91ace5b5..21a3ac4b 100644
--- a/pygments/styles/onedark.py
+++ b/pygments/styles/onedark.py
@@ -12,13 +12,13 @@
 """
 
 from pygments.style import Style
-from pygments.token import (Comment, Generic, Keyword, Name, Number, Operator,
-                            Punctuation, String, Token, Whitespace)
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+    Punctuation, String, Token
 
 
 class OneDarkStyle(Style):
     """
-    Theme inspired by One Dark Pro for Atom
+    Theme inspired by One Dark Pro for Atom.
 
     .. versionadded:: 2.11
     """
diff --git a/pygments/styles/rainbow_dash.py b/pygments/styles/rainbow_dash.py
index fa0e0ca5..e97d1c5c 100644
--- a/pygments/styles/rainbow_dash.py
+++ b/pygments/styles/rainbow_dash.py
@@ -11,8 +11,8 @@
 """
 
 from pygments.style import Style
-from pygments.token import (Comment, Error, Generic, Name, Number, Operator,
-                            String, Text, Whitespace, Keyword)
+from pygments.token import Comment, Error, Generic, Name, Number, Operator, \
+    String, Text, Whitespace, Keyword
 
 BLUE_LIGHT = '#0080ff'
 BLUE = '#2c5dcd'
diff --git a/pygments/styles/zenburn.py b/pygments/styles/zenburn.py
index 016ce0d4..c2665642 100644
--- a/pygments/styles/zenburn.py
+++ b/pygments/styles/zenburn.py
@@ -12,10 +12,8 @@
 """
 
 from pygments.style import Style
-from pygments.token import (
-    Token, Name, Operator, Keyword, Generic, Comment, Number, String, Literal,
-    Punctuation, Error,
-)
+from pygments.token import Token, Name, Operator, Keyword, Generic, Comment, \
+    Number, String, Literal, Punctuation, Error
 
 
 class ZenburnStyle(Style):
diff --git a/tests/test_templates.py b/tests/test_templates.py
index b841ccbc..9ed816f8 100644
--- a/tests/test_templates.py
+++ b/tests/test_templates.py
@@ -1,11 +1,7 @@
 import pytest
 
-from pygments.lexers.templates import (
-    JavascriptDjangoLexer,
-    MasonLexer,
-    SqlJinjaLexer,
-    VelocityLexer,
-)
+from pygments.lexers.templates import JavascriptDjangoLexer, MasonLexer, \
+    SqlJinjaLexer, VelocityLexer
 from pygments.token import Comment