diff options
86 files changed, 12645 insertions, 1008 deletions
@@ -7,7 +7,7 @@ Other contributors, listed alphabetically, are: * Sam Aaron -- Ioke lexer * Ali Afshar -- image formatter -* Thomas Aglassinger -- Easytrieve, JCL and Rexx lexers +* Thomas Aglassinger -- Easytrieve, JCL, Rexx and Transact-SQL lexers * Muthiah Annamalai -- Ezhil lexer * Kumar Appaiah -- Debian control lexer * Andreas Amann -- AppleScript lexer @@ -36,8 +36,9 @@ Other contributors, listed alphabetically, are: * Matthias Bussonnier -- ANSI style handling for terminal-256 formatter * chebee7i -- Python traceback lexer improvements * Hiram Chirino -- Scaml and Jade lexers +* Mauricio Caceres -- SAS and Stata lexers. * Ian Cooper -- VGL lexer -* David Corbett -- Inform, Jasmin, JSGF, and TADS 3 lexers +* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers * Leaf Corcoran -- MoonScript lexer * Christopher Creutzig -- MuPAD lexer * Daniël W. Crompton -- Pike lexer @@ -65,6 +66,7 @@ Other contributors, listed alphabetically, are: * Alex Gilding -- BlitzBasic lexer * Bertrand Goetzmann -- Groovy lexer * Krzysiek Goj -- Scala lexer +* Andrey Golovizin -- BibTeX lexers * Matt Good -- Genshi, Cheetah lexers * Michał Górny -- vim modeline support * Alex Gosse -- TrafficScript lexer @@ -139,6 +141,7 @@ Other contributors, listed alphabetically, are: * Mher Movsisyan -- DTD lexer * Dejan Muhamedagic -- Crmsh lexer * Ana Nelson -- Ragel, ANTLR, R console lexers +* Kurt Neufeld -- Markdown lexer * Nam T. 
Nguyen -- Monokai style * Jesper Noehr -- HTML formatter "anchorlinenos" * Mike Nolta -- Julia lexer @@ -152,6 +155,7 @@ Other contributors, listed alphabetically, are: * Dominik Picheta -- Nimrod lexer * Andrew Pinkham -- RTF Formatter Refactoring * Clément Prévost -- UrbiScript lexer +* Oleh Prypin -- Crystal lexer (based on Ruby lexer) * Elias Rabel -- Fortran fixed form lexer * raichoo -- Idris lexer * Kashif Rasul -- CUDA lexer @@ -169,15 +173,17 @@ Other contributors, listed alphabetically, are: * Matteo Sasso -- Common Lisp lexer * Joe Schafer -- Ada lexer * Ken Schutte -- Matlab lexers +* Sebastian Schweizer -- Whiley lexer * Tassilo Schweyer -- Io, MOOCode lexers * Ted Shaw -- AutoIt lexer * Joerg Sieker -- ABAP lexer * Robert Simmons -- Standard ML lexer * Kirill Simonov -- YAML lexer +* Corbin Simpson -- Monte lexer * Alexander Smishlajev -- Visual FoxPro lexer * Steve Spigarelli -- XQuery lexer * Jerome St-Louis -- eC lexer -* Camil Staps -- Clean lexer +* Camil Staps -- Clean and NuSMV lexers * James Strachan -- Kotlin lexer * Tom Stuart -- Treetop lexer * Colin Sullivan -- SuperCollider lexer @@ -187,6 +193,7 @@ Other contributors, listed alphabetically, are: * Jeremy Thurgood -- Erlang, Squid config lexers * Brian Tiffin -- OpenCOBOL lexer * Bob Tolbert -- Hy lexer +* Matthias Trute -- Forth lexer * Erick Tryzelaar -- Felix lexer * Alexander Udalov -- Kotlin lexer improvements * Thomas Van Doren -- Chapel lexer @@ -55,6 +55,9 @@ test: test-coverage: @$(PYTHON) tests/run.py -d --with-coverage --cover-package=pygments --cover-erase $(TEST) +test-examplefiles: + nosetests tests/test_examplefiles.py + tox-test: @tox -- $(TEST) diff --git a/doc/docs/lexers.rst b/doc/docs/lexers.rst index 9262efb0..ef40f140 100644 --- a/doc/docs/lexers.rst +++ b/doc/docs/lexers.rst @@ -31,7 +31,7 @@ Currently, **all lexers** support these options: If this option is set to ``"guess"``, a simple UTF-8 vs. 
Latin-1 detection is used, if it is set to ``"chardet"``, the - `chardet library <http://chardet.feedparser.org/>`_ is used to + `chardet library <https://chardet.github.io/>`_ is used to guess the encoding of the input. .. versionadded:: 0.6 diff --git a/doc/docs/unicode.rst b/doc/docs/unicode.rst index 17853a36..dca91116 100644 --- a/doc/docs/unicode.rst +++ b/doc/docs/unicode.rst @@ -55,4 +55,4 @@ encoding is handled differently, see :doc:`the command line docs <cmdline>`. options dict with lexers and formatters, and still have different input and output encodings. -.. _chardet: http://chardet.feedparser.org/ +.. _chardet: https://chardet.github.io/ diff --git a/doc/languages.rst b/doc/languages.rst index ffe1bdb6..7fa8eb2f 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -26,6 +26,7 @@ Programming languages * Common Lisp * Coq * Cryptol (incl. Literate Cryptol) +* `Crystal <http://crystal-lang.org>`_ * `Cython <http://cython.org>`_ * `D <http://dlang.org>`_ * Dart diff --git a/external/autopygmentize b/external/autopygmentize index 7873c22a..f18cac09 100755 --- a/external/autopygmentize +++ b/external/autopygmentize @@ -40,6 +40,7 @@ if [[ "$lexer" == text ]]; then text/x-po) lexer=po;; text/x-python) lexer=python;; text/x-ruby) lexer=ruby;; + text/x-crystal) lexer=crystal;; text/x-shellscript) lexer=sh;; text/x-tcl) lexer=tcl;; text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer diff --git a/pygments/lexers/_lua_builtins.py b/pygments/lexers/_lua_builtins.py index 6d2929b6..7472b9e6 100644 --- a/pygments/lexers/_lua_builtins.py +++ b/pygments/lexers/_lua_builtins.py @@ -15,54 +15,65 @@ from __future__ import print_function - MODULES = {'basic': ('_G', '_VERSION', 'assert', 'collectgarbage', 'dofile', 'error', - 'getfenv', 'getmetatable', 'ipairs', 'load', 'loadfile', - 'loadstring', 'next', 'pairs', 'pcall', 'print', 'rawequal', 'rawget', + 'rawlen', 'rawset', 'select', - 'setfenv', 'setmetatable', 'tonumber', 
'tostring', 'type', - 'unpack', 'xpcall'), + 'bit32': ('bit32.arshift', + 'bit32.band', + 'bit32.bnot', + 'bit32.bor', + 'bit32.btest', + 'bit32.bxor', + 'bit32.extract', + 'bit32.lrotate', + 'bit32.lshift', + 'bit32.replace', + 'bit32.rrotate', + 'bit32.rshift'), 'coroutine': ('coroutine.create', + 'coroutine.isyieldable', 'coroutine.resume', 'coroutine.running', 'coroutine.status', 'coroutine.wrap', 'coroutine.yield'), 'debug': ('debug.debug', - 'debug.getfenv', 'debug.gethook', 'debug.getinfo', 'debug.getlocal', 'debug.getmetatable', 'debug.getregistry', 'debug.getupvalue', - 'debug.setfenv', + 'debug.getuservalue', 'debug.sethook', 'debug.setlocal', 'debug.setmetatable', 'debug.setupvalue', - 'debug.traceback'), + 'debug.setuservalue', + 'debug.traceback', + 'debug.upvalueid', + 'debug.upvaluejoin'), 'io': ('io.close', 'io.flush', 'io.input', @@ -71,17 +82,20 @@ MODULES = {'basic': ('_G', 'io.output', 'io.popen', 'io.read', + 'io.stderr', + 'io.stdin', + 'io.stdout', 'io.tmpfile', 'io.type', 'io.write'), 'math': ('math.abs', 'math.acos', 'math.asin', - 'math.atan2', 'math.atan', + 'math.atan2', 'math.ceil', - 'math.cosh', 'math.cos', + 'math.cosh', 'math.deg', 'math.exp', 'math.floor', @@ -89,29 +103,34 @@ MODULES = {'basic': ('_G', 'math.frexp', 'math.huge', 'math.ldexp', - 'math.log10', 'math.log', 'math.max', + 'math.maxinteger', 'math.min', + 'math.mininteger', 'math.modf', 'math.pi', 'math.pow', 'math.rad', 'math.random', 'math.randomseed', - 'math.sinh', 'math.sin', + 'math.sinh', 'math.sqrt', + 'math.tan', 'math.tanh', - 'math.tan'), - 'modules': ('module', - 'require', + 'math.tointeger', + 'math.type', + 'math.ult'), + 'modules': ('package.config', 'package.cpath', 'package.loaded', 'package.loadlib', 'package.path', 'package.preload', - 'package.seeall'), + 'package.searchers', + 'package.searchpath', + 'require'), 'os': ('os.clock', 'os.date', 'os.difftime', @@ -133,19 +152,37 @@ MODULES = {'basic': ('_G', 'string.len', 'string.lower', 
'string.match', + 'string.pack', + 'string.packsize', 'string.rep', 'string.reverse', 'string.sub', + 'string.unpack', 'string.upper'), 'table': ('table.concat', 'table.insert', - 'table.maxn', + 'table.move', + 'table.pack', 'table.remove', - 'table.sort')} - + 'table.sort', + 'table.unpack'), + 'utf8': ('utf8.char', + 'utf8.charpattern', + 'utf8.codepoint', + 'utf8.codes', + 'utf8.len', + 'utf8.offset')} if __name__ == '__main__': # pragma: no cover import re + import sys + + # urllib ends up wanting to import a module called 'math' -- if + # pygments/lexers is in the path, this ends badly. + for i in range(len(sys.path)-1, -1, -1): + if sys.path[i].endswith('/lexers'): + del sys.path[i] + try: from urllib import urlopen except ImportError: @@ -196,7 +233,7 @@ if __name__ == '__main__': # pragma: no cover def get_newest_version(): f = urlopen('http://www.lua.org/manual/') - r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>') + r = re.compile(r'^<A HREF="(\d\.\d)/">(Lua )?\1</A>') for line in f: m = r.match(line) if m is not None: @@ -204,7 +241,7 @@ if __name__ == '__main__': # pragma: no cover def get_lua_functions(version): f = urlopen('http://www.lua.org/manual/%s/' % version) - r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>') + r = re.compile(r'^<A HREF="manual.html#pdf-(?!lua|LUA)([^:]+)">\1</A>') functions = [] for line in f: m = r.match(line) @@ -236,15 +273,22 @@ if __name__ == '__main__': # pragma: no cover def run(): version = get_newest_version() - print('> Downloading function index for Lua %s' % version) - functions = get_lua_functions(version) - print('> %d functions found:' % len(functions)) + functions = set() + for v in ('5.2', version): + print('> Downloading function index for Lua %s' % v) + f = get_lua_functions(v) + print('> %d functions found, %d new:' % + (len(f), len(set(f) - functions))) + functions |= set(f) + + functions = sorted(functions) modules = {} for full_function_name in functions: print('>> %s' % full_function_name) m = 
get_function_module(full_function_name) modules.setdefault(m, []).append(full_function_name) + modules = {k: tuple(v) for k, v in modules.iteritems()} regenerate(__file__, modules) diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index e22d7df7..a6097b1c 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -24,9 +24,12 @@ LEXERS = { 'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), 'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()), 'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), + 'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()), 'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)), 'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)), 'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()), + 'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()), + 'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()), 'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()), 'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), 'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), @@ -46,11 +49,13 @@ LEXERS = { 'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), 'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), 'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()), + 'BSTLexer': 
('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()), 'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()), 'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript')), 'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')), 'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), 'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), + 'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)), 'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), 'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), 'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), @@ -68,6 +73,7 @@ LEXERS = { 'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)), 'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()), 'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()), + 'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()), 'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()), 'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), 'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), @@ -97,6 +103,7 @@ LEXERS = { 
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()), 'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), 'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)), + 'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)), 'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()), 'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc',), ()), 'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()), @@ -148,6 +155,7 @@ LEXERS = { 'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), 'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)), 'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)), + 'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)), 'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), 'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), 'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), @@ -194,7 +202,6 @@ LEXERS = { 'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), 'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)), 'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)), - 'JadeLexer': ('pygments.lexers.html', 'Jade', ('jade',), ('*.jade',), ('text/x-jade',)), 'JagsLexer': 
('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), 'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()), 'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), @@ -206,11 +213,13 @@ LEXERS = { 'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), 'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)), 'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')), + 'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', ('json-object',), (), ('application/json-object',)), 'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)), 'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)), 'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), 'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()), 'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), + 'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle', 'juttle'), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')), 'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')), 'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), 'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), @@ -244,6 +253,7 @@ LEXERS = { 'MakoLexer': 
('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), 'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), 'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), + 'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md',), ('*.md',), ('text/x-markdown',)), 'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)), 'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), 'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')), @@ -254,6 +264,7 @@ LEXERS = { 'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), 'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), 'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)), + 'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()), 'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), 'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()), 'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()), @@ -276,12 +287,13 @@ LEXERS = { 'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)), 'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), 
('text/x-nemerle',)), 'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), - 'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')), + 'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')), 'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), 'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)), 'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)), 'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()), 'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), + 'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), 'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()), 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), 'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), @@ -314,6 +326,7 @@ LEXERS = { 'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), 'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), 'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), + 'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')), 'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()), 'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), 
('application/x-pypylog',)), 'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')), @@ -325,6 +338,7 @@ LEXERS = { 'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), 'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), 'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()), + 'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()), 'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), 'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')), 'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), @@ -354,6 +368,7 @@ LEXERS = { 'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), 'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')), 'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust',), ('*.rs', '*.rs.in'), ('text/rust',)), + 'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), 'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), 'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), 'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), @@ -369,6 +384,7 @@ LEXERS = { 
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), 'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), + 'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), 'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), 'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()), 'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), @@ -377,12 +393,14 @@ LEXERS = { 'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), 'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), 'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()), + 'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')), 'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')), 'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)), 'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), 'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), 'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), 'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), + 'TasmLexer': ('pygments.lexers.asm', 'TASM', 
('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), 'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), 'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), 'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), @@ -394,6 +412,7 @@ LEXERS = { 'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), 'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), 'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), + 'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), 'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), 'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)), @@ -417,6 +436,7 @@ LEXERS = { 'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), 'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), 'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()), + 'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), 'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), 'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), 'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', 
('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')), diff --git a/pygments/lexers/_stata_builtins.py b/pygments/lexers/_stata_builtins.py new file mode 100644 index 00000000..424a739f --- /dev/null +++ b/pygments/lexers/_stata_builtins.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers._stata_builtins + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Builtins for Stata + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +builtins_base = ( + "if", "else", "in", "foreach", "for", "forv", "forva", + "forval", "forvalu", "forvalue", "forvalues", "by", "bys", + "bysort", "quietly", "qui", "about", "ac", + "ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath", + "adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova", + "anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app", + "appe", "appen", "append", "arch", "arch_dr", "arch_estat", + "arch_p", "archlm", "areg", "areg_p", "args", "arima", + "arima_dr", "arima_estat", "arima_p", "as", "asmprobit", + "asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg", + "asmprobit_p", "ass", "asse", "asser", "assert", "avplot", + "avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey", + "binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf", + "bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit", + "bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8", + "boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p", + "bprobit", "br", "break", "brier", "bro", "brow", "brows", + "browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w", + "bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8", + "bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot", + "camat", "canon", "canon_8", "canon_8_p", "canon_estat", + "canon_p", "cap", "caprojection", "capt", "captu", "captur", + "capture", "cat", "cc", "cchart", "cchart_7", "cci", + "cd", "censobs_table", "centile", "cf", "char", "chdir", + 
"checkdlgfiles", "checkestimationsample", "checkhlpfiles", + "checksum", "chelp", "ci", "cii", "cl", "class", "classutil", + "clear", "cli", "clis", "clist", "clo", "clog", "clog_lf", + "clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf", + "clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar", + "clslistarray", "cluster", "cluster_measures", "cluster_stop", + "cluster_tree", "cluster_tree_8", "clustermat", "cmdlog", + "cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg", + "codebook", "collaps4", "collapse", "colormult_nb", + "colormult_nw", "compare", "compress", "conf", "confi", + "confir", "confirm", "conren", "cons", "const", "constr", + "constra", "constrai", "constrain", "constraint", "continue", + "contract", "copy", "copyright", "copysource", "cor", "corc", + "corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc", + "corre", "correl", "correla", "correlat", "correlate", + "corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw", + "coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7", + "crc", "cret", "cretu", "cretur", "creturn", "cross", "cs", + "cscript", "cscript_log", "csi", "ct", "ct_is", "ctset", + "ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul", + "cusum", "cusum_7", "cutil", "d", "datasig", "datasign", + "datasigna", "datasignat", "datasignatu", "datasignatur", + "datasignature", "datetof", "db", "dbeta", "de", "dec", + "deco", "decod", "decode", "deff", "des", "desc", "descr", + "descri", "describ", "describe", "destring", "dfbeta", + "dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis", + "discard", "disp", "disp_res", "disp_s", "displ", "displa", + "display", "distinct", "do", "doe", "doed", "doedi", + "doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm", + "drop", "ds", "ds_util", "dstdize", "duplicates", "durbina", + "dwstat", "dydx", "e", "ed", "edi", "edit", "egen", + "eivreg", "emdef", "en", "enc", "enco", "encod", "encode", + "eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw", + "ereghet", 
"ereghet_glf", "ereghet_glf_sh", "ereghet_gp", + "ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret", + "eretu", "eretur", "ereturn", "err", "erro", "error", "est", + "est_cfexist", "est_cfname", "est_clickable", "est_expand", + "est_hold", "est_table", "est_unhold", "est_unholdok", + "estat", "estat_default", "estat_summ", "estat_vce_only", + "esti", "estimates", "etodow", "etof", "etomdy", "ex", + "exi", "exit", "expand", "expandcl", "fac", "fact", "facto", + "factor", "factor_estat", "factor_p", "factor_pca_rotated", + "factor_rotate", "factormat", "fcast", "fcast_compute", + "fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri", + "fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause", + "fh_st", "open", "read", "close", + "file", "filefilter", "fillin", "find_hlp_file", "findfile", + "findit", "findit_7", "fit", "fl", "fli", "flis", "flist", + "for5_0", "form", "forma", "format", "fpredict", "frac_154", + "frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis", + "frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq", + "frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot", + "fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn", + "fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe", + "ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp", + "gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p", + "gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge", + "gen", "gene", "gener", "genera", "generat", "generate", + "genrank", "genstd", "genvmean", "gettoken", "gl", "gladder", + "gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04", + "glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09", + "glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu", + "glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1", + "glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6", + "glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo", + "glob", "globa", "global", "glogit", "glogit_8", "glogit_p", + "gmeans", "gnbre_lf", 
"gnbreg", "gnbreg_5", "gnbreg_p", + "gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet", + "gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf", + "gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen", + "gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr", + "gr7", "gr_copy", "gr_current", "gr_db", "gr_describe", + "gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit", + "gr_editviewopts", "gr_example", "gr_example2", "gr_export", + "gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename", + "gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table", + "gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen", + "greigen_7", "greigen_8", "grmeanby", "grmeanby_7", + "gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat", + "gsort", "gwood", "h", "hadimvo", "hareg", "hausman", + "haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf", + "heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf", + "hetpr_p", "hetprob", "hettest", "hexdump", "hilite", + "hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans", + "hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9", + "icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase", + "include", "inf", "infi", "infil", "infile", "infix", "inp", + "inpu", "input", "ins", "insheet", "insp", "inspe", + "inspec", "inspect", "integ", "inten", "intreg", "intreg_7", + "intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate", + "iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy", + "is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf", + "ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote", + "ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife", + "jacknife", "jknife", "jknife_6", "jknife_8", "jkstat", + "joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa", + "kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov", + "ktau", "kwallis", "l", "la", "lab", "labe", "label", + "labelbook", "ladder", "levels", "levelsof", "leverage", + "lfit", "lfit_p", 
"li", "lincom", "line", "linktest", + "lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp", + "lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw", + "llogis_p", "llogist", "llogistic", "llogistichet", + "lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet", + "lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp", + "lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0", + "loadingplot", "loc", "loca", "local", "log", "logi", + "logis_lf", "logistic", "logistic_p", "logit", "logit_estat", + "logit_p", "loglogs", "logrank", "loneway", "lookfor", + "lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc", + "lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x", + "lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot", + "lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns", + "man", "manova", "manova_estat", "manova_p", "manovatest", + "mantel", "mark", "markin", "markout", "marksample", "mat", + "mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata", + "mata_clear", "mata_describe", "mata_drop", "mata_matdescribe", + "mata_matsave", "mata_matuse", "mata_memory", "mata_mlib", + "mata_mosave", "mata_rename", "mata_which", "matalabel", + "matcproc", "matlist", "matname", "matr", "matri", + "matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci", + "md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds", + "mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat", + "mdsshepard", "mdytoe", "mdytof", "me_derd", "mean", + "means", "median", "memory", "memsize", "meqparse", "mer", + "merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound", + "mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir", + "mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs", + "ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug", + "ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp", + "ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle", + "ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0", + "ml_ebfr1", "ml_ebh0q", "ml_ebhh0", 
"ml_ebhr0", "ml_ebr0i", + "ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i", + "ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs", + "ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi", + "ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs", + "ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle", + "ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote", + "ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0", + "ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max", + "ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt", + "ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor", + "ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold", + "mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi", + "mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum", + "mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit", + "mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode", + "mvencode", "mvreg", "mvreg_estat", "n", "nbreg", + "nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net", + "newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9", + "nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2", + "nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7", + "nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3", + "nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit", + "nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no", + "nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note", + "notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc", + "old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit", + "ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway", + "op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr", + "opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit", + "oprobitp", "opts_exclusive", "order", "orthog", "orthpoly", + "ou", "out", "outf", "outfi", "outfil", "outfile", "outs", + "outsh", "outshe", "outshee", "outsheet", "ovtest", "pac", + "pac_7", 
"palette", "parse", "parse_dissim", "pause", "pca", + "pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate", + "pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr", + "pctile", "pentium", "pergram", "pergram_7", "permute", + "permute_8", "personal", "peto_st", "pkcollapse", "pkcross", + "pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm", + "pksumm_7", "pl", "plo", "plot", "plugin", "pnorm", + "pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p", + "poisson", "poisson_estat", "post", "postclose", "postfile", + "postutil", "pperron", "pr", "prais", "prais_e", "prais_e2", + "prais_p", "predict", "predictnl", "preserve", "print", + "pro", "prob", "probi", "probit", "probit_estat", "probit_p", + "proc_time", "procoverlay", "procrustes", "procrustes_estat", + "procrustes_p", "profiler", "prog", "progr", "progra", + "program", "prop", "proportion", "prtest", "prtesti", "pwcorr", + "pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder", + "qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg", + "qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile", + "quantile_7", "que", "quer", "query", "range", "ranksum", + "ratio", "rchart", "rchart_7", "rcof", "recast", "reclink", + "recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre", + "regre_p2", "regres", "regres_p", "regress", "regress_estat", + "regriv_p", "remap", "ren", "rena", "renam", "rename", + "renpfix", "repeat", "replace", "report", "reshape", + "restore", "ret", "retu", "retur", "return", "rm", "rmdir", + "robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf", + "rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7", + "roctab", "roctab_7", "rolling", "rologit", "rologit_p", + "rot", "rota", "rotat", "rotate", "rotatemat", "rreg", + "rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7", + "rvpplot", "rvpplot_7", "sa", "safesum", "sample", + "sampsi", "sav", "save", "savedresults", "saveold", "sc", + "sca", "scal", "scala", "scalar", "scatter", "scm_mine", + "sco", 
"scob_lf", "scob_p", "scobi_sw", "scobit", "scor", + "score", "scoreplot", "scoreplot_help", "scree", "screeplot", + "screeplot_help", "sdtest", "sdtesti", "se", "search", + "separate", "seperate", "serrbar", "serrbar_7", "serset", "set", + "set_defaults", "sfrancia", "sh", "she", "shel", "shell", + "shewhart", "shewhart_7", "signestimationsample", "signrank", + "signtest", "simul", "simul_7", "simulate", "simulate_8", + "sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth", + "snapspan", "so", "sor", "sort", "spearman", "spikeplot", + "spikeplot_7", "spikeplt", "spline_x", "split", "sqreg", + "sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st", + "st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys", + "st_note", "st_promo", "st_set", "st_show", "st_smpl", + "st_subid", "stack", "statsby", "statsby_8", "stbase", "stci", + "stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll", + "stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat", + "stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise", + "stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh", + "stphplot", "stphplot_7", "stphtest", "stphtest_7", + "stptime", "strate", "strate_7", "streg", "streg_sw", "streset", + "sts", "sts_7", "stset", "stsplit", "stsum", "sttocc", + "sttoct", "stvary", "stweib", "su", "suest", "suest_8", + "sum", "summ", "summa", "summar", "summari", "summariz", + "summarize", "sunflower", "sureg", "survcurv", "survsum", + "svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg", + "svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p", + "svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p", + "svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p", + "svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p", + "svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub", + "svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes", + "svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob", + "svyintreg", 
"svyintreg_7", "svyintrg", "svyivreg", "svylc", + "svylog_p", "svylogit", "svymarkout", "svymarkout_8", + "svymean", "svymlog", "svymlogit", "svynbreg", "svyolog", + "svyologit", "svyoprob", "svyoprobit", "svyopts", + "svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt", + "svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p", + "svyregress", "svyset", "svyset_7", "svyset_8", "svytab", + "svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg", + "swcox", "swereg", "swilk", "swlogis", "swlogit", + "swologit", "swoprbt", "swpois", "swprobit", "swqreg", + "swtobit", "swweib", "symmetry", "symmi", "symplot", + "symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse", + "szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd", + "tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds", + "tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat", + "tabulate", "te", "tempfile", "tempname", "tempvar", "tes", + "test", "testnl", "testparm", "teststd", "tetrachoric", + "time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p", + "tobit_sw", "token", "tokeni", "tokeniz", "tokenize", + "tostring", "total", "translate", "translator", "transmap", + "treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons", + "trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg", + "tsappend", "tset", "tsfill", "tsline", "tsline_ex", + "tsreport", "tsrevar", "tsrline", "tsset", "tssmooth", + "tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial", + "tw", "tware_st", "two", "twoway", "twoway__fpfit_serset", + "twoway__function_gen", "twoway__histogram_gen", + "twoway__ipoint_serset", "twoway__ipoints_serset", + "twoway__kdensity_gen", "twoway__lfit_serset", + "twoway__normgen_gen", "twoway__pci_serset", + "twoway__qfit_serset", "twoway__scatteri_serset", + "twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ", + "type", "typeof", "u", "unab", "unabbrev", "unabcmd", + "update", "us", "use", "uselabel", "var", "var_mkcompanion", + "var_p", "varbasic", 
"varfcast", "vargranger", "varirf", + "varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable", + "varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase", + "varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set", + "varirf_table", "varlist", "varlmar", "varnorm", "varsoc", + "varstable", "varstable_w", "varstable_w2", "varwle", + "vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w", + "vecirf_create", "veclmar", "veclmar_w", "vecnorm", + "vecnorm_w", "vecrank", "vecstable", "verinst", "vers", + "versi", "versio", "version", "view", "viewsource", "vif", + "vwls", "wdatetof", "webdescribe", "webseek", "webuse", + "weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf", + "weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh", + "weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa", + "weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p", + "weibull", "weibull_c", "weibull_s", "weibullhet", + "wh", "whelp", "whi", "which", "whil", "while", "wilc_st", + "wilcoxon", "win", "wind", "windo", "window", "winexec", + "wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7", + "xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave", + "xmluse", "xpose", "xsh", "xshe", "xshel", "xshell", + "xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p", + "xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2", + "xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr", + "xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee", + "xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p", + "xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman", + "xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg", + "xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1", + "xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit", + "xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p", + "xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p", + "xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p", + "xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson", + 
"xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred", + "xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p", + "xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p", + "xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be", + "xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re", + "xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti", + "xtsum", "xttab", "xttest0", "xttobit", "xttobit_8", + "xttobit_p", "xttrans", "yx", "yxview__barlike_draw", + "yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw", + "yxview_dropline_draw", "yxview_function_draw", + "yxview_iarrow_draw", "yxview_ilabels_draw", + "yxview_normal_draw", "yxview_pcarrow_draw", + "yxview_pcbarrow_draw", "yxview_pccapsym_draw", + "yxview_pcscatter_draw", "yxview_pcspike_draw", + "yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw", + "yxview_rcap_draw", "yxview_rcapsym_draw", + "yxview_rconnected_draw", "yxview_rline_draw", + "yxview_rscatter_draw", "yxview_rspike_draw", + "yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb", + "zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf", + "zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5", + "zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5", + "ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb", + "ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5", + "ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5" +) + +builtins_functions = ( + "Cdhms", "Chms", "Clock", "Cmdyhms", "Cofc", "Cofd", "F", + "Fden", "Ftail", "I", "J", "_caller", "abbrev", "abs", "acos", + "acosh", "asin", "asinh", "atan", "atan2", "atanh", + "autocode", "betaden", "binomial", "binomialp", "binomialtail", + "binormal", "bofd", "byteorder", "c", "ceil", "char", + "chi2", "chi2den", "chi2tail", "cholesky", "chop", "clip", + "clock", "cloglog", "cofC", "cofd", "colnumb", "colsof", "comb", + "cond", "corr", "cos", "cosh", "d", "daily", "date", "day", + "det", "dgammapda", "dgammapdada", "dgammapdadx", "dgammapdx", + 
"dgammapdxdx", "dhms", "diag", "diag0cnt", "digamma", + "dofC", "dofb", "dofc", "dofh", "dofm", "dofq", "dofw", + "dofy", "dow", "doy", "dunnettprob", "e", "el", "epsdouble", + "epsfloat", "exp", "fileexists", "fileread", "filereaderror", + "filewrite", "float", "floor", "fmtwidth", "gammaden", + "gammap", "gammaptail", "get", "group", "h", "hadamard", + "halfyear", "halfyearly", "has_eprop", "hh", "hhC", "hms", + "hofd", "hours", "hypergeometric", "hypergeometricp", "ibeta", + "ibetatail", "index", "indexnot", "inlist", "inrange", "int", + "inv", "invF", "invFtail", "invbinomial", "invbinomialtail", + "invchi2", "invchi2tail", "invcloglog", "invdunnettprob", + "invgammap", "invgammaptail", "invibeta", "invibetatail", + "invlogit", "invnFtail", "invnbinomial", "invnbinomialtail", + "invnchi2", "invnchi2tail", "invnibeta", "invnorm", "invnormal", + "invnttail", "invpoisson", "invpoissontail", "invsym", "invt", + "invttail", "invtukeyprob", "irecode", "issym", "issymmetric", + "itrim", "length", "ln", "lnfact", "lnfactorial", "lngamma", + "lnnormal", "lnnormalden", "log", "log10", "logit", "lower", + "ltrim", "m", "match", "matmissing", "matrix", "matuniform", + "max", "maxbyte", "maxdouble", "maxfloat", "maxint", "maxlong", + "mdy", "mdyhms", "mi", "min", "minbyte", "mindouble", + "minfloat", "minint", "minlong", "minutes", "missing", "mm", + "mmC", "mod", "mofd", "month", "monthly", "mreldif", + "msofhours", "msofminutes", "msofseconds", "nF", "nFden", + "nFtail", "nbetaden", "nbinomial", "nbinomialp", "nbinomialtail", + "nchi2", "nchi2den", "nchi2tail", "nibeta", "norm", "normal", + "normalden", "normd", "npnF", "npnchi2", "npnt", "nt", "ntden", + "nttail", "nullmat", "plural", "poisson", "poissonp", + "poissontail", "proper", "q", "qofd", "quarter", "quarterly", + "r", "rbeta", "rbinomial", "rchi2", "real", "recode", "regexm", + "regexr", "regexs", "reldif", "replay", "return", "reverse", + "rgamma", "rhypergeometric", "rnbinomial", "rnormal", "round", + 
"rownumb", "rowsof", "rpoisson", "rt", "rtrim", "runiform", "s", + "scalar", "seconds", "sign", "sin", "sinh", "smallestdouble", + "soundex", "soundex_nara", "sqrt", "ss", "ssC", "strcat", + "strdup", "string", "strlen", "strlower", "strltrim", "strmatch", + "strofreal", "strpos", "strproper", "strreverse", "strrtrim", + "strtoname", "strtrim", "strupper", "subinstr", "subinword", + "substr", "sum", "sweep", "syminv", "t", "tC", "tan", "tanh", + "tc", "td", "tden", "th", "tin", "tm", "tq", "trace", + "trigamma", "trim", "trunc", "ttail", "tukeyprob", "tw", + "twithin", "uniform", "upper", "vec", "vecdiag", "w", "week", + "weekly", "wofd", "word", "wordcount", "year", "yearly", + "yh", "ym", "yofd", "yq", "yw" +) + + diff --git a/pygments/lexers/_tsql_builtins.py b/pygments/lexers/_tsql_builtins.py new file mode 100644 index 00000000..44ad8244 --- /dev/null +++ b/pygments/lexers/_tsql_builtins.py @@ -0,0 +1,1004 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers._tsql_builtins + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + These are manually translated lists from https://msdn.microsoft.com. + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +# See https://msdn.microsoft.com/en-us/library/ms174986.aspx. 
+OPERATORS = ( + '!<', + '!=', + '!>', + '<', + '<=', + '<>', + '=', + '>', + '>=', + '+', + '+=', + '-', + '-=', + '*', + '*=', + '/', + '/=', + '%', + '%=', + '&', + '&=', + '|', + '|=', + '^', + '^=', + '~', + '::', +) + +OPERATOR_WORDS = ( + 'all', + 'and', + 'any', + 'between', + 'except', + 'exists', + 'in', + 'intersect', + 'like', + 'not', + 'or', + 'some', + 'union', +) + +_KEYWORDS_SERVER = ( + 'add', + 'all', + 'alter', + 'and', + 'any', + 'as', + 'asc', + 'authorization', + 'backup', + 'begin', + 'between', + 'break', + 'browse', + 'bulk', + 'by', + 'cascade', + 'case', + 'catch', + 'check', + 'checkpoint', + 'close', + 'clustered', + 'coalesce', + 'collate', + 'column', + 'commit', + 'compute', + 'constraint', + 'contains', + 'containstable', + 'continue', + 'convert', + 'create', + 'cross', + 'current', + 'current_date', + 'current_time', + 'current_timestamp', + 'current_user', + 'cursor', + 'database', + 'dbcc', + 'deallocate', + 'declare', + 'default', + 'delete', + 'deny', + 'desc', + 'disk', + 'distinct', + 'distributed', + 'double', + 'drop', + 'dump', + 'else', + 'end', + 'errlvl', + 'escape', + 'except', + 'exec', + 'execute', + 'exists', + 'exit', + 'external', + 'fetch', + 'file', + 'fillfactor', + 'for', + 'foreign', + 'freetext', + 'freetexttable', + 'from', + 'full', + 'function', + 'goto', + 'grant', + 'group', + 'having', + 'holdlock', + 'identity', + 'identity_insert', + 'identitycol', + 'if', + 'in', + 'index', + 'inner', + 'insert', + 'intersect', + 'into', + 'is', + 'join', + 'key', + 'kill', + 'left', + 'like', + 'lineno', + 'load', + 'merge', + 'national', + 'nocheck', + 'nonclustered', + 'not', + 'null', + 'nullif', + 'of', + 'off', + 'offsets', + 'on', + 'open', + 'opendatasource', + 'openquery', + 'openrowset', + 'openxml', + 'option', + 'or', + 'order', + 'outer', + 'over', + 'percent', + 'pivot', + 'plan', + 'precision', + 'primary', + 'print', + 'proc', + 'procedure', + 'public', + 'raiserror', + 'read', + 'readtext', + 
'reconfigure', + 'references', + 'replication', + 'restore', + 'restrict', + 'return', + 'revert', + 'revoke', + 'right', + 'rollback', + 'rowcount', + 'rowguidcol', + 'rule', + 'save', + 'schema', + 'securityaudit', + 'select', + 'semantickeyphrasetable', + 'semanticsimilaritydetailstable', + 'semanticsimilaritytable', + 'session_user', + 'set', + 'setuser', + 'shutdown', + 'some', + 'statistics', + 'system_user', + 'table', + 'tablesample', + 'textsize', + 'then', + 'throw', + 'to', + 'top', + 'tran', + 'transaction', + 'trigger', + 'truncate', + 'try', + 'try_convert', + 'tsequal', + 'union', + 'unique', + 'unpivot', + 'update', + 'updatetext', + 'use', + 'user', + 'values', + 'varying', + 'view', + 'waitfor', + 'when', + 'where', + 'while', + 'with', + 'within', + 'writetext', +) + +_KEYWORDS_FUTURE = ( + 'absolute', + 'action', + 'admin', + 'after', + 'aggregate', + 'alias', + 'allocate', + 'are', + 'array', + 'asensitive', + 'assertion', + 'asymmetric', + 'at', + 'atomic', + 'before', + 'binary', + 'bit', + 'blob', + 'boolean', + 'both', + 'breadth', + 'call', + 'called', + 'cardinality', + 'cascaded', + 'cast', + 'catalog', + 'char', + 'character', + 'class', + 'clob', + 'collation', + 'collect', + 'completion', + 'condition', + 'connect', + 'connection', + 'constraints', + 'constructor', + 'corr', + 'corresponding', + 'covar_pop', + 'covar_samp', + 'cube', + 'cume_dist', + 'current_catalog', + 'current_default_transform_group', + 'current_path', + 'current_role', + 'current_schema', + 'current_transform_group_for_type', + 'cycle', + 'data', + 'date', + 'day', + 'dec', + 'decimal', + 'deferrable', + 'deferred', + 'depth', + 'deref', + 'describe', + 'descriptor', + 'destroy', + 'destructor', + 'deterministic', + 'diagnostics', + 'dictionary', + 'disconnect', + 'domain', + 'dynamic', + 'each', + 'element', + 'end-exec', + 'equals', + 'every', + 'exception', + 'false', + 'filter', + 'first', + 'float', + 'found', + 'free', + 'fulltexttable', + 'fusion', + 
'general', + 'get', + 'global', + 'go', + 'grouping', + 'hold', + 'host', + 'hour', + 'ignore', + 'immediate', + 'indicator', + 'initialize', + 'initially', + 'inout', + 'input', + 'int', + 'integer', + 'intersection', + 'interval', + 'isolation', + 'iterate', + 'language', + 'large', + 'last', + 'lateral', + 'leading', + 'less', + 'level', + 'like_regex', + 'limit', + 'ln', + 'local', + 'localtime', + 'localtimestamp', + 'locator', + 'map', + 'match', + 'member', + 'method', + 'minute', + 'mod', + 'modifies', + 'modify', + 'module', + 'month', + 'multiset', + 'names', + 'natural', + 'nchar', + 'nclob', + 'new', + 'next', + 'no', + 'none', + 'normalize', + 'numeric', + 'object', + 'occurrences_regex', + 'old', + 'only', + 'operation', + 'ordinality', + 'out', + 'output', + 'overlay', + 'pad', + 'parameter', + 'parameters', + 'partial', + 'partition', + 'path', + 'percent_rank', + 'percentile_cont', + 'percentile_disc', + 'position_regex', + 'postfix', + 'prefix', + 'preorder', + 'prepare', + 'preserve', + 'prior', + 'privileges', + 'range', + 'reads', + 'real', + 'recursive', + 'ref', + 'referencing', + 'regr_avgx', + 'regr_avgy', + 'regr_count', + 'regr_intercept', + 'regr_r2', + 'regr_slope', + 'regr_sxx', + 'regr_sxy', + 'regr_syy', + 'relative', + 'release', + 'result', + 'returns', + 'role', + 'rollup', + 'routine', + 'row', + 'rows', + 'savepoint', + 'scope', + 'scroll', + 'search', + 'second', + 'section', + 'sensitive', + 'sequence', + 'session', + 'sets', + 'similar', + 'size', + 'smallint', + 'space', + 'specific', + 'specifictype', + 'sql', + 'sqlexception', + 'sqlstate', + 'sqlwarning', + 'start', + 'state', + 'statement', + 'static', + 'stddev_pop', + 'stddev_samp', + 'structure', + 'submultiset', + 'substring_regex', + 'symmetric', + 'system', + 'temporary', + 'terminate', + 'than', + 'time', + 'timestamp', + 'timezone_hour', + 'timezone_minute', + 'trailing', + 'translate_regex', + 'translation', + 'treat', + 'true', + 'uescape', + 'under', + 
'unknown', + 'unnest', + 'usage', + 'using', + 'value', + 'var_pop', + 'var_samp', + 'varchar', + 'variable', + 'whenever', + 'width_bucket', + 'window', + 'within', + 'without', + 'work', + 'write', + 'xmlagg', + 'xmlattributes', + 'xmlbinary', + 'xmlcast', + 'xmlcomment', + 'xmlconcat', + 'xmldocument', + 'xmlelement', + 'xmlexists', + 'xmlforest', + 'xmliterate', + 'xmlnamespaces', + 'xmlparse', + 'xmlpi', + 'xmlquery', + 'xmlserialize', + 'xmltable', + 'xmltext', + 'xmlvalidate', + 'year', + 'zone', +) + +_KEYWORDS_ODBC = ( + 'absolute', + 'action', + 'ada', + 'add', + 'all', + 'allocate', + 'alter', + 'and', + 'any', + 'are', + 'as', + 'asc', + 'assertion', + 'at', + 'authorization', + 'avg', + 'begin', + 'between', + 'bit', + 'bit_length', + 'both', + 'by', + 'cascade', + 'cascaded', + 'case', + 'cast', + 'catalog', + 'char', + 'char_length', + 'character', + 'character_length', + 'check', + 'close', + 'coalesce', + 'collate', + 'collation', + 'column', + 'commit', + 'connect', + 'connection', + 'constraint', + 'constraints', + 'continue', + 'convert', + 'corresponding', + 'count', + 'create', + 'cross', + 'current', + 'current_date', + 'current_time', + 'current_timestamp', + 'current_user', + 'cursor', + 'date', + 'day', + 'deallocate', + 'dec', + 'decimal', + 'declare', + 'default', + 'deferrable', + 'deferred', + 'delete', + 'desc', + 'describe', + 'descriptor', + 'diagnostics', + 'disconnect', + 'distinct', + 'domain', + 'double', + 'drop', + 'else', + 'end', + 'end-exec', + 'escape', + 'except', + 'exception', + 'exec', + 'execute', + 'exists', + 'external', + 'extract', + 'false', + 'fetch', + 'first', + 'float', + 'for', + 'foreign', + 'fortran', + 'found', + 'from', + 'full', + 'get', + 'global', + 'go', + 'goto', + 'grant', + 'group', + 'having', + 'hour', + 'identity', + 'immediate', + 'in', + 'include', + 'index', + 'indicator', + 'initially', + 'inner', + 'input', + 'insensitive', + 'insert', + 'int', + 'integer', + 'intersect', + 'interval', + 
'into', + 'is', + 'isolation', + 'join', + 'key', + 'language', + 'last', + 'leading', + 'left', + 'level', + 'like', + 'local', + 'lower', + 'match', + 'max', + 'min', + 'minute', + 'module', + 'month', + 'names', + 'national', + 'natural', + 'nchar', + 'next', + 'no', + 'none', + 'not', + 'null', + 'nullif', + 'numeric', + 'octet_length', + 'of', + 'on', + 'only', + 'open', + 'option', + 'or', + 'order', + 'outer', + 'output', + 'overlaps', + 'pad', + 'partial', + 'pascal', + 'position', + 'precision', + 'prepare', + 'preserve', + 'primary', + 'prior', + 'privileges', + 'procedure', + 'public', + 'read', + 'real', + 'references', + 'relative', + 'restrict', + 'revoke', + 'right', + 'rollback', + 'rows', + 'schema', + 'scroll', + 'second', + 'section', + 'select', + 'session', + 'session_user', + 'set', + 'size', + 'smallint', + 'some', + 'space', + 'sql', + 'sqlca', + 'sqlcode', + 'sqlerror', + 'sqlstate', + 'sqlwarning', + 'substring', + 'sum', + 'system_user', + 'table', + 'temporary', + 'then', + 'time', + 'timestamp', + 'timezone_hour', + 'timezone_minute', + 'to', + 'trailing', + 'transaction', + 'translate', + 'translation', + 'trim', + 'true', + 'union', + 'unique', + 'unknown', + 'update', + 'upper', + 'usage', + 'user', + 'using', + 'value', + 'values', + 'varchar', + 'varying', + 'view', + 'when', + 'whenever', + 'where', + 'with', + 'work', + 'write', + 'year', + 'zone', +) + +# See https://msdn.microsoft.com/en-us/library/ms189822.aspx. +KEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER)) + +# See https://msdn.microsoft.com/en-us/library/ms187752.aspx. 
+TYPES = ( + 'bigint', + 'binary', + 'bit', + 'char', + 'cursor', + 'date', + 'datetime', + 'datetime2', + 'datetimeoffset', + 'decimal', + 'float', + 'hierarchyid', + 'image', + 'int', + 'money', + 'nchar', + 'ntext', + 'numeric', + 'nvarchar', + 'real', + 'smalldatetime', + 'smallint', + 'smallmoney', + 'sql_variant', + 'table', + 'text', + 'time', + 'timestamp', + 'tinyint', + 'uniqueidentifier', + 'varbinary', + 'varchar', + 'xml', +) + +# See https://msdn.microsoft.com/en-us/library/ms174318.aspx. +FUNCTIONS = ( + '$partition', + 'abs', + 'acos', + 'app_name', + 'applock_mode', + 'applock_test', + 'ascii', + 'asin', + 'assemblyproperty', + 'atan', + 'atn2', + 'avg', + 'binary_checksum', + 'cast', + 'ceiling', + 'certencoded', + 'certprivatekey', + 'char', + 'charindex', + 'checksum', + 'checksum_agg', + 'choose', + 'col_length', + 'col_name', + 'columnproperty', + 'compress', + 'concat', + 'connectionproperty', + 'context_info', + 'convert', + 'cos', + 'cot', + 'count', + 'count_big', + 'current_request_id', + 'current_timestamp', + 'current_transaction_id', + 'current_user', + 'cursor_status', + 'database_principal_id', + 'databasepropertyex', + 'dateadd', + 'datediff', + 'datediff_big', + 'datefromparts', + 'datename', + 'datepart', + 'datetime2fromparts', + 'datetimefromparts', + 'datetimeoffsetfromparts', + 'day', + 'db_id', + 'db_name', + 'decompress', + 'degrees', + 'dense_rank', + 'difference', + 'eomonth', + 'error_line', + 'error_message', + 'error_number', + 'error_procedure', + 'error_severity', + 'error_state', + 'exp', + 'file_id', + 'file_idex', + 'file_name', + 'filegroup_id', + 'filegroup_name', + 'filegroupproperty', + 'fileproperty', + 'floor', + 'format', + 'formatmessage', + 'fulltextcatalogproperty', + 'fulltextserviceproperty', + 'get_filestream_transaction_context', + 'getansinull', + 'getdate', + 'getutcdate', + 'grouping', + 'grouping_id', + 'has_perms_by_name', + 'host_id', + 'host_name', + 'iif', + 'index_col', + 'indexkey_property', 
+ 'indexproperty', + 'is_member', + 'is_rolemember', + 'is_srvrolemember', + 'isdate', + 'isjson', + 'isnull', + 'isnumeric', + 'json_modify', + 'json_query', + 'json_value', + 'left', + 'len', + 'log', + 'log10', + 'lower', + 'ltrim', + 'max', + 'min', + 'min_active_rowversion', + 'month', + 'nchar', + 'newid', + 'newsequentialid', + 'ntile', + 'object_definition', + 'object_id', + 'object_name', + 'object_schema_name', + 'objectproperty', + 'objectpropertyex', + 'opendatasource', + 'openjson', + 'openquery', + 'openrowset', + 'openxml', + 'original_db_name', + 'original_login', + 'parse', + 'parsename', + 'patindex', + 'permissions', + 'pi', + 'power', + 'pwdcompare', + 'pwdencrypt', + 'quotename', + 'radians', + 'rand', + 'rank', + 'replace', + 'replicate', + 'reverse', + 'right', + 'round', + 'row_number', + 'rowcount_big', + 'rtrim', + 'schema_id', + 'schema_name', + 'scope_identity', + 'serverproperty', + 'session_context', + 'session_user', + 'sign', + 'sin', + 'smalldatetimefromparts', + 'soundex', + 'sp_helplanguage', + 'space', + 'sqrt', + 'square', + 'stats_date', + 'stdev', + 'stdevp', + 'str', + 'string_escape', + 'string_split', + 'stuff', + 'substring', + 'sum', + 'suser_id', + 'suser_name', + 'suser_sid', + 'suser_sname', + 'switchoffset', + 'sysdatetime', + 'sysdatetimeoffset', + 'system_user', + 'sysutcdatetime', + 'tan', + 'textptr', + 'textvalid', + 'timefromparts', + 'todatetimeoffset', + 'try_cast', + 'try_convert', + 'try_parse', + 'type_id', + 'type_name', + 'typeproperty', + 'unicode', + 'upper', + 'user_id', + 'user_name', + 'var', + 'varp', + 'xact_state', + 'year', +) diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 325cbbed..2bb3eac9 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -20,7 +20,7 @@ from pygments.token import Text, Name, Number, String, Comment, Punctuation, \ __all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer', 'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 
'NasmLexer', - 'NasmObjdumpLexer', 'Ca65Lexer'] + 'NasmObjdumpLexer', 'TasmLexer', 'Ca65Lexer'] class GasLexer(RegexLexer): @@ -424,7 +424,6 @@ class LlvmLexer(RegexLexer): ] } - class NasmLexer(RegexLexer): """ For Nasm (Intel) assembly code. @@ -512,6 +511,86 @@ class NasmObjdumpLexer(ObjdumpLexer): tokens = _objdump_lexer_tokens(NasmLexer) +class TasmLexer(RegexLexer): + """ + For Tasm (Turbo Assembler) assembly code. + """ + name = 'TASM' + aliases = ['tasm'] + filenames = ['*.asm', '*.ASM', '*.tasm'] + mimetypes = ['text/x-tasm'] + + identifier = r'[@a-z$._?][\w$.?#@~]*' + hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)' + octn = r'[0-7]+q' + binn = r'[01]+b' + decn = r'[0-9]+' + floatn = decn + r'\.e?' + decn + string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`" + declkw = r'(?:res|d)[bwdqt]|times' + register = (r'r[0-9][0-5]?[bwd]|' + r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|' + r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]') + wordop = r'seg|wrt|strict' + type = r'byte|[dq]?word' + directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|' + r'ORG|ALIGN|STRUC|ENDSTRUC|ENDS|COMMON|CPU|GROUP|UPPERCASE|INCLUDE|' + r'EXPORT|LIBRARY|MODULE|PROC|ENDP|USES|ARG|DATASEG|UDATASEG|END|IDEAL|' + r'P386|MODEL|ASSUME|CODESEG|SIZE') + # T[A-Z][a-z] is more of a convention. Lexer should filter out STRUC definitions + # and then 'add' them to datatype somehow. 
+ datatype = (r'db|dd|dw|T[A-Z][a-z]+') + + flags = re.IGNORECASE | re.MULTILINE + tokens = { + 'root': [ + (r'^\s*%', Comment.Preproc, 'preproc'), + include('whitespace'), + (identifier + ':', Name.Label), + (directives, Keyword, 'instruction-args'), + (r'(%s)(\s+)(%s)' % (identifier, datatype), + bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration), + 'instruction-args'), + (declkw, Keyword.Declaration, 'instruction-args'), + (identifier, Name.Function, 'instruction-args'), + (r'[\r\n]+', Text) + ], + 'instruction-args': [ + (string, String), + (hexn, Number.Hex), + (octn, Number.Oct), + (binn, Number.Bin), + (floatn, Number.Float), + (decn, Number.Integer), + include('punctuation'), + (register, Name.Builtin), + (identifier, Name.Variable), + # Do not match newline when it's preceded by a backslash + (r'(\\\s*)(;.*)([\r\n])', bygroups(Text, Comment.Single, Text)), + (r'[\r\n]+', Text, '#pop'), + include('whitespace') + ], + 'preproc': [ + (r'[^;\n]+', Comment.Preproc), + (r';.*?\n', Comment.Single, '#pop'), + (r'\n', Comment.Preproc, '#pop'), + ], + 'whitespace': [ + (r'[\n\r]', Text), + (r'\\[\n\r]', Text), + (r'[ \t]+', Text), + (r';.*', Comment.Single) + ], + 'punctuation': [ + (r'[,():\[\]]+', Punctuation), + (r'[&|^<>+*=/%~-]+', Operator), + (r'[$]+', Keyword.Constant), + (wordop, Operator.Word), + (type, Keyword.Type) + ], + } + + class Ca65Lexer(RegexLexer): """ For ca65 assembler sources. diff --git a/pygments/lexers/bibtex.py b/pygments/lexers/bibtex.py new file mode 100644 index 00000000..cbaedca2 --- /dev/null +++ b/pygments/lexers/bibtex.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.bibtex + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for BibTeX bibliography data and styles + + :copyright: Copyright 2005-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details.
+""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, default, words +from pygments.token import Name, Comment, String, Error, Number, Text, Keyword, Punctuation + +__all__ = ['BibTeXLexer', 'BSTLexer'] + + +class BibTeXLexer(ExtendedRegexLexer): + """ + A lexer for BibTeX bibliography data format. + + .. versionadded:: 2.2 + """ + + name = 'BibTeX' + aliases = ['bib', 'bibtex'] + filenames = ['*.bib'] + mimetypes = ["text/x-bibtex"] + flags = re.IGNORECASE + + ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~' + IDENTIFIER = '[{0}][{1}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS) + + def open_brace_callback(self, match, ctx): + opening_brace = match.group() + ctx.opening_brace = opening_brace + yield match.start(), Punctuation, opening_brace + ctx.pos = match.end() + + def close_brace_callback(self, match, ctx): + closing_brace = match.group() + if ( + ctx.opening_brace == '{' and closing_brace != '}' or + ctx.opening_brace == '(' and closing_brace != ')' + ): + yield match.start(), Error, closing_brace + else: + yield match.start(), Punctuation, closing_brace + del ctx.opening_brace + ctx.pos = match.end() + + tokens = { + 'root': [ + include('whitespace'), + ('@comment', Comment), + ('@preamble', Name.Class, ('closing-brace', 'value', 'opening-brace')), + ('@string', Name.Class, ('closing-brace', 'field', 'opening-brace')), + ('@' + IDENTIFIER, Name.Class, ('closing-brace', 'command-body', 'opening-brace')), + ('.+', Comment), + ], + 'opening-brace': [ + include('whitespace'), + (r'[{(]', open_brace_callback, '#pop'), + ], + 'closing-brace': [ + include('whitespace'), + (r'[})]', close_brace_callback, '#pop'), + ], + 'command-body': [ + include('whitespace'), + (r'[^\s\,\}]+', Name.Label, ('#pop', 'fields')), + ], + 'fields': [ + include('whitespace'), + (',', Punctuation, 'field'), + default('#pop'), + ], + 'field': [ + include('whitespace'), + (IDENTIFIER, Name.Attribute, ('value', '=')), + default('#pop'), + ], + '=': 
[ + include('whitespace'), + ('=', Punctuation, '#pop'), + ], + 'value': [ + include('whitespace'), + (IDENTIFIER, Name.Variable), + ('"', String, 'quoted-string'), + (r'\{', String, 'braced-string'), + (r'[\d]+', Number), + ('#', Punctuation), + default('#pop'), + ], + 'quoted-string': [ + (r'\{', String, 'braced-string'), + ('"', String, '#pop'), + ('[^\{\"]+', String), + ], + 'braced-string': [ + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ('[^\{\}]+', String), + ], + 'whitespace': [ + (r'\s+', Text), + ], + } + + +class BSTLexer(RegexLexer): + """ + A lexer for BibTeX bibliography styles. + + .. versionadded:: 2.2 + """ + + name = 'BST' + aliases = ['bst', 'bst-pybtex'] + filenames = ['*.bst'] + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + include('whitespace'), + (words(['read', 'sort']), Keyword), + (words(['execute', 'integers', 'iterate', 'reverse', 'strings']), Keyword, ('group')), + (words(['function', 'macro']), Keyword, ('group', 'group')), + (words(['entry']), Keyword, ('group', 'group', 'group')), + ], + 'group': [ + include('whitespace'), + (r'\{', Punctuation, ('#pop', 'group-end', 'body')), + ], + 'group-end': [ + include('whitespace'), + (r'\}', Punctuation, '#pop'), + ], + 'body': [ + include('whitespace'), + (r"\'[^#\"\{\}\s]+", Name.Function), + (r'[^#\"\{\}\s]+\$', Name.Builtin), + (r'[^#\"\{\}\s]+', Name.Variable), + (r'"[^\"]*"', String), + (r'#-?\d+', Number), + (r'\{', Punctuation, ('group-end', 'body')), + default('#pop'), + ], + 'whitespace': [ + ('\s+', Text), + ('%.*?$', Comment.SingleLine), + ], + } diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py index a8d75c0a..2a2419d4 100644 --- a/pygments/lexers/c_cpp.py +++ b/pygments/lexers/c_cpp.py @@ -46,8 +46,10 @@ class CFamilyLexer(RegexLexer): (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation - (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), + 
(r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), + # Open until EOF, so no ending delimiter + (r'/(\\\n)?[*][\w\W]*', Comment.Multiline), ], 'statements': [ (r'(L?)(")', bygroups(String.Affix, String), 'string'), diff --git a/pygments/lexers/clean.py b/pygments/lexers/clean.py index a3e81534..b87ff99e 100644 --- a/pygments/lexers/clean.py +++ b/pygments/lexers/clean.py @@ -100,7 +100,7 @@ class CleanLexer(ExtendedRegexLexer): ctx.pos = match.end() yield match.start(), Comment, match.group(0) - keywords = ('class', 'instance', 'where', 'with', 'let', 'let!', 'with', + keywords = ('class', 'instance', 'where', 'with', 'let', 'let!', 'in', 'case', 'of', 'infix', 'infixr', 'infixl', 'generic', 'derive', 'otherwise', 'code', 'inline') @@ -116,7 +116,7 @@ class CleanLexer(ExtendedRegexLexer): (r'(?s)/\*.*?\*/', Comment.Multi), # Modules, imports, etc. - (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`]+)', + (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`\.]+)', bygroups(Keyword.Namespace, Keyword.Namespace, Text, Name.Class)), (r'(?<=\n)import(?=\s)', Keyword.Namespace, 'import'), (r'(?<=\n)from(?=\s)', Keyword.Namespace, 'fromimport'), @@ -128,7 +128,7 @@ class CleanLexer(ExtendedRegexLexer): # Function definitions (r'(?=\{\|)', Whitespace, 'genericfunction'), - (r'(?<=\n)([ \t]*)([\w`$()=\-<>~*\^|+&%]+)((?:\s+[\w])*)(\s*)(::)', + (r'(?<=\n)([ \t]*)([\w`$()=\-<>~*\^|+&%]+)((?:\s+\w)*)(\s*)(::)', bygroups(store_indent, Name.Function, Keyword.Type, Whitespace, Punctuation), 'functiondefargs'), @@ -149,8 +149,12 @@ class CleanLexer(ExtendedRegexLexer): (words(('True', 'False'), prefix=r'(?<=\s)', suffix=r'(?=\s)'), Literal), + # Qualified names + (r'(\')([\w\.]+)(\'\.)', + bygroups(Punctuation, Name.Namespace, Punctuation)), + # Everything else is some name - (r'([\w`$%]+\.?)*[\w`$%]+', Name), + (r'([\w`$%\/\?@]+\.?)*[\w`$%\/\?@]+', Name), # Punctuation (r'[{}()\[\],:;.#]',
Punctuation), @@ -167,13 +171,14 @@ class CleanLexer(ExtendedRegexLexer): ], 'fromimport': [ include('common'), - (r'([\w`]+)', check_class_not_import), + (r'([\w`\.]+)', check_class_not_import), (r'\n', Whitespace, '#pop'), (r'\s', Whitespace), ], 'fromimportfunc': [ include('common'), - (r'([\w`$()=\-<>~*\^|+&%]+)', check_instance_class), + (r'(::)\s+([^,\s]+)', bygroups(Punctuation, Keyword.Type)), + (r'([\w`$()=\-<>~*\^|+&%\/]+)', check_instance_class), (r',', Punctuation), (r'\n', Whitespace, '#pop'), (r'\s', Whitespace), @@ -199,7 +204,7 @@ class CleanLexer(ExtendedRegexLexer): include('common'), (words(('from', 'import', 'as', 'qualified'), prefix='(?<=\s)', suffix='(?=\s)'), Keyword.Namespace), - (r'[\w`]+', Name.Class), + (r'[\w`\.]+', Name.Class), (r'\n', Whitespace, '#pop'), (r',', Punctuation), (r'[^\S\n]+', Whitespace), @@ -230,7 +235,7 @@ class CleanLexer(ExtendedRegexLexer): (r'->', Punctuation), (r'(\s+of\s+)(\{)', bygroups(Keyword, Punctuation), 'genericftypes'), (r'\s', Whitespace), - (r'[\w`]+', Keyword.Type), + (r'[\w`\[\]{}!]+', Keyword.Type), (r'[*()]', Punctuation), ], 'genericftypes': [ @@ -263,12 +268,20 @@ class CleanLexer(ExtendedRegexLexer): (r'\n(\s*)', check_indent3), (r'^(?=\S)', Whitespace, '#pop:3'), (r'[,&]', Punctuation), - (r'[\w`$()=\-<>~*\^|+&%]', Name.Function, 'functionname'), - (r'\s', Whitespace), + (r'\[', Punctuation, 'functiondefuniquneq'), + (r'[\w`$()=\-<>~*\^|+&%\/{}\[\]@]', Name.Function, 'functionname'), + (r'\s+', Whitespace), + ], + 'functiondefuniquneq': [ + include('common'), + (r'[a-z]+', Keyword.Type), + (r'\s+', Whitespace), + (r'<=|,', Punctuation), + (r'\]', Punctuation, '#pop') ], 'functionname': [ include('common'), - (r'[\w`$()=\-<>~*\^|+&%]+', Name.Function), + (r'[\w`$()=\-<>~*\^|+&%\/]+', Name.Function), (r'(?=\{\|)', Punctuation, 'genericfunction'), default('#pop'), ] diff --git a/pygments/lexers/compiled.py b/pygments/lexers/compiled.py index 1cf83d7f..b6673437 100644 --- 
a/pygments/lexers/compiled.py +++ b/pygments/lexers/compiled.py @@ -29,5 +29,6 @@ from pygments.lexers.dylan import DylanLexer, DylanLidLexer, DylanConsoleLexer from pygments.lexers.ooc import OocLexer from pygments.lexers.felix import FelixLexer from pygments.lexers.nimrod import NimrodLexer +from pygments.lexers.crystal import CrystalLexer __all__ = [] diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 9cc291e5..27d4bcb6 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -100,6 +100,8 @@ class PropertiesLexer(RegexLexer): """ Lexer for configuration files in Java's properties format. + Note: trailing whitespace counts as part of the value as per spec + .. versionadded:: 1.4 """ @@ -110,10 +112,12 @@ class PropertiesLexer(RegexLexer): tokens = { 'root': [ - (r'\s+', Text), - (r'(?:[;#]|//).*$', Comment), + (r'^(\w+)([ \t])(\w+\s*)$', bygroups(Name.Attribute, Text, String)), + (r'^\w+(\\[ \t]\w*)*$', Name.Attribute), + (r'(^ *)([#!].*)', bygroups(Text, Comment)), (r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)', bygroups(Name.Attribute, Text, Operator, Text, String)), + (r'\s', Text), ], } diff --git a/pygments/lexers/crystal.py b/pygments/lexers/crystal.py new file mode 100644 index 00000000..78c70b61 --- /dev/null +++ b/pygments/lexers/crystal.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.crystal + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for Crystal. + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \ + bygroups, default, LexerContext, do_insertions, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic +from pygments.util import shebang_matches + +__all__ = ['CrystalLexer'] + +line_re = re.compile('.*?\n') + + + + +CRYSTAL_OPERATORS = [ + '!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<', + '===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~' +] + + +class CrystalLexer(ExtendedRegexLexer): + """ + For `Crystal <http://crystal-lang.org>`_ source code. + """ + + name = 'Crystal' + aliases = ['cr', 'crystal'] + filenames = ['*.cr'] + mimetypes = ['text/x-crystal'] + + flags = re.DOTALL | re.MULTILINE + + def heredoc_callback(self, match, ctx): + # okay, this is the hardest part of parsing Crystal... + # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line + + start = match.start(1) + yield start, Operator, match.group(1) # <<-? 
+ yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` + yield match.start(3), String.Delimiter, match.group(3) # heredoc name + yield match.start(4), String.Heredoc, match.group(4) # quote again + + heredocstack = ctx.__dict__.setdefault('heredocstack', []) + outermost = not bool(heredocstack) + heredocstack.append((match.group(1) == '<<-', match.group(3))) + + ctx.pos = match.start(5) + ctx.end = match.end(5) + # this may find other heredocs + for i, t, v in self.get_tokens_unprocessed(context=ctx): + yield i, t, v + ctx.pos = match.end() + + if outermost: + # this is the outer heredoc again, now we can process them all + for tolerant, hdname in heredocstack: + lines = [] + for match in line_re.finditer(ctx.text, ctx.pos): + if tolerant: + check = match.group().strip() + else: + check = match.group().rstrip() + if check == hdname: + for amatch in lines: + yield amatch.start(), String.Heredoc, amatch.group() + yield match.start(), String.Delimiter, match.group() + ctx.pos = match.end() + break + else: + lines.append(match) + else: + # end of heredoc not found -- error! 
+ for amatch in lines: + yield amatch.start(), Error, amatch.group() + ctx.end = len(ctx.text) + del heredocstack[:] + + def gen_crystalstrings_rules(): + def intp_regex_callback(self, match, ctx): + yield match.start(1), String.Regex, match.group(1) # begin + nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Regex, match.group(4) # end[imsx]* + ctx.pos = match.end() + + def intp_string_callback(self, match, ctx): + yield match.start(1), String.Other, match.group(1) + nctx = LexerContext(match.group(3), 0, ['interpolated-string']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Other, match.group(4) # end + ctx.pos = match.end() + + states = {} + states['strings'] = [ + (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol), + (words(CRYSTAL_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol), + (r":'(\\\\|\\'|[^'])*'", String.Symbol), + # This allows arbitrary text after '\ for simplicity + (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char), + (r':"', String.Symbol, 'simple-sym'), + # Crystal doesn't have "symbol:"s but this simplifies function args + (r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)), + (r'"', String.Double, 'simple-string'), + (r'(?<!\.)`', String.Backtick, 'simple-backtick'), + ] + + # double-quoted string and symbol + for name, ttype, end in ('string', String.Double, '"'), \ + ('sym', String.Symbol, '"'), \ + ('backtick', String.Backtick, '`'): + states['simple-'+name] = [ + include('string-escaped' if name == 'sym' else 'string-intp-escaped'), + (r'[^\\%s#]+' % end, ttype), + (r'[\\#]', ttype), + (end, ttype, '#pop'), + ] + + # braced quoted strings + for lbrace, rbrace, bracecc, name in \ + ('\\{', '\\}', '{}', 'cb'), \ + ('\\[', '\\]', '\\[\\]', 'sb'), \ + ('\\(', '\\)', '()', 'pa'), \ + ('<', '>', '<>', 'ab'): + 
states[name+'-intp-string'] = [ + (r'\\[' + lbrace + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + include('string-intp-escaped'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%' + lbrace, String.Other, + name+'-intp-string')) + states[name+'-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + # http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html + states['strings'].append((r'%[wi]' + lbrace, String.Other, + name+'-string')) + states[name+'-regex'] = [ + (r'\\[\\' + bracecc + ']', String.Regex), + (lbrace, String.Regex, '#push'), + (rbrace + '[imsx]*', String.Regex, '#pop'), + include('string-intp'), + (r'[\\#' + bracecc + ']', String.Regex), + (r'[^\\#' + bracecc + ']+', String.Regex), + ] + states['strings'].append((r'%r' + lbrace, String.Regex, + name+'-regex')) + + # these must come after %<brace>! + states['strings'] += [ + # %r regex + (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)', + intp_regex_callback), + # regular fancy strings with qsw + (r'(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + # special forms of fancy strings after operators or + # in method calls with braces + (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # and because of fixed width lookbehinds the whole thing a + # second time for line startings... 
+ (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # all regular fancy strings without qsw + (r'(%([\[{(<]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + ] + + return states + + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + # keywords + (words(''' + abstract asm as begin break case do else elsif end ensure extend ifdef if + include instance_sizeof next of pointerof private protected rescue return + require sizeof super then typeof unless until when while with yield + '''.split(), suffix=r'\b'), Keyword), + (words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant), + # start of function, class and module names + (r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)', + bygroups(Keyword, Text, Name.Namespace), 'funcname'), + (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), + (r'(class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)', + bygroups(Keyword, Text, Name.Namespace), 'classname'), + (r'(self|out|uninitialized)\b|(is_a|responds_to)\?', Keyword.Pseudo), + # macros + (words(''' + debugger record pp assert_responds_to spawn parallel + getter setter property delegate def_hash def_equals def_equals_and_hash forward_missing_to + '''.split(), suffix=r'\b'), Name.Builtin.Pseudo), + (r'getter[!?]|property[!?]|__(DIR|FILE|LINE)__\b', Name.Builtin.Pseudo), + # builtins + # http://crystal-lang.org/api/toplevel.html + (words(''' + Object Value Struct Reference Proc Class Nil Symbol Enum Void + Bool Number Int Int8 Int16 Int32 Int64 UInt8 UInt16 UInt32 UInt64 Float Float32 Float64 Char String + Pointer Slice Range Exception Regex + Mutex StaticArray Array Hash Set Tuple Deque Box Process File Dir Time Channel Concurrent Scheduler + abort at_exit caller delay exit fork future get_stack_top gets lazy loop main p print printf puts + raise rand read_line sleep sprintf system with_color + '''.split(), 
prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), + # normal heredocs + (r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)', + heredoc_callback), + # empty string heredocs + (r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback), + (r'__END__', Comment.Preproc, 'end-part'), + # multiline regex (after keywords or assignments) + (r'(?:^|(?<=[=<>~!:])|' + r'(?<=(?:\s|;)when\s)|' + r'(?<=(?:\s|;)or\s)|' + r'(?<=(?:\s|;)and\s)|' + r'(?<=\.index\s)|' + r'(?<=\.scan\s)|' + r'(?<=\.sub\s)|' + r'(?<=\.sub!\s)|' + r'(?<=\.gsub\s)|' + r'(?<=\.gsub!\s)|' + r'(?<=\.match\s)|' + r'(?<=(?:\s|;)if\s)|' + r'(?<=(?:\s|;)elsif\s)|' + r'(?<=^when\s)|' + r'(?<=^index\s)|' + r'(?<=^scan\s)|' + r'(?<=^sub\s)|' + r'(?<=^gsub\s)|' + r'(?<=^sub!\s)|' + r'(?<=^gsub!\s)|' + r'(?<=^match\s)|' + r'(?<=^if\s)|' + r'(?<=^elsif\s)' + r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'), + # multiline regex (in method calls or subscripts) + (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), + # multiline regex (this time the funny no whitespace rule) + (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), + 'multiline-regex'), + # lex numbers and ignore following regular expressions which + # are division operators in fact (grrrr. i hate that. any + # better ideas?) + # since pygments 0.7 we also eat a "?" operator after numbers + # so that the char operator does not work. Chars are not allowed + # there so that you can use the ternary operator. 
+ # stupid example: + # x>=0?n[x]:"" + (r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + # 3 separate expressions for floats because any of the 3 optional parts makes it a float + (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?(?:_?[f][0-9]+)?)(\s*)([/?])?', + bygroups(Number.Float, Text, Operator)), + (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)(?:_?[f][0-9]+)?)(\s*)([/?])?', + bygroups(Number.Float, Text, Operator)), + (r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?(?:_?[f][0-9]+))(\s*)([/?])?', + bygroups(Number.Float, Text, Operator)), + (r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + # Names + (r'@@[a-zA-Z_]\w*', Name.Variable.Class), + (r'@[a-zA-Z_]\w*', Name.Variable.Instance), + (r'\$\w+', Name.Variable.Global), + (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global), + (r'\$-[0adFiIlpvw]', Name.Variable.Global), + (r'::', Operator), + include('strings'), + # chars + (r'\?(\\[MC]-)*' # modifiers + r'(\\([\\befnrtv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' + r'(?!\w)', + String.Char), + (r'[A-Z][A-Z_]+\b', Name.Constant), + # macro expansion + (r'\{%', String.Interpol, 'in-macro-control'), + (r'\{\{', String.Interpol, 'in-macro-expr'), + # attributes + (r'(@\[)(\s*)([A-Z]\w*)', bygroups(Operator, Text, Name.Decorator), 'in-attr'), + # this is needed because Crystal attributes can look + # like keywords (class) or like this: ` ?!? + (words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'), + bygroups(Operator, Name.Operator)), + (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])', + bygroups(Operator, Name)), + # Names can end with [!?] 
unless it's "!=" + (r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name), + (r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&!^|~]=?', Operator), + (r'[(){};,/?:\\]', Punctuation), + (r'\s+', Text) + ], + 'funcname': [ + (r'(?:([a-zA-Z_]\w*)(\.))?' + r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|' + r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', + bygroups(Name.Class, Operator, Name.Function), '#pop'), + default('#pop') + ], + 'classname': [ + (r'[A-Z_]\w*', Name.Class), + (r'(\()(\s*)([A-Z_]\w*)(\s*)(\))', bygroups(Punctuation, Text, Name.Class, Text, Punctuation)), + default('#pop') + ], + 'in-intp': [ + (r'\{', String.Interpol, '#push'), + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + 'string-intp': [ + (r'#\{', String.Interpol, 'in-intp'), + ], + 'string-escaped': [ + (r'\\([\\befnstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape) + ], + 'string-intp-escaped': [ + include('string-intp'), + include('string-escaped'), + ], + 'interpolated-regex': [ + include('string-intp'), + (r'[\\#]', String.Regex), + (r'[^\\#]+', String.Regex), + ], + 'interpolated-string': [ + include('string-intp'), + (r'[\\#]', String.Other), + (r'[^\\#]+', String.Other), + ], + 'multiline-regex': [ + include('string-intp'), + (r'\\\\', String.Regex), + (r'\\/', String.Regex), + (r'[\\#]', String.Regex), + (r'[^\\/#]+', String.Regex), + (r'/[imsx]*', String.Regex, '#pop'), + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ], + 'in-macro-control': [ + (r'\{%', String.Interpol, '#push'), + (r'%\}', String.Interpol, '#pop'), + (r'for\b|in\b', Keyword), + include('root'), + ], + 'in-macro-expr': [ + (r'\{\{', String.Interpol, '#push'), + (r'\}\}', String.Interpol, '#pop'), + include('root'), + ], + 'in-attr': [ + (r'\[', Operator, '#push'), + (r'\]', Operator, '#pop'), + include('root'), + ], + } + tokens.update(gen_crystalstrings_rules()) diff --git a/pygments/lexers/data.py b/pygments/lexers/data.py index 84d02f49..4c39db64 100644 --- 
a/pygments/lexers/data.py +++ b/pygments/lexers/data.py @@ -14,9 +14,9 @@ import re from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \ include, bygroups, inherit from pygments.token import Text, Comment, Keyword, Name, String, Number, \ - Punctuation, Literal + Punctuation, Literal, Error -__all__ = ['YamlLexer', 'JsonLexer', 'JsonLdLexer'] +__all__ = ['YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer'] class YamlLexerContext(LexerContext): @@ -476,7 +476,7 @@ class JsonLexer(RegexLexer): # comma terminates the attribute but expects more (r',', Punctuation, '#pop'), # a closing bracket terminates the entire object, so pop twice - (r'\}', Punctuation, ('#pop', '#pop')), + (r'\}', Punctuation, '#pop:2'), ], # a json object - { attr, attr, ... } @@ -508,6 +508,31 @@ class JsonLexer(RegexLexer): ], } + +class JsonBareObjectLexer(JsonLexer): + """ + For JSON data structures (with missing object curly braces). + + .. versionadded:: 2.2 + """ + + name = 'JSONBareObject' + aliases = ['json-object'] + filenames = [] + mimetypes = ['application/json-object'] + + tokens = { + 'root': [ + (r'\}', Error), + include('objectvalue'), + ], + 'objectattribute': [ + (r'\}', Error), + inherit, + ], + } + + class JsonLdLexer(JsonLexer): """ For `JSON-LD <http://json-ld.org/>`_ linked data. 
diff --git a/pygments/lexers/dsls.py b/pygments/lexers/dsls.py index 6032017f..312d5f5e 100644 --- a/pygments/lexers/dsls.py +++ b/pygments/lexers/dsls.py @@ -11,14 +11,14 @@ import re -from pygments.lexer import RegexLexer, bygroups, words, include, default, \ - this, using, combined +from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \ + include, default, this, using, combined from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal, Whitespace __all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer', 'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer', - 'CrmshLexer', 'ThriftLexer', 'FlatlineLexer'] + 'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer'] class ProtoBufLexer(RegexLexer): @@ -156,7 +156,7 @@ class ThriftLexer(RegexLexer): Keyword.Namespace), (words(( 'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double', - 'string', 'binary', 'void', 'map', 'list', 'set', 'slist', + 'string', 'binary', 'map', 'list', 'set', 'slist', 'senum'), suffix=r'\b'), Keyword.Type), (words(( @@ -767,3 +767,112 @@ class FlatlineLexer(RegexLexer): (r'(\(|\))', Punctuation), ], } + + +class SnowballLexer(ExtendedRegexLexer): + """ + Lexer for `Snowball <http://snowballstem.org/>`_ source code. + + .. 
versionadded:: 2.2 + """ + + name = 'Snowball' + aliases = ['snowball'] + filenames = ['*.sbl'] + + _ws = r'\n\r\t ' + + def __init__(self, **options): + self._reset_stringescapes() + ExtendedRegexLexer.__init__(self, **options) + + def _reset_stringescapes(self): + self._start = "'" + self._end = "'" + + def _string(do_string_first): + def callback(lexer, match, ctx): + s = match.start() + text = match.group() + string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match + escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match + pos = 0 + do_string = do_string_first + while pos < len(text): + if do_string: + match = string(text, pos) + yield s + match.start(1), String.Single, match.group(1) + if match.group(2) == "'": + yield s + match.start(2), String.Single, match.group(2) + ctx.stack.pop() + break + yield s + match.start(2), String.Escape, match.group(2) + pos = match.end() + match = escape(text, pos) + yield s + match.start(), String.Escape, match.group() + if match.group(2) != lexer._end: + ctx.stack[-1] = 'escape' + break + pos = match.end() + do_string = True + ctx.pos = s + match.end() + return callback + + def _stringescapes(lexer, match, ctx): + lexer._start = match.group(3) + lexer._end = match.group(5) + return bygroups(Keyword.Reserved, Text, String.Escape, Text, + String.Escape)(lexer, match, ctx) + + tokens = { + 'root': [ + (words(('len', 'lenof'), suffix=r'\b'), Operator.Word), + include('root1'), + ], + 'root1': [ + (r'[%s]+' % _ws, Text), + (r'\d+', Number.Integer), + (r"'", String.Single, 'string'), + (r'[()]', Punctuation), + (r'/\*[\w\W]*?\*/', Comment.Multiline), + (r'//.*', Comment.Single), + (r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator), + (words(('as', 'get', 'hex', 'among', 'define', 'decimal', + 'backwardmode'), suffix=r'\b'), + Keyword.Reserved), + (words(('strings', 'booleans', 'integers', 'routines', 'externals', + 'groupings'), suffix=r'\b'), + Keyword.Reserved, 'declaration'), + (words(('do', 'or', 
'and', 'for', 'hop', 'non', 'not', 'set', 'try', + 'fail', 'goto', 'loop', 'next', 'test', 'true', + 'false', 'unset', 'atmark', 'attach', 'delete', 'gopast', + 'insert', 'repeat', 'sizeof', 'tomark', 'atleast', + 'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit', + 'backwards', 'substring'), suffix=r'\b'), + Operator.Word), + (words(('size', 'limit', 'cursor', 'maxint', 'minint'), + suffix=r'\b'), + Name.Builtin), + (r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws), + bygroups(Keyword.Reserved, Text, String.Escape)), + (r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws), + _stringescapes), + (r'[A-Za-z]\w*', Name), + ], + 'declaration': [ + (r'\)', Punctuation, '#pop'), + (words(('len', 'lenof'), suffix=r'\b'), Name, + ('root1', 'declaration')), + include('root1'), + ], + 'string': [ + (r"[^']*'", _string(True)), + ], + 'escape': [ + (r"[^']*'", _string(False)), + ], + } + + def get_tokens_unprocessed(self, text=None, context=None): + self._reset_stringescapes() + return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context) diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py index c9db26b5..150d930f 100644 --- a/pygments/lexers/esoteric.py +++ b/pygments/lexers/esoteric.py @@ -13,7 +13,8 @@ from pygments.lexer import RegexLexer, include, words from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Error -__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer'] +__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer', + 'CapDLLexer', 'AheuiLexer'] class BrainfuckLexer(RegexLexer): @@ -143,6 +144,63 @@ class CAmkESLexer(RegexLexer): } +class CapDLLexer(RegexLexer): + """ + Basic lexer for + `CapDL <https://ssrg.nicta.com.au/publications/nictaabstracts/Kuz_KLW_10.abstract.pml>`_. + + The source of the primary tool that reads such specifications is available + at https://github.com/seL4/capdl/tree/master/capDL-tool. 
Note that this + lexer only supports a subset of the grammar. For example, identifiers can + shadow type names, but these instances are currently incorrectly + highlighted as types. Supporting this would need a stateful lexer that is + considered unnecessarily complex for now. + """ + name = 'CapDL' + aliases = ['capdl'] + filenames = ['*.cdl'] + + tokens = { + 'root':[ + + # C pre-processor directive + (r'^\s*#.*\n', Comment.Preproc), + + # Whitespace, comments + (r'\s+', Text), + (r'/\*(.|\n)*?\*/', Comment), + (r'(//|--).*\n', Comment), + + (r'[<>\[\(\)\{\},:;=\]]', Punctuation), + (r'\.\.', Punctuation), + + (words(('arch', 'arm11', 'caps', 'child_of', 'ia32', 'irq', 'maps', + 'objects'), suffix=r'\b'), Keyword), + + (words(('aep', 'asid_pool', 'cnode', 'ep', 'frame', 'io_device', + 'io_ports', 'io_pt', 'notification', 'pd', 'pt', 'tcb', + 'ut', 'vcpu'), suffix=r'\b'), Keyword.Type), + + # Properties + (words(('asid', 'addr', 'badge', 'cached', 'dom', 'domainID', 'elf', + 'fault_ep', 'G', 'guard', 'guard_size', 'init', 'ip', + 'prio', 'sp', 'R', 'RG', 'RX', 'RW', 'RWG', 'RWX', 'W', + 'WG', 'WX', 'level', 'masked', 'master_reply', 'paddr', + 'ports', 'reply', 'uncached'), suffix=r'\b'), + Keyword.Reserved), + + # Literals + (r'0[xX][\da-fA-F]+', Number.Hex), + (r'\d+(\.\d+)?(k|M)?', Number), + (words(('bits',), suffix=r'\b'), Number), + (words(('cspace', 'vspace', 'reply_slot', 'caller_slot', + 'ipc_buffer_slot'), suffix=r'\b'), Number), + + # Identifiers + (r'[a-zA-Z_][-_@\.\w]*', Name), + ], + } + class RedcodeLexer(RegexLexer): """ A simple Redcode lexer based on ICWS'94. @@ -177,3 +235,41 @@ class RedcodeLexer(RegexLexer): (r'[-+]?\d+', Number.Integer), ], } + + +class AheuiLexer(RegexLexer): + """ + Aheui_ Lexer. + + Aheui_ is esoteric language based on Korean alphabets. + + .. 
_Aheui:: http://aheui.github.io/ + + """ + + name = 'Aheui' + aliases = ['aheui'] + filenames = ['*.aheui'] + + tokens = { + 'root': [ + (u'[' + u'나-낳냐-냫너-넣녀-녛노-놓뇨-눟뉴-닇' + u'다-닿댜-댷더-덯뎌-뎧도-돟됴-둫듀-딓' + u'따-땋땨-떃떠-떻뗘-뗳또-똫뚀-뚷뜌-띟' + u'라-랗랴-럏러-렇려-렿로-롷료-뤃류-릫' + u'마-맣먀-먛머-멓며-몋모-뫃묘-뭏뮤-믷' + u'바-밯뱌-뱧버-벟벼-볗보-봏뵤-붛뷰-빃' + u'빠-빻뺘-뺳뻐-뻫뼈-뼣뽀-뽛뾰-뿧쀼-삏' + u'사-샇샤-샿서-섷셔-셯소-솧쇼-숳슈-싛' + u'싸-쌓쌰-썋써-쎃쎠-쎻쏘-쏳쑈-쑿쓔-씧' + u'자-잫쟈-쟣저-젛져-졓조-좋죠-줗쥬-즿' + u'차-챃챠-챻처-첳쳐-쳫초-촣쵸-춯츄-칗' + u'카-캏캬-컇커-컿켜-켷코-콯쿄-쿻큐-킣' + u'타-탛탸-턓터-텋텨-톃토-톻툐-퉇튜-틯' + u'파-팧퍄-퍟퍼-펗펴-폏포-퐇표-풓퓨-픻' + u'하-핳햐-햫허-헣혀-혛호-홓효-훟휴-힇' + u']', Operator), + ('.', Comment), + ], + } diff --git a/pygments/lexers/ezhil.py b/pygments/lexers/ezhil.py index a5468a0f..eea300ad 100644 --- a/pygments/lexers/ezhil.py +++ b/pygments/lexers/ezhil.py @@ -36,13 +36,13 @@ class EzhilLexer(RegexLexer): (r'#.*\n', Comment.Single), (r'[@+/*,^\-%]|[!<>=]=?|&&?|\|\|?', Operator), (u'இல்', Operator.Word), - (words(('assert', 'max', 'min', - 'நீளம்','சரம்_இடமாற்று','சரம்_கண்டுபிடி', - 'பட்டியல்','பின்இணை','வரிசைப்படுத்து', - 'எடு','தலைகீழ்','நீட்டிக்க','நுழைக்க','வை', - 'கோப்பை_திற','கோப்பை_எழுது','கோப்பை_மூடு', - 'pi','sin','cos','tan','sqrt','hypot','pow','exp','log','log10' - 'min','max','exit', + (words((u'assert', u'max', u'min', + u'நீளம்', u'சரம்_இடமாற்று', u'சரம்_கண்டுபிடி', + u'பட்டியல்', u'பின்இணை', u'வரிசைப்படுத்து', + u'எடு', u'தலைகீழ்', u'நீட்டிக்க', u'நுழைக்க', u'வை', + u'கோப்பை_திற', u'கோப்பை_எழுது', u'கோப்பை_மூடு', + u'pi', u'sin', u'cos', u'tan', u'sqrt', u'hypot', u'pow', + u'exp', u'log', u'log10', u'exit', ), suffix=r'\b'), Name.Builtin), (r'(True|False)\b', Keyword.Constant), (r'[^\S\n]+', Text), diff --git a/pygments/lexers/forth.py b/pygments/lexers/forth.py new file mode 100644 index 00000000..eb806ba0 --- /dev/null +++ b/pygments/lexers/forth.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.forth + ~~~~~~~~~~~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups +from pygments.token import Error, Punctuation, Literal, Token, \ + Text, Comment, Operator, Keyword, Name, String, Number, Generic + + +__all__ = ['ForthLexer'] + + +class ForthLexer(RegexLexer): + """ + Lexer for Forth files. + + .. versionadded:: 2.2 + """ + name = 'Forth' + aliases = ['forth'] + filenames = ['*.frt', '*.fs'] + mimetypes = ['application/x-forth'] + + delimiter = r'\s' + delimiter_end = r'(?=[%s])' % delimiter + + valid_name_chars = r'[^%s]' % delimiter + valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) + + flags = re.IGNORECASE | re.MULTILINE + + tokens = { + 'root': [ + (r'\s+', Text), + # All comment types + (r'\\.*?\n', Comment.Single), + (r'\([\s].*?\)', Comment.Single), + # defining words. The next word is a new command name + (r'(:|variable|constant|value|buffer:)(\s+)', + bygroups(Keyword.Namespace, Text), 'worddef'), + # strings are rather simple + (r'([.sc]")(\s+?)', bygroups(String, Text), 'stringdef'), + # keywords from the various wordsets + # *** Wordset BLOCK + (r'(blk|block|buffer|evaluate|flush|load|save-buffers|update|' + # *** Wordset BLOCK-EXT + r'empty-buffers|list|refill|scr|thru|' + # *** Wordset CORE + r'\#s|\*\/mod|\+loop|\/mod|0<|0=|1\+|1-|2!|' + r'2\*|2\/|2@|2drop|2dup|2over|2swap|>body|' + r'>in|>number|>r|\?dup|abort|abort\"|abs|' + r'accept|align|aligned|allot|and|base|begin|' + r'bl|c!|c,|c@|cell\+|cells|char|char\+|' + r'chars|constant|count|cr|create|decimal|' + r'depth|do|does>|drop|dup|else|emit|environment\?|' + r'evaluate|execute|exit|fill|find|fm\/mod|' + r'here|hold|i|if|immediate|invert|j|key|' + r'leave|literal|loop|lshift|m\*|max|min|' + r'mod|move|negate|or|over|postpone|quit|' + r'r>|r@|recurse|repeat|rot|rshift|s\"|s>d|' + r'sign|sm\/rem|source|space|spaces|state|swap|' + r'then|type|u\.|u\<|um\*|um\/mod|unloop|until|' + r'variable|while|word|xor|\[char\]|\[\'\]|' + r'@|!|\#|<\#|\#>|:|;|\+|-|\*|\/|,|<|>|\|1\+|1-|\.|' 
+ # *** Wordset CORE-EXT + r'\.r|0<>|' + r'0>|2>r|2r>|2r@|:noname|\?do|again|c\"|' + r'case|compile,|endcase|endof|erase|false|' + r'hex|marker|nip|of|pad|parse|pick|refill|' + r'restore-input|roll|save-input|source-id|to|' + r'true|tuck|u\.r|u>|unused|value|within|' + r'\[compile\]|' + # *** Wordset CORE-EXT-obsolescent + r'\#tib|convert|expect|query|span|' + r'tib|' + # *** Wordset DOUBLE + r'2constant|2literal|2variable|d\+|d-|' + r'd\.|d\.r|d0<|d0=|d2\*|d2\/|d<|d=|d>s|' + r'dabs|dmax|dmin|dnegate|m\*\/|m\+|' + # *** Wordset DOUBLE-EXT + r'2rot|du<|' + # *** Wordset EXCEPTION + r'catch|throw|' + # *** Wordset EXCEPTION-EXT + r'abort|abort\"|' + # *** Wordset FACILITY + r'at-xy|key\?|page|' + # *** Wordset FACILITY-EXT + r'ekey|ekey>char|ekey\?|emit\?|ms|time&date|' + # *** Wordset FILE + r'BIN|CLOSE-FILE|CREATE-FILE|DELETE-FILE|FILE-POSITION|' + r'FILE-SIZE|INCLUDE-FILE|INCLUDED|OPEN-FILE|R\/O|' + r'R\/W|READ-FILE|READ-LINE|REPOSITION-FILE|RESIZE-FILE|' + r'S\"|SOURCE-ID|W/O|WRITE-FILE|WRITE-LINE|' + # *** Wordset FILE-EXT + r'FILE-STATUS|FLUSH-FILE|REFILL|RENAME-FILE|' + # *** Wordset FLOAT + r'>float|d>f|' + r'f!|f\*|f\+|f-|f\/|f0<|f0=|f<|f>d|f@|' + r'falign|faligned|fconstant|fdepth|fdrop|fdup|' + r'fliteral|float\+|floats|floor|fmax|fmin|' + r'fnegate|fover|frot|fround|fswap|fvariable|' + r'represent|' + # *** Wordset FLOAT-EXT + r'df!|df@|dfalign|dfaligned|dfloat\+|' + r'dfloats|f\*\*|f\.|fabs|facos|facosh|falog|' + r'fasin|fasinh|fatan|fatan2|fatanh|fcos|fcosh|' + r'fe\.|fexp|fexpm1|fln|flnp1|flog|fs\.|fsin|' + r'fsincos|fsinh|fsqrt|ftan|ftanh|f~|precision|' + r'set-precision|sf!|sf@|sfalign|sfaligned|sfloat\+|' + r'sfloats|' + # *** Wordset LOCAL + r'\(local\)|to|' + # *** Wordset LOCAL-EXT + r'locals\||' + # *** Wordset MEMORY + r'allocate|free|resize|' + # *** Wordset SEARCH + r'definitions|find|forth-wordlist|get-current|' + r'get-order|search-wordlist|set-current|set-order|' + r'wordlist|' + # *** Wordset SEARCH-EXT + 
r'also|forth|only|order|previous|' + # *** Wordset STRING + r'-trailing|\/string|blank|cmove|cmove>|compare|' + r'search|sliteral|' + # *** Wordset TOOLS + r'.s|dump|see|words|' + # *** Wordset TOOLS-EXT + r';code|' + r'ahead|assembler|bye|code|cs-pick|cs-roll|' + r'editor|state|\[else\]|\[if\]|\[then\]|' + # *** Wordset TOOLS-EXT-obsolescent + r'forget|' + # Forth 2012 + r'defer|defer@|defer!|action-of|begin-structure|field:|buffer:|' + r'parse-name|buffer:|traverse-wordlist|n>r|nr>|2value|fvalue|' + r'name>interpret|name>compile|name>string|' + r'cfield:|end-structure)'+delimiter, Keyword), + + # Numbers + (r'(\$[0-9A-F]+)', Number.Hex), + (r'(\#|%|&|\-|\+)?[0-9]+', Number.Integer), + (r'(\#|%|&|\-|\+)?[0-9.]+', Keyword.Type), + # amforth specific + (r'(@i|!i|@e|!e|pause|noop|turnkey|sleep|' + r'itype|icompare|sp@|sp!|rp@|rp!|up@|up!|' + r'>a|a>|a@|a!|a@+|a@-|>b|b>|b@|b!|b@+|b@-|' + r'find-name|1ms|' + r'sp0|rp0|\(evaluate\)|int-trap|int!)' + delimiter, + Name.Constant), + # a proposal + (r'(do-recognizer|r:fail|recognizer:|get-recognizers|' + r'set-recognizers|r:float|r>comp|r>int|r>post|' + r'r:name|r:word|r:dnum|r:num|recognizer|forth-recognizer|' + r'rec:num|rec:float|rec:word)' + delimiter, Name.Decorator), + # defining words. 
The next word is a new command name + (r'(Evalue|Rvalue|Uvalue|Edefer|Rdefer|Udefer)(\s+)', + bygroups(Keyword.Namespace, Text), 'worddef'), + + (valid_name, Name.Function), # Anything else is executed + + ], + 'worddef': [ + (r'\S+', Name.Class, '#pop'), + ], + 'stringdef': [ + (r'[^"]+', String, '#pop'), + ], + } diff --git a/pygments/lexers/html.py b/pygments/lexers/html.py index 7893952f..24733748 100644 --- a/pygments/lexers/html.py +++ b/pygments/lexers/html.py @@ -23,7 +23,7 @@ from pygments.lexers.css import CssLexer, _indentation, _starts_block from pygments.lexers.ruby import RubyLexer __all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer', - 'ScamlLexer', 'JadeLexer'] + 'ScamlLexer', 'PugLexer'] class HtmlLexer(RegexLexer): @@ -492,19 +492,19 @@ class ScamlLexer(ExtendedRegexLexer): } -class JadeLexer(ExtendedRegexLexer): +class PugLexer(ExtendedRegexLexer): """ - For Jade markup. - Jade is a variant of Scaml, see: + For Pug markup. + Pug is a variant of Scaml, see: http://scalate.fusesource.org/documentation/scaml-reference.html .. versionadded:: 1.4 """ - name = 'Jade' - aliases = ['jade'] - filenames = ['*.jade'] - mimetypes = ['text/x-jade'] + name = 'Pug' + aliases = ['pug', 'jade'] + filenames = ['*.pug', '*.jade'] + mimetypes = ['text/x-pug', 'text/x-jade'] flags = re.IGNORECASE _dot = r'.' 
@@ -599,3 +599,4 @@ class JadeLexer(ExtendedRegexLexer): (r'\n', Text, 'root'), ], } +JadeLexer = PugLexer # compat diff --git a/pygments/lexers/j.py b/pygments/lexers/j.py index f15595f8..1231d597 100644 --- a/pygments/lexers/j.py +++ b/pygments/lexers/j.py @@ -75,8 +75,8 @@ class JLexer(RegexLexer): 'fetch', 'file2url', 'fixdotdot', 'fliprgb', 'getargs', 'getenv', 'hfd', 'inv', 'inverse', 'iospath', 'isatty', 'isutf8', 'items', 'leaf', 'list', - 'nameclass', 'namelist', 'namelist', 'names', 'nc', - 'nl', 'on', 'pick', 'pick', 'rows', + 'nameclass', 'namelist', 'names', 'nc', + 'nl', 'on', 'pick', 'rows', 'script', 'scriptd', 'sign', 'sminfo', 'smoutput', 'sort', 'split', 'stderr', 'stdin', 'stdout', 'table', 'take', 'timespacex', 'timex', 'tmoutput', diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py index 5dca6832..a23ba184 100644 --- a/pygments/lexers/javascript.py +++ b/pygments/lexers/javascript.py @@ -20,7 +20,7 @@ import pygments.unistring as uni __all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer', 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer', - 'CoffeeScriptLexer', 'MaskLexer', 'EarlGreyLexer'] + 'CoffeeScriptLexer', 'MaskLexer', 'EarlGreyLexer', 'JuttleLexer'] JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + ']|\\\\u[a-fA-F0-9]{4})') @@ -1016,6 +1016,12 @@ class CoffeeScriptLexer(RegexLexer): filenames = ['*.coffee'] mimetypes = ['text/coffeescript'] + + _operator_re = ( + r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' + r'\|\||\\(?=\n)|' + r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?') + flags = re.DOTALL tokens = { 'commentsandwhitespace': [ @@ -1034,17 +1040,17 @@ class CoffeeScriptLexer(RegexLexer): (r'///', String.Regex, ('#pop', 'multilineregex')), (r'/(?! 
)(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), + # This isn't really guarding against mishighlighting well-formed + # code, just the ability to infinite-loop between root and + # slashstartsregex. + (r'/', Operator), default('#pop'), ], 'root': [ - # this next expr leads to infinite loops root -> slashstartsregex - # (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), - (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' - r'\|\||\\(?=\n)|' - r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|^/])=?', - Operator, 'slashstartsregex'), - (r'(?:\([^()]*\))?\s*[=-]>', Name.Function), + (r'^(?=\s|/)', Text, 'slashstartsregex'), + (_operator_re, Operator, 'slashstartsregex'), + (r'(?:\([^()]*\))?\s*[=-]>', Name.Function, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![.$])(for|own|in|of|while|until|' @@ -1065,7 +1071,7 @@ class CoffeeScriptLexer(RegexLexer): (r'@[$a-zA-Z_][\w.:$]*\s*[:=]\s', Name.Variable.Instance, 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), - (r'@?[$a-zA-Z_][\w$]*', Name.Other, 'slashstartsregex'), + (r'@?[$a-zA-Z_][\w$]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), @@ -1438,3 +1444,63 @@ class EarlGreyLexer(RegexLexer): (r'\d+', Number.Integer) ], } + +class JuttleLexer(RegexLexer): + """ + For `Juttle`_ source code. + + .. 
_Juttle: https://github.com/juttle/juttle + + """ + + name = 'Juttle' + aliases = ['juttle', 'juttle'] + filenames = ['*.juttle'] + mimetypes = ['application/juttle', 'application/x-juttle', + 'text/x-juttle', 'text/juttle'] + + flags = re.DOTALL | re.UNICODE | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/)', Text, 'slashstartsregex'), + include('commentsandwhitespace'), + (r':\d{2}:\d{2}:\d{2}(\.\d*)?:', String.Moment), + (r':(now|beginning|end|forever|yesterday|today|tomorrow|(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment), + (r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment), + (r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?' 
+ r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?)' + r'|[ ]+(ago|from[ ]+now))*:', String.Moment), + (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' + r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'), + (r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|' + r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b', Keyword.Reserved), + (r'(true|false|null|Infinity)\b', Keyword.Constant), + (r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b', Name.Builtin), + (JS_IDENT, Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single) + ] + + } diff --git a/pygments/lexers/julia.py b/pygments/lexers/julia.py index 9f84b8d9..95c503a0 100644 --- a/pygments/lexers/julia.py +++ b/pygments/lexers/julia.py @@ -11,15 +11,17 @@ import re -from pygments.lexer import Lexer, RegexLexer, bygroups, combined, \ - do_insertions, words +from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \ + words, include from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Generic from pygments.util import shebang_matches, unirange __all__ = ['JuliaLexer', 'JuliaConsoleLexer'] -line_re = re.compile('.*?\n') +allowed_variable = ( + u'(?:[a-zA-Z_\u00A1-\uffff]|%s)(?:[a-zA-Z_0-9\u00A1-\uffff]|%s)*!*' % + ((unirange(0x10000, 0x10ffff),) * 2)) class JuliaLexer(RegexLexer): @@ -28,6 +30,7 @@ class JuliaLexer(RegexLexer): .. 
versionadded:: 1.6 """ + name = 'Julia' aliases = ['julia', 'jl'] filenames = ['*.jl'] @@ -35,59 +38,151 @@ class JuliaLexer(RegexLexer): flags = re.MULTILINE | re.UNICODE - builtins = ( - 'exit', 'whos', 'edit', 'load', 'is', 'isa', 'isequal', 'typeof', 'tuple', - 'ntuple', 'uid', 'hash', 'finalizer', 'convert', 'promote', 'subtype', - 'typemin', 'typemax', 'realmin', 'realmax', 'sizeof', 'eps', 'promote_type', - 'method_exists', 'applicable', 'invoke', 'dlopen', 'dlsym', 'system', - 'error', 'throw', 'assert', 'new', 'Inf', 'Nan', 'pi', 'im', - ) - - keywords = ( - 'begin', 'while', 'for', 'in', 'return', 'break', 'continue', - 'macro', 'quote', 'let', 'if', 'elseif', 'else', 'try', 'catch', 'end', - 'bitstype', 'ccall', 'do', 'using', 'module', 'import', 'export', - 'importall', 'baremodule', 'immutable', - ) - - types = ( - 'Bool', 'Int', 'Int8', 'Int16', 'Int32', 'Int64', 'Uint', 'Uint8', 'Uint16', - 'Uint32', 'Uint64', 'Float32', 'Float64', 'Complex64', 'Complex128', 'Any', - 'Nothing', 'None', - ) - tokens = { 'root': [ (r'\n', Text), (r'[^\S\n]+', Text), (r'#=', Comment.Multiline, "blockcomment"), (r'#.*$', Comment), - (r'[\[\]{}:(),;@]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), + (r'[\[\]{}(),;]', Punctuation), # keywords + (r'in\b', Keyword.Pseudo), + (r'(true|false)\b', Keyword.Constant), (r'(local|global|const)\b', Keyword.Declaration), - (words(keywords, suffix=r'\b'), Keyword), - (words(types, suffix=r'\b'), Keyword.Type), - + (words([ + 'function', 'type', 'typealias', 'abstract', 'immutable', + 'baremodule', 'begin', 'bitstype', 'break', 'catch', 'ccall', + 'continue', 'do', 'else', 'elseif', 'end', 'export', 'finally', + 'for', 'if', 'import', 'importall', 'let', 'macro', 'module', + 'quote', 'return', 'try', 'using', 'while'], + suffix=r'\b'), Keyword), + + # NOTE + # Patterns below work only for definition sites and thus hardly reliable. 
+ # # functions - (r'(function)((?:\s|\\\s)+)', - bygroups(Keyword, Name.Function), 'funcname'), - + # (r'(function)(\s+)(' + allowed_variable + ')', + # bygroups(Keyword, Text, Name.Function)), + # # types - (r'(type|typealias|abstract|immutable)((?:\s|\\\s)+)', - bygroups(Keyword, Name.Class), 'typename'), - - # operators - (r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator), - (r'\.\*|\.\^|\.\\|\.\/|\\', Operator), + # (r'(type|typealias|abstract|immutable)(\s+)(' + allowed_variable + ')', + # bygroups(Keyword, Text, Name.Class)), + + # type names + (words([ + 'ANY', 'ASCIIString', 'AbstractArray', 'AbstractChannel', + 'AbstractFloat', 'AbstractMatrix', 'AbstractRNG', + 'AbstractSparseArray', 'AbstractSparseMatrix', + 'AbstractSparseVector', 'AbstractString', 'AbstractVecOrMat', + 'AbstractVector', 'Any', 'ArgumentError', 'Array', + 'AssertionError', 'Associative', 'Base64DecodePipe', + 'Base64EncodePipe', 'Bidiagonal', 'BigFloat', 'BigInt', + 'BitArray', 'BitMatrix', 'BitVector', 'Bool', 'BoundsError', + 'Box', 'BufferStream', 'CapturedException', 'CartesianIndex', + 'CartesianRange', 'Cchar', 'Cdouble', 'Cfloat', 'Channel', + 'Char', 'Cint', 'Cintmax_t', 'Clong', 'Clonglong', + 'ClusterManager', 'Cmd', 'Coff_t', 'Colon', 'Complex', + 'Complex128', 'Complex32', 'Complex64', 'CompositeException', + 'Condition', 'Cptrdiff_t', 'Cshort', 'Csize_t', 'Cssize_t', + 'Cstring', 'Cuchar', 'Cuint', 'Cuintmax_t', 'Culong', + 'Culonglong', 'Cushort', 'Cwchar_t', 'Cwstring', 'DataType', + 'Date', 'DateTime', 'DenseArray', 'DenseMatrix', + 'DenseVecOrMat', 'DenseVector', 'Diagonal', 'Dict', + 'DimensionMismatch', 'Dims', 'DirectIndexString', 'Display', + 'DivideError', 'DomainError', 'EOFError', 'EachLine', 'Enum', + 'Enumerate', 'ErrorException', 'Exception', 'Expr', + 'Factorization', 'FileMonitor', 'FileOffset', 'Filter', + 'Float16', 'Float32', 'Float64', 'FloatRange', 'Function', + 'GenSym', 'GlobalRef', 'GotoNode', 'HTML', 'Hermitian', 'IO', + 'IOBuffer', 
'IOStream', 'IPv4', 'IPv6', 'InexactError', + 'InitError', 'Int', 'Int128', 'Int16', 'Int32', 'Int64', 'Int8', + 'IntSet', 'Integer', 'InterruptException', 'IntrinsicFunction', + 'InvalidStateException', 'Irrational', 'KeyError', 'LabelNode', + 'LambdaStaticData', 'LinSpace', 'LineNumberNode', 'LoadError', + 'LocalProcess', 'LowerTriangular', 'MIME', 'Matrix', + 'MersenneTwister', 'Method', 'MethodError', 'MethodTable', + 'Module', 'NTuple', 'NewvarNode', 'NullException', 'Nullable', + 'Number', 'ObjectIdDict', 'OrdinalRange', 'OutOfMemoryError', + 'OverflowError', 'Pair', 'ParseError', 'PartialQuickSort', + 'Pipe', 'PollingFileWatcher', 'ProcessExitedException', + 'ProcessGroup', 'Ptr', 'QuoteNode', 'RandomDevice', 'Range', + 'Rational', 'RawFD', 'ReadOnlyMemoryError', 'Real', + 'ReentrantLock', 'Ref', 'Regex', 'RegexMatch', + 'RemoteException', 'RemoteRef', 'RepString', 'RevString', + 'RopeString', 'RoundingMode', 'SegmentationFault', + 'SerializationState', 'Set', 'SharedArray', 'SharedMatrix', + 'SharedVector', 'Signed', 'SimpleVector', 'SparseMatrixCSC', + 'StackOverflowError', 'StatStruct', 'StepRange', 'StridedArray', + 'StridedMatrix', 'StridedVecOrMat', 'StridedVector', 'SubArray', + 'SubString', 'SymTridiagonal', 'Symbol', 'SymbolNode', + 'Symmetric', 'SystemError', 'TCPSocket', 'Task', 'Text', + 'TextDisplay', 'Timer', 'TopNode', 'Tridiagonal', 'Tuple', + 'Type', 'TypeConstructor', 'TypeError', 'TypeName', 'TypeVar', + 'UDPSocket', 'UInt', 'UInt128', 'UInt16', 'UInt32', 'UInt64', + 'UInt8', 'UTF16String', 'UTF32String', 'UTF8String', + 'UndefRefError', 'UndefVarError', 'UnicodeError', 'UniformScaling', + 'Union', 'UnitRange', 'Unsigned', 'UpperTriangular', 'Val', + 'Vararg', 'VecOrMat', 'Vector', 'VersionNumber', 'Void', 'WString', + 'WeakKeyDict', 'WeakRef', 'WorkerConfig', 'Zip'], suffix=r'\b'), + Keyword.Type), # builtins - (words(builtins, suffix=r'\b'), Name.Builtin), + (words([ + u'ARGS', u'CPU_CORES', u'C_NULL', u'DevNull', u'ENDIAN_BOM', + 
u'ENV', u'I', u'Inf', u'Inf16', u'Inf32', u'Inf64', + u'InsertionSort', u'JULIA_HOME', u'LOAD_PATH', u'MergeSort', + u'NaN', u'NaN16', u'NaN32', u'NaN64', u'OS_NAME', + u'QuickSort', u'RoundDown', u'RoundFromZero', u'RoundNearest', + u'RoundNearestTiesAway', u'RoundNearestTiesUp', + u'RoundToZero', u'RoundUp', u'STDERR', u'STDIN', u'STDOUT', + u'VERSION', u'WORD_SIZE', u'catalan', u'e', u'eu', + u'eulergamma', u'golden', u'im', u'nothing', u'pi', u'γ', + u'π', u'φ'], + suffix=r'\b'), Name.Builtin), - # backticks - (r'`(?s).*?`', String.Backtick), + # operators + # see: https://github.com/JuliaLang/julia/blob/master/src/julia-parser.scm + (words([ + # prec-assignment + u'=', u':=', u'+=', u'-=', u'*=', u'/=', u'//=', u'.//=', u'.*=', u'./=', + u'\=', u'.\=', u'^=', u'.^=', u'÷=', u'.÷=', u'%=', u'.%=', u'|=', u'&=', + u'$=', u'=>', u'<<=', u'>>=', u'>>>=', u'~', u'.+=', u'.-=', + # prec-conditional + u'?', + # prec-arrow + u'--', u'-->', + # prec-lazy-or + u'||', + # prec-lazy-and + u'&&', + # prec-comparison + u'>', u'<', u'>=', u'≥', u'<=', u'≤', u'==', u'===', u'≡', u'!=', u'≠', + u'!==', u'≢', u'.>', u'.<', u'.>=', u'.≥', u'.<=', u'.≤', u'.==', u'.!=', + u'.≠', u'.=', u'.!', u'<:', u'>:', u'∈', u'∉', u'∋', u'∌', u'⊆', + u'⊈', u'⊂', + u'⊄', u'⊊', + # prec-pipe + u'|>', u'<|', + # prec-colon + u':', + # prec-plus + u'+', u'-', u'.+', u'.-', u'|', u'∪', u'$', + # prec-bitshift + u'<<', u'>>', u'>>>', u'.<<', u'.>>', u'.>>>', + # prec-times + u'*', u'/', u'./', u'÷', u'.÷', u'%', u'⋅', u'.%', u'.*', u'\\', u'.\\', u'&', u'∩', + # prec-rational + u'//', u'.//', + # prec-power + u'^', u'.^', + # prec-decl + u'::', + # prec-dot + u'.', + # unary op + u'+', u'-', u'!', u'~', u'√', u'∛', u'∜' + ]), Operator), # chars (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|" @@ -97,13 +192,19 @@ class JuliaLexer(RegexLexer): (r'(?<=[.\w)\]])\'+', Operator), # strings - (r'(?:[IL])"', String, 'string'), - (r'[E]?"', String, combined('stringescape', 'string')), + 
(r'"""', String, 'tqstring'), + (r'"', String, 'string'), + + # regular expressions + (r'r"""', String.Regex, 'tqregex'), + (r'r"', String.Regex, 'regex'), + + # backticks + (r'`', String.Backtick, 'command'), # names - (r'@[\w.]+', Name.Decorator), - (u'(?:[a-zA-Z_\u00A1-\uffff]|%s)(?:[a-zA-Z_0-9\u00A1-\uffff]|%s)*!*' % - ((unirange(0x10000, 0x10ffff),)*2), Name), + (allowed_variable, Name), + (r'@' + allowed_variable, Name.Decorator), # numbers (r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float), @@ -120,45 +221,59 @@ class JuliaLexer(RegexLexer): (r'\d+', Number.Integer) ], - 'funcname': [ - ('[a-zA-Z_]\w*', Name.Function, '#pop'), - ('\([^\s\w{]{1,2}\)', Operator, '#pop'), - ('[^\s\w{]{1,2}', Operator, '#pop'), - ], - - 'typename': [ - ('[a-zA-Z_]\w*', Name.Class, '#pop'), - ], - - 'stringescape': [ - (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' - r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape), - ], "blockcomment": [ (r'[^=#]', Comment.Multiline), (r'#=', Comment.Multiline, '#push'), (r'=#', Comment.Multiline, '#pop'), (r'[=#]', Comment.Multiline), ], + 'string': [ (r'"', String, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + # FIXME: This escape pattern is not perfect. + (r'\\([\\"\'\$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape), # Interpolation is defined as "$" followed by the shortest full # expression, which is something we can't parse. # Include the most common cases here: $word, and $(paren'd expr). 
- (r'\$[a-zA-Z_]+', String.Interpol), - (r'\$\(', String.Interpol, 'in-intp'), + (r'\$' + allowed_variable, String.Interpol), + # (r'\$[a-zA-Z_]+', String.Interpol), + (r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'), # @printf and @sprintf formats (r'%[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]', String.Interpol), - (r'[^$%"\\]+', String), - # unhandled special signs - (r'[$%"\\]', String), + (r'.|\s', String), ], + + 'tqstring': [ + (r'"""', String, '#pop'), + (r'\\([\\"\'\$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\d+)', String.Escape), + (r'\$' + allowed_variable, String.Interpol), + (r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'), + (r'.|\s', String), + ], + + 'regex': [ + (r'"', String.Regex, '#pop'), + (r'\\"', String.Regex), + (r'.|\s', String.Regex), + ], + + 'tqregex': [ + (r'"""', String.Regex, '#pop'), + (r'.|\s', String.Regex), + ], + + 'command': [ + (r'`', String.Backtick, '#pop'), + (r'\$' + allowed_variable, String.Interpol), + (r'(\$)(\()', bygroups(String.Interpol, Punctuation), 'in-intp'), + (r'.|\s', String.Backtick) + ], + 'in-intp': [ - (r'[^()]+', String.Interpol), - (r'\(', String.Interpol, '#push'), - (r'\)', String.Interpol, '#pop'), + (r'\(', Punctuation, '#push'), + (r'\)', Punctuation, '#pop'), + include('root'), ] } @@ -177,27 +292,26 @@ class JuliaConsoleLexer(Lexer): def get_tokens_unprocessed(self, text): jllexer = JuliaLexer(**self.options) - + start = 0 curcode = '' insertions = [] + output = False + error = False - for match in line_re.finditer(text): - line = match.group() - + for line in text.splitlines(True): if line.startswith('julia>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:6])])) + insertions.append((len(curcode), [(0, Generic.Prompt, line[:6])])) + curcode += line[6:] + output = False + error = False + elif line.startswith('help?>') or line.startswith('shell>'): + yield start, Generic.Prompt, line[:6] + yield start + 6, Text, line[6:] + output = False + error 
= False + elif line.startswith(' ') and not output: + insertions.append((len(curcode), [(0, Text, line[:6])])) curcode += line[6:] - - elif line.startswith(' '): - - idx = len(curcode) - - # without is showing error on same line as before...? - line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - else: if curcode: for item in do_insertions( @@ -205,10 +319,15 @@ class JuliaConsoleLexer(Lexer): yield item curcode = '' insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: + if line.startswith('ERROR: ') or error: + yield start, Generic.Error, line + error = True + else: + yield start, Generic.Output, line + output = True + start += len(line) + + if curcode: for item in do_insertions( insertions, jllexer.get_tokens_unprocessed(curcode)): yield item diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py index 05f02a3c..67d74566 100644 --- a/pygments/lexers/lisp.py +++ b/pygments/lexers/lisp.py @@ -471,779 +471,779 @@ class RacketLexer(RegexLexer): # Generated by example.rkt _keywords = ( - '#%app', '#%datum', '#%declare', '#%expression', '#%module-begin', - '#%plain-app', '#%plain-lambda', '#%plain-module-begin', - '#%printing-module-begin', '#%provide', '#%require', - '#%stratified-body', '#%top', '#%top-interaction', - '#%variable-reference', '->', '->*', '->*m', '->d', '->dm', '->i', - '->m', '...', ':do-in', '==', '=>', '_', 'absent', 'abstract', - 'all-defined-out', 'all-from-out', 'and', 'any', 'augment', 'augment*', - 'augment-final', 'augment-final*', 'augride', 'augride*', 'begin', - 'begin-for-syntax', 'begin0', 'case', 'case->', 'case->m', - 'case-lambda', 'class', 'class*', 'class-field-accessor', - 'class-field-mutator', 'class/c', 'class/derived', 'combine-in', - 'combine-out', 'command-line', 'compound-unit', 'compound-unit/infer', - 'cond', 'cons/dc', 'contract', 'contract-out', 'contract-struct', - 'contracted', 'define', 'define-compound-unit', - 
'define-compound-unit/infer', 'define-contract-struct', - 'define-custom-hash-types', 'define-custom-set-types', - 'define-for-syntax', 'define-local-member-name', 'define-logger', - 'define-match-expander', 'define-member-name', - 'define-module-boundary-contract', 'define-namespace-anchor', - 'define-opt/c', 'define-sequence-syntax', 'define-serializable-class', - 'define-serializable-class*', 'define-signature', - 'define-signature-form', 'define-struct', 'define-struct/contract', - 'define-struct/derived', 'define-syntax', 'define-syntax-rule', - 'define-syntaxes', 'define-unit', 'define-unit-binding', - 'define-unit-from-context', 'define-unit/contract', - 'define-unit/new-import-export', 'define-unit/s', 'define-values', - 'define-values-for-export', 'define-values-for-syntax', - 'define-values/invoke-unit', 'define-values/invoke-unit/infer', - 'define/augment', 'define/augment-final', 'define/augride', - 'define/contract', 'define/final-prop', 'define/match', - 'define/overment', 'define/override', 'define/override-final', - 'define/private', 'define/public', 'define/public-final', - 'define/pubment', 'define/subexpression-pos-prop', - 'define/subexpression-pos-prop/name', 'delay', 'delay/idle', - 'delay/name', 'delay/strict', 'delay/sync', 'delay/thread', 'do', - 'else', 'except', 'except-in', 'except-out', 'export', 'extends', - 'failure-cont', 'false', 'false/c', 'field', 'field-bound?', 'file', - 'flat-murec-contract', 'flat-rec-contract', 'for', 'for*', 'for*/and', - 'for*/async', 'for*/first', 'for*/fold', 'for*/fold/derived', - 'for*/hash', 'for*/hasheq', 'for*/hasheqv', 'for*/last', 'for*/list', - 'for*/lists', 'for*/mutable-set', 'for*/mutable-seteq', - 'for*/mutable-seteqv', 'for*/or', 'for*/product', 'for*/set', - 'for*/seteq', 'for*/seteqv', 'for*/stream', 'for*/sum', 'for*/vector', - 'for*/weak-set', 'for*/weak-seteq', 'for*/weak-seteqv', 'for-label', - 'for-meta', 'for-syntax', 'for-template', 'for/and', 'for/async', - 'for/first', 'for/fold', 
'for/fold/derived', 'for/hash', 'for/hasheq', - 'for/hasheqv', 'for/last', 'for/list', 'for/lists', 'for/mutable-set', - 'for/mutable-seteq', 'for/mutable-seteqv', 'for/or', 'for/product', - 'for/set', 'for/seteq', 'for/seteqv', 'for/stream', 'for/sum', - 'for/vector', 'for/weak-set', 'for/weak-seteq', 'for/weak-seteqv', - 'gen:custom-write', 'gen:dict', 'gen:equal+hash', 'gen:set', - 'gen:stream', 'generic', 'get-field', 'hash/dc', 'if', 'implies', - 'import', 'include', 'include-at/relative-to', - 'include-at/relative-to/reader', 'include/reader', 'inherit', - 'inherit-field', 'inherit/inner', 'inherit/super', 'init', - 'init-depend', 'init-field', 'init-rest', 'inner', 'inspect', - 'instantiate', 'interface', 'interface*', 'invariant-assertion', - 'invoke-unit', 'invoke-unit/infer', 'lambda', 'lazy', 'let', 'let*', - 'let*-values', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc', - 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes', - 'letrec-syntaxes+values', 'letrec-values', 'lib', 'link', 'local', - 'local-require', 'log-debug', 'log-error', 'log-fatal', 'log-info', - 'log-warning', 'match', 'match*', 'match*/derived', 'match-define', - 'match-define-values', 'match-lambda', 'match-lambda*', - 'match-lambda**', 'match-let', 'match-let*', 'match-let*-values', - 'match-let-values', 'match-letrec', 'match-letrec-values', - 'match/derived', 'match/values', 'member-name-key', 'mixin', 'module', - 'module*', 'module+', 'nand', 'new', 'nor', 'object-contract', - 'object/c', 'only', 'only-in', 'only-meta-in', 'open', 'opt/c', 'or', - 'overment', 'overment*', 'override', 'override*', 'override-final', - 'override-final*', 'parameterize', 'parameterize*', - 'parameterize-break', 'parametric->/c', 'place', 'place*', - 'place/context', 'planet', 'prefix', 'prefix-in', 'prefix-out', - 'private', 'private*', 'prompt-tag/c', 'protect-out', 'provide', - 'provide-signature-elements', 'provide/contract', 'public', 'public*', - 'public-final', 'public-final*', 
'pubment', 'pubment*', 'quasiquote', - 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax', - 'quote-syntax/prune', 'recontract-out', 'recursive-contract', - 'relative-in', 'rename', 'rename-in', 'rename-inner', 'rename-out', - 'rename-super', 'require', 'send', 'send*', 'send+', 'send-generic', - 'send/apply', 'send/keyword-apply', 'set!', 'set!-values', - 'set-field!', 'shared', 'stream', 'stream*', 'stream-cons', 'struct', - 'struct*', 'struct-copy', 'struct-field-index', 'struct-out', - 'struct/c', 'struct/ctc', 'struct/dc', 'submod', 'super', - 'super-instantiate', 'super-make-object', 'super-new', 'syntax', - 'syntax-case', 'syntax-case*', 'syntax-id-rules', 'syntax-rules', - 'syntax/loc', 'tag', 'this', 'this%', 'thunk', 'thunk*', 'time', - 'unconstrained-domain->', 'unit', 'unit-from-context', 'unit/c', - 'unit/new-import-export', 'unit/s', 'unless', 'unquote', - 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', 'values/drop', - 'when', 'with-continuation-mark', 'with-contract', - 'with-contract-continuation-mark', 'with-handlers', 'with-handlers*', - 'with-method', 'with-syntax', u'λ' + u'#%app', u'#%datum', u'#%declare', u'#%expression', u'#%module-begin', + u'#%plain-app', u'#%plain-lambda', u'#%plain-module-begin', + u'#%printing-module-begin', u'#%provide', u'#%require', + u'#%stratified-body', u'#%top', u'#%top-interaction', + u'#%variable-reference', u'->', u'->*', u'->*m', u'->d', u'->dm', u'->i', + u'->m', u'...', u':do-in', u'==', u'=>', u'_', u'absent', u'abstract', + u'all-defined-out', u'all-from-out', u'and', u'any', u'augment', u'augment*', + u'augment-final', u'augment-final*', u'augride', u'augride*', u'begin', + u'begin-for-syntax', u'begin0', u'case', u'case->', u'case->m', + u'case-lambda', u'class', u'class*', u'class-field-accessor', + u'class-field-mutator', u'class/c', u'class/derived', u'combine-in', + u'combine-out', u'command-line', u'compound-unit', u'compound-unit/infer', + u'cond', u'cons/dc', u'contract', 
u'contract-out', u'contract-struct', + u'contracted', u'define', u'define-compound-unit', + u'define-compound-unit/infer', u'define-contract-struct', + u'define-custom-hash-types', u'define-custom-set-types', + u'define-for-syntax', u'define-local-member-name', u'define-logger', + u'define-match-expander', u'define-member-name', + u'define-module-boundary-contract', u'define-namespace-anchor', + u'define-opt/c', u'define-sequence-syntax', u'define-serializable-class', + u'define-serializable-class*', u'define-signature', + u'define-signature-form', u'define-struct', u'define-struct/contract', + u'define-struct/derived', u'define-syntax', u'define-syntax-rule', + u'define-syntaxes', u'define-unit', u'define-unit-binding', + u'define-unit-from-context', u'define-unit/contract', + u'define-unit/new-import-export', u'define-unit/s', u'define-values', + u'define-values-for-export', u'define-values-for-syntax', + u'define-values/invoke-unit', u'define-values/invoke-unit/infer', + u'define/augment', u'define/augment-final', u'define/augride', + u'define/contract', u'define/final-prop', u'define/match', + u'define/overment', u'define/override', u'define/override-final', + u'define/private', u'define/public', u'define/public-final', + u'define/pubment', u'define/subexpression-pos-prop', + u'define/subexpression-pos-prop/name', u'delay', u'delay/idle', + u'delay/name', u'delay/strict', u'delay/sync', u'delay/thread', u'do', + u'else', u'except', u'except-in', u'except-out', u'export', u'extends', + u'failure-cont', u'false', u'false/c', u'field', u'field-bound?', u'file', + u'flat-murec-contract', u'flat-rec-contract', u'for', u'for*', u'for*/and', + u'for*/async', u'for*/first', u'for*/fold', u'for*/fold/derived', + u'for*/hash', u'for*/hasheq', u'for*/hasheqv', u'for*/last', u'for*/list', + u'for*/lists', u'for*/mutable-set', u'for*/mutable-seteq', + u'for*/mutable-seteqv', u'for*/or', u'for*/product', u'for*/set', + u'for*/seteq', u'for*/seteqv', u'for*/stream', 
u'for*/sum', u'for*/vector', + u'for*/weak-set', u'for*/weak-seteq', u'for*/weak-seteqv', u'for-label', + u'for-meta', u'for-syntax', u'for-template', u'for/and', u'for/async', + u'for/first', u'for/fold', u'for/fold/derived', u'for/hash', u'for/hasheq', + u'for/hasheqv', u'for/last', u'for/list', u'for/lists', u'for/mutable-set', + u'for/mutable-seteq', u'for/mutable-seteqv', u'for/or', u'for/product', + u'for/set', u'for/seteq', u'for/seteqv', u'for/stream', u'for/sum', + u'for/vector', u'for/weak-set', u'for/weak-seteq', u'for/weak-seteqv', + u'gen:custom-write', u'gen:dict', u'gen:equal+hash', u'gen:set', + u'gen:stream', u'generic', u'get-field', u'hash/dc', u'if', u'implies', + u'import', u'include', u'include-at/relative-to', + u'include-at/relative-to/reader', u'include/reader', u'inherit', + u'inherit-field', u'inherit/inner', u'inherit/super', u'init', + u'init-depend', u'init-field', u'init-rest', u'inner', u'inspect', + u'instantiate', u'interface', u'interface*', u'invariant-assertion', + u'invoke-unit', u'invoke-unit/infer', u'lambda', u'lazy', u'let', u'let*', + u'let*-values', u'let-syntax', u'let-syntaxes', u'let-values', u'let/cc', + u'let/ec', u'letrec', u'letrec-syntax', u'letrec-syntaxes', + u'letrec-syntaxes+values', u'letrec-values', u'lib', u'link', u'local', + u'local-require', u'log-debug', u'log-error', u'log-fatal', u'log-info', + u'log-warning', u'match', u'match*', u'match*/derived', u'match-define', + u'match-define-values', u'match-lambda', u'match-lambda*', + u'match-lambda**', u'match-let', u'match-let*', u'match-let*-values', + u'match-let-values', u'match-letrec', u'match-letrec-values', + u'match/derived', u'match/values', u'member-name-key', u'mixin', u'module', + u'module*', u'module+', u'nand', u'new', u'nor', u'object-contract', + u'object/c', u'only', u'only-in', u'only-meta-in', u'open', u'opt/c', u'or', + u'overment', u'overment*', u'override', u'override*', u'override-final', + u'override-final*', u'parameterize', 
u'parameterize*', + u'parameterize-break', u'parametric->/c', u'place', u'place*', + u'place/context', u'planet', u'prefix', u'prefix-in', u'prefix-out', + u'private', u'private*', u'prompt-tag/c', u'protect-out', u'provide', + u'provide-signature-elements', u'provide/contract', u'public', u'public*', + u'public-final', u'public-final*', u'pubment', u'pubment*', u'quasiquote', + u'quasisyntax', u'quasisyntax/loc', u'quote', u'quote-syntax', + u'quote-syntax/prune', u'recontract-out', u'recursive-contract', + u'relative-in', u'rename', u'rename-in', u'rename-inner', u'rename-out', + u'rename-super', u'require', u'send', u'send*', u'send+', u'send-generic', + u'send/apply', u'send/keyword-apply', u'set!', u'set!-values', + u'set-field!', u'shared', u'stream', u'stream*', u'stream-cons', u'struct', + u'struct*', u'struct-copy', u'struct-field-index', u'struct-out', + u'struct/c', u'struct/ctc', u'struct/dc', u'submod', u'super', + u'super-instantiate', u'super-make-object', u'super-new', u'syntax', + u'syntax-case', u'syntax-case*', u'syntax-id-rules', u'syntax-rules', + u'syntax/loc', u'tag', u'this', u'this%', u'thunk', u'thunk*', u'time', + u'unconstrained-domain->', u'unit', u'unit-from-context', u'unit/c', + u'unit/new-import-export', u'unit/s', u'unless', u'unquote', + u'unquote-splicing', u'unsyntax', u'unsyntax-splicing', u'values/drop', + u'when', u'with-continuation-mark', u'with-contract', + u'with-contract-continuation-mark', u'with-handlers', u'with-handlers*', + u'with-method', u'with-syntax', u'λ' ) # Generated by example.rkt _builtins = ( - '*', '*list/c', '+', '-', '/', '<', '</c', '<=', '<=/c', '=', '=/c', - '>', '>/c', '>=', '>=/c', 'abort-current-continuation', 'abs', - 'absolute-path?', 'acos', 'add-between', 'add1', 'alarm-evt', - 'always-evt', 'and/c', 'andmap', 'angle', 'any/c', 'append', 'append*', - 'append-map', 'apply', 'argmax', 'argmin', 'arithmetic-shift', - 'arity-at-least', 'arity-at-least-value', 'arity-at-least?', - 
'arity-checking-wrapper', 'arity-includes?', 'arity=?', - 'arrow-contract-info', 'arrow-contract-info-accepts-arglist', - 'arrow-contract-info-chaperone-procedure', - 'arrow-contract-info-check-first-order', 'arrow-contract-info?', - 'asin', 'assf', 'assoc', 'assq', 'assv', 'atan', - 'bad-number-of-results', 'banner', 'base->-doms/c', 'base->-rngs/c', - 'base->?', 'between/c', 'bitwise-and', 'bitwise-bit-field', - 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor', - 'blame-add-car-context', 'blame-add-cdr-context', 'blame-add-context', - 'blame-add-missing-party', 'blame-add-nth-arg-context', - 'blame-add-range-context', 'blame-add-unknown-context', - 'blame-context', 'blame-contract', 'blame-fmt->-string', - 'blame-missing-party?', 'blame-negative', 'blame-original?', - 'blame-positive', 'blame-replace-negative', 'blame-source', - 'blame-swap', 'blame-swapped?', 'blame-update', 'blame-value', - 'blame?', 'boolean=?', 'boolean?', 'bound-identifier=?', 'box', - 'box-cas!', 'box-immutable', 'box-immutable/c', 'box/c', 'box?', - 'break-enabled', 'break-parameterization?', 'break-thread', - 'build-chaperone-contract-property', 'build-compound-type-name', - 'build-contract-property', 'build-flat-contract-property', - 'build-list', 'build-path', 'build-path/convention-type', - 'build-string', 'build-vector', 'byte-pregexp', 'byte-pregexp?', - 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes', - 'bytes->immutable-bytes', 'bytes->list', 'bytes->path', - 'bytes->path-element', 'bytes->string/latin-1', 'bytes->string/locale', - 'bytes->string/utf-8', 'bytes-append', 'bytes-append*', - 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end', - 'bytes-converter?', 'bytes-copy', 'bytes-copy!', - 'bytes-environment-variable-name?', 'bytes-fill!', 'bytes-join', - 'bytes-length', 'bytes-no-nuls?', 'bytes-open-converter', 'bytes-ref', - 'bytes-set!', 'bytes-utf-8-index', 'bytes-utf-8-length', - 'bytes-utf-8-ref', 'bytes<?', 'bytes=?', 'bytes>?', 
'bytes?', 'caaaar', - 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', - 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', - 'call-in-nested-thread', 'call-with-atomic-output-file', - 'call-with-break-parameterization', - 'call-with-composable-continuation', 'call-with-continuation-barrier', - 'call-with-continuation-prompt', 'call-with-current-continuation', - 'call-with-default-reading-parameterization', - 'call-with-escape-continuation', 'call-with-exception-handler', - 'call-with-file-lock/timeout', 'call-with-immediate-continuation-mark', - 'call-with-input-bytes', 'call-with-input-file', - 'call-with-input-file*', 'call-with-input-string', - 'call-with-output-bytes', 'call-with-output-file', - 'call-with-output-file*', 'call-with-output-string', - 'call-with-parameterization', 'call-with-semaphore', - 'call-with-semaphore/enable-break', 'call-with-values', 'call/cc', - 'call/ec', 'car', 'cartesian-product', 'cdaaar', 'cdaadr', 'cdaar', - 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar', - 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'channel-get', - 'channel-put', 'channel-put-evt', 'channel-put-evt?', - 'channel-try-get', 'channel/c', 'channel?', 'chaperone-box', - 'chaperone-channel', 'chaperone-continuation-mark-key', - 'chaperone-contract-property?', 'chaperone-contract?', 'chaperone-evt', - 'chaperone-hash', 'chaperone-hash-set', 'chaperone-of?', - 'chaperone-procedure', 'chaperone-procedure*', 'chaperone-prompt-tag', - 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector', - 'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?', - 'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', - 'char-downcase', 'char-foldcase', 'char-general-category', - 'char-graphic?', 'char-in', 'char-in/c', 'char-iso-control?', - 'char-lower-case?', 'char-numeric?', 'char-punctuation?', - 'char-ready?', 'char-symbolic?', 'char-title-case?', 'char-titlecase', - 'char-upcase', 
'char-upper-case?', 'char-utf-8-length', - 'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', - 'char?', 'check-duplicate-identifier', 'check-duplicates', - 'checked-procedure-check-and-extract', 'choice-evt', - 'class->interface', 'class-info', 'class-seal', 'class-unseal', - 'class?', 'cleanse-path', 'close-input-port', 'close-output-port', - 'coerce-chaperone-contract', 'coerce-chaperone-contracts', - 'coerce-contract', 'coerce-contract/f', 'coerce-contracts', - 'coerce-flat-contract', 'coerce-flat-contracts', 'collect-garbage', - 'collection-file-path', 'collection-path', 'combinations', 'compile', - 'compile-allow-set!-undefined', 'compile-context-preservation-enabled', - 'compile-enforce-module-constants', 'compile-syntax', - 'compiled-expression-recompile', 'compiled-expression?', - 'compiled-module-expression?', 'complete-path?', 'complex?', 'compose', - 'compose1', 'conjoin', 'conjugate', 'cons', 'cons/c', 'cons?', 'const', - 'continuation-mark-key/c', 'continuation-mark-key?', - 'continuation-mark-set->context', 'continuation-mark-set->list', - 'continuation-mark-set->list*', 'continuation-mark-set-first', - 'continuation-mark-set?', 'continuation-marks', - 'continuation-prompt-available?', 'continuation-prompt-tag?', - 'continuation?', 'contract-continuation-mark-key', - 'contract-custom-write-property-proc', 'contract-exercise', - 'contract-first-order', 'contract-first-order-passes?', - 'contract-late-neg-projection', 'contract-name', 'contract-proc', - 'contract-projection', 'contract-property?', - 'contract-random-generate', 'contract-random-generate-fail', - 'contract-random-generate-fail?', - 'contract-random-generate-get-current-environment', - 'contract-random-generate-stash', 'contract-random-generate/choose', - 'contract-stronger?', 'contract-struct-exercise', - 'contract-struct-generate', 'contract-struct-late-neg-projection', - 'contract-struct-list-contract?', 'contract-val-first-projection', - 'contract?', 
'convert-stream', 'copy-directory/files', 'copy-file', - 'copy-port', 'cos', 'cosh', 'count', 'current-blame-format', - 'current-break-parameterization', 'current-code-inspector', - 'current-command-line-arguments', 'current-compile', - 'current-compiled-file-roots', 'current-continuation-marks', - 'current-contract-region', 'current-custodian', 'current-directory', - 'current-directory-for-user', 'current-drive', - 'current-environment-variables', 'current-error-port', 'current-eval', - 'current-evt-pseudo-random-generator', - 'current-force-delete-permissions', 'current-future', - 'current-gc-milliseconds', 'current-get-interaction-input-port', - 'current-inexact-milliseconds', 'current-input-port', - 'current-inspector', 'current-library-collection-links', - 'current-library-collection-paths', 'current-load', - 'current-load-extension', 'current-load-relative-directory', - 'current-load/use-compiled', 'current-locale', 'current-logger', - 'current-memory-use', 'current-milliseconds', - 'current-module-declare-name', 'current-module-declare-source', - 'current-module-name-resolver', 'current-module-path-for-load', - 'current-namespace', 'current-output-port', 'current-parameterization', - 'current-plumber', 'current-preserved-thread-cell-values', - 'current-print', 'current-process-milliseconds', 'current-prompt-read', - 'current-pseudo-random-generator', 'current-read-interaction', - 'current-reader-guard', 'current-readtable', 'current-seconds', - 'current-security-guard', 'current-subprocess-custodian-mode', - 'current-thread', 'current-thread-group', - 'current-thread-initial-stack-size', - 'current-write-relative-directory', 'curry', 'curryr', - 'custodian-box-value', 'custodian-box?', 'custodian-limit-memory', - 'custodian-managed-list', 'custodian-memory-accounting-available?', - 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?', - 'custom-print-quotable-accessor', 'custom-print-quotable?', - 'custom-write-accessor', 
'custom-write-property-proc', 'custom-write?', - 'date', 'date*', 'date*-nanosecond', 'date*-time-zone-name', 'date*?', - 'date-day', 'date-dst?', 'date-hour', 'date-minute', 'date-month', - 'date-second', 'date-time-zone-offset', 'date-week-day', 'date-year', - 'date-year-day', 'date?', 'datum->syntax', 'datum-intern-literal', - 'default-continuation-prompt-tag', 'degrees->radians', - 'delete-directory', 'delete-directory/files', 'delete-file', - 'denominator', 'dict->list', 'dict-can-functional-set?', - 'dict-can-remove-keys?', 'dict-clear', 'dict-clear!', 'dict-copy', - 'dict-count', 'dict-empty?', 'dict-for-each', 'dict-has-key?', - 'dict-implements/c', 'dict-implements?', 'dict-iter-contract', - 'dict-iterate-first', 'dict-iterate-key', 'dict-iterate-next', - 'dict-iterate-value', 'dict-key-contract', 'dict-keys', 'dict-map', - 'dict-mutable?', 'dict-ref', 'dict-ref!', 'dict-remove', - 'dict-remove!', 'dict-set', 'dict-set!', 'dict-set*', 'dict-set*!', - 'dict-update', 'dict-update!', 'dict-value-contract', 'dict-values', - 'dict?', 'directory-exists?', 'directory-list', 'disjoin', 'display', - 'display-lines', 'display-lines-to-file', 'display-to-file', - 'displayln', 'double-flonum?', 'drop', 'drop-common-prefix', - 'drop-right', 'dropf', 'dropf-right', 'dump-memory-stats', - 'dup-input-port', 'dup-output-port', 'dynamic->*', 'dynamic-get-field', - 'dynamic-object/c', 'dynamic-place', 'dynamic-place*', - 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-send', - 'dynamic-set-field!', 'dynamic-wind', 'eighth', 'empty', - 'empty-sequence', 'empty-stream', 'empty?', - 'environment-variables-copy', 'environment-variables-names', - 'environment-variables-ref', 'environment-variables-set!', - 'environment-variables?', 'eof', 'eof-evt', 'eof-object?', - 'ephemeron-value', 'ephemeron?', 'eprintf', 'eq-contract-val', - 'eq-contract?', 'eq-hash-code', 'eq?', 'equal-contract-val', - 'equal-contract?', 'equal-hash-code', 'equal-secondary-hash-code', - 
'equal<%>', 'equal?', 'equal?/recur', 'eqv-hash-code', 'eqv?', 'error', - 'error-display-handler', 'error-escape-handler', - 'error-print-context-length', 'error-print-source-location', - 'error-print-width', 'error-value->string-handler', 'eval', - 'eval-jit-enabled', 'eval-syntax', 'even?', 'evt/c', 'evt?', - 'exact->inexact', 'exact-ceiling', 'exact-floor', 'exact-integer?', - 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact-round', - 'exact-truncate', 'exact?', 'executable-yield-handler', 'exit', - 'exit-handler', 'exn', 'exn-continuation-marks', 'exn-message', - 'exn:break', 'exn:break-continuation', 'exn:break:hang-up', - 'exn:break:hang-up?', 'exn:break:terminate', 'exn:break:terminate?', - 'exn:break?', 'exn:fail', 'exn:fail:contract', - 'exn:fail:contract:arity', 'exn:fail:contract:arity?', - 'exn:fail:contract:blame', 'exn:fail:contract:blame-object', - 'exn:fail:contract:blame?', 'exn:fail:contract:continuation', - 'exn:fail:contract:continuation?', 'exn:fail:contract:divide-by-zero', - 'exn:fail:contract:divide-by-zero?', - 'exn:fail:contract:non-fixnum-result', - 'exn:fail:contract:non-fixnum-result?', 'exn:fail:contract:variable', - 'exn:fail:contract:variable-id', 'exn:fail:contract:variable?', - 'exn:fail:contract?', 'exn:fail:filesystem', - 'exn:fail:filesystem:errno', 'exn:fail:filesystem:errno-errno', - 'exn:fail:filesystem:errno?', 'exn:fail:filesystem:exists', - 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:missing-module', - 'exn:fail:filesystem:missing-module-path', - 'exn:fail:filesystem:missing-module?', 'exn:fail:filesystem:version', - 'exn:fail:filesystem:version?', 'exn:fail:filesystem?', - 'exn:fail:network', 'exn:fail:network:errno', - 'exn:fail:network:errno-errno', 'exn:fail:network:errno?', - 'exn:fail:network?', 'exn:fail:object', 'exn:fail:object?', - 'exn:fail:out-of-memory', 'exn:fail:out-of-memory?', 'exn:fail:read', - 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?', - 
'exn:fail:read:non-char', 'exn:fail:read:non-char?', 'exn:fail:read?', - 'exn:fail:syntax', 'exn:fail:syntax-exprs', - 'exn:fail:syntax:missing-module', - 'exn:fail:syntax:missing-module-path', - 'exn:fail:syntax:missing-module?', 'exn:fail:syntax:unbound', - 'exn:fail:syntax:unbound?', 'exn:fail:syntax?', 'exn:fail:unsupported', - 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?', - 'exn:fail?', 'exn:misc:match?', 'exn:missing-module-accessor', - 'exn:missing-module?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', - 'exp', 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once', - 'expand-syntax-to-top-form', 'expand-to-top-form', 'expand-user-path', - 'explode-path', 'expt', 'externalizable<%>', 'failure-result/c', - 'false?', 'field-names', 'fifth', 'file->bytes', 'file->bytes-lines', - 'file->lines', 'file->list', 'file->string', 'file->value', - 'file-exists?', 'file-name-from-path', 'file-or-directory-identity', - 'file-or-directory-modify-seconds', 'file-or-directory-permissions', - 'file-position', 'file-position*', 'file-size', - 'file-stream-buffer-mode', 'file-stream-port?', 'file-truncate', - 'filename-extension', 'filesystem-change-evt', - 'filesystem-change-evt-cancel', 'filesystem-change-evt?', - 'filesystem-root-list', 'filter', 'filter-map', 'filter-not', - 'filter-read-input-port', 'find-executable-path', 'find-files', - 'find-library-collection-links', 'find-library-collection-paths', - 'find-relative-path', 'find-system-path', 'findf', 'first', - 'first-or/c', 'fixnum?', 'flat-contract', 'flat-contract-predicate', - 'flat-contract-property?', 'flat-contract?', 'flat-named-contract', - 'flatten', 'floating-point-bytes->real', 'flonum?', 'floor', - 'flush-output', 'fold-files', 'foldl', 'foldr', 'for-each', 'force', - 'format', 'fourth', 'fprintf', 'free-identifier=?', - 'free-label-identifier=?', 'free-template-identifier=?', - 'free-transformer-identifier=?', 'fsemaphore-count', 'fsemaphore-post', - 'fsemaphore-try-wait?', 
'fsemaphore-wait', 'fsemaphore?', 'future', - 'future?', 'futures-enabled?', 'gcd', 'generate-member-key', - 'generate-temporaries', 'generic-set?', 'generic?', 'gensym', - 'get-output-bytes', 'get-output-string', 'get-preference', - 'get/build-late-neg-projection', 'get/build-val-first-projection', - 'getenv', 'global-port-print-handler', 'group-by', 'group-execute-bit', - 'group-read-bit', 'group-write-bit', 'guard-evt', 'handle-evt', - 'handle-evt?', 'has-blame?', 'has-contract?', 'hash', 'hash->list', - 'hash-clear', 'hash-clear!', 'hash-copy', 'hash-copy-clear', - 'hash-count', 'hash-empty?', 'hash-eq?', 'hash-equal?', 'hash-eqv?', - 'hash-for-each', 'hash-has-key?', 'hash-iterate-first', - 'hash-iterate-key', 'hash-iterate-key+value', 'hash-iterate-next', - 'hash-iterate-pair', 'hash-iterate-value', 'hash-keys', 'hash-map', - 'hash-placeholder?', 'hash-ref', 'hash-ref!', 'hash-remove', - 'hash-remove!', 'hash-set', 'hash-set!', 'hash-set*', 'hash-set*!', - 'hash-update', 'hash-update!', 'hash-values', 'hash-weak?', 'hash/c', - 'hash?', 'hasheq', 'hasheqv', 'identifier-binding', - 'identifier-binding-symbol', 'identifier-label-binding', - 'identifier-prune-lexical-context', - 'identifier-prune-to-source-module', - 'identifier-remove-from-definition-context', - 'identifier-template-binding', 'identifier-transformer-binding', - 'identifier?', 'identity', 'if/c', 'imag-part', 'immutable?', - 'impersonate-box', 'impersonate-channel', - 'impersonate-continuation-mark-key', 'impersonate-hash', - 'impersonate-hash-set', 'impersonate-procedure', - 'impersonate-procedure*', 'impersonate-prompt-tag', - 'impersonate-struct', 'impersonate-vector', 'impersonator-contract?', - 'impersonator-ephemeron', 'impersonator-of?', - 'impersonator-prop:application-mark', 'impersonator-prop:blame', - 'impersonator-prop:contracted', - 'impersonator-property-accessor-procedure?', 'impersonator-property?', - 'impersonator?', 'implementation?', 'implementation?/c', 'in-bytes', - 
'in-bytes-lines', 'in-combinations', 'in-cycle', 'in-dict', - 'in-dict-keys', 'in-dict-pairs', 'in-dict-values', 'in-directory', - 'in-hash', 'in-hash-keys', 'in-hash-pairs', 'in-hash-values', - 'in-immutable-hash', 'in-immutable-hash-keys', - 'in-immutable-hash-pairs', 'in-immutable-hash-values', - 'in-immutable-set', 'in-indexed', 'in-input-port-bytes', - 'in-input-port-chars', 'in-lines', 'in-list', 'in-mlist', - 'in-mutable-hash', 'in-mutable-hash-keys', 'in-mutable-hash-pairs', - 'in-mutable-hash-values', 'in-mutable-set', 'in-naturals', - 'in-parallel', 'in-permutations', 'in-port', 'in-producer', 'in-range', - 'in-sequences', 'in-set', 'in-slice', 'in-stream', 'in-string', - 'in-syntax', 'in-value', 'in-values*-sequence', 'in-values-sequence', - 'in-vector', 'in-weak-hash', 'in-weak-hash-keys', 'in-weak-hash-pairs', - 'in-weak-hash-values', 'in-weak-set', 'inexact->exact', - 'inexact-real?', 'inexact?', 'infinite?', 'input-port-append', - 'input-port?', 'inspector?', 'instanceof/c', 'integer->char', - 'integer->integer-bytes', 'integer-bytes->integer', 'integer-in', - 'integer-length', 'integer-sqrt', 'integer-sqrt/remainder', 'integer?', - 'interface->method-names', 'interface-extension?', 'interface?', - 'internal-definition-context-binding-identifiers', - 'internal-definition-context-introduce', - 'internal-definition-context-seal', 'internal-definition-context?', - 'is-a?', 'is-a?/c', 'keyword->string', 'keyword-apply', 'keyword<?', - 'keyword?', 'keywords-match', 'kill-thread', 'last', 'last-pair', - 'lcm', 'length', 'liberal-define-context?', 'link-exists?', 'list', - 'list*', 'list*of', 'list->bytes', 'list->mutable-set', - 'list->mutable-seteq', 'list->mutable-seteqv', 'list->set', - 'list->seteq', 'list->seteqv', 'list->string', 'list->vector', - 'list->weak-set', 'list->weak-seteq', 'list->weak-seteqv', - 'list-contract?', 'list-prefix?', 'list-ref', 'list-set', 'list-tail', - 'list-update', 'list/c', 'list?', 'listen-port-number?', 'listof', - 
'load', 'load-extension', 'load-on-demand-enabled', 'load-relative', - 'load-relative-extension', 'load/cd', 'load/use-compiled', - 'local-expand', 'local-expand/capture-lifts', - 'local-transformer-expand', 'local-transformer-expand/capture-lifts', - 'locale-string-encoding', 'log', 'log-all-levels', 'log-level-evt', - 'log-level?', 'log-max-level', 'log-message', 'log-receiver?', - 'logger-name', 'logger?', 'magnitude', 'make-arity-at-least', - 'make-base-empty-namespace', 'make-base-namespace', 'make-bytes', - 'make-channel', 'make-chaperone-contract', - 'make-continuation-mark-key', 'make-continuation-prompt-tag', - 'make-contract', 'make-custodian', 'make-custodian-box', - 'make-custom-hash', 'make-custom-hash-types', 'make-custom-set', - 'make-custom-set-types', 'make-date', 'make-date*', - 'make-derived-parameter', 'make-directory', 'make-directory*', - 'make-do-sequence', 'make-empty-namespace', - 'make-environment-variables', 'make-ephemeron', 'make-exn', - 'make-exn:break', 'make-exn:break:hang-up', 'make-exn:break:terminate', - 'make-exn:fail', 'make-exn:fail:contract', - 'make-exn:fail:contract:arity', 'make-exn:fail:contract:blame', - 'make-exn:fail:contract:continuation', - 'make-exn:fail:contract:divide-by-zero', - 'make-exn:fail:contract:non-fixnum-result', - 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem', - 'make-exn:fail:filesystem:errno', 'make-exn:fail:filesystem:exists', - 'make-exn:fail:filesystem:missing-module', - 'make-exn:fail:filesystem:version', 'make-exn:fail:network', - 'make-exn:fail:network:errno', 'make-exn:fail:object', - 'make-exn:fail:out-of-memory', 'make-exn:fail:read', - 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char', - 'make-exn:fail:syntax', 'make-exn:fail:syntax:missing-module', - 'make-exn:fail:syntax:unbound', 'make-exn:fail:unsupported', - 'make-exn:fail:user', 'make-file-or-directory-link', - 'make-flat-contract', 'make-fsemaphore', 'make-generic', - 'make-handle-get-preference-locked', 
'make-hash', - 'make-hash-placeholder', 'make-hasheq', 'make-hasheq-placeholder', - 'make-hasheqv', 'make-hasheqv-placeholder', - 'make-immutable-custom-hash', 'make-immutable-hash', - 'make-immutable-hasheq', 'make-immutable-hasheqv', - 'make-impersonator-property', 'make-input-port', - 'make-input-port/read-to-peek', 'make-inspector', - 'make-keyword-procedure', 'make-known-char-range-list', - 'make-limited-input-port', 'make-list', 'make-lock-file-name', - 'make-log-receiver', 'make-logger', 'make-mixin-contract', - 'make-mutable-custom-set', 'make-none/c', 'make-object', - 'make-output-port', 'make-parameter', 'make-parent-directory*', - 'make-phantom-bytes', 'make-pipe', 'make-pipe-with-specials', - 'make-placeholder', 'make-plumber', 'make-polar', 'make-prefab-struct', - 'make-primitive-class', 'make-proj-contract', - 'make-pseudo-random-generator', 'make-reader-graph', 'make-readtable', - 'make-rectangular', 'make-rename-transformer', - 'make-resolved-module-path', 'make-security-guard', 'make-semaphore', - 'make-set!-transformer', 'make-shared-bytes', 'make-sibling-inspector', - 'make-special-comment', 'make-srcloc', 'make-string', - 'make-struct-field-accessor', 'make-struct-field-mutator', - 'make-struct-type', 'make-struct-type-property', - 'make-syntax-delta-introducer', 'make-syntax-introducer', - 'make-temporary-file', 'make-tentative-pretty-print-output-port', - 'make-thread-cell', 'make-thread-group', 'make-vector', - 'make-weak-box', 'make-weak-custom-hash', 'make-weak-custom-set', - 'make-weak-hash', 'make-weak-hasheq', 'make-weak-hasheqv', - 'make-will-executor', 'map', 'match-equality-test', - 'matches-arity-exactly?', 'max', 'mcar', 'mcdr', 'mcons', 'member', - 'member-name-key-hash-code', 'member-name-key=?', 'member-name-key?', - 'memf', 'memq', 'memv', 'merge-input', 'method-in-interface?', 'min', - 'mixin-contract', 'module->exports', 'module->imports', - 'module->language-info', 'module->namespace', - 
'module-compiled-cross-phase-persistent?', 'module-compiled-exports', - 'module-compiled-imports', 'module-compiled-language-info', - 'module-compiled-name', 'module-compiled-submodules', - 'module-declared?', 'module-path-index-join', - 'module-path-index-resolve', 'module-path-index-split', - 'module-path-index-submodule', 'module-path-index?', 'module-path?', - 'module-predefined?', 'module-provide-protected?', 'modulo', 'mpair?', - 'mutable-set', 'mutable-seteq', 'mutable-seteqv', 'n->th', - 'nack-guard-evt', 'namespace-anchor->empty-namespace', - 'namespace-anchor->namespace', 'namespace-anchor?', - 'namespace-attach-module', 'namespace-attach-module-declaration', - 'namespace-base-phase', 'namespace-mapped-symbols', - 'namespace-module-identifier', 'namespace-module-registry', - 'namespace-require', 'namespace-require/constant', - 'namespace-require/copy', 'namespace-require/expansion-time', - 'namespace-set-variable-value!', 'namespace-symbol->identifier', - 'namespace-syntax-introduce', 'namespace-undefine-variable!', - 'namespace-unprotect-module', 'namespace-variable-value', 'namespace?', - 'nan?', 'natural-number/c', 'negate', 'negative?', 'never-evt', - u'new-∀/c', u'new-∃/c', 'newline', 'ninth', 'non-empty-listof', - 'non-empty-string?', 'none/c', 'normal-case-path', 'normalize-arity', - 'normalize-path', 'normalized-arity?', 'not', 'not/c', 'null', 'null?', - 'number->string', 'number?', 'numerator', 'object%', 'object->vector', - 'object-info', 'object-interface', 'object-method-arity-includes?', - 'object-name', 'object-or-false=?', 'object=?', 'object?', 'odd?', - 'one-of/c', 'open-input-bytes', 'open-input-file', - 'open-input-output-file', 'open-input-string', 'open-output-bytes', - 'open-output-file', 'open-output-nowhere', 'open-output-string', - 'or/c', 'order-of-magnitude', 'ormap', 'other-execute-bit', - 'other-read-bit', 'other-write-bit', 'output-port?', 'pair?', - 'parameter-procedure=?', 'parameter/c', 'parameter?', - 
'parameterization?', 'parse-command-line', 'partition', 'path->bytes', - 'path->complete-path', 'path->directory-path', 'path->string', - 'path-add-suffix', 'path-convention-type', 'path-element->bytes', - 'path-element->string', 'path-element?', 'path-for-some-system?', - 'path-list-string->path-list', 'path-only', 'path-replace-suffix', - 'path-string?', 'path<?', 'path?', 'pathlist-closure', 'peek-byte', - 'peek-byte-or-special', 'peek-bytes', 'peek-bytes!', 'peek-bytes!-evt', - 'peek-bytes-avail!', 'peek-bytes-avail!*', 'peek-bytes-avail!-evt', - 'peek-bytes-avail!/enable-break', 'peek-bytes-evt', 'peek-char', - 'peek-char-or-special', 'peek-string', 'peek-string!', - 'peek-string!-evt', 'peek-string-evt', 'peeking-input-port', - 'permutations', 'phantom-bytes?', 'pi', 'pi.f', 'pipe-content-length', - 'place-break', 'place-channel', 'place-channel-get', - 'place-channel-put', 'place-channel-put/get', 'place-channel?', - 'place-dead-evt', 'place-enabled?', 'place-kill', 'place-location?', - 'place-message-allowed?', 'place-sleep', 'place-wait', 'place?', - 'placeholder-get', 'placeholder-set!', 'placeholder?', - 'plumber-add-flush!', 'plumber-flush-all', - 'plumber-flush-handle-remove!', 'plumber-flush-handle?', 'plumber?', - 'poll-guard-evt', 'port->bytes', 'port->bytes-lines', 'port->lines', - 'port->list', 'port->string', 'port-closed-evt', 'port-closed?', - 'port-commit-peeked', 'port-count-lines!', 'port-count-lines-enabled', - 'port-counts-lines?', 'port-display-handler', 'port-file-identity', - 'port-file-unlock', 'port-next-location', 'port-number?', - 'port-print-handler', 'port-progress-evt', - 'port-provides-progress-evts?', 'port-read-handler', - 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?', - 'port-writes-special?', 'port?', 'positive?', 'predicate/c', - 'prefab-key->struct-type', 'prefab-key?', 'prefab-struct-key', - 'preferences-lock-file-mode', 'pregexp', 'pregexp?', 'pretty-display', - 'pretty-format', 'pretty-print', 
'pretty-print-.-symbol-without-bars', - 'pretty-print-abbreviate-read-macros', 'pretty-print-columns', - 'pretty-print-current-style-table', 'pretty-print-depth', - 'pretty-print-exact-as-decimal', 'pretty-print-extend-style-table', - 'pretty-print-handler', 'pretty-print-newline', - 'pretty-print-post-print-hook', 'pretty-print-pre-print-hook', - 'pretty-print-print-hook', 'pretty-print-print-line', - 'pretty-print-remap-stylable', 'pretty-print-show-inexactness', - 'pretty-print-size-hook', 'pretty-print-style-table?', - 'pretty-printing', 'pretty-write', 'primitive-closure?', - 'primitive-result-arity', 'primitive?', 'print', 'print-as-expression', - 'print-boolean-long-form', 'print-box', 'print-graph', - 'print-hash-table', 'print-mpair-curly-braces', - 'print-pair-curly-braces', 'print-reader-abbreviations', - 'print-struct', 'print-syntax-width', 'print-unreadable', - 'print-vector-length', 'printable/c', 'printable<%>', 'printf', - 'println', 'procedure->method', 'procedure-arity', - 'procedure-arity-includes/c', 'procedure-arity-includes?', - 'procedure-arity?', 'procedure-closure-contents-eq?', - 'procedure-extract-target', 'procedure-keywords', - 'procedure-reduce-arity', 'procedure-reduce-keyword-arity', - 'procedure-rename', 'procedure-result-arity', 'procedure-specialize', - 'procedure-struct-type?', 'procedure?', 'process', 'process*', - 'process*/ports', 'process/ports', 'processor-count', 'progress-evt?', - 'promise-forced?', 'promise-running?', 'promise/c', 'promise/name?', - 'promise?', 'prop:arity-string', 'prop:arrow-contract', - 'prop:arrow-contract-get-info', 'prop:arrow-contract?', 'prop:blame', - 'prop:chaperone-contract', 'prop:checked-procedure', 'prop:contract', - 'prop:contracted', 'prop:custom-print-quotable', 'prop:custom-write', - 'prop:dict', 'prop:dict/contract', 'prop:equal+hash', 'prop:evt', - 'prop:exn:missing-module', 'prop:exn:srclocs', - 'prop:expansion-contexts', 'prop:flat-contract', - 'prop:impersonator-of', 
'prop:input-port', - 'prop:liberal-define-context', 'prop:object-name', - 'prop:opt-chaperone-contract', 'prop:opt-chaperone-contract-get-test', - 'prop:opt-chaperone-contract?', 'prop:orc-contract', - 'prop:orc-contract-get-subcontracts', 'prop:orc-contract?', - 'prop:output-port', 'prop:place-location', 'prop:procedure', - 'prop:recursive-contract', 'prop:recursive-contract-unroll', - 'prop:recursive-contract?', 'prop:rename-transformer', 'prop:sequence', - 'prop:set!-transformer', 'prop:stream', 'proper-subset?', - 'pseudo-random-generator->vector', 'pseudo-random-generator-vector?', - 'pseudo-random-generator?', 'put-preferences', 'putenv', 'quotient', - 'quotient/remainder', 'radians->degrees', 'raise', - 'raise-argument-error', 'raise-arguments-error', 'raise-arity-error', - 'raise-blame-error', 'raise-contract-error', 'raise-mismatch-error', - 'raise-not-cons-blame-error', 'raise-range-error', - 'raise-result-error', 'raise-syntax-error', 'raise-type-error', - 'raise-user-error', 'random', 'random-seed', 'range', 'rational?', - 'rationalize', 'read', 'read-accept-bar-quote', 'read-accept-box', - 'read-accept-compiled', 'read-accept-dot', 'read-accept-graph', - 'read-accept-infix-dot', 'read-accept-lang', 'read-accept-quasiquote', - 'read-accept-reader', 'read-byte', 'read-byte-or-special', - 'read-bytes', 'read-bytes!', 'read-bytes!-evt', 'read-bytes-avail!', - 'read-bytes-avail!*', 'read-bytes-avail!-evt', - 'read-bytes-avail!/enable-break', 'read-bytes-evt', 'read-bytes-line', - 'read-bytes-line-evt', 'read-case-sensitive', 'read-cdot', 'read-char', - 'read-char-or-special', 'read-curly-brace-as-paren', - 'read-curly-brace-with-tag', 'read-decimal-as-inexact', - 'read-eval-print-loop', 'read-language', 'read-line', 'read-line-evt', - 'read-on-demand-source', 'read-square-bracket-as-paren', - 'read-square-bracket-with-tag', 'read-string', 'read-string!', - 'read-string!-evt', 'read-string-evt', 'read-syntax', - 'read-syntax/recursive', 'read/recursive', 
'readtable-mapping', - 'readtable?', 'real->decimal-string', 'real->double-flonum', - 'real->floating-point-bytes', 'real->single-flonum', 'real-in', - 'real-part', 'real?', 'reencode-input-port', 'reencode-output-port', - 'regexp', 'regexp-match', 'regexp-match*', 'regexp-match-evt', - 'regexp-match-exact?', 'regexp-match-peek', - 'regexp-match-peek-immediate', 'regexp-match-peek-positions', - 'regexp-match-peek-positions*', - 'regexp-match-peek-positions-immediate', - 'regexp-match-peek-positions-immediate/end', - 'regexp-match-peek-positions/end', 'regexp-match-positions', - 'regexp-match-positions*', 'regexp-match-positions/end', - 'regexp-match/end', 'regexp-match?', 'regexp-max-lookbehind', - 'regexp-quote', 'regexp-replace', 'regexp-replace*', - 'regexp-replace-quote', 'regexp-replaces', 'regexp-split', - 'regexp-try-match', 'regexp?', 'relative-path?', 'relocate-input-port', - 'relocate-output-port', 'remainder', 'remf', 'remf*', 'remove', - 'remove*', 'remove-duplicates', 'remq', 'remq*', 'remv', 'remv*', - 'rename-contract', 'rename-file-or-directory', - 'rename-transformer-target', 'rename-transformer?', 'replace-evt', - 'reroot-path', 'resolve-path', 'resolved-module-path-name', - 'resolved-module-path?', 'rest', 'reverse', 'round', 'second', - 'seconds->date', 'security-guard?', 'semaphore-peek-evt', - 'semaphore-peek-evt?', 'semaphore-post', 'semaphore-try-wait?', - 'semaphore-wait', 'semaphore-wait/enable-break', 'semaphore?', - 'sequence->list', 'sequence->stream', 'sequence-add-between', - 'sequence-andmap', 'sequence-append', 'sequence-count', - 'sequence-filter', 'sequence-fold', 'sequence-for-each', - 'sequence-generate', 'sequence-generate*', 'sequence-length', - 'sequence-map', 'sequence-ormap', 'sequence-ref', 'sequence-tail', - 'sequence/c', 'sequence?', 'set', 'set!-transformer-procedure', - 'set!-transformer?', 'set->list', 'set->stream', 'set-add', 'set-add!', - 'set-box!', 'set-clear', 'set-clear!', 'set-copy', 'set-copy-clear', - 
'set-count', 'set-empty?', 'set-eq?', 'set-equal?', 'set-eqv?', - 'set-first', 'set-for-each', 'set-implements/c', 'set-implements?', - 'set-intersect', 'set-intersect!', 'set-map', 'set-mcar!', 'set-mcdr!', - 'set-member?', 'set-mutable?', 'set-phantom-bytes!', - 'set-port-next-location!', 'set-remove', 'set-remove!', 'set-rest', - 'set-some-basic-contracts!', 'set-subtract', 'set-subtract!', - 'set-symmetric-difference', 'set-symmetric-difference!', 'set-union', - 'set-union!', 'set-weak?', 'set/c', 'set=?', 'set?', 'seteq', 'seteqv', - 'seventh', 'sgn', 'shared-bytes', 'shell-execute', 'shrink-path-wrt', - 'shuffle', 'simple-form-path', 'simplify-path', 'sin', - 'single-flonum?', 'sinh', 'sixth', 'skip-projection-wrapper?', 'sleep', - 'some-system-path->string', 'sort', 'special-comment-value', - 'special-comment?', 'special-filter-input-port', 'split-at', - 'split-at-right', 'split-common-prefix', 'split-path', 'splitf-at', - 'splitf-at-right', 'sqr', 'sqrt', 'srcloc', 'srcloc->string', - 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source', - 'srcloc-span', 'srcloc?', 'stop-after', 'stop-before', 'stream->list', - 'stream-add-between', 'stream-andmap', 'stream-append', 'stream-count', - 'stream-empty?', 'stream-filter', 'stream-first', 'stream-fold', - 'stream-for-each', 'stream-length', 'stream-map', 'stream-ormap', - 'stream-ref', 'stream-rest', 'stream-tail', 'stream/c', 'stream?', - 'string', 'string->bytes/latin-1', 'string->bytes/locale', - 'string->bytes/utf-8', 'string->immutable-string', 'string->keyword', - 'string->list', 'string->number', 'string->path', - 'string->path-element', 'string->some-system-path', 'string->symbol', - 'string->uninterned-symbol', 'string->unreadable-symbol', - 'string-append', 'string-append*', 'string-ci<=?', 'string-ci<?', - 'string-ci=?', 'string-ci>=?', 'string-ci>?', 'string-contains?', - 'string-copy', 'string-copy!', 'string-downcase', - 'string-environment-variable-name?', 'string-fill!', 
'string-foldcase', - 'string-join', 'string-len/c', 'string-length', 'string-locale-ci<?', - 'string-locale-ci=?', 'string-locale-ci>?', 'string-locale-downcase', - 'string-locale-upcase', 'string-locale<?', 'string-locale=?', - 'string-locale>?', 'string-no-nuls?', 'string-normalize-nfc', - 'string-normalize-nfd', 'string-normalize-nfkc', - 'string-normalize-nfkd', 'string-normalize-spaces', 'string-port?', - 'string-prefix?', 'string-ref', 'string-replace', 'string-set!', - 'string-split', 'string-suffix?', 'string-titlecase', 'string-trim', - 'string-upcase', 'string-utf-8-length', 'string<=?', 'string<?', - 'string=?', 'string>=?', 'string>?', 'string?', 'struct->vector', - 'struct-accessor-procedure?', 'struct-constructor-procedure?', - 'struct-info', 'struct-mutator-procedure?', - 'struct-predicate-procedure?', 'struct-type-info', - 'struct-type-make-constructor', 'struct-type-make-predicate', - 'struct-type-property-accessor-procedure?', 'struct-type-property/c', - 'struct-type-property?', 'struct-type?', 'struct:arity-at-least', - 'struct:arrow-contract-info', 'struct:date', 'struct:date*', - 'struct:exn', 'struct:exn:break', 'struct:exn:break:hang-up', - 'struct:exn:break:terminate', 'struct:exn:fail', - 'struct:exn:fail:contract', 'struct:exn:fail:contract:arity', - 'struct:exn:fail:contract:blame', - 'struct:exn:fail:contract:continuation', - 'struct:exn:fail:contract:divide-by-zero', - 'struct:exn:fail:contract:non-fixnum-result', - 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem', - 'struct:exn:fail:filesystem:errno', - 'struct:exn:fail:filesystem:exists', - 'struct:exn:fail:filesystem:missing-module', - 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network', - 'struct:exn:fail:network:errno', 'struct:exn:fail:object', - 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read', - 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char', - 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:missing-module', - 
'struct:exn:fail:syntax:unbound', 'struct:exn:fail:unsupported', - 'struct:exn:fail:user', 'struct:srcloc', - 'struct:wrapped-extra-arg-arrow', 'struct?', 'sub1', 'subbytes', - 'subclass?', 'subclass?/c', 'subprocess', 'subprocess-group-enabled', - 'subprocess-kill', 'subprocess-pid', 'subprocess-status', - 'subprocess-wait', 'subprocess?', 'subset?', 'substring', 'suggest/c', - 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', 'symbol<?', - 'symbol=?', 'symbol?', 'symbols', 'sync', 'sync/enable-break', - 'sync/timeout', 'sync/timeout/enable-break', 'syntax->datum', - 'syntax->list', 'syntax-arm', 'syntax-column', 'syntax-debug-info', - 'syntax-disarm', 'syntax-e', 'syntax-line', - 'syntax-local-bind-syntaxes', 'syntax-local-certifier', - 'syntax-local-context', 'syntax-local-expand-expression', - 'syntax-local-get-shadower', 'syntax-local-identifier-as-binding', - 'syntax-local-introduce', 'syntax-local-lift-context', - 'syntax-local-lift-expression', 'syntax-local-lift-module', - 'syntax-local-lift-module-end-declaration', - 'syntax-local-lift-provide', 'syntax-local-lift-require', - 'syntax-local-lift-values-expression', - 'syntax-local-make-definition-context', - 'syntax-local-make-delta-introducer', - 'syntax-local-module-defined-identifiers', - 'syntax-local-module-exports', - 'syntax-local-module-required-identifiers', 'syntax-local-name', - 'syntax-local-phase-level', 'syntax-local-submodules', - 'syntax-local-transforming-module-provides?', 'syntax-local-value', - 'syntax-local-value/immediate', 'syntax-original?', 'syntax-position', - 'syntax-property', 'syntax-property-preserved?', - 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm', - 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source', - 'syntax-source-module', 'syntax-span', 'syntax-taint', - 'syntax-tainted?', 'syntax-track-origin', - 'syntax-transforming-module-expression?', - 'syntax-transforming-with-lifts?', 'syntax-transforming?', 'syntax/c', - 'syntax?', 
'system', 'system*', 'system*/exit-code', - 'system-big-endian?', 'system-idle-evt', 'system-language+country', - 'system-library-subpath', 'system-path-convention-type', 'system-type', - 'system/exit-code', 'tail-marks-match?', 'take', 'take-common-prefix', - 'take-right', 'takef', 'takef-right', 'tan', 'tanh', - 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt', - 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses', - 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', 'tcp-listen', - 'tcp-listener?', 'tcp-port?', 'tentative-pretty-print-port-cancel', - 'tentative-pretty-print-port-transfer', 'tenth', 'terminal-port?', - 'the-unsupplied-arg', 'third', 'thread', 'thread-cell-ref', - 'thread-cell-set!', 'thread-cell-values?', 'thread-cell?', - 'thread-dead-evt', 'thread-dead?', 'thread-group?', 'thread-receive', - 'thread-receive-evt', 'thread-resume', 'thread-resume-evt', - 'thread-rewind-receive', 'thread-running?', 'thread-send', - 'thread-suspend', 'thread-suspend-evt', 'thread-try-receive', - 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply', - 'touch', 'transplant-input-port', 'transplant-output-port', 'true', - 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', 'udp-close', - 'udp-connect!', 'udp-connected?', 'udp-multicast-interface', - 'udp-multicast-join-group!', 'udp-multicast-leave-group!', - 'udp-multicast-loopback?', 'udp-multicast-set-interface!', - 'udp-multicast-set-loopback!', 'udp-multicast-set-ttl!', - 'udp-multicast-ttl', 'udp-open-socket', 'udp-receive!', - 'udp-receive!*', 'udp-receive!-evt', 'udp-receive!/enable-break', - 'udp-receive-ready-evt', 'udp-send', 'udp-send*', 'udp-send-evt', - 'udp-send-ready-evt', 'udp-send-to', 'udp-send-to*', 'udp-send-to-evt', - 'udp-send-to/enable-break', 'udp-send/enable-break', 'udp?', 'unbox', - 'uncaught-exception-handler', 'unit?', 'unspecified-dom', - 'unsupplied-arg?', 'use-collection-link-paths', - 'use-compiled-file-paths', 'use-user-specific-search-paths', - 
'user-execute-bit', 'user-read-bit', 'user-write-bit', 'value-blame', - 'value-contract', 'values', 'variable-reference->empty-namespace', - 'variable-reference->module-base-phase', - 'variable-reference->module-declaration-inspector', - 'variable-reference->module-path-index', - 'variable-reference->module-source', 'variable-reference->namespace', - 'variable-reference->phase', - 'variable-reference->resolved-module-path', - 'variable-reference-constant?', 'variable-reference?', 'vector', - 'vector->immutable-vector', 'vector->list', - 'vector->pseudo-random-generator', 'vector->pseudo-random-generator!', - 'vector->values', 'vector-append', 'vector-argmax', 'vector-argmin', - 'vector-copy', 'vector-copy!', 'vector-count', 'vector-drop', - 'vector-drop-right', 'vector-fill!', 'vector-filter', - 'vector-filter-not', 'vector-immutable', 'vector-immutable/c', - 'vector-immutableof', 'vector-length', 'vector-map', 'vector-map!', - 'vector-member', 'vector-memq', 'vector-memv', 'vector-ref', - 'vector-set!', 'vector-set*!', 'vector-set-performance-stats!', - 'vector-split-at', 'vector-split-at-right', 'vector-take', - 'vector-take-right', 'vector/c', 'vector?', 'vectorof', 'version', - 'void', 'void?', 'weak-box-value', 'weak-box?', 'weak-set', - 'weak-seteq', 'weak-seteqv', 'will-execute', 'will-executor?', - 'will-register', 'will-try-execute', 'with-input-from-bytes', - 'with-input-from-file', 'with-input-from-string', - 'with-output-to-bytes', 'with-output-to-file', 'with-output-to-string', - 'would-be-future', 'wrap-evt', 'wrapped-extra-arg-arrow', - 'wrapped-extra-arg-arrow-extra-neg-party-argument', - 'wrapped-extra-arg-arrow-real-func', 'wrapped-extra-arg-arrow?', - 'writable<%>', 'write', 'write-byte', 'write-bytes', - 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt', - 'write-bytes-avail/enable-break', 'write-char', 'write-special', - 'write-special-avail*', 'write-special-evt', 'write-string', - 'write-to-file', 'writeln', 'xor', 'zero?', 
'~.a', '~.s', '~.v', '~a', - '~e', '~r', '~s', '~v' + u'*', u'*list/c', u'+', u'-', u'/', u'<', u'</c', u'<=', u'<=/c', u'=', u'=/c', + u'>', u'>/c', u'>=', u'>=/c', u'abort-current-continuation', u'abs', + u'absolute-path?', u'acos', u'add-between', u'add1', u'alarm-evt', + u'always-evt', u'and/c', u'andmap', u'angle', u'any/c', u'append', u'append*', + u'append-map', u'apply', u'argmax', u'argmin', u'arithmetic-shift', + u'arity-at-least', u'arity-at-least-value', u'arity-at-least?', + u'arity-checking-wrapper', u'arity-includes?', u'arity=?', + u'arrow-contract-info', u'arrow-contract-info-accepts-arglist', + u'arrow-contract-info-chaperone-procedure', + u'arrow-contract-info-check-first-order', u'arrow-contract-info?', + u'asin', u'assf', u'assoc', u'assq', u'assv', u'atan', + u'bad-number-of-results', u'banner', u'base->-doms/c', u'base->-rngs/c', + u'base->?', u'between/c', u'bitwise-and', u'bitwise-bit-field', + u'bitwise-bit-set?', u'bitwise-ior', u'bitwise-not', u'bitwise-xor', + u'blame-add-car-context', u'blame-add-cdr-context', u'blame-add-context', + u'blame-add-missing-party', u'blame-add-nth-arg-context', + u'blame-add-range-context', u'blame-add-unknown-context', + u'blame-context', u'blame-contract', u'blame-fmt->-string', + u'blame-missing-party?', u'blame-negative', u'blame-original?', + u'blame-positive', u'blame-replace-negative', u'blame-source', + u'blame-swap', u'blame-swapped?', u'blame-update', u'blame-value', + u'blame?', u'boolean=?', u'boolean?', u'bound-identifier=?', u'box', + u'box-cas!', u'box-immutable', u'box-immutable/c', u'box/c', u'box?', + u'break-enabled', u'break-parameterization?', u'break-thread', + u'build-chaperone-contract-property', u'build-compound-type-name', + u'build-contract-property', u'build-flat-contract-property', + u'build-list', u'build-path', u'build-path/convention-type', + u'build-string', u'build-vector', u'byte-pregexp', u'byte-pregexp?', + u'byte-ready?', u'byte-regexp', u'byte-regexp?', u'byte?', 
u'bytes', + u'bytes->immutable-bytes', u'bytes->list', u'bytes->path', + u'bytes->path-element', u'bytes->string/latin-1', u'bytes->string/locale', + u'bytes->string/utf-8', u'bytes-append', u'bytes-append*', + u'bytes-close-converter', u'bytes-convert', u'bytes-convert-end', + u'bytes-converter?', u'bytes-copy', u'bytes-copy!', + u'bytes-environment-variable-name?', u'bytes-fill!', u'bytes-join', + u'bytes-length', u'bytes-no-nuls?', u'bytes-open-converter', u'bytes-ref', + u'bytes-set!', u'bytes-utf-8-index', u'bytes-utf-8-length', + u'bytes-utf-8-ref', u'bytes<?', u'bytes=?', u'bytes>?', u'bytes?', u'caaaar', + u'caaadr', u'caaar', u'caadar', u'caaddr', u'caadr', u'caar', u'cadaar', + u'cadadr', u'cadar', u'caddar', u'cadddr', u'caddr', u'cadr', + u'call-in-nested-thread', u'call-with-atomic-output-file', + u'call-with-break-parameterization', + u'call-with-composable-continuation', u'call-with-continuation-barrier', + u'call-with-continuation-prompt', u'call-with-current-continuation', + u'call-with-default-reading-parameterization', + u'call-with-escape-continuation', u'call-with-exception-handler', + u'call-with-file-lock/timeout', u'call-with-immediate-continuation-mark', + u'call-with-input-bytes', u'call-with-input-file', + u'call-with-input-file*', u'call-with-input-string', + u'call-with-output-bytes', u'call-with-output-file', + u'call-with-output-file*', u'call-with-output-string', + u'call-with-parameterization', u'call-with-semaphore', + u'call-with-semaphore/enable-break', u'call-with-values', u'call/cc', + u'call/ec', u'car', u'cartesian-product', u'cdaaar', u'cdaadr', u'cdaar', + u'cdadar', u'cdaddr', u'cdadr', u'cdar', u'cddaar', u'cddadr', u'cddar', + u'cdddar', u'cddddr', u'cdddr', u'cddr', u'cdr', u'ceiling', u'channel-get', + u'channel-put', u'channel-put-evt', u'channel-put-evt?', + u'channel-try-get', u'channel/c', u'channel?', u'chaperone-box', + u'chaperone-channel', u'chaperone-continuation-mark-key', + u'chaperone-contract-property?', 
u'chaperone-contract?', u'chaperone-evt', + u'chaperone-hash', u'chaperone-hash-set', u'chaperone-of?', + u'chaperone-procedure', u'chaperone-procedure*', u'chaperone-prompt-tag', + u'chaperone-struct', u'chaperone-struct-type', u'chaperone-vector', + u'chaperone?', u'char->integer', u'char-alphabetic?', u'char-blank?', + u'char-ci<=?', u'char-ci<?', u'char-ci=?', u'char-ci>=?', u'char-ci>?', + u'char-downcase', u'char-foldcase', u'char-general-category', + u'char-graphic?', u'char-in', u'char-in/c', u'char-iso-control?', + u'char-lower-case?', u'char-numeric?', u'char-punctuation?', + u'char-ready?', u'char-symbolic?', u'char-title-case?', u'char-titlecase', + u'char-upcase', u'char-upper-case?', u'char-utf-8-length', + u'char-whitespace?', u'char<=?', u'char<?', u'char=?', u'char>=?', u'char>?', + u'char?', u'check-duplicate-identifier', u'check-duplicates', + u'checked-procedure-check-and-extract', u'choice-evt', + u'class->interface', u'class-info', u'class-seal', u'class-unseal', + u'class?', u'cleanse-path', u'close-input-port', u'close-output-port', + u'coerce-chaperone-contract', u'coerce-chaperone-contracts', + u'coerce-contract', u'coerce-contract/f', u'coerce-contracts', + u'coerce-flat-contract', u'coerce-flat-contracts', u'collect-garbage', + u'collection-file-path', u'collection-path', u'combinations', u'compile', + u'compile-allow-set!-undefined', u'compile-context-preservation-enabled', + u'compile-enforce-module-constants', u'compile-syntax', + u'compiled-expression-recompile', u'compiled-expression?', + u'compiled-module-expression?', u'complete-path?', u'complex?', u'compose', + u'compose1', u'conjoin', u'conjugate', u'cons', u'cons/c', u'cons?', u'const', + u'continuation-mark-key/c', u'continuation-mark-key?', + u'continuation-mark-set->context', u'continuation-mark-set->list', + u'continuation-mark-set->list*', u'continuation-mark-set-first', + u'continuation-mark-set?', u'continuation-marks', + u'continuation-prompt-available?', 
u'continuation-prompt-tag?', + u'continuation?', u'contract-continuation-mark-key', + u'contract-custom-write-property-proc', u'contract-exercise', + u'contract-first-order', u'contract-first-order-passes?', + u'contract-late-neg-projection', u'contract-name', u'contract-proc', + u'contract-projection', u'contract-property?', + u'contract-random-generate', u'contract-random-generate-fail', + u'contract-random-generate-fail?', + u'contract-random-generate-get-current-environment', + u'contract-random-generate-stash', u'contract-random-generate/choose', + u'contract-stronger?', u'contract-struct-exercise', + u'contract-struct-generate', u'contract-struct-late-neg-projection', + u'contract-struct-list-contract?', u'contract-val-first-projection', + u'contract?', u'convert-stream', u'copy-directory/files', u'copy-file', + u'copy-port', u'cos', u'cosh', u'count', u'current-blame-format', + u'current-break-parameterization', u'current-code-inspector', + u'current-command-line-arguments', u'current-compile', + u'current-compiled-file-roots', u'current-continuation-marks', + u'current-contract-region', u'current-custodian', u'current-directory', + u'current-directory-for-user', u'current-drive', + u'current-environment-variables', u'current-error-port', u'current-eval', + u'current-evt-pseudo-random-generator', + u'current-force-delete-permissions', u'current-future', + u'current-gc-milliseconds', u'current-get-interaction-input-port', + u'current-inexact-milliseconds', u'current-input-port', + u'current-inspector', u'current-library-collection-links', + u'current-library-collection-paths', u'current-load', + u'current-load-extension', u'current-load-relative-directory', + u'current-load/use-compiled', u'current-locale', u'current-logger', + u'current-memory-use', u'current-milliseconds', + u'current-module-declare-name', u'current-module-declare-source', + u'current-module-name-resolver', u'current-module-path-for-load', + u'current-namespace', u'current-output-port', 
u'current-parameterization', + u'current-plumber', u'current-preserved-thread-cell-values', + u'current-print', u'current-process-milliseconds', u'current-prompt-read', + u'current-pseudo-random-generator', u'current-read-interaction', + u'current-reader-guard', u'current-readtable', u'current-seconds', + u'current-security-guard', u'current-subprocess-custodian-mode', + u'current-thread', u'current-thread-group', + u'current-thread-initial-stack-size', + u'current-write-relative-directory', u'curry', u'curryr', + u'custodian-box-value', u'custodian-box?', u'custodian-limit-memory', + u'custodian-managed-list', u'custodian-memory-accounting-available?', + u'custodian-require-memory', u'custodian-shutdown-all', u'custodian?', + u'custom-print-quotable-accessor', u'custom-print-quotable?', + u'custom-write-accessor', u'custom-write-property-proc', u'custom-write?', + u'date', u'date*', u'date*-nanosecond', u'date*-time-zone-name', u'date*?', + u'date-day', u'date-dst?', u'date-hour', u'date-minute', u'date-month', + u'date-second', u'date-time-zone-offset', u'date-week-day', u'date-year', + u'date-year-day', u'date?', u'datum->syntax', u'datum-intern-literal', + u'default-continuation-prompt-tag', u'degrees->radians', + u'delete-directory', u'delete-directory/files', u'delete-file', + u'denominator', u'dict->list', u'dict-can-functional-set?', + u'dict-can-remove-keys?', u'dict-clear', u'dict-clear!', u'dict-copy', + u'dict-count', u'dict-empty?', u'dict-for-each', u'dict-has-key?', + u'dict-implements/c', u'dict-implements?', u'dict-iter-contract', + u'dict-iterate-first', u'dict-iterate-key', u'dict-iterate-next', + u'dict-iterate-value', u'dict-key-contract', u'dict-keys', u'dict-map', + u'dict-mutable?', u'dict-ref', u'dict-ref!', u'dict-remove', + u'dict-remove!', u'dict-set', u'dict-set!', u'dict-set*', u'dict-set*!', + u'dict-update', u'dict-update!', u'dict-value-contract', u'dict-values', + u'dict?', u'directory-exists?', u'directory-list', u'disjoin', 
u'display', + u'display-lines', u'display-lines-to-file', u'display-to-file', + u'displayln', u'double-flonum?', u'drop', u'drop-common-prefix', + u'drop-right', u'dropf', u'dropf-right', u'dump-memory-stats', + u'dup-input-port', u'dup-output-port', u'dynamic->*', u'dynamic-get-field', + u'dynamic-object/c', u'dynamic-place', u'dynamic-place*', + u'dynamic-require', u'dynamic-require-for-syntax', u'dynamic-send', + u'dynamic-set-field!', u'dynamic-wind', u'eighth', u'empty', + u'empty-sequence', u'empty-stream', u'empty?', + u'environment-variables-copy', u'environment-variables-names', + u'environment-variables-ref', u'environment-variables-set!', + u'environment-variables?', u'eof', u'eof-evt', u'eof-object?', + u'ephemeron-value', u'ephemeron?', u'eprintf', u'eq-contract-val', + u'eq-contract?', u'eq-hash-code', u'eq?', u'equal-contract-val', + u'equal-contract?', u'equal-hash-code', u'equal-secondary-hash-code', + u'equal<%>', u'equal?', u'equal?/recur', u'eqv-hash-code', u'eqv?', u'error', + u'error-display-handler', u'error-escape-handler', + u'error-print-context-length', u'error-print-source-location', + u'error-print-width', u'error-value->string-handler', u'eval', + u'eval-jit-enabled', u'eval-syntax', u'even?', u'evt/c', u'evt?', + u'exact->inexact', u'exact-ceiling', u'exact-floor', u'exact-integer?', + u'exact-nonnegative-integer?', u'exact-positive-integer?', u'exact-round', + u'exact-truncate', u'exact?', u'executable-yield-handler', u'exit', + u'exit-handler', u'exn', u'exn-continuation-marks', u'exn-message', + u'exn:break', u'exn:break-continuation', u'exn:break:hang-up', + u'exn:break:hang-up?', u'exn:break:terminate', u'exn:break:terminate?', + u'exn:break?', u'exn:fail', u'exn:fail:contract', + u'exn:fail:contract:arity', u'exn:fail:contract:arity?', + u'exn:fail:contract:blame', u'exn:fail:contract:blame-object', + u'exn:fail:contract:blame?', u'exn:fail:contract:continuation', + u'exn:fail:contract:continuation?', 
u'exn:fail:contract:divide-by-zero', + u'exn:fail:contract:divide-by-zero?', + u'exn:fail:contract:non-fixnum-result', + u'exn:fail:contract:non-fixnum-result?', u'exn:fail:contract:variable', + u'exn:fail:contract:variable-id', u'exn:fail:contract:variable?', + u'exn:fail:contract?', u'exn:fail:filesystem', + u'exn:fail:filesystem:errno', u'exn:fail:filesystem:errno-errno', + u'exn:fail:filesystem:errno?', u'exn:fail:filesystem:exists', + u'exn:fail:filesystem:exists?', u'exn:fail:filesystem:missing-module', + u'exn:fail:filesystem:missing-module-path', + u'exn:fail:filesystem:missing-module?', u'exn:fail:filesystem:version', + u'exn:fail:filesystem:version?', u'exn:fail:filesystem?', + u'exn:fail:network', u'exn:fail:network:errno', + u'exn:fail:network:errno-errno', u'exn:fail:network:errno?', + u'exn:fail:network?', u'exn:fail:object', u'exn:fail:object?', + u'exn:fail:out-of-memory', u'exn:fail:out-of-memory?', u'exn:fail:read', + u'exn:fail:read-srclocs', u'exn:fail:read:eof', u'exn:fail:read:eof?', + u'exn:fail:read:non-char', u'exn:fail:read:non-char?', u'exn:fail:read?', + u'exn:fail:syntax', u'exn:fail:syntax-exprs', + u'exn:fail:syntax:missing-module', + u'exn:fail:syntax:missing-module-path', + u'exn:fail:syntax:missing-module?', u'exn:fail:syntax:unbound', + u'exn:fail:syntax:unbound?', u'exn:fail:syntax?', u'exn:fail:unsupported', + u'exn:fail:unsupported?', u'exn:fail:user', u'exn:fail:user?', + u'exn:fail?', u'exn:misc:match?', u'exn:missing-module-accessor', + u'exn:missing-module?', u'exn:srclocs-accessor', u'exn:srclocs?', u'exn?', + u'exp', u'expand', u'expand-once', u'expand-syntax', u'expand-syntax-once', + u'expand-syntax-to-top-form', u'expand-to-top-form', u'expand-user-path', + u'explode-path', u'expt', u'externalizable<%>', u'failure-result/c', + u'false?', u'field-names', u'fifth', u'file->bytes', u'file->bytes-lines', + u'file->lines', u'file->list', u'file->string', u'file->value', + u'file-exists?', u'file-name-from-path', 
u'file-or-directory-identity', + u'file-or-directory-modify-seconds', u'file-or-directory-permissions', + u'file-position', u'file-position*', u'file-size', + u'file-stream-buffer-mode', u'file-stream-port?', u'file-truncate', + u'filename-extension', u'filesystem-change-evt', + u'filesystem-change-evt-cancel', u'filesystem-change-evt?', + u'filesystem-root-list', u'filter', u'filter-map', u'filter-not', + u'filter-read-input-port', u'find-executable-path', u'find-files', + u'find-library-collection-links', u'find-library-collection-paths', + u'find-relative-path', u'find-system-path', u'findf', u'first', + u'first-or/c', u'fixnum?', u'flat-contract', u'flat-contract-predicate', + u'flat-contract-property?', u'flat-contract?', u'flat-named-contract', + u'flatten', u'floating-point-bytes->real', u'flonum?', u'floor', + u'flush-output', u'fold-files', u'foldl', u'foldr', u'for-each', u'force', + u'format', u'fourth', u'fprintf', u'free-identifier=?', + u'free-label-identifier=?', u'free-template-identifier=?', + u'free-transformer-identifier=?', u'fsemaphore-count', u'fsemaphore-post', + u'fsemaphore-try-wait?', u'fsemaphore-wait', u'fsemaphore?', u'future', + u'future?', u'futures-enabled?', u'gcd', u'generate-member-key', + u'generate-temporaries', u'generic-set?', u'generic?', u'gensym', + u'get-output-bytes', u'get-output-string', u'get-preference', + u'get/build-late-neg-projection', u'get/build-val-first-projection', + u'getenv', u'global-port-print-handler', u'group-by', u'group-execute-bit', + u'group-read-bit', u'group-write-bit', u'guard-evt', u'handle-evt', + u'handle-evt?', u'has-blame?', u'has-contract?', u'hash', u'hash->list', + u'hash-clear', u'hash-clear!', u'hash-copy', u'hash-copy-clear', + u'hash-count', u'hash-empty?', u'hash-eq?', u'hash-equal?', u'hash-eqv?', + u'hash-for-each', u'hash-has-key?', u'hash-iterate-first', + u'hash-iterate-key', u'hash-iterate-key+value', u'hash-iterate-next', + u'hash-iterate-pair', u'hash-iterate-value', 
u'hash-keys', u'hash-map', + u'hash-placeholder?', u'hash-ref', u'hash-ref!', u'hash-remove', + u'hash-remove!', u'hash-set', u'hash-set!', u'hash-set*', u'hash-set*!', + u'hash-update', u'hash-update!', u'hash-values', u'hash-weak?', u'hash/c', + u'hash?', u'hasheq', u'hasheqv', u'identifier-binding', + u'identifier-binding-symbol', u'identifier-label-binding', + u'identifier-prune-lexical-context', + u'identifier-prune-to-source-module', + u'identifier-remove-from-definition-context', + u'identifier-template-binding', u'identifier-transformer-binding', + u'identifier?', u'identity', u'if/c', u'imag-part', u'immutable?', + u'impersonate-box', u'impersonate-channel', + u'impersonate-continuation-mark-key', u'impersonate-hash', + u'impersonate-hash-set', u'impersonate-procedure', + u'impersonate-procedure*', u'impersonate-prompt-tag', + u'impersonate-struct', u'impersonate-vector', u'impersonator-contract?', + u'impersonator-ephemeron', u'impersonator-of?', + u'impersonator-prop:application-mark', u'impersonator-prop:blame', + u'impersonator-prop:contracted', + u'impersonator-property-accessor-procedure?', u'impersonator-property?', + u'impersonator?', u'implementation?', u'implementation?/c', u'in-bytes', + u'in-bytes-lines', u'in-combinations', u'in-cycle', u'in-dict', + u'in-dict-keys', u'in-dict-pairs', u'in-dict-values', u'in-directory', + u'in-hash', u'in-hash-keys', u'in-hash-pairs', u'in-hash-values', + u'in-immutable-hash', u'in-immutable-hash-keys', + u'in-immutable-hash-pairs', u'in-immutable-hash-values', + u'in-immutable-set', u'in-indexed', u'in-input-port-bytes', + u'in-input-port-chars', u'in-lines', u'in-list', u'in-mlist', + u'in-mutable-hash', u'in-mutable-hash-keys', u'in-mutable-hash-pairs', + u'in-mutable-hash-values', u'in-mutable-set', u'in-naturals', + u'in-parallel', u'in-permutations', u'in-port', u'in-producer', u'in-range', + u'in-sequences', u'in-set', u'in-slice', u'in-stream', u'in-string', + u'in-syntax', u'in-value', 
u'in-values*-sequence', u'in-values-sequence', + u'in-vector', u'in-weak-hash', u'in-weak-hash-keys', u'in-weak-hash-pairs', + u'in-weak-hash-values', u'in-weak-set', u'inexact->exact', + u'inexact-real?', u'inexact?', u'infinite?', u'input-port-append', + u'input-port?', u'inspector?', u'instanceof/c', u'integer->char', + u'integer->integer-bytes', u'integer-bytes->integer', u'integer-in', + u'integer-length', u'integer-sqrt', u'integer-sqrt/remainder', u'integer?', + u'interface->method-names', u'interface-extension?', u'interface?', + u'internal-definition-context-binding-identifiers', + u'internal-definition-context-introduce', + u'internal-definition-context-seal', u'internal-definition-context?', + u'is-a?', u'is-a?/c', u'keyword->string', u'keyword-apply', u'keyword<?', + u'keyword?', u'keywords-match', u'kill-thread', u'last', u'last-pair', + u'lcm', u'length', u'liberal-define-context?', u'link-exists?', u'list', + u'list*', u'list*of', u'list->bytes', u'list->mutable-set', + u'list->mutable-seteq', u'list->mutable-seteqv', u'list->set', + u'list->seteq', u'list->seteqv', u'list->string', u'list->vector', + u'list->weak-set', u'list->weak-seteq', u'list->weak-seteqv', + u'list-contract?', u'list-prefix?', u'list-ref', u'list-set', u'list-tail', + u'list-update', u'list/c', u'list?', u'listen-port-number?', u'listof', + u'load', u'load-extension', u'load-on-demand-enabled', u'load-relative', + u'load-relative-extension', u'load/cd', u'load/use-compiled', + u'local-expand', u'local-expand/capture-lifts', + u'local-transformer-expand', u'local-transformer-expand/capture-lifts', + u'locale-string-encoding', u'log', u'log-all-levels', u'log-level-evt', + u'log-level?', u'log-max-level', u'log-message', u'log-receiver?', + u'logger-name', u'logger?', u'magnitude', u'make-arity-at-least', + u'make-base-empty-namespace', u'make-base-namespace', u'make-bytes', + u'make-channel', u'make-chaperone-contract', + u'make-continuation-mark-key', 
u'make-continuation-prompt-tag', + u'make-contract', u'make-custodian', u'make-custodian-box', + u'make-custom-hash', u'make-custom-hash-types', u'make-custom-set', + u'make-custom-set-types', u'make-date', u'make-date*', + u'make-derived-parameter', u'make-directory', u'make-directory*', + u'make-do-sequence', u'make-empty-namespace', + u'make-environment-variables', u'make-ephemeron', u'make-exn', + u'make-exn:break', u'make-exn:break:hang-up', u'make-exn:break:terminate', + u'make-exn:fail', u'make-exn:fail:contract', + u'make-exn:fail:contract:arity', u'make-exn:fail:contract:blame', + u'make-exn:fail:contract:continuation', + u'make-exn:fail:contract:divide-by-zero', + u'make-exn:fail:contract:non-fixnum-result', + u'make-exn:fail:contract:variable', u'make-exn:fail:filesystem', + u'make-exn:fail:filesystem:errno', u'make-exn:fail:filesystem:exists', + u'make-exn:fail:filesystem:missing-module', + u'make-exn:fail:filesystem:version', u'make-exn:fail:network', + u'make-exn:fail:network:errno', u'make-exn:fail:object', + u'make-exn:fail:out-of-memory', u'make-exn:fail:read', + u'make-exn:fail:read:eof', u'make-exn:fail:read:non-char', + u'make-exn:fail:syntax', u'make-exn:fail:syntax:missing-module', + u'make-exn:fail:syntax:unbound', u'make-exn:fail:unsupported', + u'make-exn:fail:user', u'make-file-or-directory-link', + u'make-flat-contract', u'make-fsemaphore', u'make-generic', + u'make-handle-get-preference-locked', u'make-hash', + u'make-hash-placeholder', u'make-hasheq', u'make-hasheq-placeholder', + u'make-hasheqv', u'make-hasheqv-placeholder', + u'make-immutable-custom-hash', u'make-immutable-hash', + u'make-immutable-hasheq', u'make-immutable-hasheqv', + u'make-impersonator-property', u'make-input-port', + u'make-input-port/read-to-peek', u'make-inspector', + u'make-keyword-procedure', u'make-known-char-range-list', + u'make-limited-input-port', u'make-list', u'make-lock-file-name', + u'make-log-receiver', u'make-logger', u'make-mixin-contract', + 
u'make-mutable-custom-set', u'make-none/c', u'make-object', + u'make-output-port', u'make-parameter', u'make-parent-directory*', + u'make-phantom-bytes', u'make-pipe', u'make-pipe-with-specials', + u'make-placeholder', u'make-plumber', u'make-polar', u'make-prefab-struct', + u'make-primitive-class', u'make-proj-contract', + u'make-pseudo-random-generator', u'make-reader-graph', u'make-readtable', + u'make-rectangular', u'make-rename-transformer', + u'make-resolved-module-path', u'make-security-guard', u'make-semaphore', + u'make-set!-transformer', u'make-shared-bytes', u'make-sibling-inspector', + u'make-special-comment', u'make-srcloc', u'make-string', + u'make-struct-field-accessor', u'make-struct-field-mutator', + u'make-struct-type', u'make-struct-type-property', + u'make-syntax-delta-introducer', u'make-syntax-introducer', + u'make-temporary-file', u'make-tentative-pretty-print-output-port', + u'make-thread-cell', u'make-thread-group', u'make-vector', + u'make-weak-box', u'make-weak-custom-hash', u'make-weak-custom-set', + u'make-weak-hash', u'make-weak-hasheq', u'make-weak-hasheqv', + u'make-will-executor', u'map', u'match-equality-test', + u'matches-arity-exactly?', u'max', u'mcar', u'mcdr', u'mcons', u'member', + u'member-name-key-hash-code', u'member-name-key=?', u'member-name-key?', + u'memf', u'memq', u'memv', u'merge-input', u'method-in-interface?', u'min', + u'mixin-contract', u'module->exports', u'module->imports', + u'module->language-info', u'module->namespace', + u'module-compiled-cross-phase-persistent?', u'module-compiled-exports', + u'module-compiled-imports', u'module-compiled-language-info', + u'module-compiled-name', u'module-compiled-submodules', + u'module-declared?', u'module-path-index-join', + u'module-path-index-resolve', u'module-path-index-split', + u'module-path-index-submodule', u'module-path-index?', u'module-path?', + u'module-predefined?', u'module-provide-protected?', u'modulo', u'mpair?', + u'mutable-set', u'mutable-seteq', 
u'mutable-seteqv', u'n->th', + u'nack-guard-evt', u'namespace-anchor->empty-namespace', + u'namespace-anchor->namespace', u'namespace-anchor?', + u'namespace-attach-module', u'namespace-attach-module-declaration', + u'namespace-base-phase', u'namespace-mapped-symbols', + u'namespace-module-identifier', u'namespace-module-registry', + u'namespace-require', u'namespace-require/constant', + u'namespace-require/copy', u'namespace-require/expansion-time', + u'namespace-set-variable-value!', u'namespace-symbol->identifier', + u'namespace-syntax-introduce', u'namespace-undefine-variable!', + u'namespace-unprotect-module', u'namespace-variable-value', u'namespace?', + u'nan?', u'natural-number/c', u'negate', u'negative?', u'never-evt', + u'new-∀/c', u'new-∃/c', u'newline', u'ninth', u'non-empty-listof', + u'non-empty-string?', u'none/c', u'normal-case-path', u'normalize-arity', + u'normalize-path', u'normalized-arity?', u'not', u'not/c', u'null', u'null?', + u'number->string', u'number?', u'numerator', u'object%', u'object->vector', + u'object-info', u'object-interface', u'object-method-arity-includes?', + u'object-name', u'object-or-false=?', u'object=?', u'object?', u'odd?', + u'one-of/c', u'open-input-bytes', u'open-input-file', + u'open-input-output-file', u'open-input-string', u'open-output-bytes', + u'open-output-file', u'open-output-nowhere', u'open-output-string', + u'or/c', u'order-of-magnitude', u'ormap', u'other-execute-bit', + u'other-read-bit', u'other-write-bit', u'output-port?', u'pair?', + u'parameter-procedure=?', u'parameter/c', u'parameter?', + u'parameterization?', u'parse-command-line', u'partition', u'path->bytes', + u'path->complete-path', u'path->directory-path', u'path->string', + u'path-add-suffix', u'path-convention-type', u'path-element->bytes', + u'path-element->string', u'path-element?', u'path-for-some-system?', + u'path-list-string->path-list', u'path-only', u'path-replace-suffix', + u'path-string?', u'path<?', u'path?', u'pathlist-closure', 
u'peek-byte', + u'peek-byte-or-special', u'peek-bytes', u'peek-bytes!', u'peek-bytes!-evt', + u'peek-bytes-avail!', u'peek-bytes-avail!*', u'peek-bytes-avail!-evt', + u'peek-bytes-avail!/enable-break', u'peek-bytes-evt', u'peek-char', + u'peek-char-or-special', u'peek-string', u'peek-string!', + u'peek-string!-evt', u'peek-string-evt', u'peeking-input-port', + u'permutations', u'phantom-bytes?', u'pi', u'pi.f', u'pipe-content-length', + u'place-break', u'place-channel', u'place-channel-get', + u'place-channel-put', u'place-channel-put/get', u'place-channel?', + u'place-dead-evt', u'place-enabled?', u'place-kill', u'place-location?', + u'place-message-allowed?', u'place-sleep', u'place-wait', u'place?', + u'placeholder-get', u'placeholder-set!', u'placeholder?', + u'plumber-add-flush!', u'plumber-flush-all', + u'plumber-flush-handle-remove!', u'plumber-flush-handle?', u'plumber?', + u'poll-guard-evt', u'port->bytes', u'port->bytes-lines', u'port->lines', + u'port->list', u'port->string', u'port-closed-evt', u'port-closed?', + u'port-commit-peeked', u'port-count-lines!', u'port-count-lines-enabled', + u'port-counts-lines?', u'port-display-handler', u'port-file-identity', + u'port-file-unlock', u'port-next-location', u'port-number?', + u'port-print-handler', u'port-progress-evt', + u'port-provides-progress-evts?', u'port-read-handler', + u'port-try-file-lock?', u'port-write-handler', u'port-writes-atomic?', + u'port-writes-special?', u'port?', u'positive?', u'predicate/c', + u'prefab-key->struct-type', u'prefab-key?', u'prefab-struct-key', + u'preferences-lock-file-mode', u'pregexp', u'pregexp?', u'pretty-display', + u'pretty-format', u'pretty-print', u'pretty-print-.-symbol-without-bars', + u'pretty-print-abbreviate-read-macros', u'pretty-print-columns', + u'pretty-print-current-style-table', u'pretty-print-depth', + u'pretty-print-exact-as-decimal', u'pretty-print-extend-style-table', + u'pretty-print-handler', u'pretty-print-newline', + 
u'pretty-print-post-print-hook', u'pretty-print-pre-print-hook', + u'pretty-print-print-hook', u'pretty-print-print-line', + u'pretty-print-remap-stylable', u'pretty-print-show-inexactness', + u'pretty-print-size-hook', u'pretty-print-style-table?', + u'pretty-printing', u'pretty-write', u'primitive-closure?', + u'primitive-result-arity', u'primitive?', u'print', u'print-as-expression', + u'print-boolean-long-form', u'print-box', u'print-graph', + u'print-hash-table', u'print-mpair-curly-braces', + u'print-pair-curly-braces', u'print-reader-abbreviations', + u'print-struct', u'print-syntax-width', u'print-unreadable', + u'print-vector-length', u'printable/c', u'printable<%>', u'printf', + u'println', u'procedure->method', u'procedure-arity', + u'procedure-arity-includes/c', u'procedure-arity-includes?', + u'procedure-arity?', u'procedure-closure-contents-eq?', + u'procedure-extract-target', u'procedure-keywords', + u'procedure-reduce-arity', u'procedure-reduce-keyword-arity', + u'procedure-rename', u'procedure-result-arity', u'procedure-specialize', + u'procedure-struct-type?', u'procedure?', u'process', u'process*', + u'process*/ports', u'process/ports', u'processor-count', u'progress-evt?', + u'promise-forced?', u'promise-running?', u'promise/c', u'promise/name?', + u'promise?', u'prop:arity-string', u'prop:arrow-contract', + u'prop:arrow-contract-get-info', u'prop:arrow-contract?', u'prop:blame', + u'prop:chaperone-contract', u'prop:checked-procedure', u'prop:contract', + u'prop:contracted', u'prop:custom-print-quotable', u'prop:custom-write', + u'prop:dict', u'prop:dict/contract', u'prop:equal+hash', u'prop:evt', + u'prop:exn:missing-module', u'prop:exn:srclocs', + u'prop:expansion-contexts', u'prop:flat-contract', + u'prop:impersonator-of', u'prop:input-port', + u'prop:liberal-define-context', u'prop:object-name', + u'prop:opt-chaperone-contract', u'prop:opt-chaperone-contract-get-test', + u'prop:opt-chaperone-contract?', u'prop:orc-contract', + 
u'prop:orc-contract-get-subcontracts', u'prop:orc-contract?', + u'prop:output-port', u'prop:place-location', u'prop:procedure', + u'prop:recursive-contract', u'prop:recursive-contract-unroll', + u'prop:recursive-contract?', u'prop:rename-transformer', u'prop:sequence', + u'prop:set!-transformer', u'prop:stream', u'proper-subset?', + u'pseudo-random-generator->vector', u'pseudo-random-generator-vector?', + u'pseudo-random-generator?', u'put-preferences', u'putenv', u'quotient', + u'quotient/remainder', u'radians->degrees', u'raise', + u'raise-argument-error', u'raise-arguments-error', u'raise-arity-error', + u'raise-blame-error', u'raise-contract-error', u'raise-mismatch-error', + u'raise-not-cons-blame-error', u'raise-range-error', + u'raise-result-error', u'raise-syntax-error', u'raise-type-error', + u'raise-user-error', u'random', u'random-seed', u'range', u'rational?', + u'rationalize', u'read', u'read-accept-bar-quote', u'read-accept-box', + u'read-accept-compiled', u'read-accept-dot', u'read-accept-graph', + u'read-accept-infix-dot', u'read-accept-lang', u'read-accept-quasiquote', + u'read-accept-reader', u'read-byte', u'read-byte-or-special', + u'read-bytes', u'read-bytes!', u'read-bytes!-evt', u'read-bytes-avail!', + u'read-bytes-avail!*', u'read-bytes-avail!-evt', + u'read-bytes-avail!/enable-break', u'read-bytes-evt', u'read-bytes-line', + u'read-bytes-line-evt', u'read-case-sensitive', u'read-cdot', u'read-char', + u'read-char-or-special', u'read-curly-brace-as-paren', + u'read-curly-brace-with-tag', u'read-decimal-as-inexact', + u'read-eval-print-loop', u'read-language', u'read-line', u'read-line-evt', + u'read-on-demand-source', u'read-square-bracket-as-paren', + u'read-square-bracket-with-tag', u'read-string', u'read-string!', + u'read-string!-evt', u'read-string-evt', u'read-syntax', + u'read-syntax/recursive', u'read/recursive', u'readtable-mapping', + u'readtable?', u'real->decimal-string', u'real->double-flonum', + u'real->floating-point-bytes', 
u'real->single-flonum', u'real-in', + u'real-part', u'real?', u'reencode-input-port', u'reencode-output-port', + u'regexp', u'regexp-match', u'regexp-match*', u'regexp-match-evt', + u'regexp-match-exact?', u'regexp-match-peek', + u'regexp-match-peek-immediate', u'regexp-match-peek-positions', + u'regexp-match-peek-positions*', + u'regexp-match-peek-positions-immediate', + u'regexp-match-peek-positions-immediate/end', + u'regexp-match-peek-positions/end', u'regexp-match-positions', + u'regexp-match-positions*', u'regexp-match-positions/end', + u'regexp-match/end', u'regexp-match?', u'regexp-max-lookbehind', + u'regexp-quote', u'regexp-replace', u'regexp-replace*', + u'regexp-replace-quote', u'regexp-replaces', u'regexp-split', + u'regexp-try-match', u'regexp?', u'relative-path?', u'relocate-input-port', + u'relocate-output-port', u'remainder', u'remf', u'remf*', u'remove', + u'remove*', u'remove-duplicates', u'remq', u'remq*', u'remv', u'remv*', + u'rename-contract', u'rename-file-or-directory', + u'rename-transformer-target', u'rename-transformer?', u'replace-evt', + u'reroot-path', u'resolve-path', u'resolved-module-path-name', + u'resolved-module-path?', u'rest', u'reverse', u'round', u'second', + u'seconds->date', u'security-guard?', u'semaphore-peek-evt', + u'semaphore-peek-evt?', u'semaphore-post', u'semaphore-try-wait?', + u'semaphore-wait', u'semaphore-wait/enable-break', u'semaphore?', + u'sequence->list', u'sequence->stream', u'sequence-add-between', + u'sequence-andmap', u'sequence-append', u'sequence-count', + u'sequence-filter', u'sequence-fold', u'sequence-for-each', + u'sequence-generate', u'sequence-generate*', u'sequence-length', + u'sequence-map', u'sequence-ormap', u'sequence-ref', u'sequence-tail', + u'sequence/c', u'sequence?', u'set', u'set!-transformer-procedure', + u'set!-transformer?', u'set->list', u'set->stream', u'set-add', u'set-add!', + u'set-box!', u'set-clear', u'set-clear!', u'set-copy', u'set-copy-clear', + u'set-count', 
u'set-empty?', u'set-eq?', u'set-equal?', u'set-eqv?', + u'set-first', u'set-for-each', u'set-implements/c', u'set-implements?', + u'set-intersect', u'set-intersect!', u'set-map', u'set-mcar!', u'set-mcdr!', + u'set-member?', u'set-mutable?', u'set-phantom-bytes!', + u'set-port-next-location!', u'set-remove', u'set-remove!', u'set-rest', + u'set-some-basic-contracts!', u'set-subtract', u'set-subtract!', + u'set-symmetric-difference', u'set-symmetric-difference!', u'set-union', + u'set-union!', u'set-weak?', u'set/c', u'set=?', u'set?', u'seteq', u'seteqv', + u'seventh', u'sgn', u'shared-bytes', u'shell-execute', u'shrink-path-wrt', + u'shuffle', u'simple-form-path', u'simplify-path', u'sin', + u'single-flonum?', u'sinh', u'sixth', u'skip-projection-wrapper?', u'sleep', + u'some-system-path->string', u'sort', u'special-comment-value', + u'special-comment?', u'special-filter-input-port', u'split-at', + u'split-at-right', u'split-common-prefix', u'split-path', u'splitf-at', + u'splitf-at-right', u'sqr', u'sqrt', u'srcloc', u'srcloc->string', + u'srcloc-column', u'srcloc-line', u'srcloc-position', u'srcloc-source', + u'srcloc-span', u'srcloc?', u'stop-after', u'stop-before', u'stream->list', + u'stream-add-between', u'stream-andmap', u'stream-append', u'stream-count', + u'stream-empty?', u'stream-filter', u'stream-first', u'stream-fold', + u'stream-for-each', u'stream-length', u'stream-map', u'stream-ormap', + u'stream-ref', u'stream-rest', u'stream-tail', u'stream/c', u'stream?', + u'string', u'string->bytes/latin-1', u'string->bytes/locale', + u'string->bytes/utf-8', u'string->immutable-string', u'string->keyword', + u'string->list', u'string->number', u'string->path', + u'string->path-element', u'string->some-system-path', u'string->symbol', + u'string->uninterned-symbol', u'string->unreadable-symbol', + u'string-append', u'string-append*', u'string-ci<=?', u'string-ci<?', + u'string-ci=?', u'string-ci>=?', u'string-ci>?', u'string-contains?', + u'string-copy', 
u'string-copy!', u'string-downcase', + u'string-environment-variable-name?', u'string-fill!', u'string-foldcase', + u'string-join', u'string-len/c', u'string-length', u'string-locale-ci<?', + u'string-locale-ci=?', u'string-locale-ci>?', u'string-locale-downcase', + u'string-locale-upcase', u'string-locale<?', u'string-locale=?', + u'string-locale>?', u'string-no-nuls?', u'string-normalize-nfc', + u'string-normalize-nfd', u'string-normalize-nfkc', + u'string-normalize-nfkd', u'string-normalize-spaces', u'string-port?', + u'string-prefix?', u'string-ref', u'string-replace', u'string-set!', + u'string-split', u'string-suffix?', u'string-titlecase', u'string-trim', + u'string-upcase', u'string-utf-8-length', u'string<=?', u'string<?', + u'string=?', u'string>=?', u'string>?', u'string?', u'struct->vector', + u'struct-accessor-procedure?', u'struct-constructor-procedure?', + u'struct-info', u'struct-mutator-procedure?', + u'struct-predicate-procedure?', u'struct-type-info', + u'struct-type-make-constructor', u'struct-type-make-predicate', + u'struct-type-property-accessor-procedure?', u'struct-type-property/c', + u'struct-type-property?', u'struct-type?', u'struct:arity-at-least', + u'struct:arrow-contract-info', u'struct:date', u'struct:date*', + u'struct:exn', u'struct:exn:break', u'struct:exn:break:hang-up', + u'struct:exn:break:terminate', u'struct:exn:fail', + u'struct:exn:fail:contract', u'struct:exn:fail:contract:arity', + u'struct:exn:fail:contract:blame', + u'struct:exn:fail:contract:continuation', + u'struct:exn:fail:contract:divide-by-zero', + u'struct:exn:fail:contract:non-fixnum-result', + u'struct:exn:fail:contract:variable', u'struct:exn:fail:filesystem', + u'struct:exn:fail:filesystem:errno', + u'struct:exn:fail:filesystem:exists', + u'struct:exn:fail:filesystem:missing-module', + u'struct:exn:fail:filesystem:version', u'struct:exn:fail:network', + u'struct:exn:fail:network:errno', u'struct:exn:fail:object', + u'struct:exn:fail:out-of-memory', 
u'struct:exn:fail:read', + u'struct:exn:fail:read:eof', u'struct:exn:fail:read:non-char', + u'struct:exn:fail:syntax', u'struct:exn:fail:syntax:missing-module', + u'struct:exn:fail:syntax:unbound', u'struct:exn:fail:unsupported', + u'struct:exn:fail:user', u'struct:srcloc', + u'struct:wrapped-extra-arg-arrow', u'struct?', u'sub1', u'subbytes', + u'subclass?', u'subclass?/c', u'subprocess', u'subprocess-group-enabled', + u'subprocess-kill', u'subprocess-pid', u'subprocess-status', + u'subprocess-wait', u'subprocess?', u'subset?', u'substring', u'suggest/c', + u'symbol->string', u'symbol-interned?', u'symbol-unreadable?', u'symbol<?', + u'symbol=?', u'symbol?', u'symbols', u'sync', u'sync/enable-break', + u'sync/timeout', u'sync/timeout/enable-break', u'syntax->datum', + u'syntax->list', u'syntax-arm', u'syntax-column', u'syntax-debug-info', + u'syntax-disarm', u'syntax-e', u'syntax-line', + u'syntax-local-bind-syntaxes', u'syntax-local-certifier', + u'syntax-local-context', u'syntax-local-expand-expression', + u'syntax-local-get-shadower', u'syntax-local-identifier-as-binding', + u'syntax-local-introduce', u'syntax-local-lift-context', + u'syntax-local-lift-expression', u'syntax-local-lift-module', + u'syntax-local-lift-module-end-declaration', + u'syntax-local-lift-provide', u'syntax-local-lift-require', + u'syntax-local-lift-values-expression', + u'syntax-local-make-definition-context', + u'syntax-local-make-delta-introducer', + u'syntax-local-module-defined-identifiers', + u'syntax-local-module-exports', + u'syntax-local-module-required-identifiers', u'syntax-local-name', + u'syntax-local-phase-level', u'syntax-local-submodules', + u'syntax-local-transforming-module-provides?', u'syntax-local-value', + u'syntax-local-value/immediate', u'syntax-original?', u'syntax-position', + u'syntax-property', u'syntax-property-preserved?', + u'syntax-property-symbol-keys', u'syntax-protect', u'syntax-rearm', + u'syntax-recertify', u'syntax-shift-phase-level', 
u'syntax-source', + u'syntax-source-module', u'syntax-span', u'syntax-taint', + u'syntax-tainted?', u'syntax-track-origin', + u'syntax-transforming-module-expression?', + u'syntax-transforming-with-lifts?', u'syntax-transforming?', u'syntax/c', + u'syntax?', u'system', u'system*', u'system*/exit-code', + u'system-big-endian?', u'system-idle-evt', u'system-language+country', + u'system-library-subpath', u'system-path-convention-type', u'system-type', + u'system/exit-code', u'tail-marks-match?', u'take', u'take-common-prefix', + u'take-right', u'takef', u'takef-right', u'tan', u'tanh', + u'tcp-abandon-port', u'tcp-accept', u'tcp-accept-evt', + u'tcp-accept-ready?', u'tcp-accept/enable-break', u'tcp-addresses', + u'tcp-close', u'tcp-connect', u'tcp-connect/enable-break', u'tcp-listen', + u'tcp-listener?', u'tcp-port?', u'tentative-pretty-print-port-cancel', + u'tentative-pretty-print-port-transfer', u'tenth', u'terminal-port?', + u'the-unsupplied-arg', u'third', u'thread', u'thread-cell-ref', + u'thread-cell-set!', u'thread-cell-values?', u'thread-cell?', + u'thread-dead-evt', u'thread-dead?', u'thread-group?', u'thread-receive', + u'thread-receive-evt', u'thread-resume', u'thread-resume-evt', + u'thread-rewind-receive', u'thread-running?', u'thread-send', + u'thread-suspend', u'thread-suspend-evt', u'thread-try-receive', + u'thread-wait', u'thread/suspend-to-kill', u'thread?', u'time-apply', + u'touch', u'transplant-input-port', u'transplant-output-port', u'true', + u'truncate', u'udp-addresses', u'udp-bind!', u'udp-bound?', u'udp-close', + u'udp-connect!', u'udp-connected?', u'udp-multicast-interface', + u'udp-multicast-join-group!', u'udp-multicast-leave-group!', + u'udp-multicast-loopback?', u'udp-multicast-set-interface!', + u'udp-multicast-set-loopback!', u'udp-multicast-set-ttl!', + u'udp-multicast-ttl', u'udp-open-socket', u'udp-receive!', + u'udp-receive!*', u'udp-receive!-evt', u'udp-receive!/enable-break', + u'udp-receive-ready-evt', u'udp-send', 
u'udp-send*', u'udp-send-evt', + u'udp-send-ready-evt', u'udp-send-to', u'udp-send-to*', u'udp-send-to-evt', + u'udp-send-to/enable-break', u'udp-send/enable-break', u'udp?', u'unbox', + u'uncaught-exception-handler', u'unit?', u'unspecified-dom', + u'unsupplied-arg?', u'use-collection-link-paths', + u'use-compiled-file-paths', u'use-user-specific-search-paths', + u'user-execute-bit', u'user-read-bit', u'user-write-bit', u'value-blame', + u'value-contract', u'values', u'variable-reference->empty-namespace', + u'variable-reference->module-base-phase', + u'variable-reference->module-declaration-inspector', + u'variable-reference->module-path-index', + u'variable-reference->module-source', u'variable-reference->namespace', + u'variable-reference->phase', + u'variable-reference->resolved-module-path', + u'variable-reference-constant?', u'variable-reference?', u'vector', + u'vector->immutable-vector', u'vector->list', + u'vector->pseudo-random-generator', u'vector->pseudo-random-generator!', + u'vector->values', u'vector-append', u'vector-argmax', u'vector-argmin', + u'vector-copy', u'vector-copy!', u'vector-count', u'vector-drop', + u'vector-drop-right', u'vector-fill!', u'vector-filter', + u'vector-filter-not', u'vector-immutable', u'vector-immutable/c', + u'vector-immutableof', u'vector-length', u'vector-map', u'vector-map!', + u'vector-member', u'vector-memq', u'vector-memv', u'vector-ref', + u'vector-set!', u'vector-set*!', u'vector-set-performance-stats!', + u'vector-split-at', u'vector-split-at-right', u'vector-take', + u'vector-take-right', u'vector/c', u'vector?', u'vectorof', u'version', + u'void', u'void?', u'weak-box-value', u'weak-box?', u'weak-set', + u'weak-seteq', u'weak-seteqv', u'will-execute', u'will-executor?', + u'will-register', u'will-try-execute', u'with-input-from-bytes', + u'with-input-from-file', u'with-input-from-string', + u'with-output-to-bytes', u'with-output-to-file', u'with-output-to-string', + u'would-be-future', u'wrap-evt', 
u'wrapped-extra-arg-arrow', + u'wrapped-extra-arg-arrow-extra-neg-party-argument', + u'wrapped-extra-arg-arrow-real-func', u'wrapped-extra-arg-arrow?', + u'writable<%>', u'write', u'write-byte', u'write-bytes', + u'write-bytes-avail', u'write-bytes-avail*', u'write-bytes-avail-evt', + u'write-bytes-avail/enable-break', u'write-char', u'write-special', + u'write-special-avail*', u'write-special-evt', u'write-string', + u'write-to-file', u'writeln', u'xor', u'zero?', u'~.a', u'~.s', u'~.v', u'~a', + u'~e', u'~r', u'~s', u'~v' ) _opening_parenthesis = r'[([{]' @@ -1407,7 +1407,7 @@ class NewLispLexer(RegexLexer): name = 'NewLisp' aliases = ['newlisp'] - filenames = ['*.lsp', '*.nl'] + filenames = ['*.lsp', '*.nl', '*.kif'] mimetypes = ['text/x-newlisp', 'application/x-newlisp'] flags = re.IGNORECASE | re.MULTILINE | re.UNICODE @@ -2496,7 +2496,7 @@ class XtlangLexer(RegexLexer): 'write-char', 'zero?', ) xtlang_functions = ( - 'printf', 'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!', + 'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!', 'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free', 'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor', 'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now', diff --git a/pygments/lexers/make.py b/pygments/lexers/make.py index 7ad616dd..9b6273d7 100644 --- a/pygments/lexers/make.py +++ b/pygments/lexers/make.py @@ -90,7 +90,7 @@ class BaseMakefileLexer(RegexLexer): bygroups(Keyword, Text), 'export'), (r'export\s+', Keyword), # assignment - (r'([\w${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)', + (r'([\w${}().-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)', bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))), # strings (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double), diff --git a/pygments/lexers/markup.py b/pygments/lexers/markup.py index aac8d27e..bb4ae6c5 100644 --- a/pygments/lexers/markup.py +++ b/pygments/lexers/markup.py @@ -24,7 +24,7 @@ from pygments.util 
import get_bool_opt, ClassNotFound __all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer', 'MozPreprocHashLexer', 'MozPreprocPercentLexer', 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer', - 'MozPreprocCssLexer'] + 'MozPreprocCssLexer', 'MarkdownLexer'] class BBCodeLexer(RegexLexer): @@ -500,3 +500,96 @@ class MozPreprocCssLexer(DelegatingLexer): super(MozPreprocCssLexer, self).__init__( CssLexer, MozPreprocPercentLexer, **options) + +class MarkdownLexer(RegexLexer): + """ + For `Markdown <https://help.github.com/categories/writing-on-github/>`_ markup. + + .. versionadded:: 2.2 + """ + name = 'markdown' + aliases = ['md'] + filenames = ['*.md'] + mimetypes = ["text/x-markdown"] + flags = re.MULTILINE + + def _handle_codeblock(self, match): + """ + match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks + """ + from pygments.lexers import get_lexer_by_name + + # section header + yield match.start(1), String , match.group(1) + yield match.start(2), String , match.group(2) + yield match.start(3), Text , match.group(3) + + # lookup lexer if wanted and existing + lexer = None + if self.handlecodeblocks: + try: + lexer = get_lexer_by_name( match.group(2).strip() ) + except ClassNotFound: + pass + code = match.group(4) + + # no lexer for this language. 
handle it like it was a code block + if lexer is None: + yield match.start(4), String, code + return + + for item in do_insertions([], lexer.get_tokens_unprocessed(code)): + yield item + + yield match.start(5), String , match.group(5) + + tokens = { + 'root': [ + # heading with pound prefix + (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)), + (r'^(#{2,6})(.+\n)', bygroups(Generic.Subheading, Text)), + # task list + (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)', + bygroups(Text, Keyword, Keyword, using(this, state='inline'))), + # bulleted lists + (r'^(\s*)([*-])(\s)(.+\n)', + bygroups(Text, Keyword, Text, using(this, state='inline'))), + # numbered lists + (r'^(\s*)([0-9]+\.)( .+\n)', + bygroups(Text, Keyword, using(this, state='inline'))), + # quote + (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), + # text block + (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), + # code block with language + (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock), + + include('inline'), + ], + 'inline': [ + # escape + (r'\\.', Text), + # italics + (r'(\s)([*_][^*_]+[*_])(\W|\n)', bygroups(Text, Generic.Emph, Text)), + # bold + # warning: the following rule eats internal tags. eg. **foo _bar_ baz** bar is not italics + (r'(\s)((\*\*|__).*\3)((?=\W|\n))', bygroups(Text, Generic.Strong, None, Text)), + # "proper way" (r'(\s)([*_]{2}[^*_]+[*_]{2})((?=\W|\n))', bygroups(Text, Generic.Strong, Text)), + # strikethrough + (r'(\s)(~~[^~]+~~)((?=\W|\n))', bygroups(Text, Generic.Deleted, Text)), + # inline code + (r'`[^`]+`', String.Backtick), + # mentions and topics (twitter and github stuff) + (r'[@#][\w/:]+', Name.Entity), + # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png) + (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))', bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)), + + # general text, must come last! 
+ (r'[^\\\s]+', Text), + (r'.', Text), + ], + } + + def __init__(self, **options): + self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) + RegexLexer.__init__(self, **options) diff --git a/pygments/lexers/monte.py b/pygments/lexers/monte.py new file mode 100644 index 00000000..aa5c75f7 --- /dev/null +++ b/pygments/lexers/monte.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.monte + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Monte programming language. + + :copyright: Copyright 2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Whitespace +from pygments.lexer import RegexLexer, include, words + +__all__ = ['MonteLexer'] + + +# `var` handled separately +# `interface` handled separately +_declarations = ['bind', 'def', 'fn', 'object'] +_methods = ['method', 'to'] +_keywords = [ + 'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports', + 'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import', + 'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when', + 'while', +] +_operators = [ + # Unary + '~', '!', + # Binary + '+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>', + # Binary augmented + '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=', + # Comparison + '==', '!=', '<', '<=', '>', '>=', '<=>', + # Patterns and assignment + ':=', '?', '=~', '!~', '=>', + # Calls and sends + '.', '<-', '->', +] +_escape_pattern = ( + r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' + r'\\["\'\\bftnr])') +#_char = _escape_chars + [('.', String.Char)] +_identifier = '[_a-zA-Z][_0-9a-zA-Z]*' + +_constants = [ + # Void constants + 'null', + # Bool constants + 'false', 'true', + # Double constants + 'Infinity', 'NaN', + # Special objects + 'M', 'Ref', 'throw', 'traceln', +] + +_guards = [ + 'Any', 'Binding', 'Bool', 'Bytes', 'Char', 
'DeepFrozen', 'Double', + 'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless', + 'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void', +] + +_safeScope = [ + '_accumulateList', '_accumulateMap', '_auditedBy', '_bind', + '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop', + '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList', + '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc', + '_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot', + '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher', + '_slotToBinding', '_splitList', '_suchThat', '_switchFailed', + '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser', + 'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser', +] + +class MonteLexer(RegexLexer): + """ + Lexer for the `Monte <https://monte.readthedocs.io/>`_ programming language. + + .. versionadded:: 2.2 + """ + name = 'Monte' + aliases = ['monte'] + filenames = ['*.mt'] + + tokens = { + 'root': [ + # Comments + (r'#[^\n]*\n', Comment), + + # Docstrings + # Apologies for the non-greedy matcher here. 
+ (r'/\*\*.*?\*/', String.Doc), + + # `var` declarations + (r'\bvar\b', Keyword.Declaration, 'var'), + + # `interface` declarations + (r'\binterface\b', Keyword.Declaration, 'interface'), + + # method declarations + (words(_methods, prefix='\\b', suffix='\\b'), + Keyword, 'method'), + + # All other declarations + (words(_declarations, prefix='\\b', suffix='\\b'), + Keyword.Declaration), + + # Keywords + (words(_keywords, prefix='\\b', suffix='\\b'), Keyword), + + # Literals + ('[+-]?0x[_0-9a-fA-F]+', Number.Hex), + (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float), + ('[+-]?[_0-9]+', Number.Integer), + ("'", String.Double, 'char'), + ('"', String.Double, 'string'), + + # Quasiliterals + ('`', String.Backtick, 'ql'), + + # Operators + (words(_operators), Operator), + + # Verb operators + (_identifier + '=', Operator.Word), + + # Safe scope constants + (words(_constants, prefix='\\b', suffix='\\b'), + Keyword.Pseudo), + + # Safe scope guards + (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type), + + # All other safe scope names + (words(_safeScope, prefix='\\b', suffix='\\b'), + Name.Builtin), + + # Identifiers + (_identifier, Name), + + # Punctuation + (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation), + + # Whitespace + (' +', Whitespace), + + # Definite lexer errors + ('=', Error), + ], + 'char': [ + # It is definitely an error to have a char of width == 0. + ("'", Error, 'root'), + (_escape_pattern, String.Escape, 'charEnd'), + ('.', String.Char, 'charEnd'), + ], + 'charEnd': [ + ("'", String.Char, '#pop:2'), + # It is definitely an error to have a char of width > 1. + ('.', Error), + ], + # The state of things coming into an interface. + 'interface': [ + (' +', Whitespace), + (_identifier, Name.Class, '#pop'), + include('root'), + ], + # The state of things coming into a method. 
+ 'method': [ + (' +', Whitespace), + (_identifier, Name.Function, '#pop'), + include('root'), + ], + 'string': [ + ('"', String.Double, 'root'), + (_escape_pattern, String.Escape), + (r'\n', String.Double), + ('.', String.Double), + ], + 'ql': [ + ('`', String.Backtick, 'root'), + (r'\$' + _escape_pattern, String.Escape), + (r'\$\$', String.Escape), + (r'@@', String.Escape), + (r'\$\{', String.Interpol, 'qlNest'), + (r'@\{', String.Interpol, 'qlNest'), + (r'\$' + _identifier, Name), + ('@' + _identifier, Name), + ('.', String.Backtick), + ], + 'qlNest': [ + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + # The state of things immediately following `var`. + 'var': [ + (' +', Whitespace), + (_identifier, Name.Variable, '#pop'), + include('root'), + ], + } diff --git a/pygments/lexers/ncl.py b/pygments/lexers/ncl.py index 23eba786..85f46f20 100644 --- a/pygments/lexers/ncl.py +++ b/pygments/lexers/ncl.py @@ -45,7 +45,7 @@ class NCLLexer(RegexLexer): 'begin', 'break', 'continue', 'create', 'defaultapp', 'do', 'else', 'end', 'external', 'exit', 'False', 'file', 'function', 'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local', - 'new', '_Missing', 'Missing', 'new', 'noparent', 'procedure', + 'new', '_Missing', 'Missing', 'noparent', 'procedure', 'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop', 'then', 'while'), prefix=r'\b', suffix=r'\s*\b'), Keyword), diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py index afd0fda5..dd45083c 100644 --- a/pygments/lexers/other.py +++ b/pygments/lexers/other.py @@ -36,5 +36,6 @@ from pygments.lexers.urbi import UrbiscriptLexer from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer from pygments.lexers.installers import NSISLexer, RPMSpecLexer from pygments.lexers.textedit import AwkLexer +from pygments.lexers.smv import NuSMVLexer __all__ = [] diff --git a/pygments/lexers/php.py b/pygments/lexers/php.py index 2421738f..1931325a 100644 --- a/pygments/lexers/php.py +++ 
b/pygments/lexers/php.py @@ -224,7 +224,7 @@ class PhpLexer(RegexLexer): String.Interpol)), (r'(\$\{)(\S+)(\})', bygroups(String.Interpol, Name.Variable, String.Interpol)), - (r'[${\\]+', String.Double) + (r'[${\\]', String.Double) ], } diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py index 7601afa8..35635ed1 100644 --- a/pygments/lexers/python.py +++ b/pygments/lexers/python.py @@ -116,7 +116,7 @@ class PythonLexer(RegexLexer): 'unichr', 'unicode', 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), - (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True' + (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls' r')\b', Name.Builtin.Pseudo), (words(( 'ArithmeticError', 'AssertionError', 'AttributeError', @@ -303,7 +303,7 @@ class Python3Lexer(RegexLexer): 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin), - (r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo), + (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo), (words(( 'ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', diff --git a/pygments/lexers/rnc.py b/pygments/lexers/rnc.py new file mode 100644 index 00000000..f60141e8 --- /dev/null +++ b/pygments/lexers/rnc.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.rnc + ~~~~~~~~~~~~~~~~~~~ + + Lexer for Relax-NG Compact syntax + + :copyright: Copyright 2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation + +__all__ = ['RNCCompactLexer'] + + +class RNCCompactLexer(RegexLexer): + """ + For `RelaxNG-compact <http://relaxng.org>`_ syntax. + + .. 
versionadded:: 2.2 + """ + + name = 'Relax-NG Compact' + aliases = ['rnc', 'rng-compact'] + filenames = ['*.rnc'] + + tokens = { + 'root': [ + (r'namespace\b', Keyword.Namespace), + (r'(?:default|datatypes)\b', Keyword.Declaration), + (r'##.*$', Comment.Preproc), + (r'#.*$', Comment.Single), + (r'"[^"]*"', String.Double), + # TODO single quoted strings and escape sequences outside of + # double-quoted strings + (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'), + (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'), + (r'[,?&*=|~]|>>', Operator), + (r'[(){}]', Punctuation), + (r'.', Text), + ], + + # a variable has been declared using `element` or `attribute` + 'variable': [ + (r'[^{]+', Name.Variable), + (r'\{', Punctuation, '#pop'), + ], + + # after an xsd:<datatype> declaration there may be attributes + 'maybe_xsdattributes': [ + (r'\{', Punctuation, 'xsdattributes'), + (r'\}', Punctuation, '#pop'), + (r'.', Text), + ], + + # attributes take the form { key1 = value1 key2 = value2 ... } + 'xsdattributes': [ + (r'[^ =}]', Name.Attribute), + (r'=', Operator), + (r'"[^"]*"', String.Double), + (r'\}', Punctuation, '#pop'), + (r'.', Text), + ], + } diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py index 5d1162b8..d3d98ee8 100644 --- a/pygments/lexers/rust.py +++ b/pygments/lexers/rust.py @@ -18,7 +18,7 @@ __all__ = ['RustLexer'] class RustLexer(RegexLexer): """ - Lexer for the Rust programming language (version 1.0). + Lexer for the Rust programming language (version 1.10). .. 
versionadded:: 1.6 """ @@ -49,17 +49,21 @@ class RustLexer(RegexLexer): (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), # Keywords (words(( - 'as', 'box', 'crate', 'do', 'else', 'enum', 'extern', # break and continue are in labels - 'fn', 'for', 'if', 'impl', 'in', 'loop', 'match', 'mut', 'priv', - 'proc', 'pub', 'ref', 'return', 'static', 'struct', - 'trait', 'true', 'type', 'unsafe', 'while'), suffix=r'\b'), + 'as', 'box', 'const', 'crate', 'else', 'extern', + 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', + 'mut', 'pub', 'ref', 'return', 'static', 'super', + 'trait', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'), Keyword), - (words(('alignof', 'be', 'const', 'offsetof', 'pure', 'sizeof', - 'typeof', 'once', 'unsized', 'yield'), suffix=r'\b'), + (words(('abstract', 'alignof', 'become', 'do', 'final', 'macro', + 'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof', + 'typeof', 'unsized', 'virtual', 'yield'), suffix=r'\b'), Keyword.Reserved), - (r'(mod|use)\b', Keyword.Namespace), (r'(true|false)\b', Keyword.Constant), + (r'mod\b', Keyword, 'modname'), (r'let\b', Keyword.Declaration), + (r'fn\b', Keyword, 'funcname'), + (r'(struct|enum|type|union)\b', Keyword, 'typename'), + (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)), (words(('u8', 'u16', 'u32', 'u64', 'i8', 'i16', 'i32', 'i64', 'usize', 'isize', 'f32', 'f64', 'str', 'bool'), suffix=r'\b'), Keyword.Type), @@ -88,11 +92,11 @@ class RustLexer(RegexLexer): 'Ok', 'Err', 'SliceConcatExt', 'String', 'ToString', - 'Vec', - ), suffix=r'\b'), + 'Vec'), suffix=r'\b'), Name.Builtin), # Labels - (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', bygroups(Keyword, Text.Whitespace, Name.Label)), + (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', + bygroups(Keyword, Text.Whitespace, Name.Label)), # Character Literal (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", @@ -148,6 +152,21 @@ class RustLexer(RegexLexer): (r'\*/', String.Doc, '#pop'), (r'[*/]', String.Doc), 
], + 'modname': [ + (r'\s+', Text), + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), + default('#pop'), + ], + 'funcname': [ + (r'\s+', Text), + (r'[a-zA-Z_]\w*', Name.Function, '#pop'), + default('#pop'), + ], + 'typename': [ + (r'\s+', Text), + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + default('#pop'), + ], 'number_lit': [ (r'[ui](8|16|32|64|size)', Keyword, '#pop'), (r'f(32|64)', Keyword, '#pop'), diff --git a/pygments/lexers/sas.py b/pygments/lexers/sas.py new file mode 100644 index 00000000..c91ea319 --- /dev/null +++ b/pygments/lexers/sas.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.sas + ~~~~~~~~~~~~~~~~~~~ + + Lexer for SAS. + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from pygments.lexer import RegexLexer, include, words +from pygments.token import Comment, Keyword, Name, Number, String, Text, \ + Other, Generic + +__all__ = ['SASLexer'] + +class SASLexer(RegexLexer): + """ + For `SAS <http://www.sas.com/>`_ files. + + .. 
versionadded:: 2.2 + """ + # Syntax from syntax/sas.vim by James Kidd <james.kidd@covance.com> + + name = 'SAS' + aliases = ['sas'] + filenames = ['*.SAS', '*.sas'] + mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas'] + flags = re.IGNORECASE | re.MULTILINE + + builtins_macros = ( + "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp", + "display", "do", "else", "end", "eval", "global", "goto", "if", + "index", "input", "keydef", "label", "left", "length", "let", + "local", "lowcase", "macro", "mend", "nrquote", + "nrstr", "put", "qleft", "qlowcase", "qscan", + "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan", + "str", "substr", "superq", "syscall", "sysevalf", "sysexec", + "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput", + "then", "to", "trim", "unquote", "until", "upcase", "verify", + "while", "window" + ) + + builtins_conditionals = ( + "do", "if", "then", "else", "end", "until", "while" + ) + + builtins_statements = ( + "abort", "array", "attrib", "by", "call", "cards", "cards4", + "catname", "continue", "datalines", "datalines4", "delete", "delim", + "delimiter", "display", "dm", "drop", "endsas", "error", "file", + "filename", "footnote", "format", "goto", "in", "infile", "informat", + "input", "keep", "label", "leave", "length", "libname", "link", + "list", "lostcard", "merge", "missing", "modify", "options", "output", + "out", "page", "put", "redirect", "remove", "rename", "replace", + "retain", "return", "select", "set", "skip", "startsas", "stop", + "title", "update", "waitsas", "where", "window", "x", "systask" + ) + + builtins_sql = ( + "add", "and", "alter", "as", "cascade", "check", "create", + "delete", "describe", "distinct", "drop", "foreign", "from", + "group", "having", "index", "insert", "into", "in", "key", "like", + "message", "modify", "msgtype", "not", "null", "on", "or", + "order", "primary", "references", "reset", "restrict", "select", + "set", "table", "unique", "update", "validate", "view", 
"where" + ) + + builtins_functions = ( + "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc", + "attrn", "band", "betainv", "blshift", "bnot", "bor", + "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv", + "close", "cnonct", "collate", "compbl", "compound", + "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb", + "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date", + "datejul", "datepart", "datetime", "day", "dclose", "depdb", + "depdbsl", "depsl", "depsyd", + "deptab", "dequote", "dhms", "dif", "digamma", + "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum", + "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp", + "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs", + "fexist", "fget", "fileexist", "filename", "fileref", + "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor", + "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint", + "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz", + "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn", + "hbound", "hms", "hosthelp", "hour", "ibessel", "index", + "indexc", "indexw", "input", "inputc", "inputn", "int", + "intck", "intnx", "intrr", "irr", "jbessel", "juldate", + "kurtosis", "lag", "lbound", "left", "length", "lgamma", + "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf", + "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute", + "mod", "month", "mopen", "mort", "n", "netpv", "nmiss", + "normal", "note", "npv", "open", "ordinal", "pathname", + "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke", + "probbeta", "probbnml", "probchi", "probf", "probgam", + "probhypr", "probit", "probnegb", "probnorm", "probt", + "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau", + "ranexp", "rangam", "range", "rank", "rannor", "ranpoi", + "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse", + "rewind", "right", "round", "saving", "scan", "sdf", "second", + "sign", "sin", "sinh", "skewness", "soundex", "spedis", + 
"sqrt", "std", "stderr", "stfips", "stname", "stnamel", + "substr", "sum", "symget", "sysget", "sysmsg", "sysprod", + "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv", + "tnonct", "today", "translate", "tranwrd", "trigamma", + "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var", + "varfmt", "varinfmt", "varlabel", "varlen", "varname", + "varnum", "varray", "varrayx", "vartype", "verify", "vformat", + "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw", + "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat", + "vinformatd", "vinformatdx", "vinformatn", "vinformatnx", + "vinformatw", "vinformatwx", "vinformatx", "vlabel", + "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype", + "vtypex", "weekday", "year", "yyq", "zipfips", "zipname", + "zipnamel", "zipstate" + ) + + tokens = { + 'root': [ + include('comments'), + include('proc-data'), + include('cards-datalines'), + include('logs'), + include('general'), + (r'.', Text), + ], + # SAS is multi-line regardless, but * is ended by ; + 'comments': [ + (r'^\s*\*.*?;', Comment), + (r'/\*.*?\*/', Comment), + (r'^\s*\*(.|\n)*?;', Comment.Multiline), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + ], + # Special highlight for proc, data, quit, run + 'proc-data': [ + (r'(^|;)\s*(proc [a-zA-Z0-9_]+|data|run|quit)[\s;\n]', + Keyword.Reserved), + ], + # Special highlight cards and datalines + 'cards-datalines': [ + (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'), + ], + 'data': [ + (r'(.|\n)*^\s*;\s*$', Other, '#pop'), + ], + # Special highlight for put NOTE|ERROR|WARNING (order matters) + 'logs': [ + (r'\n?^\s*%?put ', Keyword, 'log-messages'), + ], + 'log-messages': [ + (r'NOTE(:|-).*', Generic, '#pop'), + (r'WARNING(:|-).*', Generic.Emph, '#pop'), + (r'ERROR(:|-).*', Generic.Error, '#pop'), + (r'(?!(WARNING|NOTE|ERROR))+', Text, '#pop'), + include('general'), + ], + 'general': [ + include('keywords'), + include('vars-strings'), + include('special'), + include('numbers'), + ], + # 
Keywords, statements, functions, macros + 'keywords': [ + (words(builtins_statements, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_sql, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_conditionals, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_macros, + prefix = r'%', + suffix = r'\b'), + Name.Builtin), + (words(builtins_functions, + prefix = r'\b', + suffix = r'\('), + Name.Builtin), + ], + # Strings and user-defined variables and macros (order matters) + 'vars-strings': [ + (r'&[a-zA-Z_][a-zA-Z0-9_]{0,31}\.?', Name.Variable), + (r'%[a-zA-Z_][a-zA-Z0-9_]{0,31}', Name.Function), + (r'\'', String, 'string_squote'), + (r'"', String, 'string_dquote'), + ], + 'string_squote': [ + ('\'', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + # AFAIK, macro variables are not evaluated in single quotes + # (r'&', Name.Variable, 'validvar'), + (r'[^$\'\\]+', String), + (r'[$\'\\]', String), + ], + 'string_dquote': [ + (r'"', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + (r'&', Name.Variable, 'validvar'), + (r'[^$&"\\]+', String), + (r'[$"\\]', String), + ], + 'validvar': [ + (r'[a-zA-Z_][a-zA-Z0-9_]{0,31}\.?', Name.Variable, '#pop'), + ], + # SAS numbers and special variables + 'numbers': [ + (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b', + Number), + ], + 'special': [ + (r'(null|missing|_all_|_automatic_|_character_|_n_|' + r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)', + Keyword.Constant), + ], + # 'operators': [ + # (r'(-|=|<=|>=|<|>|<>|&|!=|' + # r'\||\*|\+|\^|/|!|~|~=)', Operator) + # ], + } diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py index ac0f7533..5849161b 100644 --- a/pygments/lexers/scripting.py +++ b/pygments/lexers/scripting.py @@ -50,36 +50,47 @@ class LuaLexer(RegexLexer): filenames = ['*.lua', '*.wlua'] mimetypes = ['text/x-lua', 'application/x-lua'] + _comment_multiline = r'(?:--\[(?P<level>=*)\[[\w\W]*?\](?P=level)\])' + 
_comment_single = r'(?:--.*$)' + _space = r'(?:\s+)' + _s = r'(?:%s|%s|%s)' % (_comment_multiline, _comment_single, _space) + _name = r'(?:[^\W\d]\w*)' + tokens = { 'root': [ - # lua allows a file to start with a shebang - (r'#!(.*?)$', Comment.Preproc), + # Lua allows a file to start with a shebang. + (r'#!.*', Comment.Preproc), default('base'), ], + 'ws': [ + (_comment_multiline, Comment.Multiline), + (_comment_single, Comment.Single), + (_space, Text), + ], 'base': [ - (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline), - ('--.*$', Comment.Single), + include('ws'), + (r'(?i)0x[\da-f]*(\.[\da-f]*)?(p[+-]?\d+)?', Number.Hex), (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), (r'(?i)\d+e[+-]?\d+', Number.Float), - ('(?i)0x[0-9a-f]*', Number.Hex), (r'\d+', Number.Integer), - (r'\n', Text), - (r'[^\S\n]', Text), # multiline strings (r'(?s)\[(=*)\[.*?\]\1\]', String), - (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator), + (r'::', Punctuation, 'label'), + (r'\.{3}', Punctuation), + (r'[=<>|~&+\-*/%#^]+|\.\.', Operator), (r'[\[\]{}().,:;]', Punctuation), (r'(and|or|not)\b', Operator.Word), ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' - r'while)\b', Keyword), + r'while)\b', Keyword.Reserved), + (r'goto\b', Keyword.Reserved, 'goto'), (r'(local)\b', Keyword.Declaration), (r'(true|false|nil)\b', Keyword.Constant), - (r'(function)\b', Keyword, 'funcname'), + (r'(function)\b', Keyword.Reserved, 'funcname'), (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), @@ -88,31 +99,38 @@ class LuaLexer(RegexLexer): ], 'funcname': [ - (r'\s+', Text), - ('(?:([A-Za-z_]\w*)(\.))?([A-Za-z_]\w*)', - bygroups(Name.Class, Punctuation, Name.Function), '#pop'), + include('ws'), + (r'[.:]', Punctuation), + (r'%s(?=%s*[.:])' % (_name, _s), Name.Class), + (_name, Name.Function, '#pop'), # inline function ('\(', Punctuation, '#pop'), ], - # if I understand correctly, every character is valid in a lua string, - # so this state is only for later corrections - 'string': [ - ('.', 
String) + 'goto': [ + include('ws'), + (_name, Name.Label, '#pop'), + ], + + 'label': [ + include('ws'), + (r'::', Punctuation, '#pop'), + (_name, Name.Label), ], 'stringescape': [ - (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + (r'\\([abfnrtv\\"\']|[\r\n]{1,2}|z\s*|x[0-9a-fA-F]{2}|\d{1,3}|' + r'u\{[0-9a-fA-F]+\})', String.Escape), ], 'sqs': [ - ("'", String, '#pop'), - include('string') + (r"'", String.Single, '#pop'), + (r"[^\\']+", String.Single), ], 'dqs': [ - ('"', String, '#pop'), - include('string') + (r'"', String.Double, '#pop'), + (r'[^\\"]+', String.Double), ] } diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py index ad2e2d7a..ae790b9e 100644 --- a/pygments/lexers/shell.py +++ b/pygments/lexers/shell.py @@ -83,7 +83,7 @@ class BashLexer(RegexLexer): (r'&', Punctuation), (r'\|', Punctuation), (r'\s+', Text), - (r'\d+(?= |\Z)', Number), + (r'\d+\b', Number), (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), (r'<', Text), ], @@ -137,11 +137,15 @@ class ShellSessionBaseLexer(Lexer): pos = 0 curcode = '' insertions = [] + backslash_continuation = False for match in line_re.finditer(text): line = match.group() m = re.match(self._ps1rgx, line) - if m: + if backslash_continuation: + curcode += line + backslash_continuation = curcode.endswith('\\\n') + elif m: # To support output lexers (say diff output), the output # needs to be broken by prompts whenever the output lexer # changes. 
@@ -151,10 +155,12 @@ class ShellSessionBaseLexer(Lexer): insertions.append((len(curcode), [(0, Generic.Prompt, m.group(1))])) curcode += m.group(2) + backslash_continuation = curcode.endswith('\\\n') elif line.startswith(self._ps2): insertions.append((len(curcode), [(0, Generic.Prompt, line[:len(self._ps2)])])) curcode += line[len(self._ps2):] + backslash_continuation = curcode.endswith('\\\n') else: if insertions: toks = innerlexer.get_tokens_unprocessed(curcode) @@ -452,9 +458,9 @@ class BatchLexer(RegexLexer): bygroups(String.Double, using(this, state='string'), Text, Punctuation)), (r'"', String.Double, ('#pop', 'for2', 'string')), - (r"('(?:%s|[\w\W])*?')([%s%s]*)(\))" % (_variable, _nl, _ws), + (r"('(?:%%%%|%s|[\w\W])*?')([%s%s]*)(\))" % (_variable, _nl, _ws), bygroups(using(this, state='sqstring'), Text, Punctuation)), - (r'(`(?:%s|[\w\W])*?`)([%s%s]*)(\))' % (_variable, _nl, _ws), + (r'(`(?:%%%%|%s|[\w\W])*?`)([%s%s]*)(\))' % (_variable, _nl, _ws), bygroups(using(this, state='bqstring'), Text, Punctuation)), include('for2') ], diff --git a/pygments/lexers/smv.py b/pygments/lexers/smv.py new file mode 100644 index 00000000..15fc9381 --- /dev/null +++ b/pygments/lexers/smv.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.smv + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the SMV languages. + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words +from pygments.token import Comment, Generic, Keyword, Name, Number, \ + Operator, Punctuation, Text + +__all__ = ['NuSMVLexer'] + + +class NuSMVLexer(RegexLexer): + """ + Lexer for the NuSMV language. 
+ """ + + name = 'NuSMV' + aliases = ['nusmv'] + filenames = ['*.smv'] + mimetypes = [] + + tokens = { + 'root': [ + # Comments + (r'(?s)\/\-\-.*?\-\-/', Comment), + (r'--.*\n', Comment), + + # Reserved + (words(('MODULE','DEFINE','MDEFINE','CONSTANTS','VAR','IVAR', + 'FROZENVAR','INIT','TRANS','INVAR','SPEC','CTLSPEC','LTLSPEC', + 'PSLSPEC','COMPUTE','NAME','INVARSPEC','FAIRNESS','JUSTICE', + 'COMPASSION','ISA','ASSIGN','CONSTRAINT','SIMPWFF','CTLWFF', + 'LTLWFF','PSLWFF','COMPWFF','IN','MIN','MAX','MIRROR','PRED', + 'PREDICATES'), suffix=r'(?![\w$#-])'), Keyword.Declaration), + (r'process(?![\w$#-])', Keyword), + (words(('array','of','boolean','integer','real','word'), + suffix=r'(?![\w$#-])'), Keyword.Type), + (words(('case','esac'), suffix=r'(?![\w$#-])'), Keyword), + (words(('word1','bool','signed','unsigned','extend','resize', + 'sizeof','uwconst','swconst','init','self','count','abs','max', + 'min'), suffix=r'(?![\w$#-])'), Name.Builtin), + (words(('EX','AX','EF','AF','EG','AG','E','F','O','G','H','X','Y', + 'Z','A','U','S','V','T','BU','EBF','ABF','EBG','ABG','next', + 'mod','union','in','xor','xnor'), suffix=r'(?![\w$#-])'), + Operator.Word), + (words(('TRUE','FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant), + + # Names + (r'[a-zA-Z_][\w$#-]*', Name.Variable), + + # Operators + (r':=', Operator), + (r'[&\|\+\-\*/<>!=]', Operator), + + # Literals + (r'\-?\d+\b', Number.Integer), + (r'0[su][bB]\d*_[01_]+', Number.Bin), + (r'0[su][oO]\d*_[01234567_]+', Number.Oct), + (r'0[su][dD]\d*_[\d_]+', Number.Dec), + (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex), + + # Whitespace, punctuation and the rest + (r'\s+', Text.Whitespace), + (r'[\(\)\[\]\{\};\?:\.,]', Punctuation), + (r'.', Generic.Error), + ] + } + diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py index 7c06226b..e225a66e 100644 --- a/pygments/lexers/sql.py +++ b/pygments/lexers/sql.py @@ -41,17 +41,19 @@ import re from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words -from 
pygments.token import Punctuation, \ +from pygments.token import Punctuation, Whitespace, Error, \ Text, Comment, Operator, Keyword, Name, String, Number, Generic from pygments.lexers import get_lexer_by_name, ClassNotFound from pygments.util import iteritems from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \ PSEUDO_TYPES, PLPGSQL_KEYWORDS +from pygments.lexers import _tsql_builtins __all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer', - 'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'RqlLexer'] + 'SqlLexer', 'TransactSqlLexer', 'MySqlLexer', + 'SqliteConsoleLexer', 'RqlLexer'] line_re = re.compile('.*?\n') @@ -151,7 +153,7 @@ class PostgresLexer(PostgresBase, RegexLexer): tokens = { 'root': [ (r'\s+', Text), - (r'--.*?\n', Comment.Single), + (r'--.*\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (r'(' + '|'.join(s.replace(" ", "\s+") for s in DATATYPES + PSEUDO_TYPES) @@ -378,7 +380,7 @@ class SqlLexer(RegexLexer): tokens = { 'root': [ (r'\s+', Text), - (r'--.*?\n', Comment.Single), + (r'--.*\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (words(( 'ABORT', 'ABS', 'ABSOLUTE', 'ACCESS', 'ADA', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', @@ -479,6 +481,62 @@ class SqlLexer(RegexLexer): } +class TransactSqlLexer(RegexLexer): + """ + Transact-SQL (T-SQL) is Microsoft's and Sybase's proprietary extension to + SQL. + + The list of keywords includes ODBC and keywords reserved for future use.. + """ + + name = 'Transact-SQL' + aliases = ['tsql', 't-sql'] + filenames = ['*.sql'] + mimetypes = ['text/x-tsql'] + + # Use re.UNICODE to allow non ASCII letters in names. 
+ flags = re.IGNORECASE | re.UNICODE + tokens = { + 'root': [ + (r'\s+', Whitespace), + (r'--(?m).*?$\n?', Comment.Single), + (r'/\*', Comment.Multiline, 'multiline-comments'), + (words(_tsql_builtins.OPERATORS), Operator), + (words(_tsql_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word), + (words(_tsql_builtins.TYPES, suffix=r'\b'), Name.Class), + (words(_tsql_builtins.FUNCTIONS, suffix=r'\b'), Name.Function), + (r'(goto)(\s+)(\w+\b)', bygroups(Keyword, Whitespace, Name.Label)), + (words(_tsql_builtins.KEYWORDS, suffix=r'\b'), Keyword), + (r'(\[)([^]]+)(\])', bygroups(Operator, Name, Operator)), + (r'0x[0-9a-f]+', Number.Hex), + # Float variant 1, for example: 1., 1.e2, 1.2e3 + (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float), + # Float variant 2, for example: .1, .1e2 + (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), + # Float variant 3, for example: 123e45 + (r'[0-9]+e[+-]?[0-9]+', Number.Float), + (r'[0-9]+', Number.Integer), + (r"'(''|[^'])*'", String.Single), + (r'"(""|[^"])*"', String.Symbol), + (r'[;(),.]', Punctuation), + # Below we use \w even for the first "real" character because + # tokens starting with a digit have already been recognized + # as Number above. + (r'@@\w+', Name.Builtin), + (r'@\w+', Name.Variable), + (r'(\w+)(:)', bygroups(Name.Label, Punctuation)), + (r'#?#?\w+', Name), # names for temp tables and anything else + (r'\?', Name.Variable.Magic), # parameter for prepared statements + ], + 'multiline-comments': [ + (r'/\*', Comment.Multiline, 'multiline-comments'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[^/*]+', Comment.Multiline), + (r'[/*]', Comment.Multiline) + ] + } + + class MySqlLexer(RegexLexer): """ Special lexer for MySQL. 
@@ -492,7 +550,7 @@ class MySqlLexer(RegexLexer): tokens = { 'root': [ (r'\s+', Text), - (r'(#|--\s+).*?\n', Comment.Single), + (r'(#|--\s+).*\n?', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (r'[0-9]+', Number.Integer), (r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float), diff --git a/pygments/lexers/stata.py b/pygments/lexers/stata.py new file mode 100644 index 00000000..b2be64d2 --- /dev/null +++ b/pygments/lexers/stata.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.stata + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for Stata + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Comment, Keyword, Name, Number, \ + String, Text, Operator + +from pygments.lexers._stata_builtins import builtins_base, builtins_functions + +__all__ = ['StataLexer'] + +class StataLexer(RegexLexer): + """ + For `Stata <http://www.stata.com/>`_ do files. + + .. 
versionadded:: 2.2 + """ + # Syntax based on + # - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado + # - http://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js + # - http://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim + + name = 'Stata' + aliases = ['stata', 'do'] + filenames = ['*.do', '*.ado'] + mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata'] + + tokens = { + 'root': [ + include('comments'), + include('vars-strings'), + include('numbers'), + include('keywords'), + (r'.', Text), + ], + # Global and local macros; regular and special strings + 'vars-strings': [ + (r'\$[a-zA-Z_0-9\{]', Name.Variable.Global, 'var_validglobal'), + (r'`[a-zA-Z_0-9]{0,31}\'', Name.Variable), + (r'"', String, 'string_dquote'), + (r'`"', String, 'string_mquote'), + ], + # For either string type, highlight macros as macros + 'string_dquote': [ + (r'"', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + (r'\$', Name.Variable.Global, 'var_validglobal'), + (r'`', Name.Variable, 'var_validlocal'), + (r'[^$\$`"\\]+', String), + (r'[$"\\]', String), + ], + 'string_mquote': [ + (r'"\'', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + (r'\$', Name.Variable.Global, 'var_validglobal'), + (r'`', Name.Variable, 'var_validlocal'), + (r'[^$\$`"\\]+', String), + (r'[$"\\]', String), + ], + 'var_validglobal': [ + (r'\{?[a-zA-Z0-9_]{0,32}\}?', Name.Variable.Global, '#pop'), + ], + 'var_validlocal': [ + (r'[a-zA-Z0-9_]{0,31}\'', Name.Variable, '#pop'), + ], + # * only OK at line start, // OK anywhere + 'comments': [ + (r'^\s*\*.*$', Comment), + (r'//.*', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + ], + # Built in functions and statements + 'keywords': [ + (words(builtins_functions, prefix = r'\b', suffix = r'\('), + Name.Function), + (words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'), + Keyword), + ], + # http://www.stata.com/help.cgi?operators + 'operators': [ + 
(r'-|==|<=|>=|<|>|&|!=', Operator), + (r'\*|\+|\^|/|!|~|==|~=', Operator) + ], + # Stata numbers + 'numbers': [ + # decimal number + (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b', + Number), + ], + # Stata formats + 'format': [ + (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Variable), + (r'%(21x|16H|16L|8H|8L)', Name.Variable), + (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg).{0,32}', Name.Variable), + (r'%[-~]?\d{1,4}s', Name.Variable), + ] + } diff --git a/pygments/lexers/supercollider.py b/pygments/lexers/supercollider.py index cef147b8..137b753c 100644 --- a/pygments/lexers/supercollider.py +++ b/pygments/lexers/supercollider.py @@ -74,7 +74,7 @@ class SuperColliderLexer(RegexLexer): (words(('true', 'false', 'nil', 'inf'), suffix=r'\b'), Keyword.Constant), (words(( 'Array', 'Boolean', 'Date', 'Error', 'Function', 'Number', - 'Object', 'Packages', 'RegExp', 'String', 'Error', + 'Object', 'Packages', 'RegExp', 'String', 'isFinite', 'isNaN', 'parseFloat', 'parseInt', 'super', 'thisFunctionDef', 'thisFunction', 'thisMethod', 'thisProcess', 'thisThread', 'this'), suffix=r'\b'), diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py index 3e55b6ad..e6eeaa25 100644 --- a/pygments/lexers/templates.py +++ b/pygments/lexers/templates.py @@ -44,7 +44,7 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer', 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer', - 'TwigLexer', 'TwigHtmlLexer'] + 'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer'] class ErbLexer(Lexer): @@ -2174,3 +2174,79 @@ class TwigHtmlLexer(DelegatingLexer): def __init__(self, **options): super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer, **options) + + +class Angular2Lexer(RegexLexer): + """ + Generic `angular2 <http://victorsavkin.com/post/119943127151/angular-2-template-syntax>` template lexer. 
+ + Highlights only the Angular template tags (stuff between `{{` and `}}` and + special attributes: '(event)=', '[property]=', '[(twoWayBinding)]='). + Everything else is left for a delegating lexer. + + .. versionadded:: 2.1a0 + """ + + name = "Angular2" + aliases = ['ng2'] + + tokens = { + 'root': [ + (r'[^{([*#]+', Other), + + # {{meal.name}} + (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'), + + # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar" + (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)', + bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text), 'attr'), + (r'([([]+)([\w:.-]+)([\])]+)(\s*)', + bygroups(Punctuation, Name.Attribute, Punctuation, Text)), + + # *ngIf="..."; #f="ngForm" + (r'([*#])([\w:.-]+)(\s*)(=)(\s*)', + bygroups(Punctuation, Name.Attribute, Punctuation, Operator), 'attr'), + (r'([*#])([\w:.-]+)(\s*)', + bygroups(Punctuation, Name.Attribute, Punctuation)), + ], + + 'ngExpression': [ + (r'\s+(\|\s+)?', Text), + (r'\}\}', Comment.Preproc, '#pop'), + + # Literals + (r':?(true|false)', String.Boolean), + (r':?"(\\\\|\\"|[^"])*"', String.Double), + (r":?'(\\\\|\\'|[^'])*'", String.Single), + (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" + r"0[xX][0-9a-fA-F]+[Ll]?", Number), + + # Variabletext + (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable), + (r'\.[\w-]+(\(.*\))?', Name.Variable), + + # inline If + (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)', bygroups(Operator, Text, String, Text, Operator, Text, String, Text)), + ], + 'attr': [ + ('".*?"', String, '#pop'), + ("'.*?'", String, '#pop'), + (r'[^\s>]+', String, '#pop'), + ], + } + + +class Angular2HtmlLexer(DelegatingLexer): + """ + Subclass of the `Angular2Lexer` that highlights unlexed data with the + `HtmlLexer`. + + .. versionadded:: 2.0 + """ + + name = "HTML + Angular2" + aliases = ["html+ng2"] + filenames = ['*.ng2'] + + def __init__(self, **options): + super(Angular2HtmlLexer, self).__init__(HtmlLexer, Angular2Lexer, **options)
\ No newline at end of file diff --git a/pygments/lexers/theorem.py b/pygments/lexers/theorem.py index f8c7d0a9..6f16d030 100644 --- a/pygments/lexers/theorem.py +++ b/pygments/lexers/theorem.py @@ -394,7 +394,7 @@ class LeanLexer(RegexLexer): 'import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition', 'renaming', 'inline', 'hiding', 'exposing', 'parameter', 'parameters', 'conjecture', 'hypothesis', 'lemma', 'corollary', 'variable', 'variables', - 'print', 'theorem', 'axiom', 'inductive', 'structure', 'universe', 'alias', + 'theorem', 'axiom', 'inductive', 'structure', 'universe', 'alias', 'help', 'options', 'precedence', 'postfix', 'prefix', 'calc_trans', 'calc_subst', 'calc_refl', 'infix', 'infixl', 'infixr', 'notation', 'eval', 'check', 'exit', 'coercion', 'end', 'private', 'using', 'namespace', @@ -415,15 +415,16 @@ class LeanLexer(RegexLexer): ) operators = ( - '!=', '#', '&', '&&', '*', '+', '-', '/', '@', '!', '`', - '-.', '->', '.', '..', '...', '::', ':>', ';', ';;', '<', - '<-', '=', '==', '>', '_', '|', '||', '~', '=>', '<=', '>=', - '/\\', '\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥', + u'!=', u'#', u'&', u'&&', u'*', u'+', u'-', u'/', u'@', u'!', u'`', + u'-.', u'->', u'.', u'..', u'...', u'::', u':>', u';', u';;', u'<', + u'<-', u'=', u'==', u'>', u'_', u'|', u'||', u'~', u'=>', u'<=', u'>=', + u'/\\', u'\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥', u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈', u'×', u'⌞', u'⌟', u'≡', u'⟨', u'⟩', ) - punctuation = ('(', ')', ':', '{', '}', '[', ']', u'⦃', u'⦄', ':=', ',') + punctuation = (u'(', u')', u':', u'{', u'}', u'[', u']', u'⦃', u'⦄', + u':=', u',') tokens = { 'root': [ diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py index e64a601b..de8e598b 100644 --- a/pygments/lexers/varnish.py +++ b/pygments/lexers/varnish.py @@ -91,14 +91,14 @@ class VCLLexer(RegexLexer): 'resp.reason', 'bereq.url', 'beresp.do_esi', 'beresp.proto', 'client.ip', 
'bereq.proto', 'server.hostname', 'remote.ip', 'req.backend_hint', 'server.identity', 'req_top.url', 'beresp.grace', 'beresp.was_304', - 'server.ip', 'bereq.uncacheable', 'now'), suffix=r'\b'), + 'server.ip', 'bereq.uncacheable'), suffix=r'\b'), Name.Variable), (r'[!%&+*\-,/<.}{>=|~]+', Operator), (r'[();]', Punctuation), (r'[,]+', Punctuation), - (words(('include', 'hash_data', 'regsub', 'regsuball', 'if', 'else', - 'elsif', 'elif', 'synth', 'synthetic', 'ban', 'synth', + (words(('hash_data', 'regsub', 'regsuball', 'if', 'else', + 'elsif', 'elif', 'synth', 'synthetic', 'ban', 'return', 'set', 'unset', 'import', 'include', 'new', 'rollback', 'call'), suffix=r'\b'), Keyword), diff --git a/pygments/lexers/verification.py b/pygments/lexers/verification.py index 4042d44e..3e77e04a 100644 --- a/pygments/lexers/verification.py +++ b/pygments/lexers/verification.py @@ -88,13 +88,14 @@ class SilverLexer(RegexLexer): 'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh', 'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection', 'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists', - 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique'), + 'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique', + 'apply', 'package', 'folding', 'label'), suffix=r'\b'), Keyword), (words(('Int', 'Perm', 'Bool', 'Ref'), suffix=r'\b'), Keyword.Type), include('numbers'), - (r'[!%&*+=|?:<>/-]', Operator), - (r"([{}():;,.])", Punctuation), + (r'[!%&*+=|?:<>/\-\[\]]', Operator), + (r'([{}():;,.])', Punctuation), # Identifier (r'[\w$]\w*', Name), ], diff --git a/pygments/lexers/whiley.py b/pygments/lexers/whiley.py new file mode 100644 index 00000000..0795a030 --- /dev/null +++ b/pygments/lexers/whiley.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.whiley + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Whiley language. + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, String, Text + +__all__ = ['WhileyLexer'] + + +class WhileyLexer(RegexLexer): + """ + Lexer for the Whiley programming language. + """ + name = 'Whiley' + filenames = ['*.whiley'] + aliases = ['whiley'] + mimetypes = ['text/x-whiley'] + + # See the language specification: + # http://whiley.org/download/WhileyLanguageSpec.pdf + + tokens = { + 'root': [ + # Whitespace + (r'\s+', Text), + + # Comments + (r'//.*', Comment.Single), + # don't parse empty comment as doc comment + (r'/\*\*/', Comment.Multiline), + (r'(?s)/\*\*.*?\*/', String.Doc), + (r'(?s)/\*.*?\*/', Comment.Multiline), + + # Keywords + (words(( + 'if', 'else', 'while', 'for', 'do', 'return', + 'switch', 'case', 'default', 'break', 'continue', + 'requires', 'ensures', 'where', 'assert', 'assume', + 'all', 'no', 'some', 'in', 'is', 'new', + 'throw', 'try', 'catch', 'debug', 'skip', 'fail', + 'finite', 'total', + ), suffix=r'\b'), Keyword.Reserved), + (words(( + 'function', 'method', 'public', 'private', 'protected', + 'export', 'native', + ), suffix=r'\b'), Keyword.Declaration), + # "constant" & "type" are not keywords unless used in declarations + (r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b', + bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)), + (r'(true|false|null)\b', Keyword.Constant), + (r'(bool|byte|int|real|any|void)\b', Keyword.Type), + # "from" is not a keyword unless used with import + (r'(import)(\s+)(\*)([^\S\n]+)(from)\b', + bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)), + (r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b', + bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)), + (r'(package|import)\b', Keyword.Namespace), + + # standard library: https://github.com/Whiley/WhileyLibs/ + (words(( + # types defined in whiley.lang.Int + 'i8', 'i16', 'i32', 'i64', + 'u8', 'u16', 'u32', 
'u64', + 'uint', 'nat', + + # whiley.lang.Any + 'toString', + ), suffix=r'\b'), Name.Builtin), + + # byte literal + (r'[01]+b', Number.Bin), + + # decimal literal + (r'[0-9]+\.[0-9]+', Number.Float), + # match "1." but not ranges like "3..5" + (r'[0-9]+\.(?!\.)', Number.Float), + + # integer literal + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + + # character literal + (r"""'[^\\]'""", String.Char), + (r"""(')(\\['"\\btnfr])(')""", + bygroups(String.Char, String.Escape, String.Char)), + + # string literal + (r'"', String, 'string'), + + # operators and punctuation + (r'[{}()\[\],.;]', Punctuation), + (u'[+\\-*/%&|<>^!~@=:?' + # unicode operators + u'\u2200\u2203\u2205\u2282\u2286\u2283\u2287' + u'\u222A\u2229\u2264\u2265\u2208\u2227\u2228' + u']', Operator), + + # identifier + (r'[a-zA-Z_]\w*', Name), + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\[btnfr]', String.Escape), + (r'\\u[0-9a-fA-F]{4}', String.Escape), + (r'\\.', String), + (r'[^\\"]+', String), + ], + } diff --git a/pygments/regexopt.py b/pygments/regexopt.py index 79903684..047c703f 100644 --- a/pygments/regexopt.py +++ b/pygments/regexopt.py @@ -54,7 +54,7 @@ def regex_opt_inner(strings, open_paren): return open_paren + regex_opt_inner(rest, '') + '|' \ + make_charset(oneletter) + close_paren # print '-> only 1-character' - return make_charset(oneletter) + return open_paren + make_charset(oneletter) + close_paren prefix = commonprefix(strings) if prefix: plen = len(prefix) diff --git a/pygments/styles/sas.py b/pygments/styles/sas.py new file mode 100644 index 00000000..8b7fc56e --- /dev/null +++ b/pygments/styles/sas.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +""" + pygments.styles.sas + ~~~~~~~~~~~~~~~~~~~ + + Style inspired by SAS' enhanced program editor. Note This is not + meant to be a complete style. It's merely meant to mimic SAS' + program editor syntax highlighting. 
+""" + +from pygments.style import Style +from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Other, Whitespace, Generic + + +class SasStyle(Style): + """ + Style inspired by SAS' enhanced program editor. Note This is not + meant to be a complete style. It's merely meant to mimic SAS' + program editor syntax highlighting. + """ + + default_style = '' + + styles = { + Whitespace: '#bbbbbb', + Comment: 'italic #008800', + String: '#800080', + Number: 'bold #2e8b57', + Other: 'bg:#ffffe0', + Keyword: '#2c2cff', + Keyword.Reserved: 'bold #353580', + Keyword.Constant: 'bold', + Name.Builtin: '#2c2cff', + Name.Function: 'bold italic', + Name.Variable: 'bold #2c2cff', + Generic: '#2c2cff', + Generic.Emph: '#008800', + Generic.Error: '#d30202', + Error: 'bg:#e3d2d2 #a61717' + } diff --git a/pygments/styles/stata.py b/pygments/styles/stata.py new file mode 100644 index 00000000..2d8abb7e --- /dev/null +++ b/pygments/styles/stata.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" + pygments.styles.stata + ~~~~~~~~~~~~~~~~~~~~~ + + Style inspired by Stata's do-file editor. Note this is not meant + to be a complete style. It's merely meant to mimic Stata's do file + editor syntax highlighting. +""" + +from pygments.style import Style +from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Whitespace + + +class StataStyle(Style): + """ + Style inspired by Stata's do-file editor. Note this is not meant + to be a complete style. It's merely meant to mimic Stata's do file + editor syntax highlighting. 
+ """ + + default_style = '' + + styles = { + Whitespace: '#bbbbbb', + Comment: 'italic #008800', + String: '#7a2424', + Number: '#2c2cff', + Operator: '', + Keyword: 'bold #353580', + Keyword.Constant: '', + Name.Function: '#2c2cff', + Name.Variable: 'bold #35baba', + Name.Variable.Global: 'bold #b5565e', + Error: 'bg:#e3d2d2 #a61717' + } diff --git a/tests/examplefiles/capdl_example.cdl b/tests/examplefiles/capdl_example.cdl new file mode 100644 index 00000000..050e56a6 --- /dev/null +++ b/tests/examplefiles/capdl_example.cdl @@ -0,0 +1,64 @@ +#ifdef ARCH_ARM +arch arm11 +#else +arch ia32 +#endif + +objects { + my_ep = ep /* A synchronous endpoint */ + + /* Two thread control blocks */ + tcb1 = tcb + tcb2 = tcb + + /* Four frames of physical memory */ + frame1 = frame (4k) + frame2 = frame (4k) + frame3 = frame (4k) + frame4 = frame (4k) + + /* Two page tables */ + pt1 = pt + pt2 = pt + + /* Two page directories */ + pd1 = pd + pd2 = pd + + /* Two capability nodes */ + cnode1 = cnode (2 bits) + cnode2 = cnode (3 bits) +} +caps { + cnode1 { + 0x1: frame1 (RW) /* read/write */ + 0x2: my_ep (R) /* read-only */ + } + cnode2 { + 0x1: my_ep (W) /* write-only */ + } + tcb1 { + vspace: pd1 + ipc_buffer_slot: frame1 + cspace: cnode1 + } + pd1 { + 0x10: pt1 + } + pt1 { + 0x8: frame1 (RW) + 0x9: frame2 (R) + } + tcb2 { + vspace: pd2 + ipc_buffer_slot: frame3 + cspace: cnode2 + } + pd2 { + 0x10: pt2 + } + pt2 { + 0x10: frame3 (RW) + 0x12: frame4 (R) + } +} diff --git a/tests/examplefiles/demo.frt b/tests/examplefiles/demo.frt new file mode 100644 index 00000000..1b09ebb0 --- /dev/null +++ b/tests/examplefiles/demo.frt @@ -0,0 +1,3 @@ +2 3 + CR . +: F ( blah ) DUP DROP 1 + ; +1 F CR . 
diff --git a/tests/examplefiles/durexmania.aheui b/tests/examplefiles/durexmania.aheui new file mode 100644 index 00000000..89654c00 --- /dev/null +++ b/tests/examplefiles/durexmania.aheui @@ -0,0 +1,4 @@ +우주메이저☆듀렉스전도사♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♡먊 +삶은밥과야근밥샤주세양♡밥사밥사밥사밥사밥사땅땅땅빵☆따밦내발따밦다빵맣밥밥밥내놔밥줘밥밥밥밗땅땅땅박밝땅땅딻타밟타맣밦밣따박타맣밦밣따박타맣밦밣따박타맣박빵빵빵빵따따따따맣삶몲 +Original Source by @harunene // Run it on AheuiChem(http://yoo2001818.github.io/AheuiChem/) +https://gist.github.com/item4/ca870a63b390da6cc6f1 diff --git a/tests/examplefiles/example.bat b/tests/examplefiles/example.bat index 596f65de..2b45d2bc 100644 --- a/tests/examplefiles/example.bat +++ b/tests/examplefiles/example.bat @@ -205,5 +205,7 @@ for /f "tokens=2 delims==" %%G in ( 'assoc %+;/p extension'),%' ) &>nul ver
if errorlevel 0 if not errorlevel 1 set /a _passed+=1
goto :eof
+FOR /F %%a IN ('%%c%%') DO %%a
+rem %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x%
:/?
goto :fail
diff --git a/tests/examplefiles/example.juttle b/tests/examplefiles/example.juttle new file mode 100644 index 00000000..ae861996 --- /dev/null +++ b/tests/examplefiles/example.juttle @@ -0,0 +1,110 @@ +/* Block comment */ +/* + Multiline block + comment +*/ + +// inline comment +function juttleFunction(arg) { + if (arg == null) { + return null; + } + else if (arg == 0) { + return 'zero'; + } + else if (arg == 1) { + return "one"; + } + else { + return 1.1; + } +} + +reducer juttleReducer(field) { + var x = 0; + function update() { + x = *field; + } + + function result() { + return x; + } +} + +sub myemit(limit) { + emit -limit limit +} + +input test: text -default 'input'; +const object = { + xyz: 123, + name: 'something' +}; + +const array = [ + :2016-01-01:, + :2016-01-01T01:00:00:, + :2016-01-01T01:00:00.000:, + :2016-01-01T01:00:00.000Z:, + :2016-01-01T01:00:00.000-0800:, + :2016-01-01T01:00:00.000-08:00:, + :00:00:01:, + :00:00:00.001:, + :now:, + :beginning:, + :end:, + :forever:, + :yesterday:, + :today:, + :tomorrow:, + :1:, + :1.1:, + :1s:, + :1 second:, + :1 seconds:, + :100ms:, + :100 millisecond:, + :100 milliseconds:, + :1d:, + :1 day:, + :1 days:, + :.2h:, + :1.2h:, + :.2 hour:, + :1.2 hours:, + :.5d:, + :1.5d:, + :.5 day:, + :1.5 days:, + :5m:, + :5 minutes:, + :10w:, + :10 weeks:, + :10M:, + :10 months:, + :100y:, + :100 years:, + :1 year and 2 months and 2 days: +]; + +emit + | batch :10 minutes: + | filter x=true + | head 1 + | join + | keep x + | pace -every :1 minute: + | pass + | put y=false + | remove z + | sequence + | skip 1 + | sort field -desc + | split field + | tail 10 + | unbatch + | uniq field +; + +read adapter -last :day: 'search' AND field~/pattern/ OR field == 'string' + | write adapter diff --git a/tests/examplefiles/example.lua b/tests/examplefiles/example.lua index 0289e58c..8ecd6a13 100644 --- a/tests/examplefiles/example.lua +++ b/tests/examplefiles/example.lua @@ -247,4 +247,28 @@ function AucAdvanced.Debug.Assert(test, 
message) return DebugLib.Assert(addonName, test, message) end +--[==[ +Here follow further tests of Lua syntax. +]]==] +---[[ +local t = { + [ [[ +x +]==] \]]]=1|2; a={b={c={}}}, + 1, 1., 1.2, .2, 1e3, 1.e3, 1.2e3, .2e3, 1.2e+3, 1.2E-3; + 0xA, 0Xa, 0xA., 0x.F, 0xA.F, 0xA.Fp1, 0xA.FP+1, 0Xa.fp-1; +} +function t.f() + goto eof + os.exit() + :: eof :: +end + +function t . a --[==[x]==] .b --[==[y]==] -- +-- () end + . c : d (file) + return '.\a.\b.\f.\n.\r.\t.\v.\\.\".\'.\ +.\z + .\0.\00.\000.\0000.\xFa.\u{1}.\u{1234}' +end diff --git a/tests/examplefiles/example.md b/tests/examplefiles/example.md new file mode 100644 index 00000000..2befb107 --- /dev/null +++ b/tests/examplefiles/example.md @@ -0,0 +1,61 @@ +# this is a header + +## this is a 2nd level header + +* list item 1 + * list item 1.1 +* list item 2 +- list item 3 + +1. numbered list item 1 +1. numbered list item 2 + +- [ ] todo +- [x] done +- [X] done + +The following is italic: *italic* +The following is italic: _italic_ + +The following is not italic: \*italic\* +The following is not italic: \_italic\_ + +The following is not italic: snake*case*word +The following is not italic: snake_case_word + +The following is bold: **bold** **two or more words** +The following is bold: __bold__ __two or more words__ + +The following is not bold: snake**case**word +The following is not bold: snake__case__word + +The following is strikethrough: ~~bold~~ +The following is not strikethrough: snake~~case~~word + +The following is bold with italics inside: **the next _word_ should have been italics** + +> this is a quote + +> this is a multiline +> quote string thing + +this sentence `has monospace` in it + +this sentence @tweets a person about a #topic. 
+ +[google](https://google.com/some/path.html) +![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png) + +``` + * this is just unformated + __text__ +``` + +some other text + +```python +from pygments import token +# comment +``` + +some more text diff --git a/tests/examplefiles/example.ng2 b/tests/examplefiles/example.ng2 new file mode 100644 index 00000000..0f424aca --- /dev/null +++ b/tests/examplefiles/example.ng2 @@ -0,0 +1,11 @@ +<div>
+ <p>{{order.DueTime | date:'d. MMMM yyyy HH:mm'}}</p>
+ <p>Status: {{order.OrderState}}</p>
+ <button (click)="deleteOrder()" *ngIf="cancelable" [value]="test" [(twoWayTest)]="foo.bar">Remove</button>
+ <ul>
+ <li *ngFor="#meal of order.Positions">
+ {{meal.Name}}
+ </li>
+ </ul>
+ <p>Preis: <b>{{order.TotalPrice | currency:'EUR':true:'1.2-2'}}</b></p>
+</div>
\ No newline at end of file diff --git a/tests/examplefiles/example.sbl b/tests/examplefiles/example.sbl new file mode 100644 index 00000000..94efada5 --- /dev/null +++ b/tests/examplefiles/example.sbl @@ -0,0 +1,109 @@ +/* Stemmer for Esperanto in UTF-8 */
+
+strings ()
+
+integers ()
+
+booleans ( foreign )
+
+routines (
+ apostrophe
+ canonical_form
+ correlative
+ interjection
+ short_word
+ standard_suffix
+ unuj
+)
+
+externals ( stem )
+
+groupings ( vowel aiou ao ou )
+
+stringdef a' decimal '225'
+stringdef e' hex 'E9'
+stringdef i' hex 'ED'
+stringdef o' hex ' f3'
+stringdef u' hex 'fa '
+
+stringdef cx hex '0109'
+stringdef gx hex '011D'
+stringdef hx hex '0125'
+stringdef jx hex '0135'
+stringdef sx hex '015D'
+stringdef ux hex '016D'
+
+define canonical_form as repeat (
+ [substring]
+ among (
+stringescapes //
+ '/a'/' (<- 'a' set foreign)
+ '/e'/' (<- 'e' set foreign)
+ '/i'/' (<- 'i' set foreign)
+ '/o'/' (<- 'o' set foreign)
+ '/u'/' (<- 'u' set foreign)
+stringescapes `'
+ 'cx' (<- '`cx'')
+ 'gx' (<- '`gx'')
+ 'hx' (<- '`hx'')
+ 'jx' (<- '`jx'')
+ 'sx' (<- '`sx'')
+ 'ux' (<- '`ux'')
+ '' (next)
+ )
+)
+
+backwardmode (
+ stringescapes { }
+
+ define apostrophe as (
+ (['un{'}'] atlimit <- 'unu') or
+ (['l{'}'] atlimit <- 'la') or
+ (['{'}'] <- 'o')
+ )
+
+ define vowel 'aeiou'
+ define aiou vowel - 'e'
+ define ao 'ao'
+ define ou 'ou'
+
+ define short_word as not (loop (maxint * 0 + 4 / 2) gopast vowel)
+
+ define interjection as (
+ among ('adia{ux}' 'aha' 'amen' 'hola' 'hura' 'mia{ux}' 'muu' 'oho')
+ atlimit
+ )
+
+ define correlative as (
+ []
+ // Ignore -al, -am, etc. since they can't be confused with suffixes.
+ test (
+ ('a' or (try 'n'] 'e') or (try 'n' try 'j'] ou))
+ 'i'
+ try ('k' or 't' or '{cx}' or 'nen')
+ atlimit
+ )
+ delete
+ )
+
+ define unuj as (
+ [try 'n' 'j'] 'unu' atlimit delete
+ )
+
+ define standard_suffix as (
+ [
+ try ((try 'n' try 'j' ao) or (try 's' aiou) or (try 'n' 'e'))
+ try '-' try 'a{ux}'
+ ] delete
+ )
+)
+
+define stem as (
+ do canonical_form
+ not foreign
+ backwards (
+ do apostrophe
+ short_word or interjection or
+ correlative or unuj or do standard_suffix
+ )
+)
diff --git a/tests/examplefiles/example.tasm b/tests/examplefiles/example.tasm new file mode 100644 index 00000000..d7202ffb --- /dev/null +++ b/tests/examplefiles/example.tasm @@ -0,0 +1,527 @@ +;----------------------------------------------------------------------------; +; Does A* pathfinding for rockraiders and vehicles +; +; Copyright 2015 Ruben De Smet +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are +; met: +; +; (1) Redistributions of source code must retain the above copyright +; notice, this list of conditions and the following disclaimer. +; +; (2) Redistributions in binary form must reproduce the above copyright +; notice, this list of conditions and the following disclaimer in +; the documentation and/or other materials provided with the +; distribution. +; +; (3) The name of the author may not be used to +; endorse or promote products derived from this software without +; specific prior written permission. +; +; THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +; IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +; DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +; INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +; HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +; STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +; IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +; POSSIBILITY OF SUCH DAMAGE. 
+; +;----------------------------------------------------------------------------; + +IDEAL +P386 +MODEL FLAT, C +ASSUME cs:_TEXT,ds:FLAT,es:FLAT,fs:FLAT,gs:FLAT + +INCLUDE "ASTAR.INC" +INCLUDE "READLVL.INC" +INCLUDE "DEBUG.INC" + +STRUC TPriorityField + heuristic dd ? + distance dd ? + x db ? + y db ? + fromx db ? + fromy db ? +ENDS + +STRUC TField + distance dd ? + x db ? + y db ? +ENDS + +CODESEG + +PROC getPath + USES ecx + ARG @@tgtx:dword, \ + @@tgty:dword \ + RETURNS eax, ebx ; eax contains x, ebx contains y + + call getLevelWidth + imul eax, [@@tgty] + add eax, [@@tgtx] + imul eax, SIZE TField + add eax, offset backtraceGraph + mov ecx, eax + + xor eax, eax + xor ebx, ebx + + mov al, [(TField ptr ecx).x] + mov bl, [(TField ptr ecx).y] + + ret +ENDP getPath + +PROC findPath + ; eax will contain a 1 when a path has been found + ; 0 otherwise. + ARG @@srcx:dword, \ + @@srcy:dword, \ + @@tgtx:dword, \ + @@tgty:dword, \ + @@type:dword \ + RETURNS eax + + ; Check whether the target field is "allowed" for + ; the selected vehicle or rock raider + call getField, [@@tgtx], [@@tgty] + mov al, [byte ptr eax] + and eax, 0FFh + + add eax, offset actionTable + mov eax, [eax] + and eax, [@@type] ; TODO: for now, rock raider is hard coded + jnz @canGoToTarget + + mov eax, 0 + ret +@canGoToTarget: + + call cleanData + mov eax, [@@type] + mov [currentType], eax + + mov eax, [@@srcx] + mov [currentOpen.x], al + mov eax, [@@srcy] + mov [currentOpen.y], al + + call distance, [@@srcx], [@@srcy], [@@tgtx], [@@tgty] + ; eax <- distance + call addOpen, [@@srcx], [@@srcy], eax, 0 + +@openListNotEmpty: + call popOpen + cmp eax, 0 + je @openListEmpty + + call addToMap + + call addClosed + + mov eax, [@@tgtx] + cmp [currentOpen.x], al + jne @nextOpen + mov eax, [@@tgty] + cmp [currentOpen.y], al + jne @nextOpen + + jmp @routeFound + + @nextOpen: + call addNeighbours, [@@tgtx], [@@tgty] + + jmp @openListNotEmpty + +@openListEmpty: + mov eax, 0 + ret + +@routeFound: + mov eax, 1 + ret 
+ENDP findPath + +PROC addToMap + USES eax, ecx + + call getLevelWidth + xor ecx, ecx + mov cl, [currentOpen.y] + imul eax, ecx + mov cl, [currentOpen.x] + add eax, ecx + imul eax, SIZE TField + add eax, offset backtraceGraph + + mov ecx, [currentOpen.distance] + cmp [(TField ptr eax).distance], ecx + jbe @dontAdd + + mov [(TField ptr eax).distance], ecx + mov cl, [currentOpen.fromx] + mov [(TField ptr eax).x], cl + mov cl, [currentOpen.fromy] + mov [(TField ptr eax).y], cl + +@dontAdd: + ret +ENDP addToMap + +; Is closed checks whether the field considered is "closed" for being added to the open list. +; So, it also checks whether we can go on the selected field. +PROC isClosed + USES ebx, ecx, edx + ARG @@x:dword, \ + @@y:dword RETURNS eax + + ; Check bounds first: + + call getLevelWidth + cmp [@@x], eax + ja notWithinBounds ; ja considers -1 > 10 + + call getLevelHeight + cmp [@@y], eax + ja notWithinBounds + + ; Check whether this field is "allowed" for + ; the selected vehicle or rock raider + call getField, [@@x], [@@y] + mov al, [byte ptr eax] + and eax, 0FFh + + add eax, offset actionTable + mov eax, [eax] + and eax, [currentType] ; TODO: for now, rock raider is hard coded + jnz @canGoHere + + + inc eax ; mov eax, 1 + ret + +@canGoHere: + + ; Getting here means the field is okay to walk/fly/whatever on + + xor ecx, ecx + mov cx, [closedlistSize] + cmp cx, 0 ; If empty, return 0 + jne @closedNotEmpty + + mov eax, 0 + ret + +@closedNotEmpty: + mov ebx, offset closedlist + +@loopClosed: + mov edx, [@@x] + cmp [(TField ptr ebx).x], dl + jne @nextClosed + mov edx, [@@y] + cmp [(TField ptr ebx).y], dl + jne @nextClosed + + ; If reached here, yep, contained in closed list + mov eax, 1 + ret + + @nextClosed: + add ebx, SIZE TField + dec ecx + jnz @loopClosed + + mov eax, 0 + ret + +notWithinBounds: + mov eax, 1 + ret +ENDP isClosed + +PROC addNeighbours + USES eax, ebx, ecx, edx + ARG @@tgtx:dword, \ + @@tgty:dword + ; Push all neighbours of currentOpen on openList 
+ + xor ebx, ebx + xor ecx, ecx + + mov bl, [currentOpen.x] + mov cl, [currentOpen.y] + mov edx, [currentOpen.distance] + inc edx ; Next distance is one more. + + ; Up + dec ecx + call isClosed, ebx, ecx + cmp eax, 0 + jne @noUp + call distance, ebx, ecx, [@@tgtx], [@@tgty] + add eax, edx + call addOpen, ebx, ecx, eax, edx + @noUp: + inc ecx + + ; Right + inc ebx + call isClosed, ebx, ecx + cmp eax, 0 + jne @noRight + call distance, ebx, ecx, [@@tgtx], [@@tgty] + add eax, edx + call addOpen, ebx, ecx, eax, edx + @noRight: + dec ebx + + ; Left + dec ebx + call isClosed, ebx, ecx + cmp eax, 0 + jne @noLeft + call distance, ebx, ecx, [@@tgtx], [@@tgty] + add eax, edx + call addOpen, ebx, ecx, eax, edx + @noLeft: + inc ebx + + ; Down + inc ecx + call isClosed, ebx, ecx + cmp eax, 0 + jne @noDown + call distance, ebx, ecx, [@@tgtx], [@@tgty] + add eax, edx + call addOpen, ebx, ecx, eax, edx + @noDown: + dec ecx + + ret +ENDP addNeighbours + +PROC popOpen + ARG RETURNS eax + USES ebx, ecx, edx, esi, edi + ; eax contains the smallest current heuristic + ; ebx contains the index of that field + + cmp [openlistSize], 0 ; If empty, return 0 + jne @goForth + + mov eax, 0 + ret + +@goForth: + + mov eax, 0FFFFFFFFh ; Longest distance possible in 32 bits. + xor ebx, ebx + xor ecx, ecx ; ecx contains the current index + +@searchFurther: + mov edx, ecx + imul edx, SIZE TPriorityField + cmp [(TPriorityField ptr (openlist + edx)).heuristic], eax + ja @notBetter + ; Better guess found, put right values in eax and ebx + mov eax, [(TPriorityField ptr (openlist + edx)).heuristic] + mov ebx, ecx + +@notBetter: + + inc ecx + cmp cx, [openlistSize] + jne @searchFurther + + ; By now, we have found the right item to pop from the priorityqueue. 
+ + ; Move the correct item in currentOpen + mov ecx, SIZE TPriorityField + mov esi, ebx + imul esi, ecx + add esi, offset openlist + + mov edi, offset currentOpen + rep movsb + + ; Now make the remove the thing from the vector + + xor ecx, ecx + mov cx, [openlistSize] + sub ecx, ebx + dec ecx + imul ecx, SIZE TPriorityField + mov edi, esi + sub edi, SIZE TPriorityField + rep movsb + + dec [openlistSize] + mov eax, 1 + ret +ENDP popOpen + +PROC addClosed + USES eax, ebx + + xor ebx, ebx + xor eax, eax + + mov bx, [closedlistSize] + imul ebx, SIZE TField + add ebx, offset closedlist ; ebx contains the target TField + + mov al, [currentOpen.x] + mov [(TField ptr ebx).x], al + mov al, [currentOpen.y] + mov [(TField ptr ebx).y], al + mov eax, [currentOpen.distance] + mov [(TField ptr ebx).distance], eax + + inc [closedlistSize] + cmp [closedlistSize], CLOSED_LIST_SIZE_MAX + jne @noProblemWithClosedVector + + xor eax, eax + mov ax, [closedlistSize] + call crash, offset closedOutOfMemory, eax + +@noProblemWithClosedVector: + ret +ENDP addClosed + +PROC addOpen + USES eax, ebx + ARG @@x:dword, \ + @@y:dword, \ + @@priority:dword, \ + @@distance:dword + + xor eax, eax + mov ax, [openlistSize] + imul eax, SIZE TPriorityField + add eax, offset openlist + + mov ebx, [@@x] + mov [(TPriorityField ptr eax).x], bl + mov ebx, [@@y] + mov [(TPriorityField ptr eax).y], bl + + mov bl, [currentOpen.x] + mov [(TPriorityField ptr eax).fromx], bl + mov bl, [currentOpen.y] + mov [(TPriorityField ptr eax).fromy], bl + + mov ebx, [@@priority] + mov [(TPriorityField ptr eax).heuristic], ebx + mov ebx, [@@distance] + mov [(TPriorityField ptr eax).distance], ebx + + inc [openlistSize] + cmp [openlistSize], OPEN_LIST_SIZE_MAX + jne @noProblem + + xor eax, eax + mov ax, [openlistSize] + call crash, offset openOutOfMemory, eax + +@noProblem: + ret +ENDP + +PROC distance + USES ebx + ARG @@srcx:dword, \ + @@srcy:dword, \ + @@tgtx:dword, \ + @@tgty:dword \ + RETURNS eax + + mov eax, [@@srcx] + sub 
eax, [@@tgtx] + + jns @noSignChangex + neg eax + + @noSignChangex: + + mov ebx, [@@srcy] + sub ebx, [@@tgty] + + jns @noSignChangey + neg ebx + + @noSignChangey: + add eax, ebx + ret +ENDP distance + +PROC cleanData + USES eax, ecx + mov [openlistSize], 0 + mov [closedlistSize], 0 + + mov [currentOpen.x], -1 + mov [currentOpen.y], -1 + mov [currentOpen.distance], 0 + + call getLevelWidth + mov ecx, eax + call getLevelHeight + imul ecx, eax + + mov eax, offset backtraceGraph +@fieldIter: + mov [(TField ptr eax).distance], 0ffffffffh ; Set to approximately +inf + mov [(TField ptr eax).x], 0 + mov [(TField ptr eax).y], 0 + add eax, SIZE TField + dec ecx + jnz @fieldIter + + ret +ENDP cleanData + +DATASEG + +openOutOfMemory db "Out of openlistSize memory. Hi dev: Please increase$" +closedOutOfMemory db "Out of closedlistSize memory. Hi dev: Please increase$" + +; power | discover | walking | sailing | flying +actionTable db 00001101b, \ ;EMPTY + 00001101b, \ ;RUBBLE + 00000000b, \ ;GRAVEL + 00000000b, \ ;LOOSE ROCK + 00000000b, \ ;HARD ROCK + 00000000b, \ ;MASSIVE ROCK + 00000000b, \ ;KRISTAL SOURCE + 00000000b, \ ;OREROCK + 00001011b, \ ;WATER + 00001001b, \ ;LAVA + 00001101b, \ ;SNAIL HOLE + 00001101b, \ ;EROSION + 00011101b, \ ;POWER PATH + 00011101b, \ ;BUILDING POWER PATH + 00011000b \ ;BUILDING + +UDATASEG + +currentType dd ? +currentOpen TPriorityField ? + +openlist TPriorityField OPEN_LIST_SIZE_MAX dup(?) +openlistSize dw ? +closedlist TField CLOSED_LIST_SIZE_MAX dup(?) +closedlistSize dw ? +backtraceGraph TField MAX_LEVEL_SIZE dup(?) + +END diff --git a/tests/examplefiles/example.whiley b/tests/examplefiles/example.whiley new file mode 100644 index 00000000..74b39370 --- /dev/null +++ b/tests/examplefiles/example.whiley @@ -0,0 +1,296 @@ +/** + * Example Whiley program, taken from the Whiley benchmark suite. 
+ * https://github.com/Whiley/WyBench/blob/master/src/101_interpreter/Main.whiley + */ + +import whiley.lang.System +import whiley.lang.Int +import whiley.io.File +import string from whiley.lang.ASCII +import char from whiley.lang.ASCII + +// ==================================================== +// A simple calculator for expressions +// ==================================================== + +constant ADD is 0 +constant SUB is 1 +constant MUL is 2 +constant DIV is 3 + +// binary operation +type BOp is (int x) where ADD <= x && x <= DIV +type BinOp is { BOp op, Expr lhs, Expr rhs } + +// variables +type Var is { string id } + +// list access +type ListAccess is { + Expr src, + Expr index +} + +// expression tree +type Expr is int | // constant + Var | // variable + BinOp | // binary operator + Expr[] | // array constructor + ListAccess // list access + +// values +type Value is int | Value[] + +// stmts +type Print is { Expr rhs } +type Set is { string lhs, Expr rhs } +type Stmt is Print | Set + +// ==================================================== +// Expression Evaluator +// ==================================================== + +type RuntimeError is { string msg } +type Environment is [{string k, Value v}] + +// Evaluate an expression in a given environment reducing either to a +// value, or a runtime error. The latter occurs if evaluation gets +// "stuck" (e.g. 
expression is // not well-formed) +function evaluate(Expr e, Environment env) -> Value | RuntimeError: + // + if e is int: + return e + else if e is Var: + return env[e.id] + else if e is BinOp: + Value|RuntimeError lhs = evaluate(e.lhs, env) + Value|RuntimeError rhs = evaluate(e.rhs, env) + // check if stuck + if !(lhs is int && rhs is int): + return {msg: "arithmetic attempted on non-numeric value"} + // switch statement would be good + if e.op == ADD: + return lhs + rhs + else if e.op == SUB: + return lhs - rhs + else if e.op == MUL: + return lhs * rhs + else if rhs != 0: + return lhs / rhs + return {msg: "divide-by-zero"} + else if e is Expr[]: + [Value] r = [] + for i in e: + Value|RuntimeError v = evaluate(i, env) + if v is RuntimeError: + return v + else: + r = r ++ [v] + return r + else if e is ListAccess: + Value|RuntimeError src = evaluate(e.src, env) + Value|RuntimeError index = evaluate(e.index, env) + // santity checks + if src is [Value] && index is int && index >= 0 && index < |src|: + return src[index] + else: + return {msg: "invalid list access"} + else: + return 0 // dead-code + +// ==================================================== +// Expression Parser +// ==================================================== + +type State is { string input, int pos } +type SyntaxError is { string msg, int start, int end } + +function SyntaxError(string msg, int start, int end) -> SyntaxError: + return { msg: msg, start: start, end: end } + +// Top-level parse method +function parse(State st) -> (Stmt,State)|SyntaxError: + // + Var keyword, Var v + Expr e + int start = st.pos + // + keyword,st = parseIdentifier(st) + switch keyword.id: + case "print": + any r = parseAddSubExpr(st) + if !(r is SyntaxError): + e,st = r + return {rhs: e},st + else: + return r // error case + case "set": + st = parseWhiteSpace(st) + v,st = parseIdentifier(st) + any r = parseAddSubExpr(st) + if !(r is SyntaxError): + e,st = r + return {lhs: v.id, rhs: e},st + else: + return r // 
error case + default: + return SyntaxError("unknown statement",start,st.pos-1) + +function parseAddSubExpr(State st) -> (Expr, State)|SyntaxError: + // + Expr lhs, Expr rhs + // First, pass left-hand side + any r = parseMulDivExpr(st) + // + if r is SyntaxError: + return r + // + lhs,st = r + st = parseWhiteSpace(st) + // Second, see if there is a right-hand side + if st.pos < |st.input| && st.input[st.pos] == '+': + // add expression + st.pos = st.pos + 1 + r = parseAddSubExpr(st) + if !(r is SyntaxError): + rhs,st = r + return {op: ADD, lhs: lhs, rhs: rhs},st + else: + return r + else if st.pos < |st.input| && st.input[st.pos] == '-': + // subtract expression + st.pos = st.pos + 1 + r = parseAddSubExpr(st) + if !(r is SyntaxError): + rhs,st = r + return {op: SUB, lhs: lhs, rhs: rhs},st + else: + return r + // No right-hand side + return (lhs,st) + +function parseMulDivExpr(State st) -> (Expr, State)|SyntaxError: + // First, parse left-hand side + Expr lhs, Expr rhs + any r = parseTerm(st) + if r is SyntaxError: + return r + // + lhs,st = r + st = parseWhiteSpace(st) + // Second, see if there is a right-hand side + if st.pos < |st.input| && st.input[st.pos] == '*': + // add expression + st.pos = st.pos + 1 + r = parseMulDivExpr(st) + if !(r is SyntaxError): + rhs,st = r + return {op: MUL, lhs: lhs, rhs: rhs}, st + else: + return r + else if st.pos < |st.input| && st.input[st.pos] == '/': + // subtract expression + st.pos = st.pos + 1 + r = parseMulDivExpr(st) + if !(r is SyntaxError): + rhs,st = r + return {op: DIV, lhs: lhs, rhs: rhs}, st + else: + return r + // No right-hand side + return (lhs,st) + +function parseTerm(State st) -> (Expr, State)|SyntaxError: + // + st = parseWhiteSpace(st) + if st.pos < |st.input|: + if ASCII.isLetter(st.input[st.pos]): + return parseIdentifier(st) + else if ASCII.isDigit(st.input[st.pos]): + return parseNumber(st) + else if st.input[st.pos] == '[': + return parseList(st) + // + return SyntaxError("expecting number or 
variable",st.pos,st.pos) + +function parseIdentifier(State st) -> (Var, State): + // + string txt = "" + // inch forward until end of identifier reached + while st.pos < |st.input| && ASCII.isLetter(st.input[st.pos]): + txt = txt ++ [st.input[st.pos]] + st.pos = st.pos + 1 + return ({id:txt}, st) + +function parseNumber(State st) -> (Expr, State)|SyntaxError: + // inch forward until end of identifier reached + int start = st.pos + while st.pos < |st.input| && ASCII.isDigit(st.input[st.pos]): + st.pos = st.pos + 1 + // + int|null iv = Int.parse(st.input[start..st.pos]) + if iv == null: + return SyntaxError("Error parsing number",start,st.pos) + else: + return iv, st + +function parseList(State st) -> (Expr, State)|SyntaxError: + // + st.pos = st.pos + 1 // skip '[' + st = parseWhiteSpace(st) + [Expr] l = [] // initial list + bool firstTime = true + while st.pos < |st.input| && st.input[st.pos] != ']': + if !firstTime && st.input[st.pos] != ',': + return SyntaxError("expecting comma",st.pos,st.pos) + else if !firstTime: + st.pos = st.pos + 1 // skip ',' + firstTime = false + any r = parseAddSubExpr(st) + if r is SyntaxError: + return r + else: + Expr e + e,st = r + // perform annoying error check + l = l ++ [e] + st = parseWhiteSpace(st) + st.pos = st.pos + 1 + return l,st + +// Parse all whitespace upto end-of-file +function parseWhiteSpace(State st) -> State: + while st.pos < |st.input| && ASCII.isWhiteSpace(st.input[st.pos]): + st.pos = st.pos + 1 + return st + +// ==================================================== +// Main Method +// ==================================================== + +public method main(System.Console sys): + if(|sys.args| == 0): + sys.out.println("no parameter provided!") + else: + File.Reader file = File.Reader(sys.args[0]) + string input = ASCII.fromBytes(file.readAll()) + + Environment env = Environment() + State st = {pos: 0, input: input} + while st.pos < |st.input|: + Stmt s + any r = parse(st) + if r is SyntaxError: + 
sys.out.println("syntax error: " ++ r.msg) + return + s,st = r + Value|RuntimeError v = evaluate(s.rhs,env) + if v is RuntimeError: + sys.out.println("runtime error: " ++ v.msg) + return + if s is Set: + env[s.lhs] = v + else: + sys.out.println(r) + st = parseWhiteSpace(st) + diff --git a/tests/examplefiles/fibonacci.tokigun.aheui b/tests/examplefiles/fibonacci.tokigun.aheui new file mode 100644 index 00000000..afa2ca05 --- /dev/null +++ b/tests/examplefiles/fibonacci.tokigun.aheui @@ -0,0 +1,4 @@ +바싹반박나싼순 +뿌멓떠벌번멍뻐 +쌀삭쌀살다순옭 +어어선썬설썩옭 diff --git a/tests/examplefiles/guidance.smv b/tests/examplefiles/guidance.smv new file mode 100644 index 00000000..671d1e1c --- /dev/null +++ b/tests/examplefiles/guidance.smv @@ -0,0 +1,1124 @@ +-- +-- Shuttle Digital Autopilot +-- by Sergey Berezin (berez@cs.cmu.edu) +-- +MODULE cont_3eo_mode_select(start,smode5,vel,q_bar,apogee_alt_LT_alt_ref, + h_dot_LT_hdot_reg2,alpha_n_GRT_alpha_reg2, + delta_r_GRT_del_r_usp,v_horiz_dnrng_LT_0, + high_rate_sep,meco_confirmed) + +VAR cont_3EO_start: boolean; + RTLS_abort_declared: boolean; + region_selected : boolean; + m_mode: {mm102, mm103, mm601}; + r: {reg-1, reg0, reg1, reg2, reg3, reg102}; + step : {1,2,3,4,5,6,7,8,9,10, exit, undef}; + +ASSIGN + init(cont_3EO_start) := FALSE; + init(m_mode) := {mm102, mm103}; + init(region_selected) := FALSE; + init(RTLS_abort_declared) := FALSE; + init(r) := reg-1; + init(step) := undef; + + next(step) := + case + step = 1 & m_mode = mm102 : exit; + step = 1 : 2; + step = 2 & smode5 : 5; + step = 2 & vel = GRT_vi_3eo_max: exit; + step = 2 : 3; + step = 3 & vel = LEQ_vi_3eo_min : 6; + step = 3 : 4; + step = 4 & apogee_alt_LT_alt_ref: exit; + step = 4 : 6; + step = 5 : 6; + step = 6 & r = reg0 : exit; + step = 6 : 7; + step = 7 : 8; + step = 8 & q_bar = GRT_qbar_reg3 & !high_rate_sep : 10; + step = 8 : 9; + step = 9 : 10; + step = 10: exit; + next(start): 1; + step = exit : undef; + TRUE: step; + esac; + + next(cont_3EO_start) := + case + step = 1 & m_mode = 
mm102 : TRUE; + step = 10 & meco_confirmed : TRUE; + TRUE : cont_3EO_start; + esac; + + next(r) := + case + step = 1 & m_mode = mm102 : reg102; + step = 2 & !smode5 & vel = GRT_vi_3eo_max: reg0; + step = 4 & apogee_alt_LT_alt_ref: reg0; + step = 5 & v_horiz_dnrng_LT_0 & delta_r_GRT_del_r_usp : reg0; + step = 8 & q_bar = GRT_qbar_reg3 & !high_rate_sep : reg3; + step = 9: case + (h_dot_LT_hdot_reg2 & alpha_n_GRT_alpha_reg2 & + q_bar = GRT_qbar_reg1) | high_rate_sep : reg2; + TRUE : reg1; + esac; + next(step) = 1 : reg-1; + TRUE: r; + esac; + + next(RTLS_abort_declared) := + case + step = 10 & meco_confirmed & m_mode = mm103 : TRUE; + TRUE: RTLS_abort_declared; + esac; + + next(m_mode) := + case + step = 10 & meco_confirmed & m_mode = mm103 : mm601; + TRUE: m_mode; + esac; + + next(region_selected) := + case + next(step) = 1 : FALSE; + next(step) = exit : TRUE; + TRUE : region_selected; + esac; + +MODULE cont_3eo_guide(start,cont_3EO_start, mode_select_completed, et_sep_cmd, + h_dot_LT_0, q_bar_a_GRT_qbar_max_sep, m_mode, r0, + cont_minus_z_compl, t_nav-t_et_sep_GRT_dt_min_z_102, + ABS_q_orb_GRT_q_minus_z_max, ABS_r_orb_GRT_r_minus_z_max, + excess_OMS_propellant, q_bar_a_LT_qbar_oms_dump, + entry_mnvr_couter_LE_0, rcs_all_jet_inhibit, + alt_GRT_alt_min_102_dump, t_nav-t_gmtlo_LT_t_dmp_last, + pre_sep, cond_18, q_orb_LT_0, ABS_alf_err_LT_alf_sep_err, + cond_20b, cond_21, ABS_beta_n_GRT_beta_max, cond_24, cond_26, + cond_27, cond_29, mm602_OK) +VAR + step: {1,a1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, + b20, c20, d20, 21,22,23,24,25,26,27,28,29,exit, undef}; + call_RTLS_abort_task : boolean; + first3: boolean; -- indicates if it is the first pass + first8: boolean; + first27: boolean; + s_unconv : boolean; + mode_2_indicator : boolean; + et_sep_man_initiate : boolean; + emerg_sep : boolean; + cont_3eo_pr_delay : {minus_z_reg1, minus_z_reg2, + minus_z_reg3, minus_z_reg4, minus_z_reg102, 0, 5}; + etsep_y_drift : {undef, minus_z_reg1, minus_z_reg2, + 
minus_z_reg3, minus_z_reg4, minus_z_reg102, 0}; + fwd_rcs_dump_enable : boolean; + fcs_accept_icnct : boolean; + oms_rcs_i_c_inh_ena_cmd : boolean; + orbiter_dump_ena : boolean; + frz_3eo : boolean; + high_rate_sep: boolean; + entry_gains : boolean; + cont_sep_cplt : boolean; + pch_cmd_reg4 : boolean; + alpha_ok : boolean; + r : {reg-1, reg0, reg1, reg2, reg3, reg4, reg102}; + early_sep : boolean; +-------------------------------------------- +----- Additional Variables ----------------- +-------------------------------------------- + rtls_lo_f_d_delay : {undef, 0}; + wcb2 : {undef, reg1_0, reg2_neg4, wcb2_3eo, reg4_0, + reg102_undef, post_sep_0}; + q_gcb_i : {undef, quat_reg1, quat_reg2, quat_reg3, quat_reg4, + quat_reg102_undef, quat_entry_M50_to_cmdbody}; + oms_nz_lim : {undef, oms_nz_lim_3eo, oms_nz_lim_iload, oms_nz_lim_std}; + contingency_nz_lim : {undef, contingency_nz_lim_3eo, + contingency_nz_lim_iload, contingency_nz_lim_std}; + + + +ASSIGN + init(entry_gains) := FALSE; + init(frz_3eo) := FALSE; + init(cont_3eo_pr_delay) := 5; + init(etsep_y_drift) := undef; + init(r) := reg-1; + init(step) := undef; + init(call_RTLS_abort_task) := FALSE; + init(first3) := TRUE; + init(first8) := TRUE; + init(first27) := TRUE; + init(cont_sep_cplt) := FALSE; + init(et_sep_man_initiate) := FALSE; + init(alpha_ok) := FALSE; + init(pch_cmd_reg4) := FALSE; + +-- Assumed initializations: + + init(rtls_lo_f_d_delay) := undef; + init(wcb2) := undef; + init(q_gcb_i) := undef; + init(oms_nz_lim) := undef; + init(contingency_nz_lim) := undef; + init(oms_rcs_i_c_inh_ena_cmd) := FALSE; + init(orbiter_dump_ena) := FALSE; +-- init(early_sep) := FALSE; + +------------- + + next(step) := nextstep; + + next(r) := + case + step = a1 & (cont_3EO_start | mode_select_completed) : r0; + step = 21 & cond_21 : reg4; + step = 23 & ABS_beta_n_GRT_beta_max & !high_rate_sep : reg1; + TRUE : r; + esac; + + next(first3) := + case + step = 3 & cont_3EO_start : FALSE; + TRUE : first3; + esac; + + 
next(first8) := + case + step = 8 & excess_OMS_propellant & cont_3EO_start : FALSE; + TRUE : first8; + esac; + + next(first27) := + case + step = 27 : FALSE; + TRUE: first27; + esac; + + next(s_unconv) := + case + step = 3 : FALSE; + TRUE : s_unconv; + esac; + + next(call_RTLS_abort_task) := + case + step = 3 : TRUE; + TRUE : call_RTLS_abort_task; + esac; + + next(mode_2_indicator) := + case + step = 4 : TRUE; + TRUE : mode_2_indicator; + esac; + + next(et_sep_man_initiate) := + case + step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 : TRUE; + step = 14 & pre_sep : TRUE; + step = 19 & q_orb_LT_0 : TRUE; + step = d20 : TRUE; + step = 26 & cond_26 : TRUE; + step = 29 & cond_29 : TRUE; + TRUE : et_sep_man_initiate; + esac; + + next(emerg_sep) := + case + next(step) = 1 : FALSE; + step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102: TRUE; + TRUE : emerg_sep; + esac; + + next(cont_3eo_pr_delay) := + case + next(step) = 1 : 5; + step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 : + minus_z_reg3; + step = 7 & !cont_minus_z_compl & r = reg102 & + t_nav-t_et_sep_GRT_dt_min_z_102 & + (ABS_q_orb_GRT_q_minus_z_max | ABS_r_orb_GRT_r_minus_z_max) : 0; + step = 14 & pre_sep : minus_z_reg102; + step = 19 & q_orb_LT_0 : minus_z_reg4; + step = d20 : minus_z_reg3; + step = 26 & cond_26 : minus_z_reg2; + step = 27 & first27 : minus_z_reg1; + TRUE : cont_3eo_pr_delay; + esac; + + next(etsep_y_drift) := + case + step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 : + minus_z_reg3; + step = 7 & !cont_minus_z_compl & r = reg102 & + t_nav-t_et_sep_GRT_dt_min_z_102 & + (ABS_q_orb_GRT_q_minus_z_max | ABS_r_orb_GRT_r_minus_z_max) : 0; + step = 14 & pre_sep : minus_z_reg102; + step = 19 & q_orb_LT_0 : minus_z_reg4; + step = d20 : minus_z_reg3; + step = 26 & cond_26 : minus_z_reg2; + step = 27 & first27 : minus_z_reg1; + TRUE : etsep_y_drift; + esac; + + next(fwd_rcs_dump_enable) := + case + step = 8 & excess_OMS_propellant & 
first8 : FALSE; + TRUE : fwd_rcs_dump_enable; + esac; + + next(fcs_accept_icnct) := + case + step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE; + TRUE : fcs_accept_icnct; + esac; + + next(oms_rcs_i_c_inh_ena_cmd) := + case +-- next(step) = 1 & oms_rcs_i_c_inh_ena_cmd : {0,1}; + next(step) = 1 & oms_rcs_i_c_inh_ena_cmd : FALSE; -- Assumed initialization + step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE; + TRUE : oms_rcs_i_c_inh_ena_cmd; + esac; + + next(orbiter_dump_ena) := + case + next(start) = TRUE : FALSE; -- Assumed initialization + step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE; + step = 13 & alt_GRT_alt_min_102_dump & t_nav-t_gmtlo_LT_t_dmp_last : TRUE; + TRUE : orbiter_dump_ena; + esac; + + next(frz_3eo) := + case + next(step) = 1 : FALSE; + step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : FALSE; + step = 28 & !et_sep_man_initiate : TRUE; + TRUE : frz_3eo; + esac; + + next(high_rate_sep) := + case + step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : FALSE; + step = 25 : TRUE; + TRUE : high_rate_sep; + esac; + + next(entry_gains) := + case + next(step) = 1 : FALSE; + step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : TRUE; + TRUE : entry_gains; + esac; + + next(cont_sep_cplt) := + case + next(step) = 1 : FALSE; + step = 12 & mm602_OK : TRUE; + TRUE : cont_sep_cplt; + esac; + + next(pch_cmd_reg4) := + case + next(step) = 1 : FALSE; + step = 18 & !pch_cmd_reg4 & cond_18 : TRUE; + TRUE : pch_cmd_reg4; + esac; + + next(alpha_ok) := + case + next(step) = 1 : FALSE; + step = 20 & ABS_alf_err_LT_alf_sep_err : TRUE; + TRUE : alpha_ok; + esac; + + next(early_sep) := + case + step = 27 & first27 : + case + cond_27 : TRUE; + TRUE : FALSE; + esac; + TRUE : early_sep; + esac; + +-------------------------------------------- +----- Additional Variables ----------------- +-------------------------------------------- + + next(rtls_lo_f_d_delay) := + case + next(start) = TRUE : undef; -- Assumed initialization + step = 8 & 
first8 & excess_OMS_propellant : 0; + TRUE : rtls_lo_f_d_delay; + esac; + + next(wcb2) := + case + next(start) = TRUE : undef; -- Assumed initialization + step = 10 & entry_mnvr_couter_LE_0 : post_sep_0; + step = 12 : case + r = reg4 : reg4_0; + TRUE : wcb2_3eo; + esac; + step = 14 & pre_sep : reg102_undef; + step = 15 : case + r = reg4 : reg4_0; + TRUE : wcb2_3eo; + esac; + step = 25 : reg2_neg4; + TRUE : wcb2; + esac; + + next(q_gcb_i) := + case + next(start) = TRUE : undef; -- Assumed initialization + step = 11 : quat_entry_M50_to_cmdbody; + step = 14 & pre_sep : quat_reg102_undef; + step = 16 : case + r = reg4 : quat_reg4; + TRUE : quat_reg3; + esac; + step = 22 : quat_reg2; + +-- Without this step the value "quat_reg2" would remain in "reg1": +-- step = 23 & ABS_beta_n_GRT_beta_max & !high_rate_sep : undef; + + TRUE : q_gcb_i; + esac; + + next(oms_nz_lim) := + case + next(start) = TRUE : undef; -- Assumed initialization + step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : oms_nz_lim_3eo; + step = 12 & mm602_OK : oms_nz_lim_std; + TRUE : oms_nz_lim; + esac; + + next(contingency_nz_lim) := + case + next(start) = TRUE : undef; -- Assumed initialization + step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : + contingency_nz_lim_3eo; + step = 12 & mm602_OK : contingency_nz_lim_std; + TRUE : contingency_nz_lim; + esac; + +DEFINE + finished := step = exit; + idle := step = undef; + + start_cont_3eo_mode_select := + case + step = 1 & !cont_3EO_start : TRUE; + TRUE : FALSE; + esac; + + nextstep := + case + step = 1 : a1; + step = a1 : case + (cont_3EO_start | mode_select_completed) : 2; + TRUE : step; + esac; + step = 2 : case + !cont_3EO_start : exit; + first3 : 3; + TRUE: 4; + esac; + step = 3 : 4; + step = 4 : case + et_sep_cmd : 7; + TRUE : 5; + esac; + step = 5 : case + h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & + m_mode != mm102 : exit; + TRUE : 6; + esac; + step = 6 : + case + r = reg102 : 13; + r in {reg3, reg4} : 15; + r = reg2 : 22; + r = reg1 : 27; + TRUE : 
exit; + esac; + step = 7 : case + cont_minus_z_compl : 8; + TRUE : exit; + esac; + step = 8 : case + excess_OMS_propellant & first8 : 9; + TRUE : 10; + esac; + step = 9 : exit; + step = 10 : case + !entry_mnvr_couter_LE_0 | rcs_all_jet_inhibit : exit; + TRUE : 11; + esac; + step = 11 : 12; + step = 12 : exit; + step = 13 : 14; + step = 14 : exit; + step = 15 : 16; + step = 16 : 17; + step = 17 : case + r = reg4 : 18; + TRUE : 20; + esac; + step = 18 : case + pch_cmd_reg4 | cond_18 : 19; + TRUE : exit; + esac; + step = 19 : exit; + step = 20 : case + ABS_alf_err_LT_alf_sep_err : b20; + TRUE : c20; + esac; + step = b20 : case + cond_20b : d20; + TRUE : exit; + esac; + step = c20 : case + alpha_ok : d20; + TRUE : 21; + esac; + step = d20 : exit; + TRUE : nextstep21; + esac; + + nextstep21 := + case + step = 21 : case + cond_21 : 15; + TRUE : exit; + esac; + step = 22 : 23; + step = 23 : case + ABS_beta_n_GRT_beta_max & !high_rate_sep : 27; + TRUE : 24; + esac; + step = 24 : case + cond_24 | high_rate_sep : 25; + TRUE : exit; + esac; + step = 25 : 26; + step = 26 : exit; + step = 27 : 28; + step = 28 : case + !et_sep_man_initiate : 29; + TRUE : exit; + esac; + step = 29 : exit; + start : 1; + step = exit : undef; + TRUE : step; + esac; + + post_sep_mode := step in {7,8,9,10,11,12}; + +------------------------------------------------------------------ +------------------------------------------------------------------ + +MODULE main +VAR + smode5: boolean; + vel : {GRT_vi_3eo_max, GRT_vi_3eo_min, LEQ_vi_3eo_min}; + q_bar: {GRT_qbar_reg3, GRT_qbar_reg1, LEQ_qbar_reg1}; + q_bar_a_GRT_qbar_max_sep : boolean; + q_bar_a_LT_qbar_oms_dump : boolean; + apogee_alt_LT_alt_ref : boolean; + h_dot_LT_hdot_reg2 : boolean; + h_dot_LT_0 : boolean; + alpha_n_GRT_alpha_reg2 : boolean; + delta_r_GRT_del_r_usp : boolean; + v_horiz_dnrng_LT_0: boolean; + meco_confirmed: boolean; + et_sep_cmd : boolean; + cont_minus_z_compl : boolean; + t_nav-t_et_sep_GRT_dt_min_z_102 : boolean; + 
ABS_q_orb_GRT_q_minus_z_max : boolean; + ABS_r_orb_GRT_r_minus_z_max : boolean; + excess_OMS_propellant : boolean; + entry_mnvr_couter_LE_0 : boolean; + rcs_all_jet_inhibit : boolean; + alt_GRT_alt_min_102_dump : boolean; + t_nav-t_gmtlo_LT_t_dmp_last : boolean; + pre_sep : boolean; + cond_18 : boolean; + q_orb_LT_0 : boolean; + ABS_alf_err_LT_alf_sep_err : boolean; + cond_20b : boolean; + cond_21 : boolean; + ABS_beta_n_GRT_beta_max : boolean; + cond_24 : boolean; + cond_26 : boolean; + cond_27 : boolean; + cond_29 : boolean; + mm602_OK : boolean; + start_guide : boolean; + mated_coast_mnvr : boolean; + + cs: cont_3eo_mode_select(cg.start_cont_3eo_mode_select, + smode5,vel,q_bar,apogee_alt_LT_alt_ref, + h_dot_LT_hdot_reg2,alpha_n_GRT_alpha_reg2, + delta_r_GRT_del_r_usp,v_horiz_dnrng_LT_0, + cg.high_rate_sep,meco_confirmed); + + cg: cont_3eo_guide(start_guide, + cs.cont_3EO_start, cs.region_selected, et_sep_cmd, + h_dot_LT_0, q_bar_a_GRT_qbar_max_sep, cs.m_mode, cs.r, + cont_minus_z_compl, t_nav-t_et_sep_GRT_dt_min_z_102, + ABS_q_orb_GRT_q_minus_z_max, ABS_r_orb_GRT_r_minus_z_max, + excess_OMS_propellant, q_bar_a_LT_qbar_oms_dump, + entry_mnvr_couter_LE_0, rcs_all_jet_inhibit, + alt_GRT_alt_min_102_dump, t_nav-t_gmtlo_LT_t_dmp_last, + pre_sep, cond_18, q_orb_LT_0, ABS_alf_err_LT_alf_sep_err, + cond_20b, cond_21, ABS_beta_n_GRT_beta_max, cond_24, cond_26, + cond_27, cond_29, mm602_OK); + +ASSIGN + init(start_guide) := FALSE; + init(mated_coast_mnvr) := FALSE; + + next(entry_mnvr_couter_LE_0) := + case + !entry_mnvr_couter_LE_0 : {FALSE, TRUE}; + TRUE : TRUE; + esac; + +--------------------------------------------------------------------- +--------------------------------------------------------------------- + next(start_guide) := + case + start_guide : FALSE; + !cg.idle : FALSE; + TRUE : {FALSE, TRUE}; + esac; + + next(smode5) := + case + fixed_values : smode5; + cg.idle : { FALSE, TRUE }; + TRUE : smode5; + esac; + + next(vel) := + case + fixed_values : vel; + 
cg.idle : {GRT_vi_3eo_max, GRT_vi_3eo_min, LEQ_vi_3eo_min}; + TRUE : vel; + esac; + + next(q_bar) := + case + fixed_values : q_bar; + cg.idle : {GRT_qbar_reg3, GRT_qbar_reg1, LEQ_qbar_reg1}; + TRUE : q_bar; + esac; + + next(q_bar_a_GRT_qbar_max_sep) := + case + fixed_values : q_bar_a_GRT_qbar_max_sep; + cg.idle : { FALSE, TRUE }; + TRUE : q_bar_a_GRT_qbar_max_sep; + esac; + + next(apogee_alt_LT_alt_ref) := + case + fixed_values : apogee_alt_LT_alt_ref; + cg.idle : { FALSE, TRUE }; + TRUE : apogee_alt_LT_alt_ref; + esac; + + next(h_dot_LT_hdot_reg2) := + case + fixed_values : h_dot_LT_hdot_reg2; + cg.idle : { FALSE, TRUE }; + TRUE : h_dot_LT_hdot_reg2; + esac; + + next(h_dot_LT_0) := + case + fixed_values : h_dot_LT_0; + cg.idle : { FALSE, TRUE }; + TRUE : h_dot_LT_0; + esac; + + next(alpha_n_GRT_alpha_reg2) := + case + fixed_values : alpha_n_GRT_alpha_reg2; + cg.idle : { FALSE, TRUE }; + TRUE : alpha_n_GRT_alpha_reg2; + esac; + + next(delta_r_GRT_del_r_usp) := + case + fixed_values : delta_r_GRT_del_r_usp; + cg.idle : { FALSE, TRUE }; + TRUE : delta_r_GRT_del_r_usp; + esac; + + next(v_horiz_dnrng_LT_0) := + case + fixed_values : v_horiz_dnrng_LT_0; + cg.idle : { FALSE, TRUE }; + TRUE : v_horiz_dnrng_LT_0; + esac; + + next(meco_confirmed) := + case + fixed_values : meco_confirmed; + meco_confirmed : TRUE; + cg.idle : { FALSE, TRUE }; + TRUE : meco_confirmed; + esac; + + next(et_sep_cmd) := + case + fixed_values : et_sep_cmd; + et_sep_cmd : TRUE; + cg.idle : { FALSE, TRUE }; + TRUE : et_sep_cmd; + esac; + + next(cont_minus_z_compl) := + case + fixed_values : cont_minus_z_compl; + cg.idle : { FALSE, TRUE }; + TRUE : cont_minus_z_compl; + esac; + + next(t_nav-t_et_sep_GRT_dt_min_z_102) := + case + fixed_values : t_nav-t_et_sep_GRT_dt_min_z_102; + cg.idle : { FALSE, TRUE }; + TRUE : t_nav-t_et_sep_GRT_dt_min_z_102; + esac; + + next(ABS_q_orb_GRT_q_minus_z_max) := + case + fixed_values : ABS_q_orb_GRT_q_minus_z_max; + cg.idle : { FALSE, TRUE }; + TRUE : 
ABS_q_orb_GRT_q_minus_z_max; + esac; + + next(ABS_r_orb_GRT_r_minus_z_max) := + case + fixed_values : ABS_r_orb_GRT_r_minus_z_max; + cg.idle : { FALSE, TRUE }; + TRUE : ABS_r_orb_GRT_r_minus_z_max; + esac; + + next(excess_OMS_propellant) := + case + fixed_values : excess_OMS_propellant; + cg.idle & excess_OMS_propellant : { FALSE, TRUE }; + TRUE : excess_OMS_propellant; + esac; + + next(q_bar_a_LT_qbar_oms_dump) := + case + fixed_values : q_bar_a_LT_qbar_oms_dump; + cg.idle : { FALSE, TRUE }; + TRUE : q_bar_a_LT_qbar_oms_dump; + esac; + + next(rcs_all_jet_inhibit) := + case + fixed_values : rcs_all_jet_inhibit; + cg.idle : { FALSE, TRUE }; + TRUE : rcs_all_jet_inhibit; + esac; + + next(alt_GRT_alt_min_102_dump) := + case + fixed_values : alt_GRT_alt_min_102_dump; + cg.idle : { FALSE, TRUE }; + TRUE : alt_GRT_alt_min_102_dump; + esac; + + next(t_nav-t_gmtlo_LT_t_dmp_last) := + case + fixed_values : t_nav-t_gmtlo_LT_t_dmp_last; + cg.idle : { FALSE, TRUE }; + TRUE : t_nav-t_gmtlo_LT_t_dmp_last; + esac; + + next(pre_sep) := + case + fixed_values : pre_sep; + cg.idle : { FALSE, TRUE }; + TRUE : pre_sep; + esac; + + next(cond_18) := + case + fixed_values : cond_18; + cg.idle : { FALSE, TRUE }; + TRUE : cond_18; + esac; + + next(q_orb_LT_0) := + case + fixed_values : q_orb_LT_0; + cg.idle : { FALSE, TRUE }; + TRUE : q_orb_LT_0; + esac; + + next(ABS_alf_err_LT_alf_sep_err) := + case + fixed_values : ABS_alf_err_LT_alf_sep_err; + cg.idle : { FALSE, TRUE }; + TRUE : ABS_alf_err_LT_alf_sep_err; + esac; + + next(cond_20b) := + case + fixed_values : cond_20b; + cg.idle : { FALSE, TRUE }; + TRUE : cond_20b; + esac; + + next(cond_21) := + case + fixed_values : cond_21; + cg.idle : { FALSE, TRUE }; + TRUE : cond_21; + esac; + + next(ABS_beta_n_GRT_beta_max) := + case + fixed_values : ABS_beta_n_GRT_beta_max; + cg.idle : { FALSE, TRUE }; + TRUE : ABS_beta_n_GRT_beta_max; + esac; + + next(cond_24) := + case + fixed_values : cond_24; + cg.idle : { FALSE, TRUE }; + TRUE : cond_24; + 
esac; + + next(cond_26) := + case + fixed_values : cond_26; + cg.idle : { FALSE, TRUE }; + TRUE : cond_26; + esac; + + next(cond_27) := + case + fixed_values : cond_27; + cg.idle : { FALSE, TRUE }; + TRUE : cond_27; + esac; + + next(cond_29) := + case + fixed_values : cond_29; + cg.idle : { FALSE, TRUE }; + TRUE : cond_29; + esac; + + next(mm602_OK) := + case + fixed_values : mm602_OK; + cg.idle : { FALSE, TRUE }; + TRUE : mm602_OK; + esac; + + next(mated_coast_mnvr) := + case + next(cg.step) = 1 : FALSE; + cg.step = 6 & cg.r in {reg1, reg2, reg3, reg4, reg102} : TRUE; + TRUE : mated_coast_mnvr; + esac; + +--------------------------------------------------------------------- +--------------------------------------------------------------------- +DEFINE + fixed_values := FALSE; + + output_ok := + case + cg.q_gcb_i = undef | cg.wcb2 = undef | + cg.cont_3eo_pr_delay = 5 | + cg.etsep_y_drift = undef : + case + !mated_coast_mnvr: 1; + TRUE : undef; + esac; + !mated_coast_mnvr: toint(cg.q_gcb_i = quat_entry_M50_to_cmdbody & + cg.wcb2 = post_sep_0); +-- reg1 never happens? 
+-- cg.r = reg1 : (cg.q_gcb_i = quat_reg1 & cg.wcb2 = reg1_0 & +-- cg.cont_3eo_pr_delay = minus_z_reg1 & +-- cg.etsep_y_drift = minus_z_reg1) | cg.emerg_sep; + cg.r = reg2 : toint((cg.q_gcb_i = quat_reg2 & cg.wcb2 = reg2_neg4 & + cg.cont_3eo_pr_delay = minus_z_reg2 & + cg.etsep_y_drift = minus_z_reg2) | cg.emerg_sep); + + cg.r = reg3 : toint((cg.q_gcb_i = quat_reg3 & cg.wcb2 = wcb2_3eo & + cg.cont_3eo_pr_delay = minus_z_reg3 & + cg.etsep_y_drift = minus_z_reg3) | cg.emerg_sep); + cg.r = reg4 : toint((cg.q_gcb_i = quat_reg4 & cg.wcb2 = reg4_0 & + cg.cont_3eo_pr_delay = minus_z_reg4 & + cg.etsep_y_drift = minus_z_reg4) | cg.emerg_sep); + cg.r = reg102 : toint((cg.q_gcb_i = quat_reg102_undef & + cg.wcb2 = reg102_undef & + cg.cont_3eo_pr_delay = minus_z_reg102 & + cg.etsep_y_drift = minus_z_reg102) | cg.emerg_sep); + TRUE : 0; + esac; + +--------------------------------------------------------------------- +-------- Specifications --------------------------------------------- +--------------------------------------------------------------------- + +-- Contingency Guide terminates + +SPEC AG(!cg.idle -> AF(cg.finished)) + +-- Contingency guide can be executed infinitely often + +SPEC AG( (cg.idle | cg.finished) -> + EF(!(cg.idle | cg.finished) & EF(cg.finished))) + +-- Contingency mode select task works fine + +SPEC AG(cs.cont_3EO_start & cs.region_selected -> + ((cs.m_mode = mm102 | meco_confirmed) & + cs.r != reg-1 & cs.r != reg0)) + +-- Bad (initial) value never happens again once region is computed +-- unless we restart the task + +--SPEC AG(cs.r != reg-1 -> !E[!cg.start_cont_3eo_mode_select U +-- cs.r = reg-1 & !cg.start_cont_3eo_mode_select]) + +-- Comment out each of the regions and see if this is still true +-- (Check, if ALL of the regions can happen) + +--SPEC AG(cs.r in {reg-1 +-- ,reg0 +-- ,reg1 +-- ,reg2 +-- ,reg3 +-- ,reg102 +-- }) + +-- Comment out each of the regions and see if this is still true +-- (Check, if ALL of the regions can happen) + +--SPEC 
AG(cg.r in {reg-1 +-- ,reg0 +-- ,reg1 +-- ,reg2 +-- ,reg3 +-- ,reg4 +-- ,reg102 +-- }) + +-- Mode_select starts at the next step after its "start" bit is set: + +--SPEC AG(!cg.start_cont_3eo_mode_select -> +-- AX(cg.start_cont_3eo_mode_select & cs.step in {exit, undef} -> +-- AX(cs.step = 1 & !cs.region_selected))) + +-- During major mode 103, the inertial velocity is monitored. +-- Below an I-loaded velocity, a MECO would constitute a contingency +-- abort. (Must NOT be in SMODE=5 (??)) + +SPEC AG(cg.start_cont_3eo_mode_select & cs.m_mode = mm103 & + vel = LEQ_vi_3eo_min & meco_confirmed & !smode5 -> + A[!cs.region_selected U cs.region_selected & cs.cont_3EO_start]) + +-- Above a certain inertial velocity (in mode 103), the 3E/O field +-- is blanked, indicating that a MECO at this point would not require +-- an OPS 6 contingency abort. + +SPEC AG(cs.region_selected -> + (cs.m_mode = mm103 & vel = GRT_vi_3eo_max -> !cs.cont_3EO_start)) + +-- Between the two velocities, an apogee altitude - velocity curve is +-- constructed based on the current inertial velocity. If the apogee +-- altitude is above this curve, a contingency abort capability is +-- still required and a 3E/O region index will be calculated. +-- Otherwise, the 3E/O field is blanked out and no further contingency +-- abort calculations will be performed. (Must NOT be in SMODE=5 (??)) + +SPEC AG(cg.start_cont_3eo_mode_select & cs.m_mode = mm103 & + vel = GRT_vi_3eo_min & meco_confirmed & !smode5 -> + A[!cs.region_selected U cs.region_selected & + apogee_alt_LT_alt_ref = !cs.cont_3EO_start]) + +-- For an RTLS trajectory (SMODE=5), a check is made on the downrange +-- velocity to see if the vehicle is heading away from the landing site. +-- If this is the case, a 3E/O region index is calculated. If the vehicle +-- is heading back to the landing site, and the current range to the MECO +-- R-V line is greater than an I-loaded value, a 3E/O region index is +-- calculated. 
Otherwise, an intact abort is possible and the 3E/O field +-- is blanked. + +SPEC AG(cg.start_cont_3eo_mode_select & smode5 & meco_confirmed & + (!v_horiz_dnrng_LT_0 | !delta_r_GRT_del_r_usp) -> + A[!cs.region_selected U cs.region_selected & cs.cont_3EO_start]) + +-- If this task is called prior to SRB separation [mm102], the 3E/O region +-- index is set to 102 and the 3E/O contingency flag is set. + +SPEC AG(cs.m_mode = mm102 & cg.start_cont_3eo_mode_select -> + AX (A [ !cs.region_selected U cs.region_selected & + cs.r = reg102 & cs.cont_3EO_start])) + +-- After SRB separation, on every pass that the 3E/O region index is +-- calculated, a check is made to see if MECO confirmed has occured. If +-- so, a check is made to see if the major mode is 103. If so, an RTLS is +-- automatically invoked to transition to major mode 601. + +SPEC AG(!cs.region_selected & cs.m_mode = mm103 & meco_confirmed -> + A[!cs.region_selected U cs.region_selected & cs.r != reg0 -> + cs.m_mode = mm601 & cs.RTLS_abort_declared]) + +-- Once the 3E/O contingency flag has been set, this task is no longer +-- executed. + +SPEC AG(cs.cont_3EO_start -> AG(!cg.start_cont_3eo_mode_select)) + +-- If MECO confirmed occurs in MM103 and an OPS 6 contingency abort +-- procedure is still required, contingency 3E/O guidance sets the +-- CONT_3EO_START flag ON. Contingency 3E/O guidance then switches +-- from its display support function into an actual auto guidance +-- steering process. [...] Contingency 3E/O guidance sets the RTLS abort +-- declared flag and the MSC performs the transition from from major mode +-- 103 to 601. + +SPEC AG(!cg.idle & !cg.finished & !cs.region_selected & cs.m_mode = mm103 -> + A[ !cg.finished U cg.finished & cs.region_selected & + (cs.cont_3EO_start -> cs.m_mode = mm601 & cs.RTLS_abort_declared) ]) + +-- If MECO confirmed occurs in a major mode 601 and a contingency abort +-- procedure is still required, contingency 3E/O guidance sets the +-- CONT_3EO_START flag ON. [...] 
Contingency 3E/O guidance then commands +-- 3E/O auto maneuvers in major mode 601. [What are these maneuvers??] + +SPEC AG(cg.finished & cs.m_mode = mm601 & !et_sep_cmd & + meco_confirmed & cs.cont_3EO_start -> + cg.q_gcb_i in {quat_reg1, quat_reg2, quat_reg3, quat_reg4, undef} + | cg.emerg_sep) + +-- If MECO confirmed occurs in a first stage (MM102) [...], contingency +-- 3E/O guidance will command a fast ET separation during SRB tailoff in +-- major mode 102. CONT 3E/O GUID will then command maneuver post-sep in +-- MM601 (???). [ I'm not sure what indicates fast ET sep.: emerg_sep or +-- early_sep, or what? ] + +SPEC AG(cg.finished & cs.m_mode = mm102 & meco_confirmed & pre_sep -> + cg.emerg_sep | et_sep_cmd + | cg.et_sep_man_initiate + | cg.early_sep + ) + +--------------------------------------------- +-- Invariants from Murphi code -------------- +--------------------------------------------- + +--SPEC AG(cg.finished -> (output_ok != 0 | (output_ok = undef & +-- (cg.emerg_sep | !cg.cont_sep_cplt)))) + +--SPEC AG(!cg.finished & !cg.idle -> !mated_coast_mnvr | !et_sep_cmd) + +-- Stronger version !!! + +SPEC AG(cg.finished -> output_ok != 0) + +-- Contingency Guidance shall command an ET separation +-- [under certain conditions :-]. + +SPEC AG(cs.cont_3EO_start & cg.finished & + (cg.r = reg1 -> cond_29) & + (cg.r = reg2 -> cond_24 & cond_26) & + (cg.r = reg3 -> cg.alpha_ok & + (ABS_alf_err_LT_alf_sep_err -> cond_20b)) & + (cg.r = reg4 -> cond_18 & q_orb_LT_0) & + (cg.r = reg102 -> pre_sep) -> + et_sep_cmd | cg.et_sep_man_initiate + | cg.early_sep + | cg.emerg_sep + ) + +-- Contingency Guidance shall command at most one interconnected OMS dump. 
+ +SPEC AG(cg.finished & cg.oms_rcs_i_c_inh_ena_cmd -> + AG(!cg.oms_rcs_i_c_inh_ena_cmd -> AG(!cg.oms_rcs_i_c_inh_ena_cmd))) + +-- Contingency Guidance shall command a transition to glide RTLS +-- (flight mode 602) + +SPEC AG(cg.finished & cs.m_mode = mm601 -> + --cg.cont_sep_cplt | cg.emerg_sep | + cg.call_RTLS_abort_task) + +-- Paper, p. 28, unstated assumption 2: at step 6 the region is +-- among 102, 1-4. + +SPEC AG(cg.step = 6 -> cg.r in {reg102, reg1, reg2, reg3, reg4}) + +-- The transition to mode 602 shall not occur until the entry maneuver +-- has been calculated + +SPEC !E[cg.q_gcb_i = undef U cg.cont_sep_cplt & cg.q_gcb_i = undef] + +-- The entry maneuver calculations shall not commence until the OMS/RCS +-- interconnect, if any, is complete (??? What does it exactly mean???) +-- !!! +--SPEC AG(cg.oms_rcs_i_c_inh_ena_cmd -> +-- !E[cg.oms_rcs_i_c_inh_ena_cmd U +-- cg.q_gcb_i != undef & cg.oms_rcs_i_c_inh_ena_cmd]) + +SPEC AG(cg.oms_rcs_i_c_inh_ena_cmd -> + !E[rcs_all_jet_inhibit U + cg.q_gcb_i != undef & rcs_all_jet_inhibit]) + +-- The OMS dump shall not be considered until the -Z translation is complete. + +SPEC !E[!cont_minus_z_compl & cg.r != reg102 U cg.orbiter_dump_ena] + +-- Completion of -Z translation shall not be checked until ET separation +-- has been commanded + +SPEC !E[!et_sep_cmd U cg.step = 7] + +-- ET separation shall be commanded if and only if an abort maneuver +-- region is assigned [and again there are *certain conditions*]. + +SPEC AG(cg.finished & cs.cont_3EO_start & + (cg.r = reg1 -> cond_29) & + (cg.r = reg2 -> cond_24 & cond_26) & + (cg.r = reg3 -> cg.alpha_ok & + (ABS_alf_err_LT_alf_sep_err -> cond_20b)) & + (cg.r = reg4 -> cond_18 & q_orb_LT_0) & + (cg.r = reg102 -> pre_sep) -> + (cg.et_sep_man_initiate | et_sep_cmd + <-> cg.r in {reg1, reg2, reg3, reg4, reg102})) + +-- The assigned region can not change arbitrarily. 
+ +-- Regions 1 and 2 may interchange, but will not switch to any other region: + +SPEC AG(cg.finished & cs.cont_3EO_start & cg.r in {reg1,reg2} -> + AG(cg.finished -> cg.r in {reg1,reg2})) + +-- Regions 3 and 4 may interchange, but will not switch to any other region: + +SPEC AG(cg.finished & cs.cont_3EO_start & cg.r in {reg3,reg4} -> + AG(cg.finished -> cg.r in {reg3,reg4})) + +-- Region 102 never changes: + +SPEC AG(cg.finished & cg.r = reg102 -> AG(cg.finished -> cg.r = reg102)) diff --git a/tests/examplefiles/hello-world.puzzlet.aheui b/tests/examplefiles/hello-world.puzzlet.aheui new file mode 100644 index 00000000..e7ef3a62 --- /dev/null +++ b/tests/examplefiles/hello-world.puzzlet.aheui @@ -0,0 +1,10 @@ +밤밣따빠밣밟따뿌 +빠맣파빨받밤뚜뭏 +돋밬탕빠맣붏두붇 +볻뫃박발뚷투뭏붖 +뫃도뫃희멓뭏뭏붘 +뫃봌토범더벌뿌뚜 +뽑뽀멓멓더벓뻐뚠 +뽀덩벐멓뻐덕더벅 + +https://github.com/aheui/snippets/blob/master/hello-world/hello-world.puzzlet.aheui diff --git a/tests/examplefiles/plain.bst b/tests/examplefiles/plain.bst new file mode 100644 index 00000000..7adf4bb0 --- /dev/null +++ b/tests/examplefiles/plain.bst @@ -0,0 +1,1097 @@ +% BibTeX standard bibliography style `plain' + % Version 0.99b (8-Dec-10 release) for BibTeX versions 0.99a or later. + % Copyright (C) 1984, 1985, 1988, 2010 Howard Trickey and Oren Patashnik. + % Unlimited copying and redistribution of this file are permitted as long as + % it is unmodified. Modifications (and redistribution of modified versions) + % are also permitted, but only if the resulting file is renamed to something + % besides btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst. + % This restriction helps ensure that all standard styles are identical. + % The file btxbst.doc has the documentation for this style. 
+ +ENTRY + { address + author + booktitle + chapter + edition + editor + howpublished + institution + journal + key + month + note + number + organization + pages + publisher + school + series + title + type + volume + year + } + {} + { label } + +INTEGERS { output.state before.all mid.sentence after.sentence after.block } + +FUNCTION {init.state.consts} +{ #0 'before.all := + #1 'mid.sentence := + #2 'after.sentence := + #3 'after.block := +} + +STRINGS { s t } + +FUNCTION {output.nonnull} +{ 's := + output.state mid.sentence = + { ", " * write$ } + { output.state after.block = + { add.period$ write$ + newline$ + "\newblock " write$ + } + { output.state before.all = + 'write$ + { add.period$ " " * write$ } + if$ + } + if$ + mid.sentence 'output.state := + } + if$ + s +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.check} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +FUNCTION {output.bibitem} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + before.all 'output.state := +} + +FUNCTION {fin.entry} +{ add.period$ + write$ + newline$ +} + +FUNCTION {new.block} +{ output.state before.all = + 'skip$ + { after.block 'output.state := } + if$ +} + +FUNCTION {new.sentence} +{ output.state after.block = + 'skip$ + { output.state before.all = + 'skip$ + { after.sentence 'output.state := } + if$ + } + if$ +} + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ 'skip$ + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + 'skip$ + if$ +} + +FUNCTION {new.block.checka} +{ empty$ + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.block.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.sentence.checka} +{ empty$ + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {new.sentence.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {field.or.null} +{ duplicate$ empty$ 
+ { pop$ "" } + 'skip$ + if$ +} + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "{\em " swap$ * "}" * } + if$ +} + +INTEGERS { nameptr namesleft numnames } + +FUNCTION {format.names} +{ 's := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't := + nameptr #1 > + { namesleft #1 > + { ", " * t * } + { numnames #2 > + { "," * } + 'skip$ + if$ + t "others" = + { " et~al." * } + { " and " * t * } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {format.authors} +{ author empty$ + { "" } + { author format.names } + if$ +} + +FUNCTION {format.editors} +{ editor empty$ + { "" } + { editor format.names + editor num.names$ #1 > + { ", editors" * } + { ", editor" * } + if$ + } + if$ +} + +FUNCTION {format.title} +{ title empty$ + { "" } + { title "t" change.case$ } + if$ +} + +FUNCTION {n.dashify} +{ 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + +FUNCTION {format.date} +{ year empty$ + { month empty$ + { "" } + { "there's a month but no year in " cite$ * warning$ + month + } + if$ + } + { month empty$ + 'year + { month " " * year * } + if$ + } + if$ +} + +FUNCTION {format.btitle} +{ title emphasize +} + +FUNCTION {tie.or.space.connect} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ * * +} + +FUNCTION {either.or.check} +{ empty$ + 'pop$ + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {format.bvolume} +{ volume empty$ + { "" } + { "volume" volume tie.or.space.connect + series empty$ + 'skip$ + { " of " * series emphasize * } + if$ + "volume and number" number 
either.or.check + } + if$ +} + +FUNCTION {format.number.series} +{ volume empty$ + { number empty$ + { series field.or.null } + { output.state mid.sentence = + { "number" } + { "Number" } + if$ + number tie.or.space.connect + series empty$ + { "there's a number but no series in " cite$ * warning$ } + { " in " * series * } + if$ + } + if$ + } + { "" } + if$ +} + +FUNCTION {format.edition} +{ edition empty$ + { "" } + { output.state mid.sentence = + { edition "l" change.case$ " edition" * } + { edition "t" change.case$ " edition" * } + if$ + } + if$ +} + +INTEGERS { multiresult } + +FUNCTION {multi.page.check} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {format.pages} +{ pages empty$ + { "" } + { pages multi.page.check + { "pages" pages n.dashify tie.or.space.connect } + { "page" pages tie.or.space.connect } + if$ + } + if$ +} + +FUNCTION {format.vol.num.pages} +{ volume field.or.null + number empty$ + 'skip$ + { "(" number * ")" * * + volume empty$ + { "there's a number but no volume in " cite$ * warning$ } + 'skip$ + if$ + } + if$ + pages empty$ + 'skip$ + { duplicate$ empty$ + { pop$ format.pages } + { ":" * pages n.dashify * } + if$ + } + if$ +} + +FUNCTION {format.chapter.pages} +{ chapter empty$ + 'format.pages + { type empty$ + { "chapter" } + { type "l" change.case$ } + if$ + chapter tie.or.space.connect + pages empty$ + 'skip$ + { ", " * format.pages * } + if$ + } + if$ +} + +FUNCTION {format.in.ed.booktitle} +{ booktitle empty$ + { "" } + { editor empty$ + { "In " booktitle emphasize * } + { "In " format.editors * ", " * booktitle emphasize * } + if$ + } + if$ +} + +FUNCTION {empty.misc.check} +{ author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ + and and and and and + key empty$ not and + { 
"all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + +FUNCTION {format.thesis.type} +{ type empty$ + 'skip$ + { pop$ + type "t" change.case$ + } + if$ +} + +FUNCTION {format.tr.number} +{ type empty$ + { "Technical Report" } + 'type + if$ + number empty$ + { "t" change.case$ } + { number tie.or.space.connect } + if$ +} + +FUNCTION {format.article.crossref} +{ key empty$ + { journal empty$ + { "need key or journal for " cite$ * " to crossref " * crossref * + warning$ + "" + } + { "In {\em " journal * "\/}" * } + if$ + } + { "In " key * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.crossref.editor} +{ editor #1 "{vv~}{ll}" format.name$ + editor num.names$ duplicate$ + #2 > + { pop$ " et~al." * } + { #2 < + 'skip$ + { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = + { " et~al." * } + { " and " * editor #2 "{vv~}{ll}" format.name$ * } + if$ + } + if$ + } + if$ +} + +FUNCTION {format.book.crossref} +{ volume empty$ + { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ + "In " + } + { "Volume" volume tie.or.space.connect + " of " * + } + if$ + editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { series empty$ + { "need editor, key, or series for " cite$ * " to crossref " * + crossref * warning$ + "" * + } + { "{\em " * series * "\/}" * } + if$ + } + { key * } + if$ + } + { format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.incoll.inproc.crossref} +{ editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { booktitle empty$ + { "need editor, key, or booktitle for " cite$ * " to crossref " * + crossref * warning$ + "" + } + { "In {\em " booktitle * "\/}" * } + if$ + } + { "In " key * } + if$ + } + { "In " format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {article} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref 
missing$ + { journal emphasize "journal" output.check + format.vol.num.pages output + format.date "year" output.check + } + { format.article.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {book} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {booklet} +{ output.bibitem + format.authors output + new.block + format.title "title" output.check + howpublished address new.block.checkb + howpublished output + address output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {inbook} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + format.chapter.pages "chapter and pages" output.check + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { format.chapter.pages "chapter and pages" output.check + new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {incollection} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { 
format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.chapter.pages output + new.sentence + publisher "publisher" output.check + address output + format.edition output + format.date "year" output.check + } + { format.incoll.inproc.crossref output.nonnull + format.chapter.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {inproceedings} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.pages output + address empty$ + { organization publisher new.sentence.checkb + organization output + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + organization output + publisher output + } + if$ + } + { format.incoll.inproc.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {conference} { inproceedings } + +FUNCTION {manual} +{ output.bibitem + author empty$ + { organization empty$ + 'skip$ + { organization output.nonnull + address output + } + if$ + } + { format.authors output.nonnull } + if$ + new.block + format.btitle "title" output.check + author empty$ + { organization empty$ + { address new.block.checka + address output + } + 'skip$ + if$ + } + { organization address new.block.checkb + organization output + address output + } + if$ + format.edition output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {mastersthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + "Master's thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {misc} +{ 
output.bibitem + format.authors output + title howpublished new.block.checkb + format.title output + howpublished new.block.checka + howpublished output + format.date output + new.block + note output + fin.entry + empty.misc.check +} + +FUNCTION {phdthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.btitle "title" output.check + new.block + "PhD thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {proceedings} +{ output.bibitem + editor empty$ + { organization output } + { format.editors output.nonnull } + if$ + new.block + format.btitle "title" output.check + format.bvolume output + format.number.series output + address empty$ + { editor empty$ + { publisher new.sentence.checka } + { organization publisher new.sentence.checkb + organization output + } + if$ + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + editor empty$ + 'skip$ + { organization output } + if$ + publisher output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {techreport} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + format.tr.number output.nonnull + institution "institution" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {unpublished} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + note "note" output.check + format.date output + fin.entry +} + +FUNCTION {default.type} { misc } + +MACRO {jan} {"January"} + +MACRO {feb} {"February"} + +MACRO {mar} {"March"} + +MACRO {apr} {"April"} + +MACRO {may} {"May"} + +MACRO {jun} {"June"} + +MACRO {jul} {"July"} + +MACRO {aug} {"August"} + +MACRO {sep} {"September"} + +MACRO {oct} {"October"} + 
+MACRO {nov} {"November"} + +MACRO {dec} {"December"} + +MACRO {acmcs} {"ACM Computing Surveys"} + +MACRO {acta} {"Acta Informatica"} + +MACRO {cacm} {"Communications of the ACM"} + +MACRO {ibmjrd} {"IBM Journal of Research and Development"} + +MACRO {ibmsj} {"IBM Systems Journal"} + +MACRO {ieeese} {"IEEE Transactions on Software Engineering"} + +MACRO {ieeetc} {"IEEE Transactions on Computers"} + +MACRO {ieeetcad} + {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"} + +MACRO {ipl} {"Information Processing Letters"} + +MACRO {jacm} {"Journal of the ACM"} + +MACRO {jcss} {"Journal of Computer and System Sciences"} + +MACRO {scp} {"Science of Computer Programming"} + +MACRO {sicomp} {"SIAM Journal on Computing"} + +MACRO {tocs} {"ACM Transactions on Computer Systems"} + +MACRO {tods} {"ACM Transactions on Database Systems"} + +MACRO {tog} {"ACM Transactions on Graphics"} + +MACRO {toms} {"ACM Transactions on Mathematical Software"} + +MACRO {toois} {"ACM Transactions on Office Information Systems"} + +MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"} + +MACRO {tcs} {"Theoretical Computer Science"} + +READ + +FUNCTION {sortify} +{ purify$ + "l" change.case$ +} + +INTEGERS { len } + +FUNCTION {chop.word} +{ 's := + 'len := + s #1 len substring$ = + { s len #1 + global.max$ substring$ } + 's + if$ +} + +FUNCTION {sort.format.names} +{ 's := + #1 'nameptr := + "" + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { nameptr #1 > + { " " * } + 'skip$ + if$ + s nameptr "{vv{ } }{ll{ }}{ ff{ }}{ jj{ }}" format.name$ 't := + nameptr numnames = t "others" = and + { "et al" * } + { t sortify * } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {sort.format.title} +{ 't := + "A " #2 + "An " #3 + "The " #4 t chop.word + chop.word + chop.word + sortify + #1 global.max$ substring$ +} + +FUNCTION {author.sort} +{ author empty$ + { key empty$ + { "to sort, need author or key 
in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.editor.sort} +{ author empty$ + { editor empty$ + { key empty$ + { "to sort, need author, editor, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { editor sort.format.names } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.organization.sort} +{ author empty$ + { organization empty$ + { key empty$ + { "to sort, need author, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {editor.organization.sort} +{ editor empty$ + { organization empty$ + { key empty$ + { "to sort, need editor, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { editor sort.format.names } + if$ +} + +FUNCTION {presort} +{ type$ "book" = + type$ "inbook" = + or + 'author.editor.sort + { type$ "proceedings" = + 'editor.organization.sort + { type$ "manual" = + 'author.organization.sort + 'author.sort + if$ + } + if$ + } + if$ + " " + * + year field.or.null sortify + * + " " + * + title field.or.null + sort.format.title + * + #1 entry.max$ substring$ + 'sort.key$ := +} + +ITERATE {presort} + +SORT + +STRINGS { longest.label } + +INTEGERS { number.label longest.label.width } + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + 'skip$ + if$ +} + +EXECUTE {initialize.longest.label} + +ITERATE {longest.label.pass} + +FUNCTION {begin.bib} +{ preamble$ empty$ + 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label 
* "}" * write$ newline$ +} + +EXECUTE {begin.bib} + +EXECUTE {init.state.consts} + +ITERATE {call.type$} + +FUNCTION {end.bib} +{ newline$ + "\end{thebibliography}" write$ newline$ +} + +EXECUTE {end.bib} diff --git a/tests/examplefiles/rnc_example.rnc b/tests/examplefiles/rnc_example.rnc new file mode 100644 index 00000000..a1440302 --- /dev/null +++ b/tests/examplefiles/rnc_example.rnc @@ -0,0 +1,33 @@ +# This is a sample RNC file from the tutorial for the 2003 Working Draft +# http://relaxng.org/compact-tutorial-20030326.html + +element html { + element head { + element title { text } + }, + element body { + element table { + attribute class { "addressBook" }, + element tr { + attribute class { "card" }, + element td { + attribute class { "name" }, + mixed { + element span { + attribute class { "givenName" }, + text + }?, + element span { + attribute class { "familyName" }, + text + }? + } + }, + element td { + attribute class { "email" }, + text + } + }+ + } + } +} diff --git a/tests/examplefiles/test.bib b/tests/examplefiles/test.bib new file mode 100644 index 00000000..87e558d8 --- /dev/null +++ b/tests/examplefiles/test.bib @@ -0,0 +1,77 @@ +This is an example BibTeX file. +This text is a comment. 
+ +@preamble{"%%% example BibTeX file"} + +@Preamble{"\newcommand{\noopsort}[1]{} " + "\newcommand{\noopsort}[1]{} "} + +@String{SCI = "Science"} + +@STRING{JFernandez = "Fernandez, Julio M."} +@StRiNg{HGaub = "Gaub, Hermann E."} +@string{MGautel = "Gautel, Mathias"} +@String{FOesterhelt = "Oesterhelt, Filipp"} +@String{MRief = "Rief, Matthias"} + +@Article{rief97b, + author = MRief #" and "# MGautel #" and "# FOesterhelt + #" and "# JFernandez #" and "# HGaub, + title = "Reversible Unfolding of Individual Titin + Immunoglobulin Domains by {AFM}", + journal = SCI, + volume = 276, + number = 5315, + pages = "1109--1112", + year = 1997, + doi = "10.1126/science.276.5315.1109", + URL = "http://www.sciencemag.org/cgi/content/abstract/276/5315/1109", + eprint = "http://www.sciencemag.org/cgi/reprint/276/5315/1109.pdf", +} + + +Parens can be used instead of braces: + +@ARTICLE(ruckenstein-diffusion, + author = "Liu, Hongquin and Ruckenstein, Eli", + language = "english", + title = "Predicting the Diffusion Coefficient in Supercritical Fluids", + journal = "Ind. Eng. Chem. 
Res.", + volume = "36", + year = "1997", + pages = "888-895" +) + +@book{ + viktorov-methods, + author = "Викторов, Михаил Маркович", + publisher = "Л.: <<Химия>>", + title = "Методы вычисления физико-химических величин и прикладные расчёты", + language = "russian", + year = "1977", + isbn = "000-0000000000", +} + +@comment{jackson-commented-out, + author = "Jackson, P\'eter", + publisher = "Some Publisher", + language = "english", + title = "Some Title", + series = "Some series", + booktitle = "Commented Out", + number = "3", + edition = "Second", + year = "1933", + pages = "44--59" +} + +@booklet{test-booklet, + author = "de Last, Jr., First Middle", + language = "english", + title = "Just a booklet", + year = 2006, + month = jan, + address = "Moscow", + howpublished = "Published by Foo" +} + diff --git a/tests/examplefiles/test.cr b/tests/examplefiles/test.cr new file mode 100644 index 00000000..028ff6f3 --- /dev/null +++ b/tests/examplefiles/test.cr @@ -0,0 +1,2871 @@ +# Examples taken from http://crystal-lang.org/docs/ +# Copyright 2012-2016 Manas Technology Solutions. + + +require "http/server" + +server = HTTP::Server.new(8080) do |context| + context.response.content_type = "text/plain" + context.response.print "Hello world! The time is #{Time.now}" +end + +puts "Listening on http://0.0.0.0:8080" +server.listen + + +module HTTP + class RequestHandler + end +end + +alias NumericValue = Float32 | Float64 | Int32 | Int64 + +enum Time::DayOfWeek +end + + +$global_greeting = "Hello world" + +class Greeting + @@default_greeting = "Hello world" + + def initialize(@custom_greeting = nil) + end + + def print_greeting + greeting = @custom_greeting || @@default_greeting + puts greeting + end +end + + +LUCKY_NUMBERS = [3, 7, 11] +DOCUMENTATION_URL = "http://crystal-lang.org/docs" + + +module Scorecard + class Parser + def parse(score_text) + begin + score_text.scan(SCORE_PATTERN) do |match| + handle_match(match) + end + rescue err : ParseError + # handle error ... 
+ end + end + end +end + + +module Money + CURRENCIES = { + "EUR" => 1.0, + "ARS" => 10.55, + "USD" => 1.12, + "JPY" => 134.15, + } + + class Amount + getter :currency, :value + + def initialize(@currency, @value) + end + end + + class CurrencyConversion + def initialize(@amount, @target_currency) + end + + def amount + # implement conversion ... + end + end +end + + +i = 0 +while i < 10 + proc = ->(x : Int32) do + spawn do + puts(x) + end + end + proc.call(i) + i += 1 +end + +Fiber.yield + + +# A buffered channel of capacity 2 +channel = Channel(Int32).new(2) + +spawn do + channel.send(1) + channel.send(2) + channel.send(3) +end + +3.times do |i| + puts channel.receive +end + + +class MyDictionary(K, V) +end + + +MyBox.new(1) #:: MyBox(Int32) +MyBox.new("hello") #:: MyBox(String) + + +module Moo(T) + def t + T + end +end + +class Foo(U) + include Moo(U) + + def initialize(@value : U) + end +end + +foo = Foo.new(1) +foo.t # Int32 + + +class Parent(T) +end + +class Int32Child < Parent(Int32) +end + +class GenericChild(T) < Parent(T) +end + + +class Person +end + + +a = 1 +ptr = pointerof(a) +ptr[100_000] = 2 # undefined behaviour, probably a segmentation fault + + +alias Int32OrString = Int32 | String + + +alias Int32OrNil = Int32? 
+ + +alias Int32OrNil_ = Int32 | ::Nil + + +alias Int32Ptr = Int32* + + +alias Int32Ptr_ = Pointer(Int32) + + +alias Int32_8 = Int32[8] + + +alias Int32_8_ = StaticArray(Int32, 8) + + +alias Int32StringTuple = {Int32, String} + + +alias Int32StringTuple_ = Tuple(Int32, String) + + +alias Int32ToString = Int32 -> String + + +alias Int32ToString_ = Proc(Int32, String) + + +alias ProcThatReturnsInt32 = -> Int32 + + +alias Int32AndCharToString = Int32, Char -> String + + +alias ComplexProc = (Int32 -> Int32) -> String + + +def foo(x : Int32) + "instance" +end + +def foo(x : Int32.class) + "class" +end + +foo 1 # "instance" +foo Int32 # "class" + + +class Parent +end + +class Child1 < Parent +end + +class Child2 < Parent +end + +ary = [] of Parent.class +ary << Child1 +ary << Child2 + + +# Same as not specifying a restriction, not very useful +def foo(x : _) +end + +# A bit more useful: any two arguments Proc that returns an Int32: +def foo(x : _, _ -> Int32) +end + + +#alias SameAsInt32 = typeof(2) +#alias Int32OrString_ = typeof(1, "a") + + +class Person + def initialize(name) + @name = name + @age = 0 + end + + def name + @name + end + + def age + @age + end +end + + +john = Person.new "John" +peter = Person.new "Peter" + +john.name #=> "John" +john.age #=> 0 + +peter.name #=> "Peter" + + +class Person + def self.new(name) + instance = Person.allocate + instance.initialize(name) + instance + end + end + + +if a.is_a?(String) + # here a is a String +end + +if b.is_a?(Number) + # here b is a Number +end + + +a = some_condition ? 
1 : "hello" +# a : Int32 | String + +if a.is_a?(Number) + # a : Int32 +else + # a : String +end + + +if a.is_a?(String) && b.is_a?(Number) + # here a is a String and b is a Number +end + + +a.+(b) + + +struct Vector2 + getter x, y + + def initialize(@x, @y) + end + + def +(other) + Vector2.new(x + other.x, y + other.y) + end +end + +v1 = Vector2.new(1, 2) +v2 = Vector2.new(3, 4) +v1 + v2 #=> Vector2(@x=4, @y=6) + + + + +struct Vector2 + def - + Vector2.new(-x, -y) + end +end + +v1 = Vector2.new(1, 2) +-v1 #=> Vector2(@x=-1, @y=-2) + + + + + +class MyArray + def [](index) + # ... + end + + def [](index1, index2, index3) + # ... + end + + def []=(index, value) + # ... + end +end + +array = MyArray.new + +array[1] # invokes the first method +array[1, 2, 3] # invokes the second method +array[1] = 2 # invokes the third method + +array.[](1) # invokes the first method +array.[](1, 2, 3) # invokes the second method +array.[]=(1, 2) # invokes the third method + + +raise "OH NO!" +raise Exception.new("Some error") + + +class MyException < Exception +end + + +begin + raise MyException.new("OH NO!") +rescue ex : MyException + puts "Rescued MyException: #{ex.message}" +end + + +begin + # ... +rescue ex : MyException | MyOtherException + # only MyException or MyOtherException +rescue + # any other kind of exception +ensure + puts "Cleanup..." +end + + +def some_method + something_dangerous +rescue + # execute if an exception is raised +end + + +array = [1, 2, 3] +array[4] # raises because of IndexError +array[4]? 
# returns nil because of index out of bounds + + +def some_proc(&block : Int32 -> Int32) + block +end + +x = 0 +proc = ->(i : Int32) { x += i } +proc = some_proc(&proc) +proc.call(1) #=> 1 +proc.call(10) #=> 11 +x #=> 11 + + +def add(x, y) + x + y +end + +adder = ->add(Int32, Int32) +adder.call(1, 2) #=> 3 + + +module Curses + class Window + end +end + +Curses::Window.new + + +module ItemsSize + def size + items.size + end +end + +class Items + include ItemsSize + + def items + [1, 2, 3] + end +end + +items = Items.new +items.size #=> 3 + + +module Base64 + extend self + + def encode64(string) + # ... + end + + def decode64(string) + # ... + end +end + +Base64.encode64 "hello" #=> "aGVsbG8=" + + +if some_condition + a = 1 +else + a = "hello" +end + +a_as_int = a as Int32 +a_as_int.abs # works, compiler knows that a_as_int is Int32 + + +ptr = Pointer(Int32).malloc(1) +ptr as Int8* #:: Pointer(Int8) + + +array = [1, 2, 3] + +# object_id returns the address of an object in memory, +# so we create a pointer with that address +ptr = Pointer(Void).new(array.object_id) + +# Now we cast that pointer to the same type, and +# we should get the same value +array2 = ptr as Array(Int32) +array2.same?(array) #=> true + + +a = 1 +b = a as Int32 | Float64 +b #:: Int32 | Float64 + + +ary = [1, 2, 3] + +# We want to create an array 1, 2, 3 of Int32 | Float64 +ary2 = ary.map { |x| x as Int32 | Float64 } + +ary2 #:: Array(Int32 | Float64) +ary2 << 1.5 # OK + + +class Person + def initialize(@name) + end + + def name + @name + end +end + +a = [] of Person +x = a.map { |f| f.name } # Error: can't infer block return type + + +a = [] of Person +x = a.map { |f| f.name as String } # OK + + +Person.new "John" + +a = [] of Person +x = a.map { |f| f.name } # OK + + +loop do + do_something + break if some_condition +end + + +class Point + def initialize(@x, @y) + end +end + +Point.new 1, 2 + +# 2 x Int32 = 2 x 4 = 8 +instance_sizeof(Point) #=> 12 + + +a = 1 +while a < 5 + a += 1 + if a == 3 + 
next + end + puts a +end +# The above prints the numbers 2, 4 and 5 + + +lib C + # In C: double cos(double x) + fun cos(value : Float64) : Float64 + + fun getch : Int32 + + fun srand(seed : UInt32) + + fun exit(status : Int32) : NoReturn + + fun printf(format : UInt8*, ...) : Int32 +end + +C.cos(1.5) #=> 0.0707372 +C.srand(1_u32) + +a = 1 +b = 2 +C.printf "%d + %d = %d\n", a, b, a + b + + +lib LibSDL + fun init = SDL_Init(flags : UInt32) : Int32 +end + +lib LLVMIntrinsics + fun ceil_f32 = "llvm.ceil.f32"(value : Float32) : Float32 +end + +lib MyLib + fun my_fun(some_size : LibC::SizeT) +end + +@[Link("pcre")] +lib LibPCRE +end + + +lib C + ifdef x86_64 + alias SizeT = UInt64 + else + alias SizeT = UInt32 + end + + fun memcmp(p1 : Void*, p2 : Void*, size : C::SizeT) : Int32 +end + + +lib X + enum SomeEnum + Ten = 10 + Twenty = 10 * 2 + ThirtyTwo = 1 << 5 + end +end + + +lib X + enum SomeEnum + A = 1_u32 + end +end + + +X::SomeEnum::Zero #=> 0_i8 +X::SomeEnum::Two #=> 2_i8 + + +lib X + fun callback(f : Int32 -> Int32) +end + + +f = ->(x : Int32) { x + 1 } +X.callback(f) + + +X.callback ->(x) { x + 1 } + + +X.callback nil + + +lib LibFoo + fun store_callback(callback : ->) + fun execute_callback +end + +LibFoo.store_callback ->{ raise "OH NO!" 
} +LibFoo.execute_callback + + +lib LibFoo + fun store_callback(callback : ->) + + @[Raises] + fun execute_callback +end + + +@[Link("pcre")] +lib PCRE + INFO_CAPTURECOUNT = 2 +end + +PCRE::INFO_CAPTURECOUNT #=> 2 + + +lib U + # In C: + # + # union IntOrFloat { + # int some_int; + # double some_float; + # }; + union IntOrFloat + some_int : Int32 + some_float : Float64 + end +end + + +value = U::IntOrFloat.new + + +value = uninitialized U::IntOrFlaot +value.some_int #=> some garbage value + + +value = U::IntOrFloat.new +value.some_int = 1 +value.some_int #=> 1 +value.some_float #=> 4.94066e-324 + + +def change_it(value) + value.some_int = 1 +end + +value = U::IntOrFloat.new +change_it value +value.some_int #=> 0 + + +lib C + # In C: + # + # struct TimeZone { + # int minutes_west; + # int dst_time; + # }; + struct TimeZone + minutes_west : Int32 + dst_time : Int32 + end +end + + +lib C + # This is a forward declaration + struct Node + end + + struct Node + node : Node* + end +end + + +tz = C::TimeZone.new + + +tz = uninitialized C::TimeZone +tz.minutes_west #=> some garbage value + + +tz = C::TimeZone.new +tz.minutes_west = 1 +tz.minutes_west #=> 1 + + +tz = C::TimeZone.new minutes_west: 1, dst_time: 2 +tz.minutes_west #=> 1 +tz.dst_time #=> 2 + + +def change_it(tz) + tz.minutes_west = 1 +end + +tz = C::TimeZone.new +change_it tz +tz.minutes_west #=> 0 + + +lib C + $errno : Int32 +end + + +C.errno #=> some value +C.errno = 0 +C.errno #=> 0 + + +lib C + @[ThreadLocal] + $errno : Int32 +end + + +lib C + fun waitpid(pid : Int32, status_ptr : Int32*, options : Int32) : Int32 +end + + +status_ptr = uninitialized Int32 + +C.waitpid(pid, pointerof(status_ptr), options) + + +C.waitpid(pid, out status_ptr, options) + + +lib X + type CInt = Int32 +end + + +ifdef x86_64 + # some specific code for 64 bits platforms +else + # some specific code for non-64 bits platforms +end + + +ifdef linux && x86_64 + # some specific code for linux 64 bits +end + + +lib C + ifdef linux + struct 
SomeStruct + some_field : Int32 + end + else + struct SomeStruct + some_field : Int64 + end + end +end + + +# Assigns to a local variable +local = 1 + +# Assigns to a global variable +$global = 4 + +class Testing + # Assigns to an instance variable + @instance = 2 + + # Assigns to a class variable + @@class = 3 +end + + +local += 1 # same as: local = local + 1 + +# The above is valid with these operators: +# +, -, *, /, %, |, &, ^, **, <<, >> + +local ||= 1 # same as: local || (local = 1) +local &&= 1 # same as: local && (local = 1) + + +# A setter +person.name=("John") + +# The above can be written as: +person.name = "John" + +# An indexed assignment +objects.[]=(2, 3) + +# The above can be written as: +objects[2] = 3 + +# Not assignment-related, but also syntax sugar: +objects.[](2, 3) + +# The above can be written as: +objects[2, 3] + + +person.age += 1 # same as: person.age = person.age + 1 + +person.name ||= "John" # same as: person.name || (person.name = "John") +person.name &&= "John" # same as: person.name && (person.name = "John") + +objects[1] += 2 # same as: objects[1] = objects[1] + 2 + +objects[1] ||= 2 # same as: objects[1]? || (objects[1] = 2) +objects[1] &&= 2 # same as: objects[1]? 
&& (objects[1] = 2) + + +alias PInt32 = Pointer(Int32) + +ptr = PInt32.malloc(1) # : Pointer(Int32) + + +alias RecArray = Array(Int32) | Array(RecArray) + +ary = [] of RecArray +ary.push [1, 2, 3] +ary.push ary +ary #=> [[1, 2, 3], [...]] + + +module Json + alias Type = Nil | + Bool | + Int64 | + Float64 | + String | + Array(Type) | + Hash(String, Type) +end + + +a = 1 +if a > 0 + a = 10 +end +a #=> 10 + +b = 1 +if b > 2 + b = 10 +else + b = 20 +end +b #=> 20 + + +if some_condition + do_something +elsif some_other_condition + do_something_else +else + do_that +end + + +a = 1 +if some_condition + a = "hello" +else + a = true +end +# a : String | Bool + +b = 1 +if some_condition + b = "hello" +end +# b : Int32 | String + +if some_condition + c = 1 +else + c = "hello" +end +# c : Int32 | String + +if some_condition + d = 1 +end +# d : Int32 | Nil + + +a = 1 +if some_condition + a = "hello" + # a : String + a.size +end +# a : String | Int32 + + +if some_condition + e = 1 +else + e = "hello" + # e : String + return +end +# e : Int32 + + +enum Color : UInt8 + Red # 0 + Green # 1 + Blue = 5 # overwritten to 5 + Yellow # 6 (5 + 1) + + def red? + self == Color::Red + end +end + +Color::Red.value #:: UInt8 + + +@[Flags] +enum IOMode + Read # 1 + Write # 2 + Async # 4 +end + + +IOMode::None.value #=> 0 +IOMode::All.value #=> 7 + + +puts(Color::Red) # prints "Red" +puts(IOMode::Write | IOMode::Async) # prints "Write, Async" + + +puts Color.new(1) #=> prints "Green" + + +puts Color.new(10) #=> prints "10" + + +Color::Red.red? #=> true +Color::Blue.red? #=> false + + +def paint(color : Color) + case color + when Color::Red + # ... + else + # Unusual, but still can happen + raise "unknown color: #{color}" + end +end + +paint Color::Red + + +def paint(color : Symbol) + case color + when :red + # ... 
+ else + raise "unknown color: #{color}" + end +end + +paint :red + + +name = "Crystal" +age = 1 + + +flower = "Tulip" +# At this point 'flower' is a String + +flower = 1 +# At this point 'flower' is an Int32 + + +class Foo + def finalize + # Invoked when Foo is garbage-collected + puts "Bye bye from #{self}!" + end +end + +# Prints "Bye bye ...!" for ever +loop do + Foo.new +end + + +# Defines a method in the program +def add(x, y) + x + y +end + +# Invokes the add method in the program +add(1, 2) #=> 3 + + +def even?(num) + if num % 2 == 0 + return true + end + + return false +end + + +def add(x, y) + x + y +end + +class Foo + def bar + # invokes the program's add method + add(1, 2) + + # invokes Foo's baz method + baz(1, 2) + end + + def baz(x, y) + x * y + end +end + + +def baz(x, y) + x + y +end + +class Foo + def bar + baz(4, 2) #=> 2 + ::baz(4, 2) #=> 6 + end + + def baz(x, y) + x - y + end +end + + +x = 1 + +def add(y) + x + y # error: undefined local variable or method 'x' +end + +add(2) + + +add 1, 2 # same as add(1, 2) + + +class Counter + @@instances = 0 + + def initialize + @@instances += 1 + end + + def self.instances + @@instances + end +end + +Counter.instances #=> 0 +Counter.new +Counter.new +Counter.new +Counter.instances #=> 3 + + +class Counter + def self.increment + @@instances += 1 + end +end + +Counter.increment # Error: undefined method '+' for Nil + + +class Parent + @@counter = 0 +end + +class Child < Parent + def self.counter + @@counter + end +end + +Child.counter #=> nil + + +unless some_condition + then_expression +else + else_expression +end + +# Can also be written as a suffix +close_door unless door_closed? 
+ + +a = 1 +b = typeof(a) #=> Int32 + + +typeof(1, "a", 'a') #=> (Int32 | String | Char) + + +hash = {} of Int32 => String +another_hash = typeof(hash).new #:: Hash(Int32, String) + + +class Array + def self.elem_type(typ) + if typ.is_a?(Array) + elem_type(typ.first) + else + typ + end + end +end + +nest = [1, ["b", [:c, ['d']]]] +flat = Array(typeof(Array.elem_type(nest))).new +typeof(nest) #=> Array(Int32 | Array(String | Array(Symbol | Array(Char)))) +typeof(flat) #=> Array(String | Int32 | Symbol | Char) + + +a = 2 if some_condition + + +x = 0 +proc = ->{ x += 1; x } +proc.call #=> 1 +proc.call #=> 2 +x #=> 2 + + +def counter + x = 0 + ->{ x += 1; x } +end + +proc = counter +proc.call #=> 1 +proc.call #=> 2 + + +def foo + yield +end + +x = 1 +foo do + x = "hello" +end +x # : Int32 | String + + +x = 1 +foo do + x = "hello" +end +x # : Int32 | String + +x = 'a' +x # : Char + + +def capture(&block) + block +end + +x = 1 +capture { x = "hello" } + +x = 'a' +x # : Int32 | String | Char + + +def capture(&block) + block +end + +x = 1 +->{ x = "hello" } + +x = 'a' +x # : Int32 | String | Char + + +abstract class Animal + # Makes this animal talk + abstract def talk +end + +class Dog < Animal + def talk + "Woof!" + end +end + +class Cat < Animal + def talk + "Miau" + end +end + +class Person + getter pet + + def initialize(@name, @pet) + end +end + +john = Person.new "John", Dog.new +peter = Person.new "Peter", Cat.new + + +john.pet.talk #=> "Woof!" + + +a = 1 > 2 ? 
3 : 4 + +# The above is the same as: +a = if 1 > 2 + 3 + else + 4 + end + + +def some_method : String + "hello" +end + + +PI = 3.14 + +module Earth + RADIUS = 6_371_000 +end + +PI #=> 3.14 +Earth::RADIUS #=> 6_371_000 + + +TEN = begin + a = 0 + while a < 10 + a += 1 + end + a +end + +TEN #=> 10 + + +class Person + getter name + + def initialize(@name) + @age = 0 + end +end + +john = Person.new "John" +john.name #=> "John" +john.name.size #=> 4 + + +one = Person.new 1 +one.name #=> 1 +one.name + 2 #=> 3 + + +john = Person.new "John" +one = Person.new 1 + + +john = Person.new "John" +one = Person.new 1 + +# Error: undefined method 'size' for Int32 +john.name.size + +# Error: no overload matches 'String#+' with types Int32 +john.name + 3 + + +john = Person.new "John" +john.name.size +one = Person.new 1 + + +class Person + getter name + + def initialize(@name) + @age = 0 + end + + def address + @address + end + + def address=(@address) + end +end + +john = Person.new "John" +john.address = "Argentina" + + +# Error: undefined method 'size' for Nil +john.address.size + + +class Person + @age = 0 + + def initialize(@name) + end +end + + +class Person + @age : Int32 + + def initialize(@name) + @age = 0 + end +end + + +a = if 2 > 1 + 3 + else + 4 + end +a #=> 3 + + +if 1 > 2 +else + 3 +end + + +def twice(&block) + yield + yield +end + + +twice() do + puts "Hello!" +end + +twice do + puts "Hello!" +end + +twice { puts "Hello!" 
} + + +def twice + yield 1 + yield 2 +end + +twice do |i| + puts "Got #{i}" +end + + +twice { |i| puts "Got #{i}" } + + +def many + yield 1, 2, 3 +end + +many do |x, y, z| + puts x + y + z +end + +# Output: 6 + + +def many + yield 1, 2, 3 +end + +many do |x, y| + puts x + y +end + +# Output: 3 + + +def twice + yield + yield +end + +twice do |i| + puts i.inspect +end + + +def some + yield 1, 'a' + yield true, "hello" + yield 2 +end + +some do |first, second| + # first is Int32 | Bool + # second is Char | String | Nil +end + + +method do |argument| + argument.some_method +end + + +method(&.some_method) + + +method &.some_method(arg1, arg2) + + +method &.+(2) +method &.[index] + + +def twice + v1 = yield 1 + puts v1 + + v2 = yield 2 + puts v2 +end + +twice do |i| + i + 1 +end + + +ary = [1, 2, 3] +ary.map { |x| x + 1 } #=> [2, 3, 4] +ary.select { |x| x % 2 == 1 } #=> [1, 3] + + +def transform(value) + yield value +end + +transform(1) { |x| x + 1 } #=> 2 + + +def thrice + puts "Before 1" + yield 1 + puts "Before 2" + yield 2 + puts "Before 3" + yield 3 + puts "After 3" +end + +thrice do |i| + if i == 2 + break + end +end + + +def twice + yield 1 + yield 2 +end + +twice { |i| i + 1 } #=> 3 +twice { |i| break "hello" } #=> "hello" + + +value = twice do |i| + if i == 1 + break "hello" + end + i + 1 +end +value #:: Int32 | String + + +values = twice { break 1, 2 } +values #=> {1, 2} + + +value = twice { break } +value #=> nil + + +def twice + yield 1 + yield 2 +end + +twice do |i| + if i == 1 + puts "Skipping 1" + next + end + + puts "Got #{i}" +end + + + +def twice + v1 = yield 1 + puts v1 + + v2 = yield 2 + puts v2 +end + +twice do |i| + if i == 1 + next 10 + end + + i + 1 +end + +# Output +# 10 +# 3 + + +class Foo + def one + 1 + end + + def yield_with_self + with self yield + end + + def yield_normally + yield + end +end + +def one + "one" +end + +Foo.new.yield_with_self { one } # => 1 +Foo.new.yield_normally { one } # => "one" + + +def twice + yield 1 + yield 2 +end + 
+twice do |i| + puts "Got: #{i}" +end + + +i = 1 +puts "Got: #{i}" +i = 2 +puts "Got: #{i}" + + +3.times do |i| + puts i +end + + +struct Int + def times + i = 0 + while i < self + yield i + i += 1 + end + end +end + + +i = 0 +while i < 3 + puts i + i += 1 +end + + +class Person + def initialize(@name) + end + + def greet + puts "Hi, I'm #{@name}" + end +end + +class Employee < Person +end + +employee = Employee.new "John" +employee.greet # "Hi, I'm John" + + +class Person + def initialize(@name) + end +end + +class Employee < Person + def initialize(@name, @company_name) + end +end + +Employee.new "John", "Acme" # OK +Employee.new "Peter" # Error: wrong number of arguments + # for 'Employee:Class#new' (1 for 2) + + +class Person + def greet(msg) + puts "Hi, #{msg}" + end +end + +class Employee < Person + def greet(msg) + puts "Hello, #{msg}" + end +end + +p = Person.new +p.greet "everyone" # "Hi, everyone" + +e = Employee.new +e.greet "everyone" # "Hello, everyone" + + +class Person + def greet(msg) + puts "Hi, #{msg}" + end +end + +class Employee < Person + def greet(msg : Int32) + puts "Hi, this is a number: #{msg}" + end +end + +e = Employee.new +e.greet "everyone" # "Hi, everyone" + +e.greet 1 # "Hi, this is a number: 1" + + +class Person + def greet(msg) + puts "Hello, "#{msg}" + end +end + +class Employee < Person + def greet(msg) + super # Same as: super(msg) + super("another message") + end +end + + +def int_to_int(&block : Int32 -> Int32) + block +end + +proc = int_to_int { |x| x + 1 } +proc.call(1) #=> 2 + + +class Model + def on_save(&block) + @on_save_callback = block + end + + def save + if callback = @on_save_callback + callback.call + end + end +end + +model = Model.new +model.on_save { puts "Saved!" } +model.save # prints "Saved!" 
+ + +def some_proc(&block : Int32 ->) + block +end + +proc = some_proc { |x| x + 1 } +proc.call(1) # void + + +def some_proc(&block : Int32 -> _) + block +end + +proc = some_proc { |x| x + 1 } +proc.call(1) # 2 + +proc = some_proc { |x| x.to_s } +proc.call(1) # "1" + + +macro update_x + x = 1 +end + +x = 0 +update_x +x #=> 1 + + +macro dont_update_x + %x = 1 + puts %x +end + +x = 0 +dont_update_x # outputs 1 +x #=> 0 + + +macro fresh_vars_sample(*names) + # First declare vars + {% for name, index in names %} + print "Declaring: ", "%name{index}", '\n' + %name{index} = {{index}} + {% end %} + + # Then print them + {% for name, index in names %} + print "%name{index}: ", %name{index}, '\n' + {% end %} +end + +fresh_vars_sample a, b, c + +# Sample output: +# Declaring: __temp_255 +# Declaring: __temp_256 +# Declaring: __temp_257 +# __temp_255: 0 +# __temp_256: 1 +# __temp_257: 2 + + +class Object + macro def instance_vars_names : Array(String) + {{ @type.instance_vars.map &.name.stringify }} + end +end + +class Person + def initialize(@name, @age) + end +end + +person = Person.new "John", 30 +person.instance_vars_names #=> ["name", "age"] + + +class Object + macro def has_instance_var?(name) : Bool + # We cannot access name inside the macro expansion here, + # instead we need to use the macro language to construct an array + # and do the inclusion check at runtime. + {{ @type.instance_vars.map &.name.stringify }}.includes? 
name + end +end + +person = Person.new "John", 30 +person.has_instance_var?("name") #=> true +person.has_instance_var?("birthday") #=> false + + +class Parent + macro inherited + def {{@type.name.downcase.id}} + 1 + end + end +end + +class Child < Parent +end + +Child.new.child #=> 1 + + +macro method_missing(name, args, block) + print "Got ", {{name.id.stringify}}, " with ", {{args.size}}, " arguments", '\n' +end + +foo # Prints: Got foo with 0 arguments +bar 'a', 'b' # Prints: Got bar with 2 arguments + + +sizeof(Int32) #=> 4 +sizeof(Int64) #=> 8 + + +# On a 64 bits machine +sizeof(Pointer(Int32)) #=> 8 +sizeof(String) #=> 8 + + +a = 1 +sizeof(typeof(a)) #=> 4 + + +class Foo + macro emphasize(value) + "***#{ {{value}} }***" + end + + def yield_with_self + with self yield + end +end + +Foo.new.yield_with_self { emphasize(10) } #=> "***10***" + + +# This generates: +# +# def :foo +# 1 +# end +define_method :foo, 1 + + +macro define_method(name, content) + def {{name.id}} + {{content}} + end +end + +# This correctly generates: +# +# def foo +# 1 +# end +define_method :foo, 1 + + +macro define_method(name, content) + def {{name}} + {% if content == 1 %} + "one" + {% else %} + {{content}} + {% end %} + end +end + +define_method foo, 1 +define_method bar, 2 + +foo #=> one +bar #=> 2 + + +{% if env("TEST") %} + puts "We are in test mode" +{% end %} + + +macro define_dummy_methods(names) + {% for name, index in names %} + def {{name.id}} + {{index}} + end + {% end %} +end + +define_dummy_methods [foo, bar, baz] + +foo #=> 0 +bar #=> 1 +baz #=> 2 + + +macro define_dummy_methods(hash) + {% for key, value in hash %} + def {{key.id}} + {{value}} + end + {% end %} +end +define_dummy_methods({foo: 10, bar: 20}) +foo #=> 10 +bar #=> 20 + + +{% for name, index in ["foo", "bar", "baz"] %} + def {{name.id}} + {{index}} + end +{% end %} + +foo #=> 0 +bar #=> 1 +baz #=> 2 + + +macro define_dummy_methods(*names) + {% for name, index in names %} + def {{name.id}} + {{index}} + end + 
{% end %} +end + +define_dummy_methods foo, bar, baz + +foo #=> 0 +bar #=> 1 +baz #=> 2 + + +macro println(*values) + print {{*values}}, '\n' +end + +println 1, 2, 3 # outputs 123\n + + +VALUES = [1, 2, 3] + +{% for value in VALUES %} + puts {{value}} +{% end %} + + +until some_condition + do_this +end + +# The above is the same as: +while !some_condition + do_this +end + + +a = some_condition ? nil : 3 +# a is Int32 or Nil + +if a + # Since the only way to get here is if a is truthy, + # a can't be nil. So here a is Int32. + a.abs +end + + +if a = some_expression + # here a is not nil +end + + +if a && b + # here both a and b are guaranteed not to be Nil +end + + +if @a + # here @a can be nil +end + + +# First option: assign it to a variable +if a = @a + # here a can't be nil +end + +# Second option: use `Object#try` found in the standard library +@a.try do |a| + # here a can't be nil +end + + +if method # first call to a method that can return Int32 or Nil + # here we know that the first call did not return Nil + method # second call can still return Int32 or Nil +end + + +class Person + def become_older(by = 1) + @age += by + end +end + +john = Person.new "John" +john.age #=> 0 + +john.become_older +john.age #=> 1 + +john.become_older 2 +john.age #=> 3 + + +john.become_older by: 5 + + +def some_method(x, y = 1, z = 2, w = 3) + # do something... +end + +some_method 10 # x = 10, y = 1, z = 2, w = 3 +some_method 10, z: 10 # x = 10, y = 1, z = 10, w = 3 +some_method 10, w: 1, y: 2, z: 3 # x = 10, y = 2, z = 3, w = 1 + + +case exp +when value1, value2 + do_something +when value3 + do_something_else +else + do_another_thing +end + + +case var +when String + # var : String + do_something +when Int32 + # var : Int32 + do_something_else +else + # here var is neither a String nor an Int32 + do_another_thing +end + + +case num +when .even? + do_something +when .odd? 
+ do_something_else +end + + +case +when cond1, cond2 + do_something +when cond3 + do_something_else +end + + +a = 1 +a.responds_to?(:abs) #=> true +a.responds_to?(:size) #=> false + + +foo_or_bar = /foo|bar/ +heeello = /h(e+)llo/ +integer = /\d+/ + + +r = /foo/imx + + +slash = /\// + + +r = %r(regex with slash: /) + + +"hello world" + + +"\"" # double quote +"\\" # backslash +"\e" # escape +"\f" # form feed +"\n" # newline +"\r" # carriage return +"\t" # tab +"\v" # vertical tab + + +"\101" # == "A" +"\123" # == "S" +"\12" # == "\n" +"\1" # string with one character with code point 1 + + +"\u0041" # == "A" + + +"\u{41}" # == "A" +"\u{1F52E}" # == "🔮" + + +"hello + world" # same as "hello\n world" + + +"hello " \ +"world, " \ +"no newlines" # same as "hello world, no newlines" + + +"hello \ + world, \ + no newlines" # same as "hello world, no newlines" + + +# Supports double quotes and nested parenthesis +%(hello ("world")) # same as "hello (\"world\")" + +# Supports double quotes and nested brackets +%[hello ["world"]] # same as "hello [\"world\"]" + +# Supports double quotes and nested curlies +%{hello {"world"}} # same as "hello {\"world\"}" + +# Supports double quotes and nested angles +%<hello <"world">> # same as "hello <\"world\">" + + +<<-XML +<parent> + <child /> +</parent> +XML + + +# Same as "Hello\n world" +<<-STRING + Hello + world + STRING + +# Same as " Hello\n world" +<<-STRING + Hello + world + STRING + + +a = 1 +b = 2 +"sum = #{a + b}" # "sum = 3" + + +1.0 # Float64 +1.0_f32 # Float32 +1_f32 # Float32 + +1e10 # Float64 +1.5e10 # Float64 +1.5e-7 # Float64 + ++1.3 # Float64 +-0.5 # Float64 + + +1_000_000.111_111 # better than 1000000.111111 + + +'a' +'z' +'0' +'_' +'あ' + + +'\'' # single quote +'\\' # backslash +'\e' # escape +'\f' # form feed +'\n' # newline +'\r' # carriage return +'\t' # tab +'\v' # vertical tab + + +'\101' # == 'A' +'\123' # == 'S' +'\12' # == '\n' +'\1' # code point 1 + + +'\u0041' # == 'A' + + +'\u{41}' # == 'A' +'\u{1F52E}' # 
== '🔮' + + +{1 => 2, 3 => 4} # Hash(Int32, Int32) +{1 => 2, 'a' => 3} # Hash(Int32 | Char, Int32) + + +{} of Int32 => Int32 # same as Hash(Int32, Int32).new + + +{key1: 'a', key2: 'b'} # Hash(Symbol, Char) + + +{"key1": 'a', "key2": 'b'} # Hash(String, Char) + + +MyType{"foo": "bar"} + + +tmp = MyType.new +tmp["foo"] = "bar" +tmp + + +tmp = MyType(typeof("foo"), typeof("bar")).new +tmp["foo"] = "bar" +tmp + + +MyType(String, String) {"foo": "bar"} + + +:hello +:good_bye + +# With spaces and symbols +:"symbol with spaces" + +# Ending with question and exclamation marks +:question? +:exclamation! + +# For the operators +:+ +:- +:* +:/ +:== +:< +:<= +:> +:>= +:! +:!= +:=~ +:!~ +:& +:| +:^ +:~ +:** +:>> +:<< +:% +:[] +:[]? +:[]= +:<=> +:=== + + +x..y # an inclusive range, in mathematics: [x, y] +x...y # an exclusive range, in mathematics: [x, y) + + +# A proc without arguments +->{ 1 } # Proc(Int32) + +# A proc with one argument +->(x : Int32) { x.to_s } # Proc(Int32, String) + +# A proc with two arguments: +->(x : Int32, y : Int32) { x + y } # Proc(Int32, Int32, Int32) + + +Proc(Int32, String).new { |x| x.to_s } # Proc(Int32, String) + + +proc = ->(x : Int32, y : Int32) { x + y } +proc.call(1, 2) #=> 3 + + +def one + 1 +end + +proc = ->one +proc.call #=> 1 + + +def plus_one(x) + x + 1 +end + +proc = ->plus_one(Int32) +proc.call(41) #=> 42 + + +str = "hello" +proc = ->str.count(Char) +proc.call('e') #=> 1 +proc.call('l') #=> 2 + + +tuple = {1, "hello", 'x'} # Tuple(Int32, String, Char) +tuple[0] #=> 1 (Int32) +tuple[1] #=> "hello" (String) +tuple[2] #=> 'x' (Char) + + +[1, 2, 3] # Array(Int32) +[1, "hello", 'x'] # Array(Int32 | String | Char) + + +[] of Int32 # same as Array(Int32).new + + +%w(one two three) # ["one", "two", "three"] + + +%i(one two three) # [:one, :two, :three] + + +MyType{1, 2, 3} + + +tmp = MyType.new +tmp << 1 +tmp << 2 +tmp << 3 +tmp + + +tmp = MyType(typeof(1, 2, 3)).new +tmp << 1 +tmp << 2 +tmp << 3 +tmp + + +MyType(Int32 | String) {1, 2, "foo"} 
+ + +nil + + +1 # Int32 + +1_i8 # Int8 +1_i16 # Int16 +1_i32 # Int32 +1_i64 # Int64 + +1_u8 # UInt8 +1_u16 # UInt16 +1_u32 # UInt32 +1_u64 # UInt64 + ++10 # Int32 +-20 # Int32 + +2147483648 # Int64 +9223372036854775808 # UInt64 + + +1_000_000 # better than 1000000 + + +0b1101 # == 13 + + +0o123 # == 83 + + +0xFE012D # == 16646445 +0xfe012d # == 16646445 + + +true # A Bool that is true +false # A Bool that is false + + +a = 1 + +ptr = pointerof(a) +ptr.value = 2 + +a #=> 2 + + +class Point + def initialize(@x, @y) + end + + def x + @x + end + + def x_ptr + pointerof(@x) + end +end + +point = Point.new 1, 2 + +ptr = point.x_ptr +ptr.value = 10 + +point.x #=> 10 + + +def add(x : Number, y : Number) + x + y +end + +# Ok +add 1, 2 # Ok + +# Error: no overload matches 'add' with types Bool, Bool +add true, false + + +def add(x, y) + x + y +end + +add true, false + + +# A class that has a + method but isn't a Number +class Six + def +(other) + 6 + other + end +end + +# add method without type restrictions +def add(x, y) + x + y +end + +# OK +add Six.new, 10 + +# add method with type restrictions +def restricted_add(x : Number, y : Number) + x + y +end + +# Error: no overload matches 'restricted_add' with types Six, Int32 +restricted_add Six.new, 10 + + +class Person + def ==(other : self) + other.name == name + end + + def ==(other) + false + end +end + +john = Person.new "John" +another_john = Person.new "John" +peter = Person.new "Peter" + +john == another_john #=> true +john == peter #=> false (names differ) +john == 1 #=> false (because 1 is not a Person) + + +class Person + def self.compare(p1 : self, p2 : self) + p1.name == p2.name + end +end + +john = Person.new "John" +peter = Person.new "Peter" + +Person.compare(john, peter) # OK + + +def foo(x : Int32) +end + +foo 1 # OK +foo "hello" # Error + + +def foo(x : Int32.class) +end + +foo Int32 # OK +foo String # Error + + +def foo(x : Int32.class) + puts "Got Int32" +end + +def foo(x : String.class) + puts "Got 
String" +end + +foo Int32 # prints "Got Int32" +foo String # prints "Got String" + + +def foo(*args : Int32) +end + +def foo(*args : String) +end + +foo 1, 2, 3 # OK, invokes first overload +foo "a", "b", "c" # OK, invokes second overload +foo 1, 2, "hello" # Error +foo() # Error + + +def foo + # This is the empty-tuple case +end + + +def foo(x : T) + T +end + +foo(1) #=> Int32 +foo("hello") #=> String + + +def foo(x : Array(T)) + T +end + +foo([1, 2]) #=> Int32 +foo([1, "a"]) #=> (Int32 | String) + + +def foo(x : T.class) + Array(T) +end + +foo(Int32) #=> Array(Int32) +foo(String) #=> Array(String) + + +class Person + # Increases age by one + def become_older + @age += 1 + end + + # Increases age by the given number of years + def become_older(years : Int32) + @age += years + end + + # Increases age by the given number of years, as a String + def become_older(years : String) + @age += years.to_i + end + + # Yields the current age of this person and increases + # its age by the value returned by the block + def become_older + @age += yield @age + end +end + +person = Person.new "John" + +person.become_older +person.age #=> 1 + +person.become_older 5 +person.age #=> 6 + +person.become_older "12" +person.age #=> 18 + +person.become_older do |current_age| + current_age < 20 ? 10 : 30 +end +person.age #=> 28 + + +a = 1 +a.is_a?(Int32) #=> true +a.is_a?(String) #=> false +a.is_a?(Number) #=> true +a.is_a?(Int32 | String) #=> true + + +# One for each thread +@[ThreadLocal] +$values = [] of Int32 + + +@[AlwaysInline] +def foo + 1 +end + + +@[NoInline] +def foo + 1 +end + + +lib LibFoo + @[CallConvention("X86_StdCall")] + fun foo : Int32 +end + + +def sum(*elements) + total = 0 + elements.each do |value| + total += value + end + total +end + +# elements is Tuple(Int32, Int32, Int32, Float64) +sum 1, 2, 3, 4.5 + + +if a.responds_to?(:abs) + # here a's type will be reduced to those responding to the 'abs' method +end + + +a = some_condition ? 
1 : "hello" +# a : Int32 | String + +if a.responds_to?(:abs) + # here a will be Int32, since Int32#abs exists but String#abs doesn't +else + # here a will be String +end + + +if (a = @a).responds_to?(:abs) + # here a is guaranteed to respond to `abs` +end + + +def capture(&block) + block +end + +def invoke(&block) + block.call +end + +proc = capture { puts "Hello" } +invoke(&proc) # prints "Hello" + + + + +def capture(&block) + block +end + +def twice + yield + yield +end + +proc = capture { puts "Hello" } +twice &proc + + +twice &->{ puts "Hello" } + + +def say_hello + puts "Hello" +end + +twice &->say_hello + + +def foo + yield 1 +end + +def wrap_foo + puts "Before foo" + foo do |x| + yield x + end + puts "After foo" +end + +wrap_foo do |i| + puts i +end + + +def foo + yield 1 +end + +def wrap_foo(&block : Int32 -> _) + puts "Before foo" + foo(&block) + puts "After foo" +end + +wrap_foo do |i| + puts i +end + + +foo_forward do |i| + break # error +end + + +a = 2 +while (a += 1) < 20 + if a == 10 + # goes to 'puts a' + break + end +end +puts a #=> 10 + + +class Person + private def say(message) + puts message + end + + def say_hello + say "hello" # OK, no receiver + self.say "hello" # Error, self is a receiver + + other = Person.new "Other" + other.say "hello" # Error, other is a receiver + end +end + + +class Employee < Person + def say_bye + say "bye" # OK + end +end + + +module Namespace + class Foo + protected def foo + puts "Hello" + end + end + + class Bar + def bar + # Works, because Foo and Bar are under Namespace + Foo.new.foo + end + end +end + +Namespace::Bar.new.bar + + +class Person + protected def self.say(message) + puts message + end + + def say_hello + Person.say "hello" + end +end + + +buffer = uninitialized UInt8[256] diff --git a/tests/examplefiles/test.mt b/tests/examplefiles/test.mt new file mode 100644 index 00000000..008dc88e --- /dev/null +++ b/tests/examplefiles/test.mt @@ -0,0 +1,7 @@ +exports (main) + +def main(=> currentProcess) :Int 
as DeepFrozen: + traceln(`Current process: $currentProcess`) + "A \r \n \x00 \u1234" + '\u1234' + return 0 diff --git a/tests/examplefiles/tsql_example.sql b/tests/examplefiles/tsql_example.sql new file mode 100644 index 00000000..cbd76091 --- /dev/null +++ b/tests/examplefiles/tsql_example.sql @@ -0,0 +1,72 @@ +-- Example Transact-SQL file. + +-- Single line comment +/* A comment + * spawning two lines. */ + /* An indented comment + * spawning multiple + * lines. */ +/* A /* nested */ comment. */ + +select + left(emp.firstname, 1) + '.' + [emp.surname] as "Name", + dep.name as [Department] +into + #temp_employee +from + employee as emp + inner join department as dep on + dep.ident_code = emp.department_id +where + emp.date_of_birth >= '1990-01-01'; +go + +declare @TextToFind nvarchar(100) = N'some +text across +multiple lines'; + +set @TextToFind varchar(32) = 'hello' + ' world'; +set @TextTiFind += '!'; + +declare @Count int = 17 * (3 - 5); + +delete from + [server].[database].[schema].[table] +where + [Text] = @TextToFind and author Not LIKE '%some%'; + +goto overthere; +overthere: + +select + 123 as "int 1", + +123 as "int 2", + -123 as "int 3", + 0x20 as "hex int", + 123.45 as "float 1", + -1.23e45 as "float 2" + +1.23E+45 as "float 3", + -1.23e-45 as "float 4", + 1. as "float 5", + .1 as "float 6", + 1.e2 as "float 7", + .1e2 as "float 8"; + +Select @@Error, $PARTITion.RangePF1(10); + +select top 3 Ähnliches from Müll; + +-- Example transaction +BEGIN TRAN + +BEGIN TRY + INSERT INTO #temp_employe(Name, Department) VALUES ('L. Miller', 'Sales') + iNsErT inTO #temp_employe(Name, Department) VaLuEs ('M. Webster', 'Helpdesk') + COMMIT TRAN +END TRY +BEGIN CATCH + print 'cannot perform transaction; rolling back'; + ROLLBACK TRAN +END CATCH + +-- Comment at end without newline.
\ No newline at end of file diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py new file mode 100644 index 00000000..d007766d --- /dev/null +++ b/tests/test_bibtex.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +""" + BibTeX Test + ~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import textwrap +import unittest + +from pygments.lexers import BibTeXLexer, BSTLexer +from pygments.token import Token + + +class BibTeXTest(unittest.TestCase): + def setUp(self): + self.lexer = BibTeXLexer() + + def testPreamble(self): + data = u'@PREAMBLE{"% some LaTeX code here"}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'{'), + (Token.String, u'"'), + (Token.String, u'% some LaTeX code here'), + (Token.String, u'"'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(data)), tokens) + + def testString(self): + data = u'@STRING(SCI = "Science")' + tokens = [ + (Token.Name.Class, u'@STRING'), + (Token.Punctuation, u'('), + (Token.Name.Attribute, u'SCI'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Science'), + (Token.String, u'"'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(data)), tokens) + + def testEntry(self): + data = u""" + This is a comment. 
+ + @ARTICLE{ruckenstein-diffusion, + author = "Liu, Hongquin" # and # "Ruckenstein, Eli", + year = 1997, + month = JAN, + pages = "888-895" + } + """ + + tokens = [ + (Token.Comment, u'This is a comment.'), + (Token.Text, u'\n\n'), + (Token.Name.Class, u'@ARTICLE'), + (Token.Punctuation, u'{'), + (Token.Name.Label, u'ruckenstein-diffusion'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'author'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Liu, Hongquin'), + (Token.String, u'"'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.Name.Variable, u'and'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Ruckenstein, Eli'), + (Token.String, u'"'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'year'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Number, u'1997'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'month'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Name.Variable, u'JAN'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'pages'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'888-895'), + (Token.String, u'"'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) + + def testComment(self): + data = '@COMMENT{test}' + tokens = [ + (Token.Comment, u'@COMMENT'), + (Token.Comment, u'{test}'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(data)), tokens) + + def testMissingBody(self): + data = '@ARTICLE xxx' + tokens = [ + (Token.Name.Class, u'@ARTICLE'), + (Token.Text, u' '), + (Token.Error, 
u'x'), + (Token.Error, u'x'), + (Token.Error, u'x'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(data)), tokens) + + def testMismatchedBrace(self): + data = '@PREAMBLE(""}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'('), + (Token.String, u'"'), + (Token.String, u'"'), + (Token.Error, u'}'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(data)), tokens) + + +class BSTTest(unittest.TestCase): + def setUp(self): + self.lexer = BSTLexer() + + def testBasicBST(self): + data = """ + % BibTeX standard bibliography style `plain' + + INTEGERS { output.state before.all } + + FUNCTION {sort.format.title} + { 't := + "A " #2 + "An " #3 + "The " #4 t chop.word + chop.word + chop.word + sortify + #1 global.max$ substring$ + } + + ITERATE {call.type$} + """ + tokens = [ + (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"), + (Token.Text, u'\n\n'), + (Token.Keyword, u'INTEGERS'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Variable, u'output.state'), + (Token.Text, u' '), + (Token.Name.Variable, u'before.all'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n\n'), + (Token.Keyword, u'FUNCTION'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Name.Variable, u'sort.format.title'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Function, u"'t"), + (Token.Text, u' '), + (Token.Name.Variable, u':='), + (Token.Text, u'\n'), + (Token.Literal.String, u'"A "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#2'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"An "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#3'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"The "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#4'), + (Token.Text, u' '), + (Token.Name.Variable, u't'), + (Token.Text, u' '), + 
(Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n '), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'sortify'), + (Token.Text, u'\n'), + (Token.Literal.Number, u'#1'), + (Token.Text, u' '), + (Token.Name.Builtin, u'global.max$'), + (Token.Text, u' '), + (Token.Name.Builtin, u'substring$'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n\n'), + (Token.Keyword, u'ITERATE'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Name.Builtin, u'call.type$'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) diff --git a/tests/test_cpp.py b/tests/test_cpp.py new file mode 100644 index 00000000..ef59965c --- /dev/null +++ b/tests/test_cpp.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +""" + CPP Tests + ~~~~~~~~~ + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import unittest + +from pygments.lexers import CppLexer +from pygments.token import Token + + +class CppTest(unittest.TestCase): + def setUp(self): + self.lexer = CppLexer() + + def testGoodComment(self): + fragment = u'/* foo */\n' + tokens = [ + (Token.Comment.Multiline, u'/* foo */'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testOpenComment(self): + fragment = u'/* foo\n' + tokens = [ + (Token.Comment.Multiline, u'/* foo\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_crystal.py b/tests/test_crystal.py new file mode 100644 index 00000000..9a1588f2 --- /dev/null +++ b/tests/test_crystal.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +""" + Basic CrystalLexer Test + ~~~~~~~~~~~~~~~~~~~~ + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from __future__ import unicode_literals +import unittest + +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.lexers import CrystalLexer + + +class CrystalTest(unittest.TestCase): + + def setUp(self): + self.lexer = CrystalLexer() + self.maxDiff = None + + def testRangeSyntax1(self): + fragment = '1...3\n' + tokens = [ + (Number.Integer, '1'), + (Operator, '...'), + (Number.Integer, '3'), + (Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testRangeSyntax2(self): + fragment = '1 .. 3\n' + tokens = [ + (Number.Integer, '1'), + (Text, ' '), + (Operator, '..'), + (Text, ' '), + (Number.Integer, '3'), + (Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testInterpolationNestedCurly(self): + fragment = ( + '"A#{ (3..5).group_by { |x| x/2}.map ' + 'do |k,v| "#{k}" end.join }" + "Z"\n') + tokens = [ + (String.Double, '"'), + (String.Double, 'A'), + (String.Interpol, '#{'), + (Text, ' '), + (Punctuation, '('), + (Number.Integer, '3'), + (Operator, '..'), + (Number.Integer, '5'), + (Punctuation, ')'), + (Operator, '.'), + (Name, 'group_by'), + (Text, ' '), + (String.Interpol, '{'), + (Text, ' '), + (Operator, '|'), + (Name, 'x'), + (Operator, '|'), + (Text, ' '), + (Name, 'x'), + (Operator, '/'), + (Number.Integer, '2'), + (String.Interpol, '}'), + (Operator, '.'), + (Name, 'map'), + (Text, ' '), + (Keyword, 'do'), + (Text, ' '), + (Operator, '|'), + (Name, 'k'), + (Punctuation, ','), + (Name, 'v'), + (Operator, '|'), + (Text, ' '), + (String.Double, '"'), + (String.Interpol, '#{'), + (Name, 'k'), + (String.Interpol, '}'), + (String.Double, '"'), + (Text, ' '), + (Keyword, 'end'), + (Operator, '.'), + (Name, 'join'), + (Text, ' '), + (String.Interpol, '}'), + (String.Double, '"'), + (Text, ' '), + (Operator, '+'), + (Text, ' '), + (String.Double, '"'), + (String.Double, 'Z'), + (String.Double, '"'), + (Text, '\n'), 
+ ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testOperatorMethods(self): + fragment = '([] of Int32).[]?(5)\n' + tokens = [ + (Punctuation, '('), + (Operator, '['), + (Operator, ']'), + (Text, ' '), + (Keyword, 'of'), + (Text, ' '), + (Name.Builtin, 'Int32'), + (Punctuation, ')'), + (Operator, '.'), + (Name.Operator, '[]?'), + (Punctuation, '('), + (Number.Integer, '5'), + (Punctuation, ')'), + (Text, '\n') + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testArrayAccess(self): + fragment = '[5][5]?\n' + tokens = [ + (Operator, '['), + (Number.Integer, '5'), + (Operator, ']'), + (Operator, '['), + (Number.Integer, '5'), + (Operator, ']?'), + (Text, '\n') + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testNumbers(self): + for kind, testset in [ + (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'), + (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'), + (Number.Bin, '0b1001_0110 0b0u8'), + (Number.Oct, '0o17 0o7_i32'), + (Number.Hex, '0xdeadBEEF'), + ]: + for fragment in testset.split(): + self.assertEqual([(kind, fragment), (Text, '\n')], + list(self.lexer.get_tokens(fragment + '\n'))) + + for fragment in '01 0b2 0x129g2 0o12358'.split(): + self.assertEqual(next(self.lexer.get_tokens(fragment + '\n'))[0], + Error) + + def testChars(self): + for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]: + self.assertEqual([(String.Char, fragment), (Text, '\n')], + list(self.lexer.get_tokens(fragment + '\n'))) + self.assertEqual(next(self.lexer.get_tokens("'abc'"))[0], Error) + + def testMacro(self): + fragment = ( + 'def<=>(other : self) : Int\n' + '{%for field in %w(first_name middle_name last_name)%}\n' + 'cmp={{field.id}}<=>other.{{field.id}}\n' + 'return cmp if cmp!=0\n' + '{%end%}\n' + '0\n' + 'end\n') + tokens = [ + (Keyword, 'def'), + (Name.Function, '<=>'), + (Punctuation, '('), + (Name, 'other'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + 
(Keyword.Pseudo, 'self'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name.Builtin, 'Int'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'for'), + (Text, ' '), + (Name, 'field'), + (Text, ' '), + (Keyword, 'in'), + (Text, ' '), + (String.Other, '%w('), + (String.Other, 'first_name middle_name last_name'), + (String.Other, ')'), + (String.Interpol, '%}'), + (Text, '\n'), + (Name, 'cmp'), + (Operator, '='), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Operator, '<=>'), + (Name, 'other'), + (Operator, '.'), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Text, '\n'), + (Keyword, 'return'), + (Text, ' '), + (Name, 'cmp'), + (Text, ' '), + (Keyword, 'if'), + (Text, ' '), + (Name, 'cmp'), + (Operator, '!='), + (Number.Integer, '0'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'end'), + (String.Interpol, '%}'), + (Text, '\n'), + (Number.Integer, '0'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testLib(self): + fragment = ( + '@[Link("some")]\nlib LibSome\n' + '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n' + 'end\n') + tokens = [ + (Operator, '@['), + (Name.Decorator, 'Link'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'some'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + (Keyword, 'lib'), + (Text, ' '), + (Name.Namespace, 'LibSome'), + (Text, '\n'), + (Operator, '@['), + (Name.Decorator, 'CallConvention'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'X86_StdCall'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + (Keyword, 'fun'), + (Text, ' '), + (Name.Function, 'foo'), + (Operator, '='), + (String.Double, '"'), + (String.Double, 'some.foo'), + (String.Double, 
'"'), + (Punctuation, '('), + (Name, 'thing'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name.Builtin, 'Void'), + (Operator, '*'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name, 'LibC'), + (Operator, '::'), + (Name.Builtin, 'Int'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testEscapedBracestring(self): + fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n' + tokens = [ + (Name, 'str'), + (Operator, '.'), + (Name, 'gsub'), + (Punctuation, '('), + (String.Regex, '%r{'), + (String.Regex, '\\\\'), + (String.Regex, '\\\\'), + (String.Regex, '}'), + (Punctuation, ','), + (Text, ' '), + (String.Double, '"'), + (String.Double, '/'), + (String.Double, '"'), + (Punctuation, ')'), + (Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_data.py b/tests/test_data.py new file mode 100644 index 00000000..be371419 --- /dev/null +++ b/tests/test_data.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +""" + Data Tests + ~~~~~~~~~~ + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import unittest + +from pygments.lexers import JsonLexer, JsonBareObjectLexer +from pygments.token import Token + + +class JsonTest(unittest.TestCase): + def setUp(self): + self.lexer = JsonLexer() + + def testBasic(self): + fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n' + tokens = [ + (Token.Punctuation, u'{'), + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + +class JsonBareObjectTest(unittest.TestCase): + def setUp(self): + self.lexer = JsonBareObjectLexer() + + def testBasic(self): + # This is the same as testBasic for JsonLexer above, except the + # enclosing curly braces are removed. 
+ fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n' + tokens = [ + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testClosingCurly(self): + # This can be an Error token, but should not be a can't-pop-from-stack + # exception. + fragment = '}"a"\n' + tokens = [ + (Token.Error, '}'), + (Token.Name.Tag, '"a"'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testClosingCurlyInValue(self): + fragment = '"": ""}\n' + tokens = [ + (Token.Name.Tag, '""'), + (Token.Punctuation, ':'), + (Token.Text, ' '), + (Token.Literal.String.Double, '""'), + (Token.Error, '}'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py index 924e1184..f43abf9b 100644 --- a/tests/test_examplefiles.py +++ b/tests/test_examplefiles.py @@ -46,6 +46,10 @@ def test_example_files(): if not os.path.isfile(absfn): continue + extension = os.getenv('TEST_EXT') + if extension and not absfn.endswith(extension): + continue + print(absfn) with open(absfn, 'rb') as f: code = f.read() diff --git a/tests/test_ezhil.py b/tests/test_ezhil.py index 23b9cb41..15cc13b1 100644 --- a/tests/test_ezhil.py +++ b/tests/test_ezhil.py @@ -94,7 +94,8 @@ class EzhilTest(unittest.TestCase): முடி\n""" tokens = [ (Token.Comment.Single, - u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe 
\u0b85\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), + u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85' + u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), (Token.Keyword,u'நிரல்பாகம்'), (Token.Text, u' '), (Token.Name, u'gcd'), diff --git a/tests/test_javascript.py b/tests/test_javascript.py new file mode 100644 index 00000000..59890659 --- /dev/null +++ b/tests/test_javascript.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +""" + Javascript tests + ~~~~~~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import unittest + +from pygments.lexers import CoffeeScriptLexer +from pygments.token import Token + +COFFEE_SLASH_GOLDEN = [ + # input_str, slashes_are_regex_here + (r'/\\/', True), + (r'/\\/i', True), + (r'/\//', True), + (r'/(\s)/', True), + ('/a{2,8}/', True), + ('/b*c?d+/', True), + ('/(capture-match)/', True), + ('/(?:do-not-capture-match)/', True), + ('/this|or|that/', True), + ('/[char-set]/', True), + ('/[^neg-char_st]/', True), + ('/^.*$/', True), + (r'/\n(\f)\0\1\d\b\cm\u1234/', True), + (r'/^.?([^/\\\n\w]*)a\1+$/.something(or_other) # something more complex', True), + ("foo = (str) ->\n /'|\"/.test str", True), + ('a = a / b / c', False), + ('a = a/b/c', False), + ('a = a/b/ c', False), + ('a = a /b/c', False), + ('a = 1 + /d/.test(a)', True), +] + +def test_coffee_slashes(): + for input_str, slashes_are_regex_here in COFFEE_SLASH_GOLDEN: + yield coffee_runner, input_str, slashes_are_regex_here + +def coffee_runner(input_str, slashes_are_regex_here): + lex = CoffeeScriptLexer() + output = list(lex.get_tokens(input_str)) + print(output) + for t, s in output: + if '/' in s: + is_regex = t is Token.String.Regex + assert is_regex == slashes_are_regex_here, (t, s) + +class CoffeeTest(unittest.TestCase): + def setUp(self): + self.lexer = CoffeeScriptLexer() + + def testMixedSlashes(self): + fragment = u'a?/foo/:1/2;\n' + tokens = [ + 
(Token.Name.Other, u'a'), + (Token.Operator, u'?'), + (Token.Literal.String.Regex, u'/foo/'), + (Token.Operator, u':'), + (Token.Literal.Number.Integer, u'1'), + (Token.Operator, u'/'), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testBewareInfiniteLoop(self): + # This demonstrates the case that "This isn't really guarding" comment + # refers to. + fragment = '/a/x;\n' + tokens = [ + (Token.Text, ''), + (Token.Operator, '/'), + (Token.Name.Other, 'a'), + (Token.Operator, '/'), + (Token.Name.Other, 'x'), + (Token.Punctuation, ';'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_julia.py b/tests/test_julia.py new file mode 100644 index 00000000..08c420d3 --- /dev/null +++ b/tests/test_julia.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +""" + Julia Tests + ~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import unittest + +from pygments.lexers import JuliaLexer +from pygments.token import Token + + +class JuliaTests(unittest.TestCase): + def setUp(self): + self.lexer = JuliaLexer() + + def test_unicode(self): + """ + Test that unicode character, √, in an expression is recognized + """ + fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n' + tokens = [ + (Token.Name, u's'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Operator, u'\u221a'), + (Token.Punctuation, u'('), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'1'), + (Token.Operator, u'/'), + (Token.Name, u'n'), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Operator, u'*'), + (Token.Text, u' '), + (Token.Name, u'sum'), + (Token.Punctuation, u'('), + (Token.Name, u'count'), + (Token.Text, u' '), + (Token.Operator, u'.^'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Operator, u'-'), + (Token.Text, u' '), + (Token.Name, u'mu'), + (Token.Text, u' '), + (Token.Operator, u'.^'), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_php.py b/tests/test_php.py new file mode 100644 index 00000000..050ca70d --- /dev/null +++ b/tests/test_php.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +""" + PHP Tests + ~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import unittest + +from pygments.lexers import PhpLexer +from pygments.token import Token + + +class PhpTest(unittest.TestCase): + def setUp(self): + self.lexer = PhpLexer() + + def testStringEscapingRun(self): + fragment = '<?php $x="{\\""; ?>\n' + tokens = [ + (Token.Comment.Preproc, '<?php'), + (Token.Text, ' '), + (Token.Name.Variable, '$x'), + (Token.Operator, '='), + (Token.Literal.String.Double, '"'), + (Token.Literal.String.Double, '{'), + (Token.Literal.String.Escape, '\\"'), + (Token.Literal.String.Double, '"'), + (Token.Punctuation, ';'), + (Token.Text, ' '), + (Token.Comment.Preproc, '?>'), + (Token.Other, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_properties.py b/tests/test_properties.py new file mode 100644 index 00000000..333f3d7a --- /dev/null +++ b/tests/test_properties.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" + Properties Tests + ~~~~~~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import unittest + +from pygments.lexers.configs import PropertiesLexer +from pygments.token import Token + + +class PropertiesTest(unittest.TestCase): + def setUp(self): + self.lexer = PropertiesLexer() + + def test_comments(self): + """ + Assures lines lead by either # or ! are recognized as a comment + """ + fragment = '! a comment\n# also a comment\n' + tokens = [ + (Token.Comment, '! 
a comment'), + (Token.Text, '\n'), + (Token.Comment, '# also a comment'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_leading_whitespace_comments(self): + fragment = ' # comment\n' + tokens = [ + (Token.Text, ' '), + (Token.Comment, '# comment'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_escaped_space_in_key(self): + fragment = 'key = value\n' + tokens = [ + (Token.Name.Attribute, 'key'), + (Token.Text, ' '), + (Token.Operator, '='), + (Token.Text, ' '), + (Token.Literal.String, 'value'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_escaped_space_in_value(self): + fragment = 'key = doubleword\\ value\n' + tokens = [ + (Token.Name.Attribute, 'key'), + (Token.Text, ' '), + (Token.Operator, '='), + (Token.Text, ' '), + (Token.Literal.String, 'doubleword\\ value'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_space_delimited_kv_pair(self): + fragment = 'key value\n' + tokens = [ + (Token.Name.Attribute, 'key'), + (Token.Text, ' '), + (Token.Literal.String, 'value\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_just_key(self): + fragment = 'justkey\n' + tokens = [ + (Token.Name.Attribute, 'justkey'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def test_just_key_with_space(self): + fragment = 'just\\ key\n' + tokens = [ + (Token.Name.Attribute, 'just\\ key'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_python.py b/tests/test_python.py new file mode 100644 index 00000000..f5784cb1 --- /dev/null +++ b/tests/test_python.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +""" + Python Tests + ~~~~~~~~~~~~ + + :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. 
+ :license: BSD, see LICENSE for details. +""" + +import unittest + +from pygments.lexers import PythonLexer, Python3Lexer +from pygments.token import Token + + +class PythonTest(unittest.TestCase): + def setUp(self): + self.lexer = PythonLexer() + + def test_cls_builtin(self): + """ + Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo + + """ + fragment = 'class TestClass():\n @classmethod\n def hello(cls):\n pass\n' + tokens = [ + (Token.Keyword, 'class'), + (Token.Text, ' '), + (Token.Name.Class, 'TestClass'), + (Token.Punctuation, '('), + (Token.Punctuation, ')'), + (Token.Punctuation, ':'), + (Token.Text, '\n'), + (Token.Text, ' '), + (Token.Name.Decorator, '@classmethod'), + (Token.Text, '\n'), + (Token.Text, ' '), + (Token.Keyword, 'def'), + (Token.Text, ' '), + (Token.Name.Function, 'hello'), + (Token.Punctuation, '('), + (Token.Name.Builtin.Pseudo, 'cls'), + (Token.Punctuation, ')'), + (Token.Punctuation, ':'), + (Token.Text, '\n'), + (Token.Text, ' '), + (Token.Keyword, 'pass'), + (Token.Text, '\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + +class Python3Test(unittest.TestCase): + def setUp(self): + self.lexer = Python3Lexer() + + def testNeedsName(self): + """ + Tests that '@' is recognized as an Operator + """ + fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n' + tokens = [ + (Token.Name, u'S'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Name, u'H'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, u' '), + (Token.Name, u'beta'), + (Token.Text, u' '), + (Token.Operator, u'-'), + (Token.Text, u' '), + (Token.Name, u'r'), + (Token.Punctuation, u')'), + (Token.Operator, u'.'), + (Token.Name, u'T'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, u' '), + (Token.Name, u'inv'), + (Token.Punctuation, u'('), + (Token.Name, u'H'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, 
u' '), + (Token.Name, u'V'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, u' '), + (Token.Name, u'H'), + (Token.Operator, u'.'), + (Token.Name, u'T'), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Name, u'H'), + (Token.Text, u' '), + (Token.Operator, u'@'), + (Token.Text, u' '), + (Token.Name, u'beta'), + (Token.Text, u' '), + (Token.Operator, u'-'), + (Token.Text, u' '), + (Token.Name, u'r'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) diff --git a/tests/test_regexopt.py b/tests/test_regexopt.py index dd56a446..6322c735 100644 --- a/tests/test_regexopt.py +++ b/tests/test_regexopt.py @@ -46,6 +46,7 @@ class RegexOptTestCase(unittest.TestCase): random.randint(1, len(kwlist) - 1)) no_match = set(kwlist) - set(to_match) rex = re.compile(regex_opt(to_match)) + self.assertEqual(rex.groups, 1) for w in to_match: self.assertTrue(rex.match(w)) for w in no_match: @@ -74,3 +75,36 @@ class RegexOptTestCase(unittest.TestCase): rex = re.compile(opt) m = rex.match('abfoo') self.assertEqual(5, m.end()) + + def test_different_length_grouping(self): + opt = regex_opt(('a', 'xyz')) + print(opt) + rex = re.compile(opt) + self.assertTrue(rex.match('a')) + self.assertTrue(rex.match('xyz')) + self.assertFalse(rex.match('b')) + self.assertEqual(1, rex.groups) + + def test_same_length_grouping(self): + opt = regex_opt(('a', 'b')) + print(opt) + rex = re.compile(opt) + self.assertTrue(rex.match('a')) + self.assertTrue(rex.match('b')) + self.assertFalse(rex.match('x')) + + self.assertEqual(1, rex.groups) + groups = rex.match('a').groups() + self.assertEqual(('a',), groups) + + def test_same_length_suffix_grouping(self): + opt = regex_opt(('a', 'b'), suffix='(m)') + print(opt) + rex = re.compile(opt) + self.assertTrue(rex.match('am')) + self.assertTrue(rex.match('bm')) + self.assertFalse(rex.match('xm')) + 
self.assertFalse(rex.match('ax')) + self.assertEqual(2, rex.groups) + groups = rex.match('am').groups() + self.assertEqual(('a', 'm'), groups) diff --git a/tests/test_shell.py b/tests/test_shell.py index 4eb5a15a..6faac9fd 100644 --- a/tests/test_shell.py +++ b/tests/test_shell.py @@ -10,7 +10,7 @@ import unittest from pygments.token import Token -from pygments.lexers import BashLexer +from pygments.lexers import BashLexer, BashSessionLexer class BashTest(unittest.TestCase): @@ -87,3 +87,56 @@ class BashTest(unittest.TestCase): (Token.Text, u'\n'), ] self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testArrayNums(self): + fragment = u'a=(1 2 3)\n' + tokens = [ + (Token.Name.Variable, u'a'), + (Token.Operator, u'='), + (Token.Operator, u'('), + (Token.Literal.Number, u'1'), + (Token.Text, u' '), + (Token.Literal.Number, u'2'), + (Token.Text, u' '), + (Token.Literal.Number, u'3'), + (Token.Operator, u')'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + + def testEndOfLineNums(self): + fragment = u'a=1\nb=2 # comment\n' + tokens = [ + (Token.Name.Variable, u'a'), + (Token.Operator, u'='), + (Token.Literal.Number, u'1'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'b'), + (Token.Operator, u'='), + (Token.Literal.Number, u'2'), + (Token.Text, u' '), + (Token.Comment.Single, u'# comment\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + +class BashSessionTest(unittest.TestCase): + + def setUp(self): + self.lexer = BashSessionLexer() + self.maxDiff = None + + def testNeedsName(self): + fragment = u'$ echo \\\nhi\nhi\n' + tokens = [ + (Token.Text, u''), + (Token.Generic.Prompt, u'$'), + (Token.Text, u' '), + (Token.Name.Builtin, u'echo'), + (Token.Text, u' '), + (Token.Literal.String.Escape, u'\\\n'), + (Token.Text, u'hi'), + (Token.Text, u'\n'), + (Token.Generic.Output, u'hi\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + diff --git 
a/tests/test_sql.py b/tests/test_sql.py new file mode 100644 index 00000000..c5f5c758 --- /dev/null +++ b/tests/test_sql.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +""" + Pygments SQL lexers tests + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +import unittest + +from pygments.lexers.sql import TransactSqlLexer +from pygments.token import Comment, Name, Number, Punctuation, Whitespace + + +class TransactSqlLexerTest(unittest.TestCase): + + def setUp(self): + self.lexer = TransactSqlLexer() + + def _assertAreTokensOfType(self, examples, expected_token_type): + for test_number, example in enumerate(examples.split(), 1): + token_count = 0 + for token_type, token_value in self.lexer.get_tokens(example): + if token_type != Whitespace: + token_count += 1 + self.assertEqual( + token_type, expected_token_type, + 'token_type #%d for %s is %s but must be %s' % + (test_number, token_value, token_type, expected_token_type)) + self.assertEqual( + token_count, 1, + '%s must yield exactly 1 token instead of %d' % + (example, token_count)) + + def _assertTokensMatch(self, text, expected_tokens_without_trailing_newline): + actual_tokens = tuple(self.lexer.get_tokens(text)) + if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')): + actual_tokens = tuple(actual_tokens[:-1]) + self.assertEqual( + expected_tokens_without_trailing_newline, actual_tokens, + 'text must yield expected tokens: %s' % text) + + def test_can_lex_float(self): + self._assertAreTokensOfType( + '1. 
1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float) + self._assertTokensMatch( + '1e2.1e2', + ((Number.Float, '1e2'), (Number.Float, '.1e2')) + ) + + def test_can_reject_almost_float(self): + self._assertTokensMatch( + '.e1', + ((Punctuation, '.'), (Name, 'e1'))) + + def test_can_lex_integer(self): + self._assertAreTokensOfType( + '1 23 456', Number.Integer) + + def test_can_lex_names(self): + self._assertAreTokensOfType( + u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2', Name) + + def test_can_lex_comments(self): + self._assertTokensMatch('--\n', ((Comment.Single, '--\n'),)) + self._assertTokensMatch('/**/', ( + (Comment.Multiline, '/*'), (Comment.Multiline, '*/') + )) + self._assertTokensMatch('/*/**/*/', ( + (Comment.Multiline, '/*'), + (Comment.Multiline, '/*'), + (Comment.Multiline, '*/'), + (Comment.Multiline, '*/'), + )) diff --git a/tests/test_whiley.py b/tests/test_whiley.py new file mode 100644 index 00000000..f447ffec --- /dev/null +++ b/tests/test_whiley.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +""" + Whiley Test + ~~~~~~~~~~~ + + :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import unittest + +from pygments.lexers import WhileyLexer +from pygments.token import Token + + +class WhileyTest(unittest.TestCase): + def setUp(self): + self.lexer = WhileyLexer() + + def testWhileyOperator(self): + fragment = u'123 \u2200 x\n' + tokens = [ + (Token.Literal.Number.Integer, u'123'), + (Token.Text, u' '), + (Token.Operator, u'\u2200'), + (Token.Text, u' '), + (Token.Name, u'x'), + (Token.Text, u'\n'), + ] + self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) |