author Tim Hatch <tim@timhatch.com> 2015-10-13 09:50:10 -0700
committer Tim Hatch <tim@timhatch.com> 2015-10-13 09:50:10 -0700
commit 1e1426b13b56038238d3744b0ca2c66c99b4bedc (patch)
tree df15cc2666e2756108ee69c58cf6b8714ae1ec8d
parent 63c6e8545a1e6b10958e06cd81a4c50f85c7ccc2 (diff)
parent 0e93ca51b2d9e6a5dfcf16c68830230bd2663412 (diff)
download pygments-1e1426b13b56038238d3744b0ca2c66c99b4bedc.tar.gz
Merged in jonashaag/pygments-main/jonashaag/fix-typo-1441310321454 (pull request #496)
Fix typo
-rw-r--r-- AUTHORS 6
-rw-r--r-- CHANGES 7
-rw-r--r-- doc/docs/lexerdevelopment.rst 5
-rw-r--r-- pygments/formatters/terminal.py 70
-rw-r--r-- pygments/lexer.py 1
-rw-r--r-- pygments/lexers/_mapping.py 6
-rw-r--r-- pygments/lexers/archetype.py 4
-rw-r--r-- pygments/lexers/asm.py 3
-rw-r--r-- pygments/lexers/chapel.py 8
-rw-r--r-- pygments/lexers/configs.py 75
-rw-r--r-- pygments/lexers/esoteric.py 51
-rw-r--r-- pygments/lexers/julia.py 5
-rw-r--r-- pygments/lexers/parasail.py 81
-rw-r--r-- pygments/lexers/prolog.py 8
-rw-r--r-- pygments/lexers/rdf.py 98
-rw-r--r-- pygments/lexers/scripting.py 277
-rw-r--r-- pygments/lexers/shell.py 4
-rw-r--r-- pygments/lexers/templates.py 10
-rw-r--r-- tests/examplefiles/example.ezt 32
-rw-r--r-- tests/examplefiles/example.jcl 31
-rw-r--r-- tests/examplefiles/example.mac 6
-rw-r--r-- tests/examplefiles/example.tf 162
-rw-r--r-- tests/examplefiles/example.ttl 43
-rw-r--r-- tests/examplefiles/test.bpl 140
-rw-r--r-- tests/examplefiles/test.psl 182
-rw-r--r-- tests/test_lexers_other.py 23
-rw-r--r-- tests/test_shell.py 26
-rw-r--r-- tests/test_terminal_formatter.py 51
28 files changed, 1338 insertions, 77 deletions
diff --git a/AUTHORS b/AUTHORS
index d4d85bb7..2d000194 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -7,7 +7,7 @@ Other contributors, listed alphabetically, are:
* Sam Aaron -- Ioke lexer
* Ali Afshar -- image formatter
-* Thomas Aglassinger -- Rexx lexer
+* Thomas Aglassinger -- Easytrieve, JCL and Rexx lexers
* Kumar Appaiah -- Debian control lexer
* Andreas Amann -- AppleScript lexer
* Timothy Armstrong -- Dart lexer fixes
@@ -38,6 +38,7 @@ Other contributors, listed alphabetically, are:
* Bryan Davis -- EBNF lexer
* Owen Durni -- Haxe lexer
* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer
+* James Edwards -- Terraform lexer
* Nick Efford -- Python 3 lexer
* Sven Efftinge -- Xtend lexer
* Artem Egorkine -- terminal256 formatter
@@ -58,11 +59,14 @@ Other contributors, listed alphabetically, are:
* Patrick Gotthardt -- PHP namespaces support
* Olivier Guibe -- Asymptote lexer
* Jordi Gutiérrez Hermoso -- Octave lexer
+* Florian Hahn -- Boogie lexer
* Martin Harriman -- SNOBOL lexer
* Matthew Harrison -- SVG formatter
* Steven Hazel -- Tcl lexer
+* Dan Michael Heggø -- Turtle lexer
* Aslak Hellesøy -- Gherkin lexer
* Greg Hendershott -- Racket lexer
+* Justin Hendrick -- ParaSail lexer
* David Hess, Fish Software, Inc. -- Objective-J lexer
* Varun Hiremath -- Debian control lexer
* Rob Hoelz -- Perl 6 lexer
diff --git a/CHANGES b/CHANGES
index 87122344..3ae6d49d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -18,6 +18,11 @@ Version 2.1
* Modula-2 with multi-dialect support (#1090)
* Fortran fixed format (PR#213)
* Archetype Definition language (PR#483)
+ * Terraform (PR#432)
+ * JCL, Easytrieve (PR#208)
+ * ParaSail (PR#381)
+ * Boogie (PR#420)
+ * Turtle (PR#425)
- Added styles:
@@ -30,6 +35,8 @@ Version 2.1
- Added support for async/await to Python 3 lexer.
+- Rewrote the linenos option for TerminalFormatter (the output is improved,
+ but slightly different from before).
Version 2.0.3
-------------
diff --git a/doc/docs/lexerdevelopment.rst b/doc/docs/lexerdevelopment.rst
index 08069889..81e4d3fe 100644
--- a/doc/docs/lexerdevelopment.rst
+++ b/doc/docs/lexerdevelopment.rst
@@ -345,15 +345,14 @@ There are a few more things you can do with states:
`PythonLexer`'s string literal processing.
- If you want your lexer to start lexing in a different state you can modify the
- stack by overloading the `get_tokens_unprocessed()` method::
+ stack by overriding the `get_tokens_unprocessed()` method::
from pygments.lexer import RegexLexer
class ExampleLexer(RegexLexer):
tokens = {...}
- def get_tokens_unprocessed(self, text):
- stack = ['root', 'otherstate']
+ def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
yield item
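
As a runnable sketch of the pattern this documentation change describes (the token rules below are invented for illustration, not taken from the docs), the new signature keeps the start stack overridable by callers::

    from pygments.lexer import RegexLexer
    from pygments.token import Comment, Text

    class ExampleLexer(RegexLexer):
        # Toy rules: 'otherstate' treats every line as a comment.
        tokens = {
            'root': [(r'[^\n]*\n', Text)],
            'otherstate': [(r'[^\n]*\n', Comment)],
        }

        def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
            # The active state is the top (last element) of the stack, so
            # lexing starts in 'otherstate' unless the caller passes a
            # different stack.
            for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
                yield item

    from __future__ import print_function  # place at top of file in practice
    for pos, token, value in ExampleLexer().get_tokens_unprocessed('hi\n'):
        print(pos, token, value)   # 0 Token.Comment hi
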
diff --git a/pygments/formatters/terminal.py b/pygments/formatters/terminal.py
index 3c4b025f..a6eb48a4 100644
--- a/pygments/formatters/terminal.py
+++ b/pygments/formatters/terminal.py
@@ -101,51 +101,35 @@ class TerminalFormatter(Formatter):
def _write_lineno(self, outfile):
self._lineno += 1
- outfile.write("\n%04d: " % self._lineno)
-
- def _format_unencoded_with_lineno(self, tokensource, outfile):
- self._write_lineno(outfile)
-
- for ttype, value in tokensource:
- if value.endswith("\n"):
- self._write_lineno(outfile)
- value = value[:-1]
- color = self.colorscheme.get(ttype)
- while color is None:
- ttype = ttype[:-1]
- color = self.colorscheme.get(ttype)
- if color:
- color = color[self.darkbg]
- spl = value.split('\n')
- for line in spl[:-1]:
- self._write_lineno(outfile)
- if line:
- outfile.write(ansiformat(color, line[:-1]))
- if spl[-1]:
- outfile.write(ansiformat(color, spl[-1]))
- else:
- outfile.write(value)
-
- outfile.write("\n")
+ outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
+
+ def _get_color(self, ttype):
+ # self.colorscheme is a dict containing usually generic types, so we
+ # have to walk the tree of dots. The base Token type must be a key,
+ # even if it's empty string, as in the default above.
+ colors = self.colorscheme.get(ttype)
+ while colors is None:
+ ttype = ttype.parent
+ colors = self.colorscheme.get(ttype)
+ return colors[self.darkbg]
def format_unencoded(self, tokensource, outfile):
if self.linenos:
- self._format_unencoded_with_lineno(tokensource, outfile)
- return
+ self._write_lineno(outfile)
for ttype, value in tokensource:
- color = self.colorscheme.get(ttype)
- while color is None:
- ttype = ttype[:-1]
- color = self.colorscheme.get(ttype)
- if color:
- color = color[self.darkbg]
- spl = value.split('\n')
- for line in spl[:-1]:
- if line:
- outfile.write(ansiformat(color, line))
- outfile.write('\n')
- if spl[-1]:
- outfile.write(ansiformat(color, spl[-1]))
- else:
- outfile.write(value)
+ color = self._get_color(ttype)
+
+ for line in value.splitlines(True):
+ if color:
+ outfile.write(ansiformat(color, line.rstrip('\n')))
+ else:
+ outfile.write(line.rstrip('\n'))
+ if line.endswith('\n'):
+ if self.linenos:
+ self._write_lineno(outfile)
+ else:
+ outfile.write('\n')
+
+ if self.linenos:
+ outfile.write("\n")
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 07e81033..581508b0 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -14,7 +14,6 @@ from __future__ import print_function
import re
import sys
import time
-import itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 2e855570..db010b79 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -51,6 +51,7 @@ LEXERS = {
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
+ 'BoogieLexer': ('pygments.lexers.esoteric', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
@@ -112,6 +113,7 @@ LEXERS = {
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
+ 'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
@@ -181,6 +183,7 @@ LEXERS = {
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
+ 'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
@@ -266,6 +269,7 @@ LEXERS = {
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
+ 'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
@@ -346,10 +350,12 @@ LEXERS = {
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
+ 'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
+ 'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
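
The new registry rows surface through the standard lookup helpers; a quick sanity check, assuming a checkout that contains this commit::

    from pygments.lexers import get_lexer_by_name, get_lexer_for_filename

    print(get_lexer_by_name('terraform').name)      # Terraform
    print(get_lexer_by_name('easytrieve').name)     # Easytrieve
    print(get_lexer_for_filename('job.jcl').name)   # JCL
    print(get_lexer_for_filename('a.ttl').name)     # Turtle
    print(get_lexer_for_filename('b.bpl').name)     # Boogie
    print(get_lexer_for_filename('c.psl').name)     # ParaSail
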
diff --git a/pygments/lexers/archetype.py b/pygments/lexers/archetype.py
index b88fa2e9..4f1b2645 100644
--- a/pygments/lexers/archetype.py
+++ b/pygments/lexers/archetype.py
@@ -274,6 +274,8 @@ class AdlLexer(AtomsLexer):
(r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'),
(r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)),
(r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)),
+ # template overlay delimiter
+ (r'^----------*\n', Text, '#pop'),
(r'^.*\n', String),
default('#pop'),
],
@@ -300,7 +302,7 @@ class AdlLexer(AtomsLexer):
default('#pop'),
],
'root': [
- (r'^(archetype|template|template_overlay|operational_template|'
+ (r'^(archetype|template_overlay|operational_template|template|'
r'speciali[sz]e)', Generic.Heading),
(r'^(language|description|ontology|terminology|annotations|'
r'component_terminologies|revision_history)[ \t]*\n',
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index c308f7fc..918ed83b 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -286,7 +286,8 @@ class LlvmLexer(RegexLexer):
r'|lshr|ashr|and|or|xor|icmp|fcmp'
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
- r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
+ r'|fptosi|inttoptr|ptrtoint|bitcast|addrspacecast'
+ r'|select|va_arg|ret|br|switch'
r'|invoke|unwind|unreachable'
r'|indirectbr|landingpad|resume'
diff --git a/pygments/lexers/chapel.py b/pygments/lexers/chapel.py
index 6fb6920c..5b7be4dd 100644
--- a/pygments/lexers/chapel.py
+++ b/pygments/lexers/chapel.py
@@ -46,10 +46,10 @@ class ChapelLexer(RegexLexer):
'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum',
'export', 'extern', 'for', 'forall', 'if', 'index', 'inline',
'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on',
- 'otherwise', 'pragma', 'private', 'public', 'reduce', 'return',
- 'scan', 'select', 'serial', 'single', 'sparse', 'subdomain',
- 'sync', 'then', 'use', 'when', 'where', 'while', 'with', 'yield',
- 'zip'), suffix=r'\b'),
+ 'otherwise', 'pragma', 'private', 'public', 'reduce',
+ 'require', 'return', 'scan', 'select', 'serial', 'single',
+ 'sparse', 'subdomain', 'sync', 'then', 'use', 'when', 'where',
+ 'while', 'with', 'yield', 'zip'), suffix=r'\b'),
Keyword),
(r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
(r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py
index 1bd8f55a..6b00e5f4 100644
--- a/pygments/lexers/configs.py
+++ b/pygments/lexers/configs.py
@@ -18,7 +18,8 @@ from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
- 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer']
+ 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
+ 'TerraformLexer']
class IniLexer(RegexLexer):
@@ -544,3 +545,75 @@ class DockerLexer(RegexLexer):
(r'(.*\\\n)*.+', using(BashLexer)),
],
}
+
+
+class TerraformLexer(RegexLexer):
+ """
+ Lexer for `Terraform .tf files <https://www.terraform.io/>`_.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'Terraform'
+ aliases = ['terraform', 'tf']
+ filenames = ['*.tf']
+ mimetypes = ['application/x-tf', 'application/x-terraform']
+
+ tokens = {
+ 'root': [
+ include('string'),
+ include('punctuation'),
+ include('curly'),
+ include('basic'),
+ include('whitespace'),
+ (r'[0-9]+', Number),
+ ],
+ 'basic': [
+ (words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
+ (r'\s*/\*', Comment.Multiline, 'comment'),
+ (r'\s*#.*\n', Comment.Single),
+ (r'(.*?)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)),
+ (words(('variable', 'resource', 'provider', 'provisioner', 'module'),
+ prefix=r'\b', suffix=r'\b'), Keyword.Reserved, 'function'),
+ (words(('ingress', 'egress', 'listener', 'default', 'connection'),
+ prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
+ (r'\$\{', String.Interpol, 'var_builtin'),
+ ],
+ 'function': [
+ (r'(\s+)(".*")(\s+)', bygroups(Text, String, Text)),
+ include('punctuation'),
+ include('curly'),
+ ],
+ 'var_builtin': [
+ (r'\$\{', String.Interpol, '#push'),
+ (words(('concat', 'file', 'join', 'lookup', 'element'),
+ prefix=r'\b', suffix=r'\b'), Name.Builtin),
+ include('string'),
+ include('punctuation'),
+ (r'\s+', Text),
+ (r'\}', String.Interpol, '#pop'),
+ ],
+ 'string': [
+ (r'(".*")', bygroups(String.Double)),
+ ],
+ 'punctuation': [
+ (r'[\[\]\(\),.]', Punctuation),
+ ],
+ # Keep this separate from punctuation - we sometimes want to use
+ # different tokens for { }.
+ 'curly': [
+ (r'\{', Text.Punctuation),
+ (r'\}', Text.Punctuation),
+ ],
+ 'comment': [
+ (r'[^*/]', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ],
+ 'whitespace': [
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text),
+ ],
+ }
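
A minimal smoke test for the new lexer (the HCL snippet is arbitrary; note that an unquoted ${...} is what reaches the var_builtin state)::

    from __future__ import print_function

    from pygments.lexers.configs import TerraformLexer

    hcl = 'key = ${file("path.txt")}\n'
    # '${' opens var_builtin, 'file' is one of the builtins listed above,
    # and the braces come back as String.Interpol tokens.
    for token, value in TerraformLexer().get_tokens(hcl):
        if value.strip():
            print(token, repr(value))
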
diff --git a/pygments/lexers/esoteric.py b/pygments/lexers/esoteric.py
index f61b292d..1f317260 100644
--- a/pygments/lexers/esoteric.py
+++ b/pygments/lexers/esoteric.py
@@ -9,11 +9,11 @@
:license: BSD, see LICENSE for details.
"""
-from pygments.lexer import RegexLexer, include
+from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Error
+ Number, Punctuation, Error, Whitespace
-__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer']
+__all__ = ['BrainfuckLexer', 'BefungeLexer', 'BoogieLexer', 'RedcodeLexer']
class BrainfuckLexer(RegexLexer):
@@ -112,3 +112,48 @@ class RedcodeLexer(RegexLexer):
(r'[-+]?\d+', Number.Integer),
],
}
+
+
+class BoogieLexer(RegexLexer):
+ """
+ For `Boogie <https://boogie.codeplex.com/>`_ source code.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Boogie'
+ aliases = ['boogie']
+ filenames = ['*.bpl']
+
+ tokens = {
+ 'root': [
+ # Whitespace and Comments
+ (r'\n', Whitespace),
+ (r'\s+', Whitespace),
+ (r'//[/!](.*?)\n', Comment.Doc),
+ (r'//(.*?)\n', Comment.Single),
+ (r'/\*', Comment.Multiline, 'comment'),
+
+ (words((
+ 'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
+ 'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
+ 'then', 'var', 'while'),
+ suffix=r'\b'), Keyword),
+ (words(('const',), suffix=r'\b'), Keyword.Reserved),
+
+ (words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
+ include('numbers'),
+ (r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
+ (r"([{}():;,.])", Punctuation),
+ # Identifier
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'comment': [
+ (r'[^*/]+', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline),
+ ],
+ 'numbers': [
+ (r'[0-9]+', Number.Integer),
+ ],
+ }
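
A quick token dump for the new Boogie lexer (arbitrary snippet, not from the test file)::

    from __future__ import print_function

    from pygments.lexers.esoteric import BoogieLexer

    src = 'axiom 0 <= N; // upper bound\nprocedure foo() { call bar(); }\n'
    # 'axiom', 'procedure' and 'call' should be Keyword, '<=' Operator,
    # and the trailing '// ...' Comment.Single.
    for token, value in BoogieLexer().get_tokens(src):
        print(token, repr(value))
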
diff --git a/pygments/lexers/julia.py b/pygments/lexers/julia.py
index 1304b395..cf7c7d61 100644
--- a/pygments/lexers/julia.py
+++ b/pygments/lexers/julia.py
@@ -14,7 +14,7 @@ import re
from pygments.lexer import Lexer, RegexLexer, bygroups, combined, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
-from pygments.util import shebang_matches
+from pygments.util import shebang_matches, unirange
__all__ = ['JuliaLexer', 'JuliaConsoleLexer']
@@ -91,7 +91,8 @@ class JuliaLexer(RegexLexer):
# names
(r'@[\w.]+', Name.Decorator),
- (u'[a-zA-Z_\u00A1-\U0010FFFF][a-zA-Z_0-9\u00A1-\U0010FFFF]*!*', Name),
+ (u'(?:[a-zA-Z_\u00A1-\uffff]|%s)(?:[a-zA-Z_0-9\u00A1-\uffff]|%s)*!*' %
+ ((unirange(0x10000, 0x10ffff),)*2), Name),
# numbers
(r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float),
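
The unirange() helper exists because narrow Python builds cannot put astral code points directly in a character class; a sketch of what the substitution produces::

    import re

    from pygments.util import unirange

    # On wide builds this is a plain character class; on narrow builds it
    # is an alternation of surrogate pairs. Either way it can be embedded
    # in a larger pattern, as the Julia identifier regex above does.
    astral = unirange(0x10000, 0x10ffff)
    print(re.match(astral, u'\U0001F600') is not None)  # True
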
diff --git a/pygments/lexers/parasail.py b/pygments/lexers/parasail.py
new file mode 100644
index 00000000..3cfffbee
--- /dev/null
+++ b/pygments/lexers/parasail.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.parasail
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexer for ParaSail.
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
+ this, combined, inherit, do_insertions, default
+from pygments.util import get_bool_opt, get_list_opt
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Literal
+
+__all__ = ['ParaSailLexer']
+
+
+class ParaSailLexer(RegexLexer):
+ """
+ For `ParaSail <http://www.parasail-lang.org>`_ source code.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'ParaSail'
+ aliases = ['parasail']
+ filenames = ['*.psi', '*.psl']
+ mimetypes = ['text/x-parasail']
+
+ flags = re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r'[^\S\n]+', Text),
+ (r'//.*?\n', Comment.Single),
+ (r'\b(and|or|xor)=', Operator.Word),
+ (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|'
+ r'(is|not)\s+null)\b',
+ Operator.Word),
+ # Keywords
+ (r'\b(abs|abstract|all|block|class|concurrent|const|continue|'
+ r'each|end|exit|extends|exports|forward|func|global|implements|'
+ r'import|in|interface|is|lambda|locked|new|not|null|of|op|'
+ r'optional|private|queued|ref|return|reverse|separate|some|'
+ r'type|until|var|with|'
+ # Control flow
+ r'if|then|else|elsif|case|for|while|loop)\b',
+ Keyword.Reserved),
+ (r'(abstract\s+)?(interface|class|op|func|type)',
+ Keyword.Declaration),
+ # Literals
+ (r'"[^"]*"', String),
+ (r'\\[\'ntrf"0]', String.Escape),
+ (r'#[a-zA-Z]\w*', Literal), #Enumeration
+ include('numbers'),
+ (r"'[^']'", String.Char),
+ (r'[a-zA-Z]\w*', Name),
+ # Operators and Punctuation
+ (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
+ r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\||\|=|/=|\+|-|\*|/|'
+ r'\.\.|<\.\.|\.\.<|<\.\.<)',
+ Operator),
+ (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)',
+ Punctuation),
+ (r'\n+', Text),
+ ],
+ 'numbers' : [
+ (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base
+ (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex
+ (r'0[bB][01][01_]*', Number.Bin), # C-like bin
+ (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp
+ Number.Float),
+ (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float
+ (r'\d[0-9_]*', Number.Integer), # integer
+ ],
+ }
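
Smoke-testing the new ParaSail lexer (arbitrary one-liner; '#true' and '#false' should surface as enumeration Literal tokens)::

    from __future__ import print_function

    from pygments.lexers.parasail import ParaSailLexer

    psl = 'const Flag := #true xor #false // enum literals\n'
    for token, value in ParaSailLexer().get_tokens(psl):
        if value.strip():
            print(token, repr(value))
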
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
index 2b1c7634..7d32d7f6 100644
--- a/pygments/lexers/prolog.py
+++ b/pygments/lexers/prolog.py
@@ -155,11 +155,11 @@ class LogtalkLexer(RegexLexer):
# Term creation and decomposition
(r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
# Evaluable functors
- (r'(rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
+ (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
- (r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
+ (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
- (r'(cos|a(cos|sin|tan)|exp|log|s(in|qrt))(?=[(])', Keyword),
+ (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|'
r'ground|acyclic_term)(?=[(])', Keyword),
@@ -212,7 +212,7 @@ class LogtalkLexer(RegexLexer):
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
- (r'\b(e|pi|mod|rem)\b', Operator),
+ (r'\b(e|pi|div|mod|rem)\b', Operator),
# Other arithemtic functors
(r'\b\*\*\b', Operator),
# DCG rules
diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py
index fb14629a..4f2037bf 100644
--- a/pygments/lexers/rdf.py
+++ b/pygments/lexers/rdf.py
@@ -12,10 +12,10 @@
import re
from pygments.lexer import RegexLexer, bygroups, default
-from pygments.token import Keyword, Punctuation, String, Number, Operator, \
+from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \
Whitespace, Name, Literal, Comment, Text
-__all__ = ['SparqlLexer']
+__all__ = ['SparqlLexer', 'TurtleLexer']
class SparqlLexer(RegexLexer):
@@ -97,3 +97,97 @@ class SparqlLexer(RegexLexer):
default('#pop:2'),
],
}
+
+
+class TurtleLexer(RegexLexer):
+ """
+ Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Turtle'
+ aliases = ['turtle']
+ filenames = ['*.ttl']
+ mimetypes = ['text/turtle', 'application/x-turtle']
+
+ flags = re.IGNORECASE
+
+ patterns = {
+ 'PNAME_NS': r'((?:[a-zA-Z][\w-]*)?\:)', # Simplified character range
+ 'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
+ }
+
+ # PNAME_NS PN_LOCAL (with simplified character range)
+ patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
+
+ tokens = {
+ 'root': [
+ (r'\s+', Whitespace),
+
+ # Base / prefix
+ (r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
+ bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
+ Punctuation)),
+ (r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
+ bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
+ Name.Variable, Whitespace, Punctuation)),
+
+ # The shorthand predicate 'a'
+ (r'(?<=\s)a(?=\s)', Keyword.Type),
+
+ # IRIREF
+ (r'%(IRIREF)s' % patterns, Name.Variable),
+
+ # PrefixedName
+ (r'%(PrefixedName)s' % patterns,
+ bygroups(Name.Namespace, Name.Tag)),
+
+ # Comment
+ (r'#[^\n]+', Comment),
+
+ (r'\b(true|false)\b', Literal),
+ (r'[+\-]?\d*\.\d+', Number.Float),
+ (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
+ (r'[+\-]?\d+', Number.Integer),
+ (r'[\[\](){}.;,:^]', Punctuation),
+
+ (r'"""', String, 'triple-double-quoted-string'),
+ (r'"', String, 'single-double-quoted-string'),
+ (r"'''", String, 'triple-single-quoted-string'),
+ (r"'", String, 'single-single-quoted-string'),
+ ],
+ 'triple-double-quoted-string': [
+ (r'"""', String, 'end-of-string'),
+ (r'[^\\]+', String),
+ (r'\\', String, 'string-escape'),
+ ],
+ 'single-double-quoted-string': [
+ (r'"', String, 'end-of-string'),
+ (r'[^"\\\n]+', String),
+ (r'\\', String, 'string-escape'),
+ ],
+ 'triple-single-quoted-string': [
+ (r"'''", String, 'end-of-string'),
+ (r'[^\\]+', String),
+ (r'\\', String, 'string-escape'),
+ ],
+ 'single-single-quoted-string': [
+ (r"'", String, 'end-of-string'),
+ (r"[^'\\\n]+", String),
+ (r'\\', String, 'string-escape'),
+ ],
+ 'string-escape': [
+ (r'.', String, '#pop'),
+ ],
+ 'end-of-string': [
+
+ (r'(@)([a-zA-Z]+(:?-[a-zA-Z0-9]+)*)',
+ bygroups(Operator, Generic.Emph), '#pop:2'),
+
+ (r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),
+ (r'(\^\^)%(PrefixedName)s' % patterns, bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'),
+
+ default('#pop:2'),
+
+ ],
+ }
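
The string states above exist so that language tags and datatypes attached to a literal are highlighted distinctly; a short check on arbitrary data::

    from __future__ import print_function

    from pygments.lexers.rdf import TurtleLexer

    ttl = '@prefix dc: <http://purl.org/dc/elements/1.1/> .\n' \
          '<#b> dc:title "Hei"@nb .\n'
    # After the closing quote the lexer enters 'end-of-string', where the
    # '@nb' language tag is emitted as Operator + Generic.Emph.
    for token, value in TurtleLexer().get_tokens(ttl):
        if value.strip():
            print(token, repr(value))
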
diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py
index 473ea7eb..c09c5ba9 100644
--- a/pygments/lexers/scripting.py
+++ b/pygments/lexers/scripting.py
@@ -14,11 +14,12 @@ import re
from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Error, Whitespace
+ Number, Punctuation, Error, Whitespace, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
- 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer']
+ 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer',
+ 'EasytrieveLexer', 'JclLexer']
class LuaLexer(RegexLexer):
@@ -921,3 +922,275 @@ class HybrisLexer(RegexLexer):
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
+
+
+class EasytrieveLexer(RegexLexer):
+ """
+ Easytrieve Plus is a programming language for extracting, filtering and
+ converting sequential data. Furthermore, it can lay out data for reports.
+ It is mainly used on mainframe platforms and can access several of the
+ mainframe's native file formats. It is somewhat comparable to awk.
+
+ .. versionadded:: 2.1
+ """
+ name = 'Easytrieve'
+ aliases = ['easytrieve']
+ filenames = ['*.ezt', '*.mac']
+ mimetypes = ['text/x-easytrieve']
+ flags = 0
+
+ # Note: We cannot use r'\b' at the start and end of keywords because
+ # Easytrieve Plus delimiter characters are:
+ #
+ # * space ( )
+ # * apostrophe (')
+ # * period (.)
+ # * comma (,)
+ # * parentheses ( and )
+ # * colon (:)
+ #
+ # Additionally, words end once a '*' appears, indicating a comment.
+ _DELIMITERS = r' \'.,():\n'
+ _DELIMITERS_OR_COMMENT = _DELIMITERS + '*'
+ _DELIMITER_PATTERN = '[' + _DELIMITERS + ']'
+ _DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')'
+ _NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMMENT + ']'
+ _OPERATORS_PATTERN = u'[.+\\-/=\\[\\](){}<>;,&%¬]'
+ _KEYWORDS = [
+ 'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR',
+ 'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU',
+ 'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR',
+ 'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D',
+ 'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI',
+ 'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE',
+ 'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF',
+ 'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12',
+ 'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21',
+ 'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30',
+ 'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7',
+ 'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST',
+ 'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT',
+ 'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT',
+ 'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY',
+ 'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE',
+ 'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES',
+ 'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE',
+ 'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT',
+ 'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1',
+ 'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER',
+ 'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT',
+ 'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT',
+ 'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT',
+ 'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE',
+ 'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT',
+ 'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM',
+ 'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT',
+ 'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME',
+ 'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC',
+ 'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE',
+ 'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST'
+ ]
+
+ tokens = {
+ 'root': [
+ (r'\*.*\n', Comment.Single),
+ (r'\n+', Whitespace),
+ # Macro argument
+ (r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable, 'after_macro_argument'),
+ # Macro call
+ (r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable),
+ (r'(FILE|MACRO|REPORT)(\s+)',
+ bygroups(Keyword.Declaration, Whitespace), 'after_declaration'),
+ (r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')',
+ bygroups(Keyword.Declaration, Operator)),
+ (words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE),
+ bygroups(Keyword.Reserved, Operator)),
+ (_OPERATORS_PATTERN, Operator),
+ # Procedure declaration
+ (r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)',
+ bygroups(Name.Function, Whitespace, Operator, Whitespace, Keyword.Declaration, Whitespace)),
+ (r'[0-9]+\.[0-9]*', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r"'(''|[^'])*'", String),
+ (r'\s+', Whitespace),
+ (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name) # Everything else just belongs to a name
+ ],
+ 'after_declaration': [
+ (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
+ ('', Whitespace, '#pop')
+ ],
+ 'after_macro_argument': [
+ (r'\*.*\n', Comment.Single, '#pop'),
+ (r'\s+', Whitespace, '#pop'),
+ (_OPERATORS_PATTERN, Operator, '#pop'),
+ (r"'(''|[^'])*'", String, '#pop'),
+ (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name) # Everything else just belongs to a name
+ ],
+ }
+ _COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
+ _MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO')
+
+ def analyse_text(text):
+ """
+ Perform a structural analysis for basic Easytrieve constructs.
+ """
+ result = 0.0
+ lines = text.split('\n')
+ hasEndProc = False
+ hasHeaderComment = False
+ hasFile = False
+ hasJob = False
+ hasProc = False
+ hasParm = False
+ hasReport = False
+
+ def isCommentLine(line):
+ return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None
+
+ def isEmptyLine(line):
+ return not bool(line.strip())
+
+ # Remove possible empty lines and header comments.
+ while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
+ if not isEmptyLine(lines[0]):
+ hasHeaderComment = True
+ del lines[0]
+
+ if lines and EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
+ # Looks like an Easytrieve macro.
+ result = 0.4
+ if hasHeaderComment:
+ result += 0.4
+ else:
+ # Scan the source for lines starting with indicators.
+ for line in lines:
+ words = line.split()
+ if (len(words) >= 2):
+ firstWord = words[0]
+ if not hasReport:
+ if not hasJob:
+ if not hasFile:
+ if not hasParm:
+ if firstWord == 'PARM':
+ hasParm = True
+ if firstWord == 'FILE':
+ hasFile = True
+ if firstWord == 'JOB':
+ hasJob = True
+ elif firstWord == 'PROC':
+ hasProc = True
+ elif firstWord == 'END-PROC':
+ hasEndProc = True
+ elif firstWord == 'REPORT':
+ hasReport = True
+
+ # Weight the findings.
+ if hasJob and (hasProc == hasEndProc):
+ if hasHeaderComment:
+ result += 0.1
+ if hasParm:
+ if hasProc:
+ # Found PARM, JOB and PROC/END-PROC:
+ # pretty sure this is Easytrieve.
+ result += 0.8
+ else:
+ # Found PARM and JOB: probably this is Easytrieve
+ result += 0.5
+ else:
+ # Found JOB and possibly other keywords: might be Easytrieve
+ result += 0.11
+ if hasParm:
+ # Note: PARM is not a proper English word, so it is
+ # regarded as a much better indicator for Easytrieve than
+ # the other words.
+ result += 0.2
+ if hasFile:
+ result += 0.01
+ if hasReport:
+ result += 0.01
+ assert 0.0 <= result <= 1.0
+ return result
+
+
+class JclLexer(RegexLexer):
+ """
+ `Job Control Language (JCL) <http://publibz.boulder.ibm.com/cgi-bin/bookmgr_OS390/BOOKS/IEA2B570/CCONTENTS>`_
+ is a scripting language used on mainframe platforms to instruct the system
+ on how to run a batch job or start a subsystem. It is somewhat
+ comparable to MS DOS batch and Unix shell scripts.
+
+ .. versionadded:: 2.1
+ """
+ name = 'JCL'
+ aliases = ['jcl']
+ filenames = ['*.jcl']
+ mimetypes = ['text/x-jcl']
+ flags = re.IGNORECASE
+
+ tokens = {
+ 'root': [
+ (r'//\*.*\n', Comment.Single),
+ (r'//', Keyword.Pseudo, 'statement'),
+ (r'/\*', Keyword.Pseudo, 'jes2_statement'),
+ # TODO: JES3 statement
+ (r'.*\n', Other) # Input text or inline code in any language.
+ ],
+ 'statement': [
+ (r'\s*\n', Whitespace, '#pop'),
+ (r'([a-z][a-z_0-9]*)(\s+)(exec|job)(\s*)',
+ bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace),
+ 'option'),
+ (r'[a-z][a-z_0-9]*', Name.Variable, 'statement_command'),
+ (r'\s+', Whitespace, 'statement_command'),
+ ],
+ 'statement_command': [
+ (r'\s+(command|cntl|dd|endctl|endif|else|include|jcllib|'
+ r'output|pend|proc|set|then|xmit)\s+', Keyword.Reserved, 'option'),
+ include('option')
+ ],
+ 'jes2_statement': [
+ (r'\s*\n', Whitespace, '#pop'),
+ (r'\$', Keyword, 'option'),
+ (r'\b(jobparam|message|netacct|notify|output|priority|route|'
+ r'setup|signoff|xeq|xmit)\b', Keyword, 'option'),
+ ],
+ 'option': [
+ #(r'\n', Text, 'root'),
+ (r'\*', Name.Builtin),
+ (r'[\[\](){}<>;,]', Punctuation),
+ (r'[-+*/=&%]', Operator),
+ (r'[a-z_][a-z_0-9]*', Name),
+ (r'[0-9]+\.[0-9]*', Number.Float),
+ (r'\.[0-9]+', Number.Float),
+ (r'[0-9]+', Number.Integer),
+ (r"'", String, 'option_string'),
+ (r'[ \t]+', Whitespace, 'option_comment'),
+ (r'\.', Punctuation),
+ ],
+ 'option_string': [
+ (r"(\n)(//)", bygroups(Text, Keyword.Pseudo)),
+ (r"''", String),
+ (r"[^']", String),
+ (r"'", String, '#pop'),
+ ],
+ 'option_comment': [
+ #(r'\n', Text, 'root'),
+ (r'.+', Comment.Single),
+ ]
+ }
+
+ _JOB_HEADER_PATTERN = re.compile(r'^//[a-z#$@][a-z0-9#$@]{0,7}\s+job(\s+.*)?$', re.IGNORECASE)
+
+ def analyse_text(text):
+ """
+ Recognize JCL job by header.
+ """
+ result = 0.0
+ lines = text.split('\n')
+ if len(lines) > 0:
+ if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
+ result = 1.0
+ assert 0.0 <= result <= 1.0
+ return result
+
+
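
The analyse_text() heuristics added above can be exercised directly; Pygments wraps them so they are callable on the class (as the new tests do)::

    from pygments.lexers.scripting import EasytrieveLexer, JclLexer

    print(EasytrieveLexer.analyse_text('MACRO'))           # 0.4: bare macro header
    print(EasytrieveLexer.analyse_text('hello world'))     # 0.0: no indicators
    print(JclLexer.analyse_text('//MYJOB JOB (ACCT)\n'))   # 1.0: JCL job card
    print(JclLexer.analyse_text('echo hello\n'))           # 0.0
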
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index 1e3640bf..cd9cad15 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -47,7 +47,9 @@ class BashLexer(RegexLexer):
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\$\{#?', String.Interpol, 'curly'),
- (r'\$(\w+|.)', Name.Variable),
+ (r'\$[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), # user variable
+ (r'\$(?:\d+|[#$?!_*@-])', Name.Variable), # builtin
+ (r'\$', Text),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
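
The split into user-variable, builtin, and lone-'$' rules can be checked against the expectations of the new test (arbitrary fragment)::

    from __future__ import print_function

    from pygments.lexers.shell import BashLexer

    for token, value in BashLexer().get_tokens('echo $_ $abc $\n'):
        print(token, repr(value))
    # '$_' (builtin) and '$abc' (user variable) are Name.Variable;
    # the lone '$' now falls through to plain Text.
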
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index bfca0d38..71055a9f 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -568,10 +568,12 @@ class MasonLexer(RegexLexer):
}
def analyse_text(text):
- rv = 0.0
- if re.search('<&', text) is not None:
- rv = 1.0
- return rv
+ result = 0.0
+ if re.search(r'</%(class|doc|init)%>', text) is not None:
+ result = 1.0
+ elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
+ result = 0.11
+ return result
class MakoLexer(RegexLexer):
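
The tightened Mason heuristic can be probed directly (the sample strings are arbitrary)::

    from pygments.lexers.templates import MasonLexer

    print(MasonLexer.analyse_text('</%init%>'))      # 1.0: closing block tag
    print(MasonLexer.analyse_text('<& header &>'))   # 0.11: component call only
    print(MasonLexer.analyse_text('a <& b'))         # 0.0: bare '<&' no longer matches
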
diff --git a/tests/examplefiles/example.ezt b/tests/examplefiles/example.ezt
new file mode 100644
index 00000000..fec2aa4c
--- /dev/null
+++ b/tests/examplefiles/example.ezt
@@ -0,0 +1,32 @@
+* Easytrieve Plus example program.
+
+* Environment section.
+PARM DEBUG(FLOW FLDCHK)
+
+* Library Section.
+FILE PERSNL FB(150 1800)
+ NAME 17 8 A
+ EMP# 9 5 N * Note: '#' is a valid character for names.
+ DEPT 98 3 N. GROSS 94 4 P 2
+ * ^ 2 field definitions in 1 line.
+
+* Call macro in example.mac.
+FILE EXAMPLE FB(80 200)
+%EXAMPLE SOMEFILE SOME
+
+* Activity Section.
+JOB INPUT PERSNL NAME FIRST-PROGRAM START AT-START FINISH AT_FINISH
+ PRINT PAY-RPT
+REPORT PAY-RPT LINESIZE 80
+ TITLE 01 'PERSONNEL REPORT EXAMPLE-1'
+ LINE 01 DEPT NAME EMP# GROSS
+
+* Procedure declarations.
+AT-START. PROC
+ DISPLAY 'PROCESSING...'
+END-PROC
+
+AT-FINISH
+PROC
+ DISPLAY 'DONE.'
+END-PROC
diff --git a/tests/examplefiles/example.jcl b/tests/examplefiles/example.jcl
new file mode 100644
index 00000000..18d4ae37
--- /dev/null
+++ b/tests/examplefiles/example.jcl
@@ -0,0 +1,31 @@
+//IS198CPY JOB (PYGM-TEST-001),'PYGMENTS TEST JOB',
+// CLASS=L,MSGCLASS=X,TIME=(00,10)
+//* Copy 'OLDFILE' to 'NEWFILE'.
+//COPY01 EXEC PGM=IEBGENER
+//SYSPRINT DD SYSOUT=*
+//SYSUT1 DD DSN=OLDFILE,DISP=SHR
+//SYSUT2 DD DSN=NEWFILE,
+// DISP=(NEW,CATLG,DELETE),
+// SPACE=(CYL,(40,5),RLSE), Some comment
+// DCB=(LRECL=115,BLKSIZE=1150)
+//SYSIN DD DUMMY
+/*
+//* Test line continuation in strings.
+//CONT01 EXEC PGM=IEFBR14,PARM='THIS IS A LONG PARAMETER WITHIN APOST
+// ROPHES, CONTINUED IN COLUMN 15 OF THE NEXT RECORD'
+//* Sort a couple of lines and show the result in the job log.
+//SORT01 EXEC PGM=IEFBR14
+//SORTIN DD *
+spam
+eggs
+ham
+/*
+//SORTOUT DD SYSOUT=*
+/*
+//* Test line continuation with comment at end of line continued by a
+//* character at column 72 (in this case 'X').
+//STP4 EXEC PROC=BILLING,COND.PAID=((20,LT),EVEN),
+// COND.LATE=(60,GT,FIND),
+// COND.BILL=((20,GE),(30,LT,CHGE)) THIS STATEMENT CALLS THE X
+// BILLING PROCEDURE AND SPECIFIES RETURN CODE TESTS FOR THREEX
+// PROCEDURE STEPS.
diff --git a/tests/examplefiles/example.mac b/tests/examplefiles/example.mac
new file mode 100644
index 00000000..1c3831d1
--- /dev/null
+++ b/tests/examplefiles/example.mac
@@ -0,0 +1,6 @@
+* Example Easytrieve macro declaration. For an example on calling this
+* macro, see example.ezt.
+MACRO FILENAME PREFIX
+&FILENAME.
+&PREFIX.-LINE 1 80 A
+&PREFIX.-KEY 1 8 A
diff --git a/tests/examplefiles/example.tf b/tests/examplefiles/example.tf
new file mode 100644
index 00000000..d3f02779
--- /dev/null
+++ b/tests/examplefiles/example.tf
@@ -0,0 +1,162 @@
+variable "key_name" {
+ description = "Name of the SSH keypair to use in AWS."
+}
+
+variable "key_path" {
+ description = "Path to the private portion of the SSH key specified."
+}
+
+variable "aws_region" {
+ description = "AWS region to launch servers."
+ default = "us-west-2"
+ somevar = true
+}
+
+# Ubuntu Precise 12.04 LTS (x64)
+variable "aws_amis" {
+ default = {
+ eu-west-1 = "ami-b1cf19c6"
+ us-east-1 = "ami-de7ab6b6"
+ us-west-1 = "ami-3f75767a"
+ us-west-2 = "ami-21f78e11"
+ }
+}
+
+
+
+
+
+
+provider "aws" {
+ access_key = "${myvar}"
+ secret_key = "your aws secret key"
+ region = "us-east-1"
+}
+/* multiline
+
+ comment
+
+*/
+
+
+# Single line comment
+resource "aws_instance" "example" {
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
+ key_name = "your-aws-key-name"
+}
+
+# Create our Heroku application. Heroku will
+# automatically assign a name.
+resource "heroku_app" "web" {}
+
+# Create our DNSimple record to point to the
+# heroku application.
+resource "dnsimple_record" "web" {
+ domain = "${var.dnsimple_domain}"
+
+
+ # heroku_hostname is a computed attribute on the heroku
+ # application we can use to determine the hostname
+ value = "${heroku_app.web.heroku_hostname}"
+
+ type = "CNAME"
+ ttl = 3600
+}
+
+# The Heroku domain, which will be created and added
+# to the heroku application after we have assigned the domain
+# in DNSimple
+resource "heroku_domain" "foobar" {
+ app = "${heroku_app.web.name}"
+ hostname = "${dnsimple_record.web.hostname}"
+}
+
+
+# Specify the provider and access details
+provider "aws" {
+ region = "${var.aws_region}"
+ value = ${file("path.txt")}
+}
+
+# Our default security group to access
+# the instances over SSH and HTTP
+resource "aws_security_group" "default" {
+ name = "terraform_example"
+ description = "Used in the terraform"
+
+ # SSH access from anywhere
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ # HTTP access from anywhere
+ ingress {
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+}
+
+
+resource "aws_elb" "web" {
+ name = "terraform-example-elb"
+
+ # The same availability zone as our instance
+ availability_zones = ["${aws_instance.web.availability_zone}"]
+
+ listener {
+ instance_port = 80
+ instance_protocol = "http"
+ lb_port = 80
+ lb_protocol = "http"
+ }
+
+ # The instance is registered automatically
+ instances = ["${aws_instance.web.id}"]
+}
+
+
+resource "aws_instance" "web" {
+ # The connection block tells our provisioner how to
+ # communicate with the resource (instance)
+ connection {
+ # The default username for our AMI
+ user = "ubuntu"
+
+ # The path to your keyfile
+ key_file = "${var.key_path}"
+ }
+
+ instance_type = "m1.small"
+
+ # Lookup the correct AMI based on the region
+ # we specified
+ ami = "${lookup(var.aws_amis, var.aws_region)}"
+
+ # The name of our SSH keypair you've created and downloaded
+ # from the AWS console.
+ #
+ # https://console.aws.amazon.com/ec2/v2/home?region=us-west-2#KeyPairs:
+ #
+ key_name = "${var.key_name}"
+
+ # Our Security group to allow HTTP and SSH access
+ security_groups = ["${aws_security_group.default.name}"]
+
+ # We run a remote provisioner on the instance after creating it.
+ # In this case, we just install nginx and start it. By default,
+ # this should be on port 80
+ provisioner "remote-exec" {
+ inline = [
+ "sudo apt-get -y update",
+ "sudo apt-get -y install nginx",
+ "sudo service nginx start"
+ ]
+ }
+}
+
diff --git a/tests/examplefiles/example.ttl b/tests/examplefiles/example.ttl
new file mode 100644
index 00000000..e524d86c
--- /dev/null
+++ b/tests/examplefiles/example.ttl
@@ -0,0 +1,43 @@
+@base <http://example.com> .
+@prefix dcterms: <http://purl.org/dc/terms/>. @prefix xs: <http://www.w3.org/2001/XMLSchema> .
+@prefix mads: <http://www.loc.gov/mads/rdf/v1#> .
+@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
+@PREFIX dc: <http://purl.org/dc/elements/1.1/> # SPARQL-like syntax is OK
+@prefix : <http://xmlns.com/foaf/0.1/> . # empty prefix is OK
+
+<http://example.org/#spiderman> <http://www.perceive.net/schemas/relationship/enemyOf> <http://example.org/#green-goblin> .
+
+<#doc1> a <#document>
+ dc:creator "Smith", "Jones";
+ :knows <http://getopenid.com/jsmith>
+ dcterms:hasPart [ # A comment
+ dc:title "Some title", "Some other title";
+ dc:creator "برشت، برتولد"@ar;
+ dc:date "2009"^^xs:date
+ ];
+ dc:title "A sample title", 23.0;
+ dcterms:isPartOf [
+ dc:title "another", "title"
+ ] ;
+ :exists true .
+
+<http://data.ub.uio.no/realfagstermer/006839> a mads:Topic,
+ skos:Concept ;
+ dcterms:created "2014-08-25"^^xsd:date ;
+ dcterms:modified "2014-11-12"^^xsd:date ;
+ dcterms:identifier "REAL006839" ;
+ skos:prefLabel "Flerbørstemarker"@nb,
+ "Polychaeta"@la ;
+ skos:altLabel "Flerbørsteormer"@nb,
+ "Mangebørstemark"@nb,
+ "Mangebørsteormer"@nb,
+ "Havbørsteormer"@nb,
+ "Havbørstemarker"@nb,
+ "Polycheter"@nb.
+ skos:inScheme <http://data.ub.uio.no/realfagstermer/> ;
+ skos:narrower <http://data.ub.uio.no/realfagstermer/018529>,
+ <http://data.ub.uio.no/realfagstermer/024538>,
+ <http://data.ub.uio.no/realfagstermer/026723> ;
+ skos:exactMatch <http://ntnu.no/ub/data/tekord#NTUB17114>,
+ <http://dewey.info/class/592.62/e23/>,
+ <http://aims.fao.org/aos/agrovoc/c_29110> .
diff --git a/tests/examplefiles/test.bpl b/tests/examplefiles/test.bpl
new file mode 100644
index 00000000..add25e1a
--- /dev/null
+++ b/tests/examplefiles/test.bpl
@@ -0,0 +1,140 @@
+/*
+ * Test Boogie rendering
+*/
+
+const N: int;
+axiom 0 <= N;
+
+procedure foo() {
+ break;
+}
+// array to sort as global array, because partition & quicksort have to
+var a: [int] int;
+var original: [int] int;
+var perm: [int] int;
+
+// Is array a of length N sorted?
+function is_sorted(a: [int] int, l: int, r: int): bool
+{
+ (forall j, k: int :: l <= j && j < k && k <= r ==> a[j] <= a[k])
+}
+
+// is range a[l:r] unchanged?
+function is_unchanged(a: [int] int, b: [int] int, l: int, r: int): bool {
+ (forall i: int :: l <= i && i <= r ==> a[i] == b[i])
+}
+
+function is_permutation(a: [int] int, original: [int] int, perm: [int] int, N: int): bool
+{
+ (forall k: int :: 0 <= k && k < N ==> 0 <= perm[k] && perm[k] < N) &&
+ (forall k, j: int :: 0 <= k && k < j && j < N ==> perm[k] != perm[j]) &&
+ (forall k: int :: 0 <= k && k < N ==> a[k] == original[perm[k]])
+}
+
+function count(a: [int] int, x: int, N: int) returns (int)
+{ if N == 0 then 0 else if a[N-1] == x then count(a, x, N - 1) + 1 else count(a, x, N-1) }
+
+
+/*
+function count(a: [int] int, x: int, N: int) returns (int)
+{ if N == 0 then 0 else if a[N-1] == x then count(a, x, N - 1) + 1 else count(a, x, N-1) }
+
+function is_permutation(a: [int] int, b: [int] int, l: int, r: int): bool {
+ (forall i: int :: l <= i && i <= r ==> count(a, a[i], r+1) == count(b, a[i], r+1))
+}
+*/
+
+procedure partition(l: int, r: int, N: int) returns (p: int)
+ modifies a, perm;
+ requires N > 0;
+ requires l >= 0 && l < r && r < N;
+ requires ((r+1) < N) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] <= a[r+1]);
+ requires ((l-1) >= 0) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] > a[l-1]);
+
+ /* a is a permutation of the original array original */
+ requires is_permutation(a, original, perm, N);
+
+ ensures (forall k: int :: (k >= l && k <= p ) ==> a[k] <= a[p]);
+ ensures (forall k: int :: (k > p && k <= r ) ==> a[k] > a[p]);
+ ensures p >= l && p <= r;
+ ensures is_unchanged(a, old(a), 0, l-1);
+ ensures is_unchanged(a, old(a), r+1, N);
+ ensures ((r+1) < N) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] <= a[r+1]);
+ ensures ((l-1) >= 0) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] > a[l-1]);
+
+ /* a is a permutation of the original array original */
+ ensures is_permutation(a, original, perm, N);
+{
+ var i: int;
+ var sv: int;
+ var pivot: int;
+ var tmp: int;
+
+ i := l;
+ sv := l;
+ pivot := a[r];
+
+ while (i < r)
+ invariant i <= r && i >= l;
+ invariant sv <= i && sv >= l;
+ invariant pivot == a[r];
+ invariant (forall k: int :: (k >= l && k < sv) ==> a[k] <= old(a[r]));
+ invariant (forall k: int :: (k >= sv && k < i) ==> a[k] > old(a[r]));
+
+ /* a is a permutation of the original array original */
+ invariant is_permutation(a, original, perm, N);
+
+ invariant is_unchanged(a, old(a), 0, l-1);
+ invariant is_unchanged(a, old(a), r+1, N);
+ invariant ((r+1) < N) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] <= a[r+1]);
+ invariant ((l-1) >= 0) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] > a[l-1]);
+ {
+ if ( a[i] <= pivot) {
+ tmp := a[i]; a[i] := a[sv]; a[sv] := tmp;
+ tmp := perm[i]; perm[i] := perm[sv]; perm[sv] := tmp;
+ sv := sv +1;
+ }
+ i := i + 1;
+ }
+
+ //swap
+ tmp := a[i]; a[i] := a[sv]; a[sv] := tmp;
+ tmp := perm[i]; perm[i] := perm[sv]; perm[sv] := tmp;
+
+ p := sv;
+}
+
+
+procedure quicksort(l: int, r: int, N: int)
+ modifies a, perm;
+
+ requires N > 0;
+ requires l >= 0 && l < r && r < N;
+ requires ((r+1) < N) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] <= a[r+1]);
+ requires ((l-1) >= 0) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] > a[l-1]);
+
+ /* a is a permutation of the original array original */
+ requires is_permutation(a, original, perm, N);
+
+ ensures ((r+1) < N) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] <= a[r+1]);
+ ensures ((l-1) >= 0) ==> (forall k: int :: (k >= l && k <= r) ==> a[k] > a[l-1]);
+
+ ensures is_unchanged(a, old(a), 0, l-1);
+ ensures is_unchanged(a, old(a), r+1, N);
+ ensures is_sorted(a, l, r);
+
+ /* a is a permutation of the original array original */
+ ensures is_permutation(a, original, perm, N);
+{
+ var p: int;
+
+ call p := partition(l, r, N);
+
+ if ((p-1) > l) {
+ call quicksort(l, p-1, N);
+ }
+
+ if ((p+1) < r) {
+ call quicksort(p+1, r, N);
+ }
+}
diff --git a/tests/examplefiles/test.psl b/tests/examplefiles/test.psl
new file mode 100644
index 00000000..3ac99498
--- /dev/null
+++ b/tests/examplefiles/test.psl
@@ -0,0 +1,182 @@
+// This is a comment
+
+// 1. Basics
+
+// Functions
+func Add(X : Univ_Integer; Y : Univ_Integer) -> Univ_Integer is
+ return X + Y;
+end func Add;
+// End of line semi-colons are optional
+// +, +=, -, -=, *, *=, /, /=
+// all do what you'd expect (/ is integer division)
+
+// If you find Univ_Integer to be too verbose you can import Short_Names
+// which defines aliases like Int for Univ_Integer and String for Univ_String
+import PSL::Short_Names::*, *
+
+func Greetings() is
+ const S : String := "Hello, World!"
+ Println(S)
+end func Greetings
+// All declarations are 'const', 'var', or 'ref'
+// Assignment is :=, equality checks are ==, and != is not equals
+
+func Boolean_Examples(B : Bool) is
+ const And := B and #true // Parallel execution of operands
+ const And_Then := B and then #true // Short-Circuit
+ const Or := B or #false // Parallel execution of operands
+ const Or_Else := B or else #false // Short-Circuit
+ const Xor := B xor #true
+ var Result : Bool := #true;
+ Result and= #false;
+ Result or= #true;
+ Result xor= #false;
+end func Boolean_Examples
+// Booleans are a special type of enumeration
+// All enumerations are preceded by a sharp '#'
+
+func Fib(N : Int) {N >= 0} -> Int is
+ if N <= 1 then
+ return N
+ else
+ // Left and right side of '+' are computed in Parallel here
+ return Fib(N - 1) + Fib(N - 2)
+ end if
+end func Fib
+// '{N >= 0}' is a precondition to this function
+// Preconditions are built in to the language and checked by the compiler
+
+// ParaSail does not have mutable global variables
+// Instead, use 'var' parameters
+func Increment_All(var Nums : Vector<Int>) is
+ for each Elem of Nums concurrent loop
+ Elem += 1
+ end loop
+end func Increment_All
+// The 'concurrent' keyword in the loop header tells the compiler that
+// iterations of the loop can happen in any order.
+// It will choose the most optimal number of threads to use.
+// Other options are 'forward' and 'reverse'.
+
+func Sum_Of_Squares(N : Int) -> Int is
+ // The type of Sum is inferred
+ var Sum := 0
+ for I in 1 .. N forward loop
+ Sum += I ** 2 // ** is exponentiation
+ end loop
+end func Sum_Of_Squares
+
+func Sum_Of(N : Int; Map : func (Int) -> Int) -> Int is
+ return (for I in 1 .. N => <0> + Map(I))
+end func Sum_Of
+// It has functional aspects as well
+// Here, we're taking an (Int) -> Int function as a parameter
+// and using the inherently parallel map-reduce.
+// Initial value is enclosed with angle brackets
+
+func main(Args : Basic_Array<String>) is
+ Greetings() // Hello World
+ Println(Fib(5)) // 5
+ // Container Comprehension
+ var Vec : Vector<Int> := [for I in 0 .. 10 {I mod 2 == 0} => I ** 2]
+ // Vec = [0, 4, 16, 36, 64, 100]
+ Increment_All(Vec)
+ // Vec = [1, 5, 17, 37, 65, 101]
+ // '|' is an overloaded operator.
+ // It's usually used for concatenation or adding to a container
+ Println("First: " | Vec[1] | ", Last: " | Vec[Length(Vec)]);
+ // Vectors are 1 indexed, 0 indexed ZVectors are also available
+
+ Println(Sum_Of_Squares(3))
+
+ // Sum of fibs!
+ Println(Sum_Of(10, Fib))
+end func main
+
+// Preceding a type with 'optional' allows it to take the value 'null'
+func Divide(A, B, C : Real) -> optional Real is
+ // Real is the floating point type
+ const Epsilon := 1.0e-6;
+ if B in -Epsilon .. Epsilon then
+ return null
+ elsif C in -Epsilon .. Epsilon then
+ return null
+ else
+ return A / B + A / C
+ end if
+end func Divide
+
+// 2. Modules
+// Modules are composed of an interface and a class
+// ParaSail has object orientation features
+
+// modules can be defined as 'concurrent'
+// which allows 'locked' and 'queued' parameters
+concurrent interface Locked_Box<Content_Type is Assignable<>> is
+ // Create a box with the given content
+ func Create(C : optional Content_Type) -> Locked_Box;
+
+ // Put something into the box
+ func Put(locked var B : Locked_Box; C : Content_Type);
+
+ // Get a copy of current content
+ func Content(locked B : Locked_Box) -> optional Content_Type;
+
+ // Remove current content, leaving it null
+ func Remove(locked var B : Locked_Box) -> optional Content_Type;
+
+ // Wait until content is non-null, then return it, leaving it null.
+ func Get(queued var B : Locked_Box) -> Content_Type;
+end interface Locked_Box;
+
+concurrent class Locked_Box is
+ var Content : optional Content_Type;
+exports
+ func Create(C : optional Content_Type) -> Locked_Box is
+ return (Content => C);
+ end func Create;
+
+ func Put(locked var B : Locked_Box; C : Content_Type) is
+ B.Content := C;
+ end func Put;
+
+ func Content(locked B : Locked_Box) -> optional Content_Type is
+ return B.Content;
+ end func Content;
+
+ func Remove(locked var B : Locked_Box) -> Result : optional Content_Type is
+ // '<==' is the move operator
+ // It moves the right operand into the left operand,
+ // leaving the right null.
+ Result <== B.Content;
+ end func Remove;
+
+ func Get(queued var B : Locked_Box) -> Result : Content_Type is
+ queued until B.Content not null then
+ Result <== B.Content;
+ end func Get;
+end class Locked_Box;
+
+func Use_Box(Seed : Univ_Integer) is
+ var U_Box : Locked_Box<Univ_Integer> := Create(null);
+ // The type of 'Ran' can be left out because
+ // it is inferred from the return type of Random::Start
+ var Ran := Random::Start(Seed);
+
+ Println("Starting 100 pico-threads trying to put something in the box");
+ Println(" or take something out.");
+ for I in 1..100 concurrent loop
+ if I < 30 then
+ Println("Getting out " | Get(U_Box));
+ else
+ Println("Putting in " | I);
+ U_Box.Put(I);
+
+ // The first parameter can be moved to the front with a dot
+ // X.Foo(Y) is equivalent to Foo(X, Y)
+ end if;
+ end loop;
+
+ Println("And the winner is: " | Remove(U_Box));
+ Println("And the box is now " | Content(U_Box));
+end func Use_Box;
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
index 7457d045..bb667c05 100644
--- a/tests/test_lexers_other.py
+++ b/tests/test_lexers_other.py
@@ -6,14 +6,12 @@
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-
import glob
import os
import unittest
from pygments.lexers import guess_lexer
-from pygments.lexers.scripting import RexxLexer
-
+from pygments.lexers.scripting import EasytrieveLexer, JclLexer, RexxLexer
def _exampleFilePath(filename):
return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
@@ -36,7 +34,24 @@ class AnalyseTextTest(unittest.TestCase):
self.assertEqual(guessedLexer.name, lexer.name)
def testCanRecognizeAndGuessExampleFiles(self):
- self._testCanRecognizeAndGuessExampleFiles(RexxLexer)
+ LEXERS_TO_TEST = [
+ EasytrieveLexer,
+ JclLexer,
+ RexxLexer,
+ ]
+ for lexerToTest in LEXERS_TO_TEST:
+ self._testCanRecognizeAndGuessExampleFiles(lexerToTest)
+
+
+class EasyTrieveLexerTest(unittest.TestCase):
+ def testCanGuessFromText(self):
+ self.assertLess(0, EasytrieveLexer.analyse_text('MACRO'))
+ self.assertLess(0, EasytrieveLexer.analyse_text('\nMACRO'))
+ self.assertLess(0, EasytrieveLexer.analyse_text(' \nMACRO'))
+ self.assertLess(0, EasytrieveLexer.analyse_text(' \n MACRO'))
+ self.assertLess(0, EasytrieveLexer.analyse_text('*\nMACRO'))
+ self.assertLess(0, EasytrieveLexer.analyse_text(
+ '*\n *\n\n \n*\n MACRO'))
class RexxLexerTest(unittest.TestCase):
diff --git a/tests/test_shell.py b/tests/test_shell.py
index fd5009b0..4eb5a15a 100644
--- a/tests/test_shell.py
+++ b/tests/test_shell.py
@@ -61,3 +61,29 @@ class BashTest(unittest.TestCase):
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+ def testShortVariableNames(self):
+ fragment = u'x="$"\ny="$_"\nz="$abc"\n'
+ tokens = [
+ # single lone $
+ (Token.Name.Variable, u'x'),
+ (Token.Operator, u'='),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Text, u'$'),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Text, u'\n'),
+ # single letter shell var
+ (Token.Name.Variable, u'y'),
+ (Token.Operator, u'='),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Name.Variable, u'$_'),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Text, u'\n'),
+ # multi-letter user var
+ (Token.Name.Variable, u'z'),
+ (Token.Operator, u'='),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Name.Variable, u'$abc'),
+ (Token.Literal.String.Double, u'"'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
new file mode 100644
index 00000000..07337cd5
--- /dev/null
+++ b/tests/test_terminal_formatter.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""
+ Pygments terminal formatter tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+import unittest
+import re
+
+from pygments.util import StringIO
+from pygments.lexers.sql import PlPgsqlLexer
+from pygments.formatters import TerminalFormatter
+
+DEMO_TEXT = '''\
+-- comment
+select
+* from bar;
+'''
+DEMO_LEXER = PlPgsqlLexer
+DEMO_TOKENS = list(DEMO_LEXER().get_tokens(DEMO_TEXT))
+
+ANSI_RE = re.compile(r'\x1b[\w\W]*?m')
+
+def strip_ansi(x):
+ return ANSI_RE.sub('', x)
+
+class TerminalFormatterTest(unittest.TestCase):
+ def test_reasonable_output(self):
+ out = StringIO()
+ TerminalFormatter().format(DEMO_TOKENS, out)
+ plain = strip_ansi(out.getvalue())
+ self.assertEqual(DEMO_TEXT.count('\n'), plain.count('\n'))
+ print(repr(plain))
+
+ for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+ self.assertEqual(a, b)
+
+ def test_reasonable_output_lineno(self):
+ out = StringIO()
+ TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
+ plain = strip_ansi(out.getvalue())
+ self.assertEqual(DEMO_TEXT.count('\n') + 1, plain.count('\n'))
+ print(repr(plain))
+
+ for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+ self.assertTrue(a in b)