summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCamil Staps <info@camilstaps.nl>2015-10-04 17:27:18 +0200
committerCamil Staps <info@camilstaps.nl>2015-10-04 17:27:18 +0200
commit9131b270427a1810aae6674e4cba4c98c5afec59 (patch)
tree59485e380c817bb99f97ba53a2abe99ad2ef20ac
parent83704bc3531bcd33c55870cb45284dbc775421bc (diff)
downloadpygments-9131b270427a1810aae6674e4cba4c98c5afec59.tar.gz
Adds a Clean (http://clean.cs.ru.nl/Clean) Lexer and example file
-rw-r--r--pygments/lexers/_mapping.py1
-rw-r--r--pygments/lexers/clean.py274
-rw-r--r--pygments/lexers/functional.py1
-rw-r--r--tests/examplefiles/StdGeneric.icl92
4 files changed, 368 insertions, 0 deletions
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 2e855570..293b7c41 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -72,6 +72,7 @@ LEXERS = {
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
+ 'CleanLexer': ('pygments.lexers.clean', 'CleanLexer', ('Clean', 'clean'), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
diff --git a/pygments/lexers/clean.py b/pygments/lexers/clean.py
new file mode 100644
index 00000000..7fb86844
--- /dev/null
+++ b/pygments/lexers/clean.py
@@ -0,0 +1,274 @@
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.clean
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the Clean language.
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import ExtendedRegexLexer, bygroups, words, include
+from pygments.token import *
+
+__all__ = ['CleanLexer']
+
class CleanLexer(ExtendedRegexLexer):
    """
    Lexer for the general purpose, state-of-the-art, pure and lazy functional
    programming language Clean (http://clean.cs.ru.nl/Clean).

    .. versionadded:: 2.1
    """
    name = 'CleanLexer'
    aliases = ['Clean', 'clean']
    filenames = ['*.icl', '*.dcl']

    def __init__(self, *args, **kwargs):
        super(CleanLexer, self).__init__(*args, **kwargs)
        # Indentation of the definition currently being lexed.  Kept on the
        # instance (the original used a module-level global, which leaks
        # state between concurrently used lexer instances).
        self.stored_indent = 0

    def check_class_not_import(lexer, match, ctx):
        # After 'from', the next word is either the module name or — when the
        # module name was already consumed — the keyword 'import' itself.
        if match.group(0) == 'import':
            yield match.start(), Keyword.Namespace, match.group(0)
            ctx.stack = ctx.stack[:-1] + ['fromimportfunc']
        else:
            yield match.start(), Name.Class, match.group(0)
        ctx.pos = match.end()

    def check_instance_class(lexer, match, ctx):
        # Imported names may be plain functions or the keywords
        # 'instance'/'class'; only plain names are followed by a type.
        if match.group(0) in ('instance', 'class'):
            yield match.start(), Keyword, match.group(0)
        else:
            yield match.start(), Name.Function, match.group(0)
            ctx.stack = ctx.stack + ['fromimportfunctype']
        ctx.pos = match.end()

    def store_indent(lexer, match, ctx):
        # Tabs are four spaces:
        # https://svn.cs.ru.nl/repos/clean-platform/trunk/doc/STANDARDS.txt
        lexer.stored_indent = len(match.group(0).replace('\t', '    '))
        ctx.pos = match.end()
        yield match.start(), Text, match.group(0)

    def _check_indent(lexer, match, ctx, pops):
        """Shared body of check_indent1/2/3.

        If the new line is indented deeper than the stored definition indent,
        the definition continues; otherwise it ended, so pop ``pops`` states
        off the stack.
        """
        indent = len(match.group(0)) - 1
        if indent > lexer.stored_indent:
            yield match.start(), Whitespace, match.group(0)
            ctx.pos = match.start() + indent + 1
        else:
            lexer.stored_indent = 0
            ctx.pos = match.start()
            ctx.stack = ctx.stack[:-pops]
            yield match.start(), Whitespace, match.group(0)[1:]
            # A blank line also terminates the definition; skip past it.
            if pops > 1 and match.group(0) == '\n\n':
                ctx.pos = ctx.pos + 1

    def check_indent1(lexer, match, ctx):
        return lexer._check_indent(match, ctx, 1)

    def check_indent2(lexer, match, ctx):
        return lexer._check_indent(match, ctx, 2)

    def check_indent3(lexer, match, ctx):
        return lexer._check_indent(match, ctx, 3)

    def skip(lexer, match, ctx):
        # Last character of a '//' comment: emit it and leave the state.
        ctx.stack = ctx.stack[:-1]
        ctx.pos = match.end()
        yield match.start(), Comment, match.group(0)

    tokens = {
        'common': [
            (r';', Punctuation, '#pop'),
            (r'//', Comment, 'singlecomment'),
        ],
        'root': [
            # Comments
            (r'//.*\n', Comment.Single),
            (r'(?s)/\*\*.*?\*/', Comment.Special),
            # Comment.Multi is not a standard Pygments token (styles ignore
            # unknown tokens); use the standard Comment.Multiline instead.
            (r'(?s)/\*.*?\*/', Comment.Multiline),

            # Modules, imports, etc.
            (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`]+)',
             bygroups(Keyword.Namespace, Keyword.Namespace, Text, Name.Class)),
            (r'(?<=\n)import(?=\s)', Keyword.Namespace, 'import'),
            (r'(?<=\n)from(?=\s)', Keyword.Namespace, 'fromimport'),

            # Keywords
            # We cannot use (?s)^|(?<=\s) as prefix, so need to repeat this
            (words(('class', 'instance', 'where', 'with', 'let', 'let!', 'in',
                    'case', 'of', 'infix', 'infixr', 'infixl', 'generic',
                    'derive', 'otherwise', 'code', 'inline'),
                   prefix=r'(?<=\s)', suffix=r'(?=\s)'), Keyword),
            (words(('class', 'instance', 'where', 'with', 'let', 'let!', 'in',
                    'case', 'of', 'infix', 'infixr', 'infixl', 'generic',
                    'derive', 'otherwise', 'code', 'inline'),
                   prefix=r'(?s)^', suffix=r'(?=\s)'), Keyword),

            # Function definitions
            (r'(?=\{\|)', Whitespace, 'genericfunction'),
            (r'(?<=\n)(\s*)([\w`\$\(\)=\-<>~*\^\|\+&%]+)(\s+[\w])*(\s*)(::)',
             bygroups(store_indent, Name.Function, Keyword.Type, Whitespace,
                      Punctuation),
             'functiondefargs'),

            # Type definitions
            (r'(?<=\n)([ \t]*)(::)', bygroups(store_indent, Punctuation),
             'typedef'),

            # Literals
            (r'\'\\?.(?<!\\)\'', String.Char),
            (r'\'\\\d+\'', String.Char),
            (r'\'\\\\\'', String.Char),  # (special case for '\\')
            (r'[\+\-~]?\s*?\d+\.\d+(E[+-~]?\d+)?\b', Number.Float),
            (r'[\+\-~]?\s*?0[0-7]\b', Number.Oct),
            (r'[\+\-~]?\s*?0x[0-9a-fA-F]\b', Number.Hex),
            (r'[\+\-~]?\s*?\d+\b', Number.Integer),
            (r'"', String.Double, 'doubleqstring'),
            (words(('True', 'False'), prefix=r'(?<=\s)', suffix=r'(?=\s)'),
             Literal),

            # Everything else is some name
            (r'([\w`\$%]+\.?)*[\w`\$%]+', Name),

            # Punctuation
            (r'[{}()\[\],:;\.#]', Punctuation),
            (r'[\+\-=!<>\|&~*\^/]', Operator),
            (r'\\\\', Operator),

            # Lambda expressions
            (r'\\.*?(->|\.|=)', Name.Function),

            # Whitespace
            (r'\s', Whitespace),

            include('common'),
        ],
        'fromimport': [
            include('common'),
            (r'([\w`]+)', check_class_not_import),
            (r'\n', Whitespace, '#pop'),
            (r'\s', Whitespace),
        ],
        'fromimportfunc': [
            include('common'),
            (r'([\w`\$\(\)=\-<>~*\^\|\+&%]+)', check_instance_class),
            (r',', Punctuation),
            (r'\n', Whitespace, '#pop'),
            (r'\s', Whitespace),
        ],
        'fromimportfunctype': [
            include('common'),
            (r'[{(\[]', Punctuation, 'combtype'),
            (r',', Punctuation, '#pop'),
            # Was r':;\.#]' — a character class missing its opening '[',
            # which matched the literal 5-character sequence instead.
            (r'[:;\.#]', Punctuation),
            (r'\n', Whitespace, '#pop:2'),
            (r'\s', Whitespace),
            (r'.', Keyword.Type),
        ],
        'combtype': [
            include('common'),
            (r'[})\]]', Punctuation, '#pop'),
            # An opening bracket nests: push another 'combtype' (the original
            # popped here, desynchronizing the stack on nested brackets).
            (r'[{(\[]', Punctuation, 'combtype'),
            # Was r',:;\.#]' — same missing '[' as in 'fromimportfunctype'.
            (r'[,:;\.#]', Punctuation),
            (r'\s', Whitespace),
            (r'.', Keyword.Type),
        ],
        'import': [
            include('common'),
            (words(('from', 'import', 'as', 'qualified'),
                   prefix=r'(?<=\s)', suffix=r'(?=\s)'), Keyword.Namespace),
            (r'[\w`]+', Name.Class),
            (r'\n', Whitespace, '#pop'),
            (r',', Punctuation),
            (r'\s', Whitespace),
        ],
        'singlecomment': [
            (r'(.)(?=\n)', skip),
            (r'.', Comment),
        ],
        'doubleqstring': [
            (r'[^\\\'"]+', String.Double),
            (r'"', String.Double, '#pop'),
            (r'\\.|\'', String.Double),
        ],
        'typedef': [
            include('common'),
            (r'[\w`]+', Keyword.Type),
            (r'[:=\|\(\),\[\]\{\}\!\*]', Punctuation),
            (r'->', Punctuation),
            (r'\n(?=[^\s\|])', Whitespace, '#pop'),
            (r'\s', Whitespace),
            (r'.', Keyword.Type),
        ],
        'genericfunction': [
            include('common'),
            (r'\{\|', Punctuation),
            (r'\|\}', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'->', Punctuation),
            (r'(\s+of\s+)(\{)', bygroups(Keyword, Punctuation),
             'genericftypes'),
            (r'\s', Whitespace),
            (r'[\w`]+', Keyword.Type),
            (r'[\*\(\)]', Punctuation),
        ],
        'genericftypes': [
            include('common'),
            (r'[\w`]+', Keyword.Type),
            (r',', Punctuation),
            (r'\s', Whitespace),
            (r'\}', Punctuation, '#pop'),
        ],
        'functiondefargs': [
            include('common'),
            (r'\n(\s*)', check_indent1),
            (r'[!{}()\[\],:;\.#]', Punctuation),
            (r'->', Punctuation, 'functiondefres'),
            (r'^(?=\S)', Whitespace, '#pop'),
            (r'\S', Keyword.Type),
            (r'\s', Whitespace),
        ],
        'functiondefres': [
            include('common'),
            (r'\n(\s*)', check_indent2),
            (r'^(?=\S)', Whitespace, '#pop:2'),
            (r'[!{}()\[\],:;\.#]', Punctuation),
            (r'\|', Punctuation, 'functiondefclasses'),
            (r'\S', Keyword.Type),
            (r'\s', Whitespace),
        ],
        'functiondefclasses': [
            include('common'),
            (r'\n(\s*)', check_indent3),
            (r'^(?=\S)', Whitespace, '#pop:3'),
            (r'[,&]', Punctuation),
            (r'[\w`\$\(\)=\-<>~*\^\|\+&%]', Name.Function, 'functionname'),
            (r'\s', Whitespace),
        ],
        'functionname': [
            include('common'),
            (r'[\w`\$\(\)=\-<>~*\^\|\+&%]+', Name.Function),
            (r'(?=\{\|)', Punctuation, 'genericfunction'),
            # Empty match: fall back to the previous state.
            (r'', Text, '#pop'),
        ],
    }
+
diff --git a/pygments/lexers/functional.py b/pygments/lexers/functional.py
index 180d3fd4..5d4cdf0c 100644
--- a/pygments/lexers/functional.py
+++ b/pygments/lexers/functional.py
@@ -17,5 +17,6 @@ from pygments.lexers.theorem import CoqLexer
from pygments.lexers.erlang import ErlangLexer, ErlangShellLexer, \
ElixirConsoleLexer, ElixirLexer
from pygments.lexers.ml import SMLLexer, OcamlLexer, OpaLexer
+from pygments.lexers.clean import CleanLexer
__all__ = []
diff --git a/tests/examplefiles/StdGeneric.icl b/tests/examplefiles/StdGeneric.icl
new file mode 100644
index 00000000..2e6c3931
--- /dev/null
+++ b/tests/examplefiles/StdGeneric.icl
@@ -0,0 +1,92 @@
+implementation module StdGeneric
+
+import StdInt, StdMisc, StdClass, StdFunc
+
+generic bimap a b :: Bimap .a .b
+
+bimapId :: Bimap .a .a
+bimapId = { map_to = id, map_from = id }
+
+bimap{|c|} = { map_to = id, map_from = id }
+
+bimap{|PAIR|} bx by = { map_to= map_to, map_from=map_from }
+where
+ map_to (PAIR x y) = PAIR (bx.map_to x) (by.map_to y)
+ map_from (PAIR x y) = PAIR (bx.map_from x) (by.map_from y)
+bimap{|EITHER|} bl br = { map_to= map_to, map_from=map_from }
+where
+ map_to (LEFT x) = LEFT (bl.map_to x)
+ map_to (RIGHT x) = RIGHT (br.map_to x)
+ map_from (LEFT x) = LEFT (bl.map_from x)
+ map_from (RIGHT x) = RIGHT (br.map_from x)
+
+bimap{|(->)|} barg bres = { map_to = map_to, map_from = map_from }
+where
+ map_to f = comp3 bres.map_to f barg.map_from
+ map_from f = comp3 bres.map_from f barg.map_to
+
+bimap{|CONS|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (CONS x) = CONS (barg.map_to x)
+ map_from (CONS x) = CONS (barg.map_from x)
+
+bimap{|FIELD|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (FIELD x) = FIELD (barg.map_to x)
+ map_from (FIELD x) = FIELD (barg.map_from x)
+
+bimap{|OBJECT|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (OBJECT x) = OBJECT (barg.map_to x)
+ map_from (OBJECT x) = OBJECT (barg.map_from x)
+
+bimap{|Bimap|} x y = {map_to = map_to, map_from = map_from}
+where
+ map_to {map_to, map_from} =
+ { map_to = comp3 y.map_to map_to x.map_from
+ , map_from = comp3 x.map_to map_from y.map_from
+ }
+ map_from {map_to, map_from} =
+ { map_to = comp3 y.map_from map_to x.map_to
+ , map_from = comp3 x.map_from map_from y.map_to
+ }
+
+comp3 :: !(.a -> .b) u:(.c -> .a) !(.d -> .c) -> u:(.d -> .b)
+comp3 f g h
+ | is_id f
+ | is_id h
+ = cast g
+ = cast (\x -> g (h x))
+ | is_id h
+ = cast (\x -> f (g x))
+ = \x -> f (g (h x))
+where
+ is_id :: !.(.a -> .b) -> Bool
+ is_id f = code inline
+ {
+ eq_desc e_StdFunc_did 0 0
+ pop_a 1
+ }
+
+ cast :: !u:a -> u:b
+ cast f = code inline
+ {
+ pop_a 0
+ }
+
+getConsPath :: !GenericConsDescriptor -> [ConsPos]
+getConsPath {gcd_index, gcd_type_def={gtd_num_conses}}
+ = doit gcd_index gtd_num_conses
+where
+ doit i n
+ | n == 0
+ = abort "getConsPath: zero conses\n"
+ | i >= n
+ = abort "getConsPath: cons index >= number of conses"
+ | n == 1
+ = []
+ | i < (n/2)
+ = [ ConsLeft : doit i (n/2) ]
+ | otherwise
+ = [ ConsRight : doit (i - (n/2)) (n - (n/2)) ]
+ \ No newline at end of file