author    Georg Brandl <georg@python.org>  2016-02-16 21:59:38 +0100
committer Georg Brandl <georg@python.org>  2016-02-16 21:59:38 +0100
commit    0b836b7005118f08bb88c59a00f05074547191d7 (patch)
tree      a879ff8ab6970888edaad393c3d91305aff4620c
parent    3f69f1f0a1adfd318db2104f56be97b39ce46751 (diff)
download  pygments-0b836b7005118f08bb88c59a00f05074547191d7.tar.gz
Clean lexer: style nits
-rw-r--r--  CHANGES                       1
-rw-r--r--  pygments/lexers/_mapping.py   2
-rw-r--r--  pygments/lexers/clean.py      121
3 files changed, 61 insertions, 63 deletions
diff --git a/CHANGES b/CHANGES
index 4eab214c..81e22722 100644
--- a/CHANGES
+++ b/CHANGES
@@ -16,6 +16,7 @@ Version 2.2
* AMPL
* TypoScript (#1173)
* Varnish config (PR#554)
+ * Clean (PR#503)
- Added `lexers.find_lexer_class_by_name()`. (#1203)
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index c5728cf2..5337dc55 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -79,7 +79,7 @@ LEXERS = {
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
- 'CleanLexer': ('pygments.lexers.clean', 'CleanLexer', ('Clean', 'clean'), ('*.icl', '*.dcl'), ()),
+ 'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
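
The `_mapping.py` hunk above swaps the class-style display name `'CleanLexer'` for the proper name `'Clean'` and keeps only the lowercase `'clean'` alias. A minimal sketch of how the corrected entry resolves at runtime (not part of this commit; it just exercises the standard Pygments lookup API):

    # Sketch: look up the Clean lexer by the alias defined in the fixed LEXERS entry.
    from pygments.lexers import get_lexer_by_name

    lexer = get_lexer_by_name('clean')   # alias from the corrected mapping
    print(lexer.name)                    # -> 'Clean'
    print(lexer.filenames)               # -> ['*.icl', '*.dcl']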
diff --git a/pygments/lexers/clean.py b/pygments/lexers/clean.py
index b33bf884..acff807e 100644
--- a/pygments/lexers/clean.py
+++ b/pygments/lexers/clean.py
@@ -1,35 +1,33 @@
# -*- coding: utf-8 -*-
"""
pygments.lexers.clean
- ~~~~~~~~~~~~~~~~~~~~
+ ~~~~~~~~~~~~~~~~~~~~~
Lexer for the Clean language.
- :copyright: Copyright 2016 by the Pygments team, see AUTHORS.
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from pygments.lexer import ExtendedRegexLexer, LexerContext,\
- bygroups, words, include, default
-from pygments.token import Comment, Keyword, Literal, Name, Number, Operator,\
- Punctuation, String, Text, Whitespace
+from pygments.lexer import ExtendedRegexLexer, LexerContext, \
+ bygroups, words, include, default
+from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
+ Punctuation, String, Text, Whitespace
__all__ = ['CleanLexer']
+
class CleanLexer(ExtendedRegexLexer):
"""
Lexer for the general purpose, state-of-the-art, pure and lazy functional
programming language Clean (http://clean.cs.ru.nl/Clean).
- .. versionadded: 2.1
+ .. versionadded: 2.2
"""
- name = 'CleanLexer'
- aliases = ['Clean', 'clean']
+ name = 'Clean'
+ aliases = ['clean']
filenames = ['*.icl', '*.dcl']
- def __init__(self, *args, **kwargs):
- super(CleanLexer, self).__init__(*args, **kwargs)
-
def get_tokens_unprocessed(self, text=None, context=None):
ctx = LexerContext(text, 0)
ctx.indent = 0
@@ -52,9 +50,9 @@ class CleanLexer(ExtendedRegexLexer):
ctx.pos = match.end()
def store_indent(lexer, match, ctx):
- # Tabs are four spaces:
+ # Tabs are four spaces:
# https://svn.cs.ru.nl/repos/clean-platform/trunk/doc/STANDARDS.txt
- ctx.indent = len(match.group(0).replace('\t',' '))
+ ctx.indent = len(match.group(0).replace('\t', ' '))
ctx.pos = match.end()
yield match.start(), Text, match.group(0)
@@ -100,10 +98,14 @@ class CleanLexer(ExtendedRegexLexer):
ctx.pos = match.end()
yield match.start(), Comment, match.group(0)
+ keywords = ('class', 'instance', 'where', 'with', 'let', 'let!', 'with',
+ 'in', 'case', 'of', 'infix', 'infixr', 'infixl', 'generic',
+ 'derive', 'otherwise', 'code', 'inline')
+
tokens = {
'common': [
(r';', Punctuation, '#pop'),
- (r'//', Comment, 'singlecomment')
+ (r'//', Comment, 'singlecomment'),
],
'root': [
# Comments
@@ -112,27 +114,22 @@ class CleanLexer(ExtendedRegexLexer):
(r'(?s)/\*.*?\*/', Comment.Multi),
# Modules, imports, etc.
- (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`]+)',
+ (r'\b((?:implementation|definition|system)\s+)?(module)(\s+)([\w`]+)',
bygroups(Keyword.Namespace, Keyword.Namespace, Text, Name.Class)),
(r'(?<=\n)import(?=\s)', Keyword.Namespace, 'import'),
(r'(?<=\n)from(?=\s)', Keyword.Namespace, 'fromimport'),
# Keywords
# We cannot use (?s)^|(?<=\s) as prefix, so need to repeat this
- (words(('class','instance','where','with','let','let!','with','in',
- 'case','of','infix','infixr','infixl','generic','derive',
- 'otherwise', 'code', 'inline'),
- prefix=r'(?<=\s)', suffix=r'(?=\s)'), Keyword),
- (words(('class','instance','where','with','let','let!','with','in',
- 'case','of','infix','infixr','infixl','generic','derive',
- 'otherwise', 'code', 'inline'),
- prefix=r'^', suffix=r'(?=\s)'), Keyword),
+ (words(keywords, prefix=r'(?<=\s)', suffix=r'(?=\s)'), Keyword),
+ (words(keywords, prefix=r'^', suffix=r'(?=\s)'), Keyword),
# Function definitions
(r'(?=\{\|)', Whitespace, 'genericfunction'),
- (r'(?<=\n)(\s*)([\w`\$\(\)=\-<>~*\^\|\+&%]+)(\s+[\w])*(\s*)(::)',
- bygroups(store_indent, Name.Function, Keyword.Type, Whitespace, Punctuation),
- 'functiondefargs'),
+ (r'(?<=\n)(\s*)([\w`$()=\-<>~*\^|+&%]+)((?:\s+[\w])*)(\s*)(::)',
+ bygroups(store_indent, Name.Function, Keyword.Type, Whitespace,
+ Punctuation),
+ 'functiondefargs'),
# Type definitions
(r'(?<=\n)([ \t]*)(::)', bygroups(store_indent, Punctuation), 'typedef'),
@@ -142,19 +139,20 @@ class CleanLexer(ExtendedRegexLexer):
(r'\'\\?.(?<!\\)\'', String.Char),
(r'\'\\\d+\'', String.Char),
(r'\'\\\\\'', String.Char), # (special case for '\\')
- (r'[\+\-~]?\s\d+\.\d+(E[+-~]?\d+)?\b', Number.Float),
- (r'[\+\-~]?\s0[0-7]\b', Number.Oct),
- (r'[\+\-~]?\s0x[0-9a-fA-F]\b', Number.Hex),
- (r'[\+\-~]?\s\d+\b', Number.Integer),
+ (r'[+\-~]?\s*\d+\.\d+(E[+\-~]?\d+)?\b', Number.Float),
+ (r'[+\-~]?\s*0[0-7]\b', Number.Oct),
+ (r'[+\-~]?\s*0x[0-9a-fA-F]\b', Number.Hex),
+ (r'[+\-~]?\s*\d+\b', Number.Integer),
(r'"', String.Double, 'doubleqstring'),
- (words(('True', 'False'), prefix=r'(?<=\s)', suffix=r'(?=\s)'), Literal),
+ (words(('True', 'False'), prefix=r'(?<=\s)', suffix=r'(?=\s)'),
+ Literal),
# Everything else is some name
- (r'([\w`\$%]+\.?)*[\w`\$%]+', Name),
+ (r'([\w`$%]+\.?)*[\w`$%]+', Name),
# Punctuation
- (r'[{}()\[\],:;\.#]', Punctuation),
- (r'[\+\-=!<>\|&~*\^/]', Operator),
+ (r'[{}()\[\],:;.#]', Punctuation),
+ (r'[+\-=!<>|&~*\^/]', Operator),
(r'\\\\', Operator),
# Lambda expressions
@@ -163,64 +161,64 @@ class CleanLexer(ExtendedRegexLexer):
# Whitespace
(r'\s', Whitespace),
- include('common')
+ include('common'),
],
'fromimport': [
include('common'),
(r'([\w`]+)', check_class_not_import),
(r'\n', Whitespace, '#pop'),
- (r'\s', Whitespace)
+ (r'\s', Whitespace),
],
'fromimportfunc': [
include('common'),
- (r'([\w`\$\(\)=\-<>~*\^\|\+&%]+)', check_instance_class),
+ (r'([\w`$()=\-<>~*\^|+&%]+)', check_instance_class),
(r',', Punctuation),
(r'\n', Whitespace, '#pop'),
- (r'\s', Whitespace)
+ (r'\s', Whitespace),
],
'fromimportfunctype': [
include('common'),
(r'[{(\[]', Punctuation, 'combtype'),
(r',', Punctuation, '#pop'),
- (r'[:;\.#]', Punctuation),
+ (r'[:;.#]', Punctuation),
(r'\n', Whitespace, '#pop:2'),
(r'[^\S\n]+', Whitespace),
- (r'\S+', Keyword.Type)
+ (r'\S+', Keyword.Type),
],
'combtype': [
include('common'),
(r'[})\]]', Punctuation, '#pop'),
(r'[{(\[]', Punctuation, '#pop'),
- (r'[,:;\.#]', Punctuation),
+ (r'[,:;.#]', Punctuation),
(r'\s+', Whitespace),
- (r'\S+', Keyword.Type)
+ (r'\S+', Keyword.Type),
],
'import': [
include('common'),
- (words(('from', 'import', 'as', 'qualified'),
- prefix='(?<=\s)', suffix='(?=\s)'), Keyword.Namespace),
+ (words(('from', 'import', 'as', 'qualified'),
+ prefix='(?<=\s)', suffix='(?=\s)'), Keyword.Namespace),
(r'[\w`]+', Name.Class),
(r'\n', Whitespace, '#pop'),
(r',', Punctuation),
- (r'[^\S\n]+', Whitespace)
+ (r'[^\S\n]+', Whitespace),
],
'singlecomment': [
(r'(.)(?=\n)', skip),
- (r'.+(?!\n)', Comment)
+ (r'.+(?!\n)', Comment),
],
'doubleqstring': [
(r'[^\\"]+', String.Double),
(r'"', String.Double, '#pop'),
- (r'\\.', String.Double)
+ (r'\\.', String.Double),
],
'typedef': [
include('common'),
(r'[\w`]+', Keyword.Type),
- (r'[:=\|\(\),\[\]\{\}\!\*]', Punctuation),
+ (r'[:=|(),\[\]{}!*]', Punctuation),
(r'->', Punctuation),
- (r'\n(?=[^\s\|])', Whitespace, '#pop'),
+ (r'\n(?=[^\s|])', Whitespace, '#pop'),
(r'\s', Whitespace),
- (r'.', Keyword.Type)
+ (r'.', Keyword.Type),
],
'genericfunction': [
include('common'),
@@ -231,46 +229,45 @@ class CleanLexer(ExtendedRegexLexer):
(r'(\s+of\s+)(\{)', bygroups(Keyword, Punctuation), 'genericftypes'),
(r'\s', Whitespace),
(r'[\w`]+', Keyword.Type),
- (r'[\*\(\)]', Punctuation)
+ (r'[*()]', Punctuation),
],
'genericftypes': [
include('common'),
(r'[\w`]+', Keyword.Type),
(r',', Punctuation),
(r'\s', Whitespace),
- (r'\}', Punctuation, '#pop')
+ (r'\}', Punctuation, '#pop'),
],
'functiondefargs': [
include('common'),
(r'\n(\s*)', check_indent1),
- (r'[!{}()\[\],:;\.#]', Punctuation),
+ (r'[!{}()\[\],:;.#]', Punctuation),
(r'->', Punctuation, 'functiondefres'),
(r'^(?=\S)', Whitespace, '#pop'),
(r'\S', Keyword.Type),
- (r'\s', Whitespace)
+ (r'\s', Whitespace),
],
'functiondefres': [
include('common'),
(r'\n(\s*)', check_indent2),
(r'^(?=\S)', Whitespace, '#pop:2'),
- (r'[!{}()\[\],:;\.#]', Punctuation),
+ (r'[!{}()\[\],:;.#]', Punctuation),
(r'\|', Punctuation, 'functiondefclasses'),
(r'\S', Keyword.Type),
- (r'\s', Whitespace)
+ (r'\s', Whitespace),
],
'functiondefclasses': [
include('common'),
(r'\n(\s*)', check_indent3),
(r'^(?=\S)', Whitespace, '#pop:3'),
(r'[,&]', Punctuation),
- (r'[\w`\$\(\)=\-<>~*\^\|\+&%]', Name.Function, 'functionname'),
- (r'\s', Whitespace)
+ (r'[\w`$()=\-<>~*\^|+&%]', Name.Function, 'functionname'),
+ (r'\s', Whitespace),
],
'functionname': [
include('common'),
- (r'[\w`\$\(\)=\-<>~*\^\|\+&%]+', Name.Function),
+ (r'[\w`$()=\-<>~*\^|+&%]+', Name.Function),
(r'(?=\{\|)', Punctuation, 'genericfunction'),
- default('#pop')
+ default('#pop'),
]
}
-
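
For reference, a hedged usage sketch of the tidied lexer, assuming Pygments 2.2 with this commit applied; the Clean snippet passed to `highlight` is an illustrative assumption, not taken from this commit:

    # Sketch: highlight a small Clean module with the CleanLexer.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.clean import CleanLexer

    # Hypothetical Clean source used only for illustration.
    code = 'module hello\nStart = "Hello, world!"\n'

    # Tokenize the source and render ANSI-coloured output to stdout.
    print(highlight(code, CleanLexer(), TerminalFormatter()))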