author    Alex Zimin <ziminav@gmail.com>  2011-05-20 01:52:48 +0800
committer Alex Zimin <ziminav@gmail.com>  2011-05-20 01:52:48 +0800
commit    9140f695738b8c2ff6791926d4f190e252884d63 (patch)
tree      d17b92d9cf37186b8983e9523c3c6988513b51c2
parent    f6332d38fca8c2b1a8e45f123935a24127fe1a52 (diff)
download  pygments-9140f695738b8c2ff6791926d4f190e252884d63.tar.gz
Add Nemerle lexer
-rw-r--r--  pygments/lexers/_mapping.py    1
-rw-r--r--  pygments/lexers/dotnet.py    141
2 files changed, 141 insertions, 1 deletion
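
Once this change is installed, the new lexer is reachable through the standard Pygments API. A minimal usage sketch, not part of the commit itself; the Nemerle snippet and the choice of formatter are only illustrative:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import get_lexer_by_name

    # 'nemerle' is the alias registered in pygments/lexers/_mapping.py below
    code = 'def greet(name) { WriteLine($"hello $name") }'
    print(highlight(code, get_lexer_by_name('nemerle'), TerminalFormatter()))

    # the lexer also accepts the 'unicodelevel' option documented in its
    # docstring; lexer options are passed through get_lexer_by_name
    ascii_only = get_lexer_by_name('nemerle', unicodelevel='none')
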
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index ed23cbdb..503eb9f9 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -144,6 +144,7 @@ LEXERS = {
     'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
     'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
     'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+    'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
     'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
     'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
     'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
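
The mapping entry above is what lets Pygments locate the lexer lazily: the tuple names the module, the display name, the aliases, the filename globs, and the MIME types. Roughly, the following lookups (all existing helpers in pygments.lexers) resolve to NemerleLexer through this table:

    from pygments.lexers import (get_lexer_by_name, get_lexer_for_filename,
                                 get_lexer_for_mimetype)

    get_lexer_by_name('nemerle')              # matched via the ('nemerle',) aliases
    get_lexer_for_filename('Program.n')       # matched via the ('*.n',) filename globs
    get_lexer_for_mimetype('text/x-nemerle')  # matched via the mimetypes tuple
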
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 48feeb85..21990122 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -18,7 +18,7 @@ from pygments import unistring as uni
 from pygments.lexers.web import XmlLexer
-__all__ = ['CSharpLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer',
+__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer', 'CSharpAspxLexer',
            'VbNetAspxLexer']
@@ -137,6 +137,145 @@ class CSharpLexer(RegexLexer):
         RegexLexer.__init__(self, **options)
+class NemerleLexer(RegexLexer):
+    """
+    For `Nemerle <http://nemerle.org>`_
+    source code.
+
+    Additional options accepted:
+
+    `unicodelevel`
+      Determines which Unicode characters this lexer allows for identifiers.
+      The possible values are:
+
+      * ``none`` -- only the ASCII letters and numbers are allowed. This
+        is the fastest selection.
+      * ``basic`` -- all Unicode characters from the specification except
+        category ``Lo`` are allowed.
+      * ``full`` -- all Unicode characters as specified in the C# specs
+        are allowed. Note that this means a considerable slowdown since the
+        ``Lo`` category has more than 40,000 characters in it!
+
+      The default value is ``basic``.
+
+    *New in Pygments 1.4.*
+    """
+
+    name = 'Nemerle'
+    aliases = ['nemerle']
+    filenames = ['*.n']
+    mimetypes = ['text/x-nemerle']  # inferred
+
+    flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+    # for the range of allowed unicode characters in identifiers,
+    # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
+
+    levels = {
+        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
+        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
+                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
+                  uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
+        'full': ('@?(?:_|[^' +
+                 _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl')) + '])'
+                 + '[^' + _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
+                                                'Nl', 'Nd', 'Pc', 'Cf', 'Mn',
+                                                'Mc')) + ']*'),
+    }
+
+    tokens = {}
+    token_variants = True
+
+
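+    # one token table is built per Unicode identifier level; __init__ below
+    # selects and compiles the variant requested via the 'unicodelevel' option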
+    for levelname, cs_ident in levels.items():
+        tokens[levelname] = {
+            'root': [
+                # method names
+                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
+                 r'(' + cs_ident + ')'                            # method name
+                 r'(\s*)(\()',                                    # signature start
+                 bygroups(using(this), Name.Function, Text, Punctuation)),
+                (r'^\s*\[.*?\]', Name.Attribute),
+                (r'[^\S\n]+', Text),
+                (r'\\\n', Text),  # line continuation
+                (r'//.*?\n', Comment.Single),
+                (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+                (r'\n', Text),
+                (r'\$\s*"', String, 'splice-string'),
+                (r'\$\s*<#', String, 'splice-string2'),
+                (r'<#(?:[^#>])*#>', String),
+                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
+                (r'[{}]', Punctuation),
+                (r'@"(\\\\|\\"|[^"])*"', String),
+                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
+                (r"'\\.'|'[^\\]'", String.Char),
+                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
+                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
+                (r'#[ \t]*(if|endif|else|elif|define|undef|'
+                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
+                 Comment.Preproc),
+                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
+                 Keyword)),
+                (r'(abstract|and|as|base|catch|def|delegate|'
+                 r'enum|event|extern|false|finally|'
+                 r'fun|implements|interface|internal|'
+                 r'is|macro|match|matches|module|mutable|new|'
+                 r'null|out|override|params|partial|private|'
+                 r'protected|public|ref|sealed|static|'
+                 r'syntax|this|throw|true|try|type|typeof|'
+                 r'virtual|volatile|when|where|with|'
+                 r'assert|assert2|async|break|checked|continue|do|else|'
+                 r'ensures|for|foreach|if|late|lock|new|nolate|'
+                 r'otherwise|regexp|repeat|requires|return|surroundwith|unchecked|unless|using|while|yield)\b', Keyword),
+                (r'(global)(::)', bygroups(Keyword, Punctuation)),
+                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
+                 r'short|string|uint|ulong|ushort)\b\??', Keyword.Type),
+                (r'(class|struct|variant|module)(\s+)', bygroups(Keyword, Text), 'class'),
+                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
+                (cs_ident, Name),
+            ],
+            'class': [
+                (cs_ident, Name.Class, '#pop')
+            ],
+            'namespace': [
+                (r'(?=\()', Text, '#pop'),  # using (resource)
+                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
+            ],
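+            # splice ("interpolated") strings: $"... $(expr) ..." and the
+            # recursive $<# ... #> form; spliced expressions get their own states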
+            'splice-string': [
+                (r'[^"$]', String),
+                (r'\$\(', Name, 'splice-string-content'),
+                (r'\$', Name, 'splice-string-ident'),
+                (r'\\"', String),
+                (r'"', String, '#pop')
+            ],
+            'splice-string2': [
+                (r'[^#>$]', String),
+                (r'\$\(', Name, 'splice-string-content'),
+                (r'\$', Name, 'splice-string-ident'),
+                (r'#>', String, '#pop')
+            ],
+            'splice-string-content': [
+                (r'if|match', Keyword),
+                (r'[~!%^&*+=|\[\]:;,.<>/?-]', Punctuation),
+                (cs_ident, Name),
+                (r'\(', Name, '#push'),
+                (r'\)', Name, '#pop')
+            ],
+            'splice-string-ident': [
+                (cs_ident, Name, '#pop'),
+                (r'.', String, '#pop')
+            ],
+        }
+
+    def __init__(self, **options):
+        level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(), 'basic')
+        if level not in self._all_tokens:
+            # compile the regexes now
+            self._tokens = self.__class__.process_tokendef(level)
+        else:
+            self._tokens = self._all_tokens[level]
+
+        RegexLexer.__init__(self, **options)
+
 class BooLexer(RegexLexer):
     """