Diffstat (limited to 'pygments/lexers/jvm.py')
-rw-r--r--  pygments/lexers/jvm.py | 187
1 file changed, 178 insertions(+), 9 deletions(-)
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 83b45613..b56d4582 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -15,10 +15,12 @@ from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
+from pygments.util import get_choice_opt
+from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
- 'GroovyLexer', 'IokeLexer', 'ClojureLexer']
+ 'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'KotlinLexer']
class JavaLexer(RegexLexer):
@@ -117,7 +119,7 @@ class ScalaLexer(RegexLexer):
ur'lazy|match|new|override|pr(?:ivate|otected)'
ur'|re(?:quires|turn)|s(?:ealed|uper)|'
ur't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|'
- u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\b|(?=\\s)|$)', Keyword),
+ u'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
(ur':(?!%s)' % op, Keyword, 'type'),
(ur'%s%s\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
@@ -125,7 +127,7 @@ class ScalaLexer(RegexLexer):
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
(r'""".*?"""', String),
(r'"(\\\\|\\"|[^"])*"', String),
- (ur"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
+ (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator,
# Name.Attribute)),
(idrest, Name),
@@ -133,7 +135,7 @@ class ScalaLexer(RegexLexer):
(r'\[', Operator, 'typeparam'),
(r'[\(\)\{\};,.#]', Operator),
(op, Operator),
- (ur'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
+ (r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
@@ -142,7 +144,7 @@ class ScalaLexer(RegexLexer):
'class': [
(ur'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
- (r'[\s\n]+', Text),
+ (r'\s+', Text),
(r'{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
@@ -151,7 +153,7 @@ class ScalaLexer(RegexLexer):
'type': [
(r'\s+', Text),
(u'<[%:]|>:|[#_\u21D2]|forSome|type', Keyword),
- (r'([,\);}]|=>|=)([\s\n]*)', bygroups(Operator, Text), '#pop'),
+ (r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'),
(r'[\(\{]', Operator, '#push'),
(ur'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' %
(idrest, op, idrest, op),
@@ -163,7 +165,7 @@ class ScalaLexer(RegexLexer):
(ur'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
- (r'[\s\n,]+', Text),
+ (r'[\s,]+', Text),
(u'<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\]\)\}])', Operator, '#pop'),
(r'[\(\[\{]', Operator, '#push'),
@@ -421,7 +423,7 @@ class IokeLexer(RegexLexer):
#Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
- r'|(?<=dsyntax\())[\s\n\r]*"', String.Doc, 'documentation'),
+ r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
#Text
(r'"', String, 'text'),
@@ -526,7 +528,7 @@ class IokeLexer(RegexLexer):
Operator),
# Punctuation
- (r'(\`\`|\`|\'\'|\'|\.|\,|@|@@|\[|\]|\(|\)|{|})', Punctuation),
+ (r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|{|})', Punctuation),
#kinds
(r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),
@@ -676,3 +678,170 @@ class ClojureLexer(RegexLexer):
(r'(\(|\))', Punctuation),
],
}
+
+
+class TeaLangLexer(RegexLexer):
+ """
+ For `Tea <http://teatrove.org/>`_ source code. Only used within a
+ TeaTemplateLexer.
+
+ *New in Pygments 1.5.*
+ """
+
+ flags = re.MULTILINE | re.DOTALL
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
+
+ tokens = {
+ 'root': [
+ # method names
+ (r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
+ r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Text, Operator)),
+ (r'[^\S\n]+', Text),
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
+ (r'(and|break|else|foreach|if|in|not|or|reverse)\b',
+ Keyword),
+ (r'(as|call|define)\b', Keyword.Declaration),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
+ (r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
+ (r'"(\\\\|\\"|[^"])*"', String),
+ (r'\'(\\\\|\\\'|[^\'])*\'', String),
+ (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
+ (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
+ (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
+ (r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-f]+', Number.Hex),
+ (r'[0-9]+L?', Number.Integer),
+ (r'\n', Text)
+ ],
+ 'template': [
+ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
+ ],
+ 'import': [
+ (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
+ ],
+ }
+
+
+class KotlinLexer(RegexLexer):
+ """
+ For `Kotlin <http://confluence.jetbrains.net/display/Kotlin/>`_
+ source code.
+
+ Additional options accepted:
+
+ `unicodelevel`
+ Determines which Unicode characters this lexer allows for identifiers.
+ The possible values are:
+
+ * ``none`` -- only the ASCII letters and numbers are allowed. This
+ is the fastest selection.
+ * ``basic`` -- all Unicode characters from the specification except
+ category ``Lo`` are allowed.
+ * ``full`` -- all Unicode characters as specified in the C# specs
+ are allowed. Note that this means a considerable slowdown since the
+ ``Lo`` category has more than 40,000 characters in it!
+
+ The default value is ``basic``.
+
+ *New in Pygments 1.5.*
+ """
+
+ name = 'Kotlin'
+ aliases = ['kotlin']
+ filenames = ['*.kt']
+ mimetypes = ['text/x-kotlin'] # inferred
+
+ flags = re.MULTILINE | re.DOTALL | re.UNICODE
+
+ # for the range of allowed unicode characters in identifiers,
+ # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
+
+ def _escape(st):
+ return st.replace(u'\\', ur'\\').replace(u'-', ur'\-').\
+ replace(u'[', ur'\[').replace(u']', ur'\]')
+
+ levels = {
+ 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
+ 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
+ '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
+ uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
+ 'full': ('@?(?:_|[^' +
+ _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl')) + '])'
+ + '[^' + _escape(uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
+ 'Nl', 'Nd', 'Pc', 'Cf', 'Mn',
+ 'Mc')) + ']*'),
+ }
+
+ tokens = {}
+ token_variants = True
+
+ for levelname, cs_ident in levels.items():
+ tokens[levelname] = {
+ 'root': [
+ # method names
+ (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
+ r'(' + cs_ident + ')' # method name
+ r'(\s*)(\()', # signature start
+ bygroups(using(this), Name.Function, Text, Punctuation)),
+ (r'^\s*\[.*?\]', Name.Attribute),
+ (r'[^\S\n]+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'//.*?\n', Comment.Single),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'\n', Text),
+ (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
+ (r'[{}]', Punctuation),
+ (r'@"(""|[^"])*"', String),
+ (r'"(\\\\|\\"|[^"\n])*["\n]', String),
+ (r"'\\.'|'[^\\]'", String.Char),
+ (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
+ r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
+ (r'#[ \t]*(if|endif|else|elif|define|undef|'
+ r'line|error|warning|region|endregion|pragma)\b.*?\n',
+ Comment.Preproc),
+ (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
+ Keyword)),
+ (r'(abstract|as|break|catch|'
+ r'fun|continue|default|delegate|'
+ r'do|else|enum|extern|false|finally|'
+ r'fixed|for|goto|if|implicit|in|interface|'
+ r'internal|is|lock|null|'
+ r'out|override|private|protected|public|readonly|'
+ r'ref|return|sealed|sizeof|'
+ r'when|this|throw|true|try|typeof|'
+ r'unchecked|unsafe|virtual|void|while|'
+ r'get|set|new|partial|yield|val|var)\b', Keyword),
+ (r'(global)(::)', bygroups(Keyword, Punctuation)),
+ (r'(bool|byte|char|decimal|double|dynamic|float|int|long|'
+ r'short)\b\??', Keyword.Type),
+ (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
+ (r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'),
+ (cs_ident, Name),
+ ],
+ 'class': [
+ (cs_ident, Name.Class, '#pop')
+ ],
+ 'package': [
+ (r'(?=\()', Text, '#pop'), # using (resource)
+ ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
+ ]
+ }
+
+ def __init__(self, **options):
+ level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
+ 'basic')
+ if level not in self._all_tokens:
+ # compile the regexes now
+ self._tokens = self.__class__.process_tokendef(level)
+ else:
+ self._tokens = self._all_tokens[level]
+
+ RegexLexer.__init__(self, **options)
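
A minimal usage sketch (not part of the commit above): assuming this revision of pygments/lexers/jvm.py is installed, the new KotlinLexer can be instantiated directly and the `unicodelevel` option documented in its docstring passed as a keyword argument. The Kotlin snippet and the choice of TerminalFormatter are illustrative only.

    # Hypothetical usage example -- not part of the commit.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.jvm import KotlinLexer

    code = u'fun main(args : Array<String>) { println("hello") }'

    # 'unicodelevel' selects the identifier alphabet: 'none', 'basic' (default) or 'full'.
    lexer = KotlinLexer(unicodelevel='basic')
    print(highlight(code, lexer, TerminalFormatter()))

An invalid `unicodelevel` value is rejected by get_choice_opt, and the token table for a given level is only compiled on first use (see the `_all_tokens` check in `__init__`), so later instantiations with the same level reuse the cached table.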