author    Tim Hatch <tim@timhatch.com>  2016-06-02 17:34:17 -0700
committer Tim Hatch <tim@timhatch.com>  2016-06-02 17:34:17 -0700
commit    cd6e2014481335bc870701359f5125a0c8b41ad8 (patch)
tree      cf1a5344f528219dd62f654874127a33cf829ff7
parent    4d27b2eb74eb41773c7f5a420b6f733ea98429b2 (diff)
parent    b5cfed047012cf4b5e79d350caa8fe10df25e55d (diff)
download  pygments-cd6e2014481335bc870701359f5125a0c8b41ad8.tar.gz
Merge with -main
-rw-r--r--  pygments/lexers/python.py   |   4
-rw-r--r--  tests/test_julia.py         |  60
-rw-r--r--  tests/test_python.py        | 113
3 files changed, 175 insertions, 2 deletions
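The merge below adds 'cls' to the pseudo-builtins of both PythonLexer and Python3Lexer and brings in new token-level tests for the Julia and Python lexers. A quick way to see the effect of the lexer change is to lex a small classmethod and inspect the token stream; the snippet is a rough sketch for interactive checking (not part of this commit) and assumes a Pygments checkout containing this merge is importable.

from pygments import lex
from pygments.lexers import PythonLexer

# After this merge, 'cls' is reported with the same pseudo-builtin token
# type already used for 'self' (Token.Name.Builtin.Pseudo).
code = "class C(object):\n    @classmethod\n    def make(cls):\n        return cls()\n"
for tokentype, value in lex(code, PythonLexer()):
    if value == 'cls':
        print(tokentype)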
diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py
index 7601afa8..35635ed1 100644
--- a/pygments/lexers/python.py
+++ b/pygments/lexers/python.py
@@ -116,7 +116,7 @@ class PythonLexer(RegexLexer):
                 'unichr', 'unicode', 'vars', 'xrange', 'zip'),
                 prefix=r'(?<!\.)', suffix=r'\b'),
              Name.Builtin),
-            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
+            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
             r')\b', Name.Builtin.Pseudo),
             (words((
                 'ArithmeticError', 'AssertionError', 'AttributeError',
@@ -303,7 +303,7 @@ class Python3Lexer(RegexLexer):
                 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)',
                 suffix=r'\b'),
              Name.Builtin),
-            (r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
+            (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
             (words((
                 'ArithmeticError', 'AssertionError', 'AttributeError',
                 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
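For a visual spot check of the same two hunks, the new token type can also be traced through to the CSS class emitted by the HTML formatter; a minimal sketch, assuming default HtmlFormatter settings (Name.Builtin.Pseudo is normally rendered with the short class "bp", the same one used for 'self').

from pygments import highlight
from pygments.lexers import Python3Lexer
from pygments.formatters import HtmlFormatter

# With the change above, 'cls' should come out wrapped roughly as
# <span class="bp">cls</span>, matching the existing markup for 'self'.
snippet = "class C:\n    @classmethod\n    def make(cls):\n        return cls()\n"
print(highlight(snippet, Python3Lexer(), HtmlFormatter(nowrap=True)))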
diff --git a/tests/test_julia.py b/tests/test_julia.py
new file mode 100644
index 00000000..8f78e283
--- /dev/null
+++ b/tests/test_julia.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+"""
+    Julia Tests
+    ~~~~~~~~~~~~
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.lexers import JuliaLexer
+from pygments.token import Token
+
+
+class JuliaTests(unittest.TestCase):
+    def setUp(self):
+        self.lexer = JuliaLexer()
+
+    def test_unicode(self):
+        """
+        Test that unicode character, √, in an expression is recognized
+        """
+        fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
+        tokens = [
+            (Token.Name, u's'),
+            (Token.Text, u' '),
+            (Token.Operator, u'='),
+            (Token.Text, u' '),
+            (Token.Name, u'\u221a'),
+            (Token.Punctuation, u'('),
+            (Token.Punctuation, u'('),
+            (Token.Literal.Number.Integer, u'1'),
+            (Token.Operator, u'/'),
+            (Token.Name, u'n'),
+            (Token.Punctuation, u')'),
+            (Token.Text, u' '),
+            (Token.Operator, u'*'),
+            (Token.Text, u' '),
+            (Token.Name, u'sum'),
+            (Token.Punctuation, u'('),
+            (Token.Name, u'count'),
+            (Token.Text, u' '),
+            (Token.Operator, u'.'),
+            (Token.Operator, u'^'),
+            (Token.Text, u' '),
+            (Token.Literal.Number.Integer, u'2'),
+            (Token.Punctuation, u')'),
+            (Token.Text, u' '),
+            (Token.Operator, u'-'),
+            (Token.Text, u' '),
+            (Token.Name, u'mu'),
+            (Token.Text, u' '),
+            (Token.Operator, u'.'),
+            (Token.Operator, u'^'),
+            (Token.Literal.Number.Integer, u'2'),
+            (Token.Punctuation, u')'),
+            (Token.Text, u'\n'),
+        ]
+        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
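The new test modules are plain unittest cases, so they can be run in isolation while iterating on a lexer; a small sketch, assuming the repository root is the working directory so that tests.test_julia is importable (the project's regular test runner works as well).

import unittest

# Load and run only the new Julia tests, equivalent to
# `python -m unittest tests.test_julia`.
suite = unittest.defaultTestLoader.loadTestsFromName('tests.test_julia')
unittest.TextTestRunner(verbosity=2).run(suite)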
diff --git a/tests/test_python.py b/tests/test_python.py
new file mode 100644
index 00000000..f5784cb1
--- /dev/null
+++ b/tests/test_python.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+"""
+    Python Tests
+    ~~~~~~~~~~~~
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.lexers import PythonLexer, Python3Lexer
+from pygments.token import Token
+
+
+class PythonTest(unittest.TestCase):
+    def setUp(self):
+        self.lexer = PythonLexer()
+
+    def test_cls_builtin(self):
+        """
+        Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
+
+        """
+        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
+        tokens = [
+            (Token.Keyword, 'class'),
+            (Token.Text, ' '),
+            (Token.Name.Class, 'TestClass'),
+            (Token.Punctuation, '('),
+            (Token.Punctuation, ')'),
+            (Token.Punctuation, ':'),
+            (Token.Text, '\n'),
+            (Token.Text, '    '),
+            (Token.Name.Decorator, '@classmethod'),
+            (Token.Text, '\n'),
+            (Token.Text, '    '),
+            (Token.Keyword, 'def'),
+            (Token.Text, ' '),
+            (Token.Name.Function, 'hello'),
+            (Token.Punctuation, '('),
+            (Token.Name.Builtin.Pseudo, 'cls'),
+            (Token.Punctuation, ')'),
+            (Token.Punctuation, ':'),
+            (Token.Text, '\n'),
+            (Token.Text, '        '),
+            (Token.Keyword, 'pass'),
+            (Token.Text, '\n'),
+        ]
+        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+
+class Python3Test(unittest.TestCase):
+    def setUp(self):
+        self.lexer = Python3Lexer()
+
+    def testNeedsName(self):
+        """
+        Tests that '@' is recognized as an Operator
+        """
+        fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+        tokens = [
+            (Token.Name, u'S'),
+            (Token.Text, u' '),
+            (Token.Operator, u'='),
+            (Token.Text, u' '),
+            (Token.Punctuation, u'('),
+            (Token.Name, u'H'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Name, u'beta'),
+            (Token.Text, u' '),
+            (Token.Operator, u'-'),
+            (Token.Text, u' '),
+            (Token.Name, u'r'),
+            (Token.Punctuation, u')'),
+            (Token.Operator, u'.'),
+            (Token.Name, u'T'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Name, u'inv'),
+            (Token.Punctuation, u'('),
+            (Token.Name, u'H'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Name, u'V'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Name, u'H'),
+            (Token.Operator, u'.'),
+            (Token.Name, u'T'),
+            (Token.Punctuation, u')'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Punctuation, u'('),
+            (Token.Name, u'H'),
+            (Token.Text, u' '),
+            (Token.Operator, u'@'),
+            (Token.Text, u' '),
+            (Token.Name, u'beta'),
+            (Token.Text, u' '),
+            (Token.Operator, u'-'),
+            (Token.Text, u' '),
+            (Token.Name, u'r'),
+            (Token.Punctuation, u')'),
+            (Token.Text, u'\n'),
+        ]
+        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
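The Python3Test case above pins down that the matrix-multiplication '@' operator from PEP 465 is tokenized as Token.Operator; a compact interactive check along the same lines (a sketch, not part of the commit):

from pygments import lex
from pygments.lexers import Python3Lexer
from pygments.token import Token

expr = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
# Keep only the operator tokens; '@' should show up alongside '=', '-' and '.'.
ops = [value for tokentype, value in lex(expr, Python3Lexer()) if tokentype is Token.Operator]
print(ops)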