summary refs log tree commit diff
path: root/tests/test_ezhil.py
diff options
context:
space:
mode:
Diffstat (limited to 'tests/test_ezhil.py')
-rw-r--r--  tests/test_ezhil.py  318
1 file changed, 156 insertions, 162 deletions
diff --git a/tests/test_ezhil.py b/tests/test_ezhil.py
index 15cc13b1..8047a30a 100644
--- a/tests/test_ezhil.py
+++ b/tests/test_ezhil.py
@@ -7,177 +7,171 @@
:license: BSD, see LICENSE for details.
"""
-import unittest
+import pytest
from pygments.token import Operator, Number, Text, Token
from pygments.lexers import EzhilLexer
-class EzhilTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+ yield EzhilLexer()
- def setUp(self):
- self.lexer = EzhilLexer()
- self.maxDiff = None
-
- def testSum(self):
- fragment = u'1+3\n'
- tokens = [
- (Number.Integer, u'1'),
- (Operator, u'+'),
- (Number.Integer, u'3'),
- (Text, u'\n'),
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
- def testGCDExpr(self):
- fragment = u'1^3+(5-5)*gcd(a,b)\n'
- tokens = [
- (Token.Number.Integer,u'1'),
- (Token.Operator,u'^'),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Operator, u'+'),
- (Token.Punctuation, u'('),
- (Token.Literal.Number.Integer, u'5'),
- (Token.Operator, u'-'),
- (Token.Literal.Number.Integer, u'5'),
- (Token.Punctuation, u')'),
- (Token.Operator, u'*'),
- (Token.Name, u'gcd'),
- (Token.Punctuation, u'('),
- (Token.Name, u'a'),
- (Token.Operator, u','),
- (Token.Name, u'b'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n')
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
- def testIfStatement(self):
- fragment = u"""@( 0 > 3 ) ஆனால்
- பதிப்பி "wont print"
+def test_sum(lexer):
+ fragment = u'1+3\n'
+ tokens = [
+ (Number.Integer, u'1'),
+ (Operator, u'+'),
+ (Number.Integer, u'3'),
+ (Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_gcd_expr(lexer):
+ fragment = u'1^3+(5-5)*gcd(a,b)\n'
+ tokens = [
+ (Token.Number.Integer, u'1'),
+ (Token.Operator, u'^'),
+ (Token.Literal.Number.Integer, u'3'),
+ (Token.Operator, u'+'),
+ (Token.Punctuation, u'('),
+ (Token.Literal.Number.Integer, u'5'),
+ (Token.Operator, u'-'),
+ (Token.Literal.Number.Integer, u'5'),
+ (Token.Punctuation, u')'),
+ (Token.Operator, u'*'),
+ (Token.Name, u'gcd'),
+ (Token.Punctuation, u'('),
+ (Token.Name, u'a'),
+ (Token.Operator, u','),
+ (Token.Name, u'b'),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n')
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_if_statement(lexer):
+ fragment = u"""@( 0 > 3 ) ஆனால்
+ பதிப்பி "wont print"
முடி"""
- tokens = [
- (Token.Operator, u'@'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer,u'0'),
- (Token.Text, u' '),
- (Token.Operator,u'>'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'3'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Keyword, u'ஆனால்'),
- (Token.Text, u'\n'),
- (Token.Text, u'\t'),
- (Token.Keyword, u'பதிப்பி'),
- (Token.Text, u' '),
- (Token.Literal.String, u'"wont print"'),
- (Token.Text, u'\t'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'முடி'),
- (Token.Text, u'\n')
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+ tokens = [
+ (Token.Operator, u'@'),
+ (Token.Punctuation, u'('),
+ (Token.Text, u' '),
+ (Token.Literal.Number.Integer, u'0'),
+ (Token.Text, u' '),
+ (Token.Operator, u'>'),
+ (Token.Text, u' '),
+ (Token.Literal.Number.Integer, u'3'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u')'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'ஆனால்'),
+ (Token.Text, u'\n'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'பதிப்பி'),
+ (Token.Text, u' '),
+ (Token.Literal.String, u'"wont print"'),
+ (Token.Text, u'\n'),
+ (Token.Keyword, u'முடி'),
+ (Token.Text, u'\n')
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+
- def testFunction(self):
- fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015
+def test_function(lexer):
+ fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015
நிரல்பாகம் gcd ( x, y )
- மு = max(x,y)
- q = min(x,y)
+மு = max(x,y)
+ q = min(x,y)
- @( q == 0 ) ஆனால்
- பின்கொடு மு
- முடி
- பின்கொடு gcd( மு - q , q )
+@( q == 0 ) ஆனால்
+ பின்கொடு மு
+முடி
+பின்கொடு gcd( மு - q , q )
முடி\n"""
- tokens = [
- (Token.Comment.Single,
- u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85'
- u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'),
- (Token.Keyword,u'நிரல்பாகம்'),
- (Token.Text, u' '),
- (Token.Name, u'gcd'),
- (Token.Text, u' '),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Text, u' '),
- (Token.Name, u'y'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'max'),
- (Token.Punctuation, u'('),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Name, u'y'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u'='),
- (Token.Text, u' '),
- (Token.Name.Builtin, u'min'),
- (Token.Punctuation, u'('),
- (Token.Name, u'x'),
- (Token.Operator, u','),
- (Token.Name, u'y'),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Operator, u'@'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u'=='),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u' '),
- (Token.Keyword, u'ஆனால்'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'பின்கொடு'),
- (Token.Text, u' '),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'முடி'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'),
- (Token.Text, u' '),
- (Token.Name, u'gcd'),
- (Token.Punctuation, u'('),
- (Token.Text, u' '),
- (Token.Name, u'\u0bae\u0bc1'),
- (Token.Text, u' '),
- (Token.Operator, u'-'),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Operator, u','),
- (Token.Text, u' '),
- (Token.Name, u'q'),
- (Token.Text, u' '),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Keyword, u'முடி'), #u'\u0bae\u0bc1\u0b9f\u0bbf'),
- (Token.Text, u'\n')
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-if __name__ == "__main__":
- unittest.main()
+ tokens = [
+ (Token.Comment.Single,
+ u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85'
+ u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'),
+ (Token.Keyword, u'நிரல்பாகம்'),
+ (Token.Text, u' '),
+ (Token.Name, u'gcd'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'('),
+ (Token.Text, u' '),
+ (Token.Name, u'x'),
+ (Token.Operator, u','),
+ (Token.Text, u' '),
+ (Token.Name, u'y'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Name, u'\u0bae\u0bc1'),
+ (Token.Text, u' '),
+ (Token.Operator, u'='),
+ (Token.Text, u' '),
+ (Token.Name.Builtin, u'max'),
+ (Token.Punctuation, u'('),
+ (Token.Name, u'x'),
+ (Token.Operator, u','),
+ (Token.Name, u'y'),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Text, u' '),
+ (Token.Name, u'q'),
+ (Token.Text, u' '),
+ (Token.Operator, u'='),
+ (Token.Text, u' '),
+ (Token.Name.Builtin, u'min'),
+ (Token.Punctuation, u'('),
+ (Token.Name, u'x'),
+ (Token.Operator, u','),
+ (Token.Name, u'y'),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Text, u'\n'),
+ (Token.Operator, u'@'),
+ (Token.Punctuation, u'('),
+ (Token.Text, u' '),
+ (Token.Name, u'q'),
+ (Token.Text, u' '),
+ (Token.Operator, u'=='),
+ (Token.Text, u' '),
+ (Token.Literal.Number.Integer, u'0'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u')'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'ஆனால்'),
+ (Token.Text, u'\n'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'பின்கொடு'),
+ (Token.Text, u' '),
+ (Token.Name, u'\u0bae\u0bc1'),
+ (Token.Text, u'\n'),
+ (Token.Keyword, u'முடி'),
+ (Token.Text, u'\n'),
+ (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'),
+ (Token.Text, u' '),
+ (Token.Name, u'gcd'),
+ (Token.Punctuation, u'('),
+ (Token.Text, u' '),
+ (Token.Name, u'\u0bae\u0bc1'),
+ (Token.Text, u' '),
+ (Token.Operator, u'-'),
+ (Token.Text, u' '),
+ (Token.Name, u'q'),
+ (Token.Text, u' '),
+ (Token.Operator, u','),
+ (Token.Text, u' '),
+ (Token.Name, u'q'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Keyword, u'முடி'), # u'\u0bae\u0bc1\u0b9f\u0bbf'),
+ (Token.Text, u'\n')
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens