Diffstat (limited to 'tests/test_javascript.py')
-rw-r--r--  tests/test_javascript.py | 73
1 file changed, 36 insertions(+), 37 deletions(-)
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index 040d776c..25e06fdc 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -3,12 +3,10 @@
     Javascript tests
     ~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-
 import pytest
 
 from pygments.lexers import CoffeeScriptLexer
@@ -39,11 +37,15 @@ COFFEE_SLASH_GOLDEN = [
 ]
 
 
+@pytest.fixture(scope='module')
+def lexer():
+    yield CoffeeScriptLexer()
+
+
 @pytest.mark.parametrize('golden', COFFEE_SLASH_GOLDEN)
-def test_coffee_slashes(golden):
+def test_coffee_slashes(lexer, golden):
     input_str, slashes_are_regex_here = golden
-    lex = CoffeeScriptLexer()
-    output = list(lex.get_tokens(input_str))
+    output = list(lexer.get_tokens(input_str))
     print(output)
     for t, s in output:
         if '/' in s:
@@ -51,36 +53,33 @@ def test_coffee_slashes(golden):
             assert is_regex == slashes_are_regex_here, (t, s)
 
 
-class CoffeeTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = CoffeeScriptLexer()
+def test_mixed_slashes(lexer):
+    fragment = u'a?/foo/:1/2;\n'
+    tokens = [
+        (Token.Name.Other, u'a'),
+        (Token.Operator, u'?'),
+        (Token.Literal.String.Regex, u'/foo/'),
+        (Token.Operator, u':'),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'/'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testMixedSlashes(self):
-        fragment = u'a?/foo/:1/2;\n'
-        tokens = [
-            (Token.Name.Other, u'a'),
-            (Token.Operator, u'?'),
-            (Token.Literal.String.Regex, u'/foo/'),
-            (Token.Operator, u':'),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'/'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testBewareInfiniteLoop(self):
-        # This demonstrates the case that "This isn't really guarding" comment
-        # refers to.
-        fragment = '/a/x;\n'
-        tokens = [
-            (Token.Text, ''),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'a'),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'x'),
-            (Token.Punctuation, ';'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_beware_infinite_loop(lexer):
+    # This demonstrates the case that "This isn't really guarding" comment
+    # refers to.
+    fragment = '/a/x;\n'
+    tokens = [
+        (Token.Text, ''),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'a'),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'x'),
+        (Token.Punctuation, ';'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
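
The port above swaps the unittest.TestCase scaffolding for a module-scoped pytest fixture: CoffeeScriptLexer() is built once per test module and shared by every test that names lexer as a parameter, instead of being re-instantiated in setUp or inside each test. The COFFEE_SLASH_GOLDEN entries themselves fall outside the hunk context, so the sketch below restates the fixture-plus-parametrize pattern in self-contained form; the GOLDEN pairs are made up for illustration and are not the data from the real test file.

import pytest
from pygments.lexers import CoffeeScriptLexer
from pygments.token import Token

# Hypothetical golden pairs: (CoffeeScript fragment, whether the '/'
# characters in it should lex as part of a regex literal). These stand
# in for the elided COFFEE_SLASH_GOLDEN data.
GOLDEN = [
    (u'x = 1/2 + 3/4', False),  # every '/' here is division
    (u'x = /abc/', True),       # '/' delimits a regex literal
]

@pytest.fixture(scope='module')
def lexer():
    # Constructed once for the whole module, then shared by all tests
    # that request the 'lexer' fixture.
    yield CoffeeScriptLexer()

@pytest.mark.parametrize('golden', GOLDEN)
def test_slashes(lexer, golden):
    input_str, slashes_are_regex = golden
    for tok, val in lexer.get_tokens(input_str):
        if '/' in val:
            is_regex = tok is Token.Literal.String.Regex
            assert is_regex == slashes_are_regex, (tok, val)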
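
For reference, get_tokens() yields (token_type, text) pairs, which is exactly the shape of the expected-token lists in test_mixed_slashes and test_beware_infinite_loop. A quick interactive check of the mixed-slashes fragment, relying only on assertions already present in the patch above:

from pygments.lexers import CoffeeScriptLexer
from pygments.token import Token

lexer = CoffeeScriptLexer()
tokens = list(lexer.get_tokens(u'a?/foo/:1/2;\n'))
# '/foo/' lexes as a single regex literal, while the '/' between
# 1 and 2 lexes as the division operator.
assert (Token.Literal.String.Regex, u'/foo/') in tokens
assert (Token.Operator, u'/') in tokens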