author    | Georg Brandl <georg@python.org> | 2019-05-06 18:02:47 +0200
committer | Georg Brandl <georg@python.org> | 2019-11-10 10:15:13 +0100
commit    | 7827966acdb6431636520d20fc3c148ce52de59b (patch)
tree      | 13a9316eb3eb964c22da0a08f046d44cd81470d0 /tests/test_perllexer.py
parent    | a281ff8367a3a5f4cc17c9956e9273593558d336 (diff)
download  | pygments-git-7827966acdb6431636520d20fc3c148ce52de59b.tar.gz
Remove unittest classes from the test suite.
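The commit applies the standard unittest-to-pytest conversion throughout: the shared class attribute becomes a module-scoped fixture, and self.assertEqual(...) calls become plain assert statements. A condensed before/after sketch of the pattern, for illustration only (not part of the commit):

    import pytest

    from pygments.lexers.perl import PerlLexer
    from pygments.token import String

    # Before: shared state lived on a unittest.TestCase subclass.
    #
    #     class RunawayRegexTest(unittest.TestCase):
    #         lexer = PerlLexer()
    #
    #         def test_match(self):
    #             tokens = list(self.lexer.get_tokens_unprocessed(r'/aa\tbb/'))
    #             self.assertEqual(len(tokens), 1)

    # After: the shared lexer becomes a module-scoped fixture, built once
    # per test module and injected into any test that names it as a parameter.
    @pytest.fixture(scope='module')
    def lexer():
        yield PerlLexer()

    def test_match(lexer):
        tokens = list(lexer.get_tokens_unprocessed(r'/aa\tbb/'))
        # Plain asserts replace self.assertEqual; on failure, pytest's
        # assertion rewriting reports both operands.
        assert len(tokens) == 1
        assert tokens[0][1] == String.Regex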
Diffstat (limited to 'tests/test_perllexer.py')

-rw-r--r-- | tests/test_perllexer.py | 250
1 file changed, 140 insertions, 110 deletions
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
index 8e5af3e5..8849bacf 100644
--- a/tests/test_perllexer.py
+++ b/tests/test_perllexer.py
@@ -8,145 +8,175 @@
 """

 import time
-import unittest
+
+import pytest

 from pygments.token import Keyword, Name, String, Text
 from pygments.lexers.perl import PerlLexer


-class RunawayRegexTest(unittest.TestCase):
-    # A previous version of the Perl lexer would spend a great deal of
-    # time backtracking when given particular strings. These tests show that
-    # the runaway backtracking doesn't happen any more (at least for the given
-    # cases).
+@pytest.fixture(scope='module')
+def lexer():
+    yield PerlLexer()
+
+
+# Test runaway regexes.
+# A previous version of the Perl lexer would spend a great deal of
+# time backtracking when given particular strings. These tests show that
+# the runaway backtracking doesn't happen any more (at least for the given
+# cases).
+
+
+# Test helpers.
+
+def assert_single_token(lexer, s, token):
+    """Show that a given string generates only one token."""
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    assert len(tokens) == 1
+    assert s == tokens[0][2]
+    assert token == tokens[0][1]
+
+
+def assert_tokens(lexer, strings, expected_tokens):
+    """Show that a given string generates the expected tokens."""
+    tokens = list(lexer.get_tokens_unprocessed(''.join(strings)))
+    assert len(tokens) == len(expected_tokens)
+    for index, s in enumerate(strings):
+        assert s == tokens[index][2]
+        assert expected_tokens[index] == tokens[index][1]
+
+
+def assert_fast_tokenization(lexer, s):
+    """Show that a given string is tokenized quickly."""
+    start = time.time()
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    end = time.time()
+    # Isn't 10 seconds kind of a long time? Yes, but we don't want false
+    # positives when the tests are starved for CPU time.
+    if end-start > 10:
+        pytest.fail('tokenization took too long')
+    return tokens
+
+
+# Strings.
+
+def test_single_quote_strings(lexer):
+    assert_single_token(lexer, r"'foo\tbar\\\'baz'", String)
+    assert_fast_tokenization(lexer, "'" + '\\'*999)
+
+
+def test_double_quote_strings(lexer):
+    assert_single_token(lexer, r'"foo\tbar\\\"baz"', String)
+    assert_fast_tokenization(lexer, '"' + '\\'*999)
+
+
+def test_backtick_strings(lexer):
+    assert_single_token(lexer, r'`foo\tbar\\\`baz`', String.Backtick)
+    assert_fast_tokenization(lexer, '`' + '\\'*999)
+
+
+# Regex matches with various delimiters.
+
+def test_match(lexer):
+    assert_single_token(lexer, r'/aa\tbb/', String.Regex)
+    assert_fast_tokenization(lexer, '/' + '\\'*999)
+
+
+def test_match_with_slash(lexer):
+    assert_tokens(lexer, ['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm/xxx\n' + '\\'*999)
+
+
+def test_match_with_bang(lexer):
+    assert_tokens(lexer, ['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm!' + '\\'*999)
+
+
+def test_match_with_brace(lexer):
+    assert_tokens(lexer, ['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm{' + '\\'*999)
+
+
+def test_match_with_angle_brackets(lexer):
+    assert_tokens(lexer, ['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm<' + '\\'*999)
+
+
+def test_match_with_parenthesis(lexer):
+    assert_tokens(lexer, ['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm(' + '\\'*999)
+
+
+def test_match_with_at_sign(lexer):
+    assert_tokens(lexer, ['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm@' + '\\'*999)
+
-    lexer = PerlLexer()
+def test_match_with_percent_sign(lexer):
+    assert_tokens(lexer, ['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm%' + '\\'*999)

-    ### Test helpers.

-    def assert_single_token(self, s, token):
-        """Show that a given string generates only one token."""
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        self.assertEqual(len(tokens), 1, tokens)
-        self.assertEqual(s, tokens[0][2])
-        self.assertEqual(token, tokens[0][1])

+def test_match_with_dollar_sign(lexer):
+    assert_tokens(lexer, ['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm$' + '\\'*999)

-    def assert_tokens(self, strings, expected_tokens):
-        """Show that a given string generates the expected tokens."""
-        tokens = list(self.lexer.get_tokens_unprocessed(''.join(strings)))
-        self.assertEqual(len(tokens), len(expected_tokens), tokens)
-        for index, s in enumerate(strings):
-            self.assertEqual(s, tokens[index][2])
-            self.assertEqual(expected_tokens[index], tokens[index][1])

-    def assert_fast_tokenization(self, s):
-        """Show that a given string is tokenized quickly."""
-        start = time.time()
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        end = time.time()
-        # Isn't 10 seconds kind of a long time? Yes, but we don't want false
-        # positives when the tests are starved for CPU time.
-        if end-start > 10:
-            self.fail('tokenization took too long')
-        return tokens

+# Regex substitutions with various delimeters.

-    ### Strings.

+def test_substitution_with_slash(lexer):
+    assert_single_token(lexer, 's/aaa/bbb/g', String.Regex)
+    assert_fast_tokenization(lexer, 's/foo/' + '\\'*999)

-    def test_single_quote_strings(self):
-        self.assert_single_token(r"'foo\tbar\\\'baz'", String)
-        self.assert_fast_tokenization("'" + '\\'*999)

-    def test_double_quote_strings(self):
-        self.assert_single_token(r'"foo\tbar\\\"baz"', String)
-        self.assert_fast_tokenization('"' + '\\'*999)

+def test_substitution_with_at_sign(lexer):
+    assert_single_token(lexer, r's@aaa@bbb@g', String.Regex)
+    assert_fast_tokenization(lexer, 's@foo@' + '\\'*999)

-    def test_backtick_strings(self):
-        self.assert_single_token(r'`foo\tbar\\\`baz`', String.Backtick)
-        self.assert_fast_tokenization('`' + '\\'*999)

-    ### Regex matches with various delimiters.

+def test_substitution_with_percent_sign(lexer):
+    assert_single_token(lexer, r's%aaa%bbb%g', String.Regex)
+    assert_fast_tokenization(lexer, 's%foo%' + '\\'*999)

-    def test_match(self):
-        self.assert_single_token(r'/aa\tbb/', String.Regex)
-        self.assert_fast_tokenization('/' + '\\'*999)

-    def test_match_with_slash(self):
-        self.assert_tokens(['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m/xxx\n' + '\\'*999)

+def test_substitution_with_brace(lexer):
+    assert_single_token(lexer, r's{aaa}', String.Regex)
+    assert_fast_tokenization(lexer, 's{' + '\\'*999)

-    def test_match_with_bang(self):
-        self.assert_tokens(['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m!' + '\\'*999)

-    def test_match_with_brace(self):
-        self.assert_tokens(['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m{' + '\\'*999)

+def test_substitution_with_angle_bracket(lexer):
+    assert_single_token(lexer, r's<aaa>', String.Regex)
+    assert_fast_tokenization(lexer, 's<' + '\\'*999)

-    def test_match_with_angle_brackets(self):
-        self.assert_tokens(['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m<' + '\\'*999)

-    def test_match_with_parenthesis(self):
-        self.assert_tokens(['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m(' + '\\'*999)

+def test_substitution_with_square_bracket(lexer):
+    assert_single_token(lexer, r's[aaa]', String.Regex)
+    assert_fast_tokenization(lexer, 's[' + '\\'*999)

-    def test_match_with_at_sign(self):
-        self.assert_tokens(['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m@' + '\\'*999)

-    def test_match_with_percent_sign(self):
-        self.assert_tokens(['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m%' + '\\'*999)

+def test_substitution_with_parenthesis(lexer):
+    assert_single_token(lexer, r's(aaa)', String.Regex)
+    assert_fast_tokenization(lexer, 's(' + '\\'*999)

-    def test_match_with_dollar_sign(self):
-        self.assert_tokens(['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m$' + '\\'*999)

-    ### Regex substitutions with various delimeters.

+# Namespaces/modules

-    def test_substitution_with_slash(self):
-        self.assert_single_token('s/aaa/bbb/g', String.Regex)
-        self.assert_fast_tokenization('s/foo/' + '\\'*999)

+def test_package_statement(lexer):
+    assert_tokens(lexer, ['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['package', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

-    def test_substitution_with_at_sign(self):
-        self.assert_single_token(r's@aaa@bbb@g', String.Regex)
-        self.assert_fast_tokenization('s@foo@' + '\\'*999)

-    def test_substitution_with_percent_sign(self):
-        self.assert_single_token(r's%aaa%bbb%g', String.Regex)
-        self.assert_fast_tokenization('s%foo%' + '\\'*999)
-
-    def test_substitution_with_brace(self):
-        self.assert_single_token(r's{aaa}', String.Regex)
-        self.assert_fast_tokenization('s{' + '\\'*999)

+def test_use_statement(lexer):
+    assert_tokens(lexer, ['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['use', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

-    def test_substitution_with_angle_bracket(self):
-        self.assert_single_token(r's<aaa>', String.Regex)
-        self.assert_fast_tokenization('s<' + '\\'*999)
-
-    def test_substitution_with_square_bracket(self):
-        self.assert_single_token(r's[aaa]', String.Regex)
-        self.assert_fast_tokenization('s[' + '\\'*999)
-
-    def test_substitution_with_parenthesis(self):
-        self.assert_single_token(r's(aaa)', String.Regex)
-        self.assert_fast_tokenization('s(' + '\\'*999)
-
-    ### Namespaces/modules
-
-    def test_package_statement(self):
-        self.assert_tokens(['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['package', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

-    def test_use_statement(self):
-        self.assert_tokens(['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['use', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

+def test_no_statement(lexer):
+    assert_tokens(lexer, ['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['no', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

-    def test_no_statement(self):
-        self.assert_tokens(['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['no', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])

-    def test_require_statement(self):
-        self.assert_tokens(['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
+def test_require_statement(lexer):
+    assert_tokens(lexer, ['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', ' ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
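A note on the design choice: scope='module' makes pytest construct the PerlLexer once for the whole file, mirroring the old class-level "lexer = PerlLexer()" attribute, whereas the default function scope would rebuild it for every test. With the TestCase class gone, the module no longer imports unittest at all and runs under plain pytest, e.g. "pytest tests/test_perllexer.py".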