author | Georg Brandl <georg@python.org> | 2021-01-18 21:38:53 +0100
committer | Georg Brandl <georg@python.org> | 2021-01-20 10:52:51 +0100
commit | dc9bf0c256dbd88c72349822d59b25f9d8225dc6 (patch)
tree | 5ead8cd9fd013a4ad132d258b6ba8c7f5a5cc1c7
parent | 91474bd6788c6c3124edba1a09fa20860200c92a (diff)
download | pygments-git-dc9bf0c256dbd88c72349822d59b25f9d8225dc6.tar.gz
tests: code style fixups
-rw-r--r-- | pygments/lexers/_mysql_builtins.py | 4
-rw-r--r-- | tests/conftest.py | 2
-rw-r--r-- | tests/lexers/conftest.py | 2
-rw-r--r-- | tests/test_crystal.py | 3
-rw-r--r-- | tests/test_data.py | 113
-rw-r--r-- | tests/test_guess.py | 2
-rw-r--r-- | tests/test_html_formatter.py | 1
-rw-r--r-- | tests/test_html_lexer.py | 45
-rw-r--r-- | tests/test_java.py | 2
-rw-r--r-- | tests/test_javascript.py | 11
-rw-r--r-- | tests/test_markdown_lexer.py | 2
-rw-r--r-- | tests/test_mysql.py | 18
-rw-r--r-- | tests/test_sql.py | 10
-rw-r--r-- | tests/test_tnt.py | 35
-rw-r--r-- | tests/test_util.py | 2
15 files changed, 131 insertions, 121 deletions
diff --git a/pygments/lexers/_mysql_builtins.py b/pygments/lexers/_mysql_builtins.py
index b6f2e2aa..dfc82bcc 100644
--- a/pygments/lexers/_mysql_builtins.py
+++ b/pygments/lexers/_mysql_builtins.py
@@ -1,10 +1,10 @@
 """
     pygments.lexers._mysql_builtins
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Self-updating data files for the MySQL lexer.
 
-    :copyright: Copyright 2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/tests/conftest.py b/tests/conftest.py
index 7ff5ff18..e6a9b01a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -12,7 +12,7 @@
     The directory must match the alias of the lexer to be used.
     Populate only the input, then just `--update-goldens`.
 
-    :copyright: Copyright 2021 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 from pathlib import Path
diff --git a/tests/lexers/conftest.py b/tests/lexers/conftest.py
index 26d484ee..b4766421 100644
--- a/tests/lexers/conftest.py
+++ b/tests/lexers/conftest.py
@@ -12,7 +12,7 @@
     The directory must match the alias of the lexer to be used.
     Populate only the input, then just `--update-goldens`.
 
-    :copyright: Copyright 2021 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
diff --git a/tests/test_crystal.py b/tests/test_crystal.py
index c97103a5..61e020f1 100644
--- a/tests/test_crystal.py
+++ b/tests/test_crystal.py
@@ -8,8 +8,7 @@
 
 import pytest
 
-from pygments.token import Text, Operator, Keyword, Name, String, Number, \
-    Punctuation, Error
+from pygments.token import Text, String, Number, Punctuation, Error
 
 from pygments.lexers import CrystalLexer
diff --git a/tests/test_data.py b/tests/test_data.py
index 300ad0fe..ff209bbc 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -2,7 +2,7 @@
     Data Tests
     ~~~~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
@@ -10,7 +10,7 @@
 import time
 
 import pytest
 
-from pygments.lexers.data import JsonLexer, JsonBareObjectLexer, JsonLdLexer, YamlLexer
+from pygments.lexers.data import JsonLexer, JsonBareObjectLexer, JsonLdLexer
 from pygments.token import Token, Punctuation, Text, Number, String, Keyword, Name
@@ -32,54 +32,56 @@ def lexer_json_ld():
 @pytest.mark.parametrize(
     'text, expected_token_types',
     (
-        # Integers
-        ('0', (Number.Integer,)),
-        ('-1', (Number.Integer,)),
-        ('1234567890', (Number.Integer,)),
-        ('-1234567890', (Number.Integer,)),
-
-        # Floats, including scientific notation
-        ('123456789.0123456789', (Number.Float,)),
-        ('-123456789.0123456789', (Number.Float,)),
-        ('1e10', (Number.Float,)),
-        ('-1E10', (Number.Float,)),
-        ('1e-10', (Number.Float,)),
-        ('-1E+10', (Number.Float,)),
-        ('1.0e10', (Number.Float,)),
-        ('-1.0E10', (Number.Float,)),
-        ('1.0e-10', (Number.Float,)),
-        ('-1.0E+10', (Number.Float,)),
-
-        # Strings (escapes are tested elsewhere)
-        ('""', (String.Double,)),
-        ('"abc"', (String.Double,)),
-        ('"ひらがな"', (String.Double,)),
-        ('"123"', (String.Double,)),
-        ('"[]"', (String.Double,)),
-        ('"{}"', (String.Double,)),
-        ('"true"', (String.Double,)),
-        ('"false"', (String.Double,)),
-        ('"null"', (String.Double,)),
-        ('":,"', (String.Double,)),
-
-        # Constants
-        ('true', (Keyword.Constant, )),
-        ('false', (Keyword.Constant, )),
-        ('null', (Keyword.Constant, )),
-
-        # Whitespace
-        ('\u0020', (Text,)),  # space
-        ('\u000a', (Text,)),  # newline
-        ('\u000d', (Text,)),  # carriage return
-        ('\u0009', (Text,)),  # tab
-
-        # Arrays
-        ('[]', (Punctuation,)),
-        ('["a", "b"]', (Punctuation, String.Double, Punctuation, Text, String.Double, Punctuation)),
-
-        # Objects
-        ('{}', (Punctuation,)),
-        ('{"a": "b"}', (Punctuation, Name.Tag, Punctuation, Text, String.Double, Punctuation)),
+        # Integers
+        ('0', (Number.Integer,)),
+        ('-1', (Number.Integer,)),
+        ('1234567890', (Number.Integer,)),
+        ('-1234567890', (Number.Integer,)),
+
+        # Floats, including scientific notation
+        ('123456789.0123456789', (Number.Float,)),
+        ('-123456789.0123456789', (Number.Float,)),
+        ('1e10', (Number.Float,)),
+        ('-1E10', (Number.Float,)),
+        ('1e-10', (Number.Float,)),
+        ('-1E+10', (Number.Float,)),
+        ('1.0e10', (Number.Float,)),
+        ('-1.0E10', (Number.Float,)),
+        ('1.0e-10', (Number.Float,)),
+        ('-1.0E+10', (Number.Float,)),
+
+        # Strings (escapes are tested elsewhere)
+        ('""', (String.Double,)),
+        ('"abc"', (String.Double,)),
+        ('"ひらがな"', (String.Double,)),
+        ('"123"', (String.Double,)),
+        ('"[]"', (String.Double,)),
+        ('"{}"', (String.Double,)),
+        ('"true"', (String.Double,)),
+        ('"false"', (String.Double,)),
+        ('"null"', (String.Double,)),
+        ('":,"', (String.Double,)),
+
+        # Constants
+        ('true', (Keyword.Constant, )),
+        ('false', (Keyword.Constant, )),
+        ('null', (Keyword.Constant, )),
+
+        # Whitespace
+        ('\u0020', (Text,)),  # space
+        ('\u000a', (Text,)),  # newline
+        ('\u000d', (Text,)),  # carriage return
+        ('\u0009', (Text,)),  # tab
+
+        # Arrays
+        ('[]', (Punctuation,)),
+        ('["a", "b"]', (Punctuation, String.Double, Punctuation,
+                        Text, String.Double, Punctuation)),
+
+        # Objects
+        ('{}', (Punctuation,)),
+        ('{"a": "b"}', (Punctuation, Name.Tag, Punctuation,
+                        Text, String.Double, Punctuation)),
     )
 )
 def test_json_literals_positive_match(lexer_json, text, expected_token_types):
@@ -87,15 +89,16 @@ def test_json_literals_positive_match(lexer_json, text, expected_token_types):
     tokens = list(lexer_json.get_tokens_unprocessed(text))
 
     assert len(tokens) == len(expected_token_types)
-    assert all(token[1] is expected_token for token, expected_token in zip(tokens, expected_token_types))
+    assert all(token[1] is expected_token
+               for token, expected_token in zip(tokens, expected_token_types))
 
     assert ''.join(token[2] for token in tokens) == text
 
 
 @pytest.mark.parametrize(
     'text',
     (
-        '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
-        'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
+        '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
+        'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
     )
 )
 def test_json_object_key_escapes_positive_match(lexer_json, text):
@@ -110,8 +113,8 @@ def test_json_object_key_escapes_positive_match(lexer_json, text):
 @pytest.mark.parametrize(
     'text',
     (
-        '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
-        'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
+        '"', '\\', '/', 'b', 'f', 'n', 'r', 't',
+        'u0123', 'u4567', 'u89ab', 'ucdef', 'uABCD', 'uEF01',
     )
 )
 def test_json_string_escapes_positive_match(lexer_json, text):
diff --git a/tests/test_guess.py b/tests/test_guess.py
index 5b43aeba..16109a4a 100644
--- a/tests/test_guess.py
+++ b/tests/test_guess.py
@@ -37,7 +37,7 @@ def test_guess_lexer_singularity():
 
 @pytest.mark.skip(reason="This is identified as MIME")
 def test_guess_lexer_matlab():
-    lx = lexers.guess_lexer(r'A \ B')
+    lx = guess_lexer(r'A \ B')
     assert lx.__class__.__name__ == 'OctaveLexer'
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 3ed53e53..a4be6126 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -6,7 +6,6 @@
     :license: BSD, see LICENSE for details.
 """
 
-import io
 import os
 import re
 import tempfile
diff --git a/tests/test_html_lexer.py b/tests/test_html_lexer.py
index cdab6310..26f1836f 100644
--- a/tests/test_html_lexer.py
+++ b/tests/test_html_lexer.py
@@ -2,7 +2,7 @@
     HTML Lexer Tests
     ~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2020-2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
@@ -13,21 +13,14 @@
 import pytest
 
 from pygments.lexers.html import HtmlLexer
 from pygments.token import Token
 
+
 @pytest.fixture(scope='module')
 def lexer_html():
     yield HtmlLexer()
 
-def test_simple_html(lexer_html):
-    """ extremely basic happy-path case
-
-    more tests are in test_examplefiles """
-
-    fragment = "<html>\n\t<body>\n\t\thello world\n\t</body>\n</html>"
-    tokens = list(lexer_html.get_tokens(fragment))
-    assert all(x[1] != Token.Error for x in tokens)
 
 def test_happy_javascript_fragment(lexer_html):
-    """ valid, even long Javascript fragments should still get parsed ok """
+    """valid, even long Javascript fragments should still get parsed ok"""
 
     fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*2000+"</script>"
     start_time = time.time()
@@ -35,8 +28,9 @@ def test_happy_javascript_fragment(lexer_html):
     assert all(x[1] != Token.Error for x in tokens)
     assert time.time() - start_time < 5, 'The HTML lexer might have an expensive happy-path script case'
 
+
 def test_happy_css_fragment(lexer_html):
-    """ valid, even long CSS fragments should still get parsed ok """
+    """valid, even long CSS fragments should still get parsed ok"""
 
     fragment = "<style>"+".ui-helper-hidden{display:none}"*2000+"</style>"
     start_time = time.time()
@@ -44,8 +38,9 @@ def test_happy_css_fragment(lexer_html):
     assert all(x[1] != Token.Error for x in tokens)
     assert time.time() - start_time < 5, 'The HTML lexer might have an expensive happy-path style case'
 
+
 def test_long_unclosed_javascript_fragment(lexer_html):
-    """ unclosed, long Javascript fragments should parse quickly """
+    """unclosed, long Javascript fragments should parse quickly"""
 
     reps = 2000
     fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*reps
@@ -77,8 +72,9 @@ def test_long_unclosed_javascript_fragment(lexer_html):
     # and of course, the newline we get for free from get_tokens
     assert tokens[-1] == (Token.Text, "\n")
 
+
 def test_long_unclosed_css_fragment(lexer_html):
-    """ unclosed, long CSS fragments should parse quickly """
+    """unclosed, long CSS fragments should parse quickly"""
 
     reps = 2000
     fragment = "<style>"+".ui-helper-hidden{display:none}"*reps
@@ -109,20 +105,21 @@ def test_long_unclosed_css_fragment(lexer_html):
     # and of course, the newline we get for free from get_tokens
     assert tokens[-1] == (Token.Text, "\n")
 
+
 def test_unclosed_fragment_with_newline_recovery(lexer_html):
-    """ unclosed Javascript fragments should recover on the next line """
+    """unclosed Javascript fragments should recover on the next line"""
 
     fragment = "<script type=\"text/javascript\">"+"alert(\"hi\");"*20+"\n<div>hi</div>"
     tokens = list(lexer_html.get_tokens(fragment))
     recovery_tokens = [
-        (Token.Punctuation, '<'),
-        (Token.Name.Tag, 'div'),
-        (Token.Punctuation, '>'),
-        (Token.Text, 'hi'),
-        (Token.Punctuation, '<'),
-        (Token.Punctuation, '/'),
-        (Token.Name.Tag, 'div'),
-        (Token.Punctuation, '>'),
-        (Token.Text, '\n')]
+        (Token.Punctuation, '<'),
+        (Token.Name.Tag, 'div'),
+        (Token.Punctuation, '>'),
+        (Token.Text, 'hi'),
+        (Token.Punctuation, '<'),
+        (Token.Punctuation, '/'),
+        (Token.Name.Tag, 'div'),
+        (Token.Punctuation, '>'),
+        (Token.Text, '\n'),
+    ]
     assert tokens[-1*len(recovery_tokens):] == recovery_tokens
-
diff --git a/tests/test_java.py b/tests/test_java.py
index 4e253cee..e3f8182d 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -10,7 +10,7 @@
 import time
 
 import pytest
 
-from pygments.token import Keyword, Name, Number, Punctuation, String, Text
+from pygments.token import String
 from pygments.lexers import JavaLexer
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index 3109f3fe..740b7831 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -8,8 +8,8 @@
 
 import pytest
 
-from pygments.lexers.javascript import JavascriptLexer, TypeScriptLexer
-from pygments.token import Number, Token
+from pygments.lexers.javascript import JavascriptLexer
+from pygments.token import Number
 
 
 @pytest.fixture(scope='module')
@@ -29,7 +29,8 @@ def test_float_literal_positive_matches(lexer, text):
     assert list(lexer.get_tokens(text))[0] == (Number.Float, text)
 
 
-@pytest.mark.parametrize('text', ('.\u0b6a', '.', '1..', '1n', '1ee', '1e', '1e-', '1e--1', '1e++1', '1e1.0'))
+@pytest.mark.parametrize('text', ('.\u0b6a', '.', '1..', '1n', '1ee', '1e',
+                                  '1e-', '1e--1', '1e++1', '1e1.0'))
 def test_float_literals_negative_matches(lexer, text):
     """Test text that should **not** be tokenized as float literals."""
     assert list(lexer.get_tokens(text))[0] != (Number.Float, text)
@@ -81,7 +82,3 @@ def test_hexadecimal_literal_positive_matches(lexer, text):
 def test_hexadecimal_literals_negative_matches(lexer, text):
     """Test text that should **not** be tokenized as hexadecimal literals."""
     assert list(lexer.get_tokens(text))[0] != (Number.Hex, text)
-
-@pytest.fixture(scope='module')
-def ts_lexer():
-    yield TypeScriptLexer()
diff --git a/tests/test_markdown_lexer.py b/tests/test_markdown_lexer.py
index 8415c212..cbbc1a70 100644
--- a/tests/test_markdown_lexer.py
+++ b/tests/test_markdown_lexer.py
@@ -7,7 +7,7 @@
 """
 import pytest
 
-from pygments.token import Generic, Token, String, Keyword, Name
+from pygments.token import Generic, Token, String
 from pygments.lexers.markup import MarkdownLexer
diff --git a/tests/test_mysql.py b/tests/test_mysql.py
index c71cfb45..845440b9 100644
--- a/tests/test_mysql.py
+++ b/tests/test_mysql.py
@@ -10,16 +10,8 @@
 import pytest
 
 from pygments.lexers.sql import MySqlLexer
-from pygments.token import \
-    Comment, \
-    Keyword, \
-    Literal, \
-    Name, \
-    Number, \
-    Operator, \
-    Punctuation, \
-    String, \
-    Text
+from pygments.token import Comment, Keyword, Literal, Name, Number, Operator, \
+    Punctuation, String, Text
 
 
 @pytest.fixture(scope='module')
@@ -27,7 +19,8 @@ def lexer():
     yield MySqlLexer()
 
 
-@pytest.mark.parametrize('text', ('1', '22', '22 333', '22 a', '22+', '22)', '22\n333', '22\r\n333'))
+@pytest.mark.parametrize('text', ('1', '22', '22 333', '22 a', '22+', '22)',
+                                  '22\n333', '22\r\n333'))
 def test_integer_literals_positive_match(lexer, text):
     """Validate that integer literals are tokenized as integers."""
     token = list(lexer.get_tokens(text))[0]
@@ -35,7 +28,8 @@ def test_integer_literals_positive_match(lexer, text):
     assert token[1] in {'1', '22'}
 
 
-@pytest.mark.parametrize('text', ('1a', '1A', '1.', '1ひ', '1$', '1_', '1\u0080', '1\uffff'))
+@pytest.mark.parametrize('text', ('1a', '1A', '1.', '1ひ', '1$', '1_',
+                                  '1\u0080', '1\uffff'))
 def test_integer_literals_negative_match(lexer, text):
     """Validate that non-integer texts are not matched as integers."""
     assert list(lexer.get_tokens(text))[0][0] != Number.Integer
diff --git a/tests/test_sql.py b/tests/test_sql.py
index 590b0cd5..8fbd3eea 100644
--- a/tests/test_sql.py
+++ b/tests/test_sql.py
@@ -10,7 +10,7 @@
 import pytest
 
 from pygments.lexers.sql import name_between_bracket_re, \
     name_between_backtick_re, tsql_go_re, tsql_declare_re, \
-    tsql_variable_re, MySqlLexer, SqlLexer, TransactSqlLexer
+    tsql_variable_re, MySqlLexer, TransactSqlLexer
 from pygments.token import Comment, Name, Number, Punctuation, Whitespace
@@ -60,9 +60,10 @@ def test_can_lex_integer(lexer):
 
 
 def test_can_lex_names(lexer):
-    _assert_are_tokens_of_type(lexer,
-                               'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
-                               Name)
+    _assert_are_tokens_of_type(
+        lexer,
+        'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
+        Name)
 
 
 def test_can_lex_comments(lexer):
@@ -90,7 +91,6 @@ def test_can_match_analyze_text_res():
 
 def test_can_analyze_text():
     mysql_lexer = MySqlLexer()
-    sql_lexer = SqlLexer()
     tsql_lexer = TransactSqlLexer()
     code_to_expected_lexer_map = {
         'select `a`, `bc` from some': mysql_lexer,
diff --git a/tests/test_tnt.py b/tests/test_tnt.py
index 0dcfef39..b52b35c6 100644
--- a/tests/test_tnt.py
+++ b/tests/test_tnt.py
@@ -9,13 +9,15 @@
 
 import pytest
 
 from pygments.lexers.tnt import TNTLexer
-from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
-    Punctuation, Error
+from pygments.token import Text, Operator, Keyword, Name, Number, \
+    Punctuation, Error
+
 
 @pytest.fixture(autouse=True)
 def lexer():
     yield TNTLexer()
 
+
 # whitespace
 
 @pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
@@ -25,6 +27,7 @@ def test_whitespace_positive_matches(lexer, text):
     assert lexer.whitespace(0, text, True) == len(text) - 1
     assert lexer.cur[-1] == (0, Text, text[:-1])
 
+
 @pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
 def test_whitespace_negative_matches(lexer, text):
     """Test statements that do not start with whitespace text."""
@@ -33,6 +36,7 @@ def test_whitespace_negative_matches(lexer, text):
         lexer.whitespace(0, text, True)
     assert not lexer.cur
 
+
 # terms that can go on either side of an = sign
 
 @pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
@@ -41,6 +45,7 @@ def test_variable_positive_matches(lexer, text):
     assert lexer.variable(0, text) == len(text) - 1
     assert lexer.cur[-1] == (0, Name.Variable, text[:-1])
 
+
 @pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
 def test_variable_negative_matches(lexer, text):
     """Test fragments that should **not** be tokenized as variables."""
@@ -48,6 +53,7 @@ def test_variable_negative_matches(lexer, text):
         lexer.variable(0, text)
     assert not lexer.cur
 
+
 @pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
 def test_numeral_positive_matches(lexer, text):
     """Test fragments that should be tokenized as (unary) numerals."""
@@ -56,6 +62,7 @@ def test_numeral_positive_matches(lexer, text):
     if text != '0':
         assert lexer.cur[-2] == (0, Number.Integer, text[:-1])
 
+
 @pytest.mark.parametrize('text', (
     '(a+b)', '(b.a)', '(c+d)'
 ))
@@ -67,6 +74,7 @@ def test_multiterm_positive_matches(lexer, text):
         Name.Variable, Punctuation
     ]
 
+
 @pytest.mark.parametrize('text', ('1', '=', 'A'))
 def test_term_negative_matches(lexer, text):
     """Test fragments that should not be tokenized as terms at all."""
@@ -74,6 +82,7 @@ def test_term_negative_matches(lexer, text):
         lexer.term(0, text)
     assert not lexer.cur
 
+
 # full statements, minus rule
 
 @pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
@@ -82,6 +91,7 @@ def test_negator_positive_matches(lexer, text):
     assert lexer.formula(0, text) == len(text) - 1
     assert lexer.cur[0] == (0, Operator, text[:-4])
 
+
 @pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
 def test_quantifier_positive_matches(lexer, text):
     """Test statements that start with a quantifier."""
@@ -90,6 +100,7 @@ def test_quantifier_positive_matches(lexer, text):
     assert lexer.cur[1][1] == Name.Variable
     assert lexer.cur[2] == (2, Punctuation, ':')
 
+
 @pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
 def test_quantifier_negative_matches(lexer, text):
     """Test quantifiers that are only partially valid."""
@@ -99,6 +110,7 @@ def test_quantifier_negative_matches(lexer, text):
     assert lexer.cur[0][1] == Keyword.Declaration
     assert lexer.cur[1][1] == Name.Variable
 
+
 @pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
 def test_compound_positive_matches(lexer, text):
     """Test statements that consist of multiple formulas compounded."""
@@ -107,6 +119,7 @@ def test_compound_positive_matches(lexer, text):
     assert lexer.cur[4][1] == Operator
     assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')
 
+
 @pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
 def test_compound_negative_matches(lexer, text):
     """Test statements that look like compounds but are invalid."""
@@ -114,6 +127,7 @@ def test_compound_negative_matches(lexer, text):
         lexer.formula(0, text)
     assert lexer.cur[0] == (0, Punctuation, '<')
 
+
 @pytest.mark.parametrize('text', ('a=b ', 'a=0 ', '0=b '))
 def test_formula_postive_matches(lexer, text):
     """Test the normal singular formula."""
@@ -122,12 +136,14 @@ def test_formula_postive_matches(lexer, text):
     assert lexer.cur[1] == (1, Operator, '=')
     assert lexer.cur[2][2] == text[2]
 
+
 @pytest.mark.parametrize('text', ('a/b', '0+0 '))
 def test_formula_negative_matches(lexer, text):
     """Test anything but an equals sign."""
     with pytest.raises(AssertionError):
         lexer.formula(0, text)
 
+
 # rules themselves
 
 @pytest.mark.parametrize('text', (
@@ -141,6 +157,7 @@ def test_rule_positive_matches(lexer, text):
     if text[-1].isdigit():
         assert lexer.cur[1][1] == Number.Integer
 
+
 @pytest.mark.parametrize('text', (
     'fantasy', 'carry over', 'premse', 'unjoining', 'triple-tilde',
     'switcheru', 'De-Morgan', 'despecification'
@@ -150,9 +167,11 @@ def test_rule_negative_matches(lexer, text):
     with pytest.raises(AssertionError):
         lexer.rule(0, text)
 
+
 # referrals
 
-@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)', '(lines 1, 6 and 0)'))
+@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)',
+                                  '(lines 1, 6 and 0)'))
 def test_lineno_positive_matches(lexer, text):
     """Test line referrals."""
     assert lexer.lineno(0, text) == len(text)
@@ -161,8 +180,9 @@ def test_lineno_positive_matches(lexer, text):
     assert lexer.cur[2][1] == Number.Integer
     assert lexer.cur[3] == (len(text)-1, Punctuation, ')')
 
+
 @pytest.mark.parametrize('text', (
-    '(lines one, two, and four)1 ', # to avoid IndexError
+    '(lines one, two, and four)1 ',  # to avoid IndexError
     '(lines 1 2 and 3)', '(lines 1 2 3)'
 ))
 def test_lineno_negative_matches(lexer, text):
@@ -170,6 +190,7 @@ def test_lineno_negative_matches(lexer, text):
     with pytest.raises(AssertionError):
         lexer.lineno(0, text)
 
+
 # worst-case: error text
 
 @pytest.mark.parametrize('text', ('asdf', 'fdsa\nasdf', 'asdf\n '))
@@ -180,11 +201,12 @@ def test_error_till_line_end(lexer, text):
         nl = len(text)
     try:
         end = text.find(text.split(None, 2)[1])
-    except IndexError: # split failed
+    except IndexError:  # split failed
         end = len(text)
     assert lexer.error_till_line_end(0, text) == end
     assert lexer.cur[0] == (0, Error, text[:nl])
 
+
 # full statement, including rule (because this can't be tested any other way)
 
 @pytest.mark.parametrize('text', ('[ push', '] pop'))
@@ -192,6 +214,7 @@ def test_fantasy_positive_matches(lexer, text):
     """Test statements that should be tokenized as push/pop statements."""
     assert lexer.get_tokens_unprocessed(text)[0] == (0, Keyword, text[0])
 
+
 # full text is already done by examplefiles, but here's some exceptions
 
 @pytest.mark.parametrize('text', (
@@ -200,4 +223,4 @@ def test_fantasy_positive_matches(lexer, text):
 ))
 def test_no_crashing(lexer, text):
     """Test incomplete text fragments that shouldn't crash the whole lexer."""
-    assert lexer.get_tokens(text)
\ No newline at end of file
+    assert lexer.get_tokens(text)
diff --git a/tests/test_util.py b/tests/test_util.py
index eb744381..ab4826bf 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -6,8 +6,6 @@
     :license: BSD, see LICENSE for details.
 """
 
-import re
-
 from pytest import raises
 
 from pygments import util, console