diff options
author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-19 14:20:25 -0700 |
---|---|---|
committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-20 07:41:13 -0700 |
commit | 8ebd43d8109c6ecaf4964634e3bd782e8bd769d5 (patch) | |
tree | 892c56b4a2699ee790a66872d58a925acfd10bc7 | |
parent | 5d50f349cda37986bb3704e8fe25d57c78e6047a (diff) | |
download | sqlparse-8ebd43d8109c6ecaf4964634e3bd782e8bd769d5.tar.gz |
Parametrize tests
Allows tests to continue even if the first assert has failed.
This is particularly useful when a change deals with two
almost opposing edge cases.
-rw-r--r-- | tests/test_grouping.py | 18 | ||||
-rw-r--r-- | tests/test_parse.py | 57 | ||||
-rw-r--r-- | tests/test_regressions.py | 72 | ||||
-rw-r--r-- | tests/test_split.py | 45 | ||||
-rw-r--r-- | tests/test_tokenize.py | 31 |
5 files changed, 70 insertions, 153 deletions
diff --git a/tests/test_grouping.py b/tests/test_grouping.py index 8356e16..12d7310 100644 --- a/tests/test_grouping.py +++ b/tests/test_grouping.py @@ -26,12 +26,8 @@ def test_grouping_comments(): assert len(parsed.tokens) == 2 -def test_grouping_assignment(): - s = 'foo := 1;' - parsed = sqlparse.parse(s)[0] - assert len(parsed.tokens) == 1 - assert isinstance(parsed.tokens[0], sql.Assignment) - s = 'foo := 1' +@pytest.mark.parametrize('s', ['foo := 1;', 'foo := 1']) +def test_grouping_assignment(s): parsed = sqlparse.parse(s)[0] assert len(parsed.tokens) == 1 assert isinstance(parsed.tokens[0], sql.Assignment) @@ -120,13 +116,9 @@ def test_grouping_identifier_function(): assert isinstance(p.tokens[0].tokens[0].tokens[0], sql.Function) -def test_grouping_identifier_extended(): - # issue 15 - p = sqlparse.parse('foo+100')[0] - assert isinstance(p.tokens[0], sql.Operation) - p = sqlparse.parse('foo + 100')[0] - assert isinstance(p.tokens[0], sql.Operation) - p = sqlparse.parse('foo*100')[0] +@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100']) +def test_grouping_operation(s): + p = sqlparse.parse(s)[0] assert isinstance(p.tokens[0], sql.Operation) diff --git a/tests/test_parse.py b/tests/test_parse.py index aa2cd15..2d23425 100644 --- a/tests/test_parse.py +++ b/tests/test_parse.py @@ -25,17 +25,11 @@ def test_parse_multistatement(): assert str(stmts[1]) == sql2 -def test_parse_newlines(): - s = 'select\n*from foo;' - p = sqlparse.parse(s)[0] - assert str(p) == s - s = 'select\r\n*from foo' - p = sqlparse.parse(s)[0] - assert str(p) == s - s = 'select\r*from foo' - p = sqlparse.parse(s)[0] - assert str(p) == s - s = 'select\r\n*from foo\n' +@pytest.mark.parametrize('s', ['select\n*from foo;', + 'select\r\n*from foo', + 'select\r*from foo', + 'select\r\n*from foo\n']) +def test_parse_newlines(s): p = sqlparse.parse(s)[0] assert str(p) == s @@ -66,40 +60,23 @@ def test_parse_has_ancestor(): assert baz.has_ancestor(p) -def test_parse_float(): - t = 
sqlparse.parse('.5')[0].tokens - assert len(t) == 1 - assert t[0].ttype is sqlparse.tokens.Number.Float - t = sqlparse.parse('.51')[0].tokens - assert len(t) == 1 - assert t[0].ttype is sqlparse.tokens.Number.Float - t = sqlparse.parse('1.5')[0].tokens - assert len(t) == 1 - assert t[0].ttype is sqlparse.tokens.Number.Float - t = sqlparse.parse('12.5')[0].tokens +@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5']) +def test_parse_float(s): + t = sqlparse.parse(s)[0].tokens assert len(t) == 1 assert t[0].ttype is sqlparse.tokens.Number.Float -def test_parse_placeholder(): - def _get_tokens(s): - return sqlparse.parse(s)[0].tokens[-1].tokens - - t = _get_tokens('select * from foo where user = ?') - assert t[-1].ttype is sqlparse.tokens.Name.Placeholder - assert t[-1].value == '?' - t = _get_tokens('select * from foo where user = :1') - assert t[-1].ttype is sqlparse.tokens.Name.Placeholder - assert t[-1].value == ':1' - t = _get_tokens('select * from foo where user = :name') - assert t[-1].ttype is sqlparse.tokens.Name.Placeholder - assert t[-1].value == ':name' - t = _get_tokens('select * from foo where user = %s') - assert t[-1].ttype is sqlparse.tokens.Name.Placeholder - assert t[-1].value == '%s' - t = _get_tokens('select * from foo where user = $a') +@pytest.mark.parametrize('s, holder', [ + ('select * from foo where user = ?', '?'), + ('select * from foo where user = :1', ':1'), + ('select * from foo where user = :name', ':name'), + ('select * from foo where user = %s', '%s'), + ('select * from foo where user = $a', '$a')]) +def test_parse_placeholder(s, holder): + t = sqlparse.parse(s)[0].tokens[-1].tokens assert t[-1].ttype is sqlparse.tokens.Name.Placeholder - assert t[-1].value == '$a' + assert t[-1].value == holder def test_parse_modulo_not_placeholder(): diff --git a/tests/test_regressions.py b/tests/test_regressions.py index 71fa2bd..255493c 100644 --- a/tests/test_regressions.py +++ b/tests/test_regressions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 
-*- -import pytest # noqa +import pytest import sqlparse from sqlparse import sql, tokens as T @@ -27,21 +27,11 @@ def test_issue13(): assert str(parsed[1]).strip() == "select 'two\\'';" -def test_issue26(): +@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n', + '--', '--\n']) +def test_issue26(s): # parse stand-alone comments - p = sqlparse.parse('--hello')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Comment.Single - p = sqlparse.parse('-- hello')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Comment.Single - p = sqlparse.parse('--hello\n')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Comment.Single - p = sqlparse.parse('--')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Comment.Single - p = sqlparse.parse('--\n')[0] + p = sqlparse.parse(s)[0] assert len(p.tokens) == 1 assert p.tokens[0].ttype is T.Comment.Single @@ -110,33 +100,27 @@ def test_issue40(): ' FROM bar) as foo']) -def test_issue78(): +@pytest.mark.parametrize('s', ['select x.y::text as z from foo', + 'select x.y::text as "z" from foo', + 'select x."y"::text as z from foo', + 'select x."y"::text as "z" from foo', + 'select "x".y::text as z from foo', + 'select "x".y::text as "z" from foo', + 'select "x"."y"::text as z from foo', + 'select "x"."y"::text as "z" from foo']) +@pytest.mark.parametrize('func_name, result', [('get_name', 'z'), + ('get_real_name', 'y'), + ('get_parent_name', 'x'), + ('get_alias', 'z'), + ('get_typecast', 'text')]) +def test_issue78(s, func_name, result): # the bug author provided this nice examples, let's use them! 
- def _get_identifier(sql): - p = sqlparse.parse(sql)[0] - return p.tokens[2] - - results = (('get_name', 'z'), - ('get_real_name', 'y'), - ('get_parent_name', 'x'), - ('get_alias', 'z'), - ('get_typecast', 'text')) - variants = ( - 'select x.y::text as z from foo', - 'select x.y::text as "z" from foo', - 'select x."y"::text as z from foo', - 'select x."y"::text as "z" from foo', - 'select "x".y::text as z from foo', - 'select "x".y::text as "z" from foo', - 'select "x"."y"::text as z from foo', - 'select "x"."y"::text as "z" from foo', - ) - for variant in variants: - i = _get_identifier(variant) - assert isinstance(i, sql.Identifier) - for func_name, result in results: - func = getattr(i, func_name) - assert func() == result + p = sqlparse.parse(s)[0] + i = p.tokens[2] + assert isinstance(i, sql.Identifier) + + func = getattr(i, func_name) + assert func() == result def test_issue83(): @@ -191,7 +175,8 @@ def test_dont_alias_keywords(): assert p.tokens[2].ttype is T.Keyword -def test_format_accepts_encoding(load_file): # issue20 +def test_format_accepts_encoding(load_file): + # issue20 sql = load_file('test_cp1251.sql', 'cp1251') formatted = sqlparse.format(sql, reindent=True, encoding='cp1251') tformatted = u'insert into foo\nvalues (1); -- Песня про надежду\n' @@ -296,8 +281,7 @@ def test_issue227_gettype_cte(): with2_stmt = sqlparse.parse(""" WITH foo AS (SELECT 1 AS abc, 2 AS def), bar AS (SELECT * FROM something WHERE x > 1) - INSERT INTO elsewhere SELECT * FROM foo JOIN bar; - """) + INSERT INTO elsewhere SELECT * FROM foo JOIN bar;""") assert with2_stmt[0].get_type() == 'INSERT' diff --git a/tests/test_split.py b/tests/test_split.py index 8a2fe2d..af7c9ce 100644 --- a/tests/test_split.py +++ b/tests/test_split.py @@ -4,7 +4,7 @@ import types -import pytest # noqa +import pytest import sqlparse from sqlparse.compat import StringIO, text_type @@ -24,34 +24,17 @@ def test_split_backslash(): assert len(stmts) == 3 -def test_split_create_function(load_file): - 
sql = load_file('function.sql') - stmts = sqlparse.parse(sql) - assert len(stmts) == 1 - assert str(stmts[0]) == sql - - -def test_split_create_function_psql(load_file): - sql = load_file('function_psql.sql') +@pytest.mark.parametrize('fn', ['function.sql', + 'function_psql.sql', + 'function_psql2.sql', + 'function_psql3.sql']) +def test_split_create_function(load_file, fn): + sql = load_file(fn) stmts = sqlparse.parse(sql) assert len(stmts) == 1 assert text_type(stmts[0]) == sql -def test_split_create_function_psql3(load_file): - sql = load_file('function_psql3.sql') - stmts = sqlparse.parse(sql) - assert len(stmts) == 1 - assert str(stmts[0]) == sql - - -def test_split_create_function_psql2(load_file): - sql = load_file('function_psql2.sql') - stmts = sqlparse.parse(sql) - assert len(stmts) == 1 - assert str(stmts[0]) == sql - - def test_split_dashcomments(load_file): sql = load_file('dashcomment.sql') stmts = sqlparse.parse(sql) @@ -59,14 +42,12 @@ def test_split_dashcomments(load_file): assert ''.join(str(q) for q in stmts) == sql -def test_split_dashcomments_eol(): - stmts = sqlparse.parse('select foo; -- comment\n') - assert len(stmts) == 1 - stmts = sqlparse.parse('select foo; -- comment\r') - assert len(stmts) == 1 - stmts = sqlparse.parse('select foo; -- comment\r\n') - assert len(stmts) == 1 - stmts = sqlparse.parse('select foo; -- comment') +@pytest.mark.parametrize('s', ['select foo; -- comment\n', + 'select foo; -- comment\r', + 'select foo; -- comment\r\n', + 'select foo; -- comment']) +def test_split_dashcomments_eol(s): + stmts = sqlparse.parse(s) assert len(stmts) == 1 diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py index a2cc388..6cc0dfa 100644 --- a/tests/test_tokenize.py +++ b/tests/test_tokenize.py @@ -28,18 +28,10 @@ def test_tokenize_backticks(): assert tokens[0] == (T.Name, '`foo`') -def test_tokenize_linebreaks(): +@pytest.mark.parametrize('s', ['foo\nbar\n', 'foo\rbar\r', + 'foo\r\nbar\r\n', 'foo\r\nbar\n']) +def 
test_tokenize_linebreaks(s): # issue1 - s = 'foo\nbar\n' - tokens = lexer.tokenize(s) - assert ''.join(str(x[1]) for x in tokens) == s - s = 'foo\rbar\r' - tokens = lexer.tokenize(s) - assert ''.join(str(x[1]) for x in tokens) == s - s = 'foo\r\nbar\r\n' - tokens = lexer.tokenize(s) - assert ''.join(str(x[1]) for x in tokens) == s - s = 'foo\r\nbar\n' tokens = lexer.tokenize(s) assert ''.join(str(x[1]) for x in tokens) == s @@ -159,18 +151,9 @@ def test_parse_join(expr): assert p.tokens[0].ttype is T.Keyword -def test_parse_endifloop(): - p = sqlparse.parse('END IF')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Keyword - p = sqlparse.parse('END IF')[0] - assert len(p.tokens) == 1 - p = sqlparse.parse('END\t\nIF')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Keyword - p = sqlparse.parse('END LOOP')[0] - assert len(p.tokens) == 1 - assert p.tokens[0].ttype is T.Keyword - p = sqlparse.parse('END LOOP')[0] +@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF', + 'END LOOP', 'END LOOP', 'END\t\nLOOP']) +def test_parse_endifloop(s): + p = sqlparse.parse(s)[0] assert len(p.tokens) == 1 assert p.tokens[0].ttype is T.Keyword |