author     Andi Albrecht <albrecht.andi@gmail.com>  2009-05-06 07:53:44 +0200
committer  Andi Albrecht <albrecht.andi@gmail.com>  2009-05-06 07:53:44 +0200
commit     974222bcb24a5b2bf3a0e5ecd616a2c3855e8342 (patch)
tree       7a366f4fd2bb286b88c23ac7f120fd3ba9954047 /tests/test_tokenize.py
parent     ac165c93766f19d9c503ecb2d47d6e872d54c21c (diff)
Code cleanup and test coverage.
Diffstat (limited to 'tests/test_tokenize.py')
-rw-r--r--  tests/test_tokenize.py  46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 127b4db..e4ef6c3 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -3,7 +3,9 @@
 import unittest
 import types
+import sqlparse
 from sqlparse import lexer
+from sqlparse import sql
 from sqlparse.tokens import *
@@ -38,3 +40,47 @@ class TestTokenize(unittest.TestCase):
         sql = 'foo\r\nbar\n'
         tokens = lexer.tokenize(sql)
         self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
+
+
+class TestToken(unittest.TestCase):
+
+    def test_str(self):
+        token = sql.Token(None, 'FoO')
+        self.assertEqual(str(token), 'FoO')
+
+    def test_repr(self):
+        token = sql.Token(Keyword, 'foo')
+        tst = "<Keyword 'foo' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+        token = sql.Token(Keyword, '1234567890')
+        tst = "<Keyword '123456...' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+
+    def test_flatten(self):
+        token = sql.Token(Keyword, 'foo')
+        gen = token.flatten()
+        self.assertEqual(type(gen), types.GeneratorType)
+        lgen = list(gen)
+        self.assertEqual(lgen, [token])
+
+
+class TestTokenList(unittest.TestCase):
+
+    def test_token_first(self):
+        p = sqlparse.parse(' select foo')[0]
+        first = p.token_first()
+        self.assertEqual(first.value, 'select')
+        self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
+        self.assertEqual(sql.TokenList([]).token_first(), None)
+
+    def test_token_matching(self):
+        t1 = sql.Token(Keyword, 'foo')
+        t2 = sql.Token(Punctuation, ',')
+        x = sql.TokenList([t1, t2])
+        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
+                         t1)
+        self.assertEqual(x.token_matching(0,
+                                          [lambda t: t.ttype is Punctuation]),
+                         t2)
+        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
+                         None)
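
For reference, a minimal usage sketch of the token API these new tests exercise, based only on the calls visible in the diff above (sqlparse.parse, Statement.token_first, Token.flatten); it illustrates the tested behaviour and is not part of the commit:

    # Illustration only -- mirrors API calls exercised by the tests above.
    import sqlparse
    from sqlparse import sql
    from sqlparse.tokens import Keyword

    # Parse a statement and inspect its first non-whitespace token.
    stmt = sqlparse.parse(' select foo')[0]
    print(stmt.token_first().value)          # 'select'

    # A single Token flattens to a one-element stream containing itself.
    token = sql.Token(Keyword, 'foo')
    print(list(token.flatten()) == [token])  # True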