author | Thomas Aglassinger <roskakori@users.sourceforge.net> | 2016-03-21 08:29:44 +0100
---|---|---
committer | Thomas Aglassinger <roskakori@users.sourceforge.net> | 2016-03-21 08:29:44 +0100
commit | 1d5aed09f1f7fcfa27c73d03ffc1a61e686658da (patch) |
tree | e58d5ca42db23b95a639b942d46423d2c76d934d /tests/test_sql.py |
parent | a4a3df4ac8c645d890df8fc264733045108a3f53 (diff) |
download | pygments-1d5aed09f1f7fcfa27c73d03ffc1a61e686658da.tar.gz |
Added lexer for Transact-SQL as used by Microsoft SQL Server and Sybase.
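For readers who want to try the new lexer by hand, here is a minimal sketch. It uses only the import path and `get_tokens()` API that the test file below relies on; the sample SQL string itself is illustrative, not taken from the commit:

```python
# Minimal sketch: driving the new Transact-SQL lexer directly.
# Import path and get_tokens() usage match tests/test_sql.py below;
# the SQL snippet is just an illustrative example.
from pygments.lexers.sql import TransactSqlLexer

lexer = TransactSqlLexer()
sql = "SELECT TOP 3 name FROM sys.objects  -- single-line comment"
for token_type, token_value in lexer.get_tokens(sql):
    print(token_type, repr(token_value))
```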
Diffstat (limited to 'tests/test_sql.py')
-rw-r--r-- | tests/test_sql.py | 76
1 file changed, 76 insertions, 0 deletions
```diff
diff --git a/tests/test_sql.py b/tests/test_sql.py
new file mode 100644
index 00000000..37a81ff8
--- /dev/null
+++ b/tests/test_sql.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+"""
+    Pygments SQL lexers tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+import io
+import os.path
+import unittest
+
+from pygments.lexers.sql import TransactSqlLexer
+from pygments.token import Comment, Error, Name, Number, Whitespace
+
+
+class TransactSqlLexerTest(unittest.TestCase):
+
+    def setUp(self):
+        self.lexer = TransactSqlLexer()
+
+    def _assertAreTokensOfType(self, examples, expected_token_type):
+        for test_number, example in enumerate(examples.split(), 1):
+            token_count = 0
+            for token_type, token_value in self.lexer.get_tokens(example):
+                if token_type != Whitespace:
+                    token_count += 1
+                    self.assertEqual(
+                        token_type, expected_token_type,
+                        'token_type #%d for %s is %s but must be %s' %
+                        (test_number, token_value, token_type, expected_token_type))
+            self.assertEqual(
+                token_count, 1,
+                '%s must yield exactly 1 token instead of %d' %
+                (example, token_count))
+
+    def _assertTokensMatch(self, text, expected_tokens_without_trailing_newline):
+        actual_tokens = tuple(self.lexer.get_tokens(text))
+        if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
+            actual_tokens = tuple(actual_tokens[:-1])
+        self.assertEqual(
+            expected_tokens_without_trailing_newline, actual_tokens,
+            'text must yield expected tokens: %s' % text)
+
+    def test_can_lex_float(self):
+        self._assertAreTokensOfType(
+            '1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float)
+        self._assertTokensMatch(
+            '1e2.1e2',
+            ((Number.Float, '1e2'), (Number.Float, '.1e2'))
+        )
+
+    def test_can_lex_names(self):
+        self._assertAreTokensOfType(
+            u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2', Name)
+
+    def test_can_lex_comments(self):
+        self._assertTokensMatch('--\n', ((Comment.Single, '--\n'),))
+        self._assertTokensMatch('/**/', (
+            (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
+        ))
+        self._assertTokensMatch('/*/**/*/', (
+            (Comment.Multiline, '/*'),
+            (Comment.Multiline, '/*'),
+            (Comment.Multiline, '*/'),
+            (Comment.Multiline, '*/'),
+        ))
+
+    def test_can_lex_example_file(self):
+        tests_path = os.path.dirname(__file__)
+        example_path = os.path.join(tests_path, 'examplefiles', 'test_transact-sql.txt')
+
+        with io.open(example_path, 'r', encoding='utf-8') as example_file:
+            example_code = example_file.read()
+        for token_type, token_value in self.lexer.get_tokens(example_code):
+            self.assertNotEqual(Error, token_type, 'token_value=%r' % token_value)
```
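The most instructive assertion here is the nested-comment case: Transact-SQL block comments nest, so in `/*/**/*/` the inner `*/` closes only the inner `/*` and the outer comment stays open until the final `*/`, giving four `Comment.Multiline` tokens. A standalone sketch of that behaviour, reusing only the API the test itself exercises (the Whitespace filtering mirrors what `_assertTokensMatch` does for the trailing newline token):

```python
# Sketch of the nested-comment case from test_can_lex_comments above.
# T-SQL block comments nest: the inner '*/' closes the inner '/*',
# and the outer comment stays open until the final '*/'.
from pygments.lexers.sql import TransactSqlLexer
from pygments.token import Comment, Whitespace

lexer = TransactSqlLexer()
# get_tokens() appends a trailing (Whitespace, '\n'); filter it out,
# as _assertTokensMatch does in the test.
tokens = [t for t in lexer.get_tokens('/*/**/*/') if t[0] != Whitespace]
assert tokens == [
    (Comment.Multiline, '/*'),
    (Comment.Multiline, '/*'),
    (Comment.Multiline, '*/'),
    (Comment.Multiline, '*/'),
]
```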