# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php

"""SQL Lexer"""

# This code is based on the SqlLexer in pygments.
# http://pygments.org/
#
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.

import re

from sqlparse import tokens
from sqlparse.keywords import SQL_REGEX
from sqlparse.compat import StringIO, string_types, text_type
from sqlparse.utils import consume


class Lexer(object):
    flags = re.IGNORECASE | re.UNICODE

    def __init__(self):
        # Compile SQL_REGEX once into per-state lists of
        # (match_function, action, new_state) triples.
        self._tokens = {}

        for state in SQL_REGEX:
            self._tokens[state] = []

            for tdef in SQL_REGEX[state]:
                rex = re.compile(tdef[0], self.flags).match
                new_state = None
                if len(tdef) > 2:
                    # Only multiline comments define a state transition.
                    if tdef[2] == '#pop':
                        new_state = -1
                    elif tdef[2] in SQL_REGEX:
                        new_state = (tdef[2],)
                self._tokens[state].append((rex, tdef[1], new_state))
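
    # Illustration of the assumed entry shape (inferred from the loop
    # above, not quoted from ``sqlparse.keywords``): each SQL_REGEX
    # entry is a 2- or 3-tuple, e.g.
    #
    #     (r'--.*', tokens.Comment.Single)
    #     (r'/\*', tokens.Comment.Multiline, 'multiline-comments')
    #
    # which ``__init__`` compiles into the match functions used by
    # ``get_tokens``.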

    def get_tokens(self, text, encoding=None):
        """Split ``text`` into an iterable of ``(tokentype, value)`` pairs.

        ``text`` may be a unicode string, a byte string, or a file-like
        object.  Byte strings are decoded using ``encoding`` (default:
        ``'utf-8'``); if that fails, ``'unicode-escape'`` is tried as a
        fallback.  Lexing always starts in the ``'root'`` state.
        """
        encoding = encoding or 'utf-8'

        statestack = ['root', ]
        statetokens = self._tokens['root']

        # Accept both plain strings and file-like objects.
        if isinstance(text, string_types):
            text = StringIO(text)
        text = text.read()

        if not isinstance(text, text_type):
            try:
                text = text.decode(encoding)
            except UnicodeDecodeError:
                # Last resort for byte strings that aren't valid in
                # the given encoding.
                text = text.decode('unicode-escape')

        iterable = enumerate(text)
        for pos, char in iterable:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)

                if not m:
                    continue
                elif isinstance(action, tokens._TokenType):
                    yield action, m.group()
                elif callable(action):
                    yield action(m.group())

                if isinstance(new_state, tuple):
                    for state in new_state:
                        # fixme: multiline-comments not stackable
                        if not (state == 'multiline-comments'
                                and statestack[-1] == 'multiline-comments'):
                            statestack.append(state)
                elif isinstance(new_state, int):
                    # Negative index pops states off the stack.
                    del statestack[new_state:]

                statetokens = self._tokens[statestack[-1]]
                # Advance the character iterator past the match.
                consume(iterable, m.end() - pos - 1)
                break
            else:
                yield tokens.Error, char
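

# Note on state handling above: a 3-tuple in SQL_REGEX (per the comment in
# ``__init__``, only the multiline comment patterns) pushes its named state
# onto ``statestack``, and a '#pop' transition pops it again.  As the fixme
# notes, 'multiline-comments' is never pushed twice in a row, so nested
# comments are lexed but their nesting depth is not tracked.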


def tokenize(sql, encoding=None):
    """Tokenize *sql*.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer().get_tokens(sql, encoding)
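

# A minimal usage sketch (an illustration, not part of the library API);
# the exact token stream depends on the patterns in SQL_REGEX:
if __name__ == '__main__':
    for ttype, value in tokenize('select * from foo'):
        print('%s  %r' % (ttype, value))
    # Indicative output:
    #   Token.Keyword.DML  'select'
    #   Token.Text.Whitespace  ' '
    #   Token.Wildcard  '*'
    #   ...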