Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/keywords.py  11
-rw-r--r--  sqlparse/lexer.py     34
2 files changed, 9 insertions(+), 36 deletions(-)
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index c6db0a9..41edbff 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -19,9 +19,11 @@ SQL_REGEX = {
         # $ matches *before* newline, therefore we have two patterns
         # to match Comment.Single
         (r'(--|# ).*?$', tokens.Comment.Single),
+        (r'/\*[\s\S]*?\*/', tokens.Comment.Multiline),
+
         (r'(\r\n|\r|\n)', tokens.Newline),
         (r'\s+', tokens.Whitespace),
-        (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
+
         (r':=', tokens.Assignment),
         (r'::', tokens.Punctuation),
         (r'[*]', tokens.Wildcard),
@@ -64,12 +66,6 @@ SQL_REGEX = {
         (r'[;:()\[\],\.]', tokens.Punctuation),
         (r'[<>=~!]+', tokens.Operator.Comparison),
         (r'[+/@#%^&|`?^-]+', tokens.Operator),
-    ],
-    'multiline-comments': [
-        (r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
-        (r'\*/', tokens.Comment.Multiline, '#pop'),
-        (r'[^/\*]+', tokens.Comment.Multiline),
-        (r'[/*]', tokens.Comment.Multiline),
     ]}
 
 KEYWORDS = {
@@ -600,7 +596,6 @@ KEYWORDS = {
     'VARYING': tokens.Name.Builtin,
 }
-
 
 KEYWORDS_COMMON = {
     'SELECT': tokens.Keyword.DML,
     'INSERT': tokens.Keyword.DML,
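
The substantive change in keywords.py: the stateful 'multiline-comments' machine is replaced by one non-greedy pattern that consumes an entire /* ... */ block in a single match. A quick illustrative sketch, not part of the commit: [\s\S] matches any character, newlines included, without needing re.DOTALL, and the lazy *? stops at the first */, consistent with the removed machine, whose fixme (removed in the lexer.py hunks below) notes that nested comments were never stacked anyway. One visible difference: an unterminated /* no longer matches here and falls through to later rules.

    import re

    # Illustrative only, not from the commit.
    MULTILINE_COMMENT = re.compile(r'/\*[\s\S]*?\*/')

    sql = "SELECT 1 /* a\ncomment spanning\nlines */ FROM dual"
    m = MULTILINE_COMMENT.search(sql)
    print(m.group())  # prints the whole comment, newlines and all
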
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 84c8e78..781da8a 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -24,21 +24,11 @@ class Lexer(object):
     flags = re.IGNORECASE | re.UNICODE
 
     def __init__(self):
-        self._tokens = {}
-
-        for state in SQL_REGEX:
-            self._tokens[state] = []
-
-            for tdef in SQL_REGEX[state]:
-                rex = re.compile(tdef[0], self.flags).match
-                new_state = None
-                if len(tdef) > 2:
-                    # Only Multiline comments
-                    if tdef[2] == '#pop':
-                        new_state = -1
-                    elif tdef[2] in SQL_REGEX:
-                        new_state = (tdef[2],)
-                self._tokens[state].append((rex, tdef[1], new_state))
+        self._tokens = []
+
+        for tdef in SQL_REGEX['root']:
+            rex = re.compile(tdef[0], self.flags).match
+            self._tokens.append((rex, tdef[1]))
 
     def get_tokens(self, text, encoding=None):
         """
@@ -54,8 +44,6 @@ class Lexer(object):
         ``stack`` is the inital stack (default: ``['root']``)
         """
         encoding = encoding or 'utf-8'
-        statestack = ['root', ]
-        statetokens = self._tokens['root']
 
         if isinstance(text, string_types):
             text = StringIO(text)
@@ -69,7 +57,7 @@ class Lexer(object):
             iterable = enumerate(text)
 
         for pos, char in iterable:
-            for rexmatch, action, new_state in statetokens:
+            for rexmatch, action in self._tokens:
                 m = rexmatch(text, pos)
 
                 if not m:
@@ -79,16 +67,6 @@ class Lexer(object):
                 elif callable(action):
                     yield action(m.group())
 
-                if isinstance(new_state, tuple):
-                    for state in new_state:
-                        # fixme: multiline-comments not stackable
-                        if not (state == 'multiline-comments'
-                                and statestack[-1] == 'multiline-comments'):
-                            statestack.append(state)
-                elif isinstance(new_state, int):
-                    del statestack[new_state:]
-                statetokens = self._tokens[statestack[-1]]
-
                 consume(iterable, m.end() - pos - 1)
                 break
             else:
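
From a caller's perspective nothing should change: get_tokens still yields (tokentype, value) pairs. A hedged usage sketch, with the token stream shape assumed from the code above rather than verified output:

    from sqlparse.lexer import Lexer

    sql = "SELECT /* grab\neverything */ * FROM tbl"
    for ttype, value in Lexer().get_tokens(sql):
        print(ttype, repr(value))
    # The whole /* ... */ block now arrives as a single Comment.Multiline
    # token instead of being assembled by the removed state machine.
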