path: root/sqlparse/lexer.py
author     Victor Uriarte <victor.m.uriarte@intel.com>  2016-06-03 10:34:12 -0700
committer  Victor Uriarte <victor.m.uriarte@intel.com>  2016-06-04 11:54:14 -0700
commit     41cbd6a86d1550b6e1634bc0f3b203dabcc2698f (patch)
tree       2bc1d10424201a174dcf8cd07cdf896080d5c89a /sqlparse/lexer.py
parent     348ff620fa1acb807b83b173ee62807df21510e5 (diff)
download   sqlparse-41cbd6a86d1550b6e1634bc0f3b203dabcc2698f.tar.gz
Allow re to compile once
Otherwise re will need to recompile the patterns every time tokenize is called.
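As a rough illustration of the compile-once idea behind this commit (the rule table and tokenize helper below are made up for this sketch, not the actual sqlparse definitions): compiling the patterns at module import means every later call reuses the same matcher objects instead of rebuilding them for each lexer instance.

import re

FLAGS = re.IGNORECASE | re.UNICODE

# Illustrative rules only; the real table lives in sqlparse.keywords.SQL_REGEX.
RAW_RULES = [
    (r'--.*?(\r\n|\r|\n|$)', 'Comment.Single'),
    (r'\s+', 'Whitespace'),
    (r'[0-9]+', 'Number.Integer'),
]

# Compiled exactly once, when this module is imported.
COMPILED_RULES = [(re.compile(pattern, FLAGS).match, tokentype)
                  for pattern, tokentype in RAW_RULES]

def tokenize(text):
    """Yield (tokentype, value) pairs using the precompiled matchers."""
    pos = 0
    while pos < len(text):
        for rexmatch, tokentype in COMPILED_RULES:
            m = rexmatch(text, pos)
            if m:
                yield tokentype, m.group()
                pos = m.end()
                break
        else:
            # No rule matched; emit a single character as an error token.
            yield 'Error', text[pos]
            pos += 1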
Diffstat (limited to 'sqlparse/lexer.py')
-rw-r--r--  sqlparse/lexer.py  18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 781da8a..dd15212 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -12,8 +12,6 @@
 # It's separated from the rest of pygments to increase performance
 # and to allow some customizations.
-import re
-
 from sqlparse import tokens
 from sqlparse.keywords import SQL_REGEX
 from sqlparse.compat import StringIO, string_types, text_type
@@ -21,16 +19,12 @@ from sqlparse.utils import consume
 class Lexer(object):
-    flags = re.IGNORECASE | re.UNICODE
-
-    def __init__(self):
-        self._tokens = []
-
-        for tdef in SQL_REGEX['root']:
-            rex = re.compile(tdef[0], self.flags).match
-            self._tokens.append((rex, tdef[1]))
+    """Lexer
+    Empty class. Leaving for back-support
+    """
-    def get_tokens(self, text, encoding=None):
+    @staticmethod
+    def get_tokens(text, encoding=None):
         """
         Return an iterable of (tokentype, value) pairs generated from
         `text`. If `unfiltered` is set to `True`, the filtering mechanism
@@ -57,7 +51,7 @@ class Lexer(object):
         iterable = enumerate(text)
         for pos, char in iterable:
-            for rexmatch, action in self._tokens:
+            for rexmatch, action in SQL_REGEX:
                 m = rexmatch(text, pos)
                 if not m:
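For the rewritten loop (for rexmatch, action in SQL_REGEX:) to iterate directly over (matcher, tokentype) pairs, the companion change to sqlparse/keywords.py presumably precompiles SQL_REGEX once at import time. A hedged sketch of that shape, with shortened, illustrative patterns:

import re
from sqlparse import tokens

FLAGS = re.IGNORECASE | re.UNICODE

# Assumed post-commit shape: a flat list of (compiled match function, tokentype)
# pairs built once at import, so get_tokens() never calls re.compile() itself.
SQL_REGEX = [
    (re.compile(r'(--|# ).*?(\r\n|\r|\n|$)', FLAGS).match, tokens.Comment.Single),
    (re.compile(r'\s+', FLAGS).match, tokens.Whitespace),
    (re.compile(r'[0-9]+', FLAGS).match, tokens.Number.Integer),
]

With this layout the compilation cost is paid once per process rather than once per Lexer() construction, which is what the commit message refers to.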