author    | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-02 21:38:23 -0700
committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-04 15:06:04 -0700
commit    | 3fed0393a80a40ea28e5fc0cea9b526630e9f42b
tree      | 85a6d8b71b8133b21d34c765aeb68b3f7fd32718
parent    | 5ce225522ba2b2a8af23c7efcbd6261bd9f09528
download  | sqlparse-3fed0393a80a40ea28e5fc0cea9b526630e9f42b.tar.gz
Refactor filter-stack to simplify logic
The check

    if (self.stmtprocess or self.postprocess or
            self.split_statements or self._grouping):

always evaluates to true after removing unused features.
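For context, a minimal sketch (not part of the patch) of the public API that relied on the removed flag: `sqlparse.split()` used to set `stack.split_statements = True`, but since statement splitting now always runs, its behavior is unchanged.

```python
# Minimal sketch, assuming the post-patch sqlparse package is installed.
# split() no longer needs to toggle split_statements; StatementFilter
# is applied unconditionally inside FilterStack.run().
import sqlparse

statements = sqlparse.split("select 1; select 2;")
print(statements)  # ['select 1;', 'select 2;']
```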
-rw-r--r-- | sqlparse/__init__.py        |  1
-rw-r--r-- | sqlparse/engine/__init__.py | 48
-rw-r--r-- | sqlparse/engine/grouping.py |  5

3 files changed, 15 insertions(+), 39 deletions(-)
```diff
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index d69a3d9..cb83a71 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -66,5 +66,4 @@ def split(sql, encoding=None):
     :returns: A list of strings.
     """
     stack = engine.FilterStack()
-    stack.split_statements = True
     return [u(stmt).strip() for stmt in stack.run(sql, encoding)]
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index e69a138..7f00c57 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -13,12 +13,10 @@ from sqlparse.engine.filter import StatementFilter
 
 
 class FilterStack(object):
-
     def __init__(self):
         self.preprocess = []
         self.stmtprocess = []
         self.postprocess = []
-        self.split_statements = False
         self._grouping = False
 
     def enable_grouping(self):
@@ -27,42 +25,20 @@ class FilterStack(object):
     def run(self, sql, encoding=None):
         stream = lexer.tokenize(sql, encoding)
         # Process token stream
-        if self.preprocess:
-            for filter_ in self.preprocess:
-                stream = filter_.process(stream)
-
-        if (self.stmtprocess or self.postprocess or
-                self.split_statements or self._grouping):
-            splitter = StatementFilter()
-            stream = splitter.process(stream)
-
-        if self._grouping:
-
-            def _group(stream):
-                for stmt in stream:
-                    grouping.group(stmt)
-                    yield stmt
-            stream = _group(stream)
+        for filter_ in self.preprocess:
+            stream = filter_.process(stream)
 
-        if self.stmtprocess:
+        stream = StatementFilter().process(stream)
 
-            def _run1(stream):
-                ret = []
-                for stmt in stream:
-                    for filter_ in self.stmtprocess:
-                        filter_.process(stmt)
-                    ret.append(stmt)
-                return ret
-            stream = _run1(stream)
+        # Output: Stream processed Statements
+        for stmt in stream:
+            if self._grouping:
+                stmt = grouping.group(stmt)
 
-        if self.postprocess:
+            for filter_ in self.stmtprocess:
+                filter_.process(stmt)
 
-            def _run2(stream):
-                for stmt in stream:
-                    stmt.tokens = list(stmt.flatten())
-                    for filter_ in self.postprocess:
-                        stmt = filter_.process(stmt)
-                    yield stmt
-            stream = _run2(stream)
+            for filter_ in self.postprocess:
+                stmt = filter_.process(stmt)
 
-        return stream
+            yield stmt
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 0ac1cb3..c680995 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -266,7 +266,7 @@ def align_comments(tlist):
         token = tlist.token_next_by(i=sql.Comment, idx=token)
 
 
-def group(tlist):
+def group(stmt):
     for func in [
         group_comments,
         group_brackets,
@@ -291,4 +291,5 @@ def group(tlist):
         group_foreach,
         group_begin,
     ]:
-        func(tlist)
+        func(stmt)
+    return stmt
```
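After the refactor, `run()` is a generator that yields one processed `Statement` at a time: grouping, statement filters, and postprocess filters are applied per statement instead of through the `_group`/`_run1`/`_run2` helper closures. An illustrative sketch (mine, not part of the patch) of driving the stack directly, assuming the in-tree `FilterStack` shown above:

```python
# Illustrative sketch of using the refactored FilterStack directly
# (assumes sqlparse.engine.FilterStack as defined in this patch).
from sqlparse import engine

stack = engine.FilterStack()
stack.enable_grouping()  # sets _grouping, so grouping.group() runs per statement

for stmt in stack.run("select a from b; update t set x = 1;"):
    # run() lazily yields sqlparse.sql.Statement objects
    print(type(stmt).__name__, repr(str(stmt).strip()))
```

Because the stream is consumed lazily, callers such as `split()` process one statement at a time rather than materializing an intermediate list as `_run1` did.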