summaryrefslogtreecommitdiff
path: root/sqlparse/engine
diff options
context:
space:
mode:
authorAndi Albrecht <albrecht.andi@gmail.com>2011-09-27 12:45:30 +0200
committerAndi Albrecht <albrecht.andi@gmail.com>2011-09-27 12:45:30 +0200
commitf811fa12247330adc27f1b842167a112b1c0829c (patch)
tree455bc81faf235419724537bde5d065ea1b186f59 /sqlparse/engine
parent4b9261f4076befecbc4757c21ed0b268df546f96 (diff)
parentb0010af3ec74e57adf8910ab5d69f408cda3c475 (diff)
downloadsqlparse-f811fa12247330adc27f1b842167a112b1c0829c.tar.gz
Merged.
Diffstat (limited to 'sqlparse/engine')
-rw-r--r--sqlparse/engine/filter.py5
-rw-r--r--sqlparse/engine/grouping.py6
2 files changed, 7 insertions, 4 deletions
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index 89d9b15..421b3f3 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -61,14 +61,15 @@ class StatementFilter(TokenFilter):
if unified == 'END':
# Should this respect a preceding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
- self._begin_depth = max(0, self._begin_depth-1)
+ self._begin_depth = max(0, self._begin_depth - 1)
return -1
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
return 0
- if unified in ('IF', 'FOR') and self._is_create and self._begin_depth > 0:
+ if (unified in ('IF', 'FOR')
+ and self._is_create and self._begin_depth > 0):
return 1
# Default
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 6e99782..cc75de4 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -280,13 +280,15 @@ def group_aliased(tlist):
token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
while token:
next_ = tlist.token_next(tlist.token_index(token))
- if next_ is not None and isinstance(next_, (sql.Identifier, sql.Function)):
+ if next_ is not None and isinstance(next_,
+ (sql.Identifier, sql.Function)):
grp = tlist.tokens_between(token, next_)[1:]
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
idx = tlist.token_index(token) + 1
- token = tlist.token_next_by_instance(idx, (sql.Identifier, sql.Function))
+ token = tlist.token_next_by_instance(idx,
+ (sql.Identifier, sql.Function))
def group_typecasts(tlist):