Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/__init__.py  |  4
-rw-r--r--  sqlparse/engine/filter.py    |  8
-rw-r--r--  sqlparse/engine/grouping.py  | 12
-rw-r--r--  sqlparse/filters.py          | 11
-rw-r--r--  sqlparse/formatter.py        |  4
-rw-r--r--  sqlparse/lexer.py            |  2
-rw-r--r--  sqlparse/sql.py              | 17
-rw-r--r--  sqlparse/utils.py            |  4
8 files changed, 31 insertions, 31 deletions
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 62c82b8..4d7fe88 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -43,8 +43,8 @@ class FilterStack(object):
for filter_ in self.preprocess:
stream = filter_.process(self, stream)
- if (self.stmtprocess or self.postprocess or self.split_statements
- or self._grouping):
+ if self.stmtprocess or self.postprocess or self.split_statements \
+ or self._grouping:
splitter = StatementFilter()
stream = splitter.process(self, stream)
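For context, a rough sketch of how the condition above is normally reached: the public sqlparse.parse entry point builds and runs a FilterStack (an assumption about the usual call path, not shown in this hunk).

    import sqlparse

    # sqlparse.parse drives a FilterStack internally; each returned item is a
    # grouped Statement produced after the StatementFilter split shown above.
    for stmt in sqlparse.parse("select a from t; select b from u;"):
        print(type(stmt).__name__, repr(str(stmt)))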
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index f7dd264..149db58 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -23,8 +23,8 @@ class StatementFilter:
def _change_splitlevel(self, ttype, value):
"Get the new split level (increase, decrease or remain equal)"
# PostgreSQL
- if (ttype == T.Name.Builtin
- and value.startswith('$') and value.endswith('$')):
+ if ttype == T.Name.Builtin \
+ and value.startswith('$') and value.endswith('$'):
if self._in_dbldollar:
self._in_dbldollar = False
return -1
@@ -64,8 +64,8 @@ class StatementFilter:
self._is_create = True
return 0
- if (unified in ('IF', 'FOR')
- and self._is_create and self._begin_depth > 0):
+ if unified in ('IF', 'FOR') \
+ and self._is_create and self._begin_depth > 0:
return 1
# Default
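The split-level logic above guards dollar-quoted bodies and CREATE/IF/FOR nesting; a hedged sketch of the observable behaviour through sqlparse.split (the statement count is an expectation, not something this diff asserts):

    import sqlparse

    sql_text = ("create function f() returns integer as $$ "
                "begin return 1; end; $$ language plpgsql; "
                "select 2;")
    # The dollar-quoted body should not be split on its inner semicolons,
    # so this is expected to yield two statements.
    print(len(sqlparse.split(sql_text)))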
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index c8c2415..fe8b2e8 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -116,7 +116,7 @@ def group_as(tlist):
def _right_valid(token):
# Currently limited to DML/DDL. Maybe additional more non SQL reserved
# keywords should appear here (see issue8).
- return not token.ttype in (T.DML, T.DDL)
+ return token.ttype not in (T.DML, T.DDL)
def _left_valid(token):
if token.ttype is T.Keyword and token.value in ('NULL',):
@@ -191,7 +191,8 @@ def group_identifier(tlist):
i1 = tl.token_index(t1, start=i) if t1 else None
t2_end = None if i1 is None else i1 + 1
- t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
+ t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis),
+ end=t2_end)
if t1 and t2:
i2 = tl.token_index(t2, start=i)
@@ -219,9 +220,10 @@ def group_identifier(tlist):
if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
identifier_tokens = identifier_tokens[:-1]
if not (len(identifier_tokens) == 1
- and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis))
- or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
- T.Literal.Number.Float))):
+ and (isinstance(identifier_tokens[0], (sql.Function,
+ sql.Parenthesis))
+ or identifier_tokens[0].ttype in (
+ T.Literal.Number.Integer, T.Literal.Number.Float))):
group = tlist.group_tokens(sql.Identifier, identifier_tokens)
idx = tlist.token_index(group, start=idx) + 1
else:
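The group_identifier change above is purely a line-length re-wrap; a rough usage sketch of the grouping it performs, assuming a simple statement where column references group into Identifier/IdentifierList nodes:

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse("select foo.bar, baz from some_table")[0]
    # After grouping, column references appear as Identifier / IdentifierList
    # nodes instead of flat token runs.
    for tok in stmt.tokens:
        if isinstance(tok, (sql.Identifier, sql.IdentifierList)):
            print(type(tok).__name__, str(tok))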
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index eabf863..86624a8 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -7,7 +7,6 @@ from os.path import abspath, join
from sqlparse import sql, tokens as T
from sqlparse.compat import u, text_type
from sqlparse.engine import FilterStack
-from sqlparse.lexer import tokenize
from sqlparse.pipeline import Pipeline
from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
String, Whitespace)
@@ -144,8 +143,6 @@ class IncludeStatement:
# Found file path to include
if token_type in String.Symbol:
-# if token_type in tokens.String.Symbol:
-
# Get path of file to include
path = join(self.dirpath, value[1:-1])
@@ -251,9 +248,9 @@ class StripWhitespaceFilter:
# Removes newlines before commas, see issue140
last_nl = None
for token in tlist.tokens[:]:
- if (token.ttype is T.Punctuation
- and token.value == ','
- and last_nl is not None):
+ if token.ttype is T.Punctuation \
+ and token.value == ',' \
+ and last_nl is not None:
tlist.tokens.remove(last_nl)
if token.is_whitespace():
last_nl = token
@@ -492,7 +489,7 @@ class RightMarginFilter:
else:
self.line = token.value.splitlines()[-1]
elif (token.is_group()
- and not token.__class__ in self.keep_together):
+ and token.__class__ not in self.keep_together):
token.tokens = self._process(stack, token, token.tokens)
else:
val = u(token)
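The StripWhitespaceFilter hunk above (newline-before-comma removal, issue140) is easiest to observe through sqlparse.format; a hedged sketch, assuming strip_whitespace wires this filter in as formatter.py below suggests:

    import sqlparse

    raw = "select a\n, b\nfrom   t"
    # Expected effect: redundant whitespace is collapsed and the newline
    # preceding the comma is dropped (issue140).
    print(sqlparse.format(raw, strip_whitespace=True))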
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index 811f5af..ab5f370 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -106,8 +106,8 @@ def build_filter_stack(stack, options):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripCommentsFilter())
- if (options.get('strip_whitespace', False)
- or options.get('reindent', False)):
+ if options.get('strip_whitespace', False) \
+ or options.get('reindent', False):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripWhitespaceFilter())
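Either option in the condition above enables grouping and appends StripWhitespaceFilter; a short sketch of the two options as exposed by sqlparse.format:

    import sqlparse

    messy = "SELECT *   FROM users\n\n\nWHERE  id = 1"
    # Both options route through build_filter_stack(); either one enables
    # grouping and appends StripWhitespaceFilter, per the hunk above.
    print(sqlparse.format(messy, strip_whitespace=True))
    print(sqlparse.format(messy, reindent=True))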
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index b4eb604..d0f8e3b 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -123,7 +123,7 @@ class LexerMeta(type):
for state in tdef2:
assert (state in unprocessed or
state in ('#pop', '#push')), \
- 'unknown new state ' + state
+ 'unknown new state ' + state
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
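The assertion above validates state transitions while LexerMeta compiles the token definitions; the resulting token stream is available through sqlparse.lexer.tokenize (the helper whose unused import is dropped from filters.py above). A minimal sketch:

    from sqlparse import lexer

    # tokenize() yields (tokentype, value) pairs from the state machine that
    # the assertion above checks at class-creation time.
    for ttype, value in lexer.tokenize("select 1 -- done\n"):
        print(ttype, repr(value))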
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 97dd24e..aaf566b 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -232,7 +232,6 @@ class TokenList(Token):
return True
def get_sublists(self):
-# return [x for x in self.tokens if isinstance(x, TokenList)]
for x in self.tokens:
if isinstance(x, TokenList):
yield x
@@ -347,9 +346,9 @@ class TokenList(Token):
def token_index(self, token, start=0):
"""Return list index of token."""
if start > 0:
- # Performing `index` manually is much faster when starting in the middle
- # of the list of tokens and expecting to find the token near to the starting
- # index.
+ # Performing `index` manually is much faster when starting
+ # in the middle of the list of tokens and expecting to find
+ # the token near to the starting index.
for i in range(start, len(self.tokens)):
if self.tokens[i] == token:
return i
@@ -471,6 +470,7 @@ class TokenList(Token):
return tok.get_name()
return None
+
class Statement(TokenList):
"""Represents a SQL statement."""
@@ -570,6 +570,7 @@ class SquareBrackets(TokenList):
def _groupable_tokens(self):
return self.tokens[1:-1]
+
class Assignment(TokenList):
"""An assignment like 'var := val;'"""
__slots__ = ('value', 'ttype', 'tokens')
@@ -672,10 +673,10 @@ class Function(TokenList):
for t in parenthesis.tokens:
if isinstance(t, IdentifierList):
return t.get_identifiers()
- elif isinstance(t, Identifier) or \
- isinstance(t, Function) or \
- t.ttype in T.Literal:
- return [t,]
+ elif (isinstance(t, Identifier) or
+ isinstance(t, Function) or
+ t.ttype in T.Literal):
+ return [t, ]
return []
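The Function.get_parameters rewrite above only reshapes the condition; a hedged usage sketch, assuming a lone function call stays a top-level Function node (which the group_identifier hunk above implies):

    import sqlparse
    from sqlparse import sql

    stmt = sqlparse.parse("select some_func(a, b, 42) from t")[0]
    # get_parameters() returns identifiers, nested functions or literals
    # found inside the function's parenthesis, per the branch shown above.
    for tok in stmt.tokens:
        if isinstance(tok, sql.Function):
            print([str(p) for p in tok.get_parameters()])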
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
index 3a49ac2..ae45679 100644
--- a/sqlparse/utils.py
+++ b/sqlparse/utils.py
@@ -73,7 +73,6 @@ def memoize_generator(func):
cache = Cache()
def wrapped_func(*args, **kwargs):
-# params = (args, kwargs)
params = (args, tuple(sorted(kwargs.items())))
# Look if cached
@@ -120,6 +119,7 @@ SPLIT_REGEX = re.compile(r"""
LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
+
def split_unquoted_newlines(text):
"""Split a string on all unquoted newlines.
@@ -134,4 +134,4 @@ def split_unquoted_newlines(text):
outputlines.append('')
else:
outputlines[-1] += line
- return outputlines
\ No newline at end of file
+ return outputlines
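split_unquoted_newlines, whose missing trailing newline this hunk fixes, splits only on newlines outside string literals; a small hedged example:

    from sqlparse.utils import split_unquoted_newlines

    text = "select 'a\nb' as col\nfrom t"
    # The newline inside the quoted literal should be preserved; only the
    # unquoted newline before FROM is expected to produce a split.
    print(split_unquoted_newlines(text))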