diff options
author:    Andreas Albrecht <andreas.albrecht@Admins-MacBook-Pro.local>  2019-03-10 07:31:37 +0100
committer: Andreas Albrecht <andreas.albrecht@Admins-MacBook-Pro.local>  2019-03-10 07:31:37 +0100
commit:    6a7eb243274e1f17c0bb99d65272c4313b9ee08c (patch)
tree:      001bc90e42e1595995d396ee13dafc34e9571686 /sqlparse
parent:    bc84fdc0afdb966280d95277e1c1cddeba20ae5a (diff)
download:  sqlparse-6a7eb243274e1f17c0bb99d65272c4313b9ee08c.tar.gz
Code cleanup.
Diffstat (limited to 'sqlparse')
 sqlparse/engine/grouping.py           |  4 ++--
 sqlparse/engine/statement_splitter.py |  4 ++--
 sqlparse/filters/others.py            |  6 +++---
 sqlparse/keywords.py                  |  8 ++++----
 sqlparse/sql.py                       | 10 ++++++----
5 files changed, 17 insertions, 15 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 2cc7601..5ff819c 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -225,8 +225,8 @@ def group_identifier_list(tlist):
     m_role = T.Keyword, ('null', 'role')
     sqlcls = (sql.Function, sql.Case, sql.Identifier, sql.Comparison,
               sql.IdentifierList, sql.Operation)
-    ttypes = (T_NUMERICAL + T_STRING + T_NAME +
-              (T.Keyword, T.Comment, T.Wildcard))
+    ttypes = (T_NUMERICAL + T_STRING + T_NAME
+              + (T.Keyword, T.Comment, T.Wildcard))

     def match(token):
         return token.match(T.Punctuation, ',')
diff --git a/sqlparse/engine/statement_splitter.py b/sqlparse/engine/statement_splitter.py
index d610b5e..444b46a 100644
--- a/sqlparse/engine/statement_splitter.py
+++ b/sqlparse/engine/statement_splitter.py
@@ -65,8 +65,8 @@ class StatementSplitter(object):
             self._begin_depth = max(0, self._begin_depth - 1)
             return -1

-        if (unified in ('IF', 'FOR', 'WHILE') and
-                self._is_create and self._begin_depth > 0):
+        if (unified in ('IF', 'FOR', 'WHILE')
+                and self._is_create and self._begin_depth > 0):
             return 1

         if unified in ('END IF', 'END FOR', 'END WHILE'):
diff --git a/sqlparse/filters/others.py b/sqlparse/filters/others.py
index df4d861..eeb91f1 100644
--- a/sqlparse/filters/others.py
+++ b/sqlparse/filters/others.py
@@ -23,9 +23,9 @@ class StripCommentsFilter(object):
             nidx, next_ = tlist.token_next(tidx, skip_ws=False)
             # Replace by whitespace if prev and next exist and if they're not
             # whitespaces. This doesn't apply if prev or next is a parenthesis.
-            if (prev_ is None or next_ is None or
-                    prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
-                    next_.is_whitespace or next_.match(T.Punctuation, ')')):
+            if (prev_ is None or next_ is None
+                    or prev_.is_whitespace or prev_.match(T.Punctuation, '(')
+                    or next_.is_whitespace or next_.match(T.Punctuation, ')')):
                 tlist.tokens.remove(token)
             else:
                 tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py
index 985334d..6e22c8e 100644
--- a/sqlparse/keywords.py
+++ b/sqlparse/keywords.py
@@ -13,10 +13,10 @@ from sqlparse import tokens

 def is_keyword(value):
     val = value.upper()
-    return (KEYWORDS_COMMON.get(val) or
-            KEYWORDS_ORACLE.get(val) or
-            KEYWORDS_PLPGSQL.get(val) or
-            KEYWORDS.get(val, tokens.Name)), value
+    return (KEYWORDS_COMMON.get(val)
+            or KEYWORDS_ORACLE.get(val)
+            or KEYWORDS_PLPGSQL.get(val)
+            or KEYWORDS.get(val, tokens.Name)), value


 SQL_REGEX = {
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index a9b8dd1..2741e83 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -239,8 +239,9 @@ class TokenList(Token):
         ignored too.
         """
         # this on is inconsistent, using Comment instead of T.Comment...
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
-                                (skip_cm and imt(tk, t=T.Comment, i=Comment)))
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace)
+                                or (skip_cm and imt(tk,
+                                    t=T.Comment, i=Comment)))
         return self._token_matching(funcs)[1]

     def token_next_by(self, i=None, m=None, t=None, idx=-1, end=None):
@@ -276,8 +277,9 @@ class TokenList(Token):
         if idx is None:
             return None, None
         idx += 1  # alot of code usage current pre-compensates for this
-        funcs = lambda tk: not ((skip_ws and tk.is_whitespace) or
-                                (skip_cm and imt(tk, t=T.Comment, i=Comment)))
+        funcs = lambda tk: not ((skip_ws and tk.is_whitespace)
+                                or (skip_cm and imt(tk,
+                                    t=T.Comment, i=Comment)))
         return self._token_matching(funcs, idx, reverse=_reverse)

     def token_index(self, token, start=0):