author     Andi Albrecht <albrecht.andi@gmail.com>  2010-07-04 20:53:02 +0200
committer  Andi Albrecht <albrecht.andi@gmail.com>  2010-07-04 20:53:02 +0200
commit     3bfa75a7d2042cdd56245034b9467b28f578ce9c (patch)
tree       7617a56c60146f544778b25c1188159bfcd9b961 /sqlparse
parent     2a090575f6d0571208a6d3a6ae57c31c4adbc653 (diff)
download   sqlparse-3bfa75a7d2042cdd56245034b9467b28f578ce9c.tar.gz
PEP8: Code cleanup.
Diffstat (limited to 'sqlparse')
-rw-r--r--  sqlparse/engine/__init__.py   4
-rw-r--r--  sqlparse/engine/grouping.py  50
-rw-r--r--  sqlparse/filters.py          18
-rw-r--r--  sqlparse/formatter.py         1
-rw-r--r--  sqlparse/lexer.py            16
-rw-r--r--  sqlparse/sql.py              12
-rw-r--r--  sqlparse/tokens.py           35
7 files changed, 76 insertions, 60 deletions
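
Nearly every hunk below applies one of a few mechanical PEP8 fixes: a single space around binary operators, no spaces around '=' in keyword defaults, two blank lines between top-level definitions, over-long lines wrapped, and trailing blank lines removed. A minimal before/after sketch of the two most common rules (illustrative only; the names are made up, not taken from sqlparse):

# Before: flagged by the pep8 checker (missing whitespace around an
# operator; only one blank line before a top-level def).
def shift(seq, idx):
    return seq[idx+1]
def tail(seq):
    return seq[1:]


# After: the spacing this commit enforces.
def shift(seq, idx):
    return seq[idx + 1]


def tail(seq):
    return seq[1:]
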
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 02f84c6..3e2822b 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -49,6 +49,7 @@ class FilterStack(object):
stream = splitter.process(self, stream)
if self._grouping:
+
def _group(stream):
for stmt in stream:
grouping.group(stmt)
@@ -56,6 +57,7 @@ class FilterStack(object):
stream = _group(stream)
if self.stmtprocess:
+
def _run1(stream):
ret = []
for stmt in stream:
@@ -66,6 +68,7 @@ class FilterStack(object):
stream = _run1(stream)
if self.postprocess:
+
def _run2(stream):
for stmt in stream:
stmt.tokens = list(self._flatten(stmt.tokens))
@@ -75,4 +78,3 @@ class FilterStack(object):
stream = _run2(stream)
return stream
-
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 010a282..c39a5cc 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -8,7 +8,7 @@ from sqlparse import tokens as T
def _group_left_right(tlist, ttype, value, cls,
check_right=lambda t: True,
- check_left = lambda t: True,
+ check_left=lambda t: True,
include_semicolon=False):
[_group_left_right(sgroup, ttype, value, cls, check_right,
include_semicolon) for sgroup in tlist.get_sublists()
@@ -19,10 +19,10 @@ def _group_left_right(tlist, ttype, value, cls,
right = tlist.token_next(tlist.token_index(token))
left = tlist.token_prev(tlist.token_index(token))
if right is None or not check_right(right):
- token = tlist.token_next_match(tlist.token_index(token)+1,
+ token = tlist.token_next_match(tlist.token_index(token) + 1,
ttype, value)
elif left is None or not check_right(left):
- token = tlist.token_next_match(tlist.token_index(token)+1,
+ token = tlist.token_next_match(tlist.token_index(token) + 1,
ttype, value)
else:
if include_semicolon:
@@ -42,9 +42,10 @@ def _group_left_right(tlist, ttype, value, cls,
left.tokens.extend(tokens)
for t in tokens:
tlist.tokens.remove(t)
- token = tlist.token_next_match(tlist.token_index(left)+1,
+ token = tlist.token_next_match(tlist.token_index(left) + 1,
ttype, value)
+
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon=False, recurse=False):
def _find_matching(i, tl, stt, sva, ett, eva):
@@ -70,7 +71,7 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
end = _find_matching(tidx, tlist, start_ttype, start_value,
end_ttype, end_value)
if end is None:
- idx = tidx+1
+ idx = tidx + 1
else:
if include_semicolon:
next_ = tlist.token_next(tlist.token_index(end))
@@ -79,17 +80,21 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
_group_matching(group, start_ttype, start_value,
end_ttype, end_value, cls, include_semicolon)
- idx = tlist.token_index(group)+1
+ idx = tlist.token_index(group) + 1
token = tlist.token_next_match(idx, start_ttype, start_value)
+
def group_if(tlist):
_group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True)
+
def group_for(tlist):
_group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP',
sql.For, True)
+
def group_as(tlist):
+
def _right_valid(token):
# Currently limited to DML/DDL. Maybe additional more non SQL reserved
# keywords should appear here (see issue8).
@@ -97,11 +102,14 @@ def group_as(tlist):
_group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
check_right=_right_valid)
+
def group_assignment(tlist):
_group_left_right(tlist, T.Assignment, ':=', sql.Assignment,
include_semicolon=True)
+
def group_comparsion(tlist):
+
def _parts_valid(token):
return (token.ttype in (T.String.Symbol, T.Name, T.Number,
T.Number.Integer, T.Literal)
@@ -122,8 +130,7 @@ def group_identifier(tlist):
or y.ttype is T.Operator),
lambda y: (y.ttype in (T.String.Symbol,
T.Name,
- T.Wildcard))
- ))
+ T.Wildcard))))
for t in tl.tokens[i:]:
if x.next()(t):
yield t
@@ -140,13 +147,13 @@ def group_identifier(tlist):
if token is None:
token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
while token:
- identifier_tokens = [token]+list(
+ identifier_tokens = [token] + list(
_consume_cycle(tlist,
- tlist.token_index(token)+1))
+ tlist.token_index(token) + 1))
if not (len(identifier_tokens) == 1
and isinstance(identifier_tokens[0], sql.Function)):
group = tlist.group_tokens(sql.Identifier, identifier_tokens)
- idx = tlist.token_index(group)+1
+ idx = tlist.token_index(group) + 1
else:
idx += 1
token = tlist.token_next_by_instance(idx, sql.Function)
@@ -182,7 +189,7 @@ def group_identifier_list(tlist):
if not bpassed or not apassed:
# Something's wrong here, skip ahead to next ","
start = None
- tcomma = tlist.token_next_match(tlist.token_index(tcomma)+1,
+ tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
T.Punctuation, ',')
else:
if start is None:
@@ -193,7 +200,7 @@ def group_identifier_list(tlist):
tokens = tlist.tokens_between(start, after)
group = tlist.group_tokens(sql.IdentifierList, tokens)
start = None
- tcomma = tlist.token_next_match(tlist.token_index(group)+1,
+ tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
T.Punctuation, ',')
else:
tcomma = next_
@@ -203,6 +210,7 @@ def group_parenthesis(tlist):
_group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')',
sql.Parenthesis)
+
def group_comments(tlist):
[group_comments(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Comment)]
@@ -210,7 +218,7 @@ def group_comments(tlist):
token = tlist.token_next_by_type(idx, T.Comment)
while token:
tidx = tlist.token_index(token)
- end = tlist.token_not_matching(tidx+1,
+ end = tlist.token_not_matching(tidx + 1,
[lambda t: t.ttype in T.Comment,
lambda t: t.is_whitespace()])
if end is None:
@@ -223,6 +231,7 @@ def group_comments(tlist):
idx = tlist.token_index(group)
token = tlist.token_next_by_type(idx, T.Comment)
+
def group_where(tlist):
[group_where(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Where)]
@@ -231,16 +240,17 @@ def group_where(tlist):
stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION')
while token:
tidx = tlist.token_index(token)
- end = tlist.token_next_match(tidx+1, T.Keyword, stopwords)
+ end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords)
if end is None:
end = tlist._groupable_tokens[-1]
else:
- end = tlist.tokens[tlist.token_index(end)-1]
+ end = tlist.tokens[tlist.token_index(end) - 1]
group = tlist.group_tokens(sql.Where,
tlist.tokens_between(token, end))
idx = tlist.token_index(group)
token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
+
def group_aliased(tlist):
[group_aliased(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Identifier)]
@@ -253,7 +263,7 @@ def group_aliased(tlist):
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
- idx = tlist.token_index(token)+1
+ idx = tlist.token_index(token) + 1
token = tlist.token_next_by_instance(idx, sql.Identifier)
@@ -269,11 +279,11 @@ def group_functions(tlist):
while token:
next_ = tlist.token_next(token)
if not isinstance(next_, sql.Parenthesis):
- idx = tlist.token_index(token)+1
+ idx = tlist.token_index(token) + 1
else:
func = tlist.group_tokens(sql.Function,
tlist.tokens_between(token, next_))
- idx = tlist.token_index(func)+1
+ idx = tlist.token_index(func) + 1
token = tlist.token_next_by_type(idx, T.Name)
@@ -291,5 +301,5 @@ def group(tlist):
group_comparsion,
group_identifier_list,
group_if,
- group_for,]:
+ group_for]:
func(tlist)
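
The grouping functions above fold a flat token stream into nested sql.* nodes; group() runs them in order and is applied by the engine when a statement is parsed. A small usage sketch via the public API (the statement text is illustrative, and it assumes sqlparse.parse() enables grouping, which is its documented behaviour):

import sqlparse

# parse() returns a tuple of Statement objects with grouping applied.
stmt = sqlparse.parse('SELECT a, b FROM t WHERE a = 1;')[0]
for tok in stmt.tokens:
    # After grouping, the column list and the WHERE clause show up as
    # single sub-trees (e.g. IdentifierList, Where) instead of flat tokens.
    print('%-16s %r' % (type(tok).__name__, str(tok)))
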
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index a31e0b4..2015674 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -119,15 +119,15 @@ class ReindentFilter(Filter):
def _get_offset(self, token):
all_ = list(self._curr_stmt.flatten())
idx = all_.index(token)
- raw = ''.join(unicode(x) for x in all_[:idx+1])
+ raw = ''.join(unicode(x) for x in all_[:idx + 1])
line = raw.splitlines()[-1]
# Now take current offset into account and return relative offset.
- full_offset = len(line)-(len(self.char*(self.width*self.indent)))
+ full_offset = len(line) - len(self.char * (self.width * self.indent))
return full_offset - self.offset
def nl(self):
# TODO: newline character should be configurable
- ws = '\n'+(self.char*((self.indent*self.width)+self.offset))
+ ws = '\n' + (self.char * ((self.indent * self.width) + self.offset))
return sql.Token(T.Whitespace, ws)
def _split_kwds(self, tlist):
@@ -151,7 +151,7 @@ class ReindentFilter(Filter):
else:
nl = self.nl()
tlist.insert_before(token, nl)
- token = tlist.token_next_match(tlist.token_index(nl)+offset,
+ token = tlist.token_next_match(tlist.token_index(nl) + offset,
T.Keyword, split_words, regex=True)
def _split_statements(self, tlist):
@@ -165,7 +165,7 @@ class ReindentFilter(Filter):
if prev:
nl = self.nl()
tlist.insert_before(token, nl)
- token = tlist.token_next_by_type(tlist.token_index(token)+1,
+ token = tlist.token_next_by_type(tlist.token_index(token) + 1,
(T.Keyword.DDL, T.Keyword.DML))
def _process(self, tlist):
@@ -199,7 +199,7 @@ class ReindentFilter(Filter):
identifiers = tlist.get_identifiers()
if len(identifiers) > 1 and not tlist.within(sql.Function):
first = list(identifiers[0].flatten())[0]
- num_offset = self._get_offset(first)-len(first.value)
+ num_offset = self._get_offset(first) - len(first.value)
self.offset += num_offset
for token in identifiers[1:]:
tlist.insert_before(token, self.nl())
@@ -210,12 +210,12 @@ class ReindentFilter(Filter):
is_first = True
num_offset = None
case = tlist.tokens[0]
- outer_offset = self._get_offset(case)-len(case.value)
+ outer_offset = self._get_offset(case) - len(case.value)
self.offset += outer_offset
for cond, value in tlist.get_cases():
if is_first:
is_first = False
- num_offset = self._get_offset(cond[0])-len(cond[0].value)
+ num_offset = self._get_offset(cond[0]) - len(cond[0].value)
self.offset += num_offset
continue
if cond is None:
@@ -335,7 +335,7 @@ class OutputPythonFilter(Filter):
after_lb = token.value.split('\n', 1)[1]
yield sql.Token(T.Text, " '")
yield sql.Token(T.Whitespace, '\n')
- for i in range(len(varname)+4):
+ for i in range(len(varname) + 4):
yield sql.Token(T.Whitespace, ' ')
yield sql.Token(T.Text, "'")
if after_lb: # it's the indendation
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index 1955b5d..c03e9a9 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -118,4 +118,3 @@ def build_filter_stack(stack, options):
stack.postprocess.append(fltr)
return stack
-
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 0e00687..38acfcd 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -32,6 +32,7 @@ class combined(tuple):
# tuple.__init__ doesn't do anything
pass
+
def is_keyword(value):
test = value.upper()
return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value
@@ -84,7 +85,8 @@ class LexerMeta(type):
% (tdef[0], state, cls, err)))
assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
- 'token type must be simple type or callable, not %r' % (tdef[1],)
+ ('token type must be simple type or callable, not %r'
+ % (tdef[1],))
if len(tdef) == 2:
new_state = None
@@ -108,7 +110,8 @@ class LexerMeta(type):
cls._tmpname += 1
itokens = []
for istate in tdef2:
- assert istate != state, 'circular state ref %r' % istate
+ assert istate != state, \
+ 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
@@ -147,8 +150,6 @@ class LexerMeta(type):
return type.__call__(cls, *args, **kwds)
-
-
class Lexer:
__metaclass__ = LexerMeta
@@ -176,7 +177,8 @@ class Lexer:
(r'[0-9]+', tokens.Number.Integer),
# TODO: Backslash escapes?
(r"(''|'.*?[^\\]')", tokens.String.Single),
- (r'(""|".*?[^\\]")', tokens.String.Symbol), # not a real string literal in ANSI SQL
+ # not a real string literal in ANSI SQL:
+ (r'(""|".*?[^\\]")', tokens.String.Symbol),
(r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword),
(r'END( IF| LOOP)?\b', tokens.Keyword),
(r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL),
@@ -189,8 +191,7 @@ class Lexer:
(r'\*/', tokens.Comment.Multiline, '#pop'),
(r'[^/\*]+', tokens.Comment.Multiline),
(r'[/*]', tokens.Comment.Multiline)
- ]
- }
+ ]}
def __init__(self):
self.filters = []
@@ -246,7 +247,6 @@ class Lexer:
stream = apply_filters(stream, self.filters, self)
return stream
-
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
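
The rules edited above feed the tokenizer. A hedged sketch of driving it directly, assuming Lexer.get_tokens() keeps its pygments-style interface (the hunk above only shows the tail of that method):

from sqlparse.lexer import Lexer

# get_tokens() yields (tokentype, value) pairs; the double-quoted name is
# matched by the String.Symbol rule whose comment is reflowed above.
for ttype, value in Lexer().get_tokens('SELECT * FROM "tbl"'):
    print('%-28s %r' % (ttype, value))
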
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index cc4f434..4349cd5 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -43,7 +43,7 @@ class Token(object):
def _get_repr_value(self):
raw = unicode(self)
if len(raw) > 7:
- short = raw[:6]+u'...'
+ short = raw[:6] + u'...'
else:
short = raw
return re.sub('\s+', ' ', short)
@@ -271,7 +271,7 @@ class TokenList(Token):
return None
if not isinstance(idx, int):
idx = self.token_index(idx)
- while idx < len(self.tokens)-1:
+ while idx < len(self.tokens) - 1:
idx += 1
if self.tokens[idx].is_whitespace() and skip_ws:
continue
@@ -291,7 +291,8 @@ class TokenList(Token):
offset = 0
else:
offset = 1
- return self.tokens[self.token_index(start):self.token_index(end)+offset]
+ return self.tokens[
+ self.token_index(start):self.token_index(end) + offset]
def group_tokens(self, grp_cls, tokens):
"""Replace tokens by an instance of *grp_cls*."""
@@ -443,22 +444,27 @@ class Assignment(TokenList):
"""An assignment like 'var := val;'"""
__slots__ = ('value', 'ttype', 'tokens')
+
class If(TokenList):
"""An 'if' clause with possible 'else if' or 'else' parts."""
__slots__ = ('value', 'ttype', 'tokens')
+
class For(TokenList):
"""A 'FOR' loop."""
__slots__ = ('value', 'ttype', 'tokens')
+
class Comparsion(TokenList):
"""A comparsion used for example in WHERE clauses."""
__slots__ = ('value', 'ttype', 'tokens')
+
class Comment(TokenList):
"""A comment."""
__slots__ = ('value', 'ttype', 'tokens')
+
class Where(TokenList):
"""A WHERE clause."""
__slots__ = ('value', 'ttype', 'tokens')
diff --git a/sqlparse/tokens.py b/sqlparse/tokens.py
index 5d1458a..25dbe9f 100644
--- a/sqlparse/tokens.py
+++ b/sqlparse/tokens.py
@@ -40,31 +40,31 @@ class _TokenType(tuple):
return 'Token' + (self and '.' or '') + '.'.join(self)
-Token       = _TokenType()
+Token = _TokenType()
# Special token types
-Text        = Token.Text
-Whitespace  = Text.Whitespace
-Newline     = Whitespace.Newline
-Error       = Token.Error
+Text = Token.Text
+Whitespace = Text.Whitespace
+Newline = Whitespace.Newline
+Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
-Other       = Token.Other
+Other = Token.Other
# Common token types for source code
-Keyword     = Token.Keyword
-Name        = Token.Name
-Literal     = Token.Literal
-String      = Literal.String
-Number      = Literal.Number
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
Punctuation = Token.Punctuation
-Operator    = Token.Operator
-Comparsion  = Operator.Comparsion
-Wildcard    = Token.Wildcard
-Comment     = Token.Comment
-Assignment  = Token.Assignement
+Operator = Token.Operator
+Comparsion = Operator.Comparsion
+Wildcard = Token.Wildcard
+Comment = Token.Comment
+Assignment = Token.Assignement
# Generic types for non-source code
-Generic     = Token.Generic
+Generic = Token.Generic
# String and some others are not direct childs of Token.
# alias them:
@@ -81,4 +81,3 @@ Group = Token.Group
Group.Parenthesis = Token.Group.Parenthesis
Group.Comment = Token.Group.Comment
Group.Where = Token.Group.Where
-