summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSylvain Thénault <sylvain.thenault@logilab.fr>2013-12-04 19:15:36 +0100
committerSylvain Thénault <sylvain.thenault@logilab.fr>2013-12-04 19:15:36 +0100
commit677d71a9b71b73e3169e45167e372499f594bdc1 (patch)
tree0f4462fd67ea358ad1f4bd598e835e494a3df369
parentef800f8179613ab674609f3bb89edb98f489dc32 (diff)
downloadpylint-677d71a9b71b73e3169e45167e372499f594bdc1.tar.gz
fix indentation in various places. Damned googlers :p
-rw-r--r--checkers/format.py170
-rw-r--r--checkers/variables.py24
-rw-r--r--utils.py16
3 files changed, 105 insertions, 105 deletions
diff --git a/checkers/format.py b/checkers/format.py
index 470f4bf..f307f33 100644
--- a/checkers/format.py
+++ b/checkers/format.py
@@ -198,75 +198,75 @@ class FormatChecker(BaseTokenChecker):
self._keywords_with_parens.add('print')
def _check_keyword_parentheses(self, tokens, start):
- """Check that there are not unnecessary parens after a keyword.
-
- Parens are unnecessary if there is exactly one balanced outer pair on a
- line, and it is followed by a colon, and contains no commas (i.e. is not a
- tuple).
-
- Args:
- tokens: list of Tokens; the entire list of Tokens.
- start: int; the position of the keyword in the token list.
- """
- # If the next token is not a paren, we're fine.
- if tokens[start+1][1] != '(':
- return
-
- found_comma = False
- found_and_or = False
- depth = 0
- keyword_token = tokens[start][1]
- line_num = tokens[start][2][0]
-
- for i in xrange(start, len(tokens) - 1):
- token = tokens[i]
-
- # If we hit a newline, then assume any parens were for continuation.
- if token[0] == tokenize.NL:
- return
-
- if token[1] == '(':
- depth += 1
- elif token[1] == ')':
- depth -= 1
- if not depth:
- # ')' can't happen after if (foo), since it would be a syntax error.
- if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
- tokens[i+1][0] in (tokenize.NEWLINE, tokenize.ENDMARKER,
- tokenize.COMMENT)):
- # The empty tuple () is always accepted.
- if i == start + 2:
- return
- if keyword_token == 'not':
- if not found_and_or:
- self.add_message('C0325', line=line_num,
- args=keyword_token)
- elif keyword_token in ('return', 'yield'):
- self.add_message('C0325', line=line_num,
- args=keyword_token)
- elif keyword_token not in self._keywords_with_parens:
- if not (tokens[i+1][1] == 'in' and found_and_or):
- self.add_message('C0325', line=line_num,
- args=keyword_token)
- return
- elif depth == 1:
- # This is a tuple, which is always acceptable.
- if token[1] == ',':
- return
- # 'and' and 'or' are the only boolean operators with lower precedence
- # than 'not', so parens are only required when they are found.
- elif token[1] in ('and', 'or'):
- found_and_or = True
- # A yield inside an expression must always be in parentheses,
- # quit early without error.
- elif token[1] == 'yield':
- return
- # A generator expression always has a 'for' token in it, and
- # the 'for' token is only legal inside parens when it is in a
- # generator expression. The parens are necessary here, so bail
- # without an error.
- elif token[1] == 'for':
- return
+ """Check that there are not unnecessary parens after a keyword.
+
+ Parens are unnecessary if there is exactly one balanced outer pair on a
+ line, and it is followed by a colon, and contains no commas (i.e. is not a
+ tuple).
+
+ Args:
+ tokens: list of Tokens; the entire list of Tokens.
+ start: int; the position of the keyword in the token list.
+ """
+ # If the next token is not a paren, we're fine.
+ if tokens[start+1][1] != '(':
+ return
+
+ found_comma = False
+ found_and_or = False
+ depth = 0
+ keyword_token = tokens[start][1]
+ line_num = tokens[start][2][0]
+
+ for i in xrange(start, len(tokens) - 1):
+ token = tokens[i]
+
+ # If we hit a newline, then assume any parens were for continuation.
+ if token[0] == tokenize.NL:
+ return
+
+ if token[1] == '(':
+ depth += 1
+ elif token[1] == ')':
+ depth -= 1
+ if not depth:
+ # ')' can't happen after if (foo), since it would be a syntax error.
+ if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
+ tokens[i+1][0] in (tokenize.NEWLINE, tokenize.ENDMARKER,
+ tokenize.COMMENT)):
+ # The empty tuple () is always accepted.
+ if i == start + 2:
+ return
+ if keyword_token == 'not':
+ if not found_and_or:
+ self.add_message('C0325', line=line_num,
+ args=keyword_token)
+ elif keyword_token in ('return', 'yield'):
+ self.add_message('C0325', line=line_num,
+ args=keyword_token)
+ elif keyword_token not in self._keywords_with_parens:
+ if not (tokens[i+1][1] == 'in' and found_and_or):
+ self.add_message('C0325', line=line_num,
+ args=keyword_token)
+ return
+ elif depth == 1:
+ # This is a tuple, which is always acceptable.
+ if token[1] == ',':
+ return
+ # 'and' and 'or' are the only boolean operators with lower precedence
+ # than 'not', so parens are only required when they are found.
+ elif token[1] in ('and', 'or'):
+ found_and_or = True
+ # A yield inside an expression must always be in parentheses,
+ # quit early without error.
+ elif token[1] == 'yield':
+ return
+ # A generator expression always has a 'for' token in it, and
+ # the 'for' token is only legal inside parens when it is in a
+ # generator expression. The parens are necessary here, so bail
+ # without an error.
+ elif token[1] == 'for':
+ return
def _opening_bracket(self, tokens, i):
self._bracket_stack.append(tokens[i][1])
@@ -384,30 +384,30 @@ class FormatChecker(BaseTokenChecker):
return self._bracket_stack[-1] == left
def _prepare_token_dispatcher(self):
- raw = [
- (_KEYWORD_TOKENS,
- self._check_keyword_parentheses),
+ raw = [
+ (_KEYWORD_TOKENS,
+ self._check_keyword_parentheses),
- (_OPENING_BRACKETS, self._opening_bracket),
+ (_OPENING_BRACKETS, self._opening_bracket),
- (_CLOSING_BRACKETS, self._closing_bracket),
+ (_CLOSING_BRACKETS, self._closing_bracket),
- (['='], self._check_equals_spacing),
+ (['='], self._check_equals_spacing),
- (_SPACED_OPERATORS, self._check_surrounded_by_space),
+ (_SPACED_OPERATORS, self._check_surrounded_by_space),
- ([','], self._handle_comma),
+ ([','], self._handle_comma),
- ([':'], self._handle_colon),
+ ([':'], self._handle_colon),
- (['lambda'], self._open_lambda),
- ]
+ (['lambda'], self._open_lambda),
+ ]
- dispatch = {}
- for tokens, handler in raw:
- for token in tokens:
- dispatch[token] = handler
- return dispatch
+ dispatch = {}
+ for tokens, handler in raw:
+ for token in tokens:
+ dispatch[token] = handler
+ return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
diff --git a/checkers/variables.py b/checkers/variables.py
index 0d35884..7f4ff1b 100644
--- a/checkers/variables.py
+++ b/checkers/variables.py
@@ -556,7 +556,7 @@ builtins. Remember that you should avoid to define new builtins when possible.'
"""
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
-
+
targets = node.targets[0].itered()
if any(not isinstance(target_node, astroid.AssName)
for target_node in targets):
@@ -572,19 +572,19 @@ builtins. Remember that you should avoid to define new builtins when possible.'
""" Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
- if isinstance(infered, (astroid.Tuple, astroid.List)):
+ if isinstance(infered, (astroid.Tuple, astroid.List)):
values = infered.itered()
if len(targets) != len(values):
if node.root().name == infered.root().name:
- location = infered.lineno or 'unknown'
+ location = infered.lineno or 'unknown'
else:
- location = '%s (%s)' % (infered.lineno or 'unknown',
- infered.root().name)
+ location = '%s (%s)' % (infered.lineno or 'unknown',
+ infered.root().name)
self.add_message('unbalanced-tuple-unpacking',
node=node,
args=(location,
- len(targets),
+ len(targets),
len(values)))
else:
if infered is astroid.YES:
@@ -596,14 +596,14 @@ builtins. Remember that you should avoid to define new builtins when possible.'
except astroid.NotFoundError:
continue
else:
- break
- else:
+ break
+ else:
if node.root().name == infered.root().name:
- location = infered.lineno or 'unknown'
+ location = infered.lineno or 'unknown'
else:
- location = '%s (%s)' % (infered.lineno or 'unknown',
- infered.root().name)
-
+ location = '%s (%s)' % (infered.lineno or 'unknown',
+ infered.root().name)
+
self.add_message('unpacking-non-sequence',
node=node,
args=(location, ))
diff --git a/utils.py b/utils.py
index 3aa16a7..05e8b41 100644
--- a/utils.py
+++ b/utils.py
@@ -208,14 +208,14 @@ class MessagesHandlerMixIn(object):
self._suppression_mapping = {}
def add_renamed_message(self, old_id, old_symbol, new_symbol):
- """Register the old ID and symbol for a warning that was renamed.
-
- This allows users to keep using the old ID/symbol in suppressions.
- """
- msg = self.check_message_id(new_symbol)
- msg.old_names.append((old_id, old_symbol))
- self._alternative_names[old_id] = msg
- self._alternative_names[old_symbol] = msg
+ """Register the old ID and symbol for a warning that was renamed.
+
+ This allows users to keep using the old ID/symbol in suppressions.
+ """
+ msg = self.check_message_id(new_symbol)
+ msg.old_names.append((old_id, old_symbol))
+ self._alternative_names[old_id] = msg
+ self._alternative_names[old_symbol] = msg
def register_messages(self, checker):
"""register a dictionary of messages