author    Pierre Sassoulas <pierre.sassoulas@gmail.com>  2022-04-19 17:16:59 +0200
committer GitHub <noreply@github.com>                    2022-04-19 17:16:59 +0200
commit    58a4067d4b70c5e05eca23d57c3e6ad30d1788bd (patch)
tree      e921a773234b57e298cbb17c64426c802633035e
parent    ac6efbff8ba8145407dad16b927b8b9df5976ee6 (diff)
download  pylint-git-58a4067d4b70c5e05eca23d57c3e6ad30d1788bd.tar.gz
Add typing to BaseChecker.process_tokens (#6273)
Co-authored-by: Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
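With this change, every process_tokens override shares the signature (tokens: list[TokenInfo]) -> None. As a minimal sketch of what a downstream checker looks like against the typed API; the checker name, message id, and tab-detection logic below are hypothetical illustrations, not part of this commit:

from __future__ import annotations

from tokenize import TokenInfo

from pylint.checkers import BaseTokenChecker


class TabChecker(BaseTokenChecker):
    """Hypothetical checker: flags literal tab characters in source lines."""

    name = "hypothetical-tab-checker"
    msgs = {
        "W9901": (
            "Tab character found",
            "hypothetical-tab-found",
            "Illustrative message definition only.",
        ),
    }

    def process_tokens(self, tokens: list[TokenInfo]) -> None:
        # tokens is now a concrete list[TokenInfo], so attribute access
        # such as token.line and token.start type-checks.
        for token in tokens:
            if "\t" in token.line:
                self.add_message("hypothetical-tab-found", line=token.start[0])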
-rw-r--r--  pylint/checkers/base_checker.py                       3
-rw-r--r--  pylint/checkers/format.py                             6
-rw-r--r--  pylint/checkers/misc.py                               2
-rw-r--r--  pylint/checkers/raw_metrics.py                        9
-rw-r--r--  pylint/checkers/refactoring/refactoring_checker.py    2
-rw-r--r--  pylint/checkers/spelling.py                           2
-rw-r--r--  pylint/checkers/strings.py                            2
-rw-r--r--  pylint/extensions/check_elif.py                       3
-rw-r--r--  pylint/interfaces.py                                  3
-rw-r--r--  pylint/lint/pylinter.py                               2
10 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/pylint/checkers/base_checker.py b/pylint/checkers/base_checker.py
index c8e65c87a..fd01bfaa1 100644
--- a/pylint/checkers/base_checker.py
+++ b/pylint/checkers/base_checker.py
@@ -7,6 +7,7 @@ from __future__ import annotations
import functools
import warnings
from inspect import cleandoc
+from tokenize import TokenInfo
from typing import TYPE_CHECKING, Any
from astroid import nodes
@@ -210,6 +211,6 @@ class BaseChecker(_ArgumentsProvider):
class BaseTokenChecker(BaseChecker):
"""Base class for checkers that want to have access to the token stream."""
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[TokenInfo]) -> None:
"""Should be overridden by subclasses."""
raise NotImplementedError()
diff --git a/pylint/checkers/format.py b/pylint/checkers/format.py
index d2fcab13e..590daeed4 100644
--- a/pylint/checkers/format.py
+++ b/pylint/checkers/format.py
@@ -428,7 +428,7 @@ class FormatChecker(BaseTokenChecker):
dispatch[token] = handler
return dispatch
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Process tokens and search for :
_ too long lines (i.e. longer than <max_chars>)
@@ -507,14 +507,14 @@ class FormatChecker(BaseTokenChecker):
"too-many-lines"
)[0]
names = (message_definition.msgid, "too-many-lines")
- line = next(
+ lineno = next(
filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
1,
)
self.add_message(
"too-many-lines",
args=(line_num, self.linter.config.max_module_lines),
- line=line,
+ line=lineno,
)
# See if there are any trailing lines. Do not complain about empty
diff --git a/pylint/checkers/misc.py b/pylint/checkers/misc.py
index 73ab25e45..dea45e3d6 100644
--- a/pylint/checkers/misc.py
+++ b/pylint/checkers/misc.py
@@ -134,7 +134,7 @@ class EncodingChecker(BaseChecker):
for lineno, line in enumerate(stream):
self._check_encoding(lineno + 1, line, encoding)
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Inspect the source to find fixme problems."""
if not self.linter.config.notes:
return
diff --git a/pylint/checkers/raw_metrics.py b/pylint/checkers/raw_metrics.py
index 9e0fe08f1..3940bcfff 100644
--- a/pylint/checkers/raw_metrics.py
+++ b/pylint/checkers/raw_metrics.py
@@ -73,7 +73,7 @@ class RawMetricsChecker(BaseTokenChecker):
"""Init statistics."""
self.linter.stats.reset_code_count()
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Update stats."""
i = 0
tokens = list(tokens)
@@ -86,7 +86,9 @@ class RawMetricsChecker(BaseTokenChecker):
JUNK = (tokenize.NL, tokenize.INDENT, tokenize.NEWLINE, tokenize.ENDMARKER)
-def get_type(tokens, start_index):
+def get_type(
+ tokens: list[tokenize.TokenInfo], start_index: int
+) -> tuple[int, int, Literal["code", "docstring", "comment", "empty"]]:
"""Return the line type : docstring, comment, code, empty."""
i = start_index
start = tokens[i][2]
@@ -109,7 +111,8 @@ def get_type(tokens, start_index):
line_type = "empty"
elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
i += 1
- return i, pos[0] - start[0] + 1, line_type
+ # Mypy fails to infer the literal of line_type
+ return i, pos[0] - start[0] + 1, line_type # type: ignore[return-value]
def register(linter: PyLinter) -> None:
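The type: ignore above is needed because mypy widens a local variable assigned bare string literals to str, so it cannot prove that line_type is one of the four literals in the annotated return type. A stripped-down reproduction of the pattern, with hypothetical names:

from typing import Literal


def classify(is_empty: bool) -> Literal["code", "empty"]:
    # mypy infers line_type as plain str, not the Literal union,
    # so returning it against the annotation needs a targeted ignore.
    line_type = "code"
    if is_empty:
        line_type = "empty"
    return line_type  # type: ignore[return-value]

Annotating the local (line_type: Literal["code", "empty"] = "code") would avoid the ignore, at the cost of repeating the union.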
diff --git a/pylint/checkers/refactoring/refactoring_checker.py b/pylint/checkers/refactoring/refactoring_checker.py
index 79ae3ce90..d0248fb64 100644
--- a/pylint/checkers/refactoring/refactoring_checker.py
+++ b/pylint/checkers/refactoring/refactoring_checker.py
@@ -590,7 +590,7 @@ class RefactoringChecker(checkers.BaseTokenChecker):
self.add_message("simplifiable-if-statement", node=node, args=(reduced_to,))
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
# Process tokens and look for 'if' or 'elif'
for index, token in enumerate(tokens):
token_string = token[1]
diff --git a/pylint/checkers/spelling.py b/pylint/checkers/spelling.py
index 4eb5e5de0..b6c770fbd 100644
--- a/pylint/checkers/spelling.py
+++ b/pylint/checkers/spelling.py
@@ -402,7 +402,7 @@ class SpellingChecker(BaseTokenChecker):
args = (word, original_line, indicator, f"'{all_suggestion}'")
self.add_message(msgid, line=line_num, args=args)
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
if not self.initialized:
return
diff --git a/pylint/checkers/strings.py b/pylint/checkers/strings.py
index 6ba541b69..8d8369617 100644
--- a/pylint/checkers/strings.py
+++ b/pylint/checkers/strings.py
@@ -698,7 +698,7 @@ class StringConstantChecker(BaseTokenChecker):
def process_module(self, node: nodes.Module) -> None:
self._unicode_literals = "unicode_literals" in node.future_imports
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
encoding = "ascii"
for i, (tok_type, token, start, _, line) in enumerate(tokens):
if tok_type == tokenize.ENCODING:
diff --git a/pylint/extensions/check_elif.py b/pylint/extensions/check_elif.py
index 874220459..61609cf01 100644
--- a/pylint/extensions/check_elif.py
+++ b/pylint/extensions/check_elif.py
@@ -4,6 +4,7 @@
from __future__ import annotations
+from tokenize import TokenInfo
from typing import TYPE_CHECKING
from astroid import nodes
@@ -38,7 +39,7 @@ class ElseifUsedChecker(BaseTokenChecker):
def _init(self):
self._elifs = {}
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[TokenInfo]) -> None:
"""Process tokens and look for 'if' or 'elif'."""
self._elifs = {
begin: token for _, token, begin, _, _ in tokens if token in {"elif", "if"}
diff --git a/pylint/interfaces.py b/pylint/interfaces.py
index fe1320b8a..c5c564aea 100644
--- a/pylint/interfaces.py
+++ b/pylint/interfaces.py
@@ -7,6 +7,7 @@
from __future__ import annotations
from collections import namedtuple
+from tokenize import TokenInfo
from typing import TYPE_CHECKING
from astroid import nodes
@@ -88,7 +89,7 @@ class IRawChecker(IChecker):
class ITokenChecker(IChecker):
"""Interface for checkers that need access to the token list."""
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[TokenInfo]) -> None:
"""Process a module.
Tokens is a list of all source code tokens in the file.
diff --git a/pylint/lint/pylinter.py b/pylint/lint/pylinter.py
index 5a2771e88..6b0f36a8e 100644
--- a/pylint/lint/pylinter.py
+++ b/pylint/lint/pylinter.py
@@ -520,7 +520,7 @@ class PyLinter(
# block level option handling #############################################
# see func_block_disable_msg.py test case for expected behaviour
- def process_tokens(self, tokens):
+ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
"""Process tokens from the current module to search for module/block level
options.
"""