author     Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>  2022-09-03 17:06:40 +0200
committer  Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>  2022-09-03 18:05:24 +0200
commit     5dda4d45f396c28a70f1419c510836357f97dcbc (patch)
tree       b23792dccec2cc7e9d6bb2ebf0fcf88b806e8263 /tests/benchmark
parent     880095c7266134a218e5f97075d1adeaa5b34b79 (diff)
download   pylint-git-5dda4d45f396c28a70f1419c510836357f97dcbc.tar.gz
Add typing to benchmark tests
Diffstat (limited to 'tests/benchmark')
-rw-r--r--  tests/benchmark/test_baseline_benchmarks.py  39
1 file changed, 27 insertions, 12 deletions
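
The pattern applied throughout this diff is to annotate pytest-benchmark's injected `benchmark` fixture with `BenchmarkFixture` (imported from `pytest_benchmark.fixture`) and to give each test an explicit `-> None` return type; because pytest injects fixtures by name at runtime, the parameter is otherwise untyped for mypy and IDEs. A minimal sketch of the same pattern outside pylint — the test function and workload below are hypothetical and not part of this commit, only the fixture typing mirrors the diff:

    # illustrative sketch only; the test name and workload are hypothetical
    from pytest_benchmark.fixture import BenchmarkFixture


    def test_sum_small_range(benchmark: BenchmarkFixture) -> None:
        """benchmark(...) repeatedly calls the target and returns its result."""
        result = benchmark(sum, range(1_000))
        assert result == 499_500

With the annotation in place, attribute access on `benchmark` (e.g. `benchmark.pedantic`) is type-checked instead of falling back to `Any`.
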
diff --git a/tests/benchmark/test_baseline_benchmarks.py b/tests/benchmark/test_baseline_benchmarks.py
index 6fb1cdf18..42521b593 100644
--- a/tests/benchmark/test_baseline_benchmarks.py
+++ b/tests/benchmark/test_baseline_benchmarks.py
@@ -13,6 +13,7 @@ from unittest.mock import patch
import pytest
from astroid import nodes
+from pytest_benchmark.fixture import BenchmarkFixture
from pylint.checkers import BaseRawFileChecker
from pylint.lint import PyLinter, check_parallel
@@ -22,7 +23,7 @@ from pylint.typing import FileItem
from pylint.utils import register_plugins
-def _empty_filepath():
+def _empty_filepath() -> str:
return os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "input", "benchmark_minimal_file.py"
@@ -114,7 +115,7 @@ class TestEstablishBaselineBenchmarks:
)
lot_of_files = 500
- def test_baseline_benchmark_j1(self, benchmark):
+ def test_baseline_benchmark_j1(self, benchmark: BenchmarkFixture) -> None:
"""Establish a baseline of pylint performance with no work.
We will add extra Checkers in other benchmarks.
@@ -131,7 +132,7 @@ class TestEstablishBaselineBenchmarks:
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@pytest.mark.needs_two_cores
- def test_baseline_benchmark_j2(self, benchmark):
+ def test_baseline_benchmark_j2(self, benchmark: BenchmarkFixture) -> None:
"""Establish a baseline of pylint performance with no work across threads.
Same as `test_baseline_benchmark_j1` but we use -j2 with 2 fake files to
@@ -154,7 +155,9 @@ class TestEstablishBaselineBenchmarks:
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@pytest.mark.needs_two_cores
- def test_baseline_benchmark_check_parallel_j2(self, benchmark):
+ def test_baseline_benchmark_check_parallel_j2(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Should demonstrate times very close to `test_baseline_benchmark_j2`."""
linter = PyLinter(reporter=Reporter())
@@ -167,7 +170,7 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_lots_of_files_j1(self, benchmark):
+ def test_baseline_lots_of_files_j1(self, benchmark: BenchmarkFixture) -> None:
"""Establish a baseline with only 'main' checker being run in -j1.
We do not register any checkers except the default 'main', so the cost is just
@@ -187,7 +190,7 @@ class TestEstablishBaselineBenchmarks:
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@pytest.mark.needs_two_cores
- def test_baseline_lots_of_files_j2(self, benchmark):
+ def test_baseline_lots_of_files_j2(self, benchmark: BenchmarkFixture) -> None:
"""Establish a baseline with only 'main' checker being run in -j2.
As with the -j1 variant above `test_baseline_lots_of_files_j1`, we do not
@@ -207,7 +210,9 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_lots_of_files_j1_empty_checker(self, benchmark):
+ def test_baseline_lots_of_files_j1_empty_checker(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Baselines pylint for a single extra checker being run in -j1, for N-files.
We use a checker that does no work, so the cost is just that of the system at
@@ -228,7 +233,9 @@ class TestEstablishBaselineBenchmarks:
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@pytest.mark.needs_two_cores
- def test_baseline_lots_of_files_j2_empty_checker(self, benchmark):
+ def test_baseline_lots_of_files_j2_empty_checker(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Baselines pylint for a single extra checker being run in -j2, for N-files.
We use a checker that does no work, so the cost is just that of the system at
@@ -248,7 +255,9 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_benchmark_j1_single_working_checker(self, benchmark):
+ def test_baseline_benchmark_j1_single_working_checker(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Establish a baseline of single-worker performance for PyLinter.
Here we mimic a single Checker that does some work so that we can see the
@@ -275,7 +284,9 @@ class TestEstablishBaselineBenchmarks:
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@pytest.mark.needs_two_cores
- def test_baseline_benchmark_j2_single_working_checker(self, benchmark):
+ def test_baseline_benchmark_j2_single_working_checker(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Establishes baseline of multi-worker performance for PyLinter/check_parallel.
We expect this benchmark to take less time than test_baseline_benchmark_j1,
@@ -302,7 +313,9 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_benchmark_j1_all_checks_single_file(self, benchmark):
+ def test_baseline_benchmark_j1_all_checks_single_file(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Runs a single file, with -j1, against all checkers/Extensions."""
args = [self.empty_filepath, "--enable=all", "--enable-all-extensions"]
runner = benchmark(Run, args, reporter=Reporter(), exit=False)
@@ -314,7 +327,9 @@ class TestEstablishBaselineBenchmarks:
runner.linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(runner.linter.reporter.messages)}"
- def test_baseline_benchmark_j1_all_checks_lots_of_files(self, benchmark):
+ def test_baseline_benchmark_j1_all_checks_lots_of_files(
+ self, benchmark: BenchmarkFixture
+ ) -> None:
"""Runs lots of files, with -j1, against all plug-ins.
... that's the intent at least.