author    Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>  2022-02-22 10:19:29 +0100
committer Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>  2022-02-22 16:14:07 +0100
commit    8dbb2eaf9c1e7a19f0d53995ae4600b24e7f9e18
tree      4fe56b461e20cf41324f8e9839b93429c1caec9c /tests/benchmark
parent    6b1413949deaebf95b2cdc0c1f67e28e16b3205e
download  pylint-git-8dbb2eaf9c1e7a19f0d53995ae4600b24e7f9e18.tar.gz
Don't assume runners have more than 2 cores available for benchmarking
Diffstat (limited to 'tests/benchmark')
-rw-r--r--  tests/benchmark/test_baseline_benchmarks.py | 48
1 file changed, 24 insertions(+), 24 deletions(-)
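The diff below replaces every hardcoded -j10 with -j2 so the benchmarks still exercise the parallel path on 2-core CI runners. A more general approach would be to derive the worker count from the host; here is a minimal sketch of that idea using only the standard library (cap_jobs is a hypothetical helper, not part of this patch):

    import os

    def cap_jobs(requested: int) -> int:
        """Clamp a requested -j value to the cores actually available.

        os.cpu_count() can return None on some platforms, so fall back
        to a single worker in that case.
        """
        available = os.cpu_count() or 1
        return max(1, min(requested, available))

    # On a 2-core runner, asking for 10 workers yields 2.
    print(cap_jobs(10))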
diff --git a/tests/benchmark/test_baseline_benchmarks.py b/tests/benchmark/test_baseline_benchmarks.py
index 4865a3bd5..5617996df 100644
--- a/tests/benchmark/test_baseline_benchmarks.py
+++ b/tests/benchmark/test_baseline_benchmarks.py
@@ -142,37 +142,37 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_benchmark_j10(self, benchmark):
+ def test_baseline_benchmark_j2(self, benchmark):
"""Establish a baseline of pylint performance with no work across threads.
- Same as `test_baseline_benchmark_j1` but we use -j10 with 10 fake files to
+ Same as `test_baseline_benchmark_j1` but we use -j2 with 2 fake files to
ensure the end-to-end system is invoked.
Because this is also so simple, if this regresses, something very serious has
happened.
"""
linter = PyLinter(reporter=Reporter())
- linter.config.jobs = 10
+ linter.config.jobs = 2
# Create file per worker, using all workers
fileinfos = [self.empty_filepath for _ in range(linter.config.jobs)]
- assert linter.config.jobs == 10
+ assert linter.config.jobs == 2
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(linter.check, fileinfos)
assert (
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_benchmark_check_parallel_j10(self, benchmark):
- """Should demonstrate times very close to `test_baseline_benchmark_j10`."""
+ def test_baseline_benchmark_check_parallel_j2(self, benchmark):
+ """Should demonstrate times very close to `test_baseline_benchmark_j2`."""
linter = PyLinter(reporter=Reporter())
# Create file per worker, using all workers
fileinfos = [self.empty_file_info for _ in range(linter.config.jobs)]
assert len(linter._checkers) == 1, "Should have 'master'"
- benchmark(check_parallel, linter, jobs=10, files=fileinfos)
+ benchmark(check_parallel, linter, jobs=2, files=fileinfos)
assert (
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
@@ -196,20 +196,20 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_lots_of_files_j10(self, benchmark):
- """Establish a baseline with only 'master' checker being run in -j10.
+ def test_baseline_lots_of_files_j2(self, benchmark):
+ """Establish a baseline with only 'master' checker being run in -j2.
As with the -j1 variant above `test_baseline_lots_of_files_j1`, we do not
register any checkers except the default 'master', so the cost is just that of
- the check_parallel system across 10 workers, plus the overhead of PyLinter
+ the check_parallel system across 2 workers, plus the overhead of PyLinter
"""
if benchmark.disabled:
benchmark(print, "skipping, only benchmark large file counts")
return  # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
- linter.config.jobs = 10
+ linter.config.jobs = 2
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
- assert linter.config.jobs == 10
+ assert linter.config.jobs == 2
assert len(linter._checkers) == 1, "Should have 'master'"
benchmark(linter.check, fileinfos)
assert (
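The large-file-count benchmarks are gated on benchmark.disabled so they only run during profiling sessions; the benchmark(print, ...) call before returning presumably keeps pytest-benchmark from flagging the fixture as unused. A hedged sketch of that gating pulled into a reusable helper (skip_unless_profiling is hypothetical, not in this patch):

    def skip_unless_profiling(benchmark, reason: str) -> bool:
        """Return True when the expensive test body should be skipped.

        The no-op benchmark(print, ...) call marks the fixture as used
        before we bail out of the test early.
        """
        if benchmark.disabled:
            benchmark(print, reason)
            return True
        return False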
@@ -236,8 +236,8 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_lots_of_files_j10_empty_checker(self, benchmark):
- """Baselines pylint for a single extra checker being run in -j10, for N-files.
+ def test_baseline_lots_of_files_j2_empty_checker(self, benchmark):
+ """Baselines pylint for a single extra checker being run in -j2, for N-files.
We use a checker that does no work, so the cost is just that of the system at
scale, across workers
@@ -246,10 +246,10 @@ class TestEstablishBaselineBenchmarks:
benchmark(print, "skipping, only benchmark large file counts")
return  # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
- linter.config.jobs = 10
+ linter.config.jobs = 2
linter.register_checker(NoWorkChecker(linter))
fileinfos = [self.empty_filepath for _ in range(self.lot_of_files)]
- assert linter.config.jobs == 10
+ assert linter.config.jobs == 2
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
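The hunk above registers a NoWorkChecker so that a second checker travels through the parallel machinery without adding measurable work. The real class lives in the test module and may differ in detail (the assertion message above suggests it may register under a different name); a rough sketch of such a no-op checker, assuming pylint's BaseChecker API:

    from pylint.checkers import BaseChecker

    class NoWorkChecker(BaseChecker):
        """A checker that registers itself but does nothing per module."""

        name = "no-work-checker"  # illustrative name

        def visit_module(self, node) -> None:
            # Deliberately empty: we want the checker dispatch overhead,
            # not any actual linting work.
            pass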
@@ -260,7 +260,7 @@ class TestEstablishBaselineBenchmarks:
"""Establish a baseline of single-worker performance for PyLinter.
Here we mimic a single Checker that does some work so that we can see the
- impact of running a simple system with -j1 against the same system with -j10.
+ impact of running a simple system with -j1 against the same system with -j2.
We expect this benchmark to take a time very close to
`numfiles*SleepingChecker.sleep_duration`
@@ -272,8 +272,8 @@ class TestEstablishBaselineBenchmarks:
linter.register_checker(SleepingChecker(linter))
# Check the same number of files as
- # `test_baseline_benchmark_j10_single_working_checker`
- fileinfos = [self.empty_filepath for _ in range(10)]
+ # `test_baseline_benchmark_j2_single_working_checker`
+ fileinfos = [self.empty_filepath for _ in range(2)]
assert linter.config.jobs == 1
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
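The -j1 test above mimics real work with a SleepingChecker, and its docstring predicts a runtime close to numfiles*SleepingChecker.sleep_duration. A hedged sketch of what such a checker can look like; the sleep_duration value here is illustrative, the real one is defined in the test module:

    import time

    from pylint.checkers import BaseChecker

    class SleepingChecker(BaseChecker):
        """Simulate a fixed per-module cost by sleeping."""

        name = "sleeper"
        sleep_duration = 0.5  # seconds per file; illustrative value

        def visit_module(self, node) -> None:
            # Every checked module costs sleep_duration of wall-clock
            # time, so -j1 over 2 files should take ~2 * sleep_duration.
            time.sleep(self.sleep_duration)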
@@ -282,27 +282,27 @@ class TestEstablishBaselineBenchmarks:
linter.msg_status == 0
), f"Expected no errors to be thrown: {pprint.pformat(linter.reporter.messages)}"
- def test_baseline_benchmark_j10_single_working_checker(self, benchmark):
+ def test_baseline_benchmark_j2_single_working_checker(self, benchmark):
"""Establishes baseline of multi-worker performance for PyLinter/check_parallel.
We expect this benchmark to take less time than test_baseline_benchmark_j1, roughly
`error_margin*(1/J)*(numfiles*SleepingChecker.sleep_duration)`
Because of the cost of the framework and system the performance difference will
- *not* be 1/10 of -j1 versions.
+ *not* be 1/2 of -j1 versions.
"""
if benchmark.disabled:
benchmark(print, "skipping, do not want to sleep in main tests")
return  # _only_ run this test if profiling
linter = PyLinter(reporter=Reporter())
- linter.config.jobs = 10
+ linter.config.jobs = 2
linter.register_checker(SleepingChecker(linter))
# Check the same number of files as
# `test_baseline_benchmark_j1_single_working_checker`
- fileinfos = [self.empty_filepath for _ in range(10)]
+ fileinfos = [self.empty_filepath for _ in range(2)]
- assert linter.config.jobs == 10
+ assert linter.config.jobs == 2
assert len(linter._checkers) == 2, "Should have 'master' and 'sleeper'"
benchmark(linter.check, fileinfos)
assert (
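To make the bound quoted in the last docstring concrete, here is a worked instance of error_margin*(1/J)*(numfiles*SleepingChecker.sleep_duration) with the post-patch values J=2 and numfiles=2; sleep_duration and error_margin are illustrative stand-ins:

    J = 2                 # workers after this patch
    numfiles = 2          # files checked per benchmark run
    sleep_duration = 0.5  # illustrative per-file cost in seconds
    error_margin = 1.5    # illustrative overhead factor

    ideal = (1 / J) * (numfiles * sleep_duration)  # 0.5 s
    bound = error_margin * ideal                   # 0.75 s
    # The -j2 run should beat the ~1.0 s -j1 baseline, but not reach
    # a full 2x speedup because of framework and process overhead.
    print(f"ideal={ideal:.2f}s bound={bound:.2f}s")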