author    Ben Gamari <ben@smart-cactus.org>    2020-07-04 22:22:31 -0400
committer Marge Bot <ben+marge-bot@smart-cactus.org>    2020-08-18 15:40:05 -0400
commit    194b25ee97d93bc4bcb5bed9a0454debba7f2b6a (patch)
tree      ba6f5e6da3be8b52ac7b87f794f8b6895a49f337
parent    a87a0b498f4c93c33e3db8d7f68fbaa5d812b408 (diff)
testsuite: Allow baseline commit to be set explicitly
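
Previously the driver always inferred a baseline by walking the git log
for the nearest ancestor with recorded metrics, and reported it by depth
(HEAD~N). This patch adds a --perf-baseline COMMIT flag (stored as
config.baseline_commit, defaulting to None) that pins the comparison to
one explicit commit and reports baselines by commit hash instead. A
hypothetical invocation, with the driver's other flags elided:

    $ python testsuite/driver/runtests.py --perf-baseline=origin/master ...
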
-rw-r--r--  testsuite/driver/perf_notes.py   | 19
-rw-r--r--  testsuite/driver/runtests.py     | 20
-rw-r--r--  testsuite/driver/testglobals.py  |  5
-rw-r--r--  testsuite/driver/testlib.py      |  3
4 files changed, 32 insertions, 15 deletions
diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index c04c2da2c4..2bc88cd27c 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -76,8 +76,7 @@ PerfStat = NamedTuple('PerfStat', [('test_env', TestEnv),
# A baseline recovered from stored metrics.
Baseline = NamedTuple('Baseline', [('perfStat', PerfStat),
- ('commit', GitHash),
- ('commitDepth', int)])
+ ('commit', GitHash)])
class MetricChange(Enum):
# The metric appears to have no baseline and is presumably a new test.
@@ -402,7 +401,8 @@ def baseline_metric(commit: GitHash,
name: TestName,
test_env: TestEnv,
metric: MetricName,
- way: WayName
+ way: WayName,
+ baseline_ref: Optional[GitRef]
) -> Optional[Baseline]:
# For performance reasons (in order to avoid calling commit_hash), we assert
# commit is already a commit hash.
@@ -411,6 +411,8 @@ def baseline_metric(commit: GitHash,
# Get all recent commit hashes.
commit_hashes = baseline_commit_log(commit)
+ baseline_commit = commit_hash(baseline_ref) if baseline_ref else None
+
def has_expected_change(commit: GitHash) -> bool:
return get_allowed_perf_changes(commit).get(name) is not None
@@ -418,11 +420,18 @@ def baseline_metric(commit: GitHash,
def find_baseline(namespace: NoteNamespace,
test_env: TestEnv
) -> Optional[Baseline]:
+ if baseline_commit is not None:
+ current_metric = get_commit_metric(namespace, baseline_commit, test_env, name, metric, way)
+ if current_metric is not None:
+ return Baseline(current_metric, baseline_commit)
+ else:
+ return None
+
for depth, current_commit in list(enumerate(commit_hashes))[1:]:
# Check for a metric on this commit.
current_metric = get_commit_metric(namespace, current_commit, test_env, name, metric, way)
if current_metric is not None:
- return Baseline(current_metric, current_commit, depth)
+ return Baseline(current_metric, current_commit)
# Stop if there is an expected change at this commit. In that case
# metrics on ancestor commits will not be a valid baseline.
@@ -552,7 +561,7 @@ def check_stats_change(actual: PerfStat,
result = passed()
if not change_allowed:
error = str(change) + ' from ' + baseline.perfStat.test_env + \
- ' baseline @ HEAD~' + str(baseline.commitDepth)
+ ' baseline @ %s' % baseline.commit
print(actual.metric, error + ':')
result = failBecause('stat ' + error, tag='stat')
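
Net effect of the perf_notes.py hunks above: an explicit baseline now
short-circuits the commit-log walk in find_baseline. A minimal sketch of
the resulting lookup order; free variables such as name, metric, way,
commit_hashes, and has_expected_change are closed over from the
enclosing baseline_metric, as in the diff:

    def find_baseline(namespace: NoteNamespace,
                      test_env: TestEnv
                      ) -> Optional[Baseline]:
        # Pinned baseline: either the named commit has a recorded metric,
        # or there is no baseline at all; ancestors are never consulted.
        if baseline_commit is not None:
            m = get_commit_metric(namespace, baseline_commit,
                                  test_env, name, metric, way)
            return Baseline(m, baseline_commit) if m is not None else None

        # Default: walk recent ancestors for the first recorded metric,
        # giving up at a commit with an allowed ("expected") perf change.
        for _depth, current_commit in list(enumerate(commit_hashes))[1:]:
            m = get_commit_metric(namespace, current_commit,
                                  test_env, name, metric, way)
            if m is not None:
                return Baseline(m, current_commit)
            if has_expected_change(current_commit):
                return None
        return None
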
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index 44b2221ffe..a7e689df3b 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -27,7 +27,7 @@ from testutil import getStdout, Watcher, str_warn, str_info
from testglobals import getConfig, ghc_env, getTestRun, TestConfig, \
TestOptions, brokens, PerfMetric
from my_typing import TestName
-from perf_notes import MetricChange, inside_git_repo, is_worktree_dirty, format_perf_stat
+from perf_notes import MetricChange, GitRef, inside_git_repo, is_worktree_dirty, format_perf_stat
from junit import junit
import term_color
from term_color import Color, colored
@@ -70,6 +70,7 @@ parser.add_argument("--verbose", type=int, choices=[0,1,2,3,4,5], help="verbose
parser.add_argument("--junit", type=argparse.FileType('wb'), help="output testsuite summary in JUnit format")
parser.add_argument("--broken-test", action="append", default=[], help="a test name to mark as broken for this run")
parser.add_argument("--test-env", default='local', help="Override default chosen test-env.")
+parser.add_argument("--perf-baseline", type=GitRef, metavar='COMMIT', help="Baseline commit for performance comparsons.")
perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
@@ -101,6 +102,7 @@ config.metrics_file = args.metrics_file
hasMetricsFile = config.metrics_file is not None
config.summary_file = args.summary_file
config.no_print_summary = args.no_print_summary
+config.baseline_commit = args.perf_baseline
if args.only:
config.only = args.only
@@ -351,8 +353,8 @@ def tabulate_metrics(metrics: List[PerfMetric]) -> None:
rel = 100 * (val1 - val0) / val0
print("{space:24} {herald:40} {value:15.3f} [{direction}, {rel:2.1f}%]".format(
space = "",
- herald = "(baseline @ HEAD~{depth})".format(
- depth = metric.baseline.commitDepth),
+ herald = "(baseline @ {commit})".format(
+ commit = metric.baseline.commit),
value = val0,
direction = metric.change,
rel = rel
@@ -422,6 +424,8 @@ else:
# Dump metrics data.
print("\nPerformance Metrics (test environment: {}):\n".format(config.test_env))
+ if config.baseline_commit:
+ print('Performance baseline: %s\n' % config.baseline_commit)
if any(t.metrics):
tabulate_metrics(t.metrics)
else:
@@ -477,19 +481,19 @@ else:
summary(t, sys.stdout, config.no_print_summary, config.supports_colors)
# Write perf stats if any exist or if a metrics file is specified.
- stats = [stat for (_, stat, __) in t.metrics]
+ stats_metrics = [stat for (_, stat, __) in t.metrics] # type: List[PerfStat]
if hasMetricsFile:
- print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
+ print('Appending ' + str(len(stats_metrics)) + ' stats to file: ' + config.metrics_file)
with open(config.metrics_file, 'a') as f:
- f.write("\n" + Perf.format_perf_stat(stats))
- elif inside_git_repo() and any(stats):
+ f.write("\n" + Perf.format_perf_stat(stats_metrics))
+ elif inside_git_repo() and any(stats_metrics):
if is_worktree_dirty():
print()
print(str_warn('Performance Metrics NOT Saved') + \
' working tree is dirty. Commit changes or use ' + \
'--metrics-file to save metrics to a file.')
else:
- Perf.append_perf_stat(stats)
+ Perf.append_perf_stat(stats_metrics)
# Write summary
if config.summary_file:
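
A note on the new flag's type=GitRef: perf_notes appears to define
GitRef as a typing NewType over str (the sketch below assumes that
definition), and a NewType is a plain callable that returns its
argument, so it doubles as an argparse converter; config.baseline_commit
then carries the Optional[GitRef] annotation added in testglobals.py
below.

    from typing import NewType
    GitRef = NewType("GitRef", str)   # assumed definition in perf_notes.py

    ref = GitRef("origin/master")     # what argparse produces at runtime
    assert ref == "origin/master"     # NewType adds no runtime wrapper
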
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index c358a660d9..c9c2ef5be8 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -4,7 +4,7 @@
from my_typing import *
from pathlib import Path
-from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles
+from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles, GitRef
from datetime import datetime
# -----------------------------------------------------------------------------
@@ -163,6 +163,9 @@ class TestConfig:
# run.
self.broken_tests = set() # type: Set[TestName]
+ # Baseline commit for performance metric comparisons.
+ self.baseline_commit = None # type: Optional[GitRef]
+
# Should we skip performance tests
self.skip_perf_tests = False
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index fc83eb6477..104d3345b3 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -477,7 +477,8 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
metric = '{}/{}'.format(tag, metric_name)
def baselineByWay(way, target_commit, metric=metric):
return Perf.baseline_metric( \
- target_commit, name, config.test_env, metric, way)
+ target_commit, name, config.test_env, metric, way, \
+ config.baseline_commit )
opts.stats_range_fields[metric] = MetricOracles(baseline=baselineByWay,
deviation=deviation)
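
In testlib.py, baselineByWay closes over config.baseline_commit, so
every per-way lookup threads the explicit baseline (or None) down to
Perf.baseline_metric. A sketch of one lookup, with illustrative
arguments:

    # Resolve the baseline for one way of one test; yields Optional[Baseline].
    baseline = baselineByWay(WayName("normal"), target_commit)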