author     Ben Gamari <ben@smart-cactus.org>           2019-06-24 13:25:50 -0400
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2019-07-09 22:56:53 -0400
commit     42ff8653bd5ce7f00af5783f2973393ebfcd7cc7 (patch)
tree       831e8b48896260b498af6a5108b16ccda68b2cb5 /testsuite/driver
parent     24782b89907ab36fb5aef3a17584f4c10f1e2690 (diff)
download   haskell-42ff8653bd5ce7f00af5783f2973393ebfcd7cc7.tar.gz
testsuite: Fix #16818
Renames performance metrics to include whether they are compile-time or runtime metrics.
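
For example, a stanza in a test's all.T file keeps naming the bare metric; only the key the driver stores changes. The stanza below is illustrative (not part of this patch) and assumes the usual collect_compiler_stats helper, which calls _collect_stats with is_compiler_stats_test=True:

    # Illustrative all.T entry: after this patch the driver records the metric
    # under 'compile_time/bytes allocated' instead of plain 'bytes allocated'.
    test('T1969', [collect_compiler_stats('bytes allocated', 10)], compile, [''])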
Diffstat (limited to 'testsuite/driver')
-rw-r--r--  testsuite/driver/testlib.py | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 6fb9e5be87..2e87925fda 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -420,13 +420,17 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
     opts.is_stats_test = True
     if is_compiler_stats_test:
         opts.is_compiler_stats_test = True
+        tag = 'compile_time'
+    else:
+        tag = 'runtime'
 
     # Compiler performance numbers change when debugging is on, making the results
     # useless and confusing. Therefore, skip if debugging is on.
     if config.compiler_debugged and is_compiler_stats_test:
         opts.skip = True
 
-    for metric in metrics:
+    for metric_name in metrics:
+        metric = '{}/{}'.format(tag, metric_name)
         def baselineByWay(way, target_commit, metric=metric):
             return Perf.baseline_metric( \
                 target_commit, name, config.test_env, metric, way)
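
The tagging above amounts to prefixing every requested metric with either 'compile_time/' or 'runtime/'. A minimal standalone sketch of that naming scheme (the helper and sample metric names are illustrative, not part of testlib.py):

    # Illustrative only: mirrors how the driver builds prefixed metric keys.
    def prefixed_metrics(metrics, is_compiler_stats_test):
        tag = 'compile_time' if is_compiler_stats_test else 'runtime'
        return ['{}/{}'.format(tag, m) for m in metrics]

    print(prefixed_metrics(['bytes allocated'], True))            # ['compile_time/bytes allocated']
    print(prefixed_metrics(['peak_megabytes_allocated'], False))  # ['runtime/peak_megabytes_allocated']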
@@ -1273,9 +1277,12 @@ def check_stats(name, way, stats_file, range_fields) -> Any:
             return failBecause(str(e))
 
         for (metric, baseline_and_dev) in range_fields.items():
-            field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
+            # Remove any metric prefix e.g. "runtime/" and "compile_time/"
+            stat_file_metric = metric.split("/")[-1]
+
+            field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)', stats_file_contents)
             if field_match is None:
-                print('Failed to find metric: ', metric)
+                print('Failed to find metric: ', stat_file_metric)
                 metric_result = failBecause('no such stats metric')
             else:
                 val = field_match.group(1)
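
The stats file itself still uses the bare metric name (the regex expects lines of the form ("metric", "value")), so check_stats strips the new prefix before the lookup. A small standalone sketch of that lookup (the sample stats line and values are illustrative):

    import re

    stats_file_contents = '("bytes allocated", "99264")'  # sample stats-file line
    metric = 'compile_time/bytes allocated'               # key stored by the driver

    # Drop the 'compile_time/' or 'runtime/' prefix, then search as before.
    stat_file_metric = metric.split("/")[-1]
    field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)',
                            stats_file_contents)
    print(field_match.group(1))                           # '99264'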