summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBen Gamari <ben@smart-cactus.org>2019-06-24 13:25:50 -0400
committerDavid Eichmann <EichmannD@gmail.com>2019-07-08 14:55:59 +0100
commitc95706d5b9752745b6841747a6ff4d583e191876 (patch)
treee131825cd81a1fe236312f74eeeba8a1a37018ae
parent2fd1ed541ae55a30ef65e18dc09bba993f37c70e (diff)
downloadhaskell-wip/T16818.tar.gz
testsuite: Fix #16818wip/T16818
Renames performance metrics to include whether they are compile-time or runtime metrics.
-rw-r--r--testsuite/driver/testlib.py13
1 file changed, 10 insertions, 3 deletions
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 6fb9e5be87..2e87925fda 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -420,13 +420,17 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
opts.is_stats_test = True
if is_compiler_stats_test:
opts.is_compiler_stats_test = True
+ tag = 'compile_time'
+ else:
+ tag = 'runtime'
# Compiler performance numbers change when debugging is on, making the results
# useless and confusing. Therefore, skip if debugging is on.
if config.compiler_debugged and is_compiler_stats_test:
opts.skip = True
- for metric in metrics:
+ for metric_name in metrics:
+ metric = '{}/{}'.format(tag, metric_name)
def baselineByWay(way, target_commit, metric=metric):
return Perf.baseline_metric( \
target_commit, name, config.test_env, metric, way)
@@ -1273,9 +1277,12 @@ def check_stats(name, way, stats_file, range_fields) -> Any:
return failBecause(str(e))
for (metric, baseline_and_dev) in range_fields.items():
- field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
+ # Remove any metric prefix e.g. "runtime/" and "compile_time/"
+ stat_file_metric = metric.split("/")[-1]
+
+ field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)', stats_file_contents)
if field_match is None:
- print('Failed to find metric: ', metric)
+ print('Failed to find metric: ', stat_file_metric)
metric_result = failBecause('no such stats metric')
else:
val = field_match.group(1)