-rw-r--r--  testsuite/driver/testlib.py | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 6fb9e5be87..2e87925fda 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -420,13 +420,17 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
     opts.is_stats_test = True
     if is_compiler_stats_test:
         opts.is_compiler_stats_test = True
+        tag = 'compile_time'
+    else:
+        tag = 'runtime'
 
     # Compiler performance numbers change when debugging is on, making the results
     # useless and confusing. Therefore, skip if debugging is on.
     if config.compiler_debugged and is_compiler_stats_test:
         opts.skip = True
 
-    for metric in metrics:
+    for metric_name in metrics:
+        metric = '{}/{}'.format(tag, metric_name)
         def baselineByWay(way, target_commit, metric=metric):
             return Perf.baseline_metric( \
                 target_commit, name, config.test_env, metric, way)
@@ -1273,9 +1277,12 @@ def check_stats(name, way, stats_file, range_fields) -> Any:
         return failBecause(str(e))
 
     for (metric, baseline_and_dev) in range_fields.items():
-        field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
+        # Remove any metric prefix e.g. "runtime/" and "compile_time/"
+        stat_file_metric = metric.split("/")[-1]
+
+        field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)', stats_file_contents)
         if field_match is None:
-            print('Failed to find metric: ', metric)
+            print('Failed to find metric: ', stat_file_metric)
             metric_result = failBecause('no such stats metric')
         else:
             val = field_match.group(1)
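
For context, a minimal standalone sketch of the round trip this patch introduces. It is not part of the patch itself: the helper names tag_metric and lookup_in_stats_file are invented here for illustration, and only mirror the two hunks above. Metrics are stored under a "runtime/" or "compile_time/" prefix when collected, while the stats file the test emits still uses the bare metric name, so check_stats must strip the prefix before searching the file.

    import re

    def tag_metric(metric_name: str, is_compiler_stats_test: bool) -> str:
        # Mirrors _collect_stats: compiler perf tests are tagged
        # "compile_time/", runtime perf tests "runtime/".
        tag = 'compile_time' if is_compiler_stats_test else 'runtime'
        return '{}/{}'.format(tag, metric_name)

    def lookup_in_stats_file(metric: str, stats_file_contents: str):
        # Mirrors check_stats: drop any prefix, then match the
        # ("name", "value") pairs found in the stats file.
        stat_file_metric = metric.split("/")[-1]
        field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)',
                                stats_file_contents)
        return None if field_match is None else int(field_match.group(1))

    # A metric stored as "runtime/bytes allocated" is still found in a
    # stats file that only says ("bytes allocated", "4096").
    assert tag_metric('bytes allocated', False) == 'runtime/bytes allocated'
    assert lookup_in_stats_file('runtime/bytes allocated',
                                '("bytes allocated", "4096")') == 4096

Presumably the point of keeping the prefix in the stored name (and in the metric passed to Perf.baseline_metric) is to distinguish a compile-time metric from a runtime metric that happens to share a name, while the stats file itself stays unaware of the testsuite's tagging scheme.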