| author    | Ben Gamari <ben@smart-cactus.org>                       | 2021-03-19 11:00:05 -0400 |
| committer | Ben Gamari <ben@smart-cactus.org>                       | 2021-03-19 19:58:52 -0400 |
| commit    | d03d876185da2db9b397c58a383c0eac1892cafc (patch)        |                           |
| tree      | f84725dcacb763322556fa6a392d3cb40b054f58 /testsuite     |                           |
| parent    | 0a9866854384875e88406e495aba6594ac47799d (diff)         |                           |
| download  | haskell-d03d876185da2db9b397c58a383c0eac1892cafc.tar.gz |                           |
gitlab-ci: Ignore performance improvements in marge jobs
Currently we have far too many merge failures due to cumulative
performance improvements. Avoid this by accepting metric decreases in
marge-bot jobs.
Fixes #19562.
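Note that this diff only touches the testsuite driver; the CI-side change that enables the behaviour for marge-bot jobs is outside the 'testsuite' diffstat shown here. As a rough sketch of the wiring, assuming a runtests.py-style command-line option (the flag name `--ignore-perf-failures` and the argparse plumbing below are illustrative assumptions; only the `config.ignore_perf_increases` / `config.ignore_perf_decreases` field names appear in the diff):

```python
# Hypothetical sketch: how a marge-bot CI job could map a driver flag onto
# the two config fields consulted by check_stats in the diff below. Only the
# field names ignore_perf_increases/ignore_perf_decreases come from this
# commit; the flag name and argparse wiring are illustrative assumptions.
import argparse

class TestConfig:
    def __init__(self) -> None:
        self.ignore_perf_increases = False  # ignore regressions (metric went up)
        self.ignore_perf_decreases = False  # ignore improvements (metric went down)

parser = argparse.ArgumentParser()
parser.add_argument('--ignore-perf-failures',
                    choices=['increases', 'decreases', 'all'],
                    help='treat the given kind of perf-metric failure as a pass')

# A marge-bot job would ignore improvements; a normal validate job passes nothing.
args = parser.parse_args(['--ignore-perf-failures', 'decreases'])

config = TestConfig()
if args.ignore_perf_failures in ('increases', 'all'):
    config.ignore_perf_increases = True
if args.ignore_perf_failures in ('decreases', 'all'):
    config.ignore_perf_decreases = True

assert config.ignore_perf_decreases and not config.ignore_perf_increases
```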
Diffstat (limited to 'testsuite')
| -rw-r--r-- | testsuite/driver/testlib.py | 25 |
1 file changed, 13 insertions, 12 deletions
```diff
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 4ccc790e37..b5d2744e58 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1511,7 +1511,7 @@ def check_stats(name: TestName,
             field_match = re.search('\\("' + stat_file_metric + '", "([0-9]+)"\\)', stats_file_contents)
             if field_match is None:
                 print('Failed to find metric: ', stat_file_metric)
-                metric_result = failBecause('no such stats metric')
+                result = failBecause('no such stats metric')
             else:
                 val = field_match.group(1)
                 assert val is not None
@@ -1528,24 +1528,25 @@ def check_stats(name: TestName,
                     perf_change = MetricChange.NewMetric
                 else:
                     tolerance_dev = baseline_and_dev.deviation
-                    (change, metric_result) = Perf.check_stats_change(
+                    (perf_change, metric_result) = Perf.check_stats_change(
                         perf_stat,
                         baseline,
                         tolerance_dev,
                         config.allowed_perf_changes,
                         config.verbose >= 4)
+
                 t.metrics.append(PerfMetric(change=perf_change, stat=perf_stat, baseline=baseline))
 
-            # If any metric fails then the test fails.
-            # Note, the remaining metrics are still run so that
-            # a complete list of changes can be presented to the user.
-            if metric_result.passFail == 'fail':
-                if config.ignore_perf_increases and change == MetricChange.Increase:
-                    pass
-                elif config.ignore_perf_decreases and change == MetricChange.Decrease:
-                    pass
-                else:
-                    result = metric_result
+                # If any metric fails then the test fails.
+                # Note, the remaining metrics are still run so that
+                # a complete list of changes can be presented to the user.
+                if metric_result.passFail == 'fail':
+                    if config.ignore_perf_increases and perf_change == MetricChange.Increase:
+                        metric_result = passed()
+                    elif config.ignore_perf_decreases and perf_change == MetricChange.Decrease:
+                        metric_result = passed()
+
+                    result = metric_result
 
     return result
```
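To make the second hunk's control flow concrete, here is a minimal, self-contained model of the rewritten check (the `Result` type and the `passed()`/`failBecause()` helpers are simplified stand-ins, not GHC's actual driver code): an ignored change now rewrites the per-metric result to an explicit pass instead of falling through with `pass`, so the metric is also recorded as passing wherever `metric_result` is consulted afterwards.

```python
# Stand-alone model of the fixed logic; Result, passed() and failBecause()
# are simplified stand-ins for the driver's types, not GHC code.
from enum import Enum

class MetricChange(Enum):
    NewMetric = 'new'
    Increase = 'increase'   # a regression
    Decrease = 'decrease'   # an improvement

class Result:
    def __init__(self, passFail: str, reason: str = '') -> None:
        self.passFail = passFail
        self.reason = reason

def passed() -> Result:
    return Result('pass')

def failBecause(reason: str) -> Result:
    return Result('fail', reason)

def fold_metric(result: Result, metric_result: Result, perf_change: MetricChange,
                ignore_increases: bool = False,
                ignore_decreases: bool = False) -> Result:
    """Mirrors the rewritten block: an ignored change flips the per-metric
    result to a pass before it is folded into the overall test result."""
    if metric_result.passFail == 'fail':
        if ignore_increases and perf_change == MetricChange.Increase:
            metric_result = passed()
        elif ignore_decreases and perf_change == MetricChange.Decrease:
            metric_result = passed()
        result = metric_result
    return result

# A cumulative improvement no longer fails a marge-bot job...
assert fold_metric(passed(), failBecause('bytes allocated decreased'),
                   MetricChange.Decrease, ignore_decreases=True).passFail == 'pass'
# ...while a genuine regression still does.
assert fold_metric(passed(), failBecause('bytes allocated increased'),
                   MetricChange.Increase, ignore_decreases=True).passFail == 'fail'
```

The first hunk complements this: when a metric is missing from the stats file, the driver now fails the overall `result` directly, and because the ignore logic has moved inside the `else:` branch it only runs for metrics that were actually parsed.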