author     Ben Gamari <ben@smart-cactus.org>   2021-02-24 08:34:38 -0500
committer  Ben Gamari <ben@smart-cactus.org>   2021-02-24 11:15:41 -0500
commit     7151eaa37b8842f34e75b5e254d8185c3ce73a3d (patch)
tree       35f3c4fe158d27d56535decfad505bbd2d82ac9c /testsuite
parent     5b187ab84005e0bbc5699290116c5a186414ff7f (diff)
testsuite: Introduce flag to ignore performance failures
Needed by #19025.
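
The flag is consumed by the testsuite driver's argument parser in runtests.py (first hunk below). As a usage sketch, a direct invocation of the driver might look like the following; the placeholder stands in for whatever configuration options a given setup already passes and is not part of this change:

    python3 testsuite/driver/runtests.py --ignore-perf-failures <existing driver options>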
Diffstat (limited to 'testsuite')
-rw-r--r--  testsuite/driver/runtests.py     | 2 ++
-rw-r--r--  testsuite/driver/testglobals.py  | 3 +++
-rw-r--r--  testsuite/driver/testlib.py      | 2 +-
3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index c0482d0f9d..577851cf46 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -76,6 +76,7 @@ parser.add_argument("--perf-baseline", type=GitRef, metavar='COMMIT', help="Base
parser.add_argument("--test-package-db", dest="test_package_db", action="append", help="Package db providing optional packages used by the testsuite.")
perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
+perf_group.add_argument("--ignore-perf-failures", action="store_true", help="Don't fail due to out-of-tolerance perf tests")
args = parser.parse_args()
@@ -151,6 +152,7 @@ if args.verbose is not None:
forceSkipPerfTests = not hasMetricsFile and not inside_git_repo()
config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
config.only_perf_tests = args.only_perf_tests
+config.ignore_perf_failures = args.ignore_perf_failures
if args.test_env:
config.test_env = args.test_env
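
Taken together, the two runtests.py hunks follow the driver's usual pattern: declare a boolean flag on the parser, then mirror the parsed value onto the shared config object. A minimal self-contained sketch of that pattern, with SimpleNamespace standing in for the real TestConfig from testglobals.py:

    import argparse
    from types import SimpleNamespace

    parser = argparse.ArgumentParser()
    # store_true means the flag defaults to False when absent
    parser.add_argument("--ignore-perf-failures", action="store_true",
                        help="Don't fail due to out-of-tolerance perf tests")
    args = parser.parse_args(["--ignore-perf-failures"])

    # Copy the parsed flag onto the global config, as runtests.py does
    config = SimpleNamespace()
    config.ignore_perf_failures = args.ignore_perf_failures
    print(config.ignore_perf_failures)  # True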
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index 2b3dd48b68..117df41eb6 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -31,6 +31,9 @@ class TestConfig:
self.run_only_some_tests = False
self.only = set()
+ # Don't fail on out-of-tolerance stat failures
+ self.ignore_perf_failures = False
+
# Accept new output which differs from the sample?
self.accept = False
self.accept_platform = False
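
The class-level default in TestConfig ensures the attribute exists even when the flag is never passed on the command line, so later reads in testlib.py cannot raise AttributeError. A brief sketch of that invariant, with the class body reduced to the relevant field:

    class TestConfig:
        def __init__(self):
            # Don't fail on out-of-tolerance stat failures
            self.ignore_perf_failures = False

    config = TestConfig()
    assert config.ignore_perf_failures is False  # safe before CLI parsing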
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index c2838ae5bf..2375ea8c20 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1539,7 +1539,7 @@ def check_stats(name: TestName,
# If any metric fails then the test fails.
# Note, the remaining metrics are still run so that
# a complete list of changes can be presented to the user.
- if metric_result.passFail == 'fail':
+ if metric_result.passFail == 'fail' and not config.ignore_perf_failures:
result = metric_result
return result
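
The testlib.py change is where the flag takes effect: an out-of-tolerance metric is still computed and reported, but it no longer flips the overall result to a failure when the flag is set. A self-contained sketch of that gating logic follows; MetricResult and passFail are simplified stand-ins for the driver's real types, and the flag is threaded as a parameter here rather than read from the global config:

    from dataclasses import dataclass

    @dataclass
    class MetricResult:
        passFail: str      # 'pass' or 'fail'
        message: str = ''

    def check_stats(metric_results, ignore_perf_failures=False):
        result = MetricResult('pass')
        for metric_result in metric_results:
            # Every metric is still inspected so the user sees a complete
            # list of changes, but a failure only propagates when perf
            # failures are not being ignored.
            if metric_result.passFail == 'fail' and not ignore_perf_failures:
                result = metric_result
        return result

    # With the flag set, an out-of-tolerance metric no longer fails the test
    r = check_stats([MetricResult('fail', 'bytes allocated +7%')],
                    ignore_perf_failures=True)
    assert r.passFail == 'pass'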