-rw-r--r--  testsuite/driver/runtests.py     4
-rw-r--r--  testsuite/driver/testglobals.py  7
-rw-r--r--  testsuite/driver/testlib.py      3
3 files changed, 13 insertions, 1 deletion
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index 9eed68bc37..e587a1150d 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -26,6 +26,7 @@ import subprocess
from testutil import getStdout, Watcher, str_warn, str_info
from testglobals import getConfig, ghc_env, getTestRun, TestConfig, \
TestOptions, brokens, PerfMetric
+from my_typing import TestName
from perf_notes import MetricChange, inside_git_repo, is_worktree_dirty, format_perf_stat
from junit import junit
import term_color
@@ -67,6 +68,7 @@ parser.add_argument("--skipway", action="append", help="skip this way")
parser.add_argument("--threads", type=int, help="threads to run simultaneously")
parser.add_argument("--verbose", type=int, choices=[0,1,2,3,4,5], help="verbose (Values 0 through 5 accepted)")
parser.add_argument("--junit", type=argparse.FileType('wb'), help="output testsuite summary in JUnit format")
+parser.add_argument("--broken-test", action="append", default=[], help="a test name to mark as broken for this run")
parser.add_argument("--test-env", default='local', help="Override default chosen test-env.")
perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
@@ -123,6 +125,8 @@ if args.skipway:
    config.run_ways = [w for w in config.run_ways if w not in args.skipway]
    config.compile_ways = [w for w in config.compile_ways if w not in args.skipway]
+config.broken_tests |= {TestName(t) for t in args.broken_test}
+
if args.threads:
    config.threads = args.threads
    config.use_threads = True
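
A note on the --broken-test flag added above: argparse's action="append" with
default=[] collects one test name per occurrence of the flag, and the hyphen
in the flag name becomes an underscore in the parsed attribute. A minimal,
self-contained sketch of that behaviour (the test names are invented for
illustration):

    import argparse

    parser = argparse.ArgumentParser()
    # Same shape as the flag above: each use appends one name to a list.
    parser.add_argument("--broken-test", action="append", default=[],
                        help="a test name to mark as broken for this run")

    # Passing the flag twice accumulates both names, in order.
    args = parser.parse_args(["--broken-test", "T12345",
                              "--broken-test", "T67890"])
    print(args.broken_test)   # ['T12345', 'T67890']

(One argparse subtlety: with action="append" the values are appended to the
default list object itself, so an empty-list default, as used here, is the
usual idiom.)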
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index adf4112ca6..ceee5df9a8 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -153,6 +153,10 @@ class TestConfig:
        self.threads = 1
        self.use_threads = False

+        # tests which should be considered broken during this testsuite
+        # run.
+        self.broken_tests = set() # type: Set[TestName]
+
        # Should we skip performance tests
        self.skip_perf_tests = False
@@ -399,6 +403,7 @@ class TestOptions:
global default_testopts
default_testopts = TestOptions()
-# (bug, directory, name) of tests marked broken
+# (bug, directory, name) of tests marked broken. Used by the
+# config.list_broken feature.
global brokens
brokens = [] # type: List[Tuple[IssueNumber, str, str]]
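
A note on broken_tests above: runtests.py folds the command-line names into
this set with a union-update (|=). TestName appears to be a NewType over str
(it is imported from my_typing and called as a constructor); it is
re-declared in this sketch only so the sketch runs standalone:

    from typing import NewType, Set

    TestName = NewType("TestName", str)   # stand-in for my_typing.TestName

    broken_tests = set()   # type: Set[TestName]

    # Mirrors runtests.py: merge names given on the command line into the set.
    cli_names = ["T12345", "T67890"]
    broken_tests |= {TestName(t) for t in cli_names}
    print(sorted(broken_tests))   # ['T12345', 'T67890']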
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 35e9bd060d..5c7a1bd8d7 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -868,6 +868,9 @@ def test(name: TestName,
    executeSetups([thisdir_settings, setup], name, myTestOpts)

+    if name in config.broken_tests:
+        myTestOpts.expect = 'fail'
+
    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
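
A note on the check above: a name passed via --broken-test flips that test's
expected outcome to 'fail', much as expect_broken does in a .T file, so a
failure is reported as an expected failure rather than an unexpected one. A
stripped-down model of the decision (the class and helper here are
illustrative, not the driver's real API):

    class TestOptions:
        def __init__(self) -> None:
            self.expect = 'pass'   # default expectation

    broken_tests = {'T12345'}      # would come from --broken-test flags

    def configure(name: str, opts: TestOptions) -> TestOptions:
        # Same shape as the testlib.py hunk: membership flips the expectation.
        if name in broken_tests:
            opts.expect = 'fail'
        return opts

    print(configure('T12345', TestOptions()).expect)   # fail
    print(configure('T99999', TestOptions()).expect)   # pass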