author     Ben Gamari <ben@smart-cactus.org>  2019-06-25 18:16:32 -0400
committer  Ben Gamari <ben@well-typed.com>    2019-06-26 08:20:54 -0400
commit     44d08c32f5038bd395bf24df3f5c9f0297a3d0f0 (patch)
tree       4a36dd07cab6c6f6e7810d8c6c7e20717b3dd882 /testsuite
parent     551b79e4c2685f0c6de16e274efd686f26ca3876 (diff)
testsuite: Run and report on fragile tests
This allows us to run (but ignore the result of) fragile test cases. Hopefully this will make it easier to spot when a fragile test becomes un-fragile.
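For test authors, nothing changes in how fragility is declared: a test's all.T entry still applies fragile (or fragile_for, amended below). A minimal sketch, with a hypothetical test name and ticket number:

    # all.T -- T12345 now runs in every way, but its result is reported
    # under "fragile tests" instead of failing the run (see ticket #12345).
    test('T12345', [fragile(12345)], compile_and_run, [''])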
Diffstat (limited to 'testsuite')
-rw-r--r--  testsuite/driver/junit.py        3
-rw-r--r--  testsuite/driver/testglobals.py  4
-rw-r--r--  testsuite/driver/testlib.py     25
3 files changed, 23 insertions, 9 deletions
diff --git a/testsuite/driver/junit.py b/testsuite/driver/junit.py
index 9ff00ec1cb..180a81ab15 100644
--- a/testsuite/driver/junit.py
+++ b/testsuite/driver/junit.py
@@ -18,7 +18,8 @@ def junit(t: TestRun) -> ET.ElementTree:
     for res_type, group in [('stat failure', t.unexpected_stat_failures),
                             ('unexpected failure', t.unexpected_failures),
-                            ('unexpected pass', t.unexpected_passes)]:
+                            ('unexpected pass', t.unexpected_passes),
+                            ('fragile', t.fragile_results)]:
         for tr in group:
             testcase = ET.SubElement(testsuite, 'testcase',
                                      classname = tr.way,
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index 3fb809698f..3d273cb647 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -232,6 +232,7 @@ class TestRun:
         self.unexpected_passes = []
         self.unexpected_failures = []
         self.unexpected_stat_failures = []
+        self.fragile_results = []
 
         # List of all metrics measured in this test run.
         # [(change, PerfStat)] where change is one of the MetricChange
@@ -253,6 +254,9 @@ class TestOptions:
         # skip this test?
         self.skip = False
 
+        # the test is known to be fragile in these ways
+        self.fragile_ways = []
+
         # skip these ways
         self.omit_ways = []
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index ea7f61d8f7..6fb9e5be87 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -260,23 +260,23 @@ def _expect_pass(way):
 
 def fragile( bug: IssueNumber ):
     """
-    Indicates that the test should be skipped due to fragility documented in
-    the given ticket.
+    Indicates that failures of this test should be ignored due to fragility
+    documented in the given ticket.
     """
     def helper( name, opts, bug=bug ):
         record_broken(name, opts, bug)
-        opts.skip = True
+        opts.fragile_ways += config.way_flags.keys()
 
     return helper
 
 def fragile_for( bug: IssueNumber, ways: List[WayName] ):
     """
-    Indicates that the test should be skipped due to fragility in the given
-    test ways as documented in the given ticket.
+    Indicates that failures of this test should be ignored due to fragility in
+    the given test ways as documented in the given ticket.
     """
     def helper( name, opts, bug=bug, ways=ways ):
         record_broken(name, opts, bug)
-        opts.omit_ways += ways
+        opts.fragile_ways += ways
 
     return helper
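The behavioural change is concentrated here: fragile() previously set opts.skip (so the test never ran) and fragile_for() added the affected ways to opts.omit_ways; both now populate opts.fragile_ways, so the test still runs and only its accounting changes. A usage sketch for the per-way variant (the test name, ticket number, and way are hypothetical):

    # all.T -- run T12345 everywhere, but ignore its result in the
    # threaded2 way only; failures in other ways still fail the run.
    test('T12345', [fragile_for(12345, ['threaded2'])], compile_and_run, [''])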
@@ -424,7 +424,7 @@ def _collect_stats(name: TestName, opts, metrics, deviation, is_compiler_stats_t
     # Compiler performance numbers change when debugging is on, making the results
     # useless and confusing. Therefore, skip if debugging is on.
     if config.compiler_debugged and is_compiler_stats_test:
-        opts.skip = 1
+        opts.skip = True
 
     for metric in metrics:
         def baselineByWay(way, target_commit, metric=metric):
@@ -990,7 +990,10 @@ def do_test(name: TestName, way: WayName, func, args, files: Set[str]) -> None:
     directory = re.sub('^\\.[/\\\\]', '', str(opts.testdir))
 
-    if passFail == 'pass':
+    if way in opts.fragile_ways:
+        if_verbose(1, '*** fragile test %s resulted in %s' % (full_name, passFail))
+        t.fragile_results.append(TestResult(directory, name, 'fragile %s' % passFail, way))
+    elif passFail == 'pass':
         if _expect_pass(way):
             t.expected_passes.append(TestResult(directory, name, "", way))
             t.n_expected_passes += 1
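The fragile check deliberately comes before the pass/fail accounting, so a fragile way never touches the expected/unexpected counters; its outcome is recorded as 'fragile pass' or 'fragile fail' instead. A standalone sketch of that precedence (simplified from the driver logic above):

    def classify(way, passFail, fragile_ways):
        # Fragility takes precedence over ordinary pass/fail accounting.
        if way in fragile_ways:
            return 'fragile %s' % passFail
        return passFail

    assert classify('threaded2', 'fail', ['threaded2']) == 'fragile fail'
    assert classify('normal', 'pass', ['threaded2']) == 'pass'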
@@ -2297,6 +2300,8 @@ def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
                + ' unexpected failures\n'
                + repr(len(t.unexpected_stat_failures)).rjust(8)
                + ' unexpected stat failures\n'
+               + repr(len(t.fragile_results)).rjust(8)
+               + ' fragile tests\n'
                + '\n')
 
     if t.unexpected_passes:
@@ -2319,6 +2324,10 @@ def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
         file.write('Framework warnings:\n')
         printTestInfosSummary(file, t.framework_warnings)
 
+    if t.fragile_results:
+        file.write('Fragile tests:\n')
+        printTestInfosSummary(file, t.fragile_results)
+
     if stopping():
         file.write('WARNING: Testsuite run was terminated early\n')
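Taken together, a run's summary now always shows a fragile-test count and, when any fragile tests ran, a listing. An illustrative excerpt (the counts are made up, and the per-test lines are whatever printTestInfosSummary produces):

           0 unexpected passes
           0 unexpected failures
           0 unexpected stat failures
           2 fragile tests

    Fragile tests:
       ...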