author     Ben Gamari <ben@smart-cactus.org>   2020-03-30 11:30:52 -0400
committer  Ben Gamari <ben@smart-cactus.org>   2020-03-30 17:09:41 -0400
commit     f501d2006bdbc778e5f869f7032fc4ee82f9412b (patch)
tree       faed85d5b5b5a69260d773acc37ab56c7cbd5b0f
parent     f024b6e385bd1448968b7bf20de05f655c815bae (diff)
download   haskell-f501d2006bdbc778e5f869f7032fc4ee82f9412b.tar.gz
testsuite: Refactor representation of expected test outcomes (wip/T17987)
This turns the expected test outcome from a str into a proper enumeration.
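
For context, a minimal standalone sketch of the pattern this patch applies. The class and member names mirror the diff below; the describe() helper is hypothetical and exists only for illustration. The benefit of the change is that a misspelled outcome now fails loudly (AttributeError) instead of silently never matching a string comparison.

from enum import Enum

class ExpectedOutcome(Enum):
    """Whether we expect a test to pass, or why we expect it to fail."""
    PASS = 'pass'
    FAIL = 'fail'
    BROKEN = 'broken'
    MISSING_LIB = 'missing-lib'

def describe(expect: ExpectedOutcome) -> str:
    # Hypothetical helper: .value recovers the old string form, so
    # human-readable messages can stay unchanged.
    if expect == ExpectedOutcome.PASS:
        return 'expected to pass'
    return 'expected to fail ({})'.format(expect.value)

# ExpectedOutcome.PAS would raise AttributeError immediately, whereas the
# old free-form string 'pas' would simply have compared unequal everywhere.
print(describe(ExpectedOutcome.MISSING_LIB))   # expected to fail (missing-lib)
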
-rw-r--r--  testsuite/driver/testglobals.py  17
-rw-r--r--  testsuite/driver/testlib.py      35
2 files changed, 34 insertions, 18 deletions
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index ceee5df9a8..785082f0c8 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -6,6 +6,7 @@ from my_typing import *
from pathlib import Path
from perf_notes import MetricChange, PerfStat, Baseline, MetricOracles
from datetime import datetime
+from enum import Enum
# -----------------------------------------------------------------------------
# Configuration info
@@ -261,6 +262,20 @@ t = TestRun()
def getTestRun() -> TestRun:
return t
+class ExpectedOutcome(Enum):
+ """
+ Whether we expect a test to pass, or why we expect it to fail.
+ """
+
+ # The test should pass
+ PASS = 'pass'
+ # The test should fail (e.g. when testing an error message)
+ FAIL = 'fail'
+ # The test should fail because it is currently broken
+ BROKEN = 'broken'
+ # The test should fail because we are lacking a library it requires
+ MISSING_LIB = 'missing-lib'
+
# -----------------------------------------------------------------------------
# Information about the current test
@@ -282,7 +297,7 @@ class TestOptions:
self.extra_ways = [] # type: List[WayName]
# the result we normally expect for this test
- self.expect = 'pass'
+ self.expect = ExpectedOutcome.PASS # type: ExpectedOutcome
# override the expected result for certain ways
self.expect_fail_for = [] # type: List[WayName]
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 5c7a1bd8d7..04f8ee2f97 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -19,7 +19,8 @@ import collections
import subprocess
from testglobals import config, ghc_env, default_testopts, brokens, t, \
- TestRun, TestResult, TestOptions, PerfMetric
+ TestRun, TestResult, TestOptions, PerfMetric, \
+ ExpectedOutcome
from testutil import strip_quotes, lndir, link_or_copy_file, passed, \
failBecause, testing_metrics, \
PassFail
@@ -114,7 +115,7 @@ def expect_fail( name, opts ):
# The compiler, testdriver, OS or platform is missing a certain
# feature, and we don't plan to or can't fix it now or in the
# future.
- opts.expect = 'fail';
+ opts.expect = ExpectedOutcome.FAIL
def reqlib( lib ):
return lambda name, opts, l=lib: _reqlib (name, opts, l )
@@ -174,28 +175,28 @@ def have_library(lib: str) -> bool:
def _reqlib( name, opts, lib ):
if not have_library(lib):
- opts.expect = 'missing-lib'
+ opts.expect = ExpectedOutcome.MISSING_LIB
def req_haddock( name, opts ):
if not config.haddock:
- opts.expect = 'missing-lib'
+ opts.expect = ExpectedOutcome.MISSING_LIB
def req_profiling( name, opts ):
'''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
if not config.have_profiling:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_shared_libs( name, opts ):
if not config.have_shared_libs:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_interp( name, opts ):
if not config.have_interp:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_rts_linker( name, opts ):
if not config.have_RTS_linker:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def req_th( name, opts ):
"""
@@ -210,7 +211,7 @@ def req_th( name, opts ):
def req_smp( name, opts ):
if not config.have_smp:
- opts.expect = 'fail'
+ opts.expect = ExpectedOutcome.FAIL
def ignore_stdout(name, opts):
opts.ignore_stdout = True
@@ -269,7 +270,7 @@ def expect_broken( bug: IssueNumber ):
"""
def helper( name: TestName, opts ):
record_broken(name, opts, bug)
- opts.expect = 'fail';
+ opts.expect = ExpectedOutcome.FAIL
return helper
@@ -291,7 +292,7 @@ def record_broken(name: TestName, opts, bug: IssueNumber):
def _expect_pass(way):
# Helper function. Not intended for use in .T files.
opts = getTestOpts()
- return opts.expect == 'pass' and way not in opts.expect_fail_for
+ return opts.expect == ExpectedOutcome.PASS and way not in opts.expect_fail_for
# -----
@@ -869,7 +870,7 @@ def test(name: TestName,
executeSetups([thisdir_settings, setup], name, myTestOpts)
if name in config.broken_tests:
- myTestOpts.expect = 'fail'
+ myTestOpts.expect = ExpectedOutcome.BROKEN
thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
if myTestOpts.alone:
@@ -1081,14 +1082,14 @@ def do_test(name: TestName,
print_output = config.verbose >= 3)
# If user used expect_broken then don't record failures of pre_cmd
- if exit_code != 0 and opts.expect not in ['fail']:
+ if exit_code != 0 and opts.expect not in [ExpectedOutcome.FAIL]:
framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
result = func(*[name,way] + args)
- if opts.expect not in ['pass', 'fail', 'missing-lib']:
- framework_fail(name, way, 'bad expected ' + opts.expect)
+ if opts.expect not in [ExpectedOutcome.PASS, ExpectedOutcome.FAIL, ExpectedOutcome.MISSING_LIB]:
+ framework_fail(name, way, 'bad expected ' + opts.expect.value)
try:
passFail = result.passFail
@@ -1126,7 +1127,7 @@ def do_test(name: TestName,
stderr=result.stderr)
t.unexpected_failures.append(tr)
else:
- if opts.expect == 'missing-lib':
+ if opts.expect == ExpectedOutcome.MISSING_LIB:
t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
else:
t.n_expected_failures += 1
@@ -1958,7 +1959,7 @@ def compare_outputs(way: WayName,
elif diff_file: diff_file.open('ab').close() # Make sure the file exists still as
# we will try to read it later
- if config.accept and (getTestOpts().expect == 'fail' or
+ if config.accept and (getTestOpts().expect == ExpectedOutcome.FAIL or
way in getTestOpts().expect_fail_for):
if_verbose(1, 'Test is expected to fail. Not accepting new output.')
return False
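
As a usage illustration, a hedged sketch of how the enum-valued expect field is consulted when deciding whether a given way should pass, mirroring the _expect_pass logic in the diff above. The Options dataclass is a simplified stand-in for the real TestOptions, not the driver's actual class.

from dataclasses import dataclass, field
from enum import Enum
from typing import List

class ExpectedOutcome(Enum):
    PASS = 'pass'
    FAIL = 'fail'
    BROKEN = 'broken'
    MISSING_LIB = 'missing-lib'

@dataclass
class Options:
    # Simplified stand-in for TestOptions: the expected outcome plus
    # per-way overrides, defaulting to PASS as in the patch.
    expect: ExpectedOutcome = ExpectedOutcome.PASS
    expect_fail_for: List[str] = field(default_factory=list)

def expect_pass(opts: Options, way: str) -> bool:
    # Mirrors _expect_pass: pass only if the test is globally expected to
    # pass and the current way is not listed as an expected failure.
    return opts.expect == ExpectedOutcome.PASS and way not in opts.expect_fail_for

opts = Options(expect_fail_for=['ghci'])
print(expect_pass(opts, 'normal'))  # True
print(expect_pass(opts, 'ghci'))    # False
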