author | Ben Gamari <ben@smart-cactus.org> | 2021-03-22 12:02:21 -0400
committer | Marge Bot <ben+marge-bot@smart-cactus.org> | 2021-04-02 05:16:35 -0400
commit | 78ca4a27fd960033dcb16f65f15a0007b1062cc0 (patch)
tree | 79aa740220e5767004137bda7e5888f6c8b5cf81
parent | ee55d57e90ea497d389a5a47504174b213ba0598 (diff)
download | haskell-78ca4a27fd960033dcb16f65f15a0007b1062cc0.tar.gz
testsuite: Make passFail a boolean
-rw-r--r-- | testsuite/driver/testlib.py | 27
-rw-r--r-- | testsuite/driver/testutil.py | 10
2 files changed, 13 insertions, 24 deletions
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index b76f02f3a4..e2818e4ab8 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -22,7 +22,7 @@ from testglobals import config, ghc_env, default_testopts, brokens, t, \
                         TestRun, TestResult, TestOptions, PerfMetric
 from testutil import strip_quotes, lndir, link_or_copy_file, passed, \
                      failBecause, testing_metrics, \
-                     PassFail, memoize
+                     PassFail, badResult, memoize
 from term_color import Color, colored
 import testutil
 from cpu_features import have_cpu_feature
@@ -1156,29 +1156,24 @@ def do_test(name: TestName,
     if opts.expect not in ['pass', 'fail', 'missing-lib']:
         framework_fail(name, way, 'bad expected ' + opts.expect)

-    try:
-        passFail = result.passFail
-    except (KeyError, TypeError):
-        passFail = 'No passFail found'
-
     directory = re.sub('^\\.[/\\\\]', '', str(opts.testdir))

     if way in opts.fragile_ways:
-        if_verbose(1, '*** fragile test %s resulted in %s' % (full_name, passFail))
-        if passFail == 'pass':
+        if_verbose(1, '*** fragile test %s resulted in %s' % (full_name, 'pass' if result.passed else 'fail'))
+        if result.passed:
             t.fragile_passes.append(TestResult(directory, name, 'fragile', way))
         else:
             t.fragile_failures.append(TestResult(directory, name, 'fragile', way,
                                                  stdout=result.stdout,
                                                  stderr=result.stderr))
-    elif passFail == 'pass':
+    elif result.passed:
         if _expect_pass(way):
             t.expected_passes.append(TestResult(directory, name, "", way))
             t.n_expected_passes += 1
         else:
             if_verbose(1, '*** unexpected pass for %s' % full_name)
             t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
-    elif passFail == 'fail':
+    else:
         if _expect_pass(way):
             reason = result.reason
             tag = result.tag
@@ -1196,8 +1191,6 @@ def do_test(name: TestName,
                 t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
             else:
                 t.n_expected_failures += 1
-    else:
-        framework_fail(name, way, 'bad result ' + passFail)

     # Make is often invoked with -s, which means if it fails, we get
     # no feedback at all. This is annoying. So let's remove the option
@@ -1227,14 +1220,6 @@ def framework_warn(name: TestName, way: WayName, reason: str) -> None:
     if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
     t.framework_warnings.append(TestResult(directory, name, reason, way))

-def badResult(result: PassFail) -> bool:
-    try:
-        if result.passFail == 'pass':
-            return False
-        return True
-    except (KeyError, TypeError):
-        return True
-
 # -----------------------------------------------------------------------------
 # Generic command tests

@@ -1540,7 +1525,7 @@ def check_stats(name: TestName,
             # If any metric fails then the test fails.
             # Note, the remaining metrics are still run so that
             # a complete list of changes can be presented to the user.
-            if metric_result.passFail == 'fail':
+            if not metric_result.passed:
                 if config.ignore_perf_increases and perf_change == MetricChange.Increase:
                     metric_result = passed()
                 elif config.ignore_perf_decreases and perf_change == MetricChange.Decrease:
diff --git a/testsuite/driver/testutil.py b/testsuite/driver/testutil.py
index 074214639e..064fd8086f 100644
--- a/testsuite/driver/testutil.py
+++ b/testsuite/driver/testutil.py
@@ -9,8 +9,9 @@ import threading

 from my_typing import *

+
 PassFail = NamedTuple('PassFail',
-                      [('passFail', str),
+                      [('passed', bool),
                        ('reason', str),
                        ('tag', Optional[str]),
                        ('stderr', Optional[str]),
@@ -18,8 +19,11 @@ PassFail = NamedTuple('PassFail',
                        ('hc_opts', Optional[str]),
                        ])

+def badResult(result: PassFail) -> bool:
+    return not result.passed
+
 def passed(hc_opts=None) -> PassFail:
-    return PassFail(passFail='pass',
+    return PassFail(passed=True,
                     reason='',
                     tag=None,
                     stderr=None,
@@ -31,7 +35,7 @@ def failBecause(reason: str,
                 stderr: str=None,
                 stdout: str=None
                 ) -> PassFail:
-    return PassFail(passFail='fail', reason=reason, tag=tag,
+    return PassFail(passed=False, reason=reason, tag=tag,
                     stderr=stderr, stdout=stdout, hc_opts=None)

 def strip_quotes(s: str) -> str:
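For context, here is a minimal, self-contained sketch of the reworked PassFail record and its helpers, adapted from the new testutil.py definitions in the diff above. The plain `typing` imports, the exact default-argument annotations, and the usage lines at the end are illustrative assumptions added for this sketch; they are not part of the commit.

```python
# Sketch of the boolean-based PassFail record introduced by this patch.
# The testsuite driver gets NamedTuple/Optional via `from my_typing import *`;
# standard typing imports are used here so the sketch runs on its own.
from typing import NamedTuple, Optional

PassFail = NamedTuple('PassFail',
                      [('passed', bool),
                       ('reason', str),
                       ('tag', Optional[str]),
                       ('stderr', Optional[str]),
                       ('stdout', Optional[str]),
                       ('hc_opts', Optional[str]),
                       ])

def badResult(result: PassFail) -> bool:
    # With a real bool there is no need for the old try/except around a
    # 'pass'/'fail' string comparison.
    return not result.passed

def passed(hc_opts: Optional[str] = None) -> PassFail:
    return PassFail(passed=True, reason='', tag=None,
                    stderr=None, stdout=None, hc_opts=hc_opts)

def failBecause(reason: str, tag: Optional[str] = None,
                stderr: Optional[str] = None,
                stdout: Optional[str] = None) -> PassFail:
    return PassFail(passed=False, reason=reason, tag=tag,
                    stderr=stderr, stdout=stdout, hc_opts=None)

# Illustrative usage (not from the commit): callers such as do_test now
# branch on the boolean field directly, e.g. `if result.passed:`.
result = failBecause('bad exit code')
assert badResult(result) and not result.passed
assert passed().passed
```

The net effect of the change is that the driver can no longer construct or propagate a malformed result string such as 'No passFail found'; a PassFail is either passed or not, and badResult is a one-line negation in testutil.py instead of a defensive try/except in testlib.py.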