author    Ben Gamari <ben@smart-cactus.org>           2019-01-24 14:20:11 -0500
committer Marge Bot <ben+marge-bot@smart-cactus.org>  2019-02-10 08:37:59 -0500
commit    224fec6983e16ecfc44a80d47e591a2425468eaf (patch)
tree      08cf0a5b5352c48d99c9783fb657d18e938ba461
parent    a48753bdbc99cda36890e851950f5b79e1c3b2b2 (diff)
testsuite: Report stdout and stderr in JUnit output
This patch makes the JUnit output more useful: we now also report stdout/stderr in the failure message, which can be used to quickly identify why a test is failing without downloading the log. This also introduces TestResult; previously we were simply passing around tuples, which made the implementation rather difficult to follow and harder to extend.
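For illustration only (not part of the patch), the shape of the change is roughly the following sketch; `diff_bytes` is a placeholder name for the captured diff output:

    # Before: bare tuples, so every consumer had to know the field order
    t.unexpected_failures.append((directory, name, reason, way))

    # After: a TestResult with named fields, plus optionally captured stderr
    t.unexpected_failures.append(
        TestResult(directory, name, reason, way, stderr=diff_bytes))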
-rw-r--r--  testsuite/driver/junit.py       | 23
-rw-r--r--  testsuite/driver/testglobals.py | 15
-rw-r--r--  testsuite/driver/testlib.py     | 58
-rw-r--r--  testsuite/driver/testutil.py    |  7
4 files changed, 69 insertions(+), 34 deletions(-)
diff --git a/testsuite/driver/junit.py b/testsuite/driver/junit.py
index f5daec1e47..ec840181b9 100644
--- a/testsuite/driver/junit.py
+++ b/testsuite/driver/junit.py
@@ -13,26 +13,27 @@ def junit(t):
for res_type, group in [('stat failure', t.unexpected_stat_failures),
('unexpected failure', t.unexpected_failures)]:
- for (directory, testname, reason, way) in group:
+ for tr in group:
testcase = ET.SubElement(testsuite, 'testcase',
- classname = way,
- name = '%s(%s)' % (testname, way))
+ classname = tr.way,
+ name = '%s(%s)' % (tr.testname, tr.way))
+ new_reason = "\n".join([tr.reason, "STDERR:", tr.stderr.decode("utf-8")]) if tr.stderr else tr.reason
result = ET.SubElement(testcase, 'failure',
type = res_type,
- message = reason)
+ message = new_reason)
- for (directory, testname, reason, way) in t.framework_failures:
+ for tr in t.framework_failures:
testcase = ET.SubElement(testsuite, 'testcase',
- classname = way,
- name = '%s(%s)' % (testname, way))
+ classname = tr.way,
+ name = '%s(%s)' % (tr.testname, tr.way))
result = ET.SubElement(testcase, 'error',
type = "framework failure",
- message = reason)
+ message = tr.reason)
- for (directory, testname, way) in t.expected_passes:
+ for tr in t.expected_passes:
testcase = ET.SubElement(testsuite, 'testcase',
- classname = way,
- name = '%s(%s)' % (testname, way))
+ classname = tr.way,
+ name = '%s(%s)' % (tr.testname, tr.way))
return ET.ElementTree(testsuites)
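A minimal standalone sketch of what the new junit() emits for an unexpected failure; the test name and output below are made up, and only the ElementTree calls mirror the patched code:

    import xml.etree.ElementTree as ET

    testsuites = ET.Element('testsuites')
    testsuite = ET.SubElement(testsuites, 'testsuite')

    # A made-up failing test, with the fields a TestResult would carry
    way, testname, reason = 'normal', 'T12345', 'stderr mismatch'
    stderr = b'-expected output\n+actual output\n'

    testcase = ET.SubElement(testsuite, 'testcase',
                             classname=way,
                             name='%s(%s)' % (testname, way))
    message = '\n'.join([reason, 'STDERR:', stderr.decode('utf-8')])
    ET.SubElement(testcase, 'failure',
                  type='unexpected failure',
                  message=message)

    print(ET.tostring(testsuites).decode('utf-8'))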
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index 0e0240db8e..de1d1e660a 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -151,6 +151,20 @@ ghc_env = os.environ.copy()
# -----------------------------------------------------------------------------
# Information about the current test run
+class TestResult:
+ """
+ A result from the execution of a test. These live in the expected_passes,
+ framework_failures, framework_warnings, unexpected_passes,
+ unexpected_failures, unexpected_stat_failures lists of TestRun.
+ """
+ __slots__ = 'directory', 'testname', 'reason', 'way', 'stderr'
+ def __init__(self, directory, testname, reason, way, stderr=None):
+ self.directory = directory
+ self.testname = testname
+ self.reason = reason
+ self.way = way
+ self.stderr = stderr
+
class TestRun:
def __init__(self):
self.start_time = None
@@ -161,6 +175,7 @@ class TestRun:
self.n_expected_passes = 0
self.n_expected_failures = 0
+ # type: List[TestResult]
self.missing_libs = []
self.framework_failures = []
self.framework_warnings = []
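A quick usage sketch of the new class (all values made up):

    tr = TestResult('codeGen/should_run', 'T12345', 'bad exit code', 'optasm')
    assert tr.stderr is None   # stderr defaults to None

    tr2 = TestResult('driver', 'T1', 'stderr mismatch', 'normal',
                     stderr=b'-foo\n+bar\n')

    # __slots__ keeps instances small, and a typo such as tr.wya = ...
    # raises AttributeError instead of passing silently.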
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index dd3b4262eb..710800b9f0 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -18,8 +18,8 @@ from pathlib import PurePath
import collections
import subprocess
-from testglobals import config, ghc_env, default_testopts, brokens, t
-from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
+from testglobals import config, ghc_env, default_testopts, brokens, t, TestResult
+from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, failBecauseStderr, str_fail, str_pass
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange
@@ -940,24 +940,25 @@ def do_test(name, way, func, args, files):
if passFail == 'pass':
if _expect_pass(way):
- t.expected_passes.append((directory, name, way))
+ t.expected_passes.append(TestResult(directory, name, "", way))
t.n_expected_passes += 1
else:
if_verbose(1, '*** unexpected pass for %s' % full_name)
- t.unexpected_passes.append((directory, name, 'unexpected', way))
+ t.unexpected_passes.append(TestResult(directory, name, 'unexpected', way))
elif passFail == 'fail':
if _expect_pass(way):
reason = result['reason']
tag = result.get('tag')
if tag == 'stat':
if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
- t.unexpected_stat_failures.append((directory, name, reason, way))
+ t.unexpected_stat_failures.append(TestResult(directory, name, reason, way))
else:
if_verbose(1, '*** unexpected failure for %s' % full_name)
- t.unexpected_failures.append((directory, name, reason, way))
+ result = TestResult(directory, name, reason, way, stderr=result.get('stderr'))
+ t.unexpected_failures.append(result)
else:
if opts.expect == 'missing-lib':
- t.missing_libs.append((directory, name, 'missing-lib', way))
+ t.missing_libs.append(TestResult(directory, name, 'missing-lib', way))
else:
t.n_expected_failures += 1
else:
@@ -980,14 +981,14 @@ def framework_fail(name, way, reason):
directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
full_name = name + '(' + way + ')'
if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
- t.framework_failures.append((directory, name, way, reason))
+ t.framework_failures.append(TestResult(directory, name, reason, way))
def framework_warn(name, way, reason):
opts = getTestOpts()
directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
full_name = name + '(' + way + ')'
if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
- t.framework_warnings.append((directory, name, way, reason))
+ t.framework_warnings.append(TestResult(directory, name, reason, way))
def badResult(result):
try:
@@ -1089,15 +1090,20 @@ def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwa
expected_stderr_file = find_expected_file(name, 'stderr')
actual_stderr_file = add_suffix(name, 'comp.stderr')
+ diff_file_name = in_testdir(add_suffix(name, 'comp.diff'))
if not compare_outputs(way, 'stderr',
join_normalisers(getTestOpts().extra_errmsg_normaliser,
normalise_errmsg),
expected_stderr_file, actual_stderr_file,
+ diff_file=diff_file_name,
whitespace_normaliser=getattr(getTestOpts(),
"whitespace_normaliser",
normalise_whitespace)):
- return failBecause('stderr mismatch')
+ stderr = open(diff_file_name, 'rb').read()
+ os.remove(diff_file_name)
+ return failBecauseStderr('stderr mismatch', stderr=stderr)
+
# no problems found, this test passed
return passed()
@@ -1291,10 +1297,11 @@ def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, b
exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
+ actual_stderr_path = in_testdir(name, 'comp.stderr')
+
if exit_code != 0 and not should_fail:
if config.verbose >= 1 and _expect_pass(way):
print('Compile failed (exit code {0}) errors were:'.format(exit_code))
- actual_stderr_path = in_testdir(name, 'comp.stderr')
dump_file(actual_stderr_path)
# ToDo: if the sub-shell was killed by ^C, then exit
@@ -1306,10 +1313,12 @@ def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, b
if should_fail:
if exit_code == 0:
- return failBecause('exit code 0')
+ stderr_contents = open(actual_stderr_path, 'rb').read()
+ return failBecauseStderr('exit code 0', stderr_contents)
else:
if exit_code != 0:
- return failBecause('exit code non-0')
+ stderr_contents = open(actual_stderr_path, 'rb').read()
+ return failBecauseStderr('exit code non-0', stderr_contents)
return passed()
@@ -1622,7 +1631,7 @@ def check_prof_ok(name, way):
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
-def compare_outputs(way, kind, normaliser, expected_file, actual_file,
+def compare_outputs(way, kind, normaliser, expected_file, actual_file, diff_file=None,
whitespace_normaliser=lambda x:x):
expected_path = in_srcdir(expected_file)
@@ -1657,6 +1666,7 @@ def compare_outputs(way, kind, normaliser, expected_file, actual_file,
# See Note [Output comparison].
r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
actual_normalised_path),
+ stdout=diff_file,
print_output=True)
# If for some reason there were no non-whitespace differences,
@@ -1664,7 +1674,10 @@ def compare_outputs(way, kind, normaliser, expected_file, actual_file,
if r == 0:
r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
actual_normalised_path),
+ stdout=diff_file,
print_output=True)
+ elif diff_file: open(diff_file, 'ab').close() # Make sure the file still exists, as we will try to read it later
if config.accept and (getTestOpts().expect == 'fail' or
way in getTestOpts().expect_fail_for):
@@ -2154,19 +2167,22 @@ def summary(t, file, short=False, color=False):
file.write('WARNING: Testsuite run was terminated early\n')
def printUnexpectedTests(file, testInfoss):
- unexpected = set(name for testInfos in testInfoss
- for (_, name, _, _) in testInfos
- if not name.endswith('.T'))
+ unexpected = set(result.testname
+ for testInfos in testInfoss
+ for result in testInfos
+ if not result.testname.endswith('.T'))
if unexpected:
file.write('Unexpected results from:\n')
file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
file.write('\n')
def printTestInfosSummary(file, testInfos):
- maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
- for (directory, name, reason, way) in testInfos:
- directory = directory.ljust(maxDirLen)
- file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
+ maxDirLen = max(len(tr.directory) for tr in testInfos)
+ for result in testInfos:
+ directory = result.directory.ljust(maxDirLen)
+ file.write(' {directory} {r.testname} [{r.reason}] ({r.way})\n'.format(
+ r = result,
+ directory = directory))
file.write('\n')
def modify_lines(s, f):
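To see the new diff_file round trip end to end, here is a self-contained sketch; the file names are made up and subprocess stands in for the driver's runCmd:

    import os
    import subprocess

    expected = 'T1.stderr.normalised'
    actual = 'T1.comp.stderr.normalised'
    diff_file = 'T1.comp.diff'

    with open(expected, 'w') as f: f.write('foo\n')
    with open(actual, 'w') as f: f.write('bar\n')

    # compare_outputs(): run diff with stdout redirected into diff_file
    with open(diff_file, 'wb') as out:
        r = subprocess.call(['diff', '-uw', expected, actual], stdout=out)

    # do_compile(): on mismatch (r != 0 here), read the diff back so it
    # can be attached to the failure via failBecauseStderr
    if r != 0:
        stderr_bytes = open(diff_file, 'rb').read()
    os.remove(diff_file)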
diff --git a/testsuite/driver/testutil.py b/testsuite/driver/testutil.py
index 6e0c2684d7..d5bd2f33c3 100644
--- a/testsuite/driver/testutil.py
+++ b/testsuite/driver/testutil.py
@@ -8,8 +8,11 @@ import threading
def passed():
return {'passFail': 'pass'}
-def failBecause(reason, tag=None):
- return {'passFail': 'fail', 'reason': reason, 'tag': tag}
+def failBecauseStderr(reason, stderr, tag=None):
+ return failBecause(reason, tag, stderr=stderr)
+
+def failBecause(reason, tag=None, **kwargs):
+ return dict({'passFail': 'fail', 'reason': reason, 'tag': tag}, **kwargs)
def strip_quotes(s):
# Don't wrap commands to subprocess.call/Popen in quotes.
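For reference, the dictionaries these helpers produce now look like this (a quick sketch):

    failBecause('exit code non-0')
    # => {'passFail': 'fail', 'reason': 'exit code non-0', 'tag': None}

    failBecauseStderr('exit code non-0', b'linker error\n')
    # => {'passFail': 'fail', 'reason': 'exit code non-0', 'tag': None,
    #     'stderr': b'linker error\n'}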