author      Krzysztof Gogolewski <krz.gogolewski@gmail.com>   2014-10-19 13:41:24 +0200
committer   Krzysztof Gogolewski <krz.gogolewski@gmail.com>   2014-10-19 13:41:24 +0200
commit      d576fc38d9493c4979217fa36565f1f97fcc03d4 (patch)
tree        01a0f1e2b46d840f94e4062c229f8f949d215b5f /testsuite/driver
parent      abfbb0d6aa65bf6f664fd86eecc72bd3a28bb0b9 (diff)
download    haskell-d576fc38d9493c4979217fa36565f1f97fcc03d4.tar.gz
Python 3 support, second attempt (Trac #9184)
Summary:
This is a fixup of https://phabricator.haskell.org/D233
The only difference is in findTFiles (first commit), which
previously broke the Windows runner; this time I translated it
literally instead of attempting to improve it, and checked that it works.
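
For reference, the literal translation (visible in the testlib.py hunk below) keeps
the explicit recursion over directories rather than switching to os.walk, which
would produce backslashes on Windows. Roughly, assuming os is already imported as
in testlib.py:

    # Return a list of all the files ending in '.T' below directories roots.
    def findTFiles(roots):
        # It would be better to use os.walk, but that gives backslashes
        # on Windows, which trip the testsuite later :-(
        return [filename for root in roots for filename in findTFiles_(root)]

    def findTFiles_(path):
        if os.path.isdir(path):
            paths = [path + '/' + x for x in os.listdir(path)]
            return findTFiles(paths)
        elif path[-2:] == '.T':
            return [path]
        else:
            return []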
Test Plan:
I ran validate under Python 2 and 3 on Linux, and under Python 2 on msys2.
On Windows I've seen a large number of failures, but they don't
seem to be connected with this patch.
Reviewers: hvr, simonmar, thomie, austin
Reviewed By: austin
Subscribers: thomie, carter, ezyang, simonmar
Differential Revision: https://phabricator.haskell.org/D310
GHC Trac Issues: #9184
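
Most of the mechanical changes below replace Python-2-only constructs with forms
accepted by both Python 2 and 3. A minimal illustrative sketch of the recurring
idioms (not part of the patch; the config file name is hypothetical):

    from __future__ import print_function   # 'print' becomes a function under Python 2 as well

    import sys

    PYTHON3 = sys.version_info >= (3, 0)

    def run_config_file(path):
        # execfile() was removed in Python 3; exec the file's contents instead.
        exec(open(path).read())

    def drop_way(ways, way):
        # filter() returns a lazy iterator in Python 3, so use a list
        # comprehension wherever a real list is required.
        return [w for w in ways if w != way]

    print(drop_way(['normal', 'ghci', 'optasm'], 'ghci'))

    try:
        run_config_file('missing-config.py')
    except Exception as e:                   # 'except E, e:' is a syntax error in Python 3
        print('could not load config:', e)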
Diffstat (limited to 'testsuite/driver')
-rw-r--r--   testsuite/driver/runtests.py    67
-rw-r--r--   testsuite/driver/testlib.py    177
-rw-r--r--   testsuite/driver/testutil.py    34
3 files changed, 131 insertions, 147 deletions
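
One behavioural detail in the runtests.py hunk below is worth calling out: Python 3
text streams cannot be opened unbuffered, so the driver only rebuffers stdout there
instead of making it unbuffered. A sketch of that logic, lifted from the patch:

    import os
    import sys

    PYTHON3 = sys.version_info >= (3, 0)

    sys.stdout.flush()
    if PYTHON3:
        # in Python 3, we output text, which cannot be unbuffered
        sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w")
    else:
        # set stdout to unbuffered (only possible for Python 2 text mode)
        sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w", 0)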
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py index 571165a290..4e497e84e1 100644 --- a/testsuite/driver/runtests.py +++ b/testsuite/driver/runtests.py @@ -2,6 +2,8 @@ # (c) Simon Marlow 2002 # +from __future__ import print_function + import sys import os import string @@ -21,6 +23,11 @@ try: except: pass +PYTHON3 = sys.version_info >= (3, 0) +if PYTHON3: + print("*** WARNING: running testsuite using Python 3.\n" + "*** Python 3 support is experimental. See Trac #9184.") + from testutil import * from testglobals import * @@ -52,12 +59,12 @@ opts, args = getopt.getopt(sys.argv[1:], "e:", long_options) for opt,arg in opts: if opt == '--config': - execfile(arg) + exec(open(arg).read()) # -e is a string to execute from the command line. For example: # testframe -e 'config.compiler=ghc-5.04' if opt == '-e': - exec arg + exec(arg) if opt == '--rootdir': config.rootdirs.append(arg) @@ -83,9 +90,9 @@ for opt,arg in opts: sys.stderr.write("ERROR: requested way \'" + arg + "\' does not exist\n") sys.exit(1) - config.other_ways = filter(neq(arg), config.other_ways) - config.run_ways = filter(neq(arg), config.run_ways) - config.compile_ways = filter(neq(arg), config.compile_ways) + config.other_ways = [w for w in config.other_ways if w != arg] + config.run_ways = [w for w in config.run_ways if w != arg] + config.compile_ways = [w for w in config.compile_ways if w != arg] if opt == '--threads': config.threads = int(arg) @@ -117,17 +124,17 @@ if config.use_threads == 1: maj = int(re.sub('[^0-9].*', '', str(maj))) min = int(re.sub('[^0-9].*', '', str(min))) pat = int(re.sub('[^0-9].*', '', str(pat))) - if (maj, min, pat) < (2, 5, 2): - print "Warning: Ignoring request to use threads as python version < 2.5.2" - config.use_threads = 0 + if (maj, min) < (2, 6): + print("Python < 2.6 is not supported") + sys.exit(1) # We also need to disable threads for python 2.7.2, because of # this bug: http://bugs.python.org/issue13817 elif (maj, min, pat) == (2, 7, 2): - print "Warning: Ignoring request to use threads as python version is 2.7.2" - print "See http://bugs.python.org/issue13817 for details." + print("Warning: Ignoring request to use threads as python version is 2.7.2") + print("See http://bugs.python.org/issue13817 for details.") config.use_threads = 0 if windows: - print "Warning: Ignoring request to use threads as running on Windows" + print("Warning: Ignoring request to use threads as running on Windows") config.use_threads = 0 config.cygwin = False @@ -180,10 +187,10 @@ else: h.close() if v != '': os.environ['LC_ALL'] = v - print "setting LC_ALL to", v + print("setting LC_ALL to", v) else: - print 'WARNING: No UTF8 locale found.' - print 'You may get some spurious test failures.' + print('WARNING: No UTF8 locale found.') + print('You may get some spurious test failures.') # This has to come after arg parsing as the args can change the compiler get_compiler_info() @@ -230,7 +237,7 @@ if config.use_threads: if config.timeout == -1: config.timeout = int(read_no_crs(config.top + '/timeout/calibrate.out')) -print 'Timeout is ' + str(config.timeout) +print('Timeout is ' + str(config.timeout)) # ----------------------------------------------------------------------------- # The main dude @@ -240,40 +247,44 @@ if config.rootdirs == []: t_files = findTFiles(config.rootdirs) -print 'Found', len(t_files), '.T files...' 
+print('Found', len(t_files), '.T files...') t = getTestRun() # Avoid cmd.exe built-in 'date' command on Windows t.start_time = time.localtime() -print 'Beginning test run at', time.strftime("%c %Z",t.start_time) +print('Beginning test run at', time.strftime("%c %Z",t.start_time)) -# set stdout to unbuffered (is this the best way to do it?) sys.stdout.flush() -sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w", 0) +if PYTHON3: + # in Python 3, we output text, which cannot be unbuffered + sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w") +else: + # set stdout to unbuffered (is this the best way to do it?) + sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w", 0) # First collect all the tests to be run for file in t_files: if_verbose(2, '====> Scanning %s' % file) newTestDir(os.path.dirname(file)) try: - execfile(file) - except: - print '*** framework failure: found an error while executing ', file, ':' + exec(open(file).read()) + except Exception: + print('*** framework failure: found an error while executing ', file, ':') t.n_framework_failures = t.n_framework_failures + 1 traceback.print_exc() if config.list_broken: global brokens - print '' - print 'Broken tests:' - print (' '.join(map (lambda (b, d, n) : '#' + str(b) + '(' + d + '/' + n + ')', brokens))) - print '' + print('') + print('Broken tests:') + print(' '.join(map (lambda bdn: '#' + str(bdn[0]) + '(' + bdn[1] + '/' + bdn[2] + ')', brokens))) + print('') if t.n_framework_failures != 0: - print 'WARNING:', str(t.n_framework_failures), 'framework failures!' - print '' + print('WARNING:', str(t.n_framework_failures), 'framework failures!') + print('') else: # Now run all the tests if config.use_threads: diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py index e3562f7c54..93b18a8614 100644 --- a/testsuite/driver/testlib.py +++ b/testsuite/driver/testlib.py @@ -2,8 +2,7 @@ # (c) Simon Marlow 2002 # -# This allows us to use the "with X:" syntax with python 2.5: -from __future__ import with_statement +from __future__ import print_function import shutil import sys @@ -16,7 +15,6 @@ import time import datetime import copy import glob -import types from math import ceil, trunc have_subprocess = False @@ -24,15 +22,17 @@ try: import subprocess have_subprocess = True except: - print "Warning: subprocess not found, will fall back to spawnv" + print("Warning: subprocess not found, will fall back to spawnv") -from string import join from testglobals import * from testutil import * if config.use_threads: import threading - import thread + try: + import thread + except ImportError: # Python 3 + import _thread as thread global wantToStop wantToStop = False @@ -99,7 +99,7 @@ def reqlib( lib ): have_lib = {} def _reqlib( name, opts, lib ): - if have_lib.has_key(lib): + if lib in have_lib: got_it = have_lib[lib] else: if have_subprocess: @@ -284,7 +284,7 @@ def _stats_num_field( name, opts, field, expecteds ): if field in opts.stats_range_fields: framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check') - if type(expecteds) is types.ListType: + if type(expecteds) is list: for (b, expected, dev) in expecteds: if b: opts.stats_range_fields[field] = (expected, dev) @@ -512,9 +512,10 @@ def two_normalisers(f, g): # Function for composing two opt-fns together def executeSetups(fs, name, opts): - if type(fs) is types.ListType: + if type(fs) is list: # If we have a list of setups, then execute each one - map (lambda f : executeSetups(f, name, opts), fs) + for f in fs: + executeSetups(f, name, opts) else: # 
fs is a single function, so just apply it fs(name, opts) @@ -625,8 +626,7 @@ def test_common_work (name, opts, func, args): all_ways = ['normal'] # A test itself can request extra ways by setting opts.extra_ways - all_ways = all_ways + filter(lambda way: way not in all_ways, - opts.extra_ways) + all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways] t.total_test_cases = t.total_test_cases + len(all_ways) @@ -639,7 +639,7 @@ def test_common_work (name, opts, func, args): and way not in getTestOpts().omit_ways # Which ways we are asked to skip - do_ways = filter (ok_way,all_ways) + do_ways = list(filter (ok_way,all_ways)) # In fast mode, we skip all but one way if config.fast and len(do_ways) > 0: @@ -658,8 +658,8 @@ def test_common_work (name, opts, func, args): if getTestOpts().cleanup != '' and (config.clean_only or do_ways != []): pretest_cleanup(name) - clean(map (lambda suff: name + suff, - ['', '.exe', '.exe.manifest', '.genscript', + clean([name + suff for suff in [ + '', '.exe', '.exe.manifest', '.genscript', '.stderr.normalised', '.stdout.normalised', '.run.stderr.normalised', '.run.stdout.normalised', '.comp.stderr.normalised', '.comp.stdout.normalised', @@ -667,12 +667,13 @@ def test_common_work (name, opts, func, args): '.stats', '.comp.stats', '.hi', '.o', '.prof', '.exe.prof', '.hc', '_stub.h', '_stub.c', '_stub.o', - '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog'])) + '.hp', '.exe.hp', '.ps', '.aux', '.hcr', '.eventlog']]) if func == multi_compile or func == multi_compile_fail: extra_mods = args[1] - clean(map (lambda (f,x): replace_suffix(f, 'o'), extra_mods)) - clean(map (lambda (f,x): replace_suffix(f, 'hi'), extra_mods)) + clean([replace_suffix(fx[0],'o') for fx in extra_mods]) + clean([replace_suffix(fx[0], 'hi') for fx in extra_mods]) + clean(getTestOpts().clean_files) @@ -712,7 +713,7 @@ def test_common_work (name, opts, func, args): files_written_not_removed[name] = [f] except: pass - except Exception, e: + except Exception as e: framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e)) def clean(strs): @@ -724,19 +725,19 @@ def clean_full_path(name): try: # Remove files... os.remove(name) - except OSError, e1: + except OSError as e1: try: # ... and empty directories os.rmdir(name) - except OSError, e2: + except OSError as e2: # We don't want to fail here, but we do want to know # what went wrong, so print out the exceptions. # ENOENT isn't a problem, though, as we clean files # that don't necessarily exist. if e1.errno != errno.ENOENT: - print e1 + print(e1) if e2.errno != errno.ENOENT: - print e2 + print(e2) def do_test(name, way, func, args): full_name = name + '(' + way + ')' @@ -761,7 +762,7 @@ def do_test(name, way, func, args): framework_fail(name, way, 'pre-command exception') try: - result = apply(func, [name,way] + args) + result = func(*[name,way] + args) finally: if config.use_threads: t.lock.acquire() @@ -892,7 +893,8 @@ def run_command( name, way, cmd ): def ghci_script( name, way, script ): # filter out -fforce-recomp from compiler_always_flags, because we're # actually testing the recompilation behaviour in the GHCi tests. 
- flags = filter(lambda f: f != '-fforce-recomp', getTestOpts().compiler_always_flags) + flags = [f for f in getTestOpts().compiler_always_flags if f != '-fforce-recomp'] + flags.append(getTestOpts().extra_hc_opts) if getTestOpts().outputdir != None: flags.extend(["-outputdir", getTestOpts().outputdir]) @@ -900,10 +902,10 @@ def ghci_script( name, way, script ): # We pass HC and HC_OPTS as environment variables, so that the # script can invoke the correct compiler by using ':! $HC $HC_OPTS' cmd = "HC='" + config.compiler + "' " + \ - "HC_OPTS='" + join(flags,' ') + "' " + \ + "HC_OPTS='" + ' '.join(flags) + "' " + \ "'" + config.compiler + "'" + \ ' --interactive -v0 -ignore-dot-ghci ' + \ - join(flags,' ') + ' '.join(flags) getTestOpts().stdin = script return simple_run( name, way, cmd, getTestOpts().extra_run_opts ) @@ -967,7 +969,7 @@ def do_compile( name, way, should_fail, top_mod, extra_mods, extra_hc_opts ): return passed() def compile_cmp_asm( name, way, extra_hc_opts ): - print 'Compile only, extra args = ', extra_hc_opts + print('Compile only, extra args = ', extra_hc_opts) pretest_cleanup(name) result = simple_build( name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0, 0) @@ -1049,7 +1051,7 @@ def checkStats(name, way, stats_file, range_fields): for (field, (expected, dev)) in range_fields.items(): m = re.search('\("' + field + '", "([0-9]+)"\)', contents) if m == None: - print 'Failed to find field: ', field + print('Failed to find field: ', field) result = failBecause('no such stats field') val = int(m.group(1)) @@ -1059,12 +1061,12 @@ def checkStats(name, way, stats_file, range_fields): deviation = round(((float(val) * 100)/ expected) - 100, 1) if val < lowerBound: - print field, 'value is too low:' - print '(If this is because you have improved GHC, please' - print 'update the test so that GHC doesn\'t regress again)' + print(field, 'value is too low:') + print('(If this is because you have improved GHC, please') + print('update the test so that GHC doesn\'t regress again)') result = failBecause('stat too good') if val > upperBound: - print field, 'value is too high:' + print(field, 'value is too high:') result = failBecause('stat not good enough') if val < lowerBound or val > upperBound or config.verbose >= 4: @@ -1072,9 +1074,11 @@ def checkStats(name, way, stats_file, range_fields): valLen = len(valStr) expectedStr = str(expected) expectedLen = len(expectedStr) - length = max(map (lambda x : len(str(x)), [expected, lowerBound, upperBound, val])) + length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val]) + def display(descr, val, extra): - print descr, string.rjust(str(val), length), extra + print(descr, str(val).rjust(length), extra) + display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%') display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '') display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '') @@ -1149,15 +1153,15 @@ def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, comp_flags = copy.copy(getTestOpts().compiler_always_flags) if noforce: - comp_flags = filter(lambda f: f != '-fforce-recomp', comp_flags) + comp_flags = [f for f in comp_flags if f != '-fforce-recomp'] if getTestOpts().outputdir != None: comp_flags.extend(["-outputdir", getTestOpts().outputdir]) cmd = 'cd ' + getTestOpts().testdir + " && " + cmd_prefix + "'" \ + config.compiler + "' " \ - + join(comp_flags,' ') + ' ' \ + + ' '.join(comp_flags) + ' ' \ + to_do + ' ' + srcname 
+ ' ' \ - + join(config.way_flags(name)[way],' ') + ' ' \ + + ' '.join(config.way_flags(name)[way]) + ' ' \ + extra_hc_opts + ' ' \ + opts.extra_hc_opts + ' ' \ + '>' + errname + ' 2>&1' @@ -1166,7 +1170,7 @@ def simple_build( name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, if result != 0 and not should_fail: actual_stderr = qualify(name, 'comp.stderr') - if_verbose(1,'Compile failed (status ' + `result` + ') errors were:') + if_verbose(1,'Compile failed (status ' + repr(result) + ') errors were:') if_verbose_dump(1,actual_stderr) # ToDo: if the sub-shell was killed by ^C, then exit @@ -1250,7 +1254,7 @@ def simple_run( name, way, prog, args ): # check the exit code if exit_code != opts.exit_code: - print 'Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')' + print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')') dump_stdout(name) dump_stderr(name) return failBecause('bad exit code') @@ -1282,7 +1286,7 @@ def rts_flags(way): if args == []: return '' else: - return '+RTS ' + join(args,' ') + ' -RTS' + return '+RTS ' + ' '.join(args) + ' -RTS' # ----------------------------------------------------------------------------- # Run a program in the interpreter and check its output @@ -1339,9 +1343,9 @@ def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ): flags.extend(["-outputdir", getTestOpts().outputdir]) cmd = "'" + config.compiler + "' " \ - + join(flags,' ') + ' ' \ + + ' '.join(flags) + ' ' \ + srcname + ' ' \ - + join(config.way_flags(name)[way],' ') + ' ' \ + + ' '.join(config.way_flags(name)[way]) + ' ' \ + extra_hc_opts + ' ' \ + getTestOpts().extra_hc_opts + ' ' \ + '<' + scriptname + ' 1>' + outname + ' 2>' + errname @@ -1366,7 +1370,7 @@ def interpreter_run( name, way, extra_hc_opts, compile_only, top_mod ): # check the exit code if exit_code != getTestOpts().exit_code: - print 'Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')' + print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')') dump_stdout(name) dump_stderr(name) return failBecause('bad exit code') @@ -1428,8 +1432,8 @@ def check_stdout_ok( name ): expected_stdout_file, actual_stdout_file) def dump_stdout( name ): - print 'Stdout:' - print read_no_crs(qualify(name, 'run.stdout')) + print('Stdout:') + print(read_no_crs(qualify(name, 'run.stdout'))) def check_stderr_ok( name ): if getTestOpts().with_namebase == None: @@ -1451,8 +1455,8 @@ def check_stderr_ok( name ): expected_stderr_file, actual_stderr_file) def dump_stderr( name ): - print "Stderr:" - print read_no_crs(qualify(name, 'run.stderr')) + print("Stderr:") + print(read_no_crs(qualify(name, 'run.stderr'))) def read_no_crs(file): str = '' @@ -1487,13 +1491,13 @@ def check_hp_ok(name): if (gsResult == 0): return (True) else: - print "hp2ps output for " + name + "is not valid PostScript" + print("hp2ps output for " + name + "is not valid PostScript") else: return (True) # assume postscript is valid without ghostscript else: - print "hp2ps did not generate PostScript for " + name + print("hp2ps did not generate PostScript for " + name) return (False) else: - print "hp2ps error when processing heap profile for " + name + print("hp2ps error when processing heap profile for " + name) return(False) def check_prof_ok(name): @@ -1501,11 +1505,11 @@ def check_prof_ok(name): prof_file = qualify(name,'prof') if not os.path.exists(prof_file): - print prof_file + " does not exist" + print(prof_file + " does not exist") return(False) if 
os.path.getsize(qualify(name,'prof')) == 0: - print prof_file + " is empty" + print(prof_file + " is empty") return(False) if getTestOpts().with_namebase == None: @@ -1667,16 +1671,16 @@ def normalise_asm( str ): out = '\n'.join(out) return out -def if_verbose( n, str ): +def if_verbose( n, s ): if config.verbose >= n: - print str + print(s) def if_verbose_dump( n, f ): if config.verbose >= n: try: - print open(f).read() + print(open(f).read()) except: - print '' + print('') def rawSystem(cmd_and_args): # We prefer subprocess.call to os.spawnv as the latter @@ -1904,7 +1908,7 @@ def checkForFilesWrittenProblems(file): if len(files_written_not_removed) > 0: file.write("\n") file.write("\nSome files written but not removed:\n") - tests = files_written_not_removed.keys() + tests = list(files_written_not_removed.keys()) tests.sort() for t in tests: for f in files_written_not_removed[t]: @@ -1916,7 +1920,7 @@ def checkForFilesWrittenProblems(file): if len(bad_file_usages) > 0: file.write("\n") file.write("\nSome bad file usages:\n") - tests = bad_file_usages.keys() + tests = list(bad_file_usages.keys()) tests.sort() for t in tests: for f in bad_file_usages[t]: @@ -1931,7 +1935,7 @@ def genGSCmd(psfile): def gsNotWorking(): global gs_working - print "GhostScript not available for hp2ps tests" + print("GhostScript not available for hp2ps tests") global gs_working gs_working = 0 @@ -1941,7 +1945,7 @@ if config.have_profiling: if resultGood == 0: resultBad = runCmdExitCode(genGSCmd(config.confdir + '/bad.ps')); if resultBad != 0: - print "GhostScript available for hp2ps tests" + print("GhostScript available for hp2ps tests") gs_working = 1; else: gsNotWorking(); @@ -2008,7 +2012,7 @@ def platform_wordsize_qualify( name, suff ): for vers in ['-' + config.compiler_maj_version, '']] dir = glob.glob(basepath + '*') - dir = map (lambda d: normalise_slashes_(d), dir) + dir = [normalise_slashes_(d) for d in dir] for (platformSpecific, f) in paths: if f in dir: @@ -2041,14 +2045,17 @@ def pretest_cleanup(name): # not interested in the return code # ----------------------------------------------------------------------------- -# Return a list of all the files ending in '.T' below the directory dir. +# Return a list of all the files ending in '.T' below directories roots. 
def findTFiles(roots): - return concat(map(findTFiles_,roots)) + # It would be better to use os.walk, but that + # gives backslashes on Windows, which trip the + # testsuite later :-( + return [filename for root in roots for filename in findTFiles_(root)] def findTFiles_(path): if os.path.isdir(path): - paths = map(lambda x, p=path: p + '/' + x, os.listdir(path)) + paths = [path + '/' + x for x in os.listdir(path)] return findTFiles(paths) elif path[-2:] == '.T': return [path] @@ -2064,28 +2071,28 @@ def summary(t, file): printUnexpectedTests(file, [t.unexpected_passes, t.unexpected_failures]) file.write('OVERALL SUMMARY for test run started at ' + time.strftime("%c %Z", t.start_time) + '\n' - + string.rjust(str(datetime.timedelta(seconds= - round(time.time() - time.mktime(t.start_time)))), 8) + + str(datetime.timedelta(seconds= + round(time.time() - time.mktime(t.start_time)))).rjust(8) + ' spent to go through\n' - + string.rjust(`t.total_tests`, 8) + + repr(t.total_tests).rjust(8) + ' total tests, which gave rise to\n' - + string.rjust(`t.total_test_cases`, 8) + + repr(t.total_test_cases).rjust(8) + ' test cases, of which\n' - + string.rjust(`t.n_tests_skipped`, 8) + + repr(t.n_tests_skipped).rjust(8) + ' were skipped\n' + '\n' - + string.rjust(`t.n_missing_libs`, 8) + + repr(t.n_missing_libs).rjust(8) + ' had missing libraries\n' - + string.rjust(`t.n_expected_passes`, 8) + + repr(t.n_expected_passes).rjust(8) + ' expected passes\n' - + string.rjust(`t.n_expected_failures`, 8) + + repr(t.n_expected_failures).rjust(8) + ' expected failures\n' + '\n' - + string.rjust(`t.n_framework_failures`, 8) + + repr(t.n_framework_failures).rjust(8) + ' caused framework failures\n' - + string.rjust(`t.n_unexpected_passes`, 8) + + repr(t.n_unexpected_passes).rjust(8) + ' unexpected passes\n' - + string.rjust(`t.n_unexpected_failures`, 8) + + repr(t.n_unexpected_failures).rjust(8) + ' unexpected failures\n' + '\n') @@ -2108,7 +2115,7 @@ def printUnexpectedTests(file, testInfoss): for testInfos in testInfoss: directories = testInfos.keys() for directory in directories: - tests = testInfos[directory].keys() + tests = list(testInfos[directory].keys()) unexpected += tests if unexpected != []: file.write('Unexpected results from:\n') @@ -2116,30 +2123,30 @@ def printUnexpectedTests(file, testInfoss): file.write('\n') def printPassingTestInfosSummary(file, testInfos): - directories = testInfos.keys() + directories = list(testInfos.keys()) directories.sort() - maxDirLen = max(map ((lambda x : len(x)), directories)) + maxDirLen = max(len(x) for x in directories) for directory in directories: - tests = testInfos[directory].keys() + tests = list(testInfos[directory].keys()) tests.sort() for test in tests: file.write(' ' + directory.ljust(maxDirLen + 2) + test + \ - ' (' + join(testInfos[directory][test],',') + ')\n') + ' (' + ','.join(testInfos[directory][test]) + ')\n') file.write('\n') def printFailingTestInfosSummary(file, testInfos): - directories = testInfos.keys() + directories = list(testInfos.keys()) directories.sort() - maxDirLen = max(map ((lambda x : len(x)), directories)) + maxDirLen = max(len(d) for d in directories) for directory in directories: - tests = testInfos[directory].keys() + tests = list(testInfos[directory].keys()) tests.sort() for test in tests: reasons = testInfos[directory][test].keys() for reason in reasons: file.write(' ' + directory.ljust(maxDirLen + 2) + test + \ ' [' + reason + ']' + \ - ' (' + join(testInfos[directory][test][reason],',') + ')\n') + ' (' + 
','.join(testInfos[directory][test][reason]) + ')\n') file.write('\n') def getStdout(cmd): diff --git a/testsuite/driver/testutil.py b/testsuite/driver/testutil.py index 0738683111..ec45e93987 100644 --- a/testsuite/driver/testutil.py +++ b/testsuite/driver/testutil.py @@ -1,39 +1,5 @@ # ----------------------------------------------------------------------------- # Utils - -def id(a): - return a - -def eq(x): - return lambda y,z=x: y == z - -def neq(x): - return lambda y,z=x: y != z - -def append(x,y): - return x + y - -def concat(xs): - return reduce(append,xs,[]) - -def chop(s): - if s[len(s)-1:] == '\n': - return s[:len(s)-1] - else: - return s - -def all(p,xs): - for x in xs: - if not p(x): - return False - return True - -def elem(xs): - return lambda x: x in xs - -def notElem(xs): - return lambda x: x not in xs - def version_to_ints(v): return [ int(x) for x in v.split('.') ] |
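
Two smaller themes run through the testlib.py and testutil.py hunks above: dictionary
views such as .keys() are no longer lists in Python 3, so they are wrapped in list()
before sorting, and the small helpers in testutil.py (concat, neq, elem, ...) are
dropped in favour of comprehensions and builtins. A short illustrative sketch of the
first pattern, using hypothetical data:

    from __future__ import print_function

    files_written_not_removed = {'T1234': ['T1234.hi'], 'T5678': ['T5678.o']}

    # Python 2: .keys() returns a list that can be sorted in place.
    # Python 3: .keys() returns a view, so materialise it with list() first.
    tests = list(files_written_not_removed.keys())
    tests.sort()
    for t in tests:
        print(t, ':', ' '.join(files_written_not_removed[t]))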