Diffstat (limited to 'Lib/test/libregrtest')
-rw-r--r--  Lib/test/libregrtest/__init__.py      5
-rw-r--r--  Lib/test/libregrtest/cmdline.py     347
-rw-r--r--  Lib/test/libregrtest/main.py        532
-rw-r--r--  Lib/test/libregrtest/refleak.py     269
-rw-r--r--  Lib/test/libregrtest/runtest.py     245
-rw-r--r--  Lib/test/libregrtest/runtest_mp.py  245
-rw-r--r--  Lib/test/libregrtest/save_env.py    285
-rw-r--r--  Lib/test/libregrtest/setup.py       121
8 files changed, 2049 insertions, 0 deletions
diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py
new file mode 100644
index 0000000000..7ba0e6e535
--- /dev/null
+++ b/Lib/test/libregrtest/__init__.py
@@ -0,0 +1,5 @@
+# We import importlib *ASAP* in order to test #15386
+import importlib
+
+from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES
+from test.libregrtest.main import main
diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
new file mode 100644
index 0000000000..d621f5f9f3
--- /dev/null
+++ b/Lib/test/libregrtest/cmdline.py
@@ -0,0 +1,347 @@
+import argparse
+import os
+import sys
+from test import support
+
+
+USAGE = """\
+python -m test [options] [test_name1 [test_name2 ...]]
+python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
+"""
+
+DESCRIPTION = """\
+Run Python regression tests.
+
+If no arguments or options are provided, finds all files matching
+the pattern "test_*" in the Lib/test subdirectory and runs
+them in alphabetical order (but see -M and -u, below, for exceptions).
+
+For more rigorous testing, it is useful to use the following
+command line:
+
+python -E -Wd -m test [options] [test_name1 ...]
+"""
+
+EPILOG = """\
+Additional option details:
+
+-r randomizes test execution order. You can use --randseed=int to provide an
+int seed value for the randomizer; this is useful for reproducing troublesome
+test orders.
+
+-s On the first invocation of regrtest using -s, the first test file found
+or the first test file given on the command line is run, and the name of
+the next test is recorded in a file named pynexttest. If run from the
+Python build directory, pynexttest is located in the 'build' subdirectory,
+otherwise it is located in tempfile.gettempdir(). On subsequent runs,
+the test in pynexttest is run, and the next test is written to pynexttest.
+When the last test has been run, pynexttest is deleted. In this way it
+is possible to single step through the test files. This is useful when
+doing memory analysis on the Python interpreter, a process which tends to
+consume too many resources to run the full regression test non-stop.
+
+-S is used to continue running tests after an aborted run. It will
+maintain the order of a standard run (i.e., this assumes -r is not used).
+This is useful after the tests have prematurely stopped for some external
+reason and you want to start running from where you left off rather
+than starting from the beginning.
+
+-f reads the names of tests from the file given as f's argument, one
+or more test names per line. Whitespace is ignored. Blank lines and
+lines beginning with '#' are ignored. This is especially useful for
+whittling down failures involving interactions among tests.
+
+-L causes the leaks(1) command to be run just before exit if it exists.
+leaks(1) is available on Mac OS X and presumably on some other
+FreeBSD-derived systems.
+
+-R runs each test several times and examines sys.gettotalrefcount() to
+see if the test appears to be leaking references. The argument should
+be of the form stab:run:fname where 'stab' is the number of times the
+test is run to let gettotalrefcount settle down, 'run' is the number
+of times further it is run and 'fname' is the name of the file the
+reports are written to. These parameters all have defaults (5, 4 and
+"reflog.txt" respectively), and the minimal invocation is '-R :'.
+
+-M runs tests that require an exorbitant amount of memory. These tests
+typically try to ascertain that containers keep working when containing more than
+2 billion objects, which only works on 64-bit systems. There are also some
+tests that try to exhaust the address space of the process, which only makes
+sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
+which is a string in the form of '2.5Gb', determines how much memory the
+tests will limit themselves to (but they may go slightly over). The number
+shouldn't be more memory than the machine has (including swap memory). You
+should also keep in mind that swap memory is generally much, much slower
+than RAM, and setting memlimit to all available RAM or higher will heavily
+tax the machine. On the other hand, it is no use running these tests with a
+limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
+to use more than memlimit memory will be skipped. The big-memory tests
+generally run very, very long.
+
+-u is used to specify which special resource intensive tests to run,
+such as those requiring large file support or network connectivity.
+The argument is a comma-separated list of words indicating the
+resources to test. Currently only the following are defined:
+
+ all - Enable all special resources.
+
+ none - Disable all special resources (this is the default).
+
+ audio - Tests that use the audio device. (There are known
+ cases of broken audio drivers that can crash Python or
+ even the Linux kernel.)
+
+ curses - Tests that use curses and will modify the terminal's
+ state and output modes.
+
+ largefile - It is okay to run some test that may create huge
+ files. These tests can take a long time and may
+ consume >2GB of disk space temporarily.
+
+ network - It is okay to run tests that use external network
+ resources, e.g. testing SSL support for sockets.
+
+ decimal - Test the decimal module against a large suite that
+ verifies compliance with standards.
+
+ cpu - Used for certain CPU-heavy tests.
+
+ subprocess - Run all tests for the subprocess module.
+
+ urlfetch - It is okay to download files required for testing.
+
+ gui - Run tests that require a running GUI.
+
+ tzdata - Run tests that require timezone data.
+
+To enable all resources except one, use '-uall,-<resource>'. For
+example, to run all the tests except for the gui tests, give the
+option '-uall,-gui'.
+"""
+
+
+RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
+ 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'tzdata')
+
+class _ArgParser(argparse.ArgumentParser):
+
+ def error(self, message):
+ super().error(message + "\nPass -h or --help for complete help.")
+
+
+def _create_parser():
+ # Set prog to prevent the uninformative "__main__.py" from displaying in
+ # error messages when using "python -m test ...".
+ parser = _ArgParser(prog='regrtest.py',
+ usage=USAGE,
+ description=DESCRIPTION,
+ epilog=EPILOG,
+ add_help=False,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ # Arguments with this clause added to its help are described further in
+ # the epilog's "Additional option details" section.
+ more_details = ' See the section at bottom for more details.'
+
+ group = parser.add_argument_group('General options')
+ # We add help explicitly to control what argument group it renders under.
+ group.add_argument('-h', '--help', action='help',
+ help='show this help message and exit')
+ group.add_argument('--timeout', metavar='TIMEOUT', type=float,
+ help='dump the traceback and exit if a test takes '
+ 'more than TIMEOUT seconds; disabled if TIMEOUT '
+ 'is negative or equals to zero')
+ group.add_argument('--wait', action='store_true',
+ help='wait for user input, e.g., allow a debugger '
+ 'to be attached')
+ group.add_argument('--slaveargs', metavar='ARGS')
+ group.add_argument('-S', '--start', metavar='START',
+ help='the name of the test at which to start.' +
+ more_details)
+
+ group = parser.add_argument_group('Verbosity')
+ group.add_argument('-v', '--verbose', action='count',
+ help='run tests in verbose mode with output to stdout')
+ group.add_argument('-w', '--verbose2', action='store_true',
+ help='re-run failed tests in verbose mode')
+ group.add_argument('-W', '--verbose3', action='store_true',
+ help='display test output on failure')
+ group.add_argument('-q', '--quiet', action='store_true',
+ help='no output unless one or more tests fail')
+ group.add_argument('-o', '--slowest', action='store_true', dest='print_slow',
+ help='print the slowest 10 tests')
+ group.add_argument('--header', action='store_true',
+ help='print header with interpreter info')
+
+ group = parser.add_argument_group('Selecting tests')
+ group.add_argument('-r', '--randomize', action='store_true',
+ help='randomize test execution order.' + more_details)
+ group.add_argument('--randseed', metavar='SEED',
+ dest='random_seed', type=int,
+ help='pass a random seed to reproduce a previous '
+ 'random run')
+ group.add_argument('-f', '--fromfile', metavar='FILE',
+ help='read names of tests to run from a file.' +
+ more_details)
+ group.add_argument('-x', '--exclude', action='store_true',
+ help='arguments are tests to *exclude*')
+ group.add_argument('-s', '--single', action='store_true',
+ help='single step through a set of tests.' +
+ more_details)
+ group.add_argument('-m', '--match', metavar='PAT',
+ dest='match_tests',
+ help='match test cases and methods with glob pattern PAT')
+ group.add_argument('-G', '--failfast', action='store_true',
+ help='fail as soon as a test fails (only with -v or -W)')
+ group.add_argument('-u', '--use', metavar='RES1,RES2,...',
+ action='append', type=resources_list,
+ help='specify which special resource intensive tests '
+ 'to run.' + more_details)
+ group.add_argument('-M', '--memlimit', metavar='LIMIT',
+ help='run very large memory-consuming tests.' +
+ more_details)
+ group.add_argument('--testdir', metavar='DIR',
+ type=relative_filename,
+ help='execute test files in the specified directory '
+ '(instead of the Python stdlib test suite)')
+
+ group = parser.add_argument_group('Special runs')
+ group.add_argument('-l', '--findleaks', action='store_true',
+ help='if GC is available detect tests that leak memory')
+ group.add_argument('-L', '--runleaks', action='store_true',
+ help='run the leaks(1) command just before exit.' +
+ more_details)
+ group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
+ type=huntrleaks,
+ help='search for reference leaks (needs debug build, '
+ 'very slow).' + more_details)
+ group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
+ dest='use_mp', type=int,
+ help='run PROCESSES processes at once')
+ group.add_argument('-T', '--coverage', action='store_true',
+ dest='trace',
+ help='turn on code coverage tracing using the trace '
+ 'module')
+ group.add_argument('-D', '--coverdir', metavar='DIR',
+ type=relative_filename,
+ help='directory where coverage files are put')
+ group.add_argument('-N', '--nocoverdir',
+ action='store_const', const=None, dest='coverdir',
+ help='put coverage files alongside modules')
+ group.add_argument('-t', '--threshold', metavar='THRESHOLD',
+ type=int,
+ help='call gc.set_threshold(THRESHOLD)')
+ group.add_argument('-n', '--nowindows', action='store_true',
+ help='suppress error message boxes on Windows')
+ group.add_argument('-F', '--forever', action='store_true',
+ help='run the specified tests in a loop, until an '
+ 'error happens')
+ group.add_argument('--list-tests', action='store_true',
+ help="only write the name of tests that will be run, "
+ "don't execute them")
+ group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
+ help='enable Profile Guided Optimization training')
+
+ return parser
+
+
+def relative_filename(string):
+ # CWD is replaced with a temporary dir before calling main(), so we
+ # join it with the saved CWD so it ends up where the user expects.
+ return os.path.join(support.SAVEDCWD, string)
+
+
+def huntrleaks(string):
+ args = string.split(':')
+ if len(args) not in (2, 3):
+ raise argparse.ArgumentTypeError(
+ 'needs 2 or 3 colon-separated arguments')
+ nwarmup = int(args[0]) if args[0] else 5
+ ntracked = int(args[1]) if args[1] else 4
+ fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
+ return nwarmup, ntracked, fname
+
+
+def resources_list(string):
+ u = [x.lower() for x in string.split(',')]
+ for r in u:
+ if r == 'all' or r == 'none':
+ continue
+ if r[0] == '-':
+ r = r[1:]
+ if r not in RESOURCE_NAMES:
+ raise argparse.ArgumentTypeError('invalid resource: ' + r)
+ return u
+
+
+def _parse_args(args, **kwargs):
+ # Defaults
+ ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
+ exclude=False, single=False, randomize=False, fromfile=None,
+ findleaks=False, use_resources=None, trace=False, coverdir='coverage',
+ runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
+ random_seed=None, use_mp=None, verbose3=False, forever=False,
+ header=False, failfast=False, match_tests=None, pgo=False)
+ for k, v in kwargs.items():
+ if not hasattr(ns, k):
+ raise TypeError('%r is an invalid keyword argument '
+ 'for this function' % k)
+ setattr(ns, k, v)
+ if ns.use_resources is None:
+ ns.use_resources = []
+
+ parser = _create_parser()
+ # Issue #14191: argparse doesn't support "intermixed" positional and
+ # optional arguments. Use parse_known_args() as workaround.
+ ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
+ for arg in ns.args:
+ if arg.startswith('-'):
+ parser.error("unrecognized arguments: %s" % arg)
+ sys.exit(1)
+
+ if ns.single and ns.fromfile:
+ parser.error("-s and -f don't go together!")
+ if ns.use_mp is not None and ns.trace:
+ parser.error("-T and -j don't go together!")
+ if ns.use_mp is not None and ns.findleaks:
+ parser.error("-l and -j don't go together!")
+ if ns.failfast and not (ns.verbose or ns.verbose3):
+ parser.error("-G/--failfast needs either -v or -W")
+ if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+ parser.error("--pgo/-v don't go together!")
+
+ if ns.nowindows:
+ print("Warning: the --nowindows (-n) option is deprecated. "
+ "Use -vv to display assertions in stderr.", file=sys.stderr)
+
+ if ns.quiet:
+ ns.verbose = 0
+ if ns.timeout is not None:
+ if ns.timeout <= 0:
+ ns.timeout = None
+ if ns.use_mp is not None:
+ if ns.use_mp <= 0:
+ # Use all cores + extras for tests that like to sleep
+ ns.use_mp = 2 + (os.cpu_count() or 1)
+ if ns.use:
+ for a in ns.use:
+ for r in a:
+ if r == 'all':
+ ns.use_resources[:] = RESOURCE_NAMES
+ continue
+ if r == 'none':
+ del ns.use_resources[:]
+ continue
+ remove = False
+ if r[0] == '-':
+ remove = True
+ r = r[1:]
+ if remove:
+ if r in ns.use_resources:
+ ns.use_resources.remove(r)
+ elif r not in ns.use_resources:
+ ns.use_resources.append(r)
+ if ns.random_seed is not None:
+ ns.randomize = True
+
+ return ns
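
A quick usage sketch of the option handling above (not part of the diff; it assumes the new test.libregrtest.cmdline module is importable from a checkout with this commit applied). The expected values follow from resources_list(), huntrleaks() and the post-processing in _parse_args():

    from test.libregrtest.cmdline import _parse_args

    ns = _parse_args(['-u', 'all,-gui', '-R', '3:3:', '-j', '0', 'test_os'])
    print(ns.use_resources)   # every name in RESOURCE_NAMES except 'gui'
    print(ns.huntrleaks)      # (3, 3, 'reflog.txt'): the empty fname falls back to the default
    print(ns.use_mp)          # 2 + os.cpu_count(), because a non-positive -j means "use all cores"
    print(ns.args)            # ['test_os'], collected through parse_known_args()
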
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
new file mode 100644
index 0000000000..f0effc973c
--- /dev/null
+++ b/Lib/test/libregrtest/main.py
@@ -0,0 +1,532 @@
+import datetime
+import faulthandler
+import locale
+import os
+import platform
+import random
+import re
+import sys
+import sysconfig
+import tempfile
+import textwrap
+import time
+from test.libregrtest.cmdline import _parse_args
+from test.libregrtest.runtest import (
+ findtests, runtest,
+ STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
+ INTERRUPTED, CHILD_ERROR,
+ PROGRESS_MIN_TIME, format_test_result)
+from test.libregrtest.setup import setup_tests
+from test import support
+try:
+ import gc
+except ImportError:
+ gc = None
+
+
+# When tests are run from the Python build directory, it is best practice
+# to keep the test files in a subfolder. This eases the cleanup of leftover
+# files using the "make distclean" command.
+if sysconfig.is_python_build():
+ TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
+else:
+ TEMPDIR = tempfile.gettempdir()
+TEMPDIR = os.path.abspath(TEMPDIR)
+
+
+def format_duration(seconds):
+ if seconds < 1.0:
+ return '%.0f ms' % (seconds * 1e3)
+ if seconds < 60.0:
+ return '%.0f sec' % seconds
+
+ minutes, seconds = divmod(seconds, 60.0)
+ return '%.0f min %.0f sec' % (minutes, seconds)
+
+
+class Regrtest:
+ """Execute a test suite.
+
+ This also parses command-line options and modifies its behavior
+ accordingly.
+
+ tests -- a list of strings containing test names (optional)
+ testdir -- the directory in which to look for tests (optional)
+
+ Users other than the Python test suite will certainly want to
+ specify testdir; if it's omitted, the directory containing the
+ Python test suite is searched for.
+
+ If the tests argument is omitted, the tests listed on the
+ command-line will be used. If that's empty, too, then all *.py
+ files beginning with test_ will be used.
+
+ The other default arguments (verbose, quiet, exclude,
+ single, randomize, findleaks, use_resources, trace, coverdir,
+ print_slow, and random_seed) allow programmers calling main()
+ directly to set the values that would normally be set by flags
+ on the command line.
+ """
+ def __init__(self):
+ # Namespace of command line options
+ self.ns = None
+
+ # tests
+ self.tests = []
+ self.selected = []
+
+ # test results
+ self.good = []
+ self.bad = []
+ self.skipped = []
+ self.resource_denieds = []
+ self.environment_changed = []
+ self.interrupted = False
+
+ # used by --slow
+ self.test_times = []
+
+ # used by --coverage, trace.Trace instance
+ self.tracer = None
+
+ # used by --findleaks, store for gc.garbage
+ self.found_garbage = []
+
+ # used to display the progress bar "[ 3/100]"
+ self.start_time = time.monotonic()
+ self.test_count = ''
+ self.test_count_width = 1
+
+ # used by --single
+ self.next_single_test = None
+ self.next_single_filename = None
+
+ def accumulate_result(self, test, result):
+ ok, test_time = result
+ if ok not in (CHILD_ERROR, INTERRUPTED):
+ self.test_times.append((test_time, test))
+ if ok == PASSED:
+ self.good.append(test)
+ elif ok == FAILED:
+ self.bad.append(test)
+ elif ok == ENV_CHANGED:
+ self.environment_changed.append(test)
+ elif ok == SKIPPED:
+ self.skipped.append(test)
+ elif ok == RESOURCE_DENIED:
+ self.skipped.append(test)
+ self.resource_denieds.append(test)
+
+ def display_progress(self, test_index, test):
+ if self.ns.quiet:
+ return
+ if self.bad and not self.ns.pgo:
+ fmt = "{time} [{test_index:{count_width}}{test_count}/{nbad}] {test_name}"
+ else:
+ fmt = "{time} [{test_index:{count_width}}{test_count}] {test_name}"
+ test_time = time.monotonic() - self.start_time
+ test_time = datetime.timedelta(seconds=int(test_time))
+ line = fmt.format(count_width=self.test_count_width,
+ test_index=test_index,
+ test_count=self.test_count,
+ nbad=len(self.bad),
+ test_name=test,
+ time=test_time)
+ print(line, flush=True)
+
+ def parse_args(self, kwargs):
+ ns = _parse_args(sys.argv[1:], **kwargs)
+
+ if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'):
+ print("Warning: The timeout option requires "
+ "faulthandler.dump_traceback_later", file=sys.stderr)
+ ns.timeout = None
+
+ if ns.threshold is not None and gc is None:
+ print('No GC available, ignore --threshold.', file=sys.stderr)
+ ns.threshold = None
+
+ if ns.findleaks:
+ if gc is not None:
+ # Uncomment the line below to report garbage that is not
+ # freeable by reference counting alone. By default only
+ # garbage that is not collectable by the GC is reported.
+ pass
+ #gc.set_debug(gc.DEBUG_SAVEALL)
+ else:
+ print('No GC available, disabling --findleaks',
+ file=sys.stderr)
+ ns.findleaks = False
+
+ # Strip .py extensions.
+ removepy(ns.args)
+
+ return ns
+
+ def find_tests(self, tests):
+ self.tests = tests
+
+ if self.ns.single:
+ self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest')
+ try:
+ with open(self.next_single_filename, 'r') as fp:
+ next_test = fp.read().strip()
+ self.tests = [next_test]
+ except OSError:
+ pass
+
+ if self.ns.fromfile:
+ self.tests = []
+ # regex to match 'test_builtin' in line:
+ # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
+ regex = (r'^(?:[0-9]+:[0-9]+:[0-9]+ *)?'
+ r'(?:\[[0-9/ ]+\] *)?'
+ r'(test_[a-zA-Z0-9_]+)')
+ regex = re.compile(regex)
+ with open(os.path.join(support.SAVEDCWD, self.ns.fromfile)) as fp:
+ for line in fp:
+ line = line.strip()
+ if line.startswith('#'):
+ continue
+ match = regex.match(line)
+ if match is None:
+ continue
+ self.tests.append(match.group(1))
+
+ removepy(self.tests)
+
+ stdtests = STDTESTS[:]
+ nottests = NOTTESTS.copy()
+ if self.ns.exclude:
+ for arg in self.ns.args:
+ if arg in stdtests:
+ stdtests.remove(arg)
+ nottests.add(arg)
+ self.ns.args = []
+
+ # if testdir is set, then we are not running the Python test suite, so
+ # don't add default tests to be executed or skipped (pass empty values)
+ if self.ns.testdir:
+ alltests = findtests(self.ns.testdir, list(), set())
+ else:
+ alltests = findtests(self.ns.testdir, stdtests, nottests)
+
+ if not self.ns.fromfile:
+ self.selected = self.tests or self.ns.args or alltests
+ else:
+ self.selected = self.tests
+ if self.ns.single:
+ self.selected = self.selected[:1]
+ try:
+ pos = alltests.index(self.selected[0])
+ self.next_single_test = alltests[pos + 1]
+ except IndexError:
+ pass
+
+ # Remove all the selected tests that precede start if it's set.
+ if self.ns.start:
+ try:
+ del self.selected[:self.selected.index(self.ns.start)]
+ except ValueError:
+ print("Couldn't find starting test (%s), using all tests"
+ % self.ns.start, file=sys.stderr)
+
+ if self.ns.randomize:
+ if self.ns.random_seed is None:
+ self.ns.random_seed = random.randrange(10000000)
+ random.seed(self.ns.random_seed)
+ random.shuffle(self.selected)
+
+ def list_tests(self):
+ for name in self.selected:
+ print(name)
+
+ def rerun_failed_tests(self):
+ self.ns.verbose = True
+ self.ns.failfast = False
+ self.ns.verbose3 = False
+ self.ns.match_tests = None
+
+ print("Re-running failed tests in verbose mode")
+ for test in self.bad[:]:
+ print("Re-running test %r in verbose mode" % test, flush=True)
+ try:
+ self.ns.verbose = True
+ ok = runtest(self.ns, test)
+ except KeyboardInterrupt:
+ self.interrupted = True
+ # print a newline separate from the ^C
+ print()
+ break
+ else:
+ if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
+ self.bad.remove(test)
+ else:
+ if self.bad:
+ print(count(len(self.bad), 'test'), "failed again:")
+ printlist(self.bad)
+
+ def display_result(self):
+ if self.interrupted:
+ # print a newline after ^C
+ print()
+ print("Test suite interrupted by signal SIGINT.")
+ executed = set(self.good) | set(self.bad) | set(self.skipped)
+ omitted = set(self.selected) - executed
+ print(count(len(omitted), "test"), "omitted:")
+ printlist(omitted)
+
+ # If running the test suite for PGO then no one cares about
+ # results.
+ if self.ns.pgo:
+ return
+
+ if self.good and not self.ns.quiet:
+ if (not self.bad
+ and not self.skipped
+ and not self.interrupted
+ and len(self.good) > 1):
+ print("All", end=' ')
+ print(count(len(self.good), "test"), "OK.")
+
+ if self.ns.print_slow:
+ self.test_times.sort(reverse=True)
+ print()
+ print("10 slowest tests:")
+ for time, test in self.test_times[:10]:
+ print("- %s: %s" % (test, format_duration(time)))
+
+ if self.bad:
+ print()
+ print(count(len(self.bad), "test"), "failed:")
+ printlist(self.bad)
+
+ if self.environment_changed:
+ print()
+ print("{} altered the execution environment:".format(
+ count(len(self.environment_changed), "test")))
+ printlist(self.environment_changed)
+
+ if self.skipped and not self.ns.quiet:
+ print()
+ print(count(len(self.skipped), "test"), "skipped:")
+ printlist(self.skipped)
+
+ def run_tests_sequential(self):
+ if self.ns.trace:
+ import trace
+ self.tracer = trace.Trace(trace=False, count=True)
+
+ save_modules = sys.modules.keys()
+
+ print("Run tests sequentially")
+
+ previous_test = None
+ for test_index, test in enumerate(self.tests, 1):
+ start_time = time.monotonic()
+
+ text = test
+ if previous_test:
+ text = '%s -- %s' % (text, previous_test)
+ self.display_progress(test_index, text)
+
+ if self.tracer:
+ # If we're tracing code coverage, then we don't exit with status
+ # on a false return value from main.
+ cmd = ('result = runtest(self.ns, test); '
+ 'self.accumulate_result(test, result)')
+ ns = dict(locals())
+ self.tracer.runctx(cmd, globals=globals(), locals=ns)
+ result = ns['result']
+ else:
+ try:
+ result = runtest(self.ns, test)
+ except KeyboardInterrupt:
+ self.interrupted = True
+ self.accumulate_result(test, (INTERRUPTED, None))
+ break
+ else:
+ self.accumulate_result(test, result)
+
+ previous_test = format_test_result(test, result[0])
+ test_time = time.monotonic() - start_time
+ if test_time >= PROGRESS_MIN_TIME:
+ previous_test = "%s in %s" % (previous_test, format_duration(test_time))
+ elif result[0] == PASSED:
+ # be quiet: say nothing if the test passed shortly
+ previous_test = None
+
+ if self.ns.findleaks:
+ gc.collect()
+ if gc.garbage:
+ print("Warning: test created", len(gc.garbage), end=' ')
+ print("uncollectable object(s).")
+ # move the uncollectable objects somewhere so we don't see
+ # them again
+ self.found_garbage.extend(gc.garbage)
+ del gc.garbage[:]
+
+ # Unload the newly imported modules (best effort finalization)
+ for module in sys.modules.keys():
+ if module not in save_modules and module.startswith("test."):
+ support.unload(module)
+
+ if previous_test:
+ print(previous_test)
+
+ def _test_forever(self, tests):
+ while True:
+ for test in tests:
+ yield test
+ if self.bad:
+ return
+
+ def run_tests(self):
+ # For a partial run, we do not need to clutter the output.
+ if (self.ns.verbose
+ or self.ns.header
+ or not (self.ns.pgo or self.ns.quiet or self.ns.single
+ or self.tests or self.ns.args)):
+ # Print basic platform information
+ print("==", platform.python_implementation(), *sys.version.split())
+ print("== ", platform.platform(aliased=True),
+ "%s-endian" % sys.byteorder)
+ print("== ", "hash algorithm:", sys.hash_info.algorithm,
+ "64bit" if sys.maxsize > 2**32 else "32bit")
+ print("== cwd:", os.getcwd())
+ print("== encodings: locale=%s, FS=%s"
+ % (locale.getpreferredencoding(False),
+ sys.getfilesystemencoding()))
+ print("Testing with flags:", sys.flags)
+
+ if self.ns.randomize:
+ print("Using random seed", self.ns.random_seed)
+
+ if self.ns.forever:
+ self.tests = self._test_forever(list(self.selected))
+ self.test_count = ''
+ self.test_count_width = 3
+ else:
+ self.tests = iter(self.selected)
+ self.test_count = '/{}'.format(len(self.selected))
+ self.test_count_width = len(self.test_count) - 1
+
+ if self.ns.use_mp:
+ from test.libregrtest.runtest_mp import run_tests_multiprocess
+ run_tests_multiprocess(self)
+ else:
+ self.run_tests_sequential()
+
+ def finalize(self):
+ if self.next_single_filename:
+ if self.next_single_test:
+ with open(self.next_single_filename, 'w') as fp:
+ fp.write(self.next_single_test + '\n')
+ else:
+ os.unlink(self.next_single_filename)
+
+ if self.tracer:
+ r = self.tracer.results()
+ r.write_results(show_missing=True, summary=True,
+ coverdir=self.ns.coverdir)
+
+ print()
+ duration = time.monotonic() - self.start_time
+ print("Total duration: %s" % format_duration(duration))
+
+ if self.bad:
+ result = "FAILURE"
+ elif self.interrupted:
+ result = "INTERRUPTED"
+ else:
+ result = "SUCCESS"
+ print("Tests result: %s" % result)
+
+ if self.ns.runleaks:
+ os.system("leaks %d" % os.getpid())
+
+ def main(self, tests=None, **kwargs):
+ global TEMPDIR
+
+ if sysconfig.is_python_build():
+ try:
+ os.mkdir(TEMPDIR)
+ except FileExistsError:
+ pass
+
+ # Define a writable temp dir that will be used as cwd while running
+ # the tests. The name of the dir includes the pid to allow parallel
+ # testing (see the -j option).
+ test_cwd = 'test_python_{}'.format(os.getpid())
+ test_cwd = os.path.join(TEMPDIR, test_cwd)
+
+ # Run the tests in a context manager that temporarily changes the CWD to a
+ # temporary and writable directory. If it's not possible to create or
+ # change the CWD, the original CWD will be used. The original CWD is
+ # available from support.SAVEDCWD.
+ with support.temp_cwd(test_cwd, quiet=True):
+ self._main(tests, kwargs)
+
+ def _main(self, tests, kwargs):
+ self.ns = self.parse_args(kwargs)
+
+ if self.ns.slaveargs is not None:
+ from test.libregrtest.runtest_mp import run_tests_slave
+ run_tests_slave(self.ns.slaveargs)
+
+ if self.ns.wait:
+ input("Press any key to continue...")
+
+ support.PGO = self.ns.pgo
+
+ setup_tests(self.ns)
+
+ self.find_tests(tests)
+
+ if self.ns.list_tests:
+ self.list_tests()
+ sys.exit(0)
+
+ self.run_tests()
+ self.display_result()
+
+ if self.ns.verbose2 and self.bad:
+ self.rerun_failed_tests()
+
+ self.finalize()
+ sys.exit(len(self.bad) > 0 or self.interrupted)
+
+
+def removepy(names):
+ if not names:
+ return
+ for idx, name in enumerate(names):
+ basename, ext = os.path.splitext(name)
+ if ext == '.py':
+ names[idx] = basename
+
+
+def count(n, word):
+ if n == 1:
+ return "%d %s" % (n, word)
+ else:
+ return "%d %ss" % (n, word)
+
+
+def printlist(x, width=70, indent=4):
+ """Print the elements of iterable x to stdout.
+
+ Optional arg width (default 70) is the maximum line length.
+ Optional arg indent (default 4) is the number of blanks with which to
+ begin each line.
+ """
+
+ blanks = ' ' * indent
+ # Print the sorted list: 'x' may be a '--random' list or a set()
+ print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
+ initial_indent=blanks, subsequent_indent=blanks))
+
+
+def main(tests=None, **kwargs):
+ """Run the Python suite."""
+ Regrtest().main(tests=tests, **kwargs)
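
As a small sketch of the helpers at the bottom of main.py (not part of the diff), here is what format_duration() and count() produce for a few inputs; the Regrtest class uses them for the per-test progress lines and the final summary:

    from test.libregrtest.main import format_duration, count

    print(format_duration(0.5))    # '500 ms'
    print(format_duration(42.7))   # '43 sec'
    print(format_duration(3730))   # '62 min 10 sec' (there is no hour unit)
    print(count(1, 'test'))        # '1 test'
    print(count(3, 'test'))        # '3 tests'
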
diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
new file mode 100644
index 0000000000..3b3d2458b1
--- /dev/null
+++ b/Lib/test/libregrtest/refleak.py
@@ -0,0 +1,269 @@
+import errno
+import os
+import re
+import sys
+import warnings
+from inspect import isabstract
+from test import support
+
+
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except Exception:
+ MAXFD = 256
+
+
+def fd_count():
+ """Count the number of open file descriptors"""
+ if sys.platform.startswith(('linux', 'freebsd')):
+ try:
+ names = os.listdir("/proc/self/fd")
+ return len(names)
+ except FileNotFoundError:
+ pass
+
+ count = 0
+ for fd in range(MAXFD):
+ try:
+ # Prefer dup() over fstat(). fstat() can require input/output
+ # whereas dup() doesn't.
+ fd2 = os.dup(fd)
+ except OSError as e:
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ os.close(fd2)
+ count += 1
+ return count
+
+
+def dash_R(the_module, test, indirect_test, huntrleaks):
+ """Run a test multiple times, looking for reference leaks.
+
+ Returns:
+ False if the test didn't leak references; True if we detected refleaks.
+ """
+ # This code is hackish and inelegant, but it seems to do the job.
+ import copyreg
+ import collections.abc
+
+ if not hasattr(sys, 'gettotalrefcount'):
+ raise Exception("Tracking reference leaks requires a debug build "
+ "of Python")
+
+ # Save current values for dash_R_cleanup() to restore.
+ fs = warnings.filters[:]
+ ps = copyreg.dispatch_table.copy()
+ pic = sys.path_importer_cache.copy()
+ try:
+ import zipimport
+ except ImportError:
+ zdc = None # Run unmodified on platforms without zipimport support
+ else:
+ zdc = zipimport._zip_directory_cache.copy()
+ abcs = {}
+ for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
+ if not isabstract(abc):
+ continue
+ for obj in abc.__subclasses__() + [abc]:
+ abcs[obj] = obj._abc_registry.copy()
+
+ nwarmup, ntracked, fname = huntrleaks
+ fname = os.path.join(support.SAVEDCWD, fname)
+ repcount = nwarmup + ntracked
+ rc_deltas = [0] * repcount
+ alloc_deltas = [0] * repcount
+ fd_deltas = [0] * repcount
+
+ print("beginning", repcount, "repetitions", file=sys.stderr)
+ print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
+ flush=True)
+ # initialize variables to make pyflakes quiet
+ rc_before = alloc_before = fd_before = 0
+ for i in range(repcount):
+ indirect_test()
+ alloc_after, rc_after, fd_after = dash_R_cleanup(fs, ps, pic, zdc,
+ abcs)
+ print('.', end='', flush=True)
+ if i >= nwarmup:
+ rc_deltas[i] = rc_after - rc_before
+ alloc_deltas[i] = alloc_after - alloc_before
+ fd_deltas[i] = fd_after - fd_before
+ alloc_before = alloc_after
+ rc_before = rc_after
+ fd_before = fd_after
+ print(file=sys.stderr)
+ # These checkers return False on success, True on failure
+ def check_rc_deltas(deltas):
+ return any(deltas)
+ def check_alloc_deltas(deltas):
+ # At least 1/3rd of 0s
+ if 3 * deltas.count(0) < len(deltas):
+ return True
+ # Nothing else than 1s, 0s and -1s
+ if not set(deltas) <= {1,0,-1}:
+ return True
+ return False
+ failed = False
+ for deltas, item_name, checker in [
+ (rc_deltas, 'references', check_rc_deltas),
+ (alloc_deltas, 'memory blocks', check_alloc_deltas),
+ (fd_deltas, 'file descriptors', check_rc_deltas)]:
+ if checker(deltas):
+ msg = '%s leaked %s %s, sum=%s' % (
+ test, deltas[nwarmup:], item_name, sum(deltas))
+ print(msg, file=sys.stderr, flush=True)
+ with open(fname, "a") as refrep:
+ print(msg, file=refrep)
+ refrep.flush()
+ failed = True
+ return failed
+
+
+def dash_R_cleanup(fs, ps, pic, zdc, abcs):
+ import gc, copyreg
+ import collections.abc
+ from weakref import WeakSet
+
+ # Restore some original values.
+ warnings.filters[:] = fs
+ copyreg.dispatch_table.clear()
+ copyreg.dispatch_table.update(ps)
+ sys.path_importer_cache.clear()
+ sys.path_importer_cache.update(pic)
+ try:
+ import zipimport
+ except ImportError:
+ pass # Run unmodified on platforms without zipimport support
+ else:
+ zipimport._zip_directory_cache.clear()
+ zipimport._zip_directory_cache.update(zdc)
+
+ # clear type cache
+ sys._clear_type_cache()
+
+ # Clear ABC registries, restoring previously saved ABC registries.
+ for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
+ if not isabstract(abc):
+ continue
+ for obj in abc.__subclasses__() + [abc]:
+ obj._abc_registry = abcs.get(obj, WeakSet()).copy()
+ obj._abc_cache.clear()
+ obj._abc_negative_cache.clear()
+
+ clear_caches()
+
+ # Collect cyclic trash and read memory statistics immediately after.
+ func1 = sys.getallocatedblocks
+ func2 = sys.gettotalrefcount
+ gc.collect()
+ return func1(), func2(), fd_count()
+
+
+def clear_caches():
+ import gc
+
+ # Clear the warnings registry, so they can be displayed again
+ for mod in sys.modules.values():
+ if hasattr(mod, '__warningregistry__'):
+ del mod.__warningregistry__
+
+ # Flush standard output, so that buffered data is sent to the OS and
+ # associated Python objects are reclaimed.
+ for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
+ if stream is not None:
+ stream.flush()
+
+ # Clear assorted module caches.
+ # Don't worry about resetting the cache if the module is not loaded
+ try:
+ distutils_dir_util = sys.modules['distutils.dir_util']
+ except KeyError:
+ pass
+ else:
+ distutils_dir_util._path_created.clear()
+ re.purge()
+
+ try:
+ _strptime = sys.modules['_strptime']
+ except KeyError:
+ pass
+ else:
+ _strptime._regex_cache.clear()
+
+ try:
+ urllib_parse = sys.modules['urllib.parse']
+ except KeyError:
+ pass
+ else:
+ urllib_parse.clear_cache()
+
+ try:
+ urllib_request = sys.modules['urllib.request']
+ except KeyError:
+ pass
+ else:
+ urllib_request.urlcleanup()
+
+ try:
+ linecache = sys.modules['linecache']
+ except KeyError:
+ pass
+ else:
+ linecache.clearcache()
+
+ try:
+ mimetypes = sys.modules['mimetypes']
+ except KeyError:
+ pass
+ else:
+ mimetypes._default_mime_types()
+
+ try:
+ filecmp = sys.modules['filecmp']
+ except KeyError:
+ pass
+ else:
+ filecmp._cache.clear()
+
+ try:
+ struct = sys.modules['struct']
+ except KeyError:
+ pass
+ else:
+ struct._clearcache()
+
+ try:
+ doctest = sys.modules['doctest']
+ except KeyError:
+ pass
+ else:
+ doctest.master = None
+
+ try:
+ ctypes = sys.modules['ctypes']
+ except KeyError:
+ pass
+ else:
+ ctypes._reset_cache()
+
+ try:
+ typing = sys.modules['typing']
+ except KeyError:
+ pass
+ else:
+ for f in typing._cleanups:
+ f()
+
+ gc.collect()
+
+
+def warm_caches():
+ # char cache
+ s = bytes(range(256))
+ for i in range(256):
+ s[i:i+1]
+ # unicode cache
+ [chr(i) for i in range(256)]
+ # int cache
+ list(range(-5, 257))
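
The leak checkers defined inside dash_R() are local functions, so here is a standalone sketch of the same heuristics (not part of the diff): any nonzero reference-count or file-descriptor delta is treated as a leak, while memory-block deltas may jitter by one as long as at least a third of the tracked samples are exactly zero:

    def check_rc_deltas(deltas):
        # References and file descriptors must not drift at all.
        return any(deltas)

    def check_alloc_deltas(deltas):
        # Allocated-block counts may jitter slightly: require at least 1/3
        # zeros and nothing outside {-1, 0, 1}.
        if 3 * deltas.count(0) < len(deltas):
            return True
        if not set(deltas) <= {1, 0, -1}:
            return True
        return False

    print(check_rc_deltas([0, 0, 0, 0]))      # False: no leak
    print(check_rc_deltas([2, 2, 2, 2]))      # True: steady reference leak
    print(check_alloc_deltas([1, 0, 0, -1]))  # False: tolerated jitter
    print(check_alloc_deltas([3, 0, 0, 0]))   # True: a delta outside {-1, 0, 1}
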
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
new file mode 100644
index 0000000000..ba0df0a317
--- /dev/null
+++ b/Lib/test/libregrtest/runtest.py
@@ -0,0 +1,245 @@
+import faulthandler
+import importlib
+import io
+import os
+import sys
+import time
+import traceback
+import unittest
+from test import support
+from test.libregrtest.refleak import dash_R, clear_caches
+from test.libregrtest.save_env import saved_test_environment
+
+
+# Test result constants.
+PASSED = 1
+FAILED = 0
+ENV_CHANGED = -1
+SKIPPED = -2
+RESOURCE_DENIED = -3
+INTERRUPTED = -4
+CHILD_ERROR = -5 # error in a child process
+
+_FORMAT_TEST_RESULT = {
+ PASSED: '%s passed',
+ FAILED: '%s failed',
+ ENV_CHANGED: '%s failed (env changed)',
+ SKIPPED: '%s skipped',
+ RESOURCE_DENIED: '%s skipped (resource denied)',
+ INTERRUPTED: '%s interrupted',
+ CHILD_ERROR: '%s crashed',
+}
+
+# Minimum duration of a test to display its duration or to mention that
+# the test is running in background
+PROGRESS_MIN_TIME = 30.0 # seconds
+
+# small set of tests to determine if we have a basically functioning interpreter
+# (i.e. if any of these fail, then anything else is likely to follow)
+STDTESTS = [
+ 'test_grammar',
+ 'test_opcodes',
+ 'test_dict',
+ 'test_builtin',
+ 'test_exceptions',
+ 'test_types',
+ 'test_unittest',
+ 'test_doctest',
+ 'test_doctest2',
+ 'test_support'
+]
+
+# set of tests that we don't want to be executed when using regrtest
+NOTTESTS = set()
+
+
+def format_test_result(test_name, result):
+ fmt = _FORMAT_TEST_RESULT.get(result, "%s")
+ return fmt % test_name
+
+
+def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
+ """Return a list of all applicable test modules."""
+ testdir = findtestdir(testdir)
+ names = os.listdir(testdir)
+ tests = []
+ others = set(stdtests) | nottests
+ for name in names:
+ mod, ext = os.path.splitext(name)
+ if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
+ tests.append(mod)
+ return stdtests + sorted(tests)
+
+
+def runtest(ns, test):
+ """Run a single test.
+
+ ns -- regrtest namespace of options
+ test -- the name of the test
+
+ Returns the tuple (result, test_time), where result is one of the
+ constants:
+
+ INTERRUPTED KeyboardInterrupt when run under -j
+ RESOURCE_DENIED test skipped because resource denied
+ SKIPPED test skipped for some other reason
+ ENV_CHANGED test failed because it changed the execution environment
+ FAILED test failed
+ PASSED test passed
+ """
+
+ output_on_failure = ns.verbose3
+
+ use_timeout = (ns.timeout is not None)
+ if use_timeout:
+ faulthandler.dump_traceback_later(ns.timeout, exit=True)
+ try:
+ support.match_tests = ns.match_tests
+ if ns.failfast:
+ support.failfast = True
+ if output_on_failure:
+ support.verbose = True
+
+ # Reuse the same instance for all calls to runtest(). Some
+ # tests keep a reference to sys.stdout or sys.stderr
+ # (eg. test_argparse).
+ if runtest.stringio is None:
+ stream = io.StringIO()
+ runtest.stringio = stream
+ else:
+ stream = runtest.stringio
+ stream.seek(0)
+ stream.truncate()
+
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+ try:
+ sys.stdout = stream
+ sys.stderr = stream
+ result = runtest_inner(ns, test, display_failure=False)
+ if result[0] != PASSED:
+ output = stream.getvalue()
+ orig_stderr.write(output)
+ orig_stderr.flush()
+ finally:
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+ else:
+ support.verbose = ns.verbose # Tell tests to be moderately quiet
+ result = runtest_inner(ns, test, display_failure=not ns.verbose)
+ return result
+ finally:
+ if use_timeout:
+ faulthandler.cancel_dump_traceback_later()
+ cleanup_test_droppings(test, ns.verbose)
+runtest.stringio = None
+
+
+def runtest_inner(ns, test, display_failure=True):
+ support.unload(test)
+
+ test_time = 0.0
+ refleak = False # True if the test leaked references.
+ try:
+ if test.startswith('test.') or ns.testdir:
+ abstest = test
+ else:
+ # Always import it from the test package
+ abstest = 'test.' + test
+ clear_caches()
+ with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
+ start_time = time.time()
+ the_module = importlib.import_module(abstest)
+ # If the test has a test_main, that will run the appropriate
+ # tests. If not, use normal unittest test loading.
+ test_runner = getattr(the_module, "test_main", None)
+ if test_runner is None:
+ def test_runner():
+ loader = unittest.TestLoader()
+ tests = loader.loadTestsFromModule(the_module)
+ for error in loader.errors:
+ print(error, file=sys.stderr)
+ if loader.errors:
+ raise Exception("errors while loading tests")
+ support.run_unittest(tests)
+ test_runner()
+ if ns.huntrleaks:
+ refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
+ test_time = time.time() - start_time
+ except support.ResourceDenied as msg:
+ if not ns.quiet and not ns.pgo:
+ print(test, "skipped --", msg, flush=True)
+ return RESOURCE_DENIED, test_time
+ except unittest.SkipTest as msg:
+ if not ns.quiet and not ns.pgo:
+ print(test, "skipped --", msg, flush=True)
+ return SKIPPED, test_time
+ except KeyboardInterrupt:
+ raise
+ except support.TestFailed as msg:
+ if not ns.pgo:
+ if display_failure:
+ print("test", test, "failed --", msg, file=sys.stderr,
+ flush=True)
+ else:
+ print("test", test, "failed", file=sys.stderr, flush=True)
+ return FAILED, test_time
+ except:
+ msg = traceback.format_exc()
+ if not ns.pgo:
+ print("test", test, "crashed --", msg, file=sys.stderr,
+ flush=True)
+ return FAILED, test_time
+ else:
+ if refleak:
+ return FAILED, test_time
+ if environment.changed:
+ return ENV_CHANGED, test_time
+ return PASSED, test_time
+
+
+def cleanup_test_droppings(testname, verbose):
+ import shutil
+ import stat
+ import gc
+
+ # First kill any dangling references to open files etc.
+ # This can also issue some ResourceWarnings which would otherwise get
+ # triggered during the following test run, and possibly produce failures.
+ gc.collect()
+
+ # Try to clean up junk commonly left behind. While tests shouldn't leave
+ # any files or directories behind, a failing test cannot always be expected
+ # to arrange that. The consequences can be especially nasty on Windows,
+ # since if a test leaves a file open, it cannot be deleted by name (while
+ # there's nothing we can do about that here either, we can display the
+ # name of the offending test, which is a real help).
+ for name in (support.TESTFN,
+ "db_home",
+ ):
+ if not os.path.exists(name):
+ continue
+
+ if os.path.isdir(name):
+ kind, nuker = "directory", shutil.rmtree
+ elif os.path.isfile(name):
+ kind, nuker = "file", os.unlink
+ else:
+ raise SystemError("os.path says %r exists but is neither "
+ "directory nor file" % name)
+
+ if verbose:
+ print("%r left behind %s %r" % (testname, kind, name))
+ try:
+ # if we have chmod, fix possible permissions problems
+ # that might prevent cleanup
+ if (hasattr(os, 'chmod')):
+ os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+ nuker(name)
+ except Exception as msg:
+ print(("%r left behind %s %r and it couldn't be "
+ "removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
+
+
+def findtestdir(path=None):
+ return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
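
For reference, a sketch (not part of the diff) of how the result constants defined at the top of runtest.py are rendered; runtest() returns a (result, test_time) tuple and the callers in main.py and runtest_mp.py feed the result code into format_test_result():

    from test.libregrtest.runtest import (
        format_test_result, PASSED, FAILED, ENV_CHANGED, SKIPPED)

    for code in (PASSED, FAILED, ENV_CHANGED, SKIPPED):
        print(format_test_result('test_os', code))
    # test_os passed
    # test_os failed
    # test_os failed (env changed)
    # test_os skipped
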
diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py
new file mode 100644
index 0000000000..9604c16600
--- /dev/null
+++ b/Lib/test/libregrtest/runtest_mp.py
@@ -0,0 +1,245 @@
+import faulthandler
+import json
+import os
+import queue
+import sys
+import time
+import traceback
+import types
+from test import support
+try:
+ import threading
+except ImportError:
+ print("Multiprocess option requires thread support")
+ sys.exit(2)
+
+from test.libregrtest.runtest import (
+ runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
+ format_test_result)
+from test.libregrtest.setup import setup_tests
+
+
+# Display the running tests if nothing has happened in the last N seconds
+PROGRESS_UPDATE = 30.0 # seconds
+
+# If interrupted, display the wait progress every N seconds
+WAIT_PROGRESS = 2.0 # seconds
+
+
+def run_test_in_subprocess(testname, ns):
+ """Run the given test in a subprocess with --slaveargs.
+
+ ns is the option Namespace parsed from command-line arguments. regrtest
+ is invoked in a subprocess with the --slaveargs argument; when the
+ subprocess exits, its return code, stdout and stderr are returned as a
+ 3-tuple.
+ """
+ from subprocess import Popen, PIPE
+
+ ns_dict = vars(ns)
+ slaveargs = (ns_dict, testname)
+ slaveargs = json.dumps(slaveargs)
+
+ cmd = [sys.executable, *support.args_from_interpreter_flags(),
+ '-X', 'faulthandler',
+ '-m', 'test.regrtest',
+ '--slaveargs', slaveargs]
+ if ns.pgo:
+ cmd += ['--pgo']
+
+ # Running the child from the same working directory as regrtest's original
+ # invocation ensures that TEMPDIR for the child is the same when
+ # sysconfig.is_python_build() is true. See issue 15300.
+ popen = Popen(cmd,
+ stdout=PIPE, stderr=PIPE,
+ universal_newlines=True,
+ close_fds=(os.name != 'nt'),
+ cwd=support.SAVEDCWD)
+ with popen:
+ stdout, stderr = popen.communicate()
+ retcode = popen.wait()
+ return retcode, stdout, stderr
+
+
+def run_tests_slave(slaveargs):
+ ns_dict, testname = json.loads(slaveargs)
+ ns = types.SimpleNamespace(**ns_dict)
+
+ setup_tests(ns)
+
+ try:
+ result = runtest(ns, testname)
+ except KeyboardInterrupt:
+ result = INTERRUPTED, ''
+ except BaseException as e:
+ traceback.print_exc()
+ result = CHILD_ERROR, str(e)
+
+ print() # Force a newline (just in case)
+ print(json.dumps(result), flush=True)
+ sys.exit(0)
+
+
+# We do not use a generator here because multiple worker threads call next()
+# concurrently; generators are not thread-safe.
+class MultiprocessIterator:
+
+ """A thread-safe iterator over tests for multiprocess mode."""
+
+ def __init__(self, tests):
+ self.interrupted = False
+ self.lock = threading.Lock()
+ self.tests = tests
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ with self.lock:
+ if self.interrupted:
+ raise StopIteration('tests interrupted')
+ return next(self.tests)
+
+
+class MultiprocessThread(threading.Thread):
+ def __init__(self, pending, output, ns):
+ super().__init__()
+ self.pending = pending
+ self.output = output
+ self.ns = ns
+ self.current_test = None
+ self.start_time = None
+
+ def _runtest(self):
+ try:
+ test = next(self.pending)
+ except StopIteration:
+ self.output.put((None, None, None, None))
+ return True
+
+ try:
+ self.start_time = time.monotonic()
+ self.current_test = test
+
+ retcode, stdout, stderr = run_test_in_subprocess(test, self.ns)
+ finally:
+ self.current_test = None
+
+ stdout, _, result = stdout.strip().rpartition("\n")
+ if retcode != 0:
+ result = (CHILD_ERROR, "Exit code %s" % retcode)
+ self.output.put((test, stdout.rstrip(), stderr.rstrip(),
+ result))
+ return True
+
+ if not result:
+ self.output.put((None, None, None, None))
+ return True
+
+ result = json.loads(result)
+ self.output.put((test, stdout.rstrip(), stderr.rstrip(),
+ result))
+ return False
+
+ def run(self):
+ try:
+ stop = False
+ while not stop:
+ stop = self._runtest()
+ except BaseException:
+ self.output.put((None, None, None, None))
+ raise
+
+
+def run_tests_multiprocess(regrtest):
+ output = queue.Queue()
+ pending = MultiprocessIterator(regrtest.tests)
+ test_timeout = regrtest.ns.timeout
+ use_timeout = (test_timeout is not None)
+
+ workers = [MultiprocessThread(pending, output, regrtest.ns)
+ for i in range(regrtest.ns.use_mp)]
+ print("Run tests in parallel using %s child processes"
+ % len(workers))
+ for worker in workers:
+ worker.start()
+
+ def get_running(workers):
+ running = []
+ for worker in workers:
+ current_test = worker.current_test
+ if not current_test:
+ continue
+ dt = time.monotonic() - worker.start_time
+ if dt >= PROGRESS_MIN_TIME:
+ running.append('%s (%.0f sec)' % (current_test, dt))
+ return running
+
+ finished = 0
+ test_index = 1
+ get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
+ try:
+ while finished < regrtest.ns.use_mp:
+ if use_timeout:
+ faulthandler.dump_traceback_later(test_timeout, exit=True)
+
+ try:
+ item = output.get(timeout=get_timeout)
+ except queue.Empty:
+ running = get_running(workers)
+ if running and not regrtest.ns.pgo:
+ print('running: %s' % ', '.join(running))
+ continue
+
+ test, stdout, stderr, result = item
+ if test is None:
+ finished += 1
+ continue
+ regrtest.accumulate_result(test, result)
+
+ # Display progress
+ ok, test_time = result
+ text = format_test_result(test, ok)
+ if (ok not in (CHILD_ERROR, INTERRUPTED)
+ and test_time >= PROGRESS_MIN_TIME
+ and not regrtest.ns.pgo):
+ text += ' (%.0f sec)' % test_time
+ running = get_running(workers)
+ if running and not regrtest.ns.pgo:
+ text += ' -- running: %s' % ', '.join(running)
+ regrtest.display_progress(test_index, text)
+
+ # Copy stdout and stderr from the child process
+ if stdout:
+ print(stdout, flush=True)
+ if stderr and not regrtest.ns.pgo:
+ print(stderr, file=sys.stderr, flush=True)
+
+ if result[0] == INTERRUPTED:
+ raise KeyboardInterrupt
+ if result[0] == CHILD_ERROR:
+ msg = "Child error on {}: {}".format(test, result[1])
+ raise Exception(msg)
+ test_index += 1
+ except KeyboardInterrupt:
+ regrtest.interrupted = True
+ pending.interrupted = True
+ print()
+ finally:
+ if use_timeout:
+ faulthandler.cancel_dump_traceback_later()
+
+ # If tests are interrupted, wait until tests complete
+ wait_start = time.monotonic()
+ while True:
+ running = [worker.current_test for worker in workers]
+ running = list(filter(bool, running))
+ if not running:
+ break
+
+ dt = time.monotonic() - wait_start
+ line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
+ if dt >= WAIT_PROGRESS:
+ line = "%s since %.0f sec" % (line, dt)
+ print(line)
+ for worker in workers:
+ worker.join(WAIT_PROGRESS)
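
A sketch of the parent/child hand-off used above (not part of the diff; the namespace fields are an illustrative subset): run_test_in_subprocess() serializes the options plus the test name as JSON and passes them via --slaveargs, and run_tests_slave() rebuilds a SimpleNamespace on the other side:

    import json
    import types

    # Parent side (compare run_test_in_subprocess): pack options and test name.
    ns = types.SimpleNamespace(verbose=0, quiet=False, pgo=False)
    slaveargs = json.dumps((vars(ns), 'test_os'))

    # Child side (compare run_tests_slave): unpack and rebuild the namespace.
    ns_dict, testname = json.loads(slaveargs)
    child_ns = types.SimpleNamespace(**ns_dict)
    print(testname, child_ns.verbose)   # test_os 0
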
diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py
new file mode 100644
index 0000000000..96ad3af8df
--- /dev/null
+++ b/Lib/test/libregrtest/save_env.py
@@ -0,0 +1,285 @@
+import builtins
+import locale
+import logging
+import os
+import shutil
+import sys
+import sysconfig
+import warnings
+from test import support
+try:
+ import threading
+except ImportError:
+ threading = None
+try:
+ import _multiprocessing, multiprocessing.process
+except ImportError:
+ multiprocessing = None
+
+
+# Unit tests are supposed to leave the execution environment unchanged
+# once they complete. But sometimes tests have bugs, especially when
+# tests fail, and the changes to environment go on to mess up other
+# tests. This can cause issues with buildbot stability, since tests
+# are run in random order and so problems may appear to come and go.
+# There are a few things we can save and restore to mitigate this, and
+# the following context manager handles this task.
+
+class saved_test_environment:
+ """Save bits of the test environment and restore them at block exit.
+
+ with saved_test_environment(testname, verbose, quiet):
+ #stuff
+
+ Unless quiet is True, a warning is printed to stderr if any of
+ the saved items was changed by the test. The attribute 'changed'
+ is initially False, but is set to True if a change is detected.
+
+ If verbose is more than 1, the before and after state of changed
+ items is also printed.
+ """
+
+ changed = False
+
+ def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
+ self.testname = testname
+ self.verbose = verbose
+ self.quiet = quiet
+ self.pgo = pgo
+
+ # To add things to save and restore, add a name XXX to the resources list
+ # and add corresponding get_XXX/restore_XXX functions. get_XXX should
+ # return the value to be saved and compared against a second call to the
+ # get function when test execution completes. restore_XXX should accept
+ # the saved value and restore the resource using it. It will be called if
+ # and only if a change in the value is detected.
+ #
+ # Note: XXX will have any '.' replaced with '_' characters when determining
+ # the corresponding method names.
+
+ resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
+ 'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
+ 'warnings.filters', 'asyncore.socket_map',
+ 'logging._handlers', 'logging._handlerList', 'sys.gettrace',
+ 'sys.warnoptions',
+ # multiprocessing.process._cleanup() may release ref
+ # to a thread, so check processes first.
+ 'multiprocessing.process._dangling', 'threading._dangling',
+ 'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
+ 'files', 'locale', 'warnings.showwarning',
+ 'shutil_archive_formats', 'shutil_unpack_formats',
+ )
+
+ def get_sys_argv(self):
+ return id(sys.argv), sys.argv, sys.argv[:]
+ def restore_sys_argv(self, saved_argv):
+ sys.argv = saved_argv[1]
+ sys.argv[:] = saved_argv[2]
+
+ def get_cwd(self):
+ return os.getcwd()
+ def restore_cwd(self, saved_cwd):
+ os.chdir(saved_cwd)
+
+ def get_sys_stdout(self):
+ return sys.stdout
+ def restore_sys_stdout(self, saved_stdout):
+ sys.stdout = saved_stdout
+
+ def get_sys_stderr(self):
+ return sys.stderr
+ def restore_sys_stderr(self, saved_stderr):
+ sys.stderr = saved_stderr
+
+ def get_sys_stdin(self):
+ return sys.stdin
+ def restore_sys_stdin(self, saved_stdin):
+ sys.stdin = saved_stdin
+
+ def get_os_environ(self):
+ return id(os.environ), os.environ, dict(os.environ)
+ def restore_os_environ(self, saved_environ):
+ os.environ = saved_environ[1]
+ os.environ.clear()
+ os.environ.update(saved_environ[2])
+
+ def get_sys_path(self):
+ return id(sys.path), sys.path, sys.path[:]
+ def restore_sys_path(self, saved_path):
+ sys.path = saved_path[1]
+ sys.path[:] = saved_path[2]
+
+ def get_sys_path_hooks(self):
+ return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
+ def restore_sys_path_hooks(self, saved_hooks):
+ sys.path_hooks = saved_hooks[1]
+ sys.path_hooks[:] = saved_hooks[2]
+
+ def get_sys_gettrace(self):
+ return sys.gettrace()
+ def restore_sys_gettrace(self, trace_fxn):
+ sys.settrace(trace_fxn)
+
+ def get___import__(self):
+ return builtins.__import__
+ def restore___import__(self, import_):
+ builtins.__import__ = import_
+
+ def get_warnings_filters(self):
+ return id(warnings.filters), warnings.filters, warnings.filters[:]
+ def restore_warnings_filters(self, saved_filters):
+ warnings.filters = saved_filters[1]
+ warnings.filters[:] = saved_filters[2]
+
+ def get_asyncore_socket_map(self):
+ asyncore = sys.modules.get('asyncore')
+ # XXX Making a copy keeps objects alive until __exit__ gets called.
+ return asyncore and asyncore.socket_map.copy() or {}
+ def restore_asyncore_socket_map(self, saved_map):
+ asyncore = sys.modules.get('asyncore')
+ if asyncore is not None:
+ asyncore.close_all(ignore_all=True)
+ asyncore.socket_map.update(saved_map)
+
+ def get_shutil_archive_formats(self):
+ # we could call get_archives_formats() but that only returns the
+ # registry keys; we want to check the values too (the functions that
+ # are registered)
+ return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
+ def restore_shutil_archive_formats(self, saved):
+ shutil._ARCHIVE_FORMATS = saved[0]
+ shutil._ARCHIVE_FORMATS.clear()
+ shutil._ARCHIVE_FORMATS.update(saved[1])
+
+ def get_shutil_unpack_formats(self):
+ return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
+ def restore_shutil_unpack_formats(self, saved):
+ shutil._UNPACK_FORMATS = saved[0]
+ shutil._UNPACK_FORMATS.clear()
+ shutil._UNPACK_FORMATS.update(saved[1])
+
+ def get_logging__handlers(self):
+ # _handlers is a WeakValueDictionary
+ return id(logging._handlers), logging._handlers, logging._handlers.copy()
+ def restore_logging__handlers(self, saved_handlers):
+ # Can't easily revert the logging state
+ pass
+
+ def get_logging__handlerList(self):
+ # _handlerList is a list of weakrefs to handlers
+ return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
+ def restore_logging__handlerList(self, saved_handlerList):
+ # Can't easily revert the logging state
+ pass
+
+ def get_sys_warnoptions(self):
+ return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
+ def restore_sys_warnoptions(self, saved_options):
+ sys.warnoptions = saved_options[1]
+ sys.warnoptions[:] = saved_options[2]
+
+ # Controlling dangling references to Thread objects can make it easier
+ # to track reference leaks.
+ def get_threading__dangling(self):
+ if not threading:
+ return None
+ # This copies the weakrefs without making any strong reference
+ return threading._dangling.copy()
+ def restore_threading__dangling(self, saved):
+ if not threading:
+ return
+ threading._dangling.clear()
+ threading._dangling.update(saved)
+
+ # Same for Process objects
+ def get_multiprocessing_process__dangling(self):
+ if not multiprocessing:
+ return None
+ # Unjoined process objects can survive after process exits
+ multiprocessing.process._cleanup()
+ # This copies the weakrefs without making any strong reference
+ return multiprocessing.process._dangling.copy()
+ def restore_multiprocessing_process__dangling(self, saved):
+ if not multiprocessing:
+ return
+ multiprocessing.process._dangling.clear()
+ multiprocessing.process._dangling.update(saved)
+
+ def get_sysconfig__CONFIG_VARS(self):
+ # make sure the dict is initialized
+ sysconfig.get_config_var('prefix')
+ return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
+ dict(sysconfig._CONFIG_VARS))
+ def restore_sysconfig__CONFIG_VARS(self, saved):
+ sysconfig._CONFIG_VARS = saved[1]
+ sysconfig._CONFIG_VARS.clear()
+ sysconfig._CONFIG_VARS.update(saved[2])
+
+ def get_sysconfig__INSTALL_SCHEMES(self):
+ return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
+ sysconfig._INSTALL_SCHEMES.copy())
+ def restore_sysconfig__INSTALL_SCHEMES(self, saved):
+ sysconfig._INSTALL_SCHEMES = saved[1]
+ sysconfig._INSTALL_SCHEMES.clear()
+ sysconfig._INSTALL_SCHEMES.update(saved[2])
+
+ def get_files(self):
+ return sorted(fn + ('/' if os.path.isdir(fn) else '')
+ for fn in os.listdir())
+ def restore_files(self, saved_value):
+ fn = support.TESTFN
+ if fn not in saved_value and (fn + '/') not in saved_value:
+ if os.path.isfile(fn):
+ support.unlink(fn)
+ elif os.path.isdir(fn):
+ support.rmtree(fn)
+
+ _lc = [getattr(locale, lc) for lc in dir(locale)
+ if lc.startswith('LC_')]
+ def get_locale(self):
+ pairings = []
+ for lc in self._lc:
+ try:
+ pairings.append((lc, locale.setlocale(lc, None)))
+ except (TypeError, ValueError):
+ continue
+ return pairings
+ def restore_locale(self, saved):
+ for lc, setting in saved:
+ locale.setlocale(lc, setting)
+
+ def get_warnings_showwarning(self):
+ return warnings.showwarning
+ def restore_warnings_showwarning(self, fxn):
+ warnings.showwarning = fxn
+
+ def resource_info(self):
+ for name in self.resources:
+ method_suffix = name.replace('.', '_')
+ get_name = 'get_' + method_suffix
+ restore_name = 'restore_' + method_suffix
+ yield name, getattr(self, get_name), getattr(self, restore_name)
+
+ def __enter__(self):
+        self.saved_values = {name: get() for name, get, restore
+                             in self.resource_info()}
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ saved_values = self.saved_values
+ del self.saved_values
+ support.gc_collect() # Some resources use weak references
+ for name, get, restore in self.resource_info():
+ current = get()
+ original = saved_values.pop(name)
+ # Check for changes to the resource's value
+ if current != original:
+ self.changed = True
+ restore(original)
+ if not self.quiet and not self.pgo:
+ print(f"Warning -- {name} was modified by {self.testname}",
+ file=sys.stderr, flush=True)
+ if self.verbose > 1:
+ print(f" Before: {original}\n After: {current} ",
+ file=sys.stderr, flush=True)
+ return False
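For context, a minimal usage sketch (illustrative only, not part of this patch) of how the saved_test_environment context manager above is meant to wrap a single test. It assumes, from the attributes referenced in __exit__(), a constructor of the form saved_test_environment(testname, verbose, quiet, pgo=False); run_one_test() is a hypothetical helper standing in for the real test runner:

    from test.libregrtest.save_env import saved_test_environment

    def run_with_env_check(test_name, verbose=0, quiet=False, pgo=False):
        # Any change to a tracked resource (sys.path, os.environ,
        # warnings.filters, ...) is reported on stderr and undone on exit.
        with saved_test_environment(test_name, verbose, quiet, pgo=pgo) as env:
            run_one_test(test_name)  # hypothetical: import and run the test
        return env.changed           # True if a tracked resource was modified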
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
new file mode 100644
index 0000000000..1d24531fc2
--- /dev/null
+++ b/Lib/test/libregrtest/setup.py
@@ -0,0 +1,121 @@
+import atexit
+import faulthandler
+import os
+import signal
+import sys
+import unittest
+from test import support
+try:
+ import gc
+except ImportError:
+ gc = None
+
+from test.libregrtest.refleak import warm_caches
+
+
+def setup_tests(ns):
+ # Display the Python traceback on fatal errors (e.g. segfault)
+ faulthandler.enable(all_threads=True)
+
+ # Display the Python traceback on SIGALRM or SIGUSR1 signal
+ signals = []
+ if hasattr(signal, 'SIGALRM'):
+ signals.append(signal.SIGALRM)
+ if hasattr(signal, 'SIGUSR1'):
+ signals.append(signal.SIGUSR1)
+ for signum in signals:
+ faulthandler.register(signum, chain=True)
+
+ replace_stdout()
+ support.record_original_stdout(sys.stdout)
+
+ if ns.testdir:
+ # Prepend test directory to sys.path, so runtest() will be able
+ # to locate tests
+ sys.path.insert(0, os.path.abspath(ns.testdir))
+
+    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
+    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
+    # imports might fail.  This affects only the modules imported before os.chdir().
+    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
+    # they are found in the CWD their __file__ and __path__ will be relative (this
+    # happens before the chdir).  All the modules imported after the chdir are
+    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
+    # (site.py absolutizes them), their __file__ and __path__ will be absolute too.
+    # Therefore it is necessary to manually absolutize the __file__ and __path__ of
+    # the packages to prevent later imports from failing when the CWD is different.
+ for module in sys.modules.values():
+ if hasattr(module, '__path__'):
+ for index, path in enumerate(module.__path__):
+ module.__path__[index] = os.path.abspath(path)
+ if hasattr(module, '__file__'):
+ module.__file__ = os.path.abspath(module.__file__)
+
+    # Mac OS X (a.k.a. Darwin) has a default stack size that is too small
+    # for deeply recursive regular expressions.  This shows up as crashes in
+    # the Python test suite when running test_re.py and test_sre.py.  The
+    # fix is to raise the stack limit to 2048 KiB (the 1024*2048 bytes below).
+    # This approach may also be useful for other Unix-like platforms that
+    # suffer from small default stack limits.
+ if sys.platform == 'darwin':
+ try:
+ import resource
+ except ImportError:
+ pass
+ else:
+ soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
+ newsoft = min(hard, max(soft, 1024*2048))
+ resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
+
+ if ns.huntrleaks:
+ unittest.BaseTestSuite._cleanup = False
+
+ # Avoid false positives due to various caches
+ # filling slowly with random data:
+ warm_caches()
+
+ if ns.memlimit is not None:
+ support.set_memlimit(ns.memlimit)
+
+    if ns.threshold is not None and gc is not None:
+        gc.set_threshold(ns.threshold)
+
+ try:
+ import msvcrt
+ except ImportError:
+ pass
+ else:
+ msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
+ msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
+ msvcrt.SEM_NOGPFAULTERRORBOX|
+ msvcrt.SEM_NOOPENFILEERRORBOX)
+ try:
+ msvcrt.CrtSetReportMode
+ except AttributeError:
+ # release build
+ pass
+ else:
+ for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
+ if ns.verbose and ns.verbose >= 2:
+ msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
+ msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
+ else:
+ msvcrt.CrtSetReportMode(m, 0)
+
+ support.use_resources = ns.use_resources
+
+
+def replace_stdout():
+ """Set stdout encoder error handler to backslashreplace (as stderr error
+ handler) to avoid UnicodeEncodeError when printing a traceback"""
+ stdout = sys.stdout
+ sys.stdout = open(stdout.fileno(), 'w',
+ encoding=stdout.encoding,
+ errors="backslashreplace",
+ closefd=False,
+ newline='\n')
+
+ def restore_stdout():
+ sys.stdout.close()
+ sys.stdout = stdout
+ atexit.register(restore_stdout)
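To illustrate (outside the patch) why replace_stdout() rebinds sys.stdout with errors="backslashreplace", here is a small standalone sketch that applies the same wrapping to an arbitrary text stream; wrap_backslashreplace() is an invented name used only for this example:

    import sys

    def wrap_backslashreplace(stream):
        # Reopen the same file descriptor with a forgiving error handler so
        # that un-encodable characters (e.g. in a traceback) are escaped
        # instead of raising UnicodeEncodeError.
        return open(stream.fileno(), 'w',
                    encoding=stream.encoding,
                    errors='backslashreplace',
                    closefd=False,
                    newline='\n')

    if __name__ == '__main__':
        sys.stdout = wrap_backslashreplace(sys.stdout)
        print('snowman: \u2603')  # escaped instead of crashing when the
                                  # terminal encoding cannot represent it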