Diffstat (limited to 'logilab/common/pytest.py')
-rw-r--r--  logilab/common/pytest.py | 630
1 file changed, 381 insertions(+), 249 deletions(-)
diff --git a/logilab/common/pytest.py b/logilab/common/pytest.py
index 6819c01..0f89ddf 100644
--- a/logilab/common/pytest.py
+++ b/logilab/common/pytest.py
@@ -124,6 +124,7 @@ import traceback
from inspect import isgeneratorfunction, isclass, FrameInfo
from random import shuffle
from itertools import dropwhile
+
# mypy error: Module 'unittest.runner' has no attribute '_WritelnDecorator'
# but it does
from unittest.runner import _WritelnDecorator # type: ignore
@@ -135,6 +136,7 @@ from logilab.common.deprecation import deprecated
from logilab.common.fileutils import abspath_listdir
from logilab.common import textutils
from logilab.common import testlib, STD_BLACKLIST
+
# use the same unittest module as testlib
from logilab.common.testlib import unittest, start_interactive_mode
from logilab.common.testlib import nocoverage, pause_trace, replace_trace # bwcompat
@@ -142,6 +144,7 @@ from logilab.common.debugger import Debugger, colorize_source
import doctest
import unittest as unittest_legacy
+
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2.suite as unittest_suite
@@ -154,18 +157,24 @@ else:
try:
import django
from logilab.common.modutils import modpath_from_file, load_module_from_modpath
+
DJANGO_FOUND = True
except ImportError:
DJANGO_FOUND = False
-CONF_FILE = 'pytestconf.py'
+CONF_FILE = "pytestconf.py"
TESTFILE_RE = re.compile("^((unit)?test.*|smoketest)\.py$")
+
+
def this_is_a_testfile(filename: str) -> Optional[Match]:
"""returns True if `filename` seems to be a test file"""
return TESTFILE_RE.match(osp.basename(filename))
+
TESTDIR_RE = re.compile("^(unit)?tests?$")
+
+
def this_is_a_testdir(dirpath: str) -> Optional[Match]:
"""returns True if `filename` seems to be a test directory"""
return TESTDIR_RE.match(osp.basename(dirpath))
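
A quick illustration of what the two filename filters above accept (the sample names are hypothetical, not taken from the diff):

    # TESTFILE_RE: names starting with "test" or "unittest", plus "smoketest.py"
    bool(this_is_a_testfile("test_cache.py"))      # True
    bool(this_is_a_testfile("unittest_utils.py"))  # True
    bool(this_is_a_testfile("smoketest.py"))       # True
    bool(this_is_a_testfile("helpers.py"))         # False
    # TESTDIR_RE: directories named "test", "tests", "unittest" or "unittests"
    bool(this_is_a_testdir("project/tests"))       # True
    bool(this_is_a_testdir("project/src"))         # False
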
@@ -176,10 +185,10 @@ def load_pytest_conf(path, parser):
and / or tester.
"""
namespace = {}
- exec(open(path, 'rb').read(), namespace)
- if 'update_parser' in namespace:
- namespace['update_parser'](parser)
- return namespace.get('CustomPyTester', PyTester)
+ exec(open(path, "rb").read(), namespace)
+ if "update_parser" in namespace:
+ namespace["update_parser"](parser)
+ return namespace.get("CustomPyTester", PyTester)
def project_root(parser, projdir=os.getcwd()):
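
load_pytest_conf() executes a project-level pytestconf.py and looks for two optional hooks: an update_parser function and a CustomPyTester class. A minimal sketch of such a file, assuming a hypothetical --slow option:

    # pytestconf.py -- picked up from the project root by project_root()
    from logilab.common.pytest import PyTester

    def update_parser(parser):
        # extend the option parser; the --slow flag is only an example
        parser.add_option("--slow", action="store_true", dest="slow",
                          default=False, help="also run slow tests")
        return parser

    class CustomPyTester(PyTester):
        """project-specific overrides of PyTester hooks go here"""
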
@@ -189,8 +198,7 @@ def project_root(parser, projdir=os.getcwd()):
conf_file_path = osp.join(curdir, CONF_FILE)
if osp.isfile(conf_file_path):
testercls = load_pytest_conf(conf_file_path, parser)
- while this_is_a_testdir(curdir) or \
- osp.isfile(osp.join(curdir, '__init__.py')):
+ while this_is_a_testdir(curdir) or osp.isfile(osp.join(curdir, "__init__.py")):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
break
@@ -204,6 +212,7 @@ def project_root(parser, projdir=os.getcwd()):
class GlobalTestReport(object):
"""this class holds global test statistics"""
+
def __init__(self):
self.ran = 0
self.skipped = 0
@@ -218,7 +227,7 @@ class GlobalTestReport(object):
"""integrates new test information into internal statistics"""
ran = testresult.testsRun
self.ran += ran
- self.skipped += len(getattr(testresult, 'skipped', ()))
+ self.skipped += len(getattr(testresult, "skipped", ()))
self.failures += len(testresult.failures)
self.errors += len(testresult.errors)
self.ttime += ttime
@@ -243,27 +252,24 @@ class GlobalTestReport(object):
def __str__(self):
"""this is just presentation stuff"""
- line1 = ['Ran %s test cases in %.2fs (%.2fs CPU)'
- % (self.ran, self.ttime, self.ctime)]
+ line1 = ["Ran %s test cases in %.2fs (%.2fs CPU)" % (self.ran, self.ttime, self.ctime)]
if self.errors:
- line1.append('%s errors' % self.errors)
+ line1.append("%s errors" % self.errors)
if self.failures:
- line1.append('%s failures' % self.failures)
+ line1.append("%s failures" % self.failures)
if self.skipped:
- line1.append('%s skipped' % self.skipped)
+ line1.append("%s skipped" % self.skipped)
modulesok = self.modulescount - len(self.errmodules)
if self.errors or self.failures:
- line2 = '%s modules OK (%s failed)' % (modulesok,
- len(self.errmodules))
- descr = ', '.join(['%s [%s/%s]' % info for info in self.errmodules])
- line3 = '\nfailures: %s' % descr
+ line2 = "%s modules OK (%s failed)" % (modulesok, len(self.errmodules))
+ descr = ", ".join(["%s [%s/%s]" % info for info in self.errmodules])
+ line3 = "\nfailures: %s" % descr
elif modulesok:
- line2 = 'All %s modules OK' % modulesok
- line3 = ''
+ line2 = "All %s modules OK" % modulesok
+ line3 = ""
else:
- return ''
- return '%s\n%s%s' % (', '.join(line1), line2, line3)
-
+ return ""
+ return "%s\n%s%s" % (", ".join(line1), line2, line3)
def remove_local_modules_from_sys(testdir):
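
Rendered, GlobalTestReport.__str__ produces a summary along these lines (numbers invented for illustration):

    Ran 124 test cases in 3.52s (2.10s CPU), 2 errors, 1 failures
    7 modules OK (1 failed)
    failures: test_foo.py [3/15]
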
@@ -282,7 +288,7 @@ def remove_local_modules_from_sys(testdir):
for modname, mod in list(sys.modules.items()):
if mod is None:
continue
- if not hasattr(mod, '__file__'):
+ if not hasattr(mod, "__file__"):
# this is the case of some built-in modules like sys, imp, marshal
continue
modfile = mod.__file__
@@ -292,7 +298,6 @@ def remove_local_modules_from_sys(testdir):
del sys.modules[modname]
-
class PyTester(object):
"""encapsulates testrun logic"""
@@ -317,6 +322,7 @@ class PyTester(object):
def set_errcode(self, errcode):
self._errcode = errcode
+
errcode = property(get_errcode, set_errcode)
def testall(self, exitfirst=False):
@@ -358,9 +364,11 @@ class PyTester(object):
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception:
- print("Error while overwriting succeeded test file :",
- osp.join(os.getcwd(), FILE_RESTART),
- file=sys.__stderr__)
+ print(
+ "Error while overwriting succeeded test file :",
+ osp.join(os.getcwd(), FILE_RESTART),
+ file=sys.__stderr__,
+ )
raise
# run test and collect information
prog = self.testfile(filename, batchmode=True)
@@ -386,17 +394,24 @@ class PyTester(object):
restartfile = open(FILE_RESTART, "w")
restartfile.close()
except Exception:
- print("Error while overwriting succeeded test file :",
- osp.join(os.getcwd(), FILE_RESTART), file=sys.__stderr__)
+ print(
+ "Error while overwriting succeeded test file :",
+ osp.join(os.getcwd(), FILE_RESTART),
+ file=sys.__stderr__,
+ )
raise
modname = osp.basename(filename)[:-3]
- print((' %s ' % osp.basename(filename)).center(70, '='),
- file=sys.__stderr__)
+ print((" %s " % osp.basename(filename)).center(70, "="), file=sys.__stderr__)
try:
tstart, cstart = time(), process_time()
try:
- testprog = SkipAwareTestProgram(modname, batchmode=batchmode, cvg=self.cvg,
- options=self.options, outstream=sys.stderr)
+ testprog = SkipAwareTestProgram(
+ modname,
+ batchmode=batchmode,
+ cvg=self.cvg,
+ options=self.options,
+ outstream=sys.stderr,
+ )
except KeyboardInterrupt:
raise
except SystemExit as exc:
@@ -408,9 +423,9 @@ class PyTester(object):
return None
except Exception:
self.report.failed_to_test_module(filename)
- print('unhandled exception occurred while testing', modname,
- file=sys.stderr)
+ print("unhandled exception occurred while testing", modname, file=sys.stderr)
import traceback
+
traceback.print_exc(file=sys.stderr)
return None
@@ -423,23 +438,23 @@ class PyTester(object):
os.chdir(here)
-
class DjangoTester(PyTester):
-
def load_django_settings(self, dirname):
"""try to find project's setting and load it"""
curdir = osp.abspath(dirname)
previousdir = curdir
- while not osp.isfile(osp.join(curdir, 'settings.py')) and \
- osp.isfile(osp.join(curdir, '__init__.py')):
+ while not osp.isfile(osp.join(curdir, "settings.py")) and osp.isfile(
+ osp.join(curdir, "__init__.py")
+ ):
newdir = osp.normpath(osp.join(curdir, os.pardir))
if newdir == curdir:
- raise AssertionError('could not find settings.py')
+ raise AssertionError("could not find settings.py")
previousdir = curdir
curdir = newdir
# late django initialization
- settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, 'settings.py')))
+ settings = load_module_from_modpath(modpath_from_file(osp.join(curdir, "settings.py")))
from django.core.management import setup_environ
+
setup_environ(settings)
settings.DEBUG = False
self.settings = settings
@@ -451,6 +466,7 @@ class DjangoTester(PyTester):
# Those imports must be done **after** setup_environ was called
from django.test.utils import setup_test_environment
from django.test.utils import create_test_db
+
setup_test_environment()
create_test_db(verbosity=0)
self.dbname = self.settings.TEST_DATABASE_NAME
@@ -459,8 +475,9 @@ class DjangoTester(PyTester):
# Those imports must be done **after** setup_environ was called
from django.test.utils import teardown_test_environment
from django.test.utils import destroy_test_db
+
teardown_test_environment()
- print('destroying', self.dbname)
+ print("destroying", self.dbname)
destroy_test_db(self.dbname, verbosity=0)
def testall(self, exitfirst=False):
@@ -468,16 +485,16 @@ class DjangoTester(PyTester):
which can be considered as a testdir and runs every test there
"""
for dirname, dirs, files in os.walk(os.getcwd()):
- for skipped in ('CVS', '.svn', '.hg'):
+ for skipped in ("CVS", ".svn", ".hg"):
if skipped in dirs:
dirs.remove(skipped)
- if 'tests.py' in files:
+ if "tests.py" in files:
if not self.testonedir(dirname, exitfirst):
break
dirs[:] = []
else:
basename = osp.basename(dirname)
- if basename in ('test', 'tests'):
+ if basename in ("test", "tests"):
print("going into", dirname)
# we found a testdir, let's explore it !
if not self.testonedir(dirname, exitfirst):
@@ -492,11 +509,10 @@ class DjangoTester(PyTester):
"""
# special django behaviour : if tests are splitted in several files,
# remove the main tests.py file and tests each test file separately
- testfiles = [fpath for fpath in abspath_listdir(testdir)
- if this_is_a_testfile(fpath)]
+ testfiles = [fpath for fpath in abspath_listdir(testdir) if this_is_a_testfile(fpath)]
if len(testfiles) > 1:
try:
- testfiles.remove(osp.join(testdir, 'tests.py'))
+ testfiles.remove(osp.join(testdir, "tests.py"))
except ValueError:
pass
for filename in testfiles:
@@ -519,8 +535,7 @@ class DjangoTester(PyTester):
os.chdir(dirname)
self.load_django_settings(dirname)
modname = osp.basename(filename)[:-3]
- print((' %s ' % osp.basename(filename)).center(70, '='),
- file=sys.stderr)
+ print((" %s " % osp.basename(filename)).center(70, "="), file=sys.stderr)
try:
try:
tstart, cstart = time(), process_time()
@@ -534,10 +549,11 @@ class DjangoTester(PyTester):
raise
except Exception as exc:
import traceback
+
traceback.print_exc()
self.report.failed_to_test_module(filename)
- print('unhandled exception occurred while testing', modname)
- print('error: %s' % exc)
+ print("unhandled exception occurred while testing", modname)
+ print("error: %s" % exc)
return None
finally:
self.after_testfile()
@@ -549,9 +565,11 @@ def make_parser():
"""creates the OptionParser instance
"""
from optparse import OptionParser
+
parser = OptionParser(usage=PYTEST_DOC)
parser.newargs = []
+
def rebuild_cmdline(option, opt, value, parser):
"""carry the option to unittest_main"""
parser.newargs.append(opt)
@@ -564,50 +582,89 @@ def make_parser():
setattr(parser.values, option.dest, True)
def capture_and_rebuild(option, opt, value, parser):
- warnings.simplefilter('ignore', DeprecationWarning)
+ warnings.simplefilter("ignore", DeprecationWarning)
rebuild_cmdline(option, opt, value, parser)
# logilab-pytest options
- parser.add_option('-t', dest='testdir', default=None,
- help="directory where the tests will be found")
- parser.add_option('-d', dest='dbc', default=False,
- action="store_true", help="enable design-by-contract")
+ parser.add_option(
+ "-t", dest="testdir", default=None, help="directory where the tests will be found"
+ )
+ parser.add_option(
+ "-d", dest="dbc", default=False, action="store_true", help="enable design-by-contract"
+ )
# unittest_main options provided and passed through logilab-pytest
- parser.add_option('-v', '--verbose', callback=rebuild_cmdline,
- action="callback", help="Verbose output")
- parser.add_option('-i', '--pdb', callback=rebuild_and_store,
- dest="pdb", action="callback",
- help="Enable test failure inspection")
- parser.add_option('-x', '--exitfirst', callback=rebuild_and_store,
- dest="exitfirst", default=False,
- action="callback", help="Exit on first failure "
- "(only make sense when logilab-pytest run one test file)")
- parser.add_option('-R', '--restart', callback=rebuild_and_store,
- dest="restart", default=False,
- action="callback",
- help="Restart tests from where it failed (implies exitfirst) "
- "(only make sense if tests previously ran with exitfirst only)")
- parser.add_option('--color', callback=rebuild_cmdline,
- action="callback",
- help="colorize tracebacks")
- parser.add_option('-s', '--skip',
- # XXX: I wish I could use the callback action but it
- # doesn't seem to be able to get the value
- # associated to the option
- action="store", dest="skipped", default=None,
- help="test names matching this name will be skipped "
- "to skip several patterns, use commas")
- parser.add_option('-q', '--quiet', callback=rebuild_cmdline,
- action="callback", help="Minimal output")
- parser.add_option('-P', '--profile', default=None, dest='profile',
- help="Profile execution and store data in the given file")
- parser.add_option('-m', '--match', default=None, dest='tags_pattern',
- help="only execute test whose tag match the current pattern")
+ parser.add_option(
+ "-v", "--verbose", callback=rebuild_cmdline, action="callback", help="Verbose output"
+ )
+ parser.add_option(
+ "-i",
+ "--pdb",
+ callback=rebuild_and_store,
+ dest="pdb",
+ action="callback",
+ help="Enable test failure inspection",
+ )
+ parser.add_option(
+ "-x",
+ "--exitfirst",
+ callback=rebuild_and_store,
+ dest="exitfirst",
+ default=False,
+ action="callback",
+ help="Exit on first failure " "(only make sense when logilab-pytest run one test file)",
+ )
+ parser.add_option(
+ "-R",
+ "--restart",
+ callback=rebuild_and_store,
+ dest="restart",
+ default=False,
+ action="callback",
+ help="Restart tests from where it failed (implies exitfirst) "
+ "(only make sense if tests previously ran with exitfirst only)",
+ )
+ parser.add_option(
+ "--color", callback=rebuild_cmdline, action="callback", help="colorize tracebacks"
+ )
+ parser.add_option(
+ "-s",
+ "--skip",
+ # XXX: I wish I could use the callback action but it
+ # doesn't seem to be able to get the value
+ # associated to the option
+ action="store",
+ dest="skipped",
+ default=None,
+ help="test names matching this name will be skipped "
+ "to skip several patterns, use commas",
+ )
+ parser.add_option(
+ "-q", "--quiet", callback=rebuild_cmdline, action="callback", help="Minimal output"
+ )
+ parser.add_option(
+ "-P",
+ "--profile",
+ default=None,
+ dest="profile",
+ help="Profile execution and store data in the given file",
+ )
+ parser.add_option(
+ "-m",
+ "--match",
+ default=None,
+ dest="tags_pattern",
+ help="only execute test whose tag match the current pattern",
+ )
if DJANGO_FOUND:
- parser.add_option('-J', '--django', dest='django', default=False,
- action="store_true",
- help='use logilab-pytest for django test cases')
+ parser.add_option(
+ "-J",
+ "--django",
+ dest="django",
+ default=False,
+ action="store_true",
+ help="use logilab-pytest for django test cases",
+ )
return parser
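
Taken together, the options wired up above support invocations such as the following (file names and patterns invented; the actual script name depends on how logilab-common was installed):

    pytest -x -v test_cache.py   # verbose, stop at the first failure
    pytest -R                    # restart: rerun only tests that failed last time
    pytest -s slow,network       # skip tests whose name matches either pattern
    pytest -m web                # only run tests whose tags match the pattern
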
@@ -617,7 +674,7 @@ def parseargs(parser):
"""
# parse the command line
options, args = parser.parse_args()
- filenames = [arg for arg in args if arg.endswith('.py')]
+ filenames = [arg for arg in args if arg.endswith(".py")]
if filenames:
if len(filenames) > 1:
parser.error("only one filename is acceptable")
@@ -629,7 +686,7 @@ def parseargs(parser):
testlib.ENABLE_DBC = options.dbc
newargs = parser.newargs
if options.skipped:
- newargs.extend(['--skip', options.skipped])
+ newargs.extend(["--skip", options.skipped])
# restart implies exitfirst
if options.restart:
options.exitfirst = True
@@ -639,8 +696,7 @@ def parseargs(parser):
return options, explicitfile
-
-@deprecated('[logilab-common 1.3] logilab-pytest is deprecated, use another test runner')
+@deprecated("[logilab-common 1.3] logilab-pytest is deprecated, use another test runner")
def run():
parser = make_parser()
rootdir, testercls = project_root(parser)
@@ -648,8 +704,8 @@ def run():
# mock a new command line
sys.argv[1:] = parser.newargs
cvg = None
- if not '' in sys.path:
- sys.path.insert(0, '')
+ if not "" in sys.path:
+ sys.path.insert(0, "")
if DJANGO_FOUND and options.django:
tester = DjangoTester(cvg, options)
else:
@@ -664,21 +720,24 @@ def run():
try:
if options.profile:
import hotshot
+
prof = hotshot.Profile(options.profile)
prof.runcall(cmd, *args)
prof.close()
- print('profile data saved in', options.profile)
+ print("profile data saved in", options.profile)
else:
cmd(*args)
except SystemExit:
raise
except:
import traceback
+
traceback.print_exc()
finally:
tester.show_report()
sys.exit(tester.errcode)
+
class SkipAwareTestProgram(unittest.TestProgram):
# XXX: don't try to stay close to unittest.py, use optparse
USAGE = """\
@@ -705,15 +764,23 @@ Examples:
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
- def __init__(self, module='__main__', defaultTest=None, batchmode=False,
- cvg=None, options=None, outstream=sys.stderr):
+
+ def __init__(
+ self,
+ module="__main__",
+ defaultTest=None,
+ batchmode=False,
+ cvg=None,
+ options=None,
+ outstream=sys.stderr,
+ ):
self.batchmode = batchmode
self.cvg = cvg
self.options = options
self.outstream = outstream
super(SkipAwareTestProgram, self).__init__(
- module=module, defaultTest=defaultTest,
- testLoader=NonStrictTestLoader())
+ module=module, defaultTest=defaultTest, testLoader=NonStrictTestLoader()
+ )
def parseArgs(self, argv):
self.pdbmode = False
@@ -724,40 +791,51 @@ Examples:
self.colorize = False
self.profile_name = None
import getopt
+
try:
- options, args = getopt.getopt(argv[1:], 'hHvixrqcp:s:m:P:',
- ['help', 'verbose', 'quiet', 'pdb',
- 'exitfirst', 'restart',
- 'skip=', 'color', 'match=', 'profile='])
+ options, args = getopt.getopt(
+ argv[1:],
+ "hHvixrqcp:s:m:P:",
+ [
+ "help",
+ "verbose",
+ "quiet",
+ "pdb",
+ "exitfirst",
+ "restart",
+ "skip=",
+ "color",
+ "match=",
+ "profile=",
+ ],
+ )
for opt, value in options:
- if opt in ('-h', '-H', '--help'):
+ if opt in ("-h", "-H", "--help"):
self.usageExit()
- if opt in ('-i', '--pdb'):
+ if opt in ("-i", "--pdb"):
self.pdbmode = True
- if opt in ('-x', '--exitfirst'):
+ if opt in ("-x", "--exitfirst"):
self.exitfirst = True
- if opt in ('-r', '--restart'):
+ if opt in ("-r", "--restart"):
self.restart = True
self.exitfirst = True
- if opt in ('-q', '--quiet'):
+ if opt in ("-q", "--quiet"):
self.verbosity = 0
- if opt in ('-v', '--verbose'):
+ if opt in ("-v", "--verbose"):
self.verbosity = 2
- if opt in ('-s', '--skip'):
- self.skipped_patterns = [pat.strip() for pat in
- value.split(', ')]
- if opt == '--color':
+ if opt in ("-s", "--skip"):
+ self.skipped_patterns = [pat.strip() for pat in value.split(", ")]
+ if opt == "--color":
self.colorize = True
- if opt in ('-m', '--match'):
- #self.tags_pattern = value
+ if opt in ("-m", "--match"):
+ # self.tags_pattern = value
self.options["tag_pattern"] = value
- if opt in ('-P', '--profile'):
+ if opt in ("-P", "--profile"):
self.profile_name = value
self.testLoader.skipped_patterns = self.skipped_patterns
if len(args) == 0 and self.defaultTest is None:
- suitefunc = getattr(self.module, 'suite', None)
- if isinstance(suitefunc, (types.FunctionType,
- types.MethodType)):
+ suitefunc = getattr(self.module, "suite", None)
+ if isinstance(suitefunc, (types.FunctionType, types.MethodType)):
self.test = self.module.suite()
else:
self.test = self.testLoader.loadTestsFromModule(self.module)
@@ -766,7 +844,7 @@ Examples:
self.test_pattern = args[0]
self.testNames = args
else:
- self.testNames = (self.defaultTest, )
+ self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error as msg:
self.usageExit(msg)
@@ -774,21 +852,24 @@ Examples:
def runTests(self):
if self.profile_name:
import cProfile
- cProfile.runctx('self._runTests()', globals(), locals(), self.profile_name )
+
+ cProfile.runctx("self._runTests()", globals(), locals(), self.profile_name)
else:
return self._runTests()
def _runTests(self):
- self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity,
- stream=self.outstream,
- exitfirst=self.exitfirst,
- pdbmode=self.pdbmode,
- cvg=self.cvg,
- test_pattern=self.test_pattern,
- skipped_patterns=self.skipped_patterns,
- colorize=self.colorize,
- batchmode=self.batchmode,
- options=self.options)
+ self.testRunner = SkipAwareTextTestRunner(
+ verbosity=self.verbosity,
+ stream=self.outstream,
+ exitfirst=self.exitfirst,
+ pdbmode=self.pdbmode,
+ cvg=self.cvg,
+ test_pattern=self.test_pattern,
+ skipped_patterns=self.skipped_patterns,
+ colorize=self.colorize,
+ batchmode=self.batchmode,
+ options=self.options,
+ )
def removeSucceededTests(obj, succTests):
""" Recursive function that removes succTests from
@@ -801,32 +882,33 @@ Examples:
if isinstance(el, unittest.TestSuite):
removeSucceededTests(el, succTests)
elif isinstance(el, unittest.TestCase):
- descr = '.'.join((el.__class__.__module__,
- el.__class__.__name__,
- el._testMethodName))
+ descr = ".".join(
+ (el.__class__.__module__, el.__class__.__name__, el._testMethodName)
+ )
if descr in succTests:
obj.remove(el)
+
# take care, self.options may be None
- if getattr(self.options, 'restart', False):
+ if getattr(self.options, "restart", False):
# retrieve succeeded tests from FILE_RESTART
try:
- restartfile = open(FILE_RESTART, 'r')
+ restartfile = open(FILE_RESTART, "r")
try:
- succeededtests = list(elem.rstrip('\n\r') for elem in
- restartfile.readlines())
+ succeededtests = list(elem.rstrip("\n\r") for elem in restartfile.readlines())
removeSucceededTests(self.test, succeededtests)
finally:
restartfile.close()
except Exception as ex:
- raise Exception("Error while reading succeeded tests into %s: %s"
- % (osp.join(os.getcwd(), FILE_RESTART), ex))
+ raise Exception(
+ "Error while reading succeeded tests into %s: %s"
+ % (osp.join(os.getcwd(), FILE_RESTART), ex)
+ )
result = self.testRunner.run(self.test)
# help garbage collection: we want TestSuite, which hold refs to every
# executed TestCase, to be gc'ed
del self.test
- if getattr(result, "debuggers", None) and \
- getattr(self, "pdbmode", None):
+ if getattr(result, "debuggers", None) and getattr(self, "pdbmode", None):
start_interactive_mode(result)
if not getattr(self, "batchmode", None):
sys.exit(not result.wasSuccessful())
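
The restart file read here is the one the monkeypatched TestCase.__call__ further down appends to: one dotted test identifier per line, matching the descr built in removeSucceededTests. Its content looks like this (names invented):

    test_cache.CacheTC.test_set
    test_cache.CacheTC.test_get
    test_utils.UtilsTC.test_merge
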
@@ -834,13 +916,20 @@ Examples:
class SkipAwareTextTestRunner(unittest.TextTestRunner):
-
- def __init__(self, stream=sys.stderr, verbosity=1,
- exitfirst=False, pdbmode=False, cvg=None, test_pattern=None,
- skipped_patterns=(), colorize=False, batchmode=False,
- options=None):
- super(SkipAwareTextTestRunner, self).__init__(stream=stream,
- verbosity=verbosity)
+ def __init__(
+ self,
+ stream=sys.stderr,
+ verbosity=1,
+ exitfirst=False,
+ pdbmode=False,
+ cvg=None,
+ test_pattern=None,
+ skipped_patterns=(),
+ colorize=False,
+ batchmode=False,
+ options=None,
+ ):
+ super(SkipAwareTextTestRunner, self).__init__(stream=stream, verbosity=verbosity)
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
@@ -859,23 +948,23 @@ class SkipAwareTextTestRunner(unittest.TextTestRunner):
else:
if isinstance(test, testlib.TestCase):
meth = test._get_test_method()
- testname = '%s.%s' % (test.__name__, meth.__name__)
+ testname = "%s.%s" % (test.__name__, meth.__name__)
elif isinstance(test, types.FunctionType):
func = test
testname = func.__name__
elif isinstance(test, types.MethodType):
cls = test.__self__.__class__
- testname = '%s.%s' % (cls.__name__, test.__name__)
+ testname = "%s.%s" % (cls.__name__, test.__name__)
else:
- return True # Not sure when this happens
+ return True # Not sure when this happens
if isgeneratorfunction(test) and skipgenerator:
- return self.does_match_tags(test) # Let inner tests decide at run time
+ return self.does_match_tags(test) # Let inner tests decide at run time
if self._this_is_skipped(testname):
- return False # this was explicitly skipped
+ return False # this was explicitly skipped
if self.test_pattern is not None:
try:
- classpattern, testpattern = self.test_pattern.split('.')
- klass, name = testname.split('.')
+ classpattern, testpattern = self.test_pattern.split(".")
+ klass, name = testname.split(".")
if classpattern not in klass or testpattern not in name:
return False
except ValueError:
@@ -886,18 +975,24 @@ class SkipAwareTextTestRunner(unittest.TextTestRunner):
def does_match_tags(self, test: Callable) -> bool:
if self.options is not None:
- tags_pattern = getattr(self.options, 'tags_pattern', None)
+ tags_pattern = getattr(self.options, "tags_pattern", None)
if tags_pattern is not None:
- tags = getattr(test, 'tags', testlib.Tags())
+ tags = getattr(test, "tags", testlib.Tags())
if tags.inherit and isinstance(test, types.MethodType):
- tags = tags | getattr(test.__self__.__class__, 'tags', testlib.Tags())
+ tags = tags | getattr(test.__self__.__class__, "tags", testlib.Tags())
return tags.match(tags_pattern)
- return True # no pattern
-
- def _makeResult(self) -> 'SkipAwareTestResult':
- return SkipAwareTestResult(self.stream, self.descriptions,
- self.verbosity, self.exitfirst,
- self.pdbmode, self.cvg, self.colorize)
+ return True # no pattern
+
+ def _makeResult(self) -> "SkipAwareTestResult":
+ return SkipAwareTestResult(
+ self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.exitfirst,
+ self.pdbmode,
+ self.cvg,
+ self.colorize,
+ )
def run(self, test):
"Run the given test case or test suite."
@@ -910,43 +1005,48 @@ class SkipAwareTextTestRunner(unittest.TextTestRunner):
if not self.batchmode:
self.stream.writeln(result.separator2)
run = result.testsRun
- self.stream.writeln("Ran %d test%s in %.3fs" %
- (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
if self.colorize:
- self.stream.write(textutils.colorize_ansi("FAILED", color='red'))
+ self.stream.write(textutils.colorize_ansi("FAILED", color="red"))
else:
self.stream.write("FAILED")
else:
if self.colorize:
- self.stream.write(textutils.colorize_ansi("OK", color='green'))
+ self.stream.write(textutils.colorize_ansi("OK", color="green"))
else:
self.stream.write("OK")
- failed, errored, skipped = map(len, (result.failures,
- result.errors,
- result.skipped))
+ failed, errored, skipped = map(len, (result.failures, result.errors, result.skipped))
det_results = []
- for name, value in (("failures", result.failures),
- ("errors",result.errors),
- ("skipped", result.skipped)):
+ for name, value in (
+ ("failures", result.failures),
+ ("errors", result.errors),
+ ("skipped", result.skipped),
+ ):
if value:
det_results.append("%s=%i" % (name, len(value)))
if det_results:
self.stream.write(" (")
- self.stream.write(', '.join(det_results))
+ self.stream.write(", ".join(det_results))
self.stream.write(")")
self.stream.writeln("")
return result
class SkipAwareTestResult(unittest._TextTestResult):
-
- def __init__(self, stream: _WritelnDecorator, descriptions: bool, verbosity: int,
- exitfirst: bool = False, pdbmode: bool = False, cvg: Optional[Any] = None, colorize: bool = False) -> None:
- super(SkipAwareTestResult, self).__init__(stream,
- descriptions, verbosity)
+ def __init__(
+ self,
+ stream: _WritelnDecorator,
+ descriptions: bool,
+ verbosity: int,
+ exitfirst: bool = False,
+ pdbmode: bool = False,
+ cvg: Optional[Any] = None,
+ colorize: bool = False,
+ ) -> None:
+ super(SkipAwareTestResult, self).__init__(stream, descriptions, verbosity)
self.skipped: List[Tuple[Any, Any]] = []
self.debuggers: List = []
self.fail_descrs: List = []
@@ -959,10 +1059,10 @@ class SkipAwareTestResult(unittest._TextTestResult):
self.verbose = verbosity > 1
def descrs_for(self, flavour: str) -> List[Tuple[int, str]]:
- return getattr(self, '%s_descrs' % flavour.lower())
+ return getattr(self, "%s_descrs" % flavour.lower())
def _create_pdb(self, test_descr: str, flavour: str) -> None:
- self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
+ self.descrs_for(flavour).append((len(self.debuggers), test_descr))
if self.pdbmode:
self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
@@ -982,34 +1082,34 @@ class SkipAwareTestResult(unittest._TextTestResult):
--verbose is passed
"""
exctype, exc, tb = err
- output = ['Traceback (most recent call last)']
+ output = ["Traceback (most recent call last)"]
frames = inspect.getinnerframes(tb)
colorize = self.colorize
frames = enumerate(self._iter_valid_frames(frames))
for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
filename = osp.abspath(filename)
- if ctx is None: # pyc files or C extensions for instance
- source = '<no source available>'
+ if ctx is None: # pyc files or C extensions for instance
+ source = "<no source available>"
else:
- source = ''.join(ctx)
+ source = "".join(ctx)
if colorize:
- filename = textutils.colorize_ansi(filename, 'magenta')
+ filename = textutils.colorize_ansi(filename, "magenta")
source = colorize_source(source)
output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
- output.append(' %s' % source.strip())
+ output.append(" %s" % source.strip())
if self.verbose:
- output.append('%r == %r' % (dir(frame), test.__module__))
- output.append('')
- output.append(' ' + ' local variables '.center(66, '-'))
+ output.append("%r == %r" % (dir(frame), test.__module__))
+ output.append("")
+ output.append(" " + " local variables ".center(66, "-"))
for varname, value in sorted(frame.f_locals.items()):
- output.append(' %s: %r' % (varname, value))
- if varname == 'self': # special handy processing for self
+ output.append(" %s: %r" % (varname, value))
+ if varname == "self": # special handy processing for self
for varname, value in sorted(vars(value).items()):
- output.append(' self.%s: %r' % (varname, value))
- output.append(' ' + '-' * 66)
- output.append('')
- output.append(''.join(traceback.format_exception_only(exctype, exc)))
- return '\n'.join(output)
+ output.append(" self.%s: %r" % (varname, value))
+ output.append(" " + "-" * 66)
+ output.append("")
+ output.append("".join(traceback.format_exception_only(exctype, exc)))
+ return "\n".join(output)
def addError(self, test, err):
"""err -> (exc_type, exc, tcbk)"""
@@ -1022,21 +1122,21 @@ class SkipAwareTestResult(unittest._TextTestResult):
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addError(test, err)
- self._create_pdb(descr, 'error')
+ self._create_pdb(descr, "error")
def addFailure(self, test, err):
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addFailure(test, err)
- self._create_pdb(descr, 'fail')
+ self._create_pdb(descr, "fail")
def addSkip(self, test, reason):
self.skipped.append((test, reason))
if self.showAll:
self.stream.writeln("SKIPPED")
elif self.dots:
- self.stream.write('S')
+ self.stream.write("S")
def printErrors(self) -> None:
super(SkipAwareTestResult, self).printErrors()
@@ -1047,7 +1147,7 @@ class SkipAwareTestResult(unittest._TextTestResult):
for test, err in self.skipped:
descr = self.getDescription(test)
self.stream.writeln(self.separator1)
- self.stream.writeln("%s: %s" % ('SKIPPED', descr))
+ self.stream.writeln("%s: %s" % ("SKIPPED", descr))
self.stream.writeln("\t%s" % err)
def printErrorList(self, flavour, errors):
@@ -1056,32 +1156,42 @@ class SkipAwareTestResult(unittest._TextTestResult):
self.stream.writeln("%s: %s" % (flavour, descr))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
- self.stream.writeln('no stdout'.center(len(self.separator2)))
- self.stream.writeln('no stderr'.center(len(self.separator2)))
+ self.stream.writeln("no stdout".center(len(self.separator2)))
+ self.stream.writeln("no stderr".center(len(self.separator2)))
from .decorators import monkeypatch
+
orig_call = testlib.TestCase.__call__
-@monkeypatch(testlib.TestCase, '__call__')
-def call(self: Any, result: SkipAwareTestResult = None, runcondition: Optional[Callable] = None, options: Optional[Any] = None) -> None:
+
+
+@monkeypatch(testlib.TestCase, "__call__")
+def call(
+ self: Any,
+ result: SkipAwareTestResult = None,
+ runcondition: Optional[Callable] = None,
+ options: Optional[Any] = None,
+) -> None:
orig_call(self, result=result, runcondition=runcondition, options=options)
# mypy: Item "None" of "Optional[Any]" has no attribute "exitfirst"
# we check it first in the if
if hasattr(options, "exitfirst") and options.exitfirst: # type: ignore
# add this test to restart file
try:
- restartfile = open(FILE_RESTART, 'a')
+ restartfile = open(FILE_RESTART, "a")
try:
- descr = '.'.join((self.__class__.__module__,
- self.__class__.__name__,
- self._testMethodName))
- restartfile.write(descr+os.linesep)
+ descr = ".".join(
+ (self.__class__.__module__, self.__class__.__name__, self._testMethodName)
+ )
+ restartfile.write(descr + os.linesep)
finally:
restartfile.close()
except Exception:
- print("Error while saving succeeded test into",
- osp.join(os.getcwd(), FILE_RESTART),
- file=sys.__stderr__)
+ print(
+ "Error while saving succeeded test into",
+ osp.join(os.getcwd(), FILE_RESTART),
+ file=sys.__stderr__,
+ )
raise
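
monkeypatch comes from logilab.common.decorators and installs the decorated function as an attribute of the given class, which is how the original TestCase.__call__ saved above gets replaced. A minimal sketch of that behaviour, not the actual implementation:

    def monkeypatch(klass, methodname=None):
        def decorator(func):
            # bind func on klass under methodname (or the function's own name)
            setattr(klass, methodname or func.__name__, func)
            return func
        return decorator
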
@@ -1129,7 +1239,7 @@ class NonStrictTestLoader(unittest.TestLoader):
for obj in vars(module).values():
if isclass(obj) and issubclass(obj, unittest.TestCase):
classname = obj.__name__
- if classname[0] == '_' or self._this_is_skipped(classname):
+ if classname[0] == "_" or self._this_is_skipped(classname):
continue
methodnames = []
# obj is a TestCase class
@@ -1147,14 +1257,16 @@ class NonStrictTestLoader(unittest.TestLoader):
suite = getattr(module, suitename)()
except AttributeError:
return []
- assert hasattr(suite, '_tests'), \
- "%s.%s is not a valid TestSuite" % (module.__name__, suitename)
+ assert hasattr(suite, "_tests"), "%s.%s is not a valid TestSuite" % (
+ module.__name__,
+ suitename,
+ )
# python2.3 does not implement __iter__ on suites, we need to return
# _tests explicitly
return suite._tests
def loadTestsFromName(self, name, module=None):
- parts = name.split('.')
+ parts = name.split(".")
if module is None or len(parts) > 2:
# let the base class do its job here
return [super(NonStrictTestLoader, self).loadTestsFromName(name)]
@@ -1162,34 +1274,35 @@ class NonStrictTestLoader(unittest.TestLoader):
collected = []
if len(parts) == 1:
pattern = parts[0]
- if callable(getattr(module, pattern, None)
- ) and pattern not in tests:
+ if callable(getattr(module, pattern, None)) and pattern not in tests:
# consider it as a suite
return self.loadTestsFromSuite(module, pattern)
if pattern in tests:
# case python unittest_foo.py MyTestTC
klass, methodnames = tests[pattern]
for methodname in methodnames:
- collected = [klass(methodname)
- for methodname in methodnames]
+ collected = [klass(methodname) for methodname in methodnames]
else:
# case python unittest_foo.py something
for klass, methodnames in tests.values():
# skip methodname if matched by skipped_patterns
for skip_pattern in self.skipped_patterns:
- methodnames = [methodname
- for methodname in methodnames
- if skip_pattern not in methodname]
- collected += [klass(methodname)
- for methodname in methodnames
- if pattern in methodname]
+ methodnames = [
+ methodname
+ for methodname in methodnames
+ if skip_pattern not in methodname
+ ]
+ collected += [
+ klass(methodname) for methodname in methodnames if pattern in methodname
+ ]
elif len(parts) == 2:
# case "MyClass.test_1"
classname, pattern = parts
klass, methodnames = tests.get(classname, (None, []))
for methodname in methodnames:
- collected = [klass(methodname) for methodname in methodnames
- if pattern in methodname]
+ collected = [
+ klass(methodname) for methodname in methodnames if pattern in methodname
+ ]
return collected
def _this_is_skipped(self, testedname: str) -> bool:
@@ -1202,10 +1315,9 @@ class NonStrictTestLoader(unittest.TestLoader):
"""
is_skipped = self._this_is_skipped
classname = testCaseClass.__name__
- if classname[0] == '_' or is_skipped(classname):
+ if classname[0] == "_" or is_skipped(classname):
return []
- testnames = super(NonStrictTestLoader, self).getTestCaseNames(
- testCaseClass)
+ testnames = super(NonStrictTestLoader, self).getTestCaseNames(testCaseClass)
return [testname for testname in testnames if not is_skipped(testname)]
@@ -1214,13 +1326,27 @@ class NonStrictTestLoader(unittest.TestLoader):
# It is used to monkeypatch the original implementation to support
# extra runcondition and options arguments (see in testlib.py)
-def _ts_run(self: Any, result: SkipAwareTestResult, debug: bool = False, runcondition: Callable = None, options: Optional[Any] = None) -> SkipAwareTestResult:
+
+def _ts_run(
+ self: Any,
+ result: SkipAwareTestResult,
+ debug: bool = False,
+ runcondition: Callable = None,
+ options: Optional[Any] = None,
+) -> SkipAwareTestResult:
self._wrapped_run(result, runcondition=runcondition, options=options)
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
return result
-def _ts_wrapped_run(self: Any, result: SkipAwareTestResult, debug: bool = False, runcondition: Callable = None, options: Optional[Any] = None) -> SkipAwareTestResult:
+
+def _ts_wrapped_run(
+ self: Any,
+ result: SkipAwareTestResult,
+ debug: bool = False,
+ runcondition: Callable = None,
+ options: Optional[Any] = None,
+) -> SkipAwareTestResult:
for test in self:
if result.shouldStop:
break
@@ -1229,8 +1355,9 @@ def _ts_wrapped_run(self: Any, result: SkipAwareTestResult, debug: bool = False,
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
- if (getattr(test.__class__, '_classSetupFailed', False) or
- getattr(result, '_moduleSetUpFailed', False)):
+ if getattr(test.__class__, "_classSetupFailed", False) or getattr(
+ result, "_moduleSetUpFailed", False
+ ):
continue
# --- modifications to deal with _wrapped_run ---
@@ -1240,7 +1367,7 @@ def _ts_wrapped_run(self: Any, result: SkipAwareTestResult, debug: bool = False,
# test(result)
# else:
# test.debug()
- if hasattr(test, '_wrapped_run'):
+ if hasattr(test, "_wrapped_run"):
try:
test._wrapped_run(result, debug, runcondition=runcondition, options=options)
except TypeError:
@@ -1255,13 +1382,20 @@ def _ts_wrapped_run(self: Any, result: SkipAwareTestResult, debug: bool = False,
# --- end of modifications to deal with _wrapped_run ---
return result
+
if sys.version_info >= (2, 7):
# The function below implements a modified version of the
# TestSuite.run method that is provided with python 2.7, in
# unittest/suite.py
- def _ts_run(self: Any, result: SkipAwareTestResult, debug: bool = False, runcondition: Callable = None, options: Optional[Any] = None) -> SkipAwareTestResult:
+ def _ts_run(
+ self: Any,
+ result: SkipAwareTestResult,
+ debug: bool = False,
+ runcondition: Callable = None,
+ options: Optional[Any] = None,
+ ) -> SkipAwareTestResult:
topLevel = False
- if getattr(result, '_testRunEntered', False) is False:
+ if getattr(result, "_testRunEntered", False) is False:
result._testRunEntered = topLevel = True
self._wrapped_run(result, debug, runcondition, options)
@@ -1287,8 +1421,7 @@ def enable_dbc(*args):
from logilab.aspects.weaver import weaver
from logilab.aspects.lib.contracts import ContractAspect
except ImportError:
- sys.stderr.write(
- 'Warning: logilab.aspects is not available. Contracts disabled.')
+ sys.stderr.write("Warning: logilab.aspects is not available. Contracts disabled.")
return False
for arg in args:
weaver.weave_module(arg, ContractAspect)
@@ -1304,13 +1437,12 @@ unittest.TestProgram = SkipAwareTestProgram
if sys.version_info >= (2, 4):
doctest.DocTestCase.__bases__ = (testlib.TestCase,)
# XXX check python2.6 compatibility
- #doctest.DocTestCase._cleanups = []
- #doctest.DocTestCase._out = []
+ # doctest.DocTestCase._cleanups = []
+ # doctest.DocTestCase._out = []
else:
unittest.FunctionTestCase.__bases__ = (testlib.TestCase,)
unittest.TestSuite.run = _ts_run
unittest.TestSuite._wrapped_run = _ts_wrapped_run
-if __name__ == '__main__':
+if __name__ == "__main__":
run()
-